diff --git a/.action_templates/e2e-fork-template.yaml b/.action_templates/e2e-fork-template.yaml new file mode 100644 index 000000000..c6378cceb --- /dev/null +++ b/.action_templates/e2e-fork-template.yaml @@ -0,0 +1,27 @@ +name: Run E2E Fork +jobs: + - template: display-github-context + - template: setup + # Dependabot gets a read-only GitHub token, and so must use pull_request_target instead of pull_request. + if: contains(github.event.pull_request.labels.*.name, 'dependencies') || contains(github.event.pull_request.labels.*.name, 'safe-to-test') + steps: + - template: cancel-previous + - template: checkout-fork + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: tests + steps: + - template: cancel-previous + - template: checkout-fork + - template: set-run-status + - template: setup-and-install-python + - template: setup-kind-cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - template: run-test-matrix + - template: save-run-status + - template: dump-and-upload-diagnostics + +events: + - template: pull-request-target diff --git a/.action_templates/e2e-pr-template.yaml b/.action_templates/e2e-pr-template.yaml new file mode 100644 index 000000000..8c4e79d14 --- /dev/null +++ b/.action_templates/e2e-pr-template.yaml @@ -0,0 +1,29 @@ +name: Run E2E +jobs: + - template: display-github-context + - template: setup + # Run on master, when a PR is created from a branch in this repository, or when manually triggered. + if: github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' || (github.event.pull_request.head.repo.full_name == github.repository && github.actor != 'dependabot[bot]') + steps: + - template: cancel-previous + - template: checkout + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: tests + steps: + - template: cancel-previous + - template: checkout + - template: set-run-status + - template: setup-and-install-python + - template: setup-kind-cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - template: run-test-matrix + - template: save-run-status + - template: dump-and-upload-diagnostics + +events: + - template: on-pull-request-master + - template: on-push-master + - template: workflow-dispatch diff --git a/.action_templates/e2e-single-template.yaml b/.action_templates/e2e-single-template.yaml new file mode 100644 index 000000000..36e586af3 --- /dev/null +++ b/.action_templates/e2e-single-template.yaml @@ -0,0 +1,20 @@ +name: Run Single E2E +jobs: + - template: display-github-context + - template: setup + steps: + - template: checkout + - template: setup-and-install-python + - template: quay-login + - template: set-up-qemu + - template: build-and-push-development-images + - template: single-test + steps: + - template: checkout + - template: setup-and-install-python + - template: setup-kind-cluster + - template: run-test-single + - template: dump-and-upload-diagnostics-always + +events: + - template: single-e2e-workflow-dispatch diff --git a/.action_templates/events/on-pull-request-master.yaml b/.action_templates/events/on-pull-request-master.yaml new file mode 100644 index 000000000..9107a3d91 --- /dev/null +++ b/.action_templates/events/on-pull-request-master.yaml @@ -0,0 +1,5 @@ +pull_request: + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/on-push-master.yaml
b/.action_templates/events/on-push-master.yaml new file mode 100644 index 000000000..844e045c3 --- /dev/null +++ b/.action_templates/events/on-push-master.yaml @@ -0,0 +1,5 @@ +push: + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/pull-request-target.yaml b/.action_templates/events/pull-request-target.yaml new file mode 100644 index 000000000..1e7743cd8 --- /dev/null +++ b/.action_templates/events/pull-request-target.yaml @@ -0,0 +1,7 @@ +# pull_request_target means that the secrets of this repo will be used. +pull_request_target: + types: [labeled] + branches: + - master + paths-ignore: + - 'docs/**' diff --git a/.action_templates/events/single-e2e-workflow-dispatch.yaml b/.action_templates/events/single-e2e-workflow-dispatch.yaml new file mode 100644 index 000000000..01cc9fcae --- /dev/null +++ b/.action_templates/events/single-e2e-workflow-dispatch.yaml @@ -0,0 +1,13 @@ +workflow_dispatch: + inputs: + distro: + description: 'Distro to run test' + required: true + default: "ubuntu" + test-name: + description: 'Name of test to run' + required: true + cluster-wide: + description: 'Whether or not the test is cluster wide' + required: true + default: "false" diff --git a/.action_templates/events/workflow-dispatch.yaml b/.action_templates/events/workflow-dispatch.yaml new file mode 100644 index 000000000..5de950ef3 --- /dev/null +++ b/.action_templates/events/workflow-dispatch.yaml @@ -0,0 +1 @@ +workflow_dispatch: {} diff --git a/.action_templates/jobs/display-github-context.yaml b/.action_templates/jobs/display-github-context.yaml new file mode 100644 index 000000000..37ecb1972 --- /dev/null +++ b/.action_templates/jobs/display-github-context.yaml @@ -0,0 +1,8 @@ +action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" diff --git a/.action_templates/jobs/setup.yaml b/.action_templates/jobs/setup.yaml new file mode 100644 index 000000000..ad46dc26d --- /dev/null +++ b/.action_templates/jobs/setup.yaml @@ -0,0 +1,11 @@ +setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e diff --git a/.action_templates/jobs/single-test.yaml b/.action_templates/jobs/single-test.yaml new file mode 100644 index 000000000..b06a8a918 --- /dev/null +++ b/.action_templates/jobs/single-test.yaml @@ -0,0 +1,3 @@ +single-test: + runs-on: ubuntu-latest + needs: [setup] diff --git a/.action_templates/jobs/tests.yaml b/.action_templates/jobs/tests.yaml new file mode 100644 index 000000000..f360ee3d6 --- /dev/null +++ b/.action_templates/jobs/tests.yaml @@ -0,0 +1,68 @@ +tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + 
distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts + distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi + - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi diff --git a/.action_templates/steps/build-and-push-development-images.yaml b/.action_templates/steps/build-and-push-development-images.yaml new file mode 100644 index 000000000..4fe3df401 --- /dev/null +++ b/.action_templates/steps/build-and-push-development-images.yaml @@ -0,0 +1,6 @@ +- name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + version_id: "${{ github.run_id }}" diff --git a/.action_templates/steps/cancel-previous.yaml b/.action_templates/steps/cancel-previous.yaml new file mode 100644 index 000000000..301d5af50 --- /dev/null +++ b/.action_templates/steps/cancel-previous.yaml @@ -0,0 +1,4 @@ +- name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} diff --git a/.action_templates/steps/checkout-fork.yaml b/.action_templates/steps/checkout-fork.yaml new file mode 100644 index 000000000..abd35041c --- /dev/null +++ b/.action_templates/steps/checkout-fork.yaml @@ -0,0 +1,9 @@ +# We checkout the forked repository code. 
+# Because we are using pull_request_target, the GitHub secrets will be passed to the workflow, +# so the code should be reviewed before it is labeled "safe-to-test". +- name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: ${{github.event.pull_request.head.repo.full_name}} + submodules: true diff --git a/.action_templates/steps/checkout.yaml b/.action_templates/steps/checkout.yaml new file mode 100644 index 000000000..da02fc2f3 --- /dev/null +++ b/.action_templates/steps/checkout.yaml @@ -0,0 +1,4 @@ +- name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true diff --git a/.action_templates/steps/dump-and-upload-diagnostics-always.yaml b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml new file mode 100644 index 000000000..968ecd9ce --- /dev/null +++ b/.action_templates/steps/dump-and-upload-diagnostics-always.yaml @@ -0,0 +1,12 @@ +- name: Dump Diagnostics + if: always() + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + +- name: Upload Diagnostics + if: always() + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: "${{ github.event.inputs.test-name }}-${{ github.event.inputs.distro }}-diagnostics" + path: "${{ github.workspace }}/diagnostics" diff --git a/.action_templates/steps/dump-and-upload-diagnostics.yaml b/.action_templates/steps/dump-and-upload-diagnostics.yaml new file mode 100644 index 000000000..17f5d2688 --- /dev/null +++ b/.action_templates/steps/dump-and-upload-diagnostics.yaml @@ -0,0 +1,13 @@ +- name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + +- name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: "${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics" + path: "${{ github.workspace }}/diagnostics" diff --git a/.action_templates/steps/quay-login.yaml b/.action_templates/steps/quay-login.yaml new file mode 100644 index 000000000..77a8dd06f --- /dev/null +++ b/.action_templates/steps/quay-login.yaml @@ -0,0 +1,6 @@ +- name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.action_templates/steps/run-test-matrix.yaml b/.action_templates/steps/run-test-matrix.yaml new file mode 100644 index 000000000..9c572a89c --- /dev/null +++ b/.action_templates/steps/run-test-matrix.yaml @@ -0,0 +1,9 @@ +- name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} diff --git a/.action_templates/steps/run-test-single.yaml b/.action_templates/steps/run-test-single.yaml new file mode 100644 index 000000000..453425961 --- /dev/null +++ b/.action_templates/steps/run-test-single.yaml @@ -0,0 +1,3 @@ +- name: Run Test Single + run: | + python3 ./scripts/dev/e2e.py --test ${{ github.event.inputs.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json
--distro ${{ github.event.inputs.distro }} --cluster-wide ${{ github.event.inputs.cluster-wide }} diff --git a/.action_templates/steps/save-run-status.yaml b/.action_templates/steps/save-run-status.yaml new file mode 100644 index 000000000..84845013b --- /dev/null +++ b/.action_templates/steps/save-run-status.yaml @@ -0,0 +1,3 @@ +- name: Save run status + if: always() + run: echo "::set-output name=last_run_status::${{ steps.e2e_test.outcome }}" > last_run_status diff --git a/.action_templates/steps/set-run-status.yaml b/.action_templates/steps/set-run-status.yaml new file mode 100644 index 000000000..9f4a76541 --- /dev/null +++ b/.action_templates/steps/set-run-status.yaml @@ -0,0 +1,17 @@ +- name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. + # see https://github.com/actions/runner/issues/432 +- name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + +- name: Set last run status + id: last_run_status + run: cat last_run_status diff --git a/.action_templates/steps/set-up-qemu.yaml b/.action_templates/steps/set-up-qemu.yaml new file mode 100644 index 000000000..c84384bfc --- /dev/null +++ b/.action_templates/steps/set-up-qemu.yaml @@ -0,0 +1,2 @@ +- name: Set up QEMU + uses: docker/setup-qemu-action@v3 diff --git a/.action_templates/steps/setup-and-install-python.yaml b/.action_templates/steps/setup-and-install-python.yaml new file mode 100644 index 000000000..b924e01ae --- /dev/null +++ b/.action_templates/steps/setup-and-install-python.yaml @@ -0,0 +1,11 @@ +- name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' +- name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} +- name: Install Python Dependencies + run: pip install -r requirements.txt diff --git a/.action_templates/steps/setup-kind-cluster.yaml b/.action_templates/steps/setup-kind-cluster.yaml new file mode 100644 index 000000000..b17558382 --- /dev/null +++ b/.action_templates/steps/setup-kind-cluster.yaml @@ -0,0 +1,11 @@ +- name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster +- name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + +- name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml diff --git a/.dockerignore b/.dockerignore index 9ff6084c6..4d9a61d4f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,3 +4,10 @@ zz_* vendor/ scripts/ .git/ +bin/ +testbin/ +.mypy_cache/ +main +__debug_bin +# allow agent LICENSE +!scripts/dev/templates/agent/LICENSE diff --git a/.evergreen.yml b/.evergreen.yml deleted file mode 100644 index c45038b32..000000000 --- a/.evergreen.yml +++ /dev/null @@ -1,450 +0,0 @@ -ignore: - - "*.md" - -functions: - setup_virtualenv: - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/setup_virtualenv.sh - 
include_expansions_in_env: - - sonar_github_token - - clone: - - command: subprocess.exec - type: setup - params: - command: "mkdir -p mongodb-kubernetes-operator" - - command: git.get_project - type: setup - params: - directory: mongodb-kubernetes-operator - - # upload_e2e_logs has the responsibility of dumping as much information as - # possible into the S3 bucket - upload_e2e_logs: - - command: s3.put - params: - aws_key: ${community_aws_access_key_id} - aws_secret: ${community_aws_secret_access_key} - local_files_include_filter_prefix: mongodb-kubernetes-operator/logs/ - local_files_include_filter: - - e2e/*.txt - - e2e/*.log - - e2e/*.json - region: us-east-1 - remote_file: logs/${task_id}/${execution}/ - bucket: community-operator-e2e-logs - permissions: public-read - content_type: text/plain - - # This is a blocker for the release process. It will *always* fail and needs to be overriden - # if the release needs to proceed. - release_blocker: - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/release_blocker - - setup_kubernetes_environment: - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator/scripts/ci - command: go run download.go - env: - URL: https://storage.googleapis.com/kubernetes-release/release/v1.15.4/bin/linux/amd64/kubectl - FILENAME: kubectl - DIR: ${workdir}/bin - - - command: subprocess.exec - type: setup - params: - working_dir: mongodb-kubernetes-operator/scripts/ci - command: go run download.go - env: - URL: https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-linux-amd64 - FILENAME: kind - DIR: ${workdir}/bin - - create_kind_cluster: - - command: subprocess.exec - type: setup - params: - add_to_path: - - ${workdir}/bin - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/create_kind_cluster.sh - env: - KUBECONFIG: ${workdir}/kube_config - - run_e2e_test: - - command: subprocess.exec - type: test - params: - working_dir: mongodb-kubernetes-operator - env: - KUBECONFIG: ${workdir}/kube_config - include_expansions_in_env: - - version_id - - test - - clusterwide - - distro - binary: scripts/ci/run_test.sh - - - build_and_push_image: - - command: subprocess.exec - type: setup - params: - include_expansions_in_env: - - version_id - - quay_user_name - - quay_password - - image - - image_type - - expire_after - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/build_and_push_image.sh - - build_and_push_image_sonar: - - command: subprocess.exec - type: setup - params: - env: - MONGODB_COMMUNITY_CONFIG: ${workdir}/mongodb-kubernetes-operator/scripts/ci/config.json - AWS_ACCESS_KEY_ID: ${community_aws_access_key_id} - AWS_SECRET_ACCESS_KEY: ${community_aws_secret_access_key} - include_expansions_in_env: - - version_id - - quay_user_name - - quay_password - - image_name - - release - working_dir: mongodb-kubernetes-operator - binary: scripts/ci/build_and_push_image_sonar.sh - - release_docker_image: - - command: subprocess.exec - type: system - params: - working_dir: mongodb-kubernetes-operator - include_expansions_in_env: - - version_id - - quay_user_name - - quay_password - - old_image - - new_image - - image_type - command: scripts/ci/run_image_release.sh - -task_groups: -- name: e2e_test_group - max_hosts: 8 - setup_group: - - func: clone - - func: setup_virtualenv - - func: setup_kubernetes_environment - setup_task: - - func: create_kind_cluster - tasks: - - e2e_test_replica_set - - e2e_test_replica_set_readiness_probe - - 
e2e_test_replica_set_scale - - e2e_test_replica_set_scale_down - - e2e_test_replica_set_change_version - - e2e_test_feature_compatibility_version - - e2e_test_feature_compatibility_version_upgrade - - e2e_test_replica_set_multiple - - e2e_test_replica_set_tls - - e2e_test_replica_set_tls_upgrade - - e2e_test_replica_set_tls_rotate - - e2e_test_statefulset_arbitrary_config - - e2e_test_statefulset_arbitrary_config_update - - e2e_test_replica_set_mongod_config - - e2e_test_replica_set_cross_namespace_deploy - - e2e_test_replica_set_custom_role - teardown_task: - - func: upload_e2e_logs - -tasks: - - name: build_operator_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image - vars: - image_type: operator - image: quay.io/mongodb/community-operator-dev:${version_id} - expire_after: 48h - - - name: build_e2e_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image - vars: - image: quay.io/mongodb/community-operator-e2e:${version_id} - image_type: e2e - expire_after: 48h - - - name: build_agent_image_ubuntu - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_name: agent-ubuntu - - - name: build_agent_image_ubi - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_name: agent-ubi - - - name: build_prehook_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_name: version-post-start-hook-init - - - name: build_readiness_probe_image - priority: 60 - exec_timeout_secs: 600 - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_name: readiness-probe-init - - - - name: e2e_test_feature_compatibility_version - commands: - - func: run_e2e_test - vars: - test: feature_compatibility_version - - - name: e2e_test_feature_compatibility_version_upgrade - commands: - - func: run_e2e_test - vars: - test: feature_compatibility_version - - - name: e2e_test_replica_set - commands: - - func: run_e2e_test - vars: - test: replica_set - - - name: e2e_test_replica_set_readiness_probe - commands: - - func: run_e2e_test - vars: - test: replica_set_readiness_probe - - - name: e2e_test_replica_set_scale - commands: - - func: run_e2e_test - vars: - test: replica_set_scale - - - name: e2e_test_replica_set_scale_down - exec_timeout_secs: 3600 - commands: - - func: run_e2e_test - vars: - test: replica_set_scale_down - - - name: e2e_test_replica_set_change_version - commands: - - func: run_e2e_test - vars: - test: replica_set_change_version - - - name: e2e_test_replica_set_multiple - commands: - - func: run_e2e_test - vars: - test: replica_set_multiple - - - name: e2e_test_replica_set_tls - commands: - - func: run_e2e_test - vars: - test: replica_set_tls - - - name: e2e_test_replica_set_tls_upgrade - commands: - - func: run_e2e_test - vars: - test: replica_set_tls_upgrade - - - name: e2e_test_replica_set_tls_rotate - commands: - - func: run_e2e_test - vars: - test: replica_set_tls_rotate - - - name: e2e_test_statefulset_arbitrary_config - commands: - - func: run_e2e_test - vars: - test: statefulset_arbitrary_config - - - name: e2e_test_statefulset_arbitrary_config_update - commands: - - func: run_e2e_test - vars: - test: 
statefulset_arbitrary_config_update - - - name: e2e_test_replica_set_mongod_config - commands: - - func: run_e2e_test - vars: - test: replica_set_mongod_config - - - name: e2e_test_replica_set_cross_namespace_deploy - commands: - - func: run_e2e_test - vars: - test: replica_set_cross_namespace_deploy - clusterwide: true - - - name: e2e_test_replica_set_custom_role - commands: - - func: run_e2e_test - vars: - test: replica_set_custom_role - - - name: release_blocker - commands: - - func: clone - - func: release_blocker - - - name: release_operator - commands: - - func: clone - - func: setup_virtualenv - - func: release_docker_image - vars: - old_image: quay.io/mongodb/community-operator-dev - new_image: quay.io/mongodb/mongodb-kubernetes-operator - image_type: mongodb-kubernetes-operator - - - - name: release_version_upgrade_post_start_hook - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_type: version-post-start-hook-init - release: true - - - - name: release_readiness_probe - commands: - - func: clone - - func: setup_virtualenv - - func: build_and_push_image_sonar - vars: - image_type: readiness-probe-init - release: true - - -buildvariants: - - name: e2e_tests_ubuntu - display_name: e2e_tests_ubuntu - expansions: - distro: ubuntu - run_on: - - ubuntu1604-build - depends_on: - - name: build_operator_image - variant: init_test_run - - name: build_e2e_image - variant: init_test_run - - name: build_prehook_image - variant: init_test_run - - name: build_readiness_probe_image - variant: init_test_run - - name: build_agent_image_ubuntu - variant: init_test_run - tasks: - - name: e2e_test_group - - - name: e2e_tests_ubi - display_name: e2e_tests_ubi - expansions: - distro: ubi - run_on: - - ubuntu1604-build - depends_on: - - name: build_operator_image - variant: init_test_run - - name: build_e2e_image - variant: init_test_run - - name: build_prehook_image - variant: init_test_run - - name: build_readiness_probe_image - variant: init_test_run - - name: build_agent_image_ubi - variant: init_test_run - tasks: - - name: e2e_test_group - - - name: init_test_run - display_name: init_test_run - run_on: - - ubuntu1604-build - tasks: - - name: build_operator_image - - name: build_e2e_image - - name: build_prehook_image - - name: build_agent_image_ubi - - name: build_agent_image_ubuntu - - name: build_readiness_probe_image - - - name: release_blocker - display_name: release_blocker - run_on: - - ubuntu1604-packer # Note: cheapest machine I found - tasks: - - name: release_blocker - - - name: release_images_quay - display_name: release_images_quay - depends_on: - - name: release_blocker - variant: release_blocker - - name: build_operator_image - variant: init_test_run - - name: build_prehook_image - variant: init_test_run - - name: build_agent_image_ubuntu - variant: init_test_run - - name: build_agent_image_ubi - variant: init_test_run - run_on: - - ubuntu1604-test - tasks: - - name: release_operator - - name: release_version_upgrade_post_start_hook - - name: release_readiness_probe diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..db61cf612 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @mircea-cosbuc @lsierant @nammn @Julien-Ben @MaciejKaras @lucian-tosa @fealebenpae @m1kola \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index f3021cc27..08b2b00ab 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ 
b/.github/ISSUE_TEMPLATE/bug_report.md @@ -35,6 +35,30 @@ If applicable, add screenshots to help explain your problem. Add any other context about the problem here. If possible, please include: - - `kubectl describe` output - - yaml definitions for your objects - - log files for the operator and database pods + - The operator logs + - Below we assume that your replicaset database pods are named `mongo-<>`. For instance: +``` +❯ k get pods +NAME READY STATUS RESTARTS AGE +mongo-0 2/2 Running 0 19h +mongo-1 2/2 Running 0 19h + +❯ k get mdbc +NAME PHASE VERSION +mongo Running 4.4.0 +``` + - yaml definitions of your MongoDB Deployment(s): + - `kubectl get mdbc -oyaml` + - yaml definitions of your kubernetes objects like the statefulset(s), pods (we need to see the state of the containers): + - `kubectl get sts -oyaml` + - `kubectl get pods -oyaml` + - The Pod logs: + - `kubectl logs mongo-0` + - The agent clusterconfig of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/lib/automation/config/cluster-config.json` + - The agent health status of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/healthstatus/agent-health-status.json` + - The verbose agent logs of the faulty members: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent-verbose.log` + - You might not have the verbose ones, in that case the non-verbose agent logs: + - `kubectl exec -it mongo-0 -c mongodb-agent -- cat /var/log/mongodb-mms-automation/automation-agent.log` diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f77733901..650880d32 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,12 @@ +### Summary: + + ### All Submissions: * [ ] Have you opened an Issue before filing this PR? diff --git a/.github/config_files/config_lint.yaml b/.github/config_files/config_lint.yaml new file mode 100644 index 000000000..435bc8a7b --- /dev/null +++ b/.github/config_files/config_lint.yaml @@ -0,0 +1,14 @@ +checks: + addAllBuiltIn: true + +#Reasons to exclude: + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now, it brings nothing to add these probes + # because they will not check whether the operator is actually ready/living + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "use-namespace" diff --git a/.github/config_files/config_lint_clusterwide.yaml b/.github/config_files/config_lint_clusterwide.yaml new file mode 100644 index 000000000..b69b5147d --- /dev/null +++ b/.github/config_files/config_lint_clusterwide.yaml @@ -0,0 +1,18 @@ +checks: + addAllBuiltIn: true + +#Reasons to exclude: + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now, it brings nothing to add these probes + # because they will not check whether the operator is actually ready/living. 
+ # When using a clusterwide operator, it is required to be able to create StatefulSets and Secrets + # so we exclude "access-to-secrets" and "access-to-create-pods" + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "use-namespace" + - "access-to-secrets" + - "access-to-create-pods" diff --git a/.github/config_files/config_lint_openshift.yaml b/.github/config_files/config_lint_openshift.yaml new file mode 100644 index 000000000..34ff6e440 --- /dev/null +++ b/.github/config_files/config_lint_openshift.yaml @@ -0,0 +1,17 @@ +checks: + addAllBuiltIn: true + + #Reasons to exclude: + # non-existent-service-account because the service account is created in another file + # minimum-three-replicas because the deployment contains only 1 replica of the operator + # no-readiness-probe & no-liveness-probe because for now, it brings nothing to add these probes + # because they will not check whether the operator is actually ready/living + # run-as-non-root & no-read-only-root-fs because the security is managed somewhere else + exclude: + - "non-existent-service-account" + - "minimum-three-replicas" + - "no-liveness-probe" + - "no-readiness-probe" + - "run-as-non-root" + - "no-read-only-root-fs" + - "use-namespace" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a6103f13d..eb3084c66 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,8 +5,14 @@ updates: schedule: interval: weekly day: monday - reviewers: - - "bznein" - - "chatton" - - "irajdeep" - - "rodrigovalin" + ignore: + - dependency-name: k8s.io/api + - dependency-name: k8s.io/apimachinery + - dependency-name: k8s.io/client-go + - dependency-name: k8s.io/code-generator + - dependency-name: sigs.k8s.io/controller-runtime + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + day: monday diff --git a/.github/workflows/close-stale-issues.yml b/.github/workflows/close-stale-issues.yml index e0592e2e5..942020dbd 100644 --- a/.github/workflows/close-stale-issues.yml +++ b/.github/workflows/close-stale-issues.yml @@ -10,7 +10,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v3 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: 'This issue is being marked stale because it has been open for 60 days with no activity. Please comment if this issue is still affecting you. If there is no change, this issue will be closed in 30 days.'
@@ -22,3 +22,4 @@ jobs: days-before-pr-close: -1 # never close PRs exempt-issue-labels: 'bug,feature-request' + ascending: true diff --git a/.github/workflows/code-health.yml b/.github/workflows/code-health.yml new file mode 100644 index 000000000..345941c18 --- /dev/null +++ b/.github/workflows/code-health.yml @@ -0,0 +1,33 @@ +name: Code Health + +on: + pull_request: + branches: [ master ] +jobs: + Black: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Black Check + uses: jpetrucciani/black-check@7f5b2ad20fa5484f1884f07c1937e032ed8cd939 + + Mypy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Mypy linting + uses: jpetrucciani/mypy-check@179fdad632bf3ccf4cabb7ee4307ef25e51d2f96 + with: + path: scripts/*/*.py + + Golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: stable + - name: golangci-lint + uses: golangci/golangci-lint-action@v6 diff --git a/.github/workflows/code_health.yml b/.github/workflows/code_health.yml deleted file mode 100644 index 8be014241..000000000 --- a/.github/workflows/code_health.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Code Health - -on: - pull_request: - branches: [ master ] -jobs: - Black: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Black Check - uses: jpetrucciani/black-check@20.8b1 - - Mypy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - name: Mypy linting - uses: jpetrucciani/mypy-check@master - with: - path: scripts/*/*.py diff --git a/.github/workflows/comment-release-pr.yml b/.github/workflows/comment-release-pr.yml new file mode 100644 index 000000000..3944aa660 --- /dev/null +++ b/.github/workflows/comment-release-pr.yml @@ -0,0 +1,21 @@ +name: Link GitHub Releases +on: + pull_request: + types: [closed] + +jobs: + comment: + # only link releases on release PRs + if: startsWith(github.event.pull_request.title, 'Release MongoDB Kubernetes Operator') + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: 'Review and publish the release here: https://github.com/mongodb/mongodb-kubernetes-operator/releases' + }) diff --git a/.github/workflows/e2e-dispatch.yml b/.github/workflows/e2e-dispatch.yml new file mode 100644 index 000000000..b3522124d --- /dev/null +++ b/.github/workflows/e2e-dispatch.yml @@ -0,0 +1,134 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten.
+# +################################################################################## + +name: Run Single E2E +on: + # template: .action_templates/events/single-e2e-workflow-dispatch.yaml + workflow_dispatch: + inputs: + distro: + description: Distro to run test + required: true + default: ubuntu + test-name: + description: Name of test to run + required: true + cluster-wide: + description: Whether or not the test is cluster wide + required: true + default: 'false' +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + steps: + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: .action_templates/jobs/single-test.yaml + single-test: + runs-on: ubuntu-latest + needs: [setup] + steps: + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + # template: .action_templates/steps/run-test-single.yaml + - name: Run Test Single + run: | + python3 ./scripts/dev/e2e.py --test ${{ github.event.inputs.test-name }} --tag ${{ github.run_id }} 
--config_file ./scripts/ci/config.json --distro ${{ github.event.inputs.distro }} --cluster-wide ${{ github.event.inputs.cluster-wide }} + # template: .action_templates/steps/dump-and-upload-diagnostics-always.yaml + - name: Dump Diagnostics + if: always() + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ github.event.inputs.test-name }}-${{ github.event.inputs.distro + }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/e2e-fork.yml b/.github/workflows/e2e-fork.yml new file mode 100644 index 000000000..a5c3ae53e --- /dev/null +++ b/.github/workflows/e2e-fork.yml @@ -0,0 +1,240 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. +# +################################################################################## + +name: Run E2E Fork +on: + # template: .action_templates/events/pull-request-target.yaml + pull_request_target: + types: [labeled] + branches: + - master + paths-ignore: + - docs/** +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + if: contains(github.event.pull_request.labels.*.name, 'dependencies') || contains(github.event.pull_request.labels.*.name, + 'safe-to-test') + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout-fork.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: ${{github.event.pull_request.head.repo.full_name}} + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: 
.action_templates/jobs/tests.yaml + tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts + distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi + - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout-fork.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.sha}} + repository: ${{github.event.pull_request.head.repo.full_name}} + submodules: true + # template: .action_templates/steps/set-run-status.yaml + - name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. 
+ # see https://github.com/actions/runner/issues/432 + - name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + + - name: Set last run status + id: last_run_status + run: cat last_run_status + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + if: steps.last_run_status.outputs.last_run_status != 'success' + # template: .action_templates/steps/run-test-matrix.yaml + - name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} + # template: .action_templates/steps/save-run-status.yaml + - name: Save run status + if: always() + run: echo "::set-output name=last_run_status::${{ steps.e2e_test.outcome }}" + > last_run_status + # template: .action_templates/steps/dump-and-upload-diagnostics.yaml + - name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 000000000..8501431b6 --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,244 @@ + +################################################################################## +# +# This file is automatically generated using templates. Changes to this file +# should happen through editing the templates under .action_templates/* +# Manual edits will be overwritten. 
+# +################################################################################## + +name: Run E2E +on: + # template: .action_templates/events/on-pull-request-master.yaml + pull_request: + branches: + - master + paths-ignore: + - docs/** + # template: .action_templates/events/on-push-master.yaml + push: + branches: + - master + paths-ignore: + - docs/** + # template: .action_templates/events/workflow-dispatch.yaml + workflow_dispatch: {} +jobs: + # template: .action_templates/jobs/display-github-context.yaml + action-context: + if: always() + runs-on: ubuntu-latest + steps: + - name: Dump GitHub context + env: + GITHUB_CONTEXT: ${{ toJSON(github) }} + run: echo "$GITHUB_CONTEXT" + # template: .action_templates/jobs/setup.yaml + setup: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - pipeline-argument: operator + - pipeline-argument: version-upgrade-hook + - pipeline-argument: readiness-probe + - pipeline-argument: agent + - pipeline-argument: e2e + if: github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/master' + || (github.event.pull_request.head.repo.full_name == github.repository && github.actor + != 'dependabot[bot]') + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/quay-login.yaml + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # template: .action_templates/steps/build-and-push-development-images.yaml + - name: Build and Push Images + run: | + python pipeline.py --image-name ${{ matrix.pipeline-argument }} --tag ${{ github.run_id }} + env: + MONGODB_COMMUNITY_CONFIG: ${{ github.workspace }}/scripts/ci/config.json + version_id: ${{ github.run_id }} + # template: .action_templates/jobs/tests.yaml + tests: + runs-on: ubuntu-latest + needs: [setup] + strategy: + fail-fast: false + matrix: + include: + - test-name: replica_set + distro: ubi + - test-name: replica_set_enterprise_upgrade_4_5 + distro: ubi + - test-name: replica_set_enterprise_upgrade_5_6 + distro: ubi + - test-name: replica_set_enterprise_upgrade_6_7 + distro: ubi + - test-name: replica_set_enterprise_upgrade_7_8 + distro: ubi + - test-name: replica_set_recovery + distro: ubi + - test-name: replica_set_mongod_readiness + distro: ubi + - test-name: replica_set_scale + distro: ubi + - test-name: replica_set_scale_down + distro: ubi + - test-name: replica_set_change_version + distro: ubi + - test-name: feature_compatibility_version + distro: ubi + - test-name: prometheus + distro: ubi + - test-name: replica_set_tls + distro: ubi + - test-name: replica_set_tls_recreate_mdbc + distro: ubi + - test-name: replica_set_tls_rotate + distro: ubi + - test-name: replica_set_tls_rotate_delete_sts 
+ distro: ubi + - test-name: replica_set_tls_upgrade + distro: ubi + - test-name: statefulset_arbitrary_config + distro: ubi + - test-name: statefulset_arbitrary_config_update + distro: ubi + - test-name: replica_set_mongod_config + distro: ubi + - test-name: replica_set_cross_namespace_deploy + distro: ubi + cluster-wide: true + - test-name: replica_set_custom_role + distro: ubi + - test-name: replica_set_arbiter + distro: ubi + - test-name: replica_set_custom_persistent_volume + distro: ubi + - test-name: replica_set_mount_connection_string + distro: ubi + - test-name: replica_set_mongod_port_change_with_arbiters + distro: ubi + - test-name: replica_set_operator_upgrade + distro: ubi + - test-name: replica_set_connection_string_options + distro: ubi + - test-name: replica_set_x509 + distro: ubi + - test-name: replica_set_remove_user + distro: ubi + steps: + # template: .action_templates/steps/cancel-previous.yaml + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.12.1 + with: + access_token: ${{ github.token }} + # template: .action_templates/steps/checkout.yaml + - name: Checkout Code + uses: actions/checkout@v4 + with: + submodules: true + # template: .action_templates/steps/set-run-status.yaml + - name: Set default run status + run: echo "::set-output name=last_run_status::pending" > last_run_status + + # Tracking of the state of the previous test run is a workaround to the fact that it is not + # possible to re-run a single failed job, only re-running the entire workflow is currently possible. + # This workaround skips jobs if they have already passed. + # see https://github.com/actions/runner/issues/432 + - name: Restore last run status + id: last_run + uses: actions/cache@v4 + with: + path: last_run_status + key: ${{ github.run_id }}-${{ matrix.test-name }}-${{ matrix.distro }} + + - name: Set last run status + id: last_run_status + run: cat last_run_status + # template: .action_templates/steps/setup-and-install-python.yaml + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.10.4 + - name: Cache Dependencies + uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + - name: Install Python Dependencies + run: pip install -r requirements.txt + # template: .action_templates/steps/setup-kind-cluster.yaml + - name: Setup Kind Cluster + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 + chmod +x ./kind + ./kind create cluster + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Create Directories + run: | + docker exec kind-control-plane mkdir -p /opt/data/mongo-data-0 /opt/data/mongo-data-1 /opt/data/mongo-data-2 /opt/data/mongo-logs-0 /opt/data/mongo-logs-1 /opt/data/mongo-logs-2 + + if: steps.last_run_status.outputs.last_run_status != 'success' + - name: Install CRD + run: kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + if: steps.last_run_status.outputs.last_run_status != 'success' + # template: .action_templates/steps/run-test-matrix.yaml + - name: Run Test + id: e2e_test + if: steps.last_run_status.outputs.last_run_status != 'success' + run: | + cluster_wide=${{ matrix.cluster-wide }} + if [ -z "$cluster_wide" ]; then + cluster_wide="false" + fi + python3 ./scripts/dev/e2e.py --test ${{ matrix.test-name }} --tag ${{ github.run_id }} --config_file ./scripts/ci/config.json --distro ${{ matrix.distro }} --cluster-wide ${cluster_wide} + # template: .action_templates/steps/save-run-status.yaml + - name: Save 
run status + if: always() + run: echo "::set-output name=last_run_status::${{ steps.e2e_test.outcome }}" + > last_run_status + # template: .action_templates/steps/dump-and-upload-diagnostics.yaml + - name: Dump Diagnostics + id: dump_diagnostics + if: always() && steps.e2e_test.outcome == 'failure' + continue-on-error: true + run: scripts/ci/dump_diagnostics.sh default # default since kind is running in the default namespace + + - name: Upload Diagnostics + if: always() && steps.dump_diagnostics.outcome == 'success' + uses: actions/upload-artifact@v4 + continue-on-error: true + with: + name: ${{ matrix.test-name }}-${{ matrix.distro }}-diagnostics + path: ${{ github.workspace }}/diagnostics diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 8fdab9c06..ecce33378 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -9,12 +9,12 @@ jobs: UnitTests: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.14 + go-version: '1.24' - name: Test api run: go test -v ./api/... @@ -30,3 +30,6 @@ jobs: - name: Test mongotester run: go test -v ./test/e2e/util/mongotester/... + + - name: Check licenses + run: make check-licenses diff --git a/.github/workflows/kubelinter-check.yml b/.github/workflows/kubelinter-check.yml new file mode 100644 index 000000000..2fcb5b725 --- /dev/null +++ b/.github/workflows/kubelinter-check.yml @@ -0,0 +1,48 @@ +name: Kubelinter-check + +on: + push: + branches: + - master + paths-ignore: + - docs/** + pull_request: + branches: + - master + workflow_dispatch: {} + +jobs: + Kubelinter-check: + name: Run Kube-linter check + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + + - name: Scan directory ./deploy/clusterwide/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: deploy/clusterwide + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint_clusterwide.yaml + version: "48442350" # Note: This is the id for release 0.2.3 returned from api.github.com/repos/stackrox/kube-linter/releases + + - name: Scan directory ./deploy/openshift/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: deploy/openshift + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint_openshift.yaml + version: "48442350" + + - name: Scan directory ./config/manager/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/manager/manager.yaml + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" + + - name: Scan directory ./config/samples/ with kube-linter + uses: stackrox/kube-linter-action@v1.0.3 + with: + directory: config/samples + config: ${GITHUB_WORKSPACE}/.github/config_files/config_lint.yaml + version: "48442350" diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 7813fbc6f..3442f28df 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -36,7 +36,10 @@ jobs: # Checkout the code base # ########################## - name: Checkout Code - uses: actions/checkout@v2 + uses: actions/checkout@v4 + with: + # Make sure we also get the helm-charts submodule! 
+ submodules: true - name: Install missing python packages run: sudo apt-get install -y --no-install-recommends python3-venv python3-setuptools @@ -48,25 +51,3 @@ jobs: - name: Move the dependencies run: mv .venv /home/runner/work/_temp/_github_workflow - - # This part is not needed until we can add GO linting - # - name : Install Operator SDK - # run: | - # curl -s https://api.github.com/repos/operator-framework/operator-sdk/releases/latest | grep browser_download_url | grep x86_64-linux-gnu | cut -d '"' -f 4 | wget -i - - # sudo mv operator-sdk-*-x86_64-linux-gnu /usr/local/bin/operator-sdk - # sudo chmod 777 /usr/local/bin/operator-sdk - # - name: Generate DeepCopy - # Run: operator-sdk generate k8s - - - name: Lint Code Base - uses: docker://github/super-linter:v3 - env: - VALIDATE_ALL_CODEBASE: true - # Now we set the PYTHONPATH to the path of the dependencies *inside* the container - PYTHONPATH: "/github/workspace/:\ - /github/workflow/.venv/lib/python3.6/site-packages" - VALIDATE_YAML: true - VALIDATE_PYTHON: true - VALIDATE_BASH: true - # VALIDATE_GO: true This is currently broken: https://github.com/github/super-linter/issues/143 -... diff --git a/.github/workflows/release-images.yml b/.github/workflows/release-images.yml new file mode 100644 index 000000000..5ced57eae --- /dev/null +++ b/.github/workflows/release-images.yml @@ -0,0 +1,87 @@ +name: Release Images + +on: + pull_request_review: + types: [submitted] + workflow_dispatch: + +jobs: + release-images: + runs-on: ubuntu-latest + if: startsWith(github.event.pull_request.title, 'Release MongoDB Kubernetes Operator') && github.event.review.state == 'approved' + strategy: + matrix: + include: + - pipeline-argument: operator + release-key: operator + - pipeline-argument: version-upgrade-hook + release-key: version-upgrade-hook + - pipeline-argument: readiness-probe + release-key: readiness-probe + + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' + architecture: 'x64' + + - uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + + - name: Install Python Dependencies + run: pip install -r requirements.txt + - name: Determine if release is needed + id: release_status + run: | + OUTPUT=$(scripts/ci/determine_required_releases.py ${{ matrix.release-key }}) + echo "::set-output name=OUTPUT::$OUTPUT" + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Publish Image To Quay + if: steps.release_status.outputs.OUTPUT == 'unreleased' + run: python pipeline.py --image-name ${{ matrix.pipeline-argument }} --release --sign + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" + AWS_DEFAULT_REGION: "${{ vars.AWS_DEFAULT_REGION }}" + + create-draft-release: + runs-on: ubuntu-latest + needs: [release-images] + steps: + - name: Checkout Code + uses: actions/checkout@v4 
+ - name: Determine Release Tag + id: release_tag + run: | + OUTPUT=$(jq -r '."mongodb-kubernetes-operator"' < $GITHUB_WORKSPACE/release.json) + echo "::set-output name=OUTPUT::$OUTPUT" + - name: Create Github Release + uses: ncipollo/release-action@v1 + with: + tag: "v${{ steps.release_tag.outputs.OUTPUT }}" + name: MongoDB Kubernetes Operator + bodyFile: "${{ github.workspace }}/docs/RELEASE_NOTES.md" + draft: true + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-single-image.yml b/.github/workflows/release-single-image.yml new file mode 100644 index 000000000..162454391 --- /dev/null +++ b/.github/workflows/release-single-image.yml @@ -0,0 +1,58 @@ +name: Release Single Image +on: + workflow_dispatch: + inputs: + pipeline-argument: + description: 'Argument to pass to pipeline' + required: true + release-key: + description: 'Corresponding release.json key' + required: true +jobs: + release-single-image: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.10.4' + architecture: 'x64' + + - uses: actions/cache@v4 + with: + path: ~/.cache/pip + key: ${{ hashFiles('requirements.txt') }} + + - name: Install Python Dependencies + run: pip install -r requirements.txt + - name: Determine if release is needed + id: release_status + run: | + OUTPUT=$(scripts/ci/determine_required_releases.py ${{ github.event.inputs.release-key }}) + echo "::set-output name=OUTPUT::$OUTPUT" + + - name: Login to Quay.io + uses: docker/login-action@v1 + with: + registry: quay.io + username: ${{ secrets.QUAY_USERNAME }} + password: ${{ secrets.QUAY_ROBOT_TOKEN }} + + # template: .action_templates/steps/set-up-qemu.yaml + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Publish Image To Quay + if: steps.release_status.outputs.OUTPUT == 'unreleased' + run: python pipeline.py --image-name ${{ github.event.inputs.pipeline-argument }} --release --sign + env: + MONGODB_COMMUNITY_CONFIG: "${{ github.workspace }}/scripts/ci/config.json" + AWS_ACCESS_KEY_ID: "${{ secrets.AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "${{ secrets.AWS_SECRET_ACCESS_KEY }}" + GRS_USERNAME: "${{ vars.GRS_USERNAME }}" + GRS_PASSWORD: "${{ secrets.GRS_PASSWORD }}" + PKCS11_URI: "${{ vars.PKCS11_URI }}" + ARTIFACTORY_USERNAME: "${{ vars.ARTIFACTORY_USERNAME }}" + ARTIFACTORY_PASSWORD: "${{ secrets.ARTIFACTORY_PASSWORD }}" diff --git a/.github/workflows/remove-label.yml b/.github/workflows/remove-label.yml new file mode 100644 index 000000000..60316ff49 --- /dev/null +++ b/.github/workflows/remove-label.yml @@ -0,0 +1,13 @@ +name: Remove Label +on: [ pull_request ] +jobs: + remove-safe-to-test-label: + runs-on: ubuntu-latest + name: Remove Label + steps: + - name: + uses: buildsville/add-remove-label@v1 + with: + token: ${{secrets.GITHUB_TOKEN}} + label: safe-to-test + type: remove diff --git a/.gitignore b/.gitignore index d1e3f2356..0229263df 100644 --- a/.gitignore +++ b/.gitignore @@ -92,3 +92,12 @@ testbin/bin # ignore files generated by sonar Dockerfile.ubi-* Dockerfile.ubuntu-* + +diagnostics + +!test/test-app/Dockerfile + +Pipfile +Pipfile.lock +.community-operator-dev +*.iml diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..80d9434c7 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "helm-charts"] + path = helm-charts + url = git@github.com:mongodb/helm-charts.git diff --git a/.golangci.yml b/.golangci.yml index 7e0ad5f76..795e08728 100644 --- 
a/.golangci.yml +++ b/.golangci.yml @@ -16,6 +16,12 @@ issues: - goconst - golint text: "underscore" + - path: ^pkg\/util\/envvar + linters: + - forbidigo + - path: ^cmd\/(readiness|versionhook|manager)\/main\.go$ + linters: + - forbidigo linters: enable: - govet @@ -23,17 +29,32 @@ linters: - staticcheck - unused - gosimple - - structcheck - - varcheck - ineffassign - - deadcode - typecheck - rowserrcheck - gosec - unconvert + - forbidigo +linters-settings: + gosec: + excludes: + - G115 + forbidigo: + forbid: + - p: os\.(Getenv|LookupEnv|Environ|ExpandEnv) + pkg: os + msg: "Reading environment variables here is prohibited. Please read environment variables in the main package." + - p: os\.(Clearenv|Unsetenv|Setenv) + msg: "Modifying environment variables is prohibited." + pkg: os + - p: envvar\.(Read.*?|MergeWithOverride|GetEnvOrDefault) + pkg: github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar + msg: "Using this envvar package here is prohibited. Please work with environment variables in the main package." + # Rules that specify `pkg` depend on this + analyze-types: true run: - modules-download-mode: + modules-download-mode: mod # timeout for analysis, e.g. 30s, 5m, default is 1m timeout: 5m # default concurrency is the number of available CPUs
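These forbidigo rules confine environment access to the entry-point packages: library code receives configuration as plain values instead of calling os.Getenv itself. A minimal sketch of the enforced pattern follows; the WATCH_NAMESPACE variable and both function names are illustrative assumptions, not code from this repository.

package main

import (
	"fmt"
	"os"
)

// config carries values read from the environment; illustrative only.
type config struct {
	namespace string
}

// loadConfig is the only place that touches os.Getenv, which is what
// the forbidigo rules above enforce for non-main packages.
func loadConfig() config {
	return config{namespace: os.Getenv("WATCH_NAMESPACE")}
}

// runOperator stands in for library code: it receives the value and
// never reads the environment itself.
func runOperator(c config) {
	fmt.Printf("watching namespace %q\n", c.namespace)
}

func main() {
	runOperator(loadConfig())
}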
diff --git a/LICENSE.md b/LICENSE.md index 3db28920f..9c600b1bc 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,5 +1,5 @@ -The MongoDB Agent binary in the agent/ directory may be used under the "Free for Commercial Use - Oct 2020" license found in agent/LICENSE. +The MongoDB Agent binary in the agent/ directory may be used under the "Free for Commercial Use - Oct 2020" license found in [agent/LICENSE](scripts/dev/templates/agent/LICENSE). -The source code of this Operator, and all other content in this repo are available under the Apache v2 license. The text of this license is available in [APACHE2](APACHE2) +The source code of this Operator, and all other content in this repository are available under the Apache v2 license. The text of this license is available in [APACHE2](APACHE2) To use this Operator, you must agree to both licenses. diff --git a/Makefile b/Makefile index 09387eca1..6f1811c8f 100644 --- a/Makefile +++ b/Makefile @@ -1,40 +1,25 @@ SHELL := /bin/bash -# VERSION defines the project version for the bundle. -# Update this value when you upgrade the version of your project. -# To re-generate a bundle for another specific version without changing the standard setup, you can: -# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) -# - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.0.1 - -# CHANNELS define the bundle channels used in the bundle. -# Add a new line here if you would like to change its default config. (E.g CHANNELS = "preview,fast,stable") -# To re-generate a bundle for other specific channels without changing the standard setup, you can: -# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=preview,fast,stable) -# - use environment variables to overwrite this value (e.g export CHANNELS="preview,fast,stable") -ifneq ($(origin CHANNELS), undefined) -BUNDLE_CHANNELS := --channels=$(CHANNELS) -endif - -# DEFAULT_CHANNEL defines the default channel used in the bundle. -# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") -# To re-generate a bundle for any other default channel without changing the default setup, you can: -# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) -# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") -ifneq ($(origin DEFAULT_CHANNEL), undefined) -BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) -endif -BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) - -# BUNDLE_IMG defines the image:tag used for the bundle. -# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:) -BUNDLE_IMG ?= controller-bundle:$(VERSION) +MONGODB_COMMUNITY_CONFIG ?= $(HOME)/.community-operator-dev/config.json # Image URL to use all building/pushing image targets -IMG ?= /mongodb-kubernetes-operator +REPO_URL := $(shell jq -r .repo_url < $(MONGODB_COMMUNITY_CONFIG)) +OPERATOR_IMAGE := $(shell jq -r .operator_image < $(MONGODB_COMMUNITY_CONFIG)) +NAMESPACE := $(shell jq -r .namespace < $(MONGODB_COMMUNITY_CONFIG)) +UPGRADE_HOOK_IMG := $(shell jq -r .version_upgrade_hook_image < $(MONGODB_COMMUNITY_CONFIG)) +READINESS_PROBE_IMG := $(shell jq -r .readiness_probe_image < $(MONGODB_COMMUNITY_CONFIG)) +REGISTRY := $(shell jq -r .repo_url < $(MONGODB_COMMUNITY_CONFIG)) +AGENT_IMAGE_NAME := $(shell jq -r .agent_image < $(MONGODB_COMMUNITY_CONFIG)) +HELM_CHART ?= ./helm-charts/charts/community-operator + +STRING_SET_VALUES := --set namespace=$(NAMESPACE),versionUpgradeHook.name=$(UPGRADE_HOOK_IMG),readinessProbe.name=$(READINESS_PROBE_IMG),registry.operator=$(REPO_URL),operator.operatorImageName=$(OPERATOR_IMAGE),operator.version=latest,registry.agent=$(REGISTRY),registry.versionUpgradeHook=$(REGISTRY),registry.readinessProbe=$(REGISTRY),registry.operator=$(REGISTRY),versionUpgradeHook.version=latest,readinessProbe.version=latest,agent.version=latest,agent.name=$(AGENT_IMAGE_NAME) +STRING_SET_VALUES_LOCAL := $(STRING_SET_VALUES) --set operator.replicas=0 + DOCKERFILE ?= operator # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=true,crdVersions=v1beta1" +CRD_OPTIONS ?= "crd:crdVersions=v1" +RELEASE_NAME_HELM ?= mongodb-kubernetes-operator +TEST_NAMESPACE ?= $(NAMESPACE) # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -43,103 +28,215 @@ else GOBIN=$(shell go env GOBIN) endif +BASE_GO_PACKAGE = github.com/mongodb/mongodb-kubernetes-operator +GO_LICENSES = go-licenses +DISALLOWED_LICENSES = restricted # found reciprocal MPL-2.0 + all: manager -# Run unit tests +##@ Development + +fmt: ## Run go fmt against code + go fmt ./... + +vet: ## Run go vet against code + go vet ./... + +generate: controller-gen ## Generate code + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +$(GO_LICENSES): + @if ! which $@ &> /dev/null; then \ + go install github.com/google/go-licenses@latest; \ + fi + +licenses.csv: go.mod $(GO_LICENSES) ## Track licenses in a CSV file + @echo "Tracking licenses into file $@" + @echo "========================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) csv --include_tests $(BASE_GO_PACKAGE)/... 
> $@ + +# We only check that go.mod is NOT newer than licenses.csv because the CI +# tends to generate slightly different results, so content comparison wouldn't work +licenses-tracked: ## Checks licenses.csv is up to date + @if [ go.mod -nt licenses.csv ]; then \ + echo "licenses.csv is stale! Please run 'make licenses.csv' and commit"; exit 1; \ + else echo "licenses.csv OK (up to date)"; fi + +.PHONY: check-licenses-compliance +check-licenses-compliance: licenses.csv ## Check licenses are compliant with our restrictions + @echo "Checking licenses not to be: $(DISALLOWED_LICENSES)" + @echo "============================================" + GOOS=linux GOARCH=amd64 $(GO_LICENSES) check --include_tests $(BASE_GO_PACKAGE)/... \ + --disallowed_types $(DISALLOWED_LICENSES) + @echo "--------------------" + @echo "Licenses check: PASS" + +.PHONY: check-licenses +check-licenses: licenses-tracked check-licenses-compliance ## Check license tracking & compliance + TEST ?= ./pkg/... ./api/... ./cmd/... ./controllers/... ./test/e2e/util/mongotester/... -ENVTEST_ASSETS_DIR=$(shell pwd)/testbin -test: generate fmt vet manifests - mkdir -p ${ENVTEST_ASSETS_DIR} - test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test $(TEST) -coverprofile cover.out - -# Build manager binary -manager: generate fmt vet +test: generate fmt vet manifests ## Run unit tests + go test $(options) $(TEST) -coverprofile cover.out + +manager: generate fmt vet ## Build operator binary go build -o bin/manager ./cmd/manager/main.go -# Run against the configured Kubernetes cluster in ~/.kube/config -run: generate fmt vet manifests +run: install ## Run the operator against the configured Kubernetes cluster in ~/.kube/config + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ go run ./cmd/manager/main.go -# Install CRDs into a cluster -install: manifests kustomize - $(KUSTOMIZE) build config/crd | kubectl apply -f - +debug: install install-rbac ## Run the operator in debug mode with dlv + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + dlv debug ./cmd/manager/main.go + +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen +controller-gen: ## Download controller-gen locally if necessary + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0) + +# Try to use already installed helm from PATH +ifeq (ok,$(shell test -f "$$(which helm)" && echo ok)) + HELM=$(shell which helm) +else + HELM=/usr/local/bin/helm +endif + +helm: ## Download helm locally if necessary + $(call install-helm) + +install-prerequisites-macos: ## Installs prerequisites for macOS development + scripts/dev/install_prerequisites.sh + +##@ Installation/Uninstallation + +install: manifests helm install-crd ## Install CRDs into a cluster + +install-crd: + kubectl apply -f config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml + +install-chart: uninstall-crd + $(HELM) upgrade --install $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace -# Uninstall CRDs from a cluster -uninstall: manifests kustomize - $(KUSTOMIZE) build config/crd | kubectl delete -f - +install-chart-local-operator: uninstall-crd + $(HELM) upgrade --install $(STRING_SET_VALUES_LOCAL) $(RELEASE_NAME_HELM) $(HELM_CHART) 
--namespace $(NAMESPACE) --create-namespace -# Deploy controller in the configured Kubernetes cluster in ~/.kube/config -deploy: manifests kustomize - cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} - $(KUSTOMIZE) build config/default | kubectl apply -f - +prepare-local-dev: generate-env-file install-chart-local-operator install-rbac setup-sas -# UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config -undeploy: - $(KUSTOMIZE) build config/default | kubectl delete -f - +# Patches all service accounts (SAs) to use the local image registry +setup-sas: + scripts/dev/setup_sa.sh -# Generate manifests e.g. CRD, RBAC etc. -manifests: controller-gen +install-chart-with-tls-enabled: + $(HELM) upgrade --install --set createResource=true $(STRING_SET_VALUES) $(RELEASE_NAME_HELM) $(HELM_CHART) --namespace $(NAMESPACE) --create-namespace + +install-rbac: + $(HELM) template $(STRING_SET_VALUES) -s templates/database_roles.yaml $(HELM_CHART) | kubectl apply -f - + $(HELM) template $(STRING_SET_VALUES) -s templates/operator_roles.yaml $(HELM_CHART) | kubectl apply -f - + +uninstall-crd: + kubectl delete crd --ignore-not-found mongodbcommunity.mongodbcommunity.mongodb.com + +uninstall-chart: + $(HELM) uninstall $(RELEASE_NAME_HELM) -n $(NAMESPACE) + +uninstall-rbac: + $(HELM) template $(STRING_SET_VALUES) -s templates/database_roles.yaml $(HELM_CHART) | kubectl delete -f - + $(HELM) template $(STRING_SET_VALUES) -s templates/operator_roles.yaml $(HELM_CHART) | kubectl delete -f - + +uninstall: manifests helm uninstall-chart uninstall-crd ## Uninstall CRDs from a cluster + +##@ Deployment + +deploy: manifests helm install-chart install-crd ## Deploy controller in the configured Kubernetes cluster in ~/.kube/config + +undeploy: uninstall-chart uninstall-crd ## UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config + +manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc. $(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./..." output:crd:artifacts:config=config/crd/bases + cp config/crd/bases/* $(HELM_CHART)/crds -# Run go fmt against code -fmt: - go fmt ./... +##@ E2E -# Run go vet against code -vet: - go vet ./... +# Run e2e tests locally using go build while also setting up a proxy in the shell to allow +# the test to run as if it were inside the cluster. This enables mongodb connectivity while running locally. +# "MDB_LOCAL_OPERATOR=true" ensures the operator pod is not spun up while running the e2e test - since you're +# running it locally. +e2e-telepresence: cleanup-e2e install ## Run e2e tests locally using go build while also setting up a proxy e.g. make e2e-telepresence test=replica_set cleanup=true + export MDB_LOCAL_OPERATOR=true; \ + telepresence connect; \ + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + go test -v -timeout=30m -failfast $(options) ./test/e2e/$(test) ; \ + telepresence quit -# Generate code -generate: controller-gen - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." +e2e-k8s: cleanup-e2e install e2e-image ## Run e2e test by deploying test image in kubernetes, you can provide e2e.py flags e.g. make e2e-k8s test=replica_set e2eflags="--perform-cleanup". + python scripts/dev/e2e.py $(e2eflags) --test $(test) -# Build the docker image -docker-build: dockerfile - docker build -t ${IMG} . +e2e: cleanup-e2e install ## Run e2e test locally. e.g. 
make e2e test=replica_set cleanup=true + eval $$(scripts/dev/get_e2e_env_vars.py $(cleanup)); \ + go test -v -short -timeout=30m -failfast $(options) ./test/e2e/$(test) -# Push the docker image -docker-push: - docker push ${IMG} +e2e-gh: ## Trigger a GitHub Action for the given test + scripts/dev/run_e2e_gh.sh $(test) -# Download controller-gen locally if necessary -CONTROLLER_GEN = $(shell pwd)/bin/controller-gen -controller-gen: - $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) +cleanup-e2e: ## Cleans up e2e test env + kubectl delete mdbc,all,secrets -l e2e-test=true -n ${TEST_NAMESPACE} || true + # Most of the tests use StatefulSets, which in turn use stable storage. In order to + # avoid interleaving tests with each other, we need to drop them all. + kubectl delete pvc --all -n $(NAMESPACE) || true + kubectl delete pv --all -n $(NAMESPACE) || true -# Download kustomize locally if necessary -KUSTOMIZE = $(shell pwd)/bin/kustomize -kustomize: - $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) +generate-env-file: ## Generates a local-test.env for local testing + mkdir -p .community-operator-dev + { python scripts/dev/get_e2e_env_vars.py | tee >(cut -d' ' -f2 > .community-operator-dev/local-test.env) ;} > .community-operator-dev/local-test.export.env + . .community-operator-dev/local-test.export.env + +##@ Image + +operator-image: ## Build and push the operator image + python pipeline.py --image-name operator $(IMG_BUILD_ARGS) + +e2e-image: ## Build and push e2e test image + python pipeline.py --image-name e2e $(IMG_BUILD_ARGS) + +agent-image: ## Build and push agent image + python pipeline.py --image-name agent $(IMG_BUILD_ARGS) + +readiness-probe-image: ## Build and push readiness probe image + python pipeline.py --image-name readiness-probe $(IMG_BUILD_ARGS) + +version-upgrade-post-start-hook-image: ## Build and push version upgrade post start hook image + python pipeline.py --image-name version-upgrade-hook $(IMG_BUILD_ARGS) + +all-images: operator-image e2e-image agent-image readiness-probe-image version-upgrade-post-start-hook-image ## Create all required images + +define install-helm +@[ -f $(HELM) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 ;\ +chmod 700 get_helm.sh ;\ +./get_helm.sh ;\ +rm -rf $$TMP_DIR ;\ +} +endef - -# go-get-tool will 'go get' any package $2 and install it to $1. +# go-install-tool will 'go install' any package $2 and install it to $1. PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) -define go-get-tool +define go-install-tool @[ -f $(1) ] || { \ set -e ;\ TMP_DIR=$$(mktemp -d) ;\ cd $$TMP_DIR ;\ go mod init tmp ;\ echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ +GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ rm -rf $$TMP_DIR ;\ } endef -# Generate bundle manifests and metadata, then validate generated files. -.PHONY: bundle -bundle: manifests kustomize - operator-sdk generate kustomize manifests -q - cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) - $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) - operator-sdk bundle validate ./bundle - -# Build the bundle image. -.PHONY: bundle-build -bundle-build: - docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 
- -# Generate Dockerfile -.PHONY: dockerfile -dockerfile: - python scripts/dev/dockerfile_generator.py ${DOCKERFILE} > Dockerfile +help: ## Show this help screen. + @echo 'Usage: make ... ' + @echo '' + @echo 'Available targets are:' + @echo '' + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/PROJECT b/PROJECT index 88d8efcf9..fcd3ceff3 100644 --- a/PROJECT +++ b/PROJECT @@ -1,13 +1,25 @@ domain: mongodb.com -layout: go.kubebuilder.io/v3 +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} projectName: mko-v1 repo: github.com/mongodb/mongodb-kubernetes-operator resources: -- crdVersion: v1beta1 +- api: + crdVersion: v1 + namespaced: true group: mongodbcommunity kind: MongoDBCommunity version: v1 -version: 3-alpha -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: mongodb.com + group: mongodbcommunity + kind: SimpleMongoDBCommunity + path: github.com/mongodb/mongodb-kubernetes-operator/api/v1alpha1 + version: v1alpha1 +version: "3" diff --git a/README.md b/README.md index 5473481a3..5476e4383 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,31 @@ -# MongoDB Community Kubernetes Operator # +> **DEPRECATED:** This repository is deprecated but we will continue best-effort support until November 2025. Please use the new repository at [mongodb/mongodb-kubernetes](https://github.com/mongodb/mongodb-kubernetes) instead. +> +> For more information on this decision - what it means and entails - see the [announcement](https://github.com/mongodb/mongodb-kubernetes/releases/tag/v1.0.0) and our [public documentation](https://www.mongodb.com/docs/kubernetes/current/). +> +> A detailed migration guide is available to help you transition smoothly - see [guide](https://github.com/mongodb/mongodb-kubernetes/blob/master/docs/migration/community-operator-migration.md). There will be no functional changes in the new repository - only a better and unified experience as well as improved visibility into the development process. - -###v0.6.0 has introduced breaking changes. If you are upgrading from a previous version, follow the upgrade instructions outlined [in the release notes](https://github.com/mongodb/mongodb-kubernetes-operator/releases/tag/v0.6.0) +# MongoDB Community Kubernetes Operator # + -This is a [Kubernetes Operator](https://coreos.com/operators/) which deploys MongoDB Community into Kubernetes clusters. +This is a [Kubernetes Operator](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) which deploys MongoDB Community into Kubernetes clusters. If you are a MongoDB Enterprise customer, or need Enterprise features such as Backup, you can use the [MongoDB Enterprise Operator for Kubernetes](https://github.com/mongodb/mongodb-enterprise-kubernetes). Here is a talk from MongoDB Live 2020 about the Community Operator: * [Run it in Kubernetes! Community and Enterprise MongoDB in Containers](https://www.youtube.com/watch?v=2Xszdg-4T6A&t=1368s) +> **Note** +> +> Hi, I'm Dan Mckean 👋 I'm the Product Manager for MongoDB's support of Kubernetes. 
+> +> The [Community Operator](https://github.com/mongodb/mongodb-kubernetes-operator) is something I inherited when I started, but it doesn't get as much attention from us as we'd like, and we're trying to understand how it's used in order to determine its future. That will help us establish exactly what level of support we can offer, and what sort of timeframe we aim to provide support in 🙂 +> +> Here's a super short survey (it's much easier for us to review all the feedback that way!): [https://docs.google.com/forms/d/e/1FAIpQLSfwrwyxBSlUyJ6AmC-eYlgW_3JEdfA48SB2i5--_WpiynMW2w/viewform?usp=sf_link](https://docs.google.com/forms/d/e/1FAIpQLSfwrwyxBSlUyJ6AmC-eYlgW_3JEdfA48SB2i5--_WpiynMW2w/viewform?usp=sf_link) +> +> If you'd rather email me instead: [dan.mckean@mongodb.com](mailto:dan.mckean@mongodb.com?subject=MongoDB%20Community%20Operator%20feedback) + ## Table of Contents - [Documentation](#documentation) @@ -22,18 +36,21 @@ Here is a talk from MongoDB Live 2020 about the Community Operator: ## Documentation -See the [documentation](/docs) to learn how to: +See the [documentation](docs) to learn how to: -1. [Install or upgrade](/docs/install-upgrade.md) the Operator. -1. [Deploy and configure](/docs/deploy-configure.md) MongoDB resources. -1. [Create a database user](/docs/users.md) with SCRAM authentication. -1. [Secure MongoDB resource connections](/docs/secure.md) using TLS. +1. [Install or upgrade](docs/install-upgrade.md) the Operator. +1. [Deploy and configure](docs/deploy-configure.md) MongoDB resources. +1. [Configure Logging](docs/logging.md) of the MongoDB resource components. +1. [Create a database user](docs/users.md) with SCRAM authentication. +1. [Secure MongoDB resource connections](docs/secure.md) using TLS. + +*NOTE: [MongoDB Enterprise Kubernetes Operator](https://www.mongodb.com/docs/kubernetes-operator/master/) docs are for the enterprise operator use case and NOT for the community operator. In addition to the docs mentioned above, you can also refer to this [blog post](https://www.mongodb.com/blog/post/run-secure-containerized-mongodb-deployments-using-the-mongo-db-community-kubernetes-oper) to learn more about community operator deployment* ## Supported Features The MongoDB Community Kubernetes Operator supports the following features: -- Create [replica sets](https://docs.mongodb.com/manual/replication/) +- Create [replica sets](https://www.mongodb.com/docs/manual/replication/) - Upgrade and downgrade MongoDB server version - Scale replica sets up and down - Read from and write to the replica set while scaling, upgrading, and downgrading. These operations are done in an "always up" manner. 
@@ -41,23 +58,32 @@ The MongoDB Community Kubernetes Operator supports the following features: - Use any of the available [Docker MongoDB images](https://hub.docker.com/_/mongo/) - Connect to the replica set from inside the Kubernetes cluster (no external connectivity) - Secure client-to-server and server-to-server connections with TLS -- Create users with [SCRAM](https://docs.mongodb.com/manual/core/security-scram/) authentication +- Create users with [SCRAM](https://www.mongodb.com/docs/manual/core/security-scram/) authentication - Create custom roles - -### Planned Features -- Server internal authentication via keyfile +- Enable a [metrics target that can be used with Prometheus](docs/prometheus/README.md) ## Contribute Before you contribute to the MongoDB Community Kubernetes Operator, please read: -- [MongoDB Community Kubernetes Operator Architecture](/docs/architecture.md) -- [Contributing to MongoDB Community Kubernetes Operator](/docs/contributing.md) +- [MongoDB Community Kubernetes Operator Architecture](docs/architecture.md) +- [Contributing to MongoDB Community Kubernetes Operator](docs/contributing.md) Please file issues before filing PRs. For PRs to be accepted, contributors must sign our [CLA](https://www.mongodb.com/legal/contributor-agreement). Reviewers, please ensure that the CLA has been signed by referring to [the contributors tool](https://contributors.corp.mongodb.com/) (internal link). +## Linting + +This project uses the following linters upon every Pull Request: + +* `gosec` is a tool that finds security problems in the code +* `Black` is a tool that verifies that Python code is properly formatted +* `MyPy` is a static type checker for Python +* `Kube-linter` is a tool that verifies that all Kubernetes YAML manifests are formatted correctly +* `Go vet` is Go's built-in static checker +* `Snyk` is a vulnerability scanner + ## License Please see the [LICENSE](LICENSE.md) file. diff --git a/SECURITY.md b/SECURITY.md index 519058cbd..e35b8facc 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -3,6 +3,6 @@ ## Reporting a Vulnerability Any security concerns or vulnerabilities discovered in one of MongoDB’s products or hosted services -can be responsibly disclosed by utilizing one of the methods described in our [create a vulnerability report](https://docs.mongodb.com/manual/tutorial/create-a-vulnerability-report/) docs page. +can be responsibly disclosed by utilizing one of the methods described in our [create a vulnerability report](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report/) docs page. While we greatly appreciate community reports regarding security issues, at this time MongoDB does not provide compensation for vulnerability reports. 
diff --git a/api/v1/doc.go b/api/v1/doc.go new file mode 100644 index 000000000..a6a3905a8 --- /dev/null +++ b/api/v1/doc.go @@ -0,0 +1,4 @@ +package v1 + +// +k8s:deepcopy-gen=package +// +versionName=v1 diff --git a/api/v1/mongodbcommunity_types.go b/api/v1/mongodbcommunity_types.go index fa6dd9f1a..6a5e4bf0c 100644 --- a/api/v1/mongodbcommunity_types.go +++ b/api/v1/mongodbcommunity_types.go @@ -3,39 +3,60 @@ package v1 import ( "encoding/json" "fmt" + "regexp" "strings" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" - appsv1 "k8s.io/api/apps/v1" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" - + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/stretchr/objx" ) type Type string const ( - ReplicaSet Type = "ReplicaSet" + ReplicaSet Type = "ReplicaSet" + defaultDBForUser string = "admin" ) type Phase string const ( - Running Phase = "Running" - Failed Phase = "Failed" - Pending Phase = "Pending" + Running Phase = "Running" + Failed Phase = "Failed" + Pending Phase = "Pending" + defaultPasswordKey = "password" + + // Keep in sync with controllers/prometheus.go + defaultPrometheusPort = 9216 ) +// SCRAM-SHA-256 and SCRAM-SHA-1 are the supported auth modes. const ( - defaultPasswordKey = "password" + defaultMode AuthMode = "SCRAM-SHA-256" +) + +const ( + defaultClusterDomain = "cluster.local" +) + +// Connection string options that should be ignored as they are set through other means. +var ( + protectedConnectionStringOptions = map[string]struct{}{ + "replicaSet": {}, + "ssl": {}, + "tls": {}, + } ) // MongoDBCommunitySpec defines the desired state of MongoDB @@ -47,7 +68,13 @@ type MongoDBCommunitySpec struct { // +kubebuilder:validation:Enum=ReplicaSet Type Type `json:"type"` // Version defines which version of MongoDB will be used - Version string `json:"version"` + Version string `json:"version,omitempty"` + + // Arbiters is the number of arbiters to add to the Replica Set. + // It is not recommended to have more than one arbiter per Replica Set. + // More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/ + // +optional + Arbiters int `json:"arbiters"` // FeatureCompatibilityVersion configures the feature compatibility version that will // be set for the deployment @@ -74,13 +101,93 @@ type MongoDBCommunitySpec struct { // +optional StatefulSetConfiguration StatefulSetConfiguration `json:"statefulSet,omitempty"` + // AgentConfiguration sets options for the MongoDB automation agent + // +optional + AgentConfiguration AgentConfiguration `json:"agent,omitempty"` + // AdditionalMongodConfig is additional configuration that can be passed to // each data-bearing mongod at runtime. 
Uses the same structure as the mongod - // configuration file: https://docs.mongodb.com/manual/reference/configuration-options/ + // configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/ // +kubebuilder:validation:Type=object // +optional + // +kubebuilder:pruning:PreserveUnknownFields // +nullable AdditionalMongodConfig MongodConfiguration `json:"additionalMongodConfig,omitempty"` + + // AutomationConfigOverride is merged on top of the operator created automation config. Processes are merged + // by name. Currently only the process.disabled field is supported. + AutomationConfigOverride *AutomationConfigOverride `json:"automationConfig,omitempty"` + + // Prometheus configurations. + // +optional + Prometheus *Prometheus `json:"prometheus,omitempty"` + + // Additional options to be appended to the connection string. These options apply to the entire resource and to each user. + // +kubebuilder:validation:Type=object + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + AdditionalConnectionStringConfig MapWrapper `json:"additionalConnectionStringConfig,omitempty"` + + // MemberConfig + // +optional + MemberConfig []automationconfig.MemberOptions `json:"memberConfig,omitempty"` +} + +// MapWrapper is a wrapper for a map to be used by other structs. +// The CRD generator does not support map[string]interface{} +// on the top level and hence we need to work around this with +// a wrapping struct. +type MapWrapper struct { + Object map[string]interface{} `json:"-"` +} + +// MarshalJSON defers JSON encoding to the wrapped map +func (m *MapWrapper) MarshalJSON() ([]byte, error) { + return json.Marshal(m.Object) +} + +// UnmarshalJSON will decode the data into the wrapped map +func (m *MapWrapper) UnmarshalJSON(data []byte) error { + if m.Object == nil { + m.Object = map[string]interface{}{} + } + + // Handle keys like net.port so that they are set as nested maps. + // Without this, after unmarshalling there is just the key "net.port", which is not + // a nested map, and methods like GetPort() cannot access the value. + tmpMap := map[string]interface{}{} + err := json.Unmarshal(data, &tmpMap) + if err != nil { + return err + } + + for k, v := range tmpMap { + m.SetOption(k, v) + } + + return nil +} + +func (m *MapWrapper) DeepCopy() *MapWrapper { + if m != nil && m.Object != nil { + return &MapWrapper{ + Object: runtime.DeepCopyJSON(m.Object), + } + } + c := NewMapWrapper() + return &c +} + +// NewMapWrapper returns an empty MapWrapper +func NewMapWrapper() MapWrapper { + return MapWrapper{Object: map[string]interface{}{}} +} + +// SetOption updates the MapWrapper with a new option +func (m MapWrapper) SetOption(key string, value interface{}) MapWrapper { + m.Object = objx.New(m.Object).Set(key, value) + return m }
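To make the dotted-key handling in UnmarshalJSON and SetOption concrete, here is a small standalone sketch of the underlying objx behavior. It assumes only the same github.com/stretchr/objx dependency that is imported above; the values are illustrative.

package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// A CR author writes the flat key "net.port"...
	flat := map[string]interface{}{"net.port": 40333}

	// ...and Set with a dotted selector stores it as nested maps,
	// which is what MapWrapper.SetOption does for every key.
	nested := objx.New(map[string]interface{}{})
	for k, v := range flat {
		nested = nested.Set(k, v)
	}

	fmt.Println(nested.Get("net.port").Int()) // 40333
	_, ok := nested["net"].(map[string]interface{})
	fmt.Println(ok) // true: "net" is now a nested map
}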
// ReplicaSetHorizonConfiguration holds the split horizon DNS settings for @@ -103,6 +210,43 @@ type CustomRole struct { AuthenticationRestrictions []AuthenticationRestriction `json:"authenticationRestrictions,omitempty"` } +type Prometheus struct { + // Port where metrics endpoint will bind to. Defaults to 9216. + // +optional + Port int `json:"port,omitempty"` + + // HTTP Basic Auth Username for metrics endpoint. + Username string `json:"username"` + + // Name of a Secret containing a HTTP Basic Auth Password. + PasswordSecretRef SecretKeyReference `json:"passwordSecretRef"` + + // Indicates path to the metrics endpoint. + // +kubebuilder:validation:Pattern=^\/[a-z0-9]+$ + MetricsPath string `json:"metricsPath,omitempty"` + + // Name of a Secret (type kubernetes.io/tls) holding the certificates to use in the + // Prometheus endpoint. + // +optional + TLSSecretRef SecretKeyReference `json:"tlsSecretKeyRef,omitempty"` +} + +func (p Prometheus) GetPasswordKey() string { + if p.PasswordSecretRef.Key != "" { + return p.PasswordSecretRef.Key + } + + return "password" +} + +func (p Prometheus) GetPort() int { + if p.Port != 0 { + return p.Port + } + + return defaultPrometheusPort +} + // ConvertToAutomationConfigCustomRole converts between a custom role defined by the crd and a custom role // that can be used in the automation config. func (c CustomRole) ConvertToAutomationConfigCustomRole() automationconfig.CustomRole { @@ -158,7 +302,7 @@ type Privilege struct { } // Resource specifies the resources upon which a privilege permits actions. -// See https://docs.mongodb.com/manual/reference/resource-document for more. +// See https://www.mongodb.com/docs/manual/reference/resource-document for more. type Resource struct { // +optional DB *string `json:"db,omitempty"` @@ -177,10 +321,66 @@ type AuthenticationRestriction struct { ServerAddress []string `json:"serverAddress"` } +// AutomationConfigOverride contains fields which will be overridden in the operator created config. +type AutomationConfigOverride struct { + Processes []OverrideProcess `json:"processes,omitempty"` + ReplicaSet OverrideReplicaSet `json:"replicaSet,omitempty"` +} + +type OverrideReplicaSet struct { + // Id can be used together with additionalMongodConfig.replication.replSetName + // to manage clusters where replSetName differs from the MongoDBCommunity resource name + Id *string `json:"id,omitempty"` + // +kubebuilder:validation:Type=object + // +kubebuilder:pruning:PreserveUnknownFields + Settings MapWrapper `json:"settings,omitempty"` +} + +// Note: We do not use the automationconfig.Process type directly here, as unmarshalling cannot happen directly +// with Args26, which is a map[string]interface{} + +// OverrideProcess contains fields that we can override on the AutomationConfig processes. +type OverrideProcess struct { + Name string `json:"name"` + Disabled bool `json:"disabled"` + LogRotate *automationconfig.CrdLogRotate `json:"logRotate,omitempty"` +} + // StatefulSetConfiguration holds the optional custom StatefulSet // that should be merged into the operator created one. type StatefulSetConfiguration struct { + // +kubebuilder:pruning:PreserveUnknownFields SpecWrapper StatefulSetSpecWrapper `json:"spec"` + // +optional + MetadataWrapper StatefulSetMetadataWrapper `json:"metadata"` +} + +type LogLevel string + +const ( + LogLevelDebug LogLevel = "DEBUG" + LogLevelInfo LogLevel = "INFO" + LogLevelWarn LogLevel = "WARN" + LogLevelError LogLevel = "ERROR" + LogLevelFatal LogLevel = "FATAL" +) + +type AgentConfiguration struct { + // +optional + LogLevel LogLevel `json:"logLevel"` + // +optional + LogFile string `json:"logFile"` + // +optional + MaxLogFileDurationHours int `json:"maxLogFileDurationHours"` + // +optional + // LogRotate if enabled, will enable LogRotate for all processes. + LogRotate *automationconfig.CrdLogRotate `json:"logRotate,omitempty"` + // +optional + // AuditLogRotate if enabled, will enable AuditLogRotate for all processes. 
+ AuditLogRotate *automationconfig.CrdLogRotate `json:"auditLogRotate,omitempty"` + // +optional + // SystemLog configures the system log of mongod + SystemLog *automationconfig.SystemLog `json:"systemLog,omitempty"` } // StatefulSetSpecWrapper is a wrapper around StatefulSetSpec with a custom implementation @@ -206,34 +406,58 @@ func (m *StatefulSetSpecWrapper) DeepCopy() *StatefulSetSpecWrapper { } } +// StatefulSetMetadataWrapper is a wrapper around Labels and Annotations +type StatefulSetMetadataWrapper struct { + // +optional + Labels map[string]string `json:"labels,omitempty"` + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +func (m *StatefulSetMetadataWrapper) DeepCopy() *StatefulSetMetadataWrapper { + return &StatefulSetMetadataWrapper{ + Labels: m.Labels, + Annotations: m.Annotations, + } +} + // MongodConfiguration holds the optional mongod configuration // that should be merged with the operator created one. -// -// The CRD generator does not support map[string]interface{} -// on the top level and hence we need to work around this with -// a wrapping struct. type MongodConfiguration struct { - Object map[string]interface{} `json:"-"` + MapWrapper `json:"-"` } -// MarshalJSON defers JSON encoding to the wrapped map -func (m *MongodConfiguration) MarshalJSON() ([]byte, error) { - return json.Marshal(m.Object) +// NewMongodConfiguration returns an empty MongodConfiguration +func NewMongodConfiguration() MongodConfiguration { + return MongodConfiguration{MapWrapper{map[string]interface{}{}}} } -// UnmarshalJSON will decode the data into the wrapped map -func (m *MongodConfiguration) UnmarshalJSON(data []byte) error { - if m.Object == nil { - m.Object = map[string]interface{}{} +// GetDBDataDir returns the db path which should be used. +func (m MongodConfiguration) GetDBDataDir() string { + return objx.New(m.Object).Get("storage.dbPath").Str(automationconfig.DefaultMongoDBDataDir) +} + +// GetDBPort returns the port that should be used for the mongod process. +// If port is not specified, the default port of 27017 will be used. +func (m MongodConfiguration) GetDBPort() int { + portValue := objx.New(m.Object).Get("net.port") + + // The underlying map could have been manipulated in code, e.g. via SetDBPort (e.g. in unit tests), in which case + // the value will be an int, or it could have been deserialized from JSON, in which case an integer in an untyped + // map is stored as float64. This is the behavior of https://pkg.go.dev/encoding/json#Unmarshal, which converts + // JSON numbers into float64 values. + if portValue.IsInt() { + return portValue.Int(automationconfig.DefaultDBPort) + } else if portValue.IsFloat64() { + return int(portValue.Float64(float64(automationconfig.DefaultDBPort))) } - return json.Unmarshal(data, &m.Object) + return automationconfig.DefaultDBPort } -func (m *MongodConfiguration) DeepCopy() *MongodConfiguration { - return &MongodConfiguration{ - Object: runtime.DeepCopyJSON(m.Object), - } +// SetDBPort ensures that port is stored as float64 +func (m MongodConfiguration) SetDBPort(port int) MongodConfiguration { + m.SetOption("net.port", float64(port)) + return m }
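The int/float64 branching in GetDBPort exists because of how encoding/json decodes numbers into untyped maps; a quick standalone illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var m map[string]interface{}
	// Decoding JSON into an untyped map stores every number as float64...
	_ = json.Unmarshal([]byte(`{"port": 27017}`), &m)
	fmt.Printf("%T\n", m["port"]) // float64

	// ...while setting the value directly in Go code keeps it an int,
	// which is why GetDBPort has to handle both representations.
	m["port"] = 27017
	fmt.Printf("%T\n", m["port"]) // int
}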
Defaults to "admin" // +optional - DB string `json:"db"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:=admin + DB string `json:"db,omitempty"` // PasswordSecretRef is a reference to the secret containing this user's password - PasswordSecretRef SecretKeyReference `json:"passwordSecretRef"` + // +optional + PasswordSecretRef SecretKeyReference `json:"passwordSecretRef,omitempty"` // Roles is an array of roles assigned to this user Roles []Role `json:"roles"` // ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials + // These secrets names must be different for each user in a deployment. // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - ScramCredentialsSecretName string `json:"scramCredentialsSecretName"` + // +optional + ScramCredentialsSecretName string `json:"scramCredentialsSecretName,omitempty"` + + // ConnectionStringSecretName is the name of the secret object created by the operator which exposes the connection strings for the user. + // If provided, this secret must be different for each user in a deployment. + // +optional + ConnectionStringSecretName string `json:"connectionStringSecretName,omitempty"` + + // ConnectionStringSecretNamespace is the namespace of the secret object created by the operator which exposes the connection strings for the user. + // +optional + ConnectionStringSecretNamespace string `json:"connectionStringSecretNamespace,omitempty"` + + // Additional options to be appended to the connection string. + // These options apply only to this user and will override any existing options in the resource. + // +kubebuilder:validation:Type=object + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + // +nullable + AdditionalConnectionStringConfig MapWrapper `json:"additionalConnectionStringConfig,omitempty"` } func (m MongoDBUser) GetPasswordSecretKey() string { @@ -268,6 +514,50 @@ func (m MongoDBUser) GetScramCredentialsSecretName() string { return fmt.Sprintf("%s-%s", m.ScramCredentialsSecretName, "scram-credentials") } +// GetConnectionStringSecretName gets the connection string secret name provided by the user or generated +// from the SCRAM user configuration. +func (m MongoDBUser) GetConnectionStringSecretName(resourceName string) string { + if m.ConnectionStringSecretName != "" { + return m.ConnectionStringSecretName + } + + return normalizeName(fmt.Sprintf("%s-%s-%s", resourceName, m.DB, m.Name)) +} + +// GetConnectionStringSecretNamespace gets the connection string secret namespace provided by the user or generated +// from the SCRAM user configuration. +func (m MongoDBUser) GetConnectionStringSecretNamespace(resourceNamespace string) string { + if m.ConnectionStringSecretNamespace != "" { + return m.ConnectionStringSecretNamespace + } + + return resourceNamespace +} + +// normalizeName returns a string that conforms to RFC-1123 +func normalizeName(name string) string { + errors := validation.IsDNS1123Subdomain(name) + if len(errors) == 0 { + return name + } + + // convert name to lowercase and replace invalid characters with '-' + name = strings.ToLower(name) + re := regexp.MustCompile("[^a-z0-9-]+") + name = re.ReplaceAllString(name, "-") + + // Remove duplicate `-` resulting from contiguous non-allowed chars. 
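As a concrete illustration of the defaulting and normalization above: a hypothetical resource "my-rs" with a user "admin_User" in db "admin" and no connectionStringSecretName gets a generated, RFC 1123-normalized secret name. A simplified standalone sketch of normalizeName (the fast path via IsDNS1123Subdomain and the length truncation are omitted):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// normalize mirrors the normalizeName logic above in simplified form:
// lowercase, replace disallowed characters, collapse and trim '-'.
func normalize(name string) string {
	name = strings.ToLower(name)
	name = regexp.MustCompile(`[^a-z0-9-]+`).ReplaceAllString(name, "-")
	name = regexp.MustCompile(`-+`).ReplaceAllString(name, "-")
	return strings.Trim(name, "-")
}

func main() {
	// Default connection string secret name: <resource>-<db>-<user>.
	fmt.Println(normalize(fmt.Sprintf("%s-%s-%s", "my-rs", "admin", "admin_User")))
	// Output: my-rs-admin-admin-user
}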
+ // SecretKeyReference is a reference to the secret containing the user's password type SecretKeyReference struct { // Name is the name of the secret storing this user's password @@ -308,27 +598,39 @@ type TLS struct { // CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS. // The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt". // This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required. + // Alternatively, an entry tls.pem, containing the concatenation of cert and key, can be provided. + // If all of tls.pem, tls.crt and tls.key are present, the tls.pem one needs to be equal to the concatenation of tls.crt and tls.key // +optional - CertificateKeySecret LocalObjectReference `json:"certificateKeySecretRef"` + CertificateKeySecret corev1.LocalObjectReference `json:"certificateKeySecretRef"` - // CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + // CaCertificateSecret is a reference to a Secret containing the certificate for the CA which signed the server certificates // The certificate is expected to be available under the key "ca.crt" // +optional - CaConfigMap LocalObjectReference `json:"caConfigMapRef"` -} + CaCertificateSecret *corev1.LocalObjectReference `json:"caCertificateSecretRef,omitempty"` -// LocalObjectReference is a reference to another Kubernetes object by name. -// TODO: Replace with a type from the K8s API. CoreV1 has an equivalent -// "LocalObjectReference" type but it contains a TODO in its -// description that we don't want in our CRD. -type LocalObjectReference struct { - Name string `json:"name"` + // CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + // The certificate is expected to be available under the key "ca.crt" + // This field is ignored when CaCertificateSecretRef is configured + // +optional + CaConfigMap *corev1.LocalObjectReference `json:"caConfigMapRef,omitempty"` } type Authentication struct { // Modes is an array specifying which authentication methods should be enabled. Modes []AuthMode `json:"modes"` + // AgentMode contains the authentication mode used by the automation agent. + // +optional + AgentMode AuthMode `json:"agentMode,omitempty"` + + // AgentCertificateSecret is a reference to a Secret containing the certificate and the key for the automation agent + // The secret needs to have available: + // - certificate under key: "tls.crt" + // - private key under key: "tls.key" + // If additionally, tls.pem is present, then it needs to be equal to the concatenation of tls.crt and tls.key + // +optional + AgentCertificateSecret *corev1.LocalObjectReference `json:"agentCertificateSecretRef,omitempty"` + // IgnoreUnknownUsers set to true will ensure any users added manually (not through the CRD) // will not be removed. 
@@ -337,20 +639,48 @@ type Authentication struct { // +optional // +kubebuilder:default:=true // +nullable - IgnoreUnknownUsers *bool `json:"ignoreUnknownUsers"` + IgnoreUnknownUsers *bool `json:"ignoreUnknownUsers,omitempty"` } -// +kubebuilder:validation:Enum=SCRAM +// +kubebuilder:validation:Enum=SCRAM;SCRAM-SHA-256;SCRAM-SHA-1;X509 type AuthMode string +func IsAuthPresent(authModes []AuthMode, auth string) bool { + for _, authMode := range authModes { + if string(authMode) == auth { + return true + } + } + return false +} + +// ConvertAuthModeToAuthMechanism acts as a map but is immutable. It allows users to use different labels to describe the +// same authentication mode. +func ConvertAuthModeToAuthMechanism(authModeLabel AuthMode) string { + switch authModeLabel { + case "SCRAM", "SCRAM-SHA-256": + return constants.Sha256 + case "SCRAM-SHA-1": + return constants.Sha1 + case "X509": + return constants.X509 + default: + return "" + } +} + // MongoDBCommunityStatus defines the observed state of MongoDB type MongoDBCommunityStatus struct { MongoURI string `json:"mongoUri"` Phase Phase `json:"phase"` + Version string `json:"version,omitempty"` CurrentStatefulSetReplicas int `json:"currentStatefulSetReplicas"` CurrentMongoDBMembers int `json:"currentMongoDBMembers"` + CurrentStatefulSetArbitersReplicas int `json:"currentStatefulSetArbitersReplicas,omitempty"` + CurrentMongoDBArbiters int `json:"currentMongoDBArbiters,omitempty"` + Message string `json:"message,omitempty"` } @@ -362,6 +692,12 @@ type MongoDBCommunityStatus struct { // +kubebuilder:resource:path=mongodbcommunity,scope=Namespaced,shortName=mdbc,singular=mongodbcommunity // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Current state of the MongoDB deployment" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="Version of MongoDB server" +// +kubebuilder:metadata:annotations="service.binding/type=mongodb" +// +kubebuilder:metadata:annotations="service.binding/provider=community" +// +kubebuilder:metadata:annotations="service.binding=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret" +// +kubebuilder:metadata:annotations="service.binding/connectionString=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=connectionString.standardSrv" +// +kubebuilder:metadata:annotations="service.binding/username=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=username" +// +kubebuilder:metadata:annotations="service.binding/password=path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=password" type MongoDBCommunity struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -370,145 +706,447 @@ type MongoDBCommunity struct { Status MongoDBCommunityStatus `json:"status,omitempty"` } -func (m MongoDBCommunity) GetAgentPasswordSecretNamespacedName() types.NamespacedName { +func (m *MongoDBCommunity) GetMongodConfiguration() MongodConfiguration { + mongodConfig := NewMongodConfiguration() + for k, v := range m.Spec.AdditionalMongodConfig.Object { + mongodConfig.SetOption(k, v) + } + return mongodConfig +} + +func (m *MongoDBCommunity) GetAgentPasswordSecretNamespacedName() types.NamespacedName { return types.NamespacedName{Name: m.Name + "-agent-password", Namespace: m.Namespace} } -func (m MongoDBCommunity) GetAgentKeyfileSecretNamespacedName() types.NamespacedName { +func (m 
*MongoDBCommunity) GetAgentKeyfileSecretNamespacedName() types.NamespacedName { return types.NamespacedName{Name: m.Name + "-keyfile", Namespace: m.Namespace} } -// GetScramOptions returns a set of Options that are used to configure scram -// authentication. -func (m MongoDBCommunity) GetScramOptions() scram.Options { +func (m *MongoDBCommunity) GetOwnerReferences() []metav1.OwnerReference { + ownerReference := *metav1.NewControllerRef(m, schema.GroupVersionKind{ + Group: GroupVersion.Group, + Version: GroupVersion.Version, + Kind: m.Kind, + }) + return []metav1.OwnerReference{ownerReference} +} +// GetAuthOptions returns a set of Options that are used to configure scram +// authentication. +func (m *MongoDBCommunity) GetAuthOptions() authtypes.Options { ignoreUnknownUsers := true if m.Spec.Security.Authentication.IgnoreUnknownUsers != nil { ignoreUnknownUsers = *m.Spec.Security.Authentication.IgnoreUnknownUsers } - return scram.Options{ - AuthoritativeSet: !ignoreUnknownUsers, - KeyFile: scram.AutomationAgentKeyFilePathInContainer, - AutoAuthMechanisms: []string{scram.Sha256}, - AgentName: scram.AgentName, - AutoAuthMechanism: scram.Sha256, + authModes := m.Spec.Security.Authentication.Modes + defaultAuthMechanism := ConvertAuthModeToAuthMechanism(defaultMode) + autoAuthMechanism := ConvertAuthModeToAuthMechanism(m.Spec.GetAgentAuthMode()) + authMechanisms := make([]string, len(authModes)) + + if autoAuthMechanism == "" { + autoAuthMechanism = defaultAuthMechanism + } + + if len(authModes) == 0 { + authMechanisms = []string{defaultAuthMechanism} + } else { + for i, authMode := range authModes { + if authMech := ConvertAuthModeToAuthMechanism(authMode); authMech != "" { + authMechanisms[i] = authMech + } + } + } + + return authtypes.Options{ + AuthoritativeSet: !ignoreUnknownUsers, + KeyFile: constants.AutomationAgentKeyFilePathInContainer, + AuthMechanisms: authMechanisms, + AgentName: constants.AgentName, + AutoAuthMechanism: autoAuthMechanism, } } -// GetScramUsers converts all of the users from the spec into users -// that can be used to configure scram authentication. -func (m MongoDBCommunity) GetScramUsers() []scram.User { - users := make([]scram.User, len(m.Spec.Users)) +// GetAuthUsers converts all the users from the spec into users +// that can be used to configure authentication. +func (m *MongoDBCommunity) GetAuthUsers() []authtypes.User { + users := make([]authtypes.User, len(m.Spec.Users)) for i, u := range m.Spec.Users { - roles := make([]scram.Role, len(u.Roles)) + roles := make([]authtypes.Role, len(u.Roles)) for j, r := range u.Roles { - roles[j] = scram.Role{ + + roles[j] = authtypes.Role{ Name: r.Name, Database: r.DB, } } - users[i] = scram.User{ - Username: u.Name, - Database: u.DB, - Roles: roles, - PasswordSecretKey: u.GetPasswordSecretKey(), - PasswordSecretName: u.PasswordSecretRef.Name, - ScramCredentialsSecretName: u.GetScramCredentialsSecretName(), + + // When the MongoDB resource has been fetched from Kubernetes, + // the User's database will be set to "admin" because this is set + // by default on the CRD, but when running e2e tests, the resource + // we are working with is local -- it has not been posted to the + // Kubernetes API and the `u.DB` was not set to the default ("admin"). + // This is why the "admin" value is being set here. 
+ if u.DB == "" { + u.DB = defaultDBForUser + } + + users[i] = authtypes.User{ + Username: u.Name, + Database: u.DB, + Roles: roles, + ConnectionStringSecretName: u.GetConnectionStringSecretName(m.Name), + ConnectionStringSecretNamespace: u.GetConnectionStringSecretNamespace(m.Namespace), + ConnectionStringOptions: u.AdditionalConnectionStringConfig.Object, + } + + if u.DB != constants.ExternalDB { + users[i].ScramCredentialsSecretName = u.GetScramCredentialsSecretName() + users[i].PasswordSecretKey = u.GetPasswordSecretKey() + users[i].PasswordSecretName = u.PasswordSecretRef.Name } } return users } -func (m MongoDBCommunity) AutomationConfigMembersThisReconciliation() int { - // determine the correct number of automation config replica set members - // based on our desired number, and our current number +// AgentCertificateSecretNamespacedName returns the namespaced name of the secret containing the agent certificate. +func (m *MongoDBCommunity) AgentCertificateSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{ + Namespace: m.Namespace, + Name: m.Spec.GetAgentCertificateRef(), + } +} + +// AgentCertificatePemSecretNamespacedName returns the namespaced name of the secret containing the agent certificate in PEM format. +func (m *MongoDBCommunity) AgentCertificatePemSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{ + Namespace: m.Namespace, + Name: m.Spec.GetAgentCertificateRef() + "-pem", + } +} + +// GetAgentCertificateRef returns the name of the secret containing the agent certificate. +// If it is specified in the CR, it will return this. Otherwise, it defaults to agent-certs. +func (m *MongoDBCommunitySpec) GetAgentCertificateRef() string { + agentCertSecret := "agent-certs" + if m.Security.Authentication.AgentCertificateSecret != nil && m.Security.Authentication.AgentCertificateSecret.Name != "" { + agentCertSecret = m.Security.Authentication.AgentCertificateSecret.Name + } + return agentCertSecret +} + +// GetAgentAuthMode returns the agent authentication mode. If the agent auth mode is specified, it will return this. +// Otherwise, if the spec.security.authentication.modes array is empty, it will default to SCRAM-SHA-256. +// If spec.security.authentication.modes has one element, the agent auth mode will default to that. +// If spec.security.authentication.modes has more than one element, then agent auth will need to be specified, +// with one exception: if spec.security.authentication.modes contains only SCRAM-SHA-256 and SCRAM-SHA-1, then it defaults to SCRAM-SHA-256 (for backwards compatibility). +func (m *MongoDBCommunitySpec) GetAgentAuthMode() AuthMode { + if m.Security.Authentication.AgentMode != "" { + return m.Security.Authentication.AgentMode + } + + if len(m.Security.Authentication.Modes) == 0 { + return "SCRAM-SHA-256" + } else if len(m.Security.Authentication.Modes) == 1 { + return m.Security.Authentication.Modes[0] + } else if len(m.Security.Authentication.Modes) == 2 { + if (IsAuthPresent(m.Security.Authentication.Modes, "SCRAM") || IsAuthPresent(m.Security.Authentication.Modes, "SCRAM-SHA-256")) && + IsAuthPresent(m.Security.Authentication.Modes, "SCRAM-SHA-1") { + return "SCRAM-SHA-256" + } + } + return "" +} + +func (m *MongoDBCommunitySpec) IsAgentX509() bool { + return m.GetAgentAuthMode() == "X509" +} + +// IsStillScaling returns true if this resource is currently scaling, +// considering both arbiters and regular members.
+func (m *MongoDBCommunity) IsStillScaling() bool { + arbiters := automationConfigReplicasScaler{ + current: m.CurrentArbiters(), + desired: m.DesiredArbiters(), + forceIndividualScaling: true, + } + + return scale.IsStillScaling(m) || scale.IsStillScaling(arbiters) +} + +// AutomationConfigMembersThisReconciliation determines the correct number of +// automation config replica set members based on our desired number, and our +// current number. +func (m *MongoDBCommunity) AutomationConfigMembersThisReconciliation() int { return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ - desired: m.Spec.Members, current: m.Status.CurrentMongoDBMembers, + desired: m.Spec.Members, + }) +} + +// AutomationConfigArbitersThisReconciliation determines the correct number of +// automation config replica set arbiters based on our desired number, and our +// current number. +// +// Will not update arbiters until members have reached the desired number. +func (m *MongoDBCommunity) AutomationConfigArbitersThisReconciliation() int { + if scale.IsStillScaling(m) { + return m.Status.CurrentMongoDBArbiters + } + + return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ + desired: m.Spec.Arbiters, + current: m.Status.CurrentMongoDBArbiters, + forceIndividualScaling: true, }) } +// GetOptionsString returns a string format of the connection string +// options that can be appended directly to the connection string. +// +// Only takes into account options for the resource and not any user. +func (m *MongoDBCommunity) GetOptionsString() string { + generalOptionsMap := m.Spec.AdditionalConnectionStringConfig.Object + optionValues := make([]string, len(generalOptionsMap)) + i := 0 + + for key, value := range generalOptionsMap { + if _, protected := protectedConnectionStringOptions[key]; !protected { + optionValues[i] = fmt.Sprintf("%s=%v", key, value) + i += 1 + } + } + + optionValues = optionValues[:i] + + optionsString := "" + if i > 0 { + optionsString = "&" + strings.Join(optionValues, "&") + } + return optionsString +} + +// GetUserOptionsString returns a string format of the connection string +// options that can be appended directly to the connection string. +// +// Takes into account both user options and resource options. +// User options will override any existing options in the resource.
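// For example (hypothetical values): resource options {readPreference: primary}
// merged with user options {readPreference: secondary} yield "&readPreference=secondary";
// protected keys such as replicaSet, tls and ssl are dropped wherever they are set
// (see the connection string tests further below).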
+func (m *MongoDBCommunity) GetUserOptionsString(user authtypes.User) string { + generalOptionsMap := m.Spec.AdditionalConnectionStringConfig.Object + userOptionsMap := user.ConnectionStringOptions + optionValues := make([]string, len(generalOptionsMap)+len(userOptionsMap)) + i := 0 + for key, value := range userOptionsMap { + if _, protected := protectedConnectionStringOptions[key]; !protected { + optionValues[i] = fmt.Sprintf("%s=%v", key, value) + i += 1 + } + } + + for key, value := range generalOptionsMap { + _, ok := userOptionsMap[key] + if _, protected := protectedConnectionStringOptions[key]; !ok && !protected { + optionValues[i] = fmt.Sprintf("%s=%v", key, value) + i += 1 + } + } + + optionValues = optionValues[:i] + + optionsString := "" + if i > 0 { + optionsString = "&" + strings.Join(optionValues, "&") + } + return optionsString +} + // MongoURI returns a mongo uri which can be used to connect to this deployment -func (m MongoDBCommunity) MongoURI() string { - members := make([]string, m.Spec.Members) - clusterDomain := "svc.cluster.local" // TODO: make this configurable - for i := 0; i < m.Spec.Members; i++ { - members[i] = fmt.Sprintf("%s-%d.%s.%s.%s:%d", m.Name, i, m.ServiceName(), m.Namespace, clusterDomain, 27017) +func (m *MongoDBCommunity) MongoURI(clusterDomain string) string { + optionsString := m.GetOptionsString() + + return fmt.Sprintf("mongodb://%s/?replicaSet=%s%s", strings.Join(m.Hosts(clusterDomain), ","), m.Name, optionsString) +} + +// MongoSRVURI returns a mongo srv uri which can be used to connect to this deployment +func (m *MongoDBCommunity) MongoSRVURI(clusterDomain string) string { + if clusterDomain == "" { + clusterDomain = defaultClusterDomain + } + + optionsString := m.GetOptionsString() + + return fmt.Sprintf("mongodb+srv://%s.%s.svc.%s/?replicaSet=%s%s", m.ServiceName(), m.Namespace, clusterDomain, m.Name, optionsString) +} + +// MongoAuthUserURI returns a mongo uri which can be used to connect to this deployment +// and includes the authentication data for the user +func (m *MongoDBCommunity) MongoAuthUserURI(user authtypes.User, password string, clusterDomain string) string { + optionsString := m.GetUserOptionsString(user) + return fmt.Sprintf("mongodb://%s%s/%s?replicaSet=%s&ssl=%t%s", + user.GetLoginString(password), + strings.Join(m.Hosts(clusterDomain), ","), + user.Database, + m.Name, + m.Spec.Security.TLS.Enabled, + optionsString) +} + +// MongoAuthUserSRVURI returns a mongo srv uri which can be used to connect to this deployment +// and includes the authentication data for the user +func (m *MongoDBCommunity) MongoAuthUserSRVURI(user authtypes.User, password string, clusterDomain string) string { + if clusterDomain == "" { + clusterDomain = defaultClusterDomain } - return fmt.Sprintf("mongodb://%s", strings.Join(members, ",")) + + optionsString := m.GetUserOptionsString(user) + return fmt.Sprintf("mongodb+srv://%s%s.%s.svc.%s/%s?replicaSet=%s&ssl=%t%s", + user.GetLoginString(password), + m.ServiceName(), + m.Namespace, + clusterDomain, + user.Database, + m.Name, + m.Spec.Security.TLS.Enabled, + optionsString) } -func (m MongoDBCommunity) Hosts() []string { +func (m *MongoDBCommunity) Hosts(clusterDomain string) []string { hosts := make([]string, m.Spec.Members) - clusterDomain := "svc.cluster.local" // TODO: make this configurable + + if clusterDomain == "" { + clusterDomain = defaultClusterDomain + } + for i := 0; i < m.Spec.Members; i++ { - hosts[i] = fmt.Sprintf("%s-%d.%s.%s.%s:%d", m.Name, i, m.ServiceName(), m.Namespace, 
clusterDomain, 27017) + hosts[i] = fmt.Sprintf("%s-%d.%s.%s.svc.%s:%d", + m.Name, i, + m.ServiceName(), + m.Namespace, + clusterDomain, + m.GetMongodConfiguration().GetDBPort()) } return hosts } -// ServiceName returns the name of the Service that should be created for -// this resource -func (m MongoDBCommunity) ServiceName() string { +// ServiceName returns the name of the Service that should be created for this resource. +func (m *MongoDBCommunity) ServiceName() string { + serviceName := m.Spec.StatefulSetConfiguration.SpecWrapper.Spec.ServiceName + if serviceName != "" { + return serviceName + } return m.Name + "-svc" } -func (m MongoDBCommunity) AutomationConfigSecretName() string { +func (m *MongoDBCommunity) ArbiterNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: m.Namespace, Name: m.Name + "-arb"} +} + +func (m *MongoDBCommunity) AutomationConfigSecretName() string { return m.Name + "-config" } +// TLSCaCertificateSecretNamespacedName will get the namespaced name of the Secret containing the CA certificate +// As the Secret will be mounted to our pods, it has to be in the same namespace as the MongoDB resource +func (m *MongoDBCommunity) TLSCaCertificateSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.Spec.Security.TLS.CaCertificateSecret.Name, Namespace: m.Namespace} +} + // TLSConfigMapNamespacedName will get the namespaced name of the ConfigMap containing the CA certificate // As the ConfigMap will be mounted to our pods, it has to be in the same namespace as the MongoDB resource -func (m MongoDBCommunity) TLSConfigMapNamespacedName() types.NamespacedName { +func (m *MongoDBCommunity) TLSConfigMapNamespacedName() types.NamespacedName { return types.NamespacedName{Name: m.Spec.Security.TLS.CaConfigMap.Name, Namespace: m.Namespace} } // TLSSecretNamespacedName will get the namespaced name of the Secret containing the server certificate and key -func (m MongoDBCommunity) TLSSecretNamespacedName() types.NamespacedName { +func (m *MongoDBCommunity) TLSSecretNamespacedName() types.NamespacedName { return types.NamespacedName{Name: m.Spec.Security.TLS.CertificateKeySecret.Name, Namespace: m.Namespace} } +// PrometheusTLSSecretNamespacedName will get the namespaced name of the Secret containing the certificate and key used for Prometheus +func (m *MongoDBCommunity) PrometheusTLSSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.Spec.Prometheus.TLSSecretRef.Name, Namespace: m.Namespace} +} + +func (m *MongoDBCommunity) TLSOperatorCASecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.Name + "-ca-certificate", Namespace: m.Namespace} +} + // TLSOperatorSecretNamespacedName will get the namespaced name of the Secret created by the operator // containing the combined certificate and key. -func (m MongoDBCommunity) TLSOperatorSecretNamespacedName() types.NamespacedName { +func (m *MongoDBCommunity) TLSOperatorSecretNamespacedName() types.NamespacedName { return types.NamespacedName{Name: m.Name + "-server-certificate-key", Namespace: m.Namespace} } -func (m MongoDBCommunity) NamespacedName() types.NamespacedName { - return types.NamespacedName{Name: m.Name, Namespace: m.Namespace} +// PrometheusTLSOperatorSecretNamespacedName will get the namespaced name of the Secret created by the operator +// containing the combined certificate and key.
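// For a resource named "my-rs" (hypothetical name), this resolves to the Secret
// my-rs-prometheus-certificate-key in the resource's namespace.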
+func (m *MongoDBCommunity) PrometheusTLSOperatorSecretNamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.Name + "-prometheus-certificate-key", Namespace: m.Namespace} } -func (m MongoDBCommunity) GetAgentScramCredentialsNamespacedName() types.NamespacedName { - return types.NamespacedName{Name: fmt.Sprintf("%s-agent-scram-credentials", m.Name), Namespace: m.Namespace} +func (m *MongoDBCommunity) NamespacedName() types.NamespacedName { + return types.NamespacedName{Name: m.Name, Namespace: m.Namespace} } -func (m MongoDBCommunity) DesiredReplicas() int { +func (m *MongoDBCommunity) DesiredReplicas() int { return m.Spec.Members } -func (m MongoDBCommunity) CurrentReplicas() int { +func (m *MongoDBCommunity) CurrentReplicas() int { return m.Status.CurrentStatefulSetReplicas } -func (m MongoDBCommunity) GetMongoDBVersion() string { +// ForcedIndividualScaling, if set to true, will always scale the deployment 1 by +// 1, even if the resource has just been created. +// +// The reason for this is that we have 2 types of resources that are scaled at +// different times: a) Regular members, which can be scaled from 0->n, for +// instance, when the resource was just created; and b) Arbiters, which will be +// scaled from 0->M 1 by 1 at all times. +// +// This was done to simplify the process of scaling arbiters, *after* members +// have reached the desired number of replicas. +func (m *MongoDBCommunity) ForcedIndividualScaling() bool { + return false +} + +func (m *MongoDBCommunity) DesiredArbiters() int { + return m.Spec.Arbiters +} + +func (m *MongoDBCommunity) CurrentArbiters() int { + return m.Status.CurrentStatefulSetArbitersReplicas +} + +func (m *MongoDBCommunity) GetMongoDBVersion() string { return m.Spec.Version } // GetMongoDBVersionForAnnotation returns the MDB version used to annotate the object. // Here it's the same as GetMongoDBVersion, but a different name is used in order to make // the usage clearer in enterprise (where it's a method of OpsManager but is used for the AppDB) -func (m MongoDBCommunity) GetMongoDBVersionForAnnotation() string { +func (m *MongoDBCommunity) GetMongoDBVersionForAnnotation() string { return m.GetMongoDBVersion() } func (m *MongoDBCommunity) StatefulSetReplicasThisReconciliation() int { - return scale.ReplicasThisReconciliation(m) + return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ + desired: m.DesiredReplicas(), + current: m.CurrentReplicas(), + forceIndividualScaling: false, + }) +} + +func (m *MongoDBCommunity) StatefulSetArbitersThisReconciliation() int { + return scale.ReplicasThisReconciliation(automationConfigReplicasScaler{ + desired: m.DesiredArbiters(), + current: m.CurrentArbiters(), + forceIndividualScaling: true, + }) } // GetUpdateStrategyType returns the type of RollingUpgradeStrategy that the // MongoDB StatefulSet should be configured with. -func (m MongoDBCommunity) GetUpdateStrategyType() appsv1.StatefulSetUpdateStrategyType { +func (m *MongoDBCommunity) GetUpdateStrategyType() appsv1.StatefulSetUpdateStrategyType { if !m.IsChangingVersion() { return appsv1.RollingUpdateStatefulSetStrategyType } @@ -516,26 +1154,51 @@ func (m MongoDBCommunity) GetUpdateStrategyType() appsv1.StatefulSetUpdateStrate } // IsChangingVersion returns true if an attempted version change is occurring.
-func (m MongoDBCommunity) IsChangingVersion() bool { - prevVersion := m.getPreviousVersion() - return prevVersion != "" && prevVersion != m.Spec.Version +func (m *MongoDBCommunity) IsChangingVersion() bool { + lastVersion := m.getLastVersion() + return lastVersion != "" && lastVersion != m.Spec.Version } -// GetPreviousVersion returns the last MDB version the statefulset was configured with. -func (m MongoDBCommunity) getPreviousVersion() string { - return annotations.GetAnnotation(&m, annotations.LastAppliedMongoDBVersion) +// GetLastVersion returns the MDB version the statefulset was configured with. +func (m *MongoDBCommunity) getLastVersion() string { + return annotations.GetAnnotation(m, annotations.LastAppliedMongoDBVersion) } -func (m MongoDBCommunity) HasSeparateDataAndLogsVolumes() bool { +func (m *MongoDBCommunity) HasSeparateDataAndLogsVolumes() bool { return true } -func (m MongoDBCommunity) GetAnnotations() map[string]string { +func (m *MongoDBCommunity) GetAnnotations() map[string]string { return m.Annotations } +func (m *MongoDBCommunity) DataVolumeName() string { + return "data-volume" +} + +func (m *MongoDBCommunity) LogsVolumeName() string { + return "logs-volume" +} + +func (m *MongoDBCommunity) NeedsAutomationConfigVolume() bool { + return true +} + +func (m MongoDBCommunity) GetAgentLogLevel() LogLevel { + return m.Spec.AgentConfiguration.LogLevel +} + +func (m MongoDBCommunity) GetAgentLogFile() string { + return m.Spec.AgentConfiguration.LogFile +} + +func (m MongoDBCommunity) GetAgentMaxLogFileDurationHours() int { + return m.Spec.AgentConfiguration.MaxLogFileDurationHours +} + type automationConfigReplicasScaler struct { - current, desired int + current, desired int + forceIndividualScaling bool } func (a automationConfigReplicasScaler) DesiredReplicas() int { @@ -546,6 +1209,10 @@ func (a automationConfigReplicasScaler) CurrentReplicas() int { return a.current } +func (a automationConfigReplicasScaler) ForcedIndividualScaling() bool { + return a.forceIndividualScaling +} + // +kubebuilder:object:root=true // MongoDBCommunityList contains a list of MongoDB diff --git a/api/v1/mongodbcommunity_types_test.go b/api/v1/mongodbcommunity_types_test.go index 692e9732a..19b365527 100644 --- a/api/v1/mongodbcommunity_types_test.go +++ b/api/v1/mongodbcommunity_types_test.go @@ -1,19 +1,224 @@ package v1 import ( + "encoding/json" + "testing" - "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +type args struct { + members int + name string + namespace string + clusterDomain string + additionalMongodConfig map[string]interface{} + additionalConnectionStringConfig map[string]interface{} + userConnectionStringConfig map[string]interface{} + connectionString string +} + func TestMongoDB_MongoURI(t *testing.T) { + tests := []args{ + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "", + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "my.cluster", + connectionString: 
"mongodb://my-rs-0.my-rs-svc.my-namespace.svc.my.cluster:27017,my-rs-1.my-rs-svc.my-namespace.svc.my.cluster:27017/?replicaSet=my-rs", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + clusterDomain: "", + connectionString: "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017/?replicaSet=my-single-rs", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + clusterDomain: "my.cluster", + connectionString: "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.my.cluster:27017/?replicaSet=my-single-rs", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + clusterDomain: "", + connectionString: "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017/?replicaSet=my-big-rs", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + clusterDomain: "my.cluster", + connectionString: "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.my.cluster:27017/?replicaSet=my-big-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "", + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:40333,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:40333/?replicaSet=my-rs", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + clusterDomain: "my.cluster", + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.my.cluster:40333,my-rs-1.my-rs-svc.my-namespace.svc.my.cluster:40333/?replicaSet=my-rs", + }, + } + + for _, params := range tests { + mdb := newReplicaSet(params.members, params.name, params.namespace) + mdb.Spec.AdditionalMongodConfig.Object = params.additionalMongodConfig + assert.Equal(t, mdb.MongoURI(params.clusterDomain), params.connectionString) + } +} + +func TestMongoDB_MongoURI_With_Options(t *testing.T) { + tests := []args{ + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs&readPreference=primary", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/?replicaSet=my-rs&readPreference=primary", + }, + { + members: 1, + name: "my-single-rs", + namespace: "my-single-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + connectionString: 
"mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017/?replicaSet=my-single-rs&readPreference=primary", + }, + { + members: 5, + name: "my-big-rs", + namespace: "my-big-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + connectionString: "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017/?replicaSet=my-big-rs&readPreference=primary", + }, + { + members: 2, + name: "my-rs", + namespace: "my-namespace", + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary"}, + additionalMongodConfig: map[string]interface{}{ + "net.port": 40333., + }, + connectionString: "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:40333,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:40333/?replicaSet=my-rs&readPreference=primary", + }, + } + + for _, params := range tests { + mdb := newReplicaSet(params.members, params.name, params.namespace) + mdb.Spec.AdditionalMongodConfig.Object = params.additionalMongodConfig + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + assert.Equal(t, mdb.MongoURI(params.clusterDomain), params.connectionString) + } +} + +func TestMongoDB_MongoSRVURI(t *testing.T) { mdb := newReplicaSet(2, "my-rs", "my-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017") - mdb = newReplicaSet(1, "my-single-rs", "my-single-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-single-rs-0.my-single-rs-svc.my-single-namespace.svc.cluster.local:27017") - mdb = newReplicaSet(5, "my-big-rs", "my-big-namespace") - assert.Equal(t, mdb.MongoURI(), "mongodb://my-big-rs-0.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-1.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-2.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-3.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017,my-big-rs-4.my-big-rs-svc.my-big-namespace.svc.cluster.local:27017") + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), "mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs") +} + +func TestMongoDB_MongoSRVURI_With_Options(t *testing.T) { + mdb := newReplicaSet(2, "my-rs", "my-namespace") + mdb.Spec.AdditionalConnectionStringConfig.Object = map[string]interface{}{ + "readPreference": "primary"} + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs&readPreference=primary") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), "mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs&readPreference=primary") + + mdb = newReplicaSet(2, "my-rs", "my-namespace") + mdb.Spec.AdditionalConnectionStringConfig.Object = map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true} + assert.Equal(t, mdb.MongoSRVURI(""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/?replicaSet=my-rs&readPreference=primary") + assert.Equal(t, mdb.MongoSRVURI("my.cluster"), 
"mongodb+srv://my-rs-svc.my-namespace.svc.my.cluster/?replicaSet=my-rs&readPreference=primary") +} + +func TestMongodConfiguration(t *testing.T) { + mc := NewMongodConfiguration() + assert.Equal(t, mc.Object, map[string]interface{}{}) + assert.Equal(t, mc.GetDBDataDir(), "/data") + assert.Equal(t, mc.GetDBPort(), 27017) + mc.SetOption("net.port", 40333.) + assert.Equal(t, mc.GetDBPort(), 40333) + mc.SetOption("storage", map[string]interface{}{"dbPath": "/other/data/path"}) + assert.Equal(t, mc.GetDBDataDir(), "/other/data/path") + assert.Equal(t, mc.Object, map[string]interface{}{ + "net": map[string]interface{}{ + "port": 40333., + }, + "storage": map[string]interface{}{ + "dbPath": "/other/data/path", + }, + }) +} + +func TestMongodConfigurationWithNestedMapsAfterUnmarshalling(t *testing.T) { + jsonStr := ` + { + "net.port": 40333, + "storage.dbPath": "/other/data/path" + } + ` + mc := NewMongodConfiguration() + require.NoError(t, json.Unmarshal([]byte(jsonStr), &mc)) + assert.Equal(t, map[string]interface{}{ + "net": map[string]interface{}{ + "port": 40333., + }, + "storage": map[string]interface{}{ + "dbPath": "/other/data/path", + }, + }, mc.Object) +} + +func TestGetAuthOptions(t *testing.T) { + t.Run("Default AutoAuthMechanism set if modes array empty", func(t *testing.T) { + mdb := newModesArray(nil, "empty-modes-array", "my-namespace") + + options := mdb.GetAuthOptions() + + assert.EqualValues(t, defaultMode, options.AutoAuthMechanism) + assert.EqualValues(t, []string{constants.Sha256}, options.AuthMechanisms) + }) } func TestGetScramCredentialsSecretName(t *testing.T) { @@ -85,6 +290,270 @@ func TestGetScramCredentialsSecretName(t *testing.T) { } +func TestGetConnectionStringSecretName(t *testing.T) { + testusers := []struct { + in MongoDBUser + exp string + }{ + { + MongoDBUser{ + Name: "mdb-0", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-admin-mdb-0", + }, + { + MongoDBUser{ + Name: "?_normalize/_-username/?@with/[]?no]?/:allowed:chars[only?", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-admin-normalize-username-with-no-allowed-chars-only", + }, + { + MongoDBUser{ + Name: "AppUser", + DB: "Administrators", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + }, + "replica-set-administrators-appuser", + }, + { + MongoDBUser{ + Name: "mdb-0", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-0", + ConnectionStringSecretName: "connection-string-secret", + }, + "connection-string-secret", + }, + { + MongoDBUser{ + Name: "mdb-2", + DB: "admin", + ScramCredentialsSecretName: "scram-credential-secret-name-2", + ConnectionStringSecretName: "connection-string-secret-2", + ConnectionStringSecretNamespace: "other-namespace", + }, + "connection-string-secret-2", + }, + } + + for _, tt := range testusers { + assert.Equal(t, tt.exp, tt.in.GetConnectionStringSecretName("replica-set")) + } +} + +func TestMongoDBCommunity_MongoAuthUserURI(t *testing.T) { + testuser := authtypes.User{ + Username: "testuser", + Database: "admin", + } + mdb := newReplicaSet(2, "my-rs", "my-namespace") + + tests := []args{ + { + additionalConnectionStringConfig: map[string]interface{}{}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": 
"primary"}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "secondary"}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&readPreference=secondary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"retryReads": true}, + connectionString: "mongodb://testuser:password@my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/admin?replicaSet=my-rs&ssl=false&retryReads=true&readPreference=primary", + }, + } + + for _, params := range tests { + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + testuser.ConnectionStringOptions = params.userConnectionStringConfig + assert.Equal(t, mdb.MongoAuthUserURI(testuser, "password", ""), params.connectionString) + } + + testuser = authtypes.User{ + Username: "testuser", + Database: "$external", + } + mdb = newReplicaSet(2, "my-rs", "my-namespace") + + assert.Equal(t, mdb.MongoAuthUserURI(testuser, "", ""), "mongodb://my-rs-0.my-rs-svc.my-namespace.svc.cluster.local:27017,my-rs-1.my-rs-svc.my-namespace.svc.cluster.local:27017/$external?replicaSet=my-rs&ssl=false") +} + +func TestMongoDBCommunity_MongoAuthUserSRVURI(t *testing.T) { + testuser := authtypes.User{ + Username: "testuser", + Database: "admin", + } + mdb := newReplicaSet(2, "my-rs", "my-namespace") + + tests := []args{ + { + additionalConnectionStringConfig: map[string]interface{}{}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: 
"mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{ + "readPreference": "primary", "replicaSet": "differentName", "tls": true, "ssl": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=primary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"readPreference": "secondary"}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&readPreference=secondary", + }, + { + additionalConnectionStringConfig: map[string]interface{}{"readPreference": "primary"}, + userConnectionStringConfig: map[string]interface{}{"retryReads": true}, + connectionString: "mongodb+srv://testuser:password@my-rs-svc.my-namespace.svc.cluster.local/admin?replicaSet=my-rs&ssl=false&retryReads=true&readPreference=primary", + }, + } + + for _, params := range tests { + mdb.Spec.AdditionalConnectionStringConfig.Object = params.additionalConnectionStringConfig + testuser.ConnectionStringOptions = params.userConnectionStringConfig + assert.Equal(t, mdb.MongoAuthUserSRVURI(testuser, "password", ""), params.connectionString) + } + + testuser = authtypes.User{ + Username: "testuser", + Database: "$external", + } + mdb = newReplicaSet(2, "my-rs", "my-namespace") + + assert.Equal(t, mdb.MongoAuthUserSRVURI(testuser, "", ""), "mongodb+srv://my-rs-svc.my-namespace.svc.cluster.local/$external?replicaSet=my-rs&ssl=false") +} + +func TestConvertAuthModeToAuthMechanism(t *testing.T) { + assert.Equal(t, constants.X509, ConvertAuthModeToAuthMechanism("X509")) + assert.Equal(t, constants.Sha256, ConvertAuthModeToAuthMechanism("SCRAM")) + assert.Equal(t, constants.Sha256, ConvertAuthModeToAuthMechanism("SCRAM-SHA-256")) + assert.Equal(t, constants.Sha1, ConvertAuthModeToAuthMechanism("SCRAM-SHA-1")) + assert.Equal(t, "", ConvertAuthModeToAuthMechanism("LDAP")) +} + +func TestMongoDBCommunity_GetAuthOptions(t *testing.T) { + mdb := newReplicaSet(3, "mdb", "mongodb") + mdb.Spec.Security.Authentication.Modes = []AuthMode{"SCRAM", "X509"} + + opts := mdb.GetAuthOptions() + + assert.Equal(t, constants.Sha256, opts.AutoAuthMechanism) + assert.Equal(t, []string{constants.Sha256, constants.X509}, opts.AuthMechanisms) + assert.Equal(t, false, opts.AuthoritativeSet) + + mdb.Spec.Security.Authentication.Modes = []AuthMode{"X509"} + mdb.Spec.Security.Authentication.AgentMode = "X509" + + opts = mdb.GetAuthOptions() + assert.Equal(t, constants.X509, opts.AutoAuthMechanism) + assert.Equal(t, []string{constants.X509}, 
opts.AuthMechanisms) +} + +func TestMongoDBCommunity_GetAuthUsers(t *testing.T) { + mdb := newReplicaSet(3, "mdb", "mongodb") + mdb.Spec.Users = []MongoDBUser{ + { + Name: "my-user", + DB: "admin", + PasswordSecretRef: SecretKeyReference{Name: "my-user-password"}, + Roles: []Role{ + { + DB: "admin", + Name: "readWriteAnyDatabase", + }, + }, + ScramCredentialsSecretName: "my-scram", + ConnectionStringSecretName: "", + AdditionalConnectionStringConfig: MapWrapper{}, + }, + { + Name: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization", + DB: "$external", + PasswordSecretRef: SecretKeyReference{}, + Roles: []Role{ + { + DB: "admin", + Name: "readWriteAnyDatabase", + }, + }, + ScramCredentialsSecretName: "", + ConnectionStringSecretName: "", + AdditionalConnectionStringConfig: MapWrapper{}, + }, + } + + authUsers := mdb.GetAuthUsers() + + assert.Equal(t, authtypes.User{ + Username: "my-user", + Database: "admin", + Roles: []authtypes.Role{{ + Database: "admin", + Name: "readWriteAnyDatabase", + }}, + PasswordSecretKey: "password", + PasswordSecretName: "my-user-password", + ScramCredentialsSecretName: "my-scram-scram-credentials", + ConnectionStringSecretName: "mdb-admin-my-user", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, + }, authUsers[0]) + assert.Equal(t, authtypes.User{ + Username: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization", + Database: "$external", + Roles: []authtypes.Role{{ + Database: "admin", + Name: "readWriteAnyDatabase", + }}, + PasswordSecretKey: "", + PasswordSecretName: "", + ScramCredentialsSecretName: "", + ConnectionStringSecretName: "mdb-external-cn-my-x509-authenticated-user-ou-organizationalunit-o-organization", + ConnectionStringSecretNamespace: mdb.Namespace, + ConnectionStringOptions: nil, + }, authUsers[1]) +} + func newReplicaSet(members int, name, namespace string) MongoDBCommunity { return MongoDBCommunity{ TypeMeta: metav1.TypeMeta{}, @@ -97,3 +566,129 @@ func newReplicaSet(members int, name, namespace string) MongoDBCommunity { }, } } + +func newModesArray(modes []AuthMode, name, namespace string) MongoDBCommunity { + return MongoDBCommunity{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: MongoDBCommunitySpec{ + Security: Security{ + Authentication: Authentication{ + Modes: modes, + IgnoreUnknownUsers: nil, + }, + }, + }, + } +} + +func TestMongoDBCommunitySpec_GetAgentCertificateRef(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs", m.Spec.GetAgentCertificateRef()) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "my-agent-certificate"} + + assert.Equal(t, "my-agent-certificate", m.Spec.GetAgentCertificateRef()) +} + +func TestMongoDBCommunity_AgentCertificateSecretNamespacedName(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs", m.AgentCertificateSecretNamespacedName().Name) + assert.Equal(t, "mdb", m.AgentCertificateSecretNamespacedName().Namespace) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "agent-certs-custom"} + assert.Equal(t, "agent-certs-custom", m.AgentCertificateSecretNamespacedName().Name) +} + +func TestMongoDBCommunity_AgentCertificatePemSecretNamespacedName(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + + assert.Equal(t, "agent-certs-pem", m.AgentCertificatePemSecretNamespacedName().Name) + assert.Equal(t, "mdb", 
m.AgentCertificatePemSecretNamespacedName().Namespace) + + m.Spec.Security.Authentication.AgentCertificateSecret = &corev1.LocalObjectReference{Name: "agent-certs-custom"} + assert.Equal(t, "agent-certs-custom-pem", m.AgentCertificatePemSecretNamespacedName().Name) + +} + +func TestMongoDBCommunitySpec_GetAgentAuthMode(t *testing.T) { + type fields struct { + agentAuth AuthMode + modes []AuthMode + } + tests := []struct { + name string + fields fields + want AuthMode + }{ + { + name: "Agent auth not specified and modes array empty", + fields: fields{ + agentAuth: "", + modes: []AuthMode{}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Agent auth specified and modes array empty", + fields: fields{ + agentAuth: "X509", + modes: []AuthMode{}, + }, + want: AuthMode("X509"), + }, + { + name: "Modes array one element", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"X509"}, + }, + want: AuthMode("X509"), + }, + { + name: "Modes array has sha256 and sha1", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM-SHA-256", "SCRAM-SHA-1"}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Modes array has scram and sha1", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM", "SCRAM-SHA-1"}, + }, + want: AuthMode("SCRAM-SHA-256"), + }, + { + name: "Modes array has 2 different auth modes", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM", "X509"}, + }, + want: AuthMode(""), + }, + { + name: "Modes array has 3 auth modes", + fields: fields{ + agentAuth: "", + modes: []AuthMode{"SCRAM-SHA-256", "SCRAM-SHA-1", "X509"}, + }, + want: AuthMode(""), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := newReplicaSet(3, "mdb", "mdb") + m.Spec.Security.Authentication.Modes = tt.fields.modes + m.Spec.Security.Authentication.AgentMode = tt.fields.agentAuth + assert.Equalf(t, tt.want, m.Spec.GetAgentAuthMode(), "GetAgentAuthMode()") + }) + } +} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 8e97f95a6..df22b4876 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -1,4 +1,4 @@ -// +build !ignore_autogenerated +//go:build !ignore_autogenerated /* Copyright 2021. @@ -22,9 +22,40 @@ package v1 import ( "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfiguration) DeepCopyInto(out *AgentConfiguration) { + *out = *in + if in.LogRotate != nil { + in, out := &in.LogRotate, &out.LogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } + if in.AuditLogRotate != nil { + in, out := &in.AuditLogRotate, &out.AuditLogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } + if in.SystemLog != nil { + in, out := &in.SystemLog, &out.SystemLog + *out = new(automationconfig.SystemLog) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfiguration. +func (in *AgentConfiguration) DeepCopy() *AgentConfiguration { + if in == nil { + return nil + } + out := new(AgentConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Authentication) DeepCopyInto(out *Authentication) { *out = *in @@ -33,6 +64,11 @@ func (in *Authentication) DeepCopyInto(out *Authentication) { *out = make([]AuthMode, len(*in)) copy(*out, *in) } + if in.AgentCertificateSecret != nil { + in, out := &in.AgentCertificateSecret, &out.AgentCertificateSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } if in.IgnoreUnknownUsers != nil { in, out := &in.IgnoreUnknownUsers, &out.IgnoreUnknownUsers *out = new(bool) @@ -75,6 +111,29 @@ func (in *AuthenticationRestriction) DeepCopy() *AuthenticationRestriction { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutomationConfigOverride) DeepCopyInto(out *AutomationConfigOverride) { + *out = *in + if in.Processes != nil { + in, out := &in.Processes, &out.Processes + *out = make([]OverrideProcess, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ReplicaSet.DeepCopyInto(&out.ReplicaSet) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutomationConfigOverride. +func (in *AutomationConfigOverride) DeepCopy() *AutomationConfigOverride { + if in == nil { + return nil + } + out := new(AutomationConfigOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomRole) DeepCopyInto(out *CustomRole) { *out = *in @@ -110,18 +169,9 @@ func (in *CustomRole) DeepCopy() *CustomRole { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. -func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { - if in == nil { - return nil - } - out := new(LocalObjectReference) - in.DeepCopyInto(out) - return out +func (in *MapWrapper) DeepCopyInto(out *MapWrapper) { + clone := in.DeepCopy() + *out = *clone } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -208,7 +258,26 @@ func (in *MongoDBCommunitySpec) DeepCopyInto(out *MongoDBCommunitySpec) { } } in.StatefulSetConfiguration.DeepCopyInto(&out.StatefulSetConfiguration) + in.AgentConfiguration.DeepCopyInto(&out.AgentConfiguration) in.AdditionalMongodConfig.DeepCopyInto(&out.AdditionalMongodConfig) + if in.AutomationConfigOverride != nil { + in, out := &in.AutomationConfigOverride, &out.AutomationConfigOverride + *out = new(AutomationConfigOverride) + (*in).DeepCopyInto(*out) + } + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(Prometheus) + **out = **in + } + in.AdditionalConnectionStringConfig.DeepCopyInto(&out.AdditionalConnectionStringConfig) + if in.MemberConfig != nil { + in, out := &in.MemberConfig, &out.MemberConfig + *out = make([]automationconfig.MemberOptions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBCommunitySpec. 
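A note on the generated code above: pointer fields must be reallocated rather than copied, since a plain struct assignment would alias fields such as Authentication.IgnoreUnknownUsers between the original and the copy. A minimal, self-contained sketch of that hazard follows; the auth type is a stand-in invented for illustration, not one of the operator's types.

    package main

    import "fmt"

    // auth is a stand-in for a struct with an optional *bool field,
    // mirroring Authentication.IgnoreUnknownUsers above.
    type auth struct{ IgnoreUnknownUsers *bool }

    // deepCopy allocates a fresh bool, as the generated DeepCopyInto does.
    func (in *auth) deepCopy() *auth {
    	out := &auth{}
    	if in.IgnoreUnknownUsers != nil {
    		v := *in.IgnoreUnknownUsers
    		out.IgnoreUnknownUsers = &v
    	}
    	return out
    }

    func main() {
    	t := true
    	orig := auth{IgnoreUnknownUsers: &t}

    	shallow := orig // struct assignment copies the pointer, not the bool
    	*shallow.IgnoreUnknownUsers = false
    	fmt.Println(*orig.IgnoreUnknownUsers) // false: the original was mutated through the alias

    	*orig.IgnoreUnknownUsers = true
    	deep := orig.deepCopy()
    	*deep.IgnoreUnknownUsers = false
    	fmt.Println(*orig.IgnoreUnknownUsers) // true: the deep copy is independent
    }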
@@ -245,6 +314,7 @@ func (in *MongoDBUser) DeepCopyInto(out *MongoDBUser) { *out = make([]Role, len(*in)) copy(*out, *in) } + in.AdditionalConnectionStringConfig.DeepCopyInto(&out.AdditionalConnectionStringConfig) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBUser. @@ -259,8 +329,59 @@ func (in *MongoDBUser) DeepCopy() *MongoDBUser { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MongodConfiguration) DeepCopyInto(out *MongodConfiguration) { - clone := in.DeepCopy() - *out = *clone + *out = *in + in.MapWrapper.DeepCopyInto(&out.MapWrapper) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodConfiguration. +func (in *MongodConfiguration) DeepCopy() *MongodConfiguration { + if in == nil { + return nil + } + out := new(MongodConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideProcess) DeepCopyInto(out *OverrideProcess) { + *out = *in + if in.LogRotate != nil { + in, out := &in.LogRotate, &out.LogRotate + *out = new(automationconfig.CrdLogRotate) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideProcess. +func (in *OverrideProcess) DeepCopy() *OverrideProcess { + if in == nil { + return nil + } + out := new(OverrideProcess) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OverrideReplicaSet) DeepCopyInto(out *OverrideReplicaSet) { + *out = *in + if in.Id != nil { + in, out := &in.Id, &out.Id + *out = new(string) + **out = **in + } + in.Settings.DeepCopyInto(&out.Settings) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideReplicaSet. +func (in *OverrideReplicaSet) DeepCopy() *OverrideReplicaSet { + if in == nil { + return nil + } + out := new(OverrideReplicaSet) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -284,6 +405,23 @@ func (in *Privilege) DeepCopy() *Privilege { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Prometheus) DeepCopyInto(out *Prometheus) { + *out = *in + out.PasswordSecretRef = in.PasswordSecretRef + out.TLSSecretRef = in.TLSSecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. +func (in *Prometheus) DeepCopy() *Prometheus { + if in == nil { + return nil + } + out := new(Prometheus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in ReplicaSetHorizonConfiguration) DeepCopyInto(out *ReplicaSetHorizonConfiguration) { { @@ -370,7 +508,7 @@ func (in *SecretKeyReference) DeepCopy() *SecretKeyReference { func (in *Security) DeepCopyInto(out *Security) { *out = *in in.Authentication.DeepCopyInto(&out.Authentication) - out.TLS = in.TLS + in.TLS.DeepCopyInto(&out.TLS) if in.Roles != nil { in, out := &in.Roles, &out.Roles *out = make([]CustomRole, len(*in)) @@ -394,6 +532,7 @@ func (in *Security) DeepCopy() *Security { func (in *StatefulSetConfiguration) DeepCopyInto(out *StatefulSetConfiguration) { *out = *in in.SpecWrapper.DeepCopyInto(&out.SpecWrapper) + in.MetadataWrapper.DeepCopyInto(&out.MetadataWrapper) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetConfiguration. @@ -406,6 +545,12 @@ func (in *StatefulSetConfiguration) DeepCopy() *StatefulSetConfiguration { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetMetadataWrapper) DeepCopyInto(out *StatefulSetMetadataWrapper) { + clone := in.DeepCopy() + *out = *clone +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetSpecWrapper) DeepCopyInto(out *StatefulSetSpecWrapper) { clone := in.DeepCopy() @@ -416,7 +561,16 @@ func (in *StatefulSetSpecWrapper) DeepCopyInto(out *StatefulSetSpecWrapper) { func (in *TLS) DeepCopyInto(out *TLS) { *out = *in out.CertificateKeySecret = in.CertificateKeySecret - out.CaConfigMap = in.CaConfigMap + if in.CaCertificateSecret != nil { + in, out := &in.CaCertificateSecret, &out.CaCertificateSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CaConfigMap != nil { + in, out := &in.CaConfigMap, &out.CaConfigMap + *out = new(corev1.LocalObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLS. 
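Looking back at the connection-string tests earlier in this diff: they pin down a precedence rule in GetOptionsString/GetUserOptionsString where user options override resource options and operator-controlled keys can never be overridden. A condensed, self-contained sketch of that rule follows, assuming the protected set is {replicaSet, tls, ssl} as the test expectations suggest (the real set lives in protectedConnectionStringOptions):

    package main

    import "fmt"

    // protected approximates protectedConnectionStringOptions: keys the
    // operator always controls, silently dropped from any options map.
    var protected = map[string]struct{}{"replicaSet": {}, "tls": {}, "ssl": {}}

    // merge mimics GetUserOptionsString's precedence: user options win,
    // resource options fill the gaps, protected keys are skipped.
    func merge(resource, user map[string]interface{}) []string {
    	var out []string
    	for k, v := range user {
    		if _, skip := protected[k]; !skip {
    			out = append(out, fmt.Sprintf("%s=%v", k, v))
    		}
    	}
    	for k, v := range resource {
    		_, shadowed := user[k]
    		if _, skip := protected[k]; !shadowed && !skip {
    			out = append(out, fmt.Sprintf("%s=%v", k, v))
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(merge(
    		map[string]interface{}{"readPreference": "primary", "replicaSet": "other"},
    		map[string]interface{}{"readPreference": "secondary"},
    	)) // [readPreference=secondary]: the user value wins, replicaSet is protected
    }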
diff --git a/cmd/manager/main.go b/cmd/manager/main.go index dba576fc0..b8dd5d184 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -4,22 +4,23 @@ import ( "fmt" "os" + "sigs.k8s.io/controller-runtime/pkg/cache" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "github.com/mongodb/mongodb-kubernetes-operator/controllers" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" ) var ( - scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + scheme = runtime.NewScheme() ) const ( @@ -54,17 +55,24 @@ func hasRequiredVariables(logger *zap.Logger, envVariables ...string) bool { func main() { log, err := configureLogger() if err != nil { - os.Exit(1) + // log is nil when configureLogger fails, so report the error without it + fmt.Printf("Failed to configure logger: %v\n", err) + os.Exit(1) } - if !hasRequiredVariables(log, construct.AgentImageEnv, construct.VersionUpgradeHookImageEnv, construct.ReadinessProbeImageEnv) { + if !hasRequiredVariables( + log, + construct.MongodbRepoUrlEnv, + construct.MongodbImageEnv, + construct.AgentImageEnv, + construct.VersionUpgradeHookImageEnv, + construct.ReadinessProbeImageEnv, + ) { os.Exit(1) } // Get watch namespace from environment variable. namespace, nsSpecified := os.LookupEnv(WatchNamespaceEnv) if !nsSpecified { - os.Exit(1) + log.Sugar().Fatal("No namespace specified to watch") } // If namespace is a wildcard use the empty string to represent all namespaces @@ -79,31 +87,37 @@ func main() { // Get a config to talk to the apiserver cfg, err := config.GetConfig() if err != nil { - setupLog.Error(err, "Unable to get config") - os.Exit(1) + log.Sugar().Fatalf("Unable to get config: %v", err) } // Create a new Cmd to provide shared dependencies and start components mgr, err := manager.New(cfg, manager.Options{ - Namespace: watchNamespace, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{watchNamespace: {}}, + }, }) if err != nil { - setupLog.Error(err, "Unable to create manager") - os.Exit(1) + log.Sugar().Fatalf("Unable to create manager: %v", err) } log.Info("Registering Components.") // Setup Scheme for all resources if err := mdbv1.AddToScheme(mgr.GetScheme()); err != nil { - setupLog.Error(err, "Unable to add mdbv1 to scheme") - os.Exit(1) + log.Sugar().Fatalf("Unable to add mdbv1 to scheme: %v", err) } // Setup Controller.
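// Note on the wiring below: the image-related arguments are read from the same
// environment variables that hasRequiredVariables checks at startup
// (construct.MongodbRepoUrlEnv, construct.MongodbImageEnv, construct.AgentImageEnv,
// construct.VersionUpgradeHookImageEnv, construct.ReadinessProbeImageEnv), so a
// missing variable fails fast in main rather than surfacing later as a pod with an
// empty image reference. Only construct.MongoDBImageTypeEnv has a fallback, via
// envvar.GetEnvOrDefault(construct.MongoDBImageTypeEnv, construct.DefaultImageType).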
- if err = controllers.NewReconciler(mgr).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "Unable to create controller") - os.Exit(1) + if err = controllers.NewReconciler( + mgr, + os.Getenv(construct.MongodbRepoUrlEnv), + os.Getenv(construct.MongodbImageEnv), + envvar.GetEnvOrDefault(construct.MongoDBImageTypeEnv, construct.DefaultImageType), + os.Getenv(construct.AgentImageEnv), + os.Getenv(construct.VersionUpgradeHookImageEnv), + os.Getenv(construct.ReadinessProbeImageEnv), + ).SetupWithManager(mgr); err != nil { + log.Sugar().Fatalf("Unable to create controller: %v", err) } // +kubebuilder:scaffold:builder @@ -111,7 +125,6 @@ func main() { // Start the Cmd if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - setupLog.Error(err, "Unable to start manager") - os.Exit(1) + log.Sugar().Fatalf("Unable to start manager: %v", err) } } diff --git a/cmd/readiness/main.go b/cmd/readiness/main.go index a01eddcb6..6cf9e7804 100644 --- a/cmd/readiness/main.go +++ b/cmd/readiness/main.go @@ -1,16 +1,18 @@ package main import ( + "context" "encoding/json" "fmt" - "io/ioutil" + "io" "os" "time" "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/headless" "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/health" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -19,16 +21,14 @@ import ( ) const ( - headlessAgent = "HEADLESS_AGENT" + headlessAgent = "HEADLESS_AGENT" + mongodNotReadyIntervalMinutes = time.Minute * 1 ) -var riskySteps []string var logger *zap.SugaredLogger func init() { - riskySteps = []string{"WaitAllRsMembersUp", "WaitRsInit"} - - // By default we log to the output (convenient for tests) + // By default, we log to the output (convenient for tests) cfg := zap.NewDevelopmentConfig() log, err := cfg.Build() if err != nil { @@ -42,77 +42,62 @@ func init() { // The logic depends on if the pod is a standard MongoDB or an AppDB one. // - If MongoDB: then just the 'statuses[0].IsInGoalState` field is used to learn if the Agent has reached the goal // - if AppDB: the 'mmsStatus[0].lastGoalVersionAchieved' field is compared with the one from mounted automation config -// Additionally if the previous check hasn't returned 'true' the "deadlock" case is checked to make sure the Agent is -// not waiting for the other members. -func isPodReady(conf config.Config) bool { - fd, err := os.Open(conf.HealthStatusFilePath) +// Additionally, if the previous check hasn't returned 'true', an additional check for wait steps is performed +func isPodReady(ctx context.Context, conf config.Config) (bool, error) { + healthStatus, err := parseHealthStatus(conf.HealthStatusReader) if err != nil { - logger.Warn("No health status file exists, assuming the Automation agent is old") - return true - } - defer fd.Close() - - health, err := readAgentHealthStatus(fd) - if err != nil { - logger.Errorf("Failed to read agent health status file: %s", err) - // panicking allows to see the problem in the events for the pod (kubectl describe pod ..) - panic("Failed to read agent health status file: %s") + logger.Errorf("There was a problem parsing the health status file: %s", err) + return false, nil } // The 'statuses' file can be empty only for OM Agents - if len(health.Healthiness) == 0 && !isHeadlessMode() { - logger.Info("'statuses' is empty.
We assume there is no automation config for the agent yet.") - return true + if len(healthStatus.Statuses) == 0 && !isHeadlessMode() { + logger.Debug("'statuses' is empty. We assume there is no automation config for the agent yet. Returning ready.") + return true, nil } - // If the agent has reached the goal state - returning true - inGoalState, err := isInGoalState(health, conf) + // If the agent has reached the goal state + inGoalState, err := isInGoalState(ctx, healthStatus, conf) if err != nil { logger.Errorf("There was problem checking the health status: %s", err) - panic(err) + return false, err } - inReadyState := isInReadyState(health) - if inGoalState && inReadyState { - logger.Info("Agent has reached goal state") - return true + inReadyState := isInReadyState(healthStatus) + if !inReadyState { + logger.Info("Mongod is not ready") } - // Failback logic: the agent is not in goal state and got stuck in some steps - if hasDeadlockedSteps(health) { - return true + if inGoalState && inReadyState { + logger.Info("The Agent has reached goal state. Returning ready.") + return true, nil } - return false -} - -func readAgentHealthStatus(file *os.File) (health.Status, error) { - var health health.Status - - data, err := ioutil.ReadAll(file) - if err != nil { - return health, err + // Fallback logic: the agent is not in goal state and got stuck in some steps + if !inGoalState && isOnWaitingStep(healthStatus) { + logger.Info("The Agent is on a wait Step. Returning ready.") + return true, nil } - err = json.Unmarshal(data, &health) - return health, err + logger.Info("Reached the end of the check. Returning not ready.") + return false, nil } -// hasDeadlockedSteps returns true if the agent is stuck on waiting for the other agents -func hasDeadlockedSteps(health health.Status) bool { - currentStep := findCurrentStep(health.ProcessPlans) +// isOnWaitingStep returns true if the agent is stuck waiting for the other Agents or something else to happen. +func isOnWaitingStep(health health.Status) bool { + currentStep := findCurrentStep(health.MmsStatus) if currentStep != nil { - return isDeadlocked(currentStep) + return isWaitStep(currentStep) } return false }
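Since isPodReady now has several early-return paths, a condensed restatement of its decision order may help. This is a sketch only, with the error handling, logging, and config plumbing stripped away:

package sketch

// decideReady mirrors the branch order of isPodReady above. The inputs are
// the booleans the real function derives from the parsed health status file.
func decideReady(noStatuses, headless, inGoal, mongodReady, onWaitStep bool) bool {
	switch {
	case noStatuses && !headless:
		return true // no automation config for the agent yet
	case inGoal && mongodReady:
		return true // the Agent reached goal state and mongod is recently up
	case !inGoal && onWaitStep:
		return true // stuck on a wait Step; holding the rollout would not help
	default:
		return false
	}
}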
-// findCurrentStep returns the step which seems to be run by the Agent now. The step is always in the last plan -// (see https://github.com/10gen/ops-manager-kubernetes/pull/401#discussion_r333071555) so we iterate over all the steps -// there and find the last step which has "Started" non nil -// (indeed this is not the perfect logic as sometimes the agent doesn't update the 'Started' as well - see -// 'health-status-ok.json', but seems it works for finding deadlocks still -//noinspection GoNilness +// findCurrentStep returns the step which the Agent is working on now. +// The algorithm (described in https://github.com/10gen/ops-manager-kubernetes/pull/401#discussion_r333071555): +// - Obtain the latest plan (the last one in the plans array) +// - Find the last step, which has Started not nil and Completed nil. The Steps are processed as a tree in a BFS fashion. +// The last element is very likely to be the Step the Agent is performing at the moment. There is some chance that +// this is a waiting step; use isWaitStep to verify this. func findCurrentStep(processStatuses map[string]health.MmsDirectorStatus) *health.StepStatus { var currentPlan *health.PlanStatus if len(processStatuses) == 0 { @@ -124,13 +109,14 @@ func findCurrentStep(processStatuses map[string]health.MmsDirectorStatus) *healt logger.Errorf("Only one process status is expected but got %d!", len(processStatuses)) return nil } + // There is always only one process managed by the Agent - so there will be only one loop - for k, v := range processStatuses { - if len(v.Plans) == 0 { - logger.Errorf("The process %s doesn't contain any plans!", k) + for processName, processStatus := range processStatuses { + if len(processStatus.Plans) == 0 { + logger.Errorf("The process %s doesn't contain any plans!", processName) return nil } - currentPlan = v.Plans[len(v.Plans)-1] + currentPlan = processStatus.Plans[len(processStatus.Plans)-1] } if currentPlan.Completed != nil { @@ -142,7 +128,7 @@ func findCurrentStep(processStatuses map[string]health.MmsDirectorStatus) *healt var lastStartedStep *health.StepStatus for _, m := range currentPlan.Moves { for _, s := range m.Steps { - if s.Started != nil { + if s.Started != nil && s.Completed == nil { lastStartedStep = s } } @@ -151,21 +137,32 @@ func findCurrentStep(processStatuses map[string]health.MmsDirectorStatus) *healt return lastStartedStep } -func isDeadlocked(status *health.StepStatus) bool { - // Some logic behind 15 seconds: the health status file is dumped each 10 seconds so we are sure that if the agent - // has been in the the step for 10 seconds - this means it is waiting for the other hosts and they are not available +// isWaitStep returns true if the Agent is currently waiting for something to happen. +// +// Most of the time, the Agent waits for an initialization by another member of the cluster. In such a case, +// holding the rollout does not improve the overall system state. Even if the probe returns true too quickly, +// the worst thing that can happen is a short service interruption, which is still better than a full service outage. +// +// The 15 seconds explanation: +// - The status file is written every 10s, but the Agent processes steps independently of it +// - In order to avoid reacting to a newly added wait Step (as they can naturally go away), we're giving the Agent +// at least 15 seconds to spend on that Step. +// - This hopefully prevents the Probe from flipping False to True too quickly. +func isWaitStep(status *health.StepStatus) bool { + // Some logic behind 15 seconds: the health status file is dumped each 10 seconds, so we are sure that if the agent + // has been in the step for 10 seconds - this means it is waiting for the other hosts, and they are not available fifteenSecondsAgo := time.Now().Add(time.Duration(-15) * time.Second) - if contains.String(riskySteps, status.Step) && status.Completed == nil && status.Started.Before(fifteenSecondsAgo) { - logger.Infof("Indicated a possible deadlock, status: %s, started at %s but hasn't finished "+ + if status.IsWaitStep && status.Completed == nil && status.Started.Before(fifteenSecondsAgo) { + logger.Debugf("Indicated a wait Step, status: %s, started at %s but hasn't finished "+ "yet. 
Marking the probe as ready", status.Step, status.Started.Format(time.RFC3339)) return true } return false } -func isInGoalState(health health.Status, conf config.Config) (bool, error) { +func isInGoalState(ctx context.Context, health health.Status, conf config.Config) (bool, error) { if isHeadlessMode() { - return headless.PerformCheckHeadlessMode(health, conf) + return headless.PerformCheckHeadlessMode(ctx, health, conf) } return performCheckOMMode(health), nil } @@ -173,7 +170,7 @@ func isInGoalState(health health.Status, conf config.Config) (bool, error) { // performCheckOMMode does a general check if the Agent has reached the goal state - must be called when Agent is in // "OM mode" func performCheckOMMode(health health.Status) bool { - for _, v := range health.Healthiness { + for _, v := range health.Statuses { logger.Debug(v) if v.IsInGoalState { return true @@ -199,48 +196,94 @@ func kubernetesClientset() (kubernetes.Interface, error) { return clientset, nil } +func parseHealthStatus(reader io.Reader) (health.Status, error) { + var health health.Status + data, err := io.ReadAll(reader) + if err != nil { + return health, err + } + + err = json.Unmarshal(data, &health) + return health, err +} + +func initLogger(l *lumberjack.Logger) { + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + + consoleCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(os.Stdout), + zap.DebugLevel) + + cores := []zapcore.Core{consoleCore} + if config.ReadBoolWitDefault(config.WithAgentFileLogging, "true") { + fileCore := zapcore.NewCore( + zapcore.NewJSONEncoder(encoderConfig), + zapcore.AddSync(l), + zap.DebugLevel) + cores = append(cores, fileCore) + } + + core := zapcore.NewTee(cores...) + log := zap.New(core, zap.Development()) + logger = log.Sugar() + + logger.Infof("logging configuration: %+v", l) + } + func main() { + ctx := context.Background() clientSet, err := kubernetesClientset() if err != nil { panic(err) } - config, err := config.BuildFromEnvVariables(clientSet, isHeadlessMode()) + initLogger(config.GetLogger()) + + healthStatusFilePath := config.GetEnvOrDefault(config.AgentHealthStatusFilePathEnv, config.DefaultAgentHealthStatusFilePath) + file, err := os.Open(healthStatusFilePath) + // The agent might be slow in creating the health status file. + // In that case, we don't want to panic just to surface the message in the + // Kubernetes Pod description: that would be a red herring, since the situation resolves itself with enough time. + if err != nil { + logger.Errorf("health status file not available yet: %s", err) + os.Exit(1) + } + + cfg, err := config.BuildFromEnvVariables(clientSet, isHeadlessMode(), file) if err != nil { panic(err) } - cfg := zap.NewDevelopmentConfig() - // In production we log to the file - cfg.OutputPaths = []string{ - config.LogFilePath, - } - log, err := cfg.Build() + + ready, err := isPodReady(ctx, cfg) if err != nil { panic(err) } - logger = log.Sugar() - if !isPodReady(config) { + if !ready { os.Exit(1) } }
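The isInReadyState hunk below replaces the old LastMongoUpTime != 0 check with a freshness window. A sketch of the arithmetic, assuming the one-minute mongodNotReadyIntervalMinutes constant introduced earlier in this file:

package sketch

import "time"

// mongodRecentlyUp reports whether mongod last reported "up" within the
// given window. lastMongoUpTime is a Unix timestamp written by the Agent.
func mongodRecentlyUp(lastMongoUpTime int64, window time.Duration) bool {
	timeMongoUp := time.Unix(lastMongoUpTime, 0)
	mongoUpThreshold := time.Now().Add(-window)
	return timeMongoUp.After(mongoUpThreshold)
}

With window = time.Minute this matches the new code; note that a zero LastMongoUpTime (written by very old agents) now fails the check the same way any stale timestamp does.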
-// isInReadyState checks the MongoDB Server state. It returns true if the state +// isInReadyState checks the MongoDB Server state. It returns true if the mongod process is up and its state // is PRIMARY or SECONDARY. func isInReadyState(health health.Status) bool { - if len(health.Healthiness) == 0 { + if len(health.Statuses) == 0 { return true } - for _, processHealth := range health.Healthiness { + for _, processHealth := range health.Statuses { // We know this loop should run only once, in Kubernetes there's // only 1 server managed per host. + if !processHealth.ExpectedToBeUp { + // Process may be down intentionally (if the process is marked as disabled in the automation config) + return true + } - // Every time the process health is created by the agent, - // it checks if the MongoDB process is up and populates this field - // (https://github.com/10gen/mms-automation/blob/bb72f74a22d98cfa635c1317e623386b089dc69f/go_planner/src/com.tengen/cm/healthcheck/status.go#L43) - // So it's enough to check that this value is not the zero-value for int64 - + timeMongoUp := time.Unix(processHealth.LastMongoUpTime, 0) + mongoUpThreshold := time.Now().Add(-mongodNotReadyIntervalMinutes) + mongoIsHealthy := timeMongoUp.After(mongoUpThreshold) // The case in which the agent is too old to publish replication status is handled inside "IsReadyState" - return processHealth.LastMongoUpTime != 0 && processHealth.IsReadyState() + return mongoIsHealthy && processHealth.IsReadyState() } return false } diff --git a/cmd/readiness/readiness_test.go b/cmd/readiness/readiness_test.go index 988acb571..11222effa 100644 --- a/cmd/readiness/readiness_test.go +++ b/cmd/readiness/readiness_test.go @@ -1,7 +1,10 @@ package main import ( + "bytes" "context" + "encoding/json" + "io" "os" "testing" "time" @@ -19,73 +22,232 @@ import ( // TestDeadlockDetection verifies that if the agent is stuck in "WaitAllRsMembersUp" phase (started > 15 seconds ago) // then the function returns "ready" func TestDeadlockDetection(t *testing.T) { - assert.True(t, isPodReady(testConfig("testdata/health-status-deadlocked.json"))) -} - -// TestNoDeadlock verifies that if the agent has started (but not finished) "WaitRsInit" and then there is another -// started phase ("WaitFeatureCompatibilityVersionCorrect") then no deadlock is found as the latter is considered to -// be the "current" step -func TestNoDeadlock(t *testing.T) { - health := readHealthinessFile("testdata/health-status-no-deadlock.json") - stepStatus := findCurrentStep(health.ProcessPlans) - - assert.Equal(t, "WaitFeatureCompatibilityVersionCorrect", stepStatus.Step) - - assert.False(t, isPodReady(testConfig("testdata/health-status-no-deadlock.json"))) -} - -// TestDeadlockDetection verifies that if the agent is in "WaitAllRsMembersUp" phase but started < 15 seconds ago -// then the function returns "not ready". To achieve this "started" is put into some long future. 
-// Note, that the status file is artificial: it has two plans (the first one is complete and has no moves) to make sure -// the readiness logic takes only the last plan for consideration -func TestNotReadyWaitingForRsReady(t *testing.T) { - assert.False(t, isPodReady(testConfig("testdata/health-status-pending.json"))) -} - -// TestNotReadyHealthFileHasNoPlans verifies that the readiness script doesn't panic if the health file has unexpected -// data (there are no plans at all) -func TestNotReadyHealthFileHasNoPlans(t *testing.T) { - assert.False(t, isPodReady(testConfig("testdata/health-status-no-plans.json"))) -} - -// TestNotReadyHealthFileHasNoProcesses verifies that the readiness script doesn't panic if the health file has unexpected -// data (there are no processes at all) -func TestNotReadyHealthFileHasNoProcesses(t *testing.T) { - assert.False(t, isPodReady(testConfig("testdata/health-status-no-processes.json"))) + ctx := context.Background() + type TestConfig struct { + conf config.Config + isErrorExpected bool + isReadyExpected bool + } + tests := map[string]TestConfig{ + "Ready but deadlocked on WaitAllRsMembersUp": { + conf: testConfig("testdata/health-status-deadlocked.json"), + isReadyExpected: true, + }, + "Ready but deadlocked on WaitCanUpdate while changing the versions with multiple plans": { + conf: testConfig("testdata/health-status-deadlocked-with-prev-config.json"), + isReadyExpected: true, + }, + "Ready but deadlocked on WaitHasCorrectAutomationCredentials (HELP-39937, HELP-39966)": { + conf: testConfig("testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json"), + isReadyExpected: true, + }, + "Ready and no deadlock detected": { + conf: testConfig("testdata/health-status-no-deadlock.json"), + isReadyExpected: true, + }, + "Ready and positive scenario": { + conf: testConfig("testdata/health-status-ok.json"), + isReadyExpected: true, + }, + "Ready and Pod readiness is correctly checked when no ReplicationStatus is present on the file": { + conf: testConfig("testdata/health-status-no-replication.json"), + isReadyExpected: true, + }, + "Ready and MongoDB replication state is reported by agents": { + conf: testConfig("testdata/health-status-ok-no-replica-status.json"), + isReadyExpected: true, + }, + "Not Ready if replication state is not PRIMARY or SECONDARY": { + conf: testConfig("testdata/health-status-not-readable-state.json"), + isReadyExpected: false, + }, + "Not Ready because less than 15 seconds have passed since the health file update": { + conf: testConfig("testdata/health-status-pending.json"), + isReadyExpected: false, + }, + "Not Ready because there are no plans": { + conf: testConfig("testdata/health-status-no-plans.json"), + isReadyExpected: false, + }, + "Not Ready because there are no statuses": { + conf: testConfig("testdata/health-status-no-plans.json"), + isReadyExpected: false, + }, + "Not Ready because there are no processes": { + conf: testConfig("testdata/health-status-no-processes.json"), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 90 seconds": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*90), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 1 hour": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Hour*1), + isReadyExpected: false, + }, + "Not Ready because mongod is down for 2 days": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Hour*48), + isReadyExpected: false, + }, + 
"Ready and mongod is up for 30 seconds": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*30), + isReadyExpected: true, + }, + "Ready and mongod is up for 1 second": { + conf: testConfigWithMongoUp("testdata/health-status-ok.json", time.Second*30), + isReadyExpected: true, + }, + "Not Ready because of mongod bootstrap errors": { + conf: testConfigWithMongoUp("testdata/health-status-error-tls.json", time.Second*30), + isReadyExpected: false, + }, + "Not Ready because of waiting on an upgrade start in a recomputed plan (a real scenario for an interrupted start in EA)": { + conf: testConfigWithMongoUp("testdata/health-status-enterprise-upgrade-interrupted.json", time.Second*30), + isReadyExpected: false, + }, + } + for testName := range tests { + testConfig := tests[testName] + t.Run(testName, func(t *testing.T) { + ready, err := isPodReady(ctx, testConfig.conf) + if testConfig.isErrorExpected { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, testConfig.isReadyExpected, ready) + }) + } } -// TestReady verifies that the probe reports "ready" despite "WaitRsInit" stage reporting as not reached -// (this is some bug in Automation Agent which we can work with) -func TestReady(t *testing.T) { - assert.True(t, isPodReady(testConfig("testdata/health-status-ok.json"))) -} +func TestObtainingCurrentStep(t *testing.T) { + noDeadlockHealthExample, _ := parseHealthStatus(testConfig("testdata/health-status-no-deadlock.json").HealthStatusReader) + now := time.Now() + tenMinutesAgo := time.Now().Add(-time.Minute * 10) -// TestNoDeadlockForDownloadProcess verifies that the steps not listed as "riskySteps" (like "download") are not -// considered as stuck -func TestNoDeadlockForDownloadProcess(t *testing.T) { - before := time.Now().Add(time.Duration(-30) * time.Second) - downloadStatus := &health.StepStatus{ - Step: "Download", - Started: &before, - Completed: nil, - Result: "", + type TestConfig struct { + processStatuses map[string]health.MmsDirectorStatus + expectedStep string + } + tests := map[string]TestConfig{ + "No deadlock example should point to WaitFeatureCompatibilityVersionCorrect": { + processStatuses: noDeadlockHealthExample.MmsStatus, + expectedStep: "WaitFeatureCompatibilityVersionCorrect", + }, + "Find single Started Step": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "will be ignored as completed", + Started: &tenMinutesAgo, + Completed: &now, + }, + { + Step: "test", + Started: &tenMinutesAgo, + }, + { + Step: "will be ignored as completed", + Started: &tenMinutesAgo, + Completed: &now, + }, + }, + }, + }, + Started: &tenMinutesAgo, + }, + }, + }, + }, + expectedStep: "test", + }, + "Find no Step in completed plan": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "test", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: &tenMinutesAgo, + Completed: &now, + }, + }, + }, + }, + expectedStep: "", + }, + "Find single Started step in the latest plan only": { + processStatuses: map[string]health.MmsDirectorStatus{ + "ignore": { + Plans: []*health.PlanStatus{ + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "will be ignored as only the last plan is evaluated", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: 
&tenMinutesAgo, + }, + { + Moves: []*health.MoveStatus{ + { + Steps: []*health.StepStatus{ + { + Step: "test", + Started: &tenMinutesAgo, + }, + }, + }, + }, + Started: &tenMinutesAgo, + }, + }, + }, + }, + expectedStep: "test", + }, + } + for testName := range tests { + testConfig := tests[testName] + t.Run(testName, func(t *testing.T) { + step := findCurrentStep(testConfig.processStatuses) + if len(testConfig.expectedStep) == 0 { + assert.Nil(t, step) + } else { + assert.Equal(t, testConfig.expectedStep, step.Step) + } + }) } - - assert.False(t, isDeadlocked(downloadStatus)) } -// TestNoDeadlockForImmediateWaitRs verifies the "WaitRsInit" step is not marked as deadlocked if -// it was started < 15 seconds ago -func TestNoDeadlockForImmediateWaitRs(t *testing.T) { - before := time.Now().Add(time.Duration(-10) * time.Second) - downloadStatus := &health.StepStatus{ - Step: "WaitRsInit", - Started: &before, - Completed: nil, - Result: "Wait", - } +// TestReadyWithWaitForCorrectBinaries tests the Static Containers Architecture mode for the Agent. +// In this case, the Readiness Probe needs to return Ready and let the StatefulSet Controller proceed +// with the Pod rollout. +func TestReadyWithWaitForCorrectBinaries(t *testing.T) { + ctx := context.Background() + c := testConfigWithMongoUp("testdata/health-status-ok-with-WaitForCorrectBinaries.json", time.Second*30) + ready, err := isPodReady(ctx, c) - assert.False(t, isDeadlocked(downloadStatus)) + assert.True(t, ready) + assert.NoError(t, err) } // TestHeadlessAgentHasntReachedGoal verifies that the probe reports "false" if the config version is higher than the @@ -94,58 +256,65 @@ func TestNoDeadlockForImmediateWaitRs(t *testing.T) { // (as Agent doesn't marks all the step statuses finished when it reaches the goal) but this doesn't affect the result // as the whole plan is complete already func TestHeadlessAgentHasntReachedGoal(t *testing.T) { - _ = os.Setenv(headlessAgent, "true") + ctx := context.Background() + t.Setenv(headlessAgent, "true") c := testConfig("testdata/health-status-ok.json") c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 6)) - assert.False(t, isPodReady(c)) - thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(context.TODO(), c.Hostname, metav1.GetOptions{}) + ready, err := isPodReady(ctx, c) + assert.False(t, ready) + assert.NoError(t, err) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) assert.Equal(t, map[string]string{"agent.mongodb.com/version": "5"}, thePod.Annotations) - - os.Unsetenv(headlessAgent) } // TestHeadlessAgentReachedGoal verifies that the probe reports "true" if the config version is equal to the // last achieved version of the Agent func TestHeadlessAgentReachedGoal(t *testing.T) { - _ = os.Setenv(headlessAgent, "true") + ctx := context.Background() + t.Setenv(headlessAgent, "true") c := testConfig("testdata/health-status-ok.json") c.ClientSet = fake.NewSimpleClientset(testdata.TestPod(c.Namespace, c.Hostname), testdata.TestSecret(c.Namespace, c.AutomationConfigSecretName, 5)) - assert.True(t, isPodReady(c)) - thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(context.TODO(), c.Hostname, metav1.GetOptions{}) + ready, err := isPodReady(ctx, c) + assert.True(t, ready) + assert.NoError(t, err) + thePod, _ := c.ClientSet.CoreV1().Pods(c.Namespace).Get(ctx, c.Hostname, metav1.GetOptions{}) + assert.Equal(t, map[string]string{"agent.mongodb.com/version": 
"5"}, thePod.Annotations) - - os.Unsetenv(headlessAgent) } -func TestPodReadiness(t *testing.T) { - t.Run("Pod readiness is correctly checked when no ReplicationStatus is present on the file ", func(t *testing.T) { - assert.True(t, isPodReady(testConfig("testdata/health-status-no-replication.json"))) - }) - - t.Run("MongoDB replication state is reported by agents", func(t *testing.T) { - assert.True(t, isPodReady(testConfig("testdata/health-status-ok-no-replica-status.json"))) - }) +func testConfig(healthFilePath string) config.Config { + return testConfigWithMongoUp(healthFilePath, 15*time.Second) +} - t.Run("If replication state is not PRIMARY or SECONDARY, Pod is not ready", func(t *testing.T) { - assert.False(t, isPodReady(testConfig("testdata/health-status-not-readable-state.json"))) - }) +func testConfigWithMongoUp(healthFilePath string, timeSinceMongoLastUp time.Duration) config.Config { + file, err := os.Open(healthFilePath) + if err != nil { + panic(err) + } + defer file.Close() - t.Run("If replication state is readable", func(t *testing.T) { - assert.True(t, isPodReady(testConfig("testdata/health-status-readable-state.json"))) - }) -} + status, err := parseHealthStatus(file) + if err != nil { + panic(err) + } -func readHealthinessFile(path string) health.Status { - fd, _ := os.Open(path) - health, _ := readAgentHealthStatus(fd) - return health -} + for key, processHealth := range status.Statuses { + processHealth.LastMongoUpTime = time.Now().Add(-timeSinceMongoLastUp).Unix() + // Need to reassign the object back to map as 'processHealth' is a copy of the struct + status.Statuses[key] = processHealth + } -func testConfig(healthFilePath string) config.Config { return config.Config{ - HealthStatusFilePath: healthFilePath, + HealthStatusReader: NewTestHealthStatusReader(status), Namespace: "test-ns", AutomationConfigSecretName: "test-mongodb-automation-config", Hostname: "test-mongodb-0", } } + +func NewTestHealthStatusReader(status health.Status) io.Reader { + data, err := json.Marshal(status) + if err != nil { + panic(err) + } + return bytes.NewReader(data) +} diff --git a/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json b/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json new file mode 100644 index 000000000..c6e3053df --- /dev/null +++ b/cmd/readiness/testdata/health-status-deadlocked-waiting-for-correct-automation-credentials.json @@ -0,0 +1,116 @@ +{ + "statuses": { + "svcprovider-cluster-config-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1669378820, + "ExpectedToBeUp": true, + "ReplicationStatus": 2 + } + }, + "mmsStatus": { + "svcprovider-cluster-config-0": { + "name": "svcprovider-cluster-config-0", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "started": "2022-11-25T11:35:45.442597196Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2022-11-25T11:35:45.44261521Z", + "completed": "2022-11-25T11:35:50.8280641Z", + "result": "success" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2022-11-25T11:35:50.828139893Z", + "completed": "2022-11-25T11:35:52.623601143Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", 
+ "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2022-11-25T11:35:52.623699243Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "started": "2022-11-25T11:35:53.820885768Z", + "completed": null, + "moves": [ + { + "move": "WaitHasCorrectAutomationCredentials", + "moveDoc": "Wait for the automation user to be added (if needed)", + "steps": [ + { + "step": "WaitHasCorrectAutomationCredentials", + "stepDoc": "Wait for the automation user to be added (if needed)", + "isWaitStep": true, + "started": "2022-11-25T11:35:53.820925028Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json b/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json new file mode 100644 index 000000000..0c96f6fc1 --- /dev/null +++ b/cmd/readiness/testdata/health-status-deadlocked-with-prev-config.json @@ -0,0 +1,159 @@ +{ + "statuses": { + "mdb0-1": { + "IsInGoalState": false, + "LastMongoUpTime": 1674151493, + "ExpectedToBeUp": true, + "ReplicationStatus": 1 + } + }, + "mmsStatus": { + "mdb0-1": { + "name": "mdb0-1", + "lastGoalVersionAchieved": 2, + "plans": [ + { + "automationConfigVersion": 2, + "started": "2023-01-19T17:27:17.438126081Z", + "completed": "2023-01-19T17:27:22.74117999Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-01-19T17:27:17.438319285Z", + "completed": "2023-01-19T17:27:21.672553263Z", + "result": "success" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": "2023-01-19T17:27:21.672605664Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 2, + "started": "2023-01-19T17:36:34.742889301Z", + "completed": "2023-01-19T17:36:47.913043483Z", + "moves": [ + { + "move": "WaitHasCorrectAutomationCredentials", + "moveDoc": "Wait for the automation user to be added (if needed)", + "steps": [ + { + "step": "WaitHasCorrectAutomationCredentials", + "stepDoc": "Wait 
for the automation user to be added (if needed)", + "isWaitStep": true, + "started": "2023-01-19T17:36:34.742906201Z", + "completed": null, + "result": "wait" + } + ] + } + ] + }, + { + "automationConfigVersion": 3, + "started": "2023-01-19T17:38:33.622622261Z", + "completed": null, + "moves": [ + { + "move": "ChangeVersion", + "moveDoc": "Change MongoDB Version", + "steps": [ + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-01-19T17:38:33.622638561Z", + "completed": "2023-01-19T17:38:33.622959367Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-01-19T17:38:33.622960067Z", + "completed": "2023-01-19T17:38:33.623363973Z", + "result": "success" + }, + { + "step": "WaitCanUpdate", + "stepDoc": "Wait until the update can be made", + "isWaitStep": true, + "started": "2023-01-19T17:38:33.623364774Z", + "completed": null, + "result": "wait" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "Stop", + "stepDoc": "Shutdown the process", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "RemoveDbFilesIfArbiterDowngrade", + "stepDoc": "Delete db files if this is an arbiter downgrade.", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "StartWithUpgrade", + "stepDoc": "Start a mongo instance (upgrade)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} diff --git a/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json b/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json new file mode 100644 index 000000000..55678d574 --- /dev/null +++ b/cmd/readiness/testdata/health-status-enterprise-upgrade-interrupted.json @@ -0,0 +1,271 @@ +{ + "statuses": { + "my-replica-set-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1689233828, + "ExpectedToBeUp": true, + "ReplicationStatus": 2 + } + }, + "mmsStatus": { + "my-replica-set-0": { + "name": "my-replica-set-0", + "lastGoalVersionAchieved": 8, + "plans": [ + { + "automationConfigVersion": 8, + "started": "2023-07-13T07:31:43.706340549Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:31:43.706368293Z", + "completed": "2023-07-13T07:31:52.545770428Z", + "result": "success" + } + ] + }, + { + "move": "DownloadMongosh", + "moveDoc": "Download Mongosh", + "steps": [ + { + "step": "DownloadMongosh", + "stepDoc": "Download mongosh (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:31:52.545834821Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until 
all members of this process' repl set are up", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 8, + "started": "2023-07-13T07:32:02.715922827Z", + "completed": "2023-07-13T07:32:20.938102204Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-07-13T07:32:02.715947483Z", + "completed": "2023-07-13T07:32:09.844613082Z", + "result": "success" + } + ] + }, + { + "move": "UpdateSymLink", + "moveDoc": "Update the mongosh binary symlink", + "steps": [ + { + "step": "UpdateSymLink", + "stepDoc": "Update the mongosh binary symlink", + "isWaitStep": false, + "started": "2023-07-13T07:32:09.844681639Z", + "completed": "2023-07-13T07:32:14.893961595Z", + "result": "success" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + "step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2023-07-13T07:32:14.894030206Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 9, + "started": "2023-07-13T07:35:56.706945979Z", + "completed": null, + "moves": [ + { + "move": "Download", + "moveDoc": "Download mongodb binaries", + "steps": [ + { + "step": "Download", + "stepDoc": "Download mongodb binaries (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:35:56.706976268Z", + "completed": "2023-07-13T07:36:01.116832943Z", + "result": "success" + } + ] + }, + { + "move": "ChangeVersion", + "moveDoc": "Change MongoDB Version", + "steps": [ + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.11709619Z", + "completed": "2023-07-13T07:36:01.11734988Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.117352255Z", + "completed": "2023-07-13T07:36:01.117626127Z", + "result": "success" + }, + { + "step": "WaitCanUpdate", + 
"stepDoc": "Wait until the update can be made", + "isWaitStep": true, + "started": "2023-07-13T07:36:01.117628516Z", + "completed": "2023-07-13T07:36:01.117818709Z", + "result": "success" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.117821034Z", + "completed": "2023-07-13T07:36:01.18783613Z", + "result": "success" + }, + { + "step": "Stop", + "stepDoc": "Shutdown the process", + "isWaitStep": false, + "started": "2023-07-13T07:36:01.187839391Z", + "completed": null, + "result": "" + }, + { + "step": "RemoveDbFilesIfArbiterDowngrade", + "stepDoc": "Delete db files if this is an arbiter downgrade.", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + }, + { + "step": "StartWithUpgrade", + "stepDoc": "Start a mongo instance (upgrade)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-error-tls.json b/cmd/readiness/testdata/health-status-error-tls.json new file mode 100644 index 000000000..d6c4f2ebb --- /dev/null +++ b/cmd/readiness/testdata/health-status-error-tls.json @@ -0,0 +1,146 @@ +{ + "statuses": { + "test-tls-base-rs-require-ssl-1": { + "IsInGoalState": false, + "LastMongoUpTime": 0, + "ExpectedToBeUp": true, + "ReplicationStatus": -1 + } + }, + "mmsStatus": { + "test-tls-base-rs-require-ssl-1": { + "name": "test-tls-base-rs-require-ssl-1", + "lastGoalVersionAchieved": -1, + "plans": [ + { + "automationConfigVersion": 5, + "started": "2023-07-13T07:01:44.951990751Z", + "completed": null, + "moves": [ + { + "move": "DownloadMongosh", + "moveDoc": "Download Mongosh", + "steps": [ + { + "step": "DownloadMongosh", + "stepDoc": "Download mongosh (may take a while)", + "isWaitStep": false, + "started": "2023-07-13T07:01:44.952016495Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + }, + { + "automationConfigVersion": 5, + "started": "2023-07-13T07:01:49.72582887Z", + "completed": null, + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-07-13T07:01:49.725856903Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "UpdateSymLink", + "moveDoc": "Update the mongosh binary symlink", + "steps": [ + { + "step": "UpdateSymLink", + "stepDoc": "Update the mongosh binary symlink", + "isWaitStep": false, + "started": null, 
+ "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitRsInit", + "moveDoc": "Wait for the replica set to be initialized by another member", + "steps": [ + { + "step": "WaitRsInit", + "stepDoc": "Wait for the replica set to be initialized by another member", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": null, + "completed": null, + "result": "" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "\u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Plan execution failed on step StartFresh as part of move Start : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Failed to apply action. Result = \u003cnil\u003e : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Error starting mongod : \u003ctest-tls-base-rs-require-ssl-1\u003e [07:03:13.893] Error running start command. cmd=[Args=[/var/lib/mongodb-mms-automation/mongodb-linux-x86_64-6.0.5-ent/bin/mongod -f /data/automation-mongod.conf]], stip=[args={\"net\":{\"bindIp\":\"0.0.0.0\",\"port\":27017,\"tls\":{\"CAFile\":\"/mongodb-automation/tls/ca/ca-pem\",\"FIPSMode\":true,\"allowConnectionsWithoutCertificates\":true,\"certificateKeyFile\":\"/mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\",\"mode\":\"requireTLS\"}},\"replication\":{\"replSetName\":\"test-tls-base-rs-require-ssl\"},\"storage\":{\"dbPath\":\"/data\"},\"systemLog\":{\"destination\":\"file\",\"path\":\"/var/log/mongodb-mms-automation/mongodb.log\"}}[],confPath=/data/automation-mongod.conf,version=6.0.5-ent-c9a99c120371d4d4c52cbb15dac34a36ce8d3b1d(enterprise),isKmipRotateMasterKey=false,useOldConfFile=false]\n\t,\nConfig Used:\n# THIS FILE IS MAINTAINED BY https://cloud-qa.mongodb.com . DO NOT MODIFY AS IT WILL BE OVERWRITTEN.\n# To make changes to your MongoDB deployment, please visit https://cloud-qa.mongodb.com . 
Your Group ID is 64a3eb7b7b02b627c635ea2b .\nnet:\n bindIp: 0.0.0.0\n port: 27017\n tls:\n CAFile: /mongodb-automation/tls/ca/ca-pem\n FIPSMode: true\n allowConnectionsWithoutCertificates: true\n certificateKeyFile: /mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\n mode: requireTLS\nprocessManagement:\n fork: \"true\"\nreplication:\n replSetName: test-tls-base-rs-require-ssl\nstorage:\n dbPath: /data\nsystemLog:\n destination: file\n path: /var/log/mongodb-mms-automation/mongodb.log\n\t- Output (stdout/stderr): \nabout to fork child process, waiting until server is ready for connections.\nforked process: 823\nERROR: child process failed, exited with 1\nTo see additional information in this output, start without the \"--fork\" option.\n\n\t- Mongo Logs: \n{\"t\":{\"$date\":\"2023-07-13T07:03:13.883+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":23172, \"ctx\":\"-\",\"msg\":\"FIPS 140-2 mode activated\"}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.884+00:00\"},\"s\":\"I\", \"c\":\"NETWORK\", \"id\":4915701, \"ctx\":\"main\",\"msg\":\"Initialized wire specification\",\"attr\":{\"spec\":{\"incomingExternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"incomingInternalClient\":{\"minWireVersion\":0,\"maxWireVersion\":17},\"outgoing\":{\"minWireVersion\":6,\"maxWireVersion\":17},\"isInternalClient\":true}}}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.888+00:00\"},\"s\":\"E\", \"c\":\"NETWORK\", \"id\":23248, \"ctx\":\"main\",\"msg\":\"Cannot read certificate file\",\"attr\":{\"keyFile\":\"/mongodb-automation/tls/ZQHTF7GVI23UNJD4IHNM23NCX7Z6PUCB3PPAWCJ7TO3NB2WIHRDA\",\"error\":\"error:02001002:system library:fopen:No such file or directory\"}}\n{\"t\":{\"$date\":\"2023-07-13T07:03:13.888+00:00\"},\"s\":\"F\", \"c\":\"CONTROL\", \"id\":20574, \"ctx\":\"main\",\"msg\":\"Error during global initialization\",\"attr\":{\"error\":{\"code\":140,\"codeName\":\"InvalidSSLConfiguration\",\"errmsg\":\"Can not set up PEM key file.\"}}}\n : exit status 1" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/health-status-no-replication.json b/cmd/readiness/testdata/health-status-no-replication.json index 0398e8e4e..325d4a3b4 100644 --- a/cmd/readiness/testdata/health-status-no-replication.json +++ b/cmd/readiness/testdata/health-status-no-replication.json @@ -1,81 +1,81 @@ { - "mmsStatus": { - "bar": { - "errorString": "", - "errorCode": 0, - "plans": [ + "mmsStatus": { + "bar": { + "errorString": "", + "errorCode": 0, + "plans": [ + { + "moves": [ + { + "steps": [ { - "moves": [ - { - "steps": [ - { - "result": "success", - "completed": "2019-09-11T14:20:55.645615846Z", - "started": "2019-09-11T14:20:40.631404367Z", - "isWaitStep": false, - "stepDoc": "Download mongodb binaries (may take a while)", - "step": "Download" - } - ], - "moveDoc": "Download mongodb binaries", - "move": "Download" - }, - { - "steps": [ - { - "result": "success", - "completed": "2019-09-11T14:20:59.325129842Z", - "started": "2019-09-11T14:20:55.645743003Z", - "isWaitStep": false, - "stepDoc": "Start a mongo instance (start fresh)", - "step": "StartFresh" - } - ], - "moveDoc": "Start the process", - "move": "Start" - }, - { - "steps": [ - { - "result": "wait", - "completed": null, - "started": "2019-09-11T14:20:59.325272608Z", - "isWaitStep": true, - "stepDoc": "Wait for the replica set to be initialized by another member", - "step": "WaitRsInit" - } - ], - "moveDoc": "Wait for the replica set to be initialized by another member", - "move": "WaitRsInit" - }, - { - "steps": [ - { - 
"result": "", - "completed": null, - "started": null, - "isWaitStep": true, - "stepDoc": "Wait for featureCompatibilityVersion to be right", - "step": "WaitFeatureCompatibilityVersionCorrect" - } - ], - "moveDoc": "Wait for featureCompatibilityVersion to be right", - "move": "WaitFeatureCompatibilityVersionCorrect" - } - ], - "completed": "2019-09-11T14:21:42.034934358Z", - "started": "2019-09-11T14:20:40.631348806Z" + "result": "success", + "completed": "2019-09-11T14:20:55.645615846Z", + "started": "2019-09-11T14:20:40.631404367Z", + "isWaitStep": false, + "stepDoc": "Download mongodb binaries (may take a while)", + "step": "Download" } - ], - "lastGoalVersionAchieved": 5, - "name": "bar" - } - }, - "statuses": { - "bar": { - "ExpectedToBeUp": true, - "LastMongoUpTime": 1568222195, - "IsInGoalState": true + ], + "moveDoc": "Download mongodb binaries", + "move": "Download" + }, + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:59.325129842Z", + "started": "2019-09-11T14:20:55.645743003Z", + "isWaitStep": false, + "stepDoc": "Start a mongo instance (start fresh)", + "step": "StartFresh" + } + ], + "moveDoc": "Start the process", + "move": "Start" + }, + { + "steps": [ + { + "result": "wait", + "completed": null, + "started": "2019-09-11T14:20:59.325272608Z", + "isWaitStep": true, + "stepDoc": "Wait for the replica set to be initialized by another member", + "step": "WaitRsInit" + } + ], + "moveDoc": "Wait for the replica set to be initialized by another member", + "move": "WaitRsInit" + }, + { + "steps": [ + { + "result": "", + "completed": null, + "started": null, + "isWaitStep": true, + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "step": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "move": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "completed": "2019-09-11T14:21:42.034934358Z", + "started": "2019-09-11T14:20:40.631348806Z" } + ], + "lastGoalVersionAchieved": 5, + "name": "bar" + } + }, + "statuses": { + "bar": { + "ExpectedToBeUp": true, + "LastMongoUpTime": 1568222195, + "IsInGoalState": true } + } } diff --git a/cmd/readiness/testdata/health-status-ok-no-replica-status.json b/cmd/readiness/testdata/health-status-ok-no-replica-status.json index caaf67f56..fbf69490c 100644 --- a/cmd/readiness/testdata/health-status-ok-no-replica-status.json +++ b/cmd/readiness/testdata/health-status-ok-no-replica-status.json @@ -1,82 +1,82 @@ { - "mmsStatus": { - "bar": { - "errorString": "", - "errorCode": 0, - "plans": [ + "mmsStatus": { + "bar": { + "errorString": "", + "errorCode": 0, + "plans": [ + { + "moves": [ + { + "steps": [ { - "moves": [ - { - "steps": [ - { - "result": "success", - "completed": "2019-09-11T14:20:55.645615846Z", - "started": "2019-09-11T14:20:40.631404367Z", - "isWaitStep": false, - "stepDoc": "Download mongodb binaries (may take a while)", - "step": "Download" - } - ], - "moveDoc": "Download mongodb binaries", - "move": "Download" - }, - { - "steps": [ - { - "result": "success", - "completed": "2019-09-11T14:20:59.325129842Z", - "started": "2019-09-11T14:20:55.645743003Z", - "isWaitStep": false, - "stepDoc": "Start a mongo instance (start fresh)", - "step": "StartFresh" - } - ], - "moveDoc": "Start the process", - "move": "Start" - }, - { - "steps": [ - { - "result": "wait", - "completed": null, - "started": "2019-09-11T14:20:59.325272608Z", - "isWaitStep": true, - "stepDoc": "Wait for the replica set to be initialized by another member", - "step": 
"WaitRsInit" - } - ], - "moveDoc": "Wait for the replica set to be initialized by another member", - "move": "WaitRsInit" - }, - { - "steps": [ - { - "result": "", - "completed": null, - "started": null, - "isWaitStep": true, - "stepDoc": "Wait for featureCompatibilityVersion to be right", - "step": "WaitFeatureCompatibilityVersionCorrect" - } - ], - "moveDoc": "Wait for featureCompatibilityVersion to be right", - "move": "WaitFeatureCompatibilityVersionCorrect" - } - ], - "completed": "2019-09-11T14:21:42.034934358Z", - "started": "2019-09-11T14:20:40.631348806Z" + "result": "success", + "completed": "2019-09-11T14:20:55.645615846Z", + "started": "2019-09-11T14:20:40.631404367Z", + "isWaitStep": false, + "stepDoc": "Download mongodb binaries (may take a while)", + "step": "Download" } - ], - "lastGoalVersionAchieved": 5, - "name": "bar" - } - }, - "statuses": { - "bar": { - "ReplicationStatus": null, - "ExpectedToBeUp": true, - "LastMongoUpTime": 1568222195, - "IsInGoalState": true + ], + "moveDoc": "Download mongodb binaries", + "move": "Download" + }, + { + "steps": [ + { + "result": "success", + "completed": "2019-09-11T14:20:59.325129842Z", + "started": "2019-09-11T14:20:55.645743003Z", + "isWaitStep": false, + "stepDoc": "Start a mongo instance (start fresh)", + "step": "StartFresh" + } + ], + "moveDoc": "Start the process", + "move": "Start" + }, + { + "steps": [ + { + "result": "wait", + "completed": null, + "started": "2019-09-11T14:20:59.325272608Z", + "isWaitStep": true, + "stepDoc": "Wait for the replica set to be initialized by another member", + "step": "WaitRsInit" + } + ], + "moveDoc": "Wait for the replica set to be initialized by another member", + "move": "WaitRsInit" + }, + { + "steps": [ + { + "result": "", + "completed": null, + "started": null, + "isWaitStep": true, + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "step": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "move": "WaitFeatureCompatibilityVersionCorrect" + } + ], + "completed": "2019-09-11T14:21:42.034934358Z", + "started": "2019-09-11T14:20:40.631348806Z" } + ], + "lastGoalVersionAchieved": 5, + "name": "bar" + } + }, + "statuses": { + "bar": { + "ReplicationStatus": null, + "ExpectedToBeUp": true, + "LastMongoUpTime": 1568222195, + "IsInGoalState": true } + } } diff --git a/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json new file mode 100644 index 000000000..c2c6bb307 --- /dev/null +++ b/cmd/readiness/testdata/health-status-ok-with-WaitForCorrectBinaries.json @@ -0,0 +1,144 @@ +{ + "statuses": { + "my-replica-set-downgrade-0": { + "IsInGoalState": false, + "LastMongoUpTime": 1701853492, + "ExpectedToBeUp": true, + "ReplicationStatus": 1 + } + }, + "mmsStatus": { + "my-replica-set-downgrade-0": { + "name": "my-replica-set-downgrade-0", + "lastGoalVersionAchieved": 1, + "plans": [ + { + "automationConfigVersion": 1, + "started": "2023-12-06T09:03:33.709679218Z", + "completed": "2023-12-06T09:03:43.65117796Z", + "moves": [ + { + "move": "Start", + "moveDoc": "Start the process", + "steps": [ + { + "step": "StartFresh", + "stepDoc": "Start a mongo instance (start fresh)", + "isWaitStep": false, + "started": "2023-12-06T09:03:33.709703572Z", + "completed": null, + "result": "error" + } + ] + }, + { + "move": "WaitAllRsMembersUp", + "moveDoc": "Wait until all members of this process' repl set are up", + "steps": [ + { + 
"step": "WaitAllRsMembersUp", + "stepDoc": "Wait until all members of this process' repl set are up", + "isWaitStep": true, + "started": "2023-12-06T09:03:35.652236845Z", + "completed": null, + "result": "wait" + } + ] + }, + { + "move": "RsInit", + "moveDoc": "Initialize a replica set including the current MongoDB process", + "steps": [ + { + "step": "RsInit", + "stepDoc": "Initialize a replica set", + "isWaitStep": false, + "started": "2023-12-06T09:03:43.536653463Z", + "completed": "2023-12-06T09:03:43.650871495Z", + "result": "success" + } + ] + }, + { + "move": "WaitFeatureCompatibilityVersionCorrect", + "moveDoc": "Wait for featureCompatibilityVersion to be right", + "steps": [ + { + "step": "WaitFeatureCompatibilityVersionCorrect", + "stepDoc": "Wait for featureCompatibilityVersion to be right", + "isWaitStep": true, + "started": "2023-12-06T09:03:43.650920722Z", + "completed": "2023-12-06T09:03:43.65111749Z", + "result": "success" + } + ] + } + ] + }, + { + "automationConfigVersion": 2, + "started": "2023-12-06T09:04:03.576712545Z", + "completed": null, + "moves": [ + { + "move": "ChangeVersionKube", + "moveDoc": "Change MongoDB Version on operator mode", + "steps": [ + { + "step": "CheckRunningOperatorMode", + "stepDoc": "Check Running in operator mode", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576729706Z", + "completed": "2023-12-06T09:04:03.576893698Z", + "result": "success" + }, + { + "step": "CheckWrongVersion", + "stepDoc": "Check that MongoDB version is wrong", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.576894027Z", + "completed": "2023-12-06T09:04:03.577041016Z", + "result": "success" + }, + { + "step": "CheckRsCorrect", + "stepDoc": "Check that replica set configuration is correct", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577041402Z", + "completed": "2023-12-06T09:04:03.577219188Z", + "result": "success" + }, + { + "step": "WaitAllRouterConfigsFlushedForUpgrade", + "stepDoc": "Wait until flushRouterConfig has been run on all mongoses", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.577219563Z", + "completed": "2023-12-06T09:04:03.577356271Z", + "result": "success" + }, + { + "step": "DisableBalancerIfFirst", + "stepDoc": "Disable the balancer (may take a while)", + "isWaitStep": false, + "started": "2023-12-06T09:04:03.577356599Z", + "completed": "2023-12-06T09:04:03.604579059Z", + "result": "success" + }, + { + "step": "WaitForCorrectBinaries", + "stepDoc": "Wait until correct binaries are available", + "isWaitStep": true, + "started": "2023-12-06T09:04:03.60458063Z", + "completed": null, + "result": "wait" + } + ] + } + ] + } + ], + "errorCode": 0, + "errorString": "" + } + } +} \ No newline at end of file diff --git a/cmd/readiness/testdata/k8sobjects.go b/cmd/readiness/testdata/k8sobjects.go index 145fa0f63..e4e6d4d64 100644 --- a/cmd/readiness/testdata/k8sobjects.go +++ b/cmd/readiness/testdata/k8sobjects.go @@ -7,6 +7,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// Currently seems like the appending functionality on the library used by the fake +// implementation to simulate JSONPatch is broken: https://github.com/evanphx/json-patch/issues/138 +// The short term workaround is to have the annotation empty. + // These are just k8s objects used for testing. 
Note that these are defined in a non-"_test.go" file as they are reused // by other modules func TestSecret(namespace, name string, version int) *corev1.Secret { @@ -21,6 +25,9 @@ func TestPod(namespace, name string) *corev1.Pod { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, + Annotations: map[string]string{ + "agent.mongodb.com/version": "", + }, }, } } diff --git a/cmd/versionhook/main.go b/cmd/versionhook/main.go index 11ddcf3f2..6e0d02f95 100644 --- a/cmd/versionhook/main.go +++ b/cmd/versionhook/main.go @@ -3,14 +3,12 @@ package main import ( "context" "encoding/json" + "fmt" "io" - "io/ioutil" "os" "strings" "time" - "github.com/pkg/errors" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/agent" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -29,6 +27,7 @@ const ( ) func main() { + ctx := context.Background() logger := setupLogger() logger.Info("Running version change post-start hook") @@ -59,9 +58,9 @@ func main() { if shouldDelete { logger.Infof("Pod should be deleted") - if err := deletePod(); err != nil { + if err := deletePod(ctx); err != nil { // We should not raise an error if the Pod could not be deleted. It can have even - // worst consequences: Pod being restarted with the same version, and the agent + // worse consequences: Pod being restarted with the same version, and the agent killing it immediately after. logger.Errorf("Could not manually trigger restart of this Pod because of: %s", err) logger.Errorf("Make sure the Pod is restarted in order for the upgrade process to continue") @@ -109,7 +108,7 @@ func waitForAgentHealthStatus() (agent.Health, error) { status, ok := health.Healthiness[getHostname()] if !ok { - return agent.Health{}, errors.Errorf("couldn't find status for hostname %s", getHostname()) + return agent.Health{}, fmt.Errorf("couldn't find status for hostname %s", getHostname()) } // We determine if the file has been updated by checking if the process is not in goal state.
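These hunks migrate cmd/versionhook/main.go from github.com/pkg/errors to the standard library (fmt.Errorf, io.ReadAll, os.ReadFile) and thread a context.Context through to the Pod deletion. The heart of the file is the poll-and-sleep loop in waitForAgentHealthStatus, which re-reads the agent health file (the same JSON shape as the test fixtures above) until the process reports the expected state or the polling window elapses. Below is a minimal, self-contained sketch of that pattern; the trimmed health types, the env variable name, and the waitForStatus helper are illustrative assumptions, not the operator's real agent package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"time"
)

// Trimmed stand-in for the agent health file; the real agent.Health type
// also carries process plans and richer per-process status.
type processHealth struct {
	IsInGoalState bool `json:"IsInGoalState"`
}

type healthFile struct {
	Statuses map[string]processHealth `json:"statuses"`
}

// waitForStatus re-reads the health file until pred holds for hostname or
// the timeout elapses, mirroring the poll-and-sleep loop in
// waitForAgentHealthStatus.
func waitForStatus(path, hostname string, timeout time.Duration, pred func(processHealth) bool) (healthFile, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if data, err := os.ReadFile(path); err == nil {
			var h healthFile
			if err := json.Unmarshal(data, &h); err == nil {
				if s, ok := h.Statuses[hostname]; ok && pred(s) {
					return h, nil
				}
			}
		}
		time.Sleep(time.Second)
	}
	return healthFile{}, fmt.Errorf("agent health status not ready after waiting %s", timeout)
}

func main() {
	// The hook treats "process not in goal state" as the signal that the
	// agent has noticed the pending version change.
	path := os.Getenv("AGENT_STATUS_FILEPATH") // env var name is illustrative
	h, err := waitForStatus(path, os.Getenv("HOSTNAME"), time.Minute,
		func(p processHealth) bool { return !p.IsInGoalState })
	fmt.Println(h, err)
}
```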
@@ -118,7 +117,7 @@ func waitForAgentHealthStatus() (agent.Health, error) { return health, nil } } - return agent.Health{}, errors.Errorf("agent health status not ready after waiting %s", pollingDuration.String()) + return agent.Health{}, fmt.Errorf("agent health status not ready after waiting %s", pollingDuration.String()) } @@ -127,13 +126,13 @@ func waitForAgentHealthStatus() (agent.Health, error) { func getAgentHealthStatus() (agent.Health, error) { f, err := os.Open(os.Getenv(agentStatusFilePathEnv)) if err != nil { - return agent.Health{}, errors.Errorf("could not open file: %s", err) + return agent.Health{}, err } defer f.Close() h, err := readAgentHealthStatus(f) if err != nil { - return agent.Health{}, errors.Errorf("could not read health status file: %s", err) + return agent.Health{}, fmt.Errorf("could not read health status file: %s", err) } return h, err } @@ -142,7 +141,7 @@ func getAgentHealthStatus() (agent.Health, error) { // io.Reader func readAgentHealthStatus(reader io.Reader) (agent.Health, error) { var h agent.Health - data, err := ioutil.ReadAll(reader) + data, err := io.ReadAll(reader) if err != nil { return h, err } @@ -160,7 +159,7 @@ func getHostname() string { func shouldDeletePod(health agent.Health) (bool, error) { status, ok := health.ProcessPlans[getHostname()] if !ok { - return false, errors.Errorf("hostname %s was not in the process plans", getHostname()) + return false, fmt.Errorf("hostname %s was not in the process plans", getHostname()) } return isWaitingToBeDeleted(status), nil } @@ -184,18 +183,18 @@ func isWaitingToBeDeleted(healthStatus agent.MmsDirectorStatus) bool { } // deletePod attempts to delete the pod this mongod is running in -func deletePod() error { +func deletePod(ctx context.Context) error { thisPod, err := getThisPod() if err != nil { - return errors.Errorf("could not get pod: %s", err) + return fmt.Errorf("could not get pod: %s", err) } k8sClient, err := inClusterClient() if err != nil { - return errors.Errorf("could not get client: %s", err) + return fmt.Errorf("could not get client: %s", err) } - if err := k8sClient.Delete(context.TODO(), &thisPod); err != nil { - return errors.Errorf("could not delete pod: %s", err) + if err := k8sClient.Delete(ctx, &thisPod); err != nil { + return fmt.Errorf("could not delete pod: %s", err) } return nil } @@ -204,12 +203,12 @@ func deletePod() error { func getThisPod() (corev1.Pod, error) { podName := getHostname() if podName == "" { - return corev1.Pod{}, errors.Errorf("environment variable HOSTNAME was not present") + return corev1.Pod{}, fmt.Errorf("environment variable HOSTNAME was not present") } ns, err := getNamespace() if err != nil { - return corev1.Pod{}, errors.Errorf("could not read namespace: %s", err) + return corev1.Pod{}, fmt.Errorf("could not read namespace: %s", err) } return corev1.Pod{ @@ -223,18 +222,18 @@ func getThisPod() (corev1.Pod, error) { func inClusterClient() (client.Client, error) { config, err := rest.InClusterConfig() if err != nil { - return nil, errors.Errorf("could not get cluster config: %s", err) + return nil, fmt.Errorf("could not get cluster config: %s", err) } k8sClient, err := client.New(config, client.Options{}) if err != nil { - return nil, errors.Errorf("could not create client: %s", err) + return nil, fmt.Errorf("could not create client: %s", err) } return k8sClient, nil } func getNamespace() (string, error) { - data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + data, err := 
os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") if err != nil { return "", err } diff --git a/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml index 31d86b059..12207a6bd 100644 --- a/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml +++ b/config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml @@ -1,22 +1,17 @@ - --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.4.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.15.0 + service.binding: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret + service.binding/connectionString: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=connectionString.standardSrv + service.binding/password: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=password + service.binding/provider: community + service.binding/type: mongodb + service.binding/username: path={.metadata.name}-{.spec.users[0].db}-{.spec.users[0].name},objectType=Secret,sourceKey=username name: mongodbcommunity.mongodbcommunity.mongodb.com spec: - additionalPrinterColumns: - - JSONPath: .status.phase - description: Current state of the MongoDB deployment - name: Phase - type: string - - JSONPath: .status.version - description: Version of MongoDB server - name: Version - type: string group: mongodbcommunity.mongodb.com names: kind: MongoDBCommunity @@ -26,226 +21,276 @@ spec: - mdbc singular: mongodbcommunity scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - description: MongoDBCommunity is the Schema for the mongodbs API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: MongoDBCommunitySpec defines the desired state of MongoDB - properties: - additionalMongodConfig: - description: 'AdditionalMongodConfig is additional configuration that - can be passed to each data-bearing mongod at runtime. Uses the same - structure as the mongod configuration file: https://docs.mongodb.com/manual/reference/configuration-options/' - nullable: true - type: object - featureCompatibilityVersion: - description: FeatureCompatibilityVersion configures the feature compatibility - version that will be set for the deployment - type: string - members: - description: Members is the number of members in the replica set - type: integer - replicaSetHorizons: - description: ReplicaSetHorizons Add this parameter and values if you - need your database to be accessed outside of Kubernetes. This setting - allows you to provide different DNS settings within the Kubernetes - cluster and to the Kubernetes cluster. 
The Kubernetes Operator uses - split horizon DNS for replica set members. This feature allows communication - both within the Kubernetes cluster and from outside Kubernetes. - items: - additionalProperties: - type: string + versions: + - additionalPrinterColumns: + - description: Current state of the MongoDB deployment + jsonPath: .status.phase + name: Phase + type: string + - description: Version of MongoDB server + jsonPath: .status.version + name: Version + type: string + name: v1 + schema: + openAPIV3Schema: + description: MongoDBCommunity is the Schema for the mongodbs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MongoDBCommunitySpec defines the desired state of MongoDB + properties: + additionalConnectionStringConfig: + description: Additional options to be appended to the connection string. + These options apply to the entire resource and to each user. + nullable: true type: object - type: array - security: - description: Security configures security features, such as TLS, and - authentication settings for a deployment - properties: - authentication: - properties: - ignoreUnknownUsers: - nullable: true - type: boolean - modes: - description: Modes is an array specifying which authentication - methods should be enabled. - items: - enum: - - SCRAM + x-kubernetes-preserve-unknown-fields: true + additionalMongodConfig: + description: |- + AdditionalMongodConfig is additional configuration that can be passed to + each data-bearing mongod at runtime. Uses the same structure as the mongod + configuration file: https://www.mongodb.com/docs/manual/reference/configuration-options/ + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + agent: + description: AgentConfiguration sets options for the MongoDB automation + agent + properties: + auditLogRotate: + description: AuditLogRotate if enabled, will enable AuditLogRotate + for all processes. + properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 type: string - type: array - required: - - modes - type: object - roles: - description: User-specified custom MongoDB roles that should be - configured in the deployment. - items: - description: CustomRole defines a custom MongoDB role. + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. 
+ Fractional values of MB are supported. + type: string + timeThresholdHrs: + description: maximum hours for an individual log file before + rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object + logFile: + type: string + logLevel: + type: string + logRotate: + description: LogRotate if enabled, will enable LogRotate for all + processes. + properties: + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 + type: string + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. + type: string + timeThresholdHrs: + description: maximum hours for an individual log file before + rotation + type: integer + required: + - sizeThresholdMB + - timeThresholdHrs + type: object + maxLogFileDurationHours: + type: integer + systemLog: + description: SystemLog configures system log of mongod + properties: + destination: + type: string + logAppend: + type: boolean + path: + type: string + required: + - destination + - logAppend + - path + type: object + type: object + arbiters: + description: |- + Arbiters is the number of arbiters to add to the Replica Set. + It is not recommended to have more than one arbiter per Replica Set. + More info: https://www.mongodb.com/docs/manual/tutorial/add-replica-set-arbiter/ + type: integer + automationConfig: + description: |- + AutomationConfigOverride is merged on top of the operator created automation config. Processes are merged + by name. Currently only the process.disabled field is supported.
+ properties: + processes: + items: + description: OverrideProcess contains fields that we can override + on the AutomationConfig processes. + properties: + disabled: + type: boolean + logRotate: + description: CrdLogRotate is the crd definition of LogRotate + including fields in strings while the agent supports them + as float64 properties: - db: - description: DB is the database the role can act on + includeAuditLogsWithMongoDBLogs: + description: |- + set to 'true' to have the Automation Agent rotate the audit files along + with mongodb log files + type: boolean + numTotal: + description: maximum number of log files to have total + type: integer + numUncompressed: + description: maximum number of log files to leave uncompressed + type: integer + percentOfDiskspace: + description: |- + Maximum percentage of the total disk space these log files should take up. + The string needs to be able to be converted to float64 type: string - name: - description: Name is the name of the role + sizeThresholdMB: + description: |- + Maximum size for an individual log file before rotation. + The string needs to be able to be converted to float64. + Fractional values of MB are supported. type: string + timeThresholdHrs: + description: maximum hours for an individual log file + before rotation + type: integer required: - - db - - name + - sizeThresholdMB + - timeThresholdHrs type: object - type: array - required: - - db - - privileges - - role - type: object - type: array - tls: - description: TLS configuration for both client-server and server-server - communication - properties: - caConfigMapRef: - description: CaConfigMap is a reference to a ConfigMap containing - the certificate for the CA which signed the server certificates - The certificate is expected to be available under the key - "ca.crt" - properties: name: type: string required: + - disabled - name type: object - certificateKeySecretRef: - description: CertificateKeySecret is a reference to a Secret - containing a private key and certificate to use for TLS. The - key and cert are expected to be PEM encoded and available - at "tls.key" and "tls.crt". This is the same format used for - the standard "kubernetes.io/tls" Secret type, but no specific - type is required. - properties: - name: - type: string - required: - - name + type: array + replicaSet: + properties: + id: + description: |- + Id can be used together with additionalMongodConfig.replication.replSetName + to manage clusters where replSetName differs from the MongoDBCommunity resource name + type: string + settings: + description: |- + MapWrapper is a wrapper for a map to be used by other structs. + The CRD generator does not support map[string]interface{} + on the top level and hence we need to work around this with + a wrapping struct. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: object + featureCompatibilityVersion: + description: |- + FeatureCompatibilityVersion configures the feature compatibility version that will + be set for the deployment + type: string + memberConfig: + description: MemberConfig + items: + properties: + priority: + type: string + tags: + additionalProperties: + type: string type: object - enabled: - type: boolean - optional: - description: Optional configures if TLS should be required or - optional for connections - type: boolean - required: - - enabled - type: object - type: object - statefulSet: - description: StatefulSetConfiguration holds the optional custom StatefulSet - that should be merged into the operator created one. - properties: - spec: + votes: + type: integer type: object - required: - - spec - type: object - type: - description: Type defines which type of MongoDB deployment the resource - should create - enum: - - ReplicaSet - type: string - users: - description: Users specifies the MongoDB users that should be configured - in your deployment - items: + type: array + members: + description: Members is the number of members in the replica set + type: integer + prometheus: + description: Prometheus configurations. properties: - db: - description: DB is the database the user is stored in. Defaults - to "admin" - type: string - name: - description: Name is the username of the user + metricsPath: + description: Indicates path to the metrics endpoint. + pattern: ^\/[a-z0-9]+$ type: string passwordSecretRef: - description: PasswordSecretRef is a reference to the secret containing - this user's password + description: Name of a Secret containing a HTTP Basic Auth Password. + properties: + key: + description: Key is the key in the secret storing this password. + Defaults to "password" + type: string + name: + description: Name is the name of the secret storing this user's + password + type: string + required: + - name + type: object + port: + description: Port where metrics endpoint will bind to. Defaults + to 9216. + type: integer + tlsSecretKeyRef: + description: |- + Name of a Secret (type kubernetes.io/tls) holding the certificates to use in the + Prometheus endpoint. properties: key: description: Key is the key in the secret storing this password. @@ -258,72 +303,375 @@ spec: required: - name type: object + username: + description: HTTP Basic Auth Username for metrics endpoint. + type: string + required: + - passwordSecretRef + - username + type: object + replicaSetHorizons: + description: |- + ReplicaSetHorizons Add this parameter and values if you need your database + to be accessed outside of Kubernetes. This setting allows you to + provide different DNS settings within the Kubernetes cluster and + to the Kubernetes cluster. The Kubernetes Operator uses split horizon + DNS for replica set members. This feature allows communication both + within the Kubernetes cluster and from outside Kubernetes. 
+ items: + additionalProperties: + type: string + type: object + type: array + security: + description: Security configures security features, such as TLS, and + authentication settings for a deployment + properties: + authentication: + properties: + agentCertificateSecretRef: + description: |- + AgentCertificateSecret is a reference to a Secret containing the certificate and the key for the automation agent + The secret needs to have available: + - certificate under key: "tls.crt" + - private key under key: "tls.key" + If additionally, tls.pem is present, then it needs to be equal to the concatenation of tls.crt and tls.key + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + agentMode: + description: AgentMode contains the authentication mode used + by the automation agent. + enum: + - SCRAM + - SCRAM-SHA-256 + - SCRAM-SHA-1 + - X509 + type: string + ignoreUnknownUsers: + default: true + nullable: true + type: boolean + modes: + description: Modes is an array specifying which authentication + methods should be enabled. + items: + enum: + - SCRAM + - SCRAM-SHA-256 + - SCRAM-SHA-1 + - X509 + type: string + type: array + required: + - modes + type: object roles: - description: Roles is an array of roles assigned to this user + description: User-specified custom MongoDB roles that should be + configured in the deployment. items: - description: Role is the database role this user should have + description: CustomRole defines a custom MongoDB role. properties: + authenticationRestrictions: + description: The authentication restrictions the server + enforces on the role. + items: + description: |- + AuthenticationRestriction specifies a list of IP addresses and CIDR ranges users + are allowed to connect to or from. + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + required: + - clientSource + - serverAddress + type: object + type: array db: - description: DB is the database the role can act on + description: The database of the role. type: string - name: - description: Name is the name of the role + privileges: + description: The privileges to grant the role. + items: + description: Privilege defines the actions a role is allowed + to perform on a given resource. + properties: + actions: + items: + type: string + type: array + resource: + description: |- + Resource specifies the resources upon which a privilege permits actions. + See https://www.mongodb.com/docs/manual/reference/resource-document for more. + properties: + anyResource: + type: boolean + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + - resource + type: object + type: array + role: + description: The name of the role. type: string + roles: + description: An array of roles from which this role inherits + privileges.
+ items: + description: Role is the database role this user should + have + properties: + db: + description: DB is the database the role can act on + type: string + name: + description: Name is the name of the role + type: string + required: + - db + - name + type: object + type: array required: - db - - name + - privileges + - role type: object type: array - scramCredentialsSecretName: - description: ScramCredentialsSecretName appended by string "scram-credentials" - is the name of the secret object created by the mongoDB operator - for storing SCRAM credentials - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string + tls: + description: TLS configuration for both client-server and server-server + communication + properties: + caCertificateSecretRef: + description: |- + CaCertificateSecret is a reference to a Secret containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + caConfigMapRef: + description: |- + CaConfigMap is a reference to a ConfigMap containing the certificate for the CA which signed the server certificates + The certificate is expected to be available under the key "ca.crt" + This field is ignored when CaCertificateSecretRef is configured + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + certificateKeySecretRef: + description: |- + CertificateKeySecret is a reference to a Secret containing a private key and certificate to use for TLS. + The key and cert are expected to be PEM encoded and available at "tls.key" and "tls.crt". + This is the same format used for the standard "kubernetes.io/tls" Secret type, but no specific type is required. + Alternatively, an entry tls.pem, containing the concatenation of cert and key, can be provided. + If all of tls.pem, tls.crt and tls.key are present, the tls.pem one needs to be equal to the concatenation of tls.crt and tls.key + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + TODO: Add other useful fields. apiVersion, kind, uid? 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. + type: string + type: object + x-kubernetes-map-type: atomic + enabled: + type: boolean + optional: + description: Optional configures if TLS should be required + or optional for connections + type: boolean + required: + - enabled + type: object + type: object + statefulSet: + description: |- + StatefulSetConfiguration holds the optional custom StatefulSet + that should be merged into the operator created one. + properties: + metadata: + description: StatefulSetMetadataWrapper is a wrapper around Labels + and Annotations + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + type: object + x-kubernetes-preserve-unknown-fields: true required: - - name - - passwordSecretRef - - roles - - scramCredentialsSecretName + - spec type: object - type: array - version: - description: Version defines which version of MongoDB will be used - type: string - required: - - security - - type - - users - - version - type: object - status: - description: MongoDBCommunityStatus defines the observed state of MongoDB - properties: - currentMongoDBMembers: - type: integer - currentStatefulSetReplicas: - type: integer - message: - type: string - mongoUri: - type: string - phase: - type: string - required: - - currentMongoDBMembers - - currentStatefulSetReplicas - - mongoUri - - phase - type: object - type: object - version: v1 - versions: - - name: v1 + type: + description: Type defines which type of MongoDB deployment the resource + should create + enum: + - ReplicaSet + type: string + users: + description: Users specifies the MongoDB users that should be configured + in your deployment + items: + properties: + additionalConnectionStringConfig: + description: |- + Additional options to be appended to the connection string. + These options apply only to this user and will override any existing options in the resource. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + connectionStringSecretName: + description: |- + ConnectionStringSecretName is the name of the secret object created by the operator which exposes the connection strings for the user. + If provided, this secret must be different for each user in a deployment. + type: string + connectionStringSecretNamespace: + description: ConnectionStringSecretNamespace is the namespace + of the secret object created by the operator which exposes + the connection strings for the user. + type: string + db: + default: admin + description: DB is the database the user is stored in. Defaults + to "admin" + type: string + name: + description: Name is the username of the user + type: string + passwordSecretRef: + description: PasswordSecretRef is a reference to the secret + containing this user's password + properties: + key: + description: Key is the key in the secret storing this password. 
+ Defaults to "password" + type: string + name: + description: Name is the name of the secret storing this + user's password + type: string + required: + - name + type: object + roles: + description: Roles is an array of roles assigned to this user + items: + description: Role is the database role this user should have + properties: + db: + description: DB is the database the role can act on + type: string + name: + description: Name is the name of the role + type: string + required: + - db + - name + type: object + type: array + scramCredentialsSecretName: + description: |- + ScramCredentialsSecretName appended by string "scram-credentials" is the name of the secret object created by the mongoDB operator for storing SCRAM credentials + These secrets names must be different for each user in a deployment. + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - name + - roles + type: object + type: array + version: + description: Version defines which version of MongoDB will be used + type: string + required: + - security + - type + - users + type: object + status: + description: MongoDBCommunityStatus defines the observed state of MongoDB + properties: + currentMongoDBArbiters: + type: integer + currentMongoDBMembers: + type: integer + currentStatefulSetArbitersReplicas: + type: integer + currentStatefulSetReplicas: + type: integer + message: + type: string + mongoUri: + type: string + phase: + type: string + version: + type: string + required: + - currentMongoDBMembers + - currentStatefulSetReplicas + - mongoUri + - phase + type: object + type: object served: true storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + subresources: + status: {} diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index 651b34fd4..ec5c150a9 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -4,13 +4,13 @@ nameReference: version: v1 fieldSpecs: - kind: CustomResourceDefinition - version: v1beta1 + version: v1 group: apiextensions.k8s.io path: spec/conversion/webhook/clientConfig/service/name namespace: - kind: CustomResourceDefinition - version: v1beta1 + version: v1 group: apiextensions.k8s.io path: spec/conversion/webhook/clientConfig/service/namespace create: false diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index f7385fb48..bd972fd91 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,14 +1,6 @@ -# Adds namespace to all resources. -namespace: default - -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. 
namePrefix: "" -bases: -- ../crd -- ../rbac -- ../manager +resources: + - ../crd + - ../rbac + - ../manager diff --git a/config/local_run/kustomization.yaml b/config/local_run/kustomization.yaml new file mode 100644 index 000000000..0a33b94d2 --- /dev/null +++ b/config/local_run/kustomization.yaml @@ -0,0 +1,6 @@ +# used to run the operator locally +namePrefix: "" + +resources: + - ../crd + - ../rbac diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index a952985e6..0705e7eae 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,43 +1,74 @@ ---- apiVersion: apps/v1 kind: Deployment metadata: + annotations: + email: support@mongodb.com + labels: + owner: mongodb name: mongodb-kubernetes-operator spec: replicas: 1 selector: matchLabels: name: mongodb-kubernetes-operator + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate template: metadata: labels: name: mongodb-kubernetes-operator spec: - serviceAccountName: mongodb-kubernetes-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + values: + - mongodb-kubernetes-operator + topologyKey: kubernetes.io/hostname containers: - - name: mongodb-kubernetes-operator - image: quay.io/mongodb/mongodb-kubernetes-operator:0.6.0 - command: - - /usr/local/bin/entrypoint - imagePullPolicy: Always - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "mongodb-kubernetes-operator" - - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage MongoDB deployments - value: quay.io/mongodb/mongodb-agent:10.29.0.6830-1 - - name: VERSION_UPGRADE_HOOK_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2 - - name: READINESS_PROBE_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3 - - name: MONGODB_IMAGE - value: "library/mongo" - - name: MONGODB_REPO_URL - value: "registry.hub.docker.com" + - command: + - /usr/local/bin/entrypoint + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: mongodb-kubernetes-operator + - name: AGENT_IMAGE + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 + - name: VERSION_UPGRADE_HOOK_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 + - name: READINESS_PROBE_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 + - name: MONGODB_IMAGE + value: mongodb-community-server + - name: MONGODB_REPO_URL + value: quay.io/mongodb + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 + imagePullPolicy: Always + name: mongodb-kubernetes-operator + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + securityContext: + readOnlyRootFilesystem: true + runAsUser: 2000 + allowPrivilegeEscalation: false + securityContext: + seccompProfile: + type: RuntimeDefault + serviceAccountName: mongodb-kubernetes-operator diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index c110cb8e3..f1fe88a33 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -2,3 +2,6 @@ resources: - role.yaml - role_binding.yaml - service_account.yaml +- service_account_database.yaml +- 
role_binding_database.yaml +- role_database.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 4cb3c74c1..6a9c42070 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,7 +2,6 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - creationTimestamp: null name: mongodb-kubernetes-operator rules: - apiGroups: @@ -10,10 +9,6 @@ rules: resources: - pods - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - configmaps - secrets verbs: @@ -27,9 +22,6 @@ rules: - apiGroups: - apps resources: - - deployments - - daemonsets - - replicasets - statefulsets verbs: - create @@ -39,34 +31,6 @@ rules: - patch - update - watch -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create -- apiGroups: - - apps - resourceNames: - - mongodb-kubernetes-operator - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get - apiGroups: - mongodbcommunity.mongodb.com resources: @@ -75,10 +39,8 @@ rules: - mongodbcommunity/spec - mongodbcommunity/finalizers verbs: - - create - - delete - get - - list - patch + - list - update - watch diff --git a/config/rbac/role_binding_database.yaml b/config/rbac/role_binding_database.yaml new file mode 100644 index 000000000..b02a52db3 --- /dev/null +++ b/config/rbac/role_binding_database.yaml @@ -0,0 +1,11 @@ +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-database +subjects: +- kind: ServiceAccount + name: mongodb-database +roleRef: + kind: Role + name: mongodb-database + apiGroup: rbac.authorization.k8s.io diff --git a/config/rbac/role_database.yaml b/config/rbac/role_database.yaml new file mode 100644 index 000000000..eaeef740b --- /dev/null +++ b/config/rbac/role_database.yaml @@ -0,0 +1,19 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-database +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - apiGroups: + - "" + resources: + - pods + verbs: + - patch + - delete + - get diff --git a/config/rbac/service_account_database.yaml b/config/rbac/service_account_database.yaml new file mode 100644 index 000000000..b24ae9d58 --- /dev/null +++ b/config/rbac/service_account_database.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: mongodb-database diff --git a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml index 900691a7c..89e8dbf7a 100644 --- a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_custom_volume_cr.yaml @@ -5,7 +5,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.2.6" + version: "6.0.5" security: authentication: modes: ["SCRAM"] @@ -23,9 +23,21 @@ spec: statefulSet: spec: + # Name for the service object created by the operator serviceName: example-openshift-mongodb-svc selector: {} + # Specifies a size for the data volume different from the default 10Gi + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: [ "ReadWriteOnce", "ReadWriteMany" ] + resources: + requests: + storage: 50Gi + template: + # Adds a custom volume to the pods spec: volumes: - name: custom-volume diff --git 
a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml new file mode 100644 index 000000000..45b811f10 --- /dev/null +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_hostpath.yaml @@ -0,0 +1,245 @@ +# This example deploys a 3-member ReplicaSet with HostPath volumes +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mdb0 +spec: + members: 3 + security: + authentication: + modes: + - SCRAM + statefulSet: + spec: + template: + spec: + # Hostpath volumes are owned by root + # but MongoDB containers run as non-root + # so we use an init container to change the owner of + # the directory (init containers run as root) + initContainers: + - command: + - chown + - -R + - "2000" + - /data + image: busybox + volumeMounts: + - mountPath: /data + name: data-volume + securityContext: + runAsNonRoot: false + runAsUser: 0 + runAsGroup: 0 + name: change-dir-permissions + volumeClaimTemplates: + - metadata: + name: data-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8G + selector: + matchLabels: + # We set these labels when creating the volumes + # (see below) + type: data + storageClassName: default + - metadata: + name: logs-volume + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 8G + selector: + matchLabels: + type: logs + storageClassName: default + type: ReplicaSet + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + version: 6.0.5 +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: +--- +# Here we create 6 PVs: two for each ReplicaSet member +# (one for data, one for logs) +apiVersion: v1 +items: +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-0 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-0 + type: "" + nodeAffinity: + required: + # This is just an example of a matchExpression + # Whether this field is required depends on the specifics + # of the environment the resource is deployed in + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-1 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-1 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem + status: + phase: Available +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: data + name: data-volume-2 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-data-2 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode:
Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-0 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-0 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-1 + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-1 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +- apiVersion: v1 + kind: PersistentVolume + metadata: + labels: + type: logs + name: logs-volume-2 + + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 8G + hostPath: + path: /opt/data/mongo-logs-2 + type: "" + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + persistentVolumeReclaimPolicy: Retain + storageClassName: default + volumeMode: Filesystem +kind: List +--- diff --git a/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml new file mode 100644 index 000000000..91227aa24 --- /dev/null +++ b/config/samples/arbitrary_statefulset_configuration/mongodb.com_v1_metadata.yaml @@ -0,0 +1,59 @@ +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mdb0 +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: [ "SCRAM" ] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + + statefulSet: + metadata: + annotations: + statefulSetAnnotationTest: testValue + labels: + statefulSetLabelTest: testValue + spec: + selector: + matchLabels: + podTemplateLabelTest: testValue + + template: + metadata: + annotations: + podTemplateAnnotationTest: testValue + labels: + podTemplateLabelTest: testValue + + volumeClaimTemplates: + - metadata: + name: data-volume + annotations: + pvcTemplateAnnotationTest: testValue + labels: + pvcTemplateLabelTest: testValue + +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/external_access/agent-certificate.yaml b/config/samples/external_access/agent-certificate.yaml new file mode 100644 index 000000000..c47c82a55 --- /dev/null +++ b/config/samples/external_access/agent-certificate.yaml @@ -0,0 +1,28 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: agent-certs +spec: + commonName: mms-automation-agent + dnsNames: + - automation + duration: 240h0m0s + issuerRef: # should point to your issuer + name: ca-issuer + renewBefore: 120h0m0s + secretName: agent-certs # should be equal to agentCertificateSecretRef from the MDBC resource + subject: + countries: + - US + localities: + - NY + 
organizationalUnits: + - a-1635241837-m5yb81lfnrz + organizations: + - cluster.local-agent + provinces: + - NY + usages: + - digital signature + - key encipherment + - client auth \ No newline at end of file diff --git a/config/samples/external_access/cert-manager-certificate.yaml b/config/samples/external_access/cert-manager-certificate.yaml new file mode 100644 index 000000000..6551bcda3 --- /dev/null +++ b/config/samples/external_access/cert-manager-certificate.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Certificate +metadata: + name: cert-manager-certificate +spec: + secretName: mongodb-tls + issuerRef: + name: ca-issuer + kind: Issuer + commonName: "*.-svc..svc.cluster.local" + dnsNames: + - "*.-svc..svc.cluster.local" + - + - + - diff --git a/config/samples/external_access/cert-manager-issuer.yaml b/config/samples/external_access/cert-manager-issuer.yaml new file mode 100644 index 000000000..578c343b0 --- /dev/null +++ b/config/samples/external_access/cert-manager-issuer.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: ca-issuer +spec: + ca: + secretName: ca-key-pair diff --git a/config/samples/external_access/cert-x509.yaml b/config/samples/external_access/cert-x509.yaml new file mode 100644 index 000000000..0f2eb0906 --- /dev/null +++ b/config/samples/external_access/cert-x509.yaml @@ -0,0 +1,20 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: x509-user-cert + spec: + commonName: my-x509-authenticated-user + duration: 240h0m0s + issuerRef: + name: ca-issuer + renewBefore: 120h0m0s + secretName: x509-client-cert + subject: + organizationalUnits: + - organizationalunit + organizations: + - organization + usages: + - digital signature + - client auth + \ No newline at end of file diff --git a/config/samples/external_access/external_services.yaml b/config/samples/external_access/external_services.yaml new file mode 100644 index 000000000..b14f1a673 --- /dev/null +++ b/config/samples/external_access/external_services.yaml @@ -0,0 +1,53 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: external-mongo-service-0 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -0 + ports: + - protocol: TCP + nodePort: 31181 + port: 31181 + targetPort: 27017 + + +--- +kind: Service +apiVersion: v1 +metadata: + name: external-mongo-service-1 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -1 + ports: + - nodePort: 31182 + port: 31182 + targetPort: 27017 + + +--- +kind: Service +apiVersion: v1 +metadata: + name: external-mongo-service-2 + annotations: + kube-linter.io/ignore-all: "used for sample" +spec: + type: NodePort + selector: + app: -svc + statefulset.kubernetes.io/pod-name: -2 + ports: + - nodePort: 31183 + port: 31183 + targetPort: 27017 diff --git a/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml b/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml new file mode 100644 index 000000000..4ca651ee4 --- /dev/null +++ b/config/samples/external_access/mongodb.com_v1_mongodbcommunity_cr.yaml @@ -0,0 +1,46 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + replicaSetHorizons: + - horizon: :31181 + - horizon: :31182 + - horizon: :31183 + security: + tls: + 
enabled: true + certificateKeySecretRef: + name: mongodb-tls + caConfigMapRef: + name: ca-config-map + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: + diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml new file mode 100644 index 000000000..9023cdcab --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_additional_connection_string_options.yaml @@ -0,0 +1,40 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalConnectionStringConfig: + readPreference: secondary + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + additionalConnectionStringConfig: + readPreference: primary + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml new file mode 100644 index 000000000..c6b21f546 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_additional_mongod_config_cr.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + # the additional config passed to the mongod process can be specified + # either in nested or dot notation + storage.wiredTiger.engineConfig.journalCompressor: zlib + net: + port: 40333 + + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml new file mode 100644 index 000000000..47e55aaae --- 
/dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_connection_string_secret_namespace.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + connectionStringSecretNamespace: other-namespace + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml index 37b28813e..89fe86096 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_cr.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.2.6" + version: "6.0.5" security: authentication: modes: ["SCRAM"] diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml new file mode 100644 index 000000000..8d7a274a4 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_cr_podantiaffinity.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: +# NOTE: Overwriting the "app" labelSelectors via the sts wrapper is not supported since this labelselector is not +# getting propagated to the service. 
You can add others as defined below + spec: + selector: + matchLabels: + app.kubernetes.io/name: mongodb + template: + metadata: + # label the pod which is used by the "labelSelector" in podAntiAffinity + # you can label it with some other labels as well -- make sure to change the podAntiAffinity labelSelector accordingly + labels: + app.kubernetes.io/name: mongodb + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/name + operator: In + values: + - mongodb + topologyKey: kubernetes.io/hostname + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml index dd740f2cc..4d89bfd9a 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_custom_role.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 + type: ReplicaSet - version: "4.2.6" + version: "6.0.5" security: authentication: modes: ["SCRAM"] diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml new file mode 100644 index 000000000..bb8788c36 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_disabled_process_cr.yaml @@ -0,0 +1,38 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + automationConfig: + processes: + - name: example-mongodb-1 + disabled: true + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml index 2109bcb61..b131f95df 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_ignore_unkown_users_cr.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.4.0" + version: "6.0.5" security: authentication: ignoreUnknownUsers: true # users can be added to the deployment through other sources (not through the CRD) and will not be removed by the agent.
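The ignoreUnknownUsers sample above is easiest to reason about in terms of the automation config it produces: the agent only prunes users it does not know about when the declared user set is authoritative, so ignoreUnknownUsers is conventionally mapped to the inverse of the automation config's authoritativeSet flag. Below is a minimal, self-contained Go sketch of that assumed mapping; the auth struct and buildAuth helper are hypothetical names for illustration, not the operator's actual types.

package main

import "fmt"

// auth is a hypothetical stand-in for the automation config's auth section.
type auth struct {
	// AuthoritativeSet tells the agent to remove any user that is not declared in the config.
	AuthoritativeSet bool
}

// buildAuth sketches the assumed inverse mapping: ignoring unknown users
// means the declared user list is not authoritative.
func buildAuth(ignoreUnknownUsers bool) auth {
	return auth{AuthoritativeSet: !ignoreUnknownUsers}
}

func main() {
	fmt.Printf("%+v\n", buildAuth(true))  // {AuthoritativeSet:false} -- externally created users are kept
	fmt.Printf("%+v\n", buildAuth(false)) // {AuthoritativeSet:true} -- unknown users are removed by the agent
}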
diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml index 74370ae36..3310c67fc 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_openshift_cr.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.2.6" + version: "6.0.5" security: authentication: modes: ["SCRAM"] diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml new file mode 100644 index 000000000..0a8a1566a --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_override_ac_setting.yaml @@ -0,0 +1,42 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + # to override ReplicaSet Configuration settings: + # https://www.mongodb.com/docs/manual/reference/replica-configuration/#replica-set-configuration-document-example + automationConfig: + replicaSet: + settings: + electionTimeoutMillis: 20 + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.wiredTiger.engineConfig.journalCompressor: zlib + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml new file mode 100644 index 000000000..d813ce0cf --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_prometheus.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-prometheus +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + + # You can expose metrics for Prometheus polling using the + # `prometheus` entry. + prometheus: + # Metrics endpoint HTTP Basic Auth username + username: + + # Metrics endpoint HTTP Basic Auth password + passwordSecretRef: + name: metrics-endpoint-password + + # Optional, defaults to `/metrics` + # metricsPath: /metrics + + # Optional defaults to 9216 + # port: 9216 + + # Prometheus endpoint can be configured to use HTTPS + # tlsSecretKeyRef: + # name: "" + + security: + authentication: + modes: ["SCRAM"] + + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: + +# Secret holding the prometheus metrics endpoint HTTP Password. 
+--- +apiVersion: v1 +kind: Secret +metadata: + name: metrics-endpoint-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml new file mode 100644 index 000000000..e07a434e1 --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_readiness_probe_values.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: mongodb-specify-readiness-probe-values +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + statefulSet: + spec: + template: + spec: + containers: + - name: mongodb-agent + readinessProbe: + failureThreshold: 50 + initialDelaySeconds: 10 + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml index 0b4746fe9..84f8e66af 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_specify_pod_resources.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.4.0" + version: "6.0.5" security: authentication: modes: ["SCRAM"] @@ -44,6 +44,15 @@ spec: requests: cpu: "0.2" memory: 200M + initContainers: + - name: mongodb-agent-readinessprobe + resources: + limits: + cpu: "2" + memory: 200M + requests: + cpu: "1" + memory: 100M # the user credentials will be generated from this secret # once the credentials are generated, this secret is no longer required --- diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml index 6b3ce3640..d4fbf7bc9 100644 --- a/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml +++ b/config/samples/mongodb.com_v1_mongodbcommunity_tls_cr.yaml @@ -6,7 +6,7 @@ metadata: spec: members: 3 type: ReplicaSet - version: "4.2.7" + version: "6.0.5" security: authentication: modes: ["SCRAM"] diff --git a/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml b/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml new file mode 100644 index 000000000..ad4a99c2a --- /dev/null +++ b/config/samples/mongodb.com_v1_mongodbcommunity_x509.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb +spec: + members: 3 + type: ReplicaSet + version: "6.0.5" + security: + tls: + enabled: true + certificateKeySecretRef: + name: mongodb-tls + caConfigMapRef: + name: ca-issuer + authentication: + modes: ["X509", "SCRAM"] + agentMode: "X509" + agentCertificateSecretRef: + name: my-agent-certificate + users: + - name: my-user + db: admin + passwordSecretRef: # a reference to the secret that will be used to generate the user's password + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase 
+ db: admin + - db: admin + name: readWriteAnyDatabase + scramCredentialsSecretName: my-scram + - name: "CN=my-x509-authenticated-user,OU=organizationalunit,O=organization" + db: "$external" + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + - db: admin + name: readWriteAnyDatabase + + +# the user credentials will be generated from this secret +# once the credentials are generated, this secret is no longer required +--- +apiVersion: v1 +kind: Secret +metadata: + name: my-user-password +type: Opaque +stringData: + password: password + diff --git a/controllers/construct/build_statefulset_test.go b/controllers/construct/build_statefulset_test.go index 72f438bd3..791fa5a8b 100644 --- a/controllers/construct/build_statefulset_test.go +++ b/controllers/construct/build_statefulset_test.go @@ -1,10 +1,14 @@ package construct import ( - "os" "reflect" "testing" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" + corev1 "k8s.io/api/core/v1" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" @@ -16,10 +20,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func init() { - os.Setenv(VersionUpgradeHookImageEnv, "version-upgrade-hook-image") -} - func newTestReplicaSet() mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ ObjectMeta: metav1.ObjectMeta{ @@ -29,18 +29,14 @@ func newTestReplicaSet() mdbv1.MongoDBCommunity { }, Spec: mdbv1.MongoDBCommunitySpec{ Members: 3, - Version: "4.2.2", + Version: "6.0.5", }, } } func TestMultipleCalls_DoNotCauseSideEffects(t *testing.T) { - _ = os.Setenv(MongodbRepoUrl, "repo") - _ = os.Setenv(MongodbImageEnv, "mongo") - _ = os.Setenv(AgentImageEnv, "agent-image") - mdb := newTestReplicaSet() - stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, mdb) + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) sts := &appsv1.StatefulSet{} t.Run("1st Call", func(t *testing.T) { @@ -57,24 +53,83 @@ func TestMultipleCalls_DoNotCauseSideEffects(t *testing.T) { }) } +func TestManagedSecurityContext(t *testing.T) { + t.Setenv(podtemplatespec.ManagedSecurityContextEnv, "true") + + mdb := newTestReplicaSet() + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) + + sts := &appsv1.StatefulSet{} + stsFunc(sts) + + assertStatefulSetIsBuiltCorrectly(t, mdb, sts) +} + +func TestMongod_Container(t *testing.T) { + const mongodbImageMock = "fake-mongodbImage" + c := container.New(mongodbContainer(mongodbImageMock, []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration())) + + t.Run("Has correct Env vars", func(t *testing.T) { + assert.Len(t, c.Env, 1) + assert.Equal(t, agentHealthStatusFilePathEnv, c.Env[0].Name) + assert.Equal(t, "/healthstatus/agent-health-status.json", c.Env[0].Value) + }) + + t.Run("Image is correct", func(t *testing.T) { + assert.Equal(t, mongodbImageMock, c.Image) + }) + + t.Run("Resource requirements are correct", func(t *testing.T) { + assert.Equal(t, resourcerequirements.Defaults(), c.Resources) + }) +} + +func TestMongoDBAgentCommand(t *testing.T) { + cmd := 
AutomationAgentCommand(false, mdbv1.LogLevelInfo, "testfile", 24) + baseCmd := MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logFile testfile -logLevel INFO -maxLogFileDurationHrs 24") + + cmd = AutomationAgentCommand(false, mdbv1.LogLevelInfo, "/dev/stdout", 24) + assert.Len(t, cmd, 3) + assert.Equal(t, cmd[0], "/bin/bash") + assert.Equal(t, cmd[1], "-c") + assert.Equal(t, cmd[2], baseCmd+" -logLevel INFO") +} + func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, sts *appsv1.StatefulSet) { assert.Len(t, sts.Spec.Template.Spec.Containers, 2) assert.Len(t, sts.Spec.Template.Spec.InitContainers, 2) assert.Equal(t, mdb.ServiceName(), sts.Spec.ServiceName) assert.Equal(t, mdb.Name, sts.Name) assert.Equal(t, mdb.Namespace, sts.Namespace) - assert.Equal(t, operatorServiceAccountName, sts.Spec.Template.Spec.ServiceAccountName) + assert.Equal(t, mongodbDatabaseServiceAccountName, sts.Spec.Template.Spec.ServiceAccountName) assert.Len(t, sts.Spec.Template.Spec.Containers[0].Env, 4) assert.Len(t, sts.Spec.Template.Spec.Containers[1].Env, 1) + managedSecurityContext := envvar.ReadBool(podtemplatespec.ManagedSecurityContextEnv) // nolint:forbidigo + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.SecurityContext) + assert.Equal(t, podtemplatespec.DefaultPodSecurityContext(), *sts.Spec.Template.Spec.SecurityContext) + } else { + assert.Nil(t, sts.Spec.Template.Spec.SecurityContext) + } + agentContainer := sts.Spec.Template.Spec.Containers[0] - assert.Equal(t, "agent-image", agentContainer.Image) + assert.Equal(t, "fake-agentImage", agentContainer.Image) probe := agentContainer.ReadinessProbe assert.True(t, reflect.DeepEqual(probes.New(DefaultReadiness()), *probe)) assert.Equal(t, probes.New(DefaultReadiness()).FailureThreshold, probe.FailureThreshold) - assert.Equal(t, int32(5), probe.InitialDelaySeconds) - assert.Len(t, agentContainer.VolumeMounts, 6) + assert.Len(t, agentContainer.VolumeMounts, 7) assert.NotNil(t, agentContainer.ReadinessProbe) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.Containers[0].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.Containers[0].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "agent-scripts") assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "automation-config") @@ -84,8 +139,14 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, assertContainsVolumeMountWithName(t, agentContainer.VolumeMounts, "my-rs-keyfile") mongodContainer := sts.Spec.Template.Spec.Containers[1] - assert.Equal(t, "repo/mongo:4.2.2", mongodContainer.Image) - assert.Len(t, mongodContainer.VolumeMounts, 5) + assert.Equal(t, "fake-mongodbImage", mongodContainer.Image) + assert.Len(t, mongodContainer.VolumeMounts, 6) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.Containers[1].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.Containers[1].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "data-volume") assertContainsVolumeMountWithName(t, mongodContainer.VolumeMounts, "healthstatus") @@ 
-95,8 +156,14 @@ func assertStatefulSetIsBuiltCorrectly(t *testing.T, mdb mdbv1.MongoDBCommunity, initContainer := sts.Spec.Template.Spec.InitContainers[0] assert.Equal(t, versionUpgradeHookName, initContainer.Name) - assert.Equal(t, "version-upgrade-hook-image", initContainer.Image) + assert.Equal(t, "fake-versionUpgradeHookImage", initContainer.Image) assert.Len(t, initContainer.VolumeMounts, 1) + if !managedSecurityContext { + assert.NotNil(t, sts.Spec.Template.Spec.InitContainers[0].SecurityContext) + assert.Equal(t, container.DefaultSecurityContext(), *sts.Spec.Template.Spec.InitContainers[0].SecurityContext) + } else { + assert.Nil(t, agentContainer.SecurityContext) + } } func assertContainsVolumeMountWithName(t *testing.T, mounts []corev1.VolumeMount, name string) { diff --git a/controllers/construct/mongodbstatefulset.go b/controllers/construct/mongodbstatefulset.go index 38f00c106..ec94a6eac 100644 --- a/controllers/construct/mongodbstatefulset.go +++ b/controllers/construct/mongodbstatefulset.go @@ -3,7 +3,9 @@ package construct import ( "fmt" "os" - "strings" + "strconv" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" @@ -12,41 +14,51 @@ import ( "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/probes" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/resourcerequirements" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/envvar" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" corev1 "k8s.io/api/core/v1" ) +var ( + OfficialMongodbRepoUrls = []string{"docker.io/mongodb", "quay.io/mongodb"} +) + +// Environment variables used to configure the MongoDB StatefulSet. 
+const ( + MongodbRepoUrlEnv = "MONGODB_REPO_URL" + MongodbImageEnv = "MONGODB_IMAGE" + MongoDBImageTypeEnv = "MDB_IMAGE_TYPE" + AgentImageEnv = "AGENT_IMAGE" + VersionUpgradeHookImageEnv = "VERSION_UPGRADE_HOOK_IMAGE" + ReadinessProbeImageEnv = "READINESS_PROBE_IMAGE" +) + const ( AgentName = "mongodb-agent" MongodbName = "mongod" - versionUpgradeHookName = "mongod-posthook" - ReadinessProbeContainerName = "mongodb-agent-readinessprobe" - dataVolumeName = "data-volume" - logVolumeName = "logs-volume" - readinessProbePath = "/opt/scripts/readinessprobe" - agentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH" - clusterFilePath = "/var/lib/automation/config/cluster-config.json" - operatorServiceAccountName = "mongodb-kubernetes-operator" - agentHealthStatusFilePathValue = "/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json" + DefaultImageType = "ubi8" - MongodbRepoUrl = "MONGODB_REPO_URL" + versionUpgradeHookName = "mongod-posthook" + ReadinessProbeContainerName = "mongodb-agent-readinessprobe" + readinessProbePath = "/opt/scripts/readinessprobe" + agentHealthStatusFilePathEnv = "AGENT_STATUS_FILEPATH" + clusterFilePath = "/var/lib/automation/config/cluster-config.json" + mongodbDatabaseServiceAccountName = "mongodb-database" + agentHealthStatusFilePathValue = "/var/log/mongodb-mms-automation/healthstatus/agent-health-status.json" + + OfficialMongodbEnterpriseServerImageName = "mongodb-enterprise-server" headlessAgentEnv = "HEADLESS_AGENT" podNamespaceEnv = "POD_NAMESPACE" automationConfigEnv = "AUTOMATION_CONFIG_MAP" - AgentImageEnv = "AGENT_IMAGE" - MongodbImageEnv = "MONGODB_IMAGE" - VersionUpgradeHookImageEnv = "VERSION_UPGRADE_HOOK_IMAGE" - ReadinessProbeImageEnv = "READINESS_PROBE_IMAGE" - ManagedSecurityContextEnv = "MANAGED_SECURITY_CONTEXT" + MongoDBAssumeEnterpriseEnv = "MDB_ASSUME_ENTERPRISE" - automationconfFilePath = "/data/automation-mongod.conf" - keyfileFilePath = "/var/lib/mongodb-mms-automation/authentication/keyfile" + automationMongodConfFileName = "automation-mongod.conf" + keyfileFilePath = "/var/lib/mongodb-mms-automation/authentication/keyfile" automationAgentOptions = " -skipMongoStart -noDaemonize -useLocalMongoDbTools" @@ -59,6 +71,18 @@ export NSS_WRAPPER_PASSWD=/tmp/passwd export LD_PRELOAD=libnss_wrapper.so export NSS_WRAPPER_GROUP=/etc/group fi +` + //nolint:gosec //The credentials path is hardcoded in the container. + MongodbUserCommandWithAPIKeyExport = `current_uid=$(id -u) +AGENT_API_KEY="$(cat /mongodb-automation/agent-api-key/agentApiKey)" +declare -r current_uid +if ! grep -q "${current_uid}" /etc/passwd ; then +sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd +echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd +export NSS_WRAPPER_PASSWD=/tmp/passwd +export LD_PRELOAD=libnss_wrapper.so +export NSS_WRAPPER_GROUP=/etc/group +fi ` ) @@ -70,7 +94,7 @@ type MongoDBStatefulSetOwner interface { GetName() string // GetNamespace returns the namespace the resource is defined in. GetNamespace() string - // GetMongoDBVersion returns the version of MongoDB to be used for this resource + // GetMongoDBVersion returns the version of MongoDB to be used for this resource. GetMongoDBVersion() string // AutomationConfigSecretName returns the name of the secret which will contain the automation config. 
AutomationConfigSecretName() string @@ -78,14 +102,30 @@ type MongoDBStatefulSetOwner interface { GetUpdateStrategyType() appsv1.StatefulSetUpdateStrategyType // HasSeparateDataAndLogsVolumes returns whether or not the volumes for data and logs would need to be different. HasSeparateDataAndLogsVolumes() bool - // GetAgentScramKeyfileSecretNamespacedName returns the NamespacedName of the secret which stores the keyfile for the agent. + // GetAgentKeyfileSecretNamespacedName returns the NamespacedName of the secret which stores the keyfile for the agent. GetAgentKeyfileSecretNamespacedName() types.NamespacedName + // DataVolumeName returns the name that the data volume should have. + DataVolumeName() string + // LogsVolumeName returns the name that the logs volume should have. + LogsVolumeName() string + // GetAgentLogLevel returns the log level for the MongoDB automation agent. + GetAgentLogLevel() mdbv1.LogLevel + // GetAgentLogFile returns the log file for the MongoDB automation agent. + GetAgentLogFile() string + // GetAgentMaxLogFileDurationHours returns the number of hours after which the log file should be rolled. + GetAgentMaxLogFileDurationHours() int + + // GetMongodConfiguration returns the MongoDB configuration for each member. + GetMongodConfiguration() mdbv1.MongodConfiguration + + // NeedsAutomationConfigVolume returns whether the statefulset needs to have a volume for the automationconfig. + NeedsAutomationConfigVolume() bool } // BuildMongoDBReplicaSetStatefulSetModificationFunction builds the parts of the replica set that are common between every resource that implements // MongoDBStatefulSetOwner. // It doesn't configure TLS or additional containers/env vars that the statefulset might need. -func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler) statefulset.Modification { +func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string, withInitContainers bool) statefulset.Modification { labels := map[string]string{ "app": mdb.ServiceName(), } @@ -97,48 +137,89 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe agentHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/var/log/mongodb-mms-automation/healthstatus") mongodHealthStatusVolumeMount := statefulset.CreateVolumeMount(healthStatusVolume.Name, "/healthstatus") - // hooks volume is only required on the mongod pod. - hooksVolume := statefulset.CreateVolumeFromEmptyDir("hooks") - hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) + hooksVolume := corev1.Volume{} + scriptsVolume := corev1.Volume{} + upgradeInitContainer := podtemplatespec.NOOP() + readinessInitContainer := podtemplatespec.NOOP() - // scripts volume is only required on the mongodb-agent pod.
- scriptsVolume := statefulset.CreateVolumeFromEmptyDir("agent-scripts") - scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) - - automationConfigVolume := statefulset.CreateVolumeFromSecret("automation-config", mdb.AutomationConfigSecretName()) - automationConfigVolumeMount := statefulset.CreateVolumeMount(automationConfigVolume.Name, "/var/lib/automation/config", statefulset.WithReadOnly(true)) + // tmp volume is required by the mongodb-agent and mongod + tmpVolume := statefulset.CreateVolumeFromEmptyDir("tmp") + tmpVolumeMount := statefulset.CreateVolumeMount(tmpVolume.Name, "/tmp", statefulset.WithReadOnly(false)) keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName() keyFileVolume := statefulset.CreateVolumeFromEmptyDir(keyFileNsName.Name) keyFileVolumeVolumeMount := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false)) keyFileVolumeVolumeMountMongod := statefulset.CreateVolumeMount(keyFileVolume.Name, "/var/lib/mongodb-mms-automation/authentication", statefulset.WithReadOnly(false)) - mongodbAgentVolumeMounts := []corev1.VolumeMount{agentHealthStatusVolumeMount, automationConfigVolumeMount, scriptsVolumeMount, keyFileVolumeVolumeMount} - mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, hooksVolumeMount, keyFileVolumeVolumeMountMongod} + mongodbAgentVolumeMounts := []corev1.VolumeMount{agentHealthStatusVolumeMount, keyFileVolumeVolumeMount, tmpVolumeMount} + + automationConfigVolumeFunc := podtemplatespec.NOOP() + if mdb.NeedsAutomationConfigVolume() { + automationConfigVolume := statefulset.CreateVolumeFromSecret("automation-config", mdb.AutomationConfigSecretName()) + automationConfigVolumeFunc = podtemplatespec.WithVolume(automationConfigVolume) + automationConfigVolumeMount := statefulset.CreateVolumeMount(automationConfigVolume.Name, "/var/lib/automation/config", statefulset.WithReadOnly(true)) + mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, automationConfigVolumeMount) + } + mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, keyFileVolumeVolumeMountMongod, tmpVolumeMount} + + hooksVolumeMod := podtemplatespec.NOOP() + scriptsVolumeMod := podtemplatespec.NOOP() + + // This is temporary code; + // once we make the operator fully deploy static workloads, we will remove those init containers. + if withInitContainers { + // hooks volume is only required on the mongod pod. + hooksVolume = statefulset.CreateVolumeFromEmptyDir("hooks") + hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) + + // scripts volume is only required on the mongodb-agent pod. 
+ scriptsVolume = statefulset.CreateVolumeFromEmptyDir("agent-scripts") + scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) + + upgradeInitContainer = podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount}, versionUpgradeHookImage)) + readinessInitContainer = podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount}, readinessProbeImage)) + scriptsVolumeMod = podtemplatespec.WithVolume(scriptsVolume) + hooksVolumeMod = podtemplatespec.WithVolume(hooksVolume) + + mongodVolumeMounts = append(mongodVolumeMounts, hooksVolumeMount) + mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, scriptsVolumeMount) + } + dataVolumeClaim := statefulset.NOOP() logVolumeClaim := statefulset.NOOP() singleModeVolumeClaim := func(s *appsv1.StatefulSet) {} if mdb.HasSeparateDataAndLogsVolumes() { - logVolumeMount := statefulset.CreateVolumeMount(logVolumeName, automationconfig.DefaultAgentLogPath) - dataVolumeMount := statefulset.CreateVolumeMount(dataVolumeName, "/data") - dataVolumeClaim = statefulset.WithVolumeClaim(dataVolumeName, dataPvc()) - logVolumeClaim = statefulset.WithVolumeClaim(logVolumeName, logsPvc()) + logVolumeMount := statefulset.CreateVolumeMount(mdb.LogsVolumeName(), automationconfig.DefaultAgentLogPath) + dataVolumeMount := statefulset.CreateVolumeMount(mdb.DataVolumeName(), mdb.GetMongodConfiguration().GetDBDataDir()) + dataVolumeClaim = statefulset.WithVolumeClaim(mdb.DataVolumeName(), dataPvc(mdb.DataVolumeName())) + logVolumeClaim = statefulset.WithVolumeClaim(mdb.LogsVolumeName(), logsPvc(mdb.LogsVolumeName())) mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, dataVolumeMount, logVolumeMount) mongodVolumeMounts = append(mongodVolumeMounts, dataVolumeMount, logVolumeMount) } else { mounts := []corev1.VolumeMount{ - statefulset.CreateVolumeMount(dataVolumeName, "/data", statefulset.WithSubPath("data")), - statefulset.CreateVolumeMount(dataVolumeName, automationconfig.DefaultAgentLogPath, statefulset.WithSubPath("logs")), + statefulset.CreateVolumeMount(mdb.DataVolumeName(), mdb.GetMongodConfiguration().GetDBDataDir(), statefulset.WithSubPath("data")), + statefulset.CreateVolumeMount(mdb.DataVolumeName(), automationconfig.DefaultAgentLogPath, statefulset.WithSubPath("logs")), } mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, mounts...) mongodVolumeMounts = append(mongodVolumeMounts, mounts...) 
- singleModeVolumeClaim = statefulset.WithVolumeClaim(dataVolumeName, dataPvc()) + singleModeVolumeClaim = statefulset.WithVolumeClaim(mdb.DataVolumeName(), dataPvc(mdb.DataVolumeName())) + } + + podSecurityContext, _ := podtemplatespec.WithDefaultSecurityContextsModifications() + + agentLogLevel := mdbv1.LogLevelInfo + if mdb.GetAgentLogLevel() != "" { + agentLogLevel = mdb.GetAgentLogLevel() + } + + agentLogFile := automationconfig.DefaultAgentLogFile + if mdb.GetAgentLogFile() != "" { + agentLogFile = mdb.GetAgentLogFile() } - podSecurityContext := podtemplatespec.NOOP() - managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) - if !managedSecurityContext { - podSecurityContext = podtemplatespec.WithSecurityContext(podtemplatespec.DefaultPodSecurityContext()) + agentMaxLogFileDurationHours := automationconfig.DefaultAgentMaxLogFileDurationHours + if mdb.GetAgentMaxLogFileDurationHours() != 0 { + agentMaxLogFileDurationHours = mdb.GetAgentMaxLogFileDurationHours() } return statefulset.Apply( @@ -157,42 +238,56 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe podSecurityContext, podtemplatespec.WithPodLabels(labels), podtemplatespec.WithVolume(healthStatusVolume), - podtemplatespec.WithVolume(hooksVolume), - podtemplatespec.WithVolume(automationConfigVolume), - podtemplatespec.WithVolume(scriptsVolume), + automationConfigVolumeFunc, + hooksVolumeMod, + scriptsVolumeMod, + podtemplatespec.WithVolume(tmpVolume), podtemplatespec.WithVolume(keyFileVolume), - podtemplatespec.WithServiceAccount(operatorServiceAccountName), - podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts)), - podtemplatespec.WithContainer(MongodbName, mongodbContainer(mdb.GetMongoDBVersion(), mongodVolumeMounts)), - podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount})), - podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount})), + podtemplatespec.WithServiceAccount(mongodbDatabaseServiceAccountName), + podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts, agentLogLevel, agentLogFile, agentMaxLogFileDurationHours, agentImage)), + podtemplatespec.WithContainer(MongodbName, mongodbContainer(mongodbImage, mongodVolumeMounts, mdb.GetMongodConfiguration())), + upgradeInitContainer, + readinessInitContainer, ), )) } func BaseAgentCommand() string { - return "agent/mongodb-agent -cluster=" + clusterFilePath + " -healthCheckFilePath=" + agentHealthStatusFilePathValue + " -serveStatusPort=5000" + return "agent/mongodb-agent -healthCheckFilePath=" + agentHealthStatusFilePathValue + " -serveStatusPort=5000" } -func AutomationAgentCommand() []string { - return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + automationAgentOptions} -} +// AutomationAgentCommand: withAgentAPIKeyExport determines whether we deploy this agent with the agent API key exported; +// the exported key can be used to register the agent with OM. +func AutomationAgentCommand(withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int) []string { + // This is somewhat undocumented at https://www.mongodb.com/docs/ops-manager/current/reference/mongodb-agent-settings/ + // Not setting the -logFile option makes the mongodb-agent log to stdout. Setting -logFile /dev/stdout will result in + // an error, because the agent tries to open /dev/stdout-verbose and still tries to do log rotation. + // To stay consistent with the old behavior, not setting the logFile in the config does not log to stdout but keeps + // the default logFile as defined by DefaultAgentLogFile. Setting the logFile explicitly to "/dev/stdout" will log to stdout. + agentLogOptions := "" + if logFile == "/dev/stdout" { + agentLogOptions += " -logLevel " + string(logLevel) + } else { + agentLogOptions += " -logFile " + logFile + " -logLevel " + string(logLevel) + " -maxLogFileDurationHrs " + strconv.Itoa(maxLogFileDurationHours) + } -func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount) container.Modification { - securityContext := container.NOOP() - managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) - if !managedSecurityContext { - securityContext = container.WithSecurityContext(container.DefaultSecurityContext()) + if withAgentAPIKeyExport { + return []string{"/bin/bash", "-c", MongodbUserCommandWithAPIKeyExport + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} } + return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} +}
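Since the -logFile handling above is easy to misread, here is a minimal, self-contained sketch of just that branch; agentLogOptions and the example paths are illustrative names, and the real logic is inlined in AutomationAgentCommand above.

package main

import (
	"fmt"
	"strconv"
)

// agentLogOptions mirrors the branch in AutomationAgentCommand: logging to
// /dev/stdout drops -logFile and -maxLogFileDurationHrs, since the agent
// cannot rotate stdout.
func agentLogOptions(logLevel, logFile string, maxLogFileDurationHours int) string {
	if logFile == "/dev/stdout" {
		return " -logLevel " + logLevel
	}
	return " -logFile " + logFile + " -logLevel " + logLevel +
		" -maxLogFileDurationHrs " + strconv.Itoa(maxLogFileDurationHours)
}

func main() {
	fmt.Println(agentLogOptions("INFO", "/dev/stdout", 24))        //  -logLevel INFO
	fmt.Println(agentLogOptions("INFO", "/var/log/agent.log", 24)) //  -logFile /var/log/agent.log -logLevel INFO -maxLogFileDurationHrs 24
}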
+ +func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int, agentImage string) container.Modification { + _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(AgentName), - container.WithImage(os.Getenv(AgentImageEnv)), + container.WithImage(agentImage), container.WithImagePullPolicy(corev1.PullAlways), container.WithReadinessProbe(DefaultReadiness()), container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithVolumeMounts(volumeMounts), - securityContext, - container.WithCommand(AutomationAgentCommand()), + container.WithCommand(AutomationAgentCommand(false, logLevel, logFile, maxLogFileDurationHours)), + containerSecurityContext, container.WithEnvs( corev1.EnvVar{ Name: headlessAgentEnv, @@ -219,25 +314,28 @@ func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []cor ) } -func versionUpgradeHookInit(volumeMount []corev1.VolumeMount) container.Modification { +func versionUpgradeHookInit(volumeMount []corev1.VolumeMount, versionUpgradeHookImage string) container.Modification { + _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(versionUpgradeHookName), container.WithCommand([]string{"cp", "version-upgrade-hook", "/hooks/version-upgrade"}), - container.WithImage(os.Getenv(VersionUpgradeHookImageEnv)), + container.WithImage(versionUpgradeHookImage), + container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithImagePullPolicy(corev1.PullAlways), container.WithVolumeMounts(volumeMount), + containerSecurityContext, ) } func DefaultReadiness() probes.Modification { return probes.Apply( probes.WithExecCommand([]string{readinessProbePath}), - probes.WithFailureThreshold(60), // TODO: this value needs further consideration + probes.WithFailureThreshold(40), probes.WithInitialDelaySeconds(5), ) } -func dataPvc() persistentvolumeclaim.Modification { +func dataPvc(dataVolumeName string) persistentvolumeclaim.Modification { 
return persistentvolumeclaim.Apply( persistentvolumeclaim.WithName(dataVolumeName), persistentvolumeclaim.WithAccessModes(corev1.ReadWriteOnce), @@ -245,9 +343,9 @@ func dataPvc() persistentvolumeclaim.Modification { ) } -func logsPvc() persistentvolumeclaim.Modification { +func logsPvc(logsVolumeName string) persistentvolumeclaim.Modification { return persistentvolumeclaim.Apply( - persistentvolumeclaim.WithName(logVolumeName), + persistentvolumeclaim.WithName(logsVolumeName), persistentvolumeclaim.WithAccessModes(corev1.ReadWriteOnce), persistentvolumeclaim.WithResourceRequests(resourcerequirements.BuildStorageRequirements("2G")), ) @@ -255,37 +353,34 @@ func logsPvc() persistentvolumeclaim.Modification { // readinessProbeInit returns a modification function which will add the readiness probe container. // this container will copy the readiness probe binary into the /opt/scripts directory. -func readinessProbeInit(volumeMount []corev1.VolumeMount) container.Modification { +func readinessProbeInit(volumeMount []corev1.VolumeMount, readinessProbeImage string) container.Modification { + _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(ReadinessProbeContainerName), container.WithCommand([]string{"cp", "/probes/readinessprobe", "/opt/scripts/readinessprobe"}), - container.WithImage(os.Getenv(ReadinessProbeImageEnv)), + container.WithImage(readinessProbeImage), container.WithImagePullPolicy(corev1.PullAlways), container.WithVolumeMounts(volumeMount), + container.WithResourceRequirements(resourcerequirements.Defaults()), + containerSecurityContext, ) } -func getMongoDBImage(version string) string { - repoUrl := os.Getenv(MongodbRepoUrl) - if strings.HasSuffix(repoUrl, "/") { - repoUrl = strings.TrimRight(repoUrl, "/") - } - mongoImageName := os.Getenv(MongodbImageEnv) - return fmt.Sprintf("%s/%s:%s", repoUrl, mongoImageName, version) -} - -func mongodbContainer(version string, volumeMounts []corev1.VolumeMount) container.Modification { +func mongodbContainer(mongodbImage string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration) container.Modification { + filePath := additionalMongoDBConfig.GetDBDataDir() + "/" + automationMongodConfFileName mongoDbCommand := fmt.Sprintf(` -#run post-start hook to handle version changes -/hooks/version-upgrade +if [ -e "/hooks/version-upgrade" ]; then + #run post-start hook to handle version changes (if exists) + /hooks/version-upgrade +fi # wait for config and keyfile to be created by the agent - while ! [ -f %s -a -f %s ]; do sleep 3 ; done ; sleep 2 ; - +while ! 
[ -f %s -a -f %s ]; do sleep 3 ; done ; sleep 2 ; # start mongod with this configuration exec mongod -f %s; -`, automationconfFilePath, keyfileFilePath, automationconfFilePath) + +`, filePath, keyfileFilePath, filePath) containerCommand := []string{ "/bin/sh", @@ -293,25 +388,49 @@ exec mongod -f %s; mongoDbCommand, } - securityContext := container.NOOP() - managedSecurityContext := envvar.ReadBool(ManagedSecurityContextEnv) - if !managedSecurityContext { - securityContext = container.WithSecurityContext(container.DefaultSecurityContext()) - } + _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( container.WithName(MongodbName), - container.WithImage(getMongoDBImage(version)), + container.WithImage(mongodbImage), container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithCommand(containerCommand), + // The official image provides both CMD and ENTRYPOINT. We're reusing the former and need to replace + // the latter with an empty string. + container.WithArgs([]string{""}), + containerSecurityContext, container.WithEnvs( - corev1.EnvVar{ - Name: agentHealthStatusFilePathEnv, - Value: "/healthstatus/agent-health-status.json", - }, + collectEnvVars()..., ), container.WithVolumeMounts(volumeMounts), - - securityContext, ) } + +// Function to collect and return the environment variables to be used in the +// MongoDB container. +func collectEnvVars() []corev1.EnvVar { + var envVars []corev1.EnvVar + + envVars = append(envVars, corev1.EnvVar{ + Name: agentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }) + + addEnvVarIfSet := func(name string) { + value := os.Getenv(name) // nolint:forbidigo + if value != "" { + envVars = append(envVars, corev1.EnvVar{ + Name: name, + Value: value, + }) + } + } + + addEnvVarIfSet(config.ReadinessProbeLoggerBackups) + addEnvVarIfSet(config.ReadinessProbeLoggerMaxSize) + addEnvVarIfSet(config.ReadinessProbeLoggerMaxAge) + addEnvVarIfSet(config.ReadinessProbeLoggerCompress) + addEnvVarIfSet(config.WithAgentFileLogging) + + return envVars +} diff --git a/controllers/construct/mongodbstatefulset_test.go b/controllers/construct/mongodbstatefulset_test.go new file mode 100644 index 000000000..67d78174b --- /dev/null +++ b/controllers/construct/mongodbstatefulset_test.go @@ -0,0 +1,97 @@ +package construct + +import ( + "github.com/mongodb/mongodb-kubernetes-operator/pkg/readiness/config" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "testing" +) + +func TestCollectEnvVars(t *testing.T) { + tests := []struct { + name string + envSetup map[string]string + expectedEnv []corev1.EnvVar + }{ + { + name: "Basic env vars set", + envSetup: map[string]string{ + config.ReadinessProbeLoggerBackups: "3", + config.ReadinessProbeLoggerMaxSize: "10M", + config.ReadinessProbeLoggerMaxAge: "7", + config.WithAgentFileLogging: "enabled", + }, + expectedEnv: []corev1.EnvVar{ + { + Name: config.AgentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }, + { + Name: config.ReadinessProbeLoggerBackups, + Value: "3", + }, + { + Name: config.ReadinessProbeLoggerMaxSize, + Value: "10M", + }, + { + Name: config.ReadinessProbeLoggerMaxAge, + Value: "7", + }, + { + Name: config.WithAgentFileLogging, + Value: "enabled", + }, + }, + }, + { + name: "Additional env var set", + envSetup: map[string]string{ + config.ReadinessProbeLoggerBackups: "3", + config.ReadinessProbeLoggerMaxSize: "10M", + config.ReadinessProbeLoggerMaxAge: "7", + 
config.ReadinessProbeLoggerCompress: "true", + config.WithAgentFileLogging: "enabled", + }, + expectedEnv: []corev1.EnvVar{ + { + Name: config.AgentHealthStatusFilePathEnv, + Value: "/healthstatus/agent-health-status.json", + }, + { + Name: config.ReadinessProbeLoggerBackups, + Value: "3", + }, + { + Name: config.ReadinessProbeLoggerMaxSize, + Value: "10M", + }, + { + Name: config.ReadinessProbeLoggerMaxAge, + Value: "7", + }, + { + Name: config.ReadinessProbeLoggerCompress, + Value: "true", + }, + { + Name: config.WithAgentFileLogging, + Value: "enabled", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup environment variables + for key, value := range tt.envSetup { + t.Setenv(key, value) + } + + actualEnvVars := collectEnvVars() + + assert.EqualValues(t, tt.expectedEnv, actualEnvVars) + }) + } +} diff --git a/controllers/mongodb_cleanup.go b/controllers/mongodb_cleanup.go new file mode 100644 index 000000000..d13b0426d --- /dev/null +++ b/controllers/mongodb_cleanup.go @@ -0,0 +1,122 @@ +package controllers + +import ( + "context" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" +) + +// cleanupPemSecret cleans up the old pem secret generated for the agent certificate. +func (r *ReplicaSetReconciler) cleanupPemSecret(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + if currentMDBSpec.GetAgentAuthMode() == lastAppliedMDBSpec.GetAgentAuthMode() { + return + } + + if !currentMDBSpec.IsAgentX509() && lastAppliedMDBSpec.IsAgentX509() { + agentCertSecret := lastAppliedMDBSpec.GetAgentCertificateRef() + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Namespace: namespace, + Name: agentCertSecret + "-pem", + }); err != nil { + if apiErrors.IsNotFound(err) { + r.log.Debugf("Agent pem file secret %s-pem was already deleted", agentCertSecret) + } else { + r.log.Warnf("Could not cleanup old agent pem file %s-pem: %s", agentCertSecret, err) + } + } + } +} + +// cleanupScramSecrets cleans up old scram secrets based on the last successfully applied MongoDB spec. +func (r *ReplicaSetReconciler) cleanupScramSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string) { + secretsToDelete := getScramSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec) + + for _, s := range secretsToDelete { + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Name: s, + Namespace: namespace, + }); err != nil { + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) + } else { + r.log.Debugf("Successfully cleaned up secret: %s", s) + } + } +}
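Both cleanup helpers defined further down in this file (getScramSecretsToDelete and getConnectionStringSecretsToDelete) use the same map-diff technique: index the current users by (db, name), then walk the last-applied users and collect any secret that is no longer referenced or whose name changed. A condensed, self-contained sketch of the idea, with userKey and staleSecrets as illustrative names:

package main

import "fmt"

// userKey identifies a user the same way the helpers do: by db and name.
type userKey struct{ db, name string }

// staleSecrets returns the previously used secret names that the current spec
// no longer references: either the user disappeared, or its secret name changed.
func staleSecrets(current, lastApplied map[userKey]string) []string {
	var out []string
	for key, oldSecret := range lastApplied {
		if newSecret, ok := current[key]; !ok || newSecret != oldSecret {
			out = append(out, oldSecret)
		}
	}
	return out
}

func main() {
	lastApplied := map[userKey]string{{db: "admin", name: "testUser"}: "scram-credentials"}
	current := map[userKey]string{{db: "admin", name: "testUser"}: "scram-credentials-2"}
	fmt.Println(staleSecrets(current, lastApplied)) // [scram-credentials]
}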
 +// cleanupConnectionStringSecrets cleans up old connection string secrets based on the last successfully applied MongoDB spec. +func (r *ReplicaSetReconciler) cleanupConnectionStringSecrets(ctx context.Context, currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, namespace string, resourceName string) { + secretsToDelete := getConnectionStringSecretsToDelete(currentMDBSpec, lastAppliedMDBSpec, resourceName) + + for _, s := range secretsToDelete { + if err := r.client.DeleteSecret(ctx, types.NamespacedName{ + Name: s, + Namespace: namespace, + }); err != nil { + r.log.Warnf("Could not cleanup old secret %s: %s", s, err) + } else { + r.log.Debugf("Successfully cleaned up secret: %s", s) + } + } +} + +func getScramSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec) []string { + type user struct { + db string + name string + } + m := map[user]string{} + var secretsToDelete []string + + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetScramCredentialsSecretName() + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + currentScramSecretName, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { // not used anymore + secretsToDelete = append(secretsToDelete, mongoDBUser.GetScramCredentialsSecretName()) + } else if currentScramSecretName != mongoDBUser.GetScramCredentialsSecretName() { // has changed + secretsToDelete = append(secretsToDelete, mongoDBUser.GetScramCredentialsSecretName()) + } + } + return secretsToDelete +} + +func getConnectionStringSecretsToDelete(currentMDBSpec mdbv1.MongoDBCommunitySpec, lastAppliedMDBSpec mdbv1.MongoDBCommunitySpec, resourceName string) []string { + type user struct { + db string + name string + } + m := map[user]string{} + var secretsToDelete []string + + for _, mongoDBUser := range currentMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] = mongoDBUser.GetConnectionStringSecretName(resourceName) + } + + for _, mongoDBUser := range lastAppliedMDBSpec.Users { + if mongoDBUser.DB == constants.ExternalDB { + continue + } + currentConnectionStringSecretName, ok := m[user{db: mongoDBUser.DB, name: mongoDBUser.Name}] + if !ok { // user was removed + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } else if currentConnectionStringSecretName != mongoDBUser.GetConnectionStringSecretName(resourceName) { + // this happens when a new ConnectionStringSecretName was set for the old user + secretsToDelete = append(secretsToDelete, mongoDBUser.GetConnectionStringSecretName(resourceName)) + } + } + return secretsToDelete +} diff --git a/controllers/mongodb_cleanup_test.go b/controllers/mongodb_cleanup_test.go new file mode 100644 index 000000000..0123f63ee --- /dev/null +++ b/controllers/mongodb_cleanup_test.go @@ -0,0 +1,242 @@ +package controllers + +import ( + "context" + "testing" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + kubeClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestReplicaSetReconcilerCleanupScramSecrets(t *testing.T) { + lastApplied := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + 
}, + ScramCredentialsSecretName: "scram-credentials", + }) + + t.Run("no change same resource", func(t *testing.T) { + actual := getScramSecretsToDelete(lastApplied.Spec, lastApplied.Spec) + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("new user new secret", func(t *testing.T) { + current := newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials", + }, + mdbv1.MongoDBUser{ + Name: "newUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials-2", + }, + ) + + actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec) + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("old user new secret", func(t *testing.T) { + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials-2", + }) + + expected := []string{"scram-credentials-scram-credentials"} + actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec) + + assert.Equal(t, expected, actual) + }) + + t.Run("removed one user and changed secret of the other", func(t *testing.T) { + lastApplied = newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials", + }, + mdbv1.MongoDBUser{ + Name: "anotherUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "another-scram-credentials", + }, + ) + + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials-2", + }) + + expected := []string{"scram-credentials-scram-credentials", "another-scram-credentials-scram-credentials"} + actual := getScramSecretsToDelete(current.Spec, lastApplied.Spec) + + assert.Equal(t, expected, actual) + }) + +} +func TestReplicaSetReconcilerCleanupPemSecret(t *testing.T) { + ctx := context.Background() + lastAppliedSpec := mdbv1.MongoDBCommunitySpec{ + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"X509"}, + }, + }, + } + mdb := mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-rs", + Namespace: "my-ns", + Annotations: map[string]string{}, + }, + Spec: mdbv1.MongoDBCommunitySpec{ + Members: 3, + Version: "4.2.2", + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + TLS: mdbv1.TLS{ + Enabled: true, + CaConfigMap: &corev1.LocalObjectReference{ + Name: "caConfigMap", + }, + CaCertificateSecret: &corev1.LocalObjectReference{ + Name: "certificateKeySecret", + }, + CertificateKeySecret: corev1.LocalObjectReference{ + Name: "certificateKeySecret", + }, + }, + }, + }, + } + + mgr := kubeClient.NewManager(ctx, &mdb) + + client := kubeClient.NewClient(mgr.GetClient()) + err := createAgentCertPemSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + secret, err := r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName()) + assert.NoError(t, err) + 
assert.Equal(t, "CERT", string(secret.Data["tls.crt"])) + assert.Equal(t, "KEY", string(secret.Data["tls.key"])) + + r.cleanupPemSecret(ctx, mdb.Spec, lastAppliedSpec, "my-ns") + + _, err = r.client.GetSecret(ctx, mdb.AgentCertificatePemSecretNamespacedName()) + assert.Error(t, err) +} + +func TestReplicaSetReconcilerCleanupConnectionStringSecrets(t *testing.T) { + lastApplied := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }) + + t.Run("no change same resource", func(t *testing.T) { + actual := getConnectionStringSecretsToDelete(lastApplied.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("new user does not require existing user cleanup", func(t *testing.T) { + current := newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }, + mdbv1.MongoDBUser{ + Name: "newUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }, + ) + + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, []string(nil), actual) + }) + + t.Run("old user new secret", func(t *testing.T) { + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }) + + expected := []string{"connection-string-secret"} + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, expected, actual) + }) + + t.Run("removed one user and changed secret of the other", func(t *testing.T) { + lastApplied = newScramReplicaSet( + mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret", + }, + mdbv1.MongoDBUser{ + Name: "anotherUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-2", + }, + ) + + current := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testUser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ConnectionStringSecretName: "connection-string-secret-1", + }) + + expected := []string{"connection-string-secret", "connection-string-secret-2"} + actual := getConnectionStringSecretsToDelete(current.Spec, lastApplied.Spec, "my-rs") + + assert.Equal(t, expected, actual) + }) + +} diff --git a/controllers/mongodb_status_options.go b/controllers/mongodb_status_options.go index 105596ba8..5961bdcbd 100644 --- a/controllers/mongodb_status_options.go +++ b/controllers/mongodb_status_options.go @@ -33,7 +33,7 @@ func (o *optionBuilder) GetOptions() []status.Option { return o.options } -// options returns an initialized optionBuilder +// statusOptions returns an initialized optionBuilder func statusOptions() *optionBuilder { return &optionBuilder{ options: []status.Option{}, @@ -60,6 +60,26 @@ func (m mongoUriOption) GetResult() (reconcile.Result, error) { return result.OK() } +func (o *optionBuilder) withVersion(version string) *optionBuilder { + o.options = append(o.options, + versionOption{ + version: 
version, + }) + return o +} + +type versionOption struct { + version string +} + +func (v versionOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.Version = v.version +} + +func (v versionOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + func (o *optionBuilder) withPhase(phase mdbv1.Phase, retryAfter int) *optionBuilder { o.options = append(o.options, phaseOption{ @@ -112,6 +132,20 @@ func (o *optionBuilder) withStatefulSetReplicas(members int) *optionBuilder { return o } +func (o *optionBuilder) withMongoDBArbiters(arbiters int) *optionBuilder { + o.options = append(o.options, mongoDBArbitersOption{ + mongoDBArbiters: arbiters, + }) + return o +} + +func (o *optionBuilder) withStatefulSetArbiters(arbiters int) *optionBuilder { + o.options = append(o.options, statefulSetArbitersOption{ + arbiters: arbiters, + }) + return o +} + func (o *optionBuilder) withMessage(severityLevel severity, msg string) *optionBuilder { if apierrors.IsTransientMessage(msg) { severityLevel = Debug @@ -183,3 +217,27 @@ func (s statefulSetReplicasOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { func (s statefulSetReplicasOption) GetResult() (reconcile.Result, error) { return result.OK() } + +type mongoDBArbitersOption struct { + mongoDBArbiters int +} + +func (a mongoDBArbitersOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentMongoDBArbiters = a.mongoDBArbiters +} + +func (a mongoDBArbitersOption) GetResult() (reconcile.Result, error) { + return result.OK() +} + +type statefulSetArbitersOption struct { + arbiters int +} + +func (s statefulSetArbitersOption) ApplyOption(mdb *mdbv1.MongoDBCommunity) { + mdb.Status.CurrentStatefulSetArbitersReplicas = s.arbiters +} + +func (s statefulSetArbitersOption) GetResult() (reconcile.Result, error) { + return result.OK() +} diff --git a/controllers/mongodb_status_options_test.go b/controllers/mongodb_status_options_test.go index 7484cd6fe..9041c8d99 100644 --- a/controllers/mongodb_status_options_test.go +++ b/controllers/mongodb_status_options_test.go @@ -9,9 +9,11 @@ import ( "github.com/stretchr/testify/assert" ) +const testVersion string = "4.2.6" + func TestMongoUriOption_ApplyOption(t *testing.T) { - mdb := newReplicaSet(3, "my-rs", "my-ns") + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") opt := mongoUriOption{ mongoUri: "my-uri", @@ -23,7 +25,7 @@ func TestMongoUriOption_ApplyOption(t *testing.T) { } func TestOptionBuilder_RunningPhase(t *testing.T) { - mdb := newReplicaSet(3, "my-rs", "my-ns") + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") statusOptions().withRunningPhase().GetOptions()[0].ApplyOption(&mdb) @@ -31,7 +33,7 @@ func TestOptionBuilder_RunningPhase(t *testing.T) { } func TestOptionBuilder_PendingPhase(t *testing.T) { - mdb := newReplicaSet(3, "my-rs", "my-ns") + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") statusOptions().withPendingPhase(10).GetOptions()[0].ApplyOption(&mdb) @@ -39,14 +41,25 @@ func TestOptionBuilder_PendingPhase(t *testing.T) { } func TestOptionBuilder_FailedPhase(t *testing.T) { - mdb := newReplicaSet(3, "my-rs", "my-ns") + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") statusOptions().withFailedPhase().GetOptions()[0].ApplyOption(&mdb) assert.Equal(t, mdbv1.Failed, mdb.Status.Phase) } -func newReplicaSet(members int, name, namespace string) mdbv1.MongoDBCommunity { +func TestVersion_ApplyOption(t *testing.T) { + mdb := newReplicaSet(3, testVersion, "my-rs", "my-ns") + + opt := versionOption{ + version: testVersion, + } + 
opt.ApplyOption(&mdb) + + assert.Equal(t, testVersion, mdb.Status.Version, "Status should be updated") +} + +func newReplicaSet(members int, version string, name, namespace string) mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ @@ -55,6 +68,7 @@ func newReplicaSet(members int, name, namespace string) mdbv1.MongoDBCommunity { }, Spec: mdbv1.MongoDBCommunitySpec{ Members: members, + Version: version, }, } } diff --git a/controllers/mongodb_tls.go b/controllers/mongodb_tls.go index da45631a3..56c67642d 100644 --- a/controllers/mongodb_tls.go +++ b/controllers/mongodb_tls.go @@ -1,13 +1,12 @@ package controllers import ( + "context" "crypto/sha256" "fmt" "strings" - "github.com/pkg/errors" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" @@ -20,45 +19,44 @@ import ( "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" ) const ( - tlsCAMountPath = "/var/lib/tls/ca/" - tlsCACertName = "ca.crt" - tlsOperatorSecretMountPath = "/var/lib/tls/server/" //nolint - tlsSecretCertName = "tls.crt" //nolint - tlsSecretKeyName = "tls.key" + tlsCAMountPath = "/var/lib/tls/ca/" + tlsCACertName = "ca.crt" + tlsOperatorSecretMountPath = "/var/lib/tls/server/" //nolint + tlsPrometheusSecretMountPath = "/var/lib/tls/prometheus/" //nolint + tlsSecretCertName = "tls.crt" + tlsSecretKeyName = "tls.key" + tlsSecretPemName = "tls.pem" + automationAgentPemMountPath = "/var/lib/mongodb-mms-automation/agent-certs" ) // validateTLSConfig will check that the configured ConfigMap and Secret exist and that they have the correct fields. 
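
Before the validateTLSConfig changes below, it helps to see where the material behind these constants actually lands inside the pod. A rough sketch, using the volume names asserted later in mongodb_tls_test.go (the pairing is illustrative; the operator builds these mounts in buildTLSPodSpecModification and related functions further down in this diff):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func main() {
    // Each operator-managed secret is surfaced as a read-only volume at one
    // of the mount-path constants declared above.
    mounts := []corev1.VolumeMount{
        {Name: "tls-ca", MountPath: "/var/lib/tls/ca/", ReadOnly: true},
        {Name: "tls-secret", MountPath: "/var/lib/tls/server/", ReadOnly: true},
        {Name: "prom-tls-secret", MountPath: "/var/lib/tls/prometheus/", ReadOnly: true},
        {Name: "agent-certs-pem", MountPath: "/var/lib/mongodb-mms-automation/agent-certs", ReadOnly: true},
    }
    for _, m := range mounts {
        fmt.Printf("%-16s -> %s\n", m.Name, m.MountPath)
    }
}
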
-func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) validateTLSConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { if !mdb.Spec.Security.TLS.Enabled { return true, nil } r.log.Info("Ensuring TLS is correctly configured") - // Ensure CA ConfigMap exists - caData, err := configmap.ReadData(r.client, mdb.TLSConfigMapNamespacedName()) + // Ensure CA cert is configured + _, err := getCaCrt(ctx, r.client, r.client, mdb) + if err != nil { if apiErrors.IsNotFound(err) { - r.log.Warnf(`CA ConfigMap "%s" not found`, mdb.TLSConfigMapNamespacedName()) + r.log.Warnf("CA resource not found: %s", err) return false, nil } return false, err } - // Ensure ConfigMap has a "ca.crt" field - if cert, ok := caData[tlsCACertName]; !ok || cert == "" { - r.log.Warnf(`ConfigMap "%s" should have a CA certificate in field "%s"`, mdb.TLSConfigMapNamespacedName(), tlsCACertName) - return false, nil - } - // Ensure Secret exists - secretData, err := secret.ReadStringData(r.client, mdb.TLSSecretNamespacedName()) + _, err = secret.ReadStringData(ctx, r.client, mdb.TLSSecretNamespacedName()) if err != nil { if apiErrors.IsNotFound(err) { r.log.Warnf(`Secret "%s" not found`, mdb.TLSSecretNamespacedName()) @@ -68,18 +66,23 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo return false, err } - // Ensure Secret has "tls.crt" and "tls.key" fields - if key, ok := secretData[tlsSecretKeyName]; !ok || key == "" { - r.log.Warnf(`Secret "%s" should have a key in field "%s"`, mdb.TLSSecretNamespacedName(), tlsSecretKeyName) - return false, nil - } - if cert, ok := secretData[tlsSecretCertName]; !ok || cert == "" { - r.log.Warnf(`Secret "%s" should have a certificate in field "%s"`, mdb.TLSSecretNamespacedName(), tlsSecretKeyName) + // validate whether the secret contains "tls.crt" and "tls.key", or it contains "tls.pem" + // if it contains all three, then the pem entry should be equal to the concatenation of crt and key + _, err = getPemOrConcatenatedCrtAndKey(ctx, r.client, mdb.TLSSecretNamespacedName()) + if err != nil { + r.log.Warnf(err.Error()) return false, nil } // Watch certificate-key secret to handle rotations - r.secretWatcher.Watch(mdb.TLSSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.TLSSecretNamespacedName(), mdb.NamespacedName()) + + // Watch CA certificate changes + if mdb.Spec.Security.TLS.CaCertificateSecret != nil { + r.secretWatcher.Watch(ctx, mdb.TLSCaCertificateSecretNamespacedName(), mdb.NamespacedName()) + } else { + r.configMapWatcher.Watch(ctx, mdb.TLSConfigMapNamespacedName(), mdb.NamespacedName()) + } r.log.Infof("Successfully validated TLS config") return true, nil @@ -87,32 +90,46 @@ func (r *ReplicaSetReconciler) validateTLSConfig(mdb mdbv1.MongoDBCommunity) (bo // getTLSConfigModification creates a modification function which enables TLS in the automation config. // It will also ensure that the combined cert-key secret is created. 
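
An aside before getTLSConfigModification below: the validation above now accepts the CA from either a Secret or a ConfigMap, and getCaCrt (added later in this file) gives the Secret reference precedence. A minimal sketch of that precedence, with plain strings standing in for the Kubernetes objects (caSource is a hypothetical helper, not operator code):

package main

import "fmt"

// caSource mirrors the branch order in getCaCrt: an explicit CA Secret
// reference wins, the ConfigMap reference is the fallback, and configuring
// neither is an error.
func caSource(caSecretRef, caConfigMapRef string) (string, error) {
    switch {
    case caSecretRef != "":
        return "secret/" + caSecretRef, nil
    case caConfigMapRef != "":
        return "configmap/" + caConfigMapRef, nil
    default:
        return "", fmt.Errorf("no CA certificate reference configured")
    }
}

func main() {
    src, _ := caSource("my-ca-secret", "my-ca-configmap")
    fmt.Println(src) // secret/my-ca-secret: the Secret takes precedence
}
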
-func getTLSConfigModification(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { +func getTLSConfigModification(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { if !mdb.Spec.Security.TLS.Enabled { return automationconfig.NOOP(), nil } - certKey, err := getCertAndKey(getUpdateCreator, mdb) + caCert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb) + if err != nil { + return automationconfig.NOOP(), err + } + + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, secretGetter, mdb.TLSSecretNamespacedName()) if err != nil { return automationconfig.NOOP(), err } - return tlsConfigModification(mdb, certKey), nil + return tlsConfigModification(mdb, certKey, caCert), nil } // getCertAndKey will fetch the certificate and key from the user-provided Secret. -func getCertAndKey(getter secret.Getter, mdb mdbv1.MongoDBCommunity) (string, error) { - cert, err := secret.ReadKey(getter, tlsSecretCertName, mdb.TLSSecretNamespacedName()) +func getCertAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + cert, err := secret.ReadKey(ctx, getter, tlsSecretCertName, secretName) if err != nil { - return "", err + return "" } - key, err := secret.ReadKey(getter, tlsSecretKeyName, mdb.TLSSecretNamespacedName()) + key, err := secret.ReadKey(ctx, getter, tlsSecretKeyName, secretName) if err != nil { - return "", err + return "" } - return combineCertificateAndKey(cert, key), nil + return combineCertificateAndKey(cert, key) +} + +// getPem will fetch the pem from the user-provided secret +func getPem(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) string { + pem, err := secret.ReadKey(ctx, getter, tlsSecretPemName, secretName) + if err != nil { + return "" + } + return pem } func combineCertificateAndKey(cert, key string) string { @@ -121,12 +138,81 @@ func combineCertificateAndKey(cert, key string) string { return fmt.Sprintf("%s\n%s", trimmedCert, trimmedKey) } +// getPemOrConcatenatedCrtAndKey will get the final PEM to write to the secret. +// This is either the tls.pem entry in the given secret, or the concatenation +// of tls.crt and tls.key +// It performs a basic validation on the entries. 
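
Spelled out, the function below has four input combinations to handle. A quick reference table, derived directly from the validation rules described in the comment above (no operator code involved):

package main

import "fmt"

func main() {
    cases := []struct{ crtAndKey, pem, outcome string }{
        {"absent", "absent", "error: nothing usable in the secret"},
        {"absent", "present", "use tls.pem as-is"},
        {"present", "absent", "use tls.crt + \"\\n\" + tls.key"},
        {"present", "present", "both must match exactly, otherwise error"},
    }
    for _, c := range cases {
        fmt.Printf("crt/key=%-7s pem=%-7s => %s\n", c.crtAndKey, c.pem, c.outcome)
    }
}
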
+func getPemOrConcatenatedCrtAndKey(ctx context.Context, getter secret.Getter, secretName types.NamespacedName) (string, error) {
+ certKey := getCertAndKey(ctx, getter, secretName)
+ pem := getPem(ctx, getter, secretName)
+ if certKey == "" && pem == "" {
+ return "", fmt.Errorf(`neither "%s" nor the pair "%s"/"%s" were present in the TLS secret`, tlsSecretPemName, tlsSecretCertName, tlsSecretKeyName)
+ }
+ if certKey == "" {
+ return pem, nil
+ }
+ if pem == "" {
+ return certKey, nil
+ }
+ if certKey != pem {
+ return "", fmt.Errorf(`if all of "%s", "%s" and "%s" are present in the secret, the entry for "%s" must be equal to the concatenation of "%s" with "%s"`, tlsSecretCertName, tlsSecretKeyName, tlsSecretPemName, tlsSecretPemName, tlsSecretCertName, tlsSecretKeyName)
+ }
+ return certKey, nil
+}
+
+func getCaCrt(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, mdb mdbv1.MongoDBCommunity) (string, error) {
+ var caResourceName types.NamespacedName
+ var caData map[string]string
+ var err error
+ if mdb.Spec.Security.TLS.CaCertificateSecret != nil {
+ caResourceName = mdb.TLSCaCertificateSecretNamespacedName()
+ caData, err = secret.ReadStringData(ctx, secretGetter, caResourceName)
+ } else if mdb.Spec.Security.TLS.CaConfigMap != nil {
+ caResourceName = mdb.TLSConfigMapNamespacedName()
+ caData, err = configmap.ReadData(ctx, cmGetter, caResourceName)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ if caData == nil {
+ return "", fmt.Errorf("TLS field requires a reference to the CA certificate which signed the server certificates. Neither secret (field caCertificateSecretRef) nor configMap (field CaConfigMap) reference is present")
+ }
+
+ if cert, ok := caData[tlsCACertName]; !ok || cert == "" {
+ return "", fmt.Errorf(`CA certificate resource "%s" should have a CA certificate in field "%s"`, caResourceName, tlsCACertName)
+ } else {
+ return cert, nil
+ }
+}
+
+// ensureCASecret will create or update the operator-managed Secret containing
+// the CA certificate from the user-provided Secret or ConfigMap.
+func ensureCASecret(ctx context.Context, cmGetter configmap.Getter, secretGetter secret.Getter, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error {
+ cert, err := getCaCrt(ctx, cmGetter, secretGetter, mdb)
+ if err != nil {
+ return err
+ }
+
+ caFileName := tlsOperatorSecretFileName(cert)
+
+ operatorSecret := secret.Builder().
+ SetName(mdb.TLSOperatorCASecretNamespacedName().Name).
+ SetNamespace(mdb.TLSOperatorCASecretNamespacedName().Namespace).
+ SetField(caFileName, cert).
+ SetOwnerReferences(mdb.GetOwnerReferences()).
+ Build()
+
+ return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret)
+}
+
 // ensureTLSSecret will create or update the operator-managed Secret containing
 // the concatenated certificate and key from the user-provided Secret.
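
Both ensureCASecret above and ensureTLSSecret below funnel their writes through secret.CreateOrUpdate, which keeps reconciles idempotent. I would expect that helper to behave like controller-runtime's controllerutil.CreateOrUpdate (the same package this diff imports in replica_set_controller.go); a minimal sketch against a fake client, with a hypothetical hash-named entry:

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

func main() {
    ctx := context.Background()
    c := fake.NewClientBuilder().Build()

    s := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "mdb-tls-ca", Namespace: "my-ns"}}
    mutate := func() error {
        if s.StringData == nil {
            s.StringData = map[string]string{}
        }
        s.StringData["<hash>.pem"] = "CERT" // placeholder for the hash-derived file name
        return nil
    }

    // First reconcile: the Secret is absent, so it gets created.
    op, err := controllerutil.CreateOrUpdate(ctx, c, s, mutate)
    fmt.Println(op, err) // created <nil>

    // Second reconcile: the same call converges instead of failing with AlreadyExists.
    op, err = controllerutil.CreateOrUpdate(ctx, c, s, mutate)
    fmt.Println(op, err)
}
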
-func ensureTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { - certKey, err := getCertAndKey(getUpdateCreator, mdb) +func ensureTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.TLSSecretNamespacedName()) if err != nil { - return errors.Errorf("could not get cert and key: %s", err) + return err } // Calculate file name from certificate and key fileName := tlsOperatorSecretFileName(certKey) @@ -135,10 +221,50 @@ func ensureTLSSecret(getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDB SetName(mdb.TLSOperatorSecretNamespacedName().Name). SetNamespace(mdb.TLSOperatorSecretNamespacedName().Namespace). SetField(fileName, certKey). - SetOwnerReferences([]metav1.OwnerReference{getOwnerReference(mdb)}). + SetOwnerReferences(mdb.GetOwnerReferences()). + Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) +} + +func ensureAgentCertSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.GetAgentAuthMode() != "X509" { + return nil + } + + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.AgentCertificateSecretNamespacedName()) + if err != nil { + return err + } + + agentCertSecret := secret.Builder(). + SetName(mdb.AgentCertificatePemSecretNamespacedName().Name). + SetNamespace(mdb.NamespacedName().Namespace). + SetField(mdb.AgentCertificatePemSecretNamespacedName().Name, certKey). + SetOwnerReferences(mdb.GetOwnerReferences()). + Build() + + return secret.CreateOrUpdate(ctx, getUpdateCreator, agentCertSecret) +} + +// ensurePrometheusTLSSecret will create or update the operator-managed Secret containing +// the concatenated certificate and key from the user-provided Secret. +func ensurePrometheusTLSSecret(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) error { + certKey, err := getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.DeepCopy().PrometheusTLSSecretNamespacedName()) + if err != nil { + return err + } + // Calculate file name from certificate and key + fileName := tlsOperatorSecretFileName(certKey) + + operatorSecret := secret.Builder(). + SetName(mdb.PrometheusTLSOperatorSecretNamespacedName().Name). + SetNamespace(mdb.PrometheusTLSOperatorSecretNamespacedName().Namespace). + SetField(fileName, certKey). + SetOwnerReferences(mdb.GetOwnerReferences()). Build() - return secret.CreateOrUpdate(getUpdateCreator, operatorSecret) + return secret.CreateOrUpdate(ctx, getUpdateCreator, operatorSecret) } // tlsOperatorSecretFileName calculates the file name to use for the mounted @@ -153,8 +279,8 @@ func tlsOperatorSecretFileName(certKey string) string { } // tlsConfigModification will enable TLS in the automation config. 
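
One detail worth pausing on before tlsConfigModification below: tlsOperatorSecretFileName (context above) derives the mounted file's name from the certificate contents, and the CA path is now built the same way. Assuming the name is the SHA-256 hex digest with a .pem suffix (consistent with the crypto/sha256 import and the tlsOperatorSecretFileName("CERT") assertions in the tests), rotating a certificate changes the mounted path, and with it the automation config:

package main

import (
    "crypto/sha256"
    "fmt"
)

// fileNameForPem is an assumed mirror of tlsOperatorSecretFileName:
// hex(sha256(contents)) + ".pem".
func fileNameForPem(pem string) string {
    return fmt.Sprintf("%x.pem", sha256.Sum256([]byte(pem)))
}

func main() {
    before := "/var/lib/tls/ca/" + fileNameForPem("CERT")
    after := "/var/lib/tls/ca/" + fileNameForPem("CERT-ROTATED")
    fmt.Println(before != after) // true: a rotation yields a new mounted path
}
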
-func tlsConfigModification(mdb mdbv1.MongoDBCommunity, certKey string) automationconfig.Modification { - caCertificatePath := tlsCAMountPath + tlsCACertName +func tlsConfigModification(mdb mdbv1.MongoDBCommunity, certKey, caCert string) automationconfig.Modification { + caCertificatePath := tlsCAMountPath + tlsOperatorSecretFileName(caCert) certificateKeyPath := tlsOperatorSecretMountPath + tlsOperatorSecretFileName(certKey) mode := automationconfig.TLSModeRequired @@ -163,9 +289,15 @@ func tlsConfigModification(mdb mdbv1.MongoDBCommunity, certKey string) automatio mode = automationconfig.TLSModePreferred } + automationAgentPemFilePath := "" + if mdb.Spec.IsAgentX509() { + automationAgentPemFilePath = automationAgentPemMountPath + "/" + mdb.AgentCertificatePemSecretNamespacedName().Name + } + return func(config *automationconfig.AutomationConfig) { // Configure CA certificate for agent config.TLSConfig.CAFilePath = caCertificatePath + config.TLSConfig.AutoPEMKeyFilePath = automationAgentPemFilePath for i := range config.Processes { args := config.Processes[i].Args26 @@ -184,9 +316,9 @@ func buildTLSPodSpecModification(mdb mdbv1.MongoDBCommunity) podtemplatespec.Mod return podtemplatespec.NOOP() } - // Configure a volume which mounts the CA certificate from a ConfigMap + // Configure a volume which mounts the CA certificate from either a Secret or a ConfigMap // The certificate is used by both mongod and the agent - caVolume := statefulset.CreateVolumeFromConfigMap("tls-ca", mdb.Spec.Security.TLS.CaConfigMap.Name) + caVolume := statefulset.CreateVolumeFromSecret("tls-ca", mdb.TLSOperatorCASecretNamespacedName().Name) caVolumeMount := statefulset.CreateVolumeMount(caVolume.Name, tlsCAMountPath, statefulset.WithReadOnly(true)) // Configure a volume which mounts the secret holding the server key and certificate @@ -204,3 +336,44 @@ func buildTLSPodSpecModification(mdb mdbv1.MongoDBCommunity) podtemplatespec.Mod podtemplatespec.WithVolumeMounts(construct.MongodbName, tlsSecretVolumeMount, caVolumeMount), ) } + +// buildTLSPrometheus adds the TLS mounts for Prometheus. 
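
To close out the automation-config side before the Prometheus mounts below: reduced to a plain map, the per-process settings written by tlsConfigModification look roughly like this. The option names are the ones asserted in mongodb_tls_test.go; the "requireTLS" literal is my assumption for what automationconfig.TLSModeRequired resolves to:

package main

import "fmt"

func tlsProcessArgs(mode, certKeyPath, caPath string) map[string]interface{} {
    return map[string]interface{}{
        "net.tls.mode":               mode,
        "net.tls.certificateKeyFile": certKeyPath,
        "net.tls.CAFile":             caPath,
        // the tests assert this is true for both the required and preferred modes
        "net.tls.allowConnectionsWithoutCertificates": true,
    }
}

func main() {
    args := tlsProcessArgs("requireTLS", "/var/lib/tls/server/<hash>.pem", "/var/lib/tls/ca/<hash>.pem")
    fmt.Println(args["net.tls.mode"])
}
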
+func buildTLSPrometheus(mdb mdbv1.MongoDBCommunity) podtemplatespec.Modification { + if mdb.Spec.Prometheus == nil || mdb.Spec.Prometheus.TLSSecretRef.Name == "" { + return podtemplatespec.NOOP() + } + + // Configure a volume which mounts the secret holding the server key and certificate + // The same key-certificate pair is used for all servers + tlsSecretVolume := statefulset.CreateVolumeFromSecret("prom-tls-secret", mdb.PrometheusTLSOperatorSecretNamespacedName().Name) + + tlsSecretVolumeMount := statefulset.CreateVolumeMount(tlsSecretVolume.Name, tlsPrometheusSecretMountPath, statefulset.WithReadOnly(true)) + + // MongoDB expects both key and certificate to be provided in a single PEM file + // We are using a secret format where they are stored in separate fields, tls.crt and tls.key + // Because of this we need to use an init container which reads the two files mounted from the secret and combines them into one + return podtemplatespec.Apply( + // podtemplatespec.WithVolume(caVolume), + podtemplatespec.WithVolume(tlsSecretVolume), + podtemplatespec.WithVolumeMounts(construct.AgentName, tlsSecretVolumeMount), + podtemplatespec.WithVolumeMounts(construct.MongodbName, tlsSecretVolumeMount), + ) +} + +func buildAgentX509(mdb mdbv1.MongoDBCommunity) podtemplatespec.Modification { + if mdb.Spec.GetAgentAuthMode() != "X509" { + return podtemplatespec.Apply( + podtemplatespec.RemoveVolume(constants.AgentPemFile), + podtemplatespec.RemoveVolumeMount(construct.AgentName, constants.AgentPemFile), + ) + } + + agentCertVolume := statefulset.CreateVolumeFromSecret(constants.AgentPemFile, mdb.AgentCertificatePemSecretNamespacedName().Name) + agentCertVolumeMount := statefulset.CreateVolumeMount(agentCertVolume.Name, automationAgentPemMountPath, statefulset.WithReadOnly(true)) + + return podtemplatespec.Apply( + podtemplatespec.WithVolume(agentCertVolume), + podtemplatespec.WithVolumeMounts(construct.AgentName, agentCertVolumeMount), + ) + +} diff --git a/controllers/mongodb_tls_test.go b/controllers/mongodb_tls_test.go index 650fd264f..b4e832778 100644 --- a/controllers/mongodb_tls_test.go +++ b/controllers/mongodb_tls_test.go @@ -2,60 +2,150 @@ package controllers import ( "context" + "errors" "testing" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + corev1 "k8s.io/api/core/v1" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/types" + k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" - mdbClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + kubeClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/configmap" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/types" - k8sClient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func TestStatefulSet_IsCorrectlyConfiguredWithTLS(t *testing.T) { +func TestStatefulSetIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSetWithTLS() - mgr := client.NewManager(&mdb) + mgr := kubeClient.NewManager(ctx, &mdb) - 
err := createTLSSecretAndConfigMap(mgr.GetClient(), mdb) + client := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) - // Assert that all TLS volumes have been added. - assert.Len(t, sts.Spec.Template.Spec.Volumes, 7) + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", "") +} + +func TestStatefulSetIsCorrectlyConfiguredWithTLSAndX509(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} + mgr := kubeClient.NewManager(ctx, &mdb) + + client := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) + assert.NoError(t, err) + crt, key, err := x509.CreateAgentCertificate() + assert.NoError(t, err) + err = createAgentCertSecret(ctx, client, mdb, crt, key, "") + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + // Check that the pem secret has been created + s := corev1.Secret{} + err = mgr.GetClient().Get(ctx, mdb.AgentCertificatePemSecretNamespacedName(), &s) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", mdb.AgentCertificatePemSecretNamespacedName().Name) + + // If we deactivate X509 for the agent, we expect the certificates to be unmounted. 
+ mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"SCRAM"} + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts = appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, "", "") +} + +func assertStatefulSetVolumesAndVolumeMounts(t *testing.T, sts appsv1.StatefulSet, expectedTLSCASecretName string, expectedTLSOperatorSecretName string, expectedPromTLSSecretName string, expectedAgentCertSecretName string) { + prometheusTLSEnabled := expectedPromTLSSecretName != "" + agentX509Enabled := expectedAgentCertSecretName != "" + + permission := int32(416) assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ Name: "tls-ca", VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: mdb.Spec.Security.TLS.CaConfigMap.Name, - }, + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedTLSCASecretName, + DefaultMode: &permission, }, }, }) - permission := int32(416) assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ Name: "tls-secret", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: mdb.TLSOperatorSecretNamespacedName().Name, + SecretName: expectedTLSOperatorSecretName, DefaultMode: &permission, }, }, }) + if prometheusTLSEnabled { + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "prom-tls-secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedPromTLSSecretName, + DefaultMode: &permission, + }, + }, + }) + } + if agentX509Enabled { + assert.Contains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "agent-certs-pem", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedAgentCertSecretName, + DefaultMode: &permission, + }, + }, + }) + } else { + assert.NotContains(t, sts.Spec.Template.Spec.Volumes, corev1.Volume{ + Name: "agent-certs-pem", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: expectedAgentCertSecretName, + DefaultMode: &permission, + }, + }, + }) + } tlsSecretVolumeMount := corev1.VolumeMount{ Name: "tls-secret", @@ -67,27 +157,151 @@ func TestStatefulSet_IsCorrectlyConfiguredWithTLS(t *testing.T) { ReadOnly: true, MountPath: tlsCAMountPath, } + tlsPrometheusSecretVolumeMount := corev1.VolumeMount{ + Name: "prom-tls-secret", + ReadOnly: true, + MountPath: tlsPrometheusSecretMountPath, + } + agentCertSecretVolumeMount := corev1.VolumeMount{ + Name: "agent-certs-pem", + ReadOnly: true, + MountPath: automationAgentPemMountPath, + } assert.Len(t, sts.Spec.Template.Spec.InitContainers, 2) - agentContainer := sts.Spec.Template.Spec.Containers[0] + var agentContainer corev1.Container + var mongodbContainer corev1.Container + + for i, container := range sts.Spec.Template.Spec.Containers { + if container.Name == construct.AgentName { + agentContainer = sts.Spec.Template.Spec.Containers[i] + } else if container.Name == construct.MongodbName { + mongodbContainer = sts.Spec.Template.Spec.Containers[i] + } + } + assert.Contains(t, agentContainer.VolumeMounts, 
tlsSecretVolumeMount) assert.Contains(t, agentContainer.VolumeMounts, tlsCAVolumeMount) + if prometheusTLSEnabled { + assert.Contains(t, agentContainer.VolumeMounts, tlsPrometheusSecretVolumeMount) + } + if agentX509Enabled { + assert.Contains(t, agentContainer.VolumeMounts, agentCertSecretVolumeMount) + } else { + assert.NotContains(t, agentContainer.VolumeMounts, agentCertSecretVolumeMount) + } - mongodbContainer := sts.Spec.Template.Spec.Containers[1] assert.Contains(t, mongodbContainer.VolumeMounts, tlsSecretVolumeMount) assert.Contains(t, mongodbContainer.VolumeMounts, tlsCAVolumeMount) + if prometheusTLSEnabled { + assert.Contains(t, mongodbContainer.VolumeMounts, tlsPrometheusSecretVolumeMount) + } +} + +func TestStatefulSetIsCorrectlyConfiguredWithPrometheusTLS(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "prom-password-secret", + }, + Port: 4321, + TLSSecretRef: mdbv1.SecretKeyReference{ + Name: "prom-secret-cert", + }, + } + + mgr := kubeClient.NewManager(ctx, &mdb) + cli := kubeClient.NewClient(mgr.GetClient()) + + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("prom-password-secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). + Build()) + assert.NoError(t, err) + err = createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createPrometheusTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + + err = createTLSConfigMap(ctx, cli, mdb) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, mdb.TLSOperatorCASecretNamespacedName().Name, mdb.TLSOperatorSecretNamespacedName().Name, mdb.PrometheusTLSOperatorSecretNamespacedName().Name, "") +} + +func TestStatefulSetIsCorrectlyConfiguredWithTLSAfterChangingExistingVolumes(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mgr := kubeClient.NewManager(ctx, &mdb) + + cli := kubeClient.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, cli, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + + tlsCAVolumeSecretName := mdb.TLSOperatorCASecretNamespacedName().Name + changedTLSCAVolumeSecretName := tlsCAVolumeSecretName + "-old" + + err = createTLSSecretWithNamespaceAndName(ctx, cli, mdb.Namespace, changedTLSCAVolumeSecretName, "CERT", "KEY", "") + assert.NoError(t, err) + + err = createTLSConfigMap(ctx, cli, mdb) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts := appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + + 
assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") + + // updating sts tls-ca volume directly to simulate changing of underlying volume's secret + for i := range sts.Spec.Template.Spec.Volumes { + if sts.Spec.Template.Spec.Volumes[i].Name == "tls-ca" { + sts.Spec.Template.Spec.Volumes[i].VolumeSource.Secret.SecretName = changedTLSCAVolumeSecretName + } + } + + err = mgr.GetClient().Update(ctx, &sts) + assert.NoError(t, err) + + assertStatefulSetVolumesAndVolumeMounts(t, sts, changedTLSCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + sts = appsv1.StatefulSet{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + assert.NoError(t, err) + assertStatefulSetVolumesAndVolumeMounts(t, sts, tlsCAVolumeSecretName, mdb.TLSOperatorSecretNamespacedName().Name, "", "") } -func TestAutomationConfig_IsCorrectlyConfiguredWithTLS(t *testing.T) { +func TestAutomationConfigIsCorrectlyConfiguredWithTLS(t *testing.T) { + ctx := context.Background() createAC := func(mdb mdbv1.MongoDBCommunity) automationconfig.AutomationConfig { - client := mdbClient.NewClient(client.NewManager(&mdb).GetClient()) - err := createTLSSecretAndConfigMap(client, mdb) + client := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, client, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, client, mdb) assert.NoError(t, err) - tlsModification, err := getTLSConfigModification(client, mdb) + tlsModification, err := getTLSConfigModification(ctx, client, client, mdb) assert.NoError(t, err) - ac, err := buildAutomationConfig(mdb, automationconfig.Auth{}, automationconfig.AutomationConfig{}, tlsModification) + ac, err := buildAutomationConfig(mdb, false, automationconfig.Auth{}, automationconfig.AutomationConfig{}, tlsModification) assert.NoError(t, err) return ac @@ -107,12 +321,24 @@ func TestAutomationConfig_IsCorrectlyConfiguredWithTLS(t *testing.T) { } }) + t.Run("With logRotate and SystemLog enabled", func(t *testing.T) { + mdb := newTestReplicaSetWithSystemLogAndLogRotate() + ac := createAC(mdb) + + for _, process := range ac.Processes { + assert.Equal(t, "/tmp/test", process.Args26.Get("systemLog.path").String()) + assert.Equal(t, "file", process.Args26.Get("systemLog.destination").String()) + assert.Equal(t, process.LogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.LogRotate)) + assert.Equal(t, process.AuditLogRotate, automationconfig.ConvertCrdLogRotateToAC(mdb.Spec.AgentConfiguration.AuditLogRotate)) + } + }) + t.Run("With TLS enabled and required, rollout completed", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() ac := createAC(mdb) assert.Equal(t, &automationconfig.TLS{ - CAFilePath: tlsCAMountPath + tlsCACertName, + CAFilePath: tlsCAMountPath + tlsOperatorSecretFileName("CERT"), ClientCertificateMode: automationconfig.ClientCertificateModeOptional, }, ac.TLSConfig) @@ -121,7 +347,7 @@ func TestAutomationConfig_IsCorrectlyConfiguredWithTLS(t *testing.T) { assert.Equal(t, automationconfig.TLSModeRequired, process.Args26.Get("net.tls.mode").Data()) assert.Equal(t, tlsOperatorSecretMountPath+operatorSecretFileName, process.Args26.Get("net.tls.certificateKeyFile").Data()) - assert.Equal(t, 
tlsCAMountPath+tlsCACertName, process.Args26.Get("net.tls.CAFile").Data()) + assert.Equal(t, tlsCAMountPath+tlsOperatorSecretFileName("CERT"), process.Args26.Get("net.tls.CAFile").Data()) assert.True(t, process.Args26.Get("net.tls.allowConnectionsWithoutCertificates").MustBool()) } }) @@ -132,7 +358,7 @@ func TestAutomationConfig_IsCorrectlyConfiguredWithTLS(t *testing.T) { ac := createAC(mdb) assert.Equal(t, &automationconfig.TLS{ - CAFilePath: tlsCAMountPath + tlsCACertName, + CAFilePath: tlsCAMountPath + tlsOperatorSecretFileName("CERT"), ClientCertificateMode: automationconfig.ClientCertificateModeOptional, }, ac.TLSConfig) @@ -141,36 +367,41 @@ func TestAutomationConfig_IsCorrectlyConfiguredWithTLS(t *testing.T) { assert.Equal(t, automationconfig.TLSModePreferred, process.Args26.Get("net.tls.mode").Data()) assert.Equal(t, tlsOperatorSecretMountPath+operatorSecretFileName, process.Args26.Get("net.tls.certificateKeyFile").Data()) - assert.Equal(t, tlsCAMountPath+tlsCACertName, process.Args26.Get("net.tls.CAFile").Data()) + assert.Equal(t, tlsCAMountPath+tlsOperatorSecretFileName("CERT"), process.Args26.Get("net.tls.CAFile").Data()) assert.True(t, process.Args26.Get("net.tls.allowConnectionsWithoutCertificates").MustBool()) } }) } func TestTLSOperatorSecret(t *testing.T) { + ctx := context.Background() t.Run("Secret is created if it doesn't exist", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - c := mdbClient.NewClient(client.NewManager(&mdb).GetClient()) - err := createTLSSecretAndConfigMap(c, mdb) + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) assert.NoError(t, err) - r := NewReconciler(client.NewManagerWithClient(c)) + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) - // Operator-managed secret should have been created and contain the + // Operator-managed secret should have been created and contains the // concatenated certificate and key. expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) t.Run("Secret is updated if it already exists", func(t *testing.T) { mdb := newTestReplicaSetWithTLS() - k8sclient := mdbClient.NewClient(client.NewManager(&mdb).GetClient()) - err := createTLSSecretAndConfigMap(k8sclient, mdb) + k8sclient := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, k8sclient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, k8sclient, mdb) assert.NoError(t, err) // Create operator-managed secret @@ -179,18 +410,18 @@ func TestTLSOperatorSecret(t *testing.T) { SetNamespace(mdb.TLSOperatorSecretNamespacedName().Namespace). SetField(tlsOperatorSecretFileName(""), ""). 
Build() - err = k8sclient.CreateSecret(s) + err = k8sclient.CreateSecret(ctx, s) assert.NoError(t, err) - r := NewReconciler(client.NewManagerWithClient(k8sclient)) + r := NewReconciler(kubeClient.NewManagerWithClient(k8sclient), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - err = r.ensureTLSResources(mdb) + err = r.ensureTLSResources(ctx, mdb) assert.NoError(t, err) // Operator-managed secret should have been updated with the concatenated // certificate and key. expectedCertificateKey := "CERT\nKEY" - certificateKey, err := secret.ReadKey(k8sclient, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + certificateKey, err := secret.ReadKey(ctx, k8sclient, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) assert.NoError(t, err) assert.Equal(t, expectedCertificateKey, certificateKey) }) @@ -215,29 +446,173 @@ func TestCombineCertificateAndKey(t *testing.T) { } } -func createTLSSecretAndConfigMap(c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error { - s := secret.Builder(). - SetName(mdb.Spec.Security.TLS.CertificateKeySecret.Name). - SetNamespace(mdb.Namespace). - SetField("tls.crt", "CERT"). - SetField("tls.key", "KEY"). - Build() +func TestPemSupport(t *testing.T) { + ctx := context.Background() + t.Run("Success if only pem is provided", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "", "", "CERT\nKEY") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been created and contains the + // concatenated certificate and key. + expectedCertificateKey := "CERT\nKEY" + certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName()) + assert.NoError(t, err) + assert.Equal(t, expectedCertificateKey, certificateKey) + }) + t.Run("Success if pem is equal to cert+key", func(t *testing.T) { + mdb := newTestReplicaSetWithTLS() + c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient()) + err := createTLSSecret(ctx, c, mdb, "CERT", "KEY", "CERT\nKEY") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, c, mdb) + assert.NoError(t, err) + + r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + err = r.ensureTLSResources(ctx, mdb) + assert.NoError(t, err) + + // Operator-managed secret should have been created and contains the + // concatenated certificate and key. 
+ expectedCertificateKey := "CERT\nKEY"
+ certificateKey, err := secret.ReadKey(ctx, c, tlsOperatorSecretFileName(expectedCertificateKey), mdb.TLSOperatorSecretNamespacedName())
+ assert.NoError(t, err)
+ assert.Equal(t, expectedCertificateKey, certificateKey)
+ })
+ t.Run("Failure if pem is different from cert+key", func(t *testing.T) {
+ mdb := newTestReplicaSetWithTLS()
+ c := kubeClient.NewClient(kubeClient.NewManager(ctx, &mdb).GetClient())
+ err := createTLSSecret(ctx, c, mdb, "CERT1", "KEY1", "CERT\nKEY")
+ assert.NoError(t, err)
+ err = createTLSConfigMap(ctx, c, mdb)
+ assert.NoError(t, err)
+
+ r := NewReconciler(kubeClient.NewManagerWithClient(c), "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage")
- err = r.ensureTLSResources(mdb)
+ err = r.ensureTLSResources(ctx, mdb)
+ assert.Error(t, err)
+ assert.Contains(t, err.Error(), `if all of "tls.crt", "tls.key" and "tls.pem" are present in the secret, the entry for "tls.pem" must be equal to the concatenation of "tls.crt" with "tls.key"`)
+ })
+}
+
+func TestTLSConfigReferencesToCACertAreValidated(t *testing.T) {
+ ctx := context.Background()
+ type args struct {
+ caConfigMap *corev1.LocalObjectReference
+ caCertificateSecret *corev1.LocalObjectReference
+ expectedError error
+ }
+ tests := map[string]args{
+ "Success if reference to CA cert provided via secret": {
+ caConfigMap: nil,
+ caCertificateSecret: &corev1.LocalObjectReference{
+ Name: "certificateKeySecret"},
+ },
+ "Success if reference to CA cert provided via config map": {
+ caConfigMap: &corev1.LocalObjectReference{
+ Name: "caConfigMap"},
+ caCertificateSecret: nil,
+ },
+ "Success if reference to CA cert provided both via secret and configMap": {
+ caConfigMap: &corev1.LocalObjectReference{
+ Name: "caConfigMap"},
+ caCertificateSecret: &corev1.LocalObjectReference{
+ Name: "certificateKeySecret"},
+ },
+ "Failure if reference to CA cert is missing": {
+ caConfigMap: nil,
+ caCertificateSecret: nil,
+ expectedError: errors.New("TLS field requires a reference to the CA certificate which signed the server certificates. Neither secret (field caCertificateSecretRef) nor configMap (field CaConfigMap) reference is present"),
+ },
+ }
+ for testName, tc := range tests {
+ t.Run(testName, func(t *testing.T) {
+ mdb := newTestReplicaSetWithTLSCaCertificateReferences(tc.caConfigMap, tc.caCertificateSecret)
+
+ mgr := kubeClient.NewManager(ctx, &mdb)
+ cli := kubeClient.NewClient(mgr.GetClient())
+ err := createTLSSecret(ctx, cli, mdb, "cert", "key", "pem")
+
+ assert.NoError(t, err)
+
+ r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage")
+
+ _, err = r.validateTLSConfig(ctx, mdb)
+ if tc.expectedError != nil {
+ assert.EqualError(t, err, tc.expectedError.Error())
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+
+}
+
+func createTLSConfigMap(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) error {
+ if !mdb.Spec.Security.TLS.Enabled {
+ return nil
+ }
 configMap := configmap.Builder().
 SetName(mdb.Spec.Security.TLS.CaConfigMap.Name).
 SetNamespace(mdb.Namespace).
- SetField("ca.crt", "CERT").
+ SetDataField("ca.crt", "CERT").
Build() - err = c.Create(context.TODO(), &configMap) - if err != nil { - return err + return c.Create(ctx, &configMap) +} + +func createTLSSecretWithNamespaceAndName(ctx context.Context, c k8sClient.Client, namespace string, name string, crt string, key string, pem string) error { + sBuilder := secret.Builder(). + SetName(name). + SetNamespace(namespace). + SetField(tlsCACertName, "CERT") + + if crt != "" { + sBuilder.SetField(tlsSecretCertName, crt) + } + if key != "" { + sBuilder.SetField(tlsSecretKeyName, key) } + if pem != "" { + sBuilder.SetField(tlsSecretPemName, pem) + } + + s := sBuilder.Build() + return c.Create(ctx, &s) +} + +func createTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Security.TLS.CertificateKeySecret.Name, crt, key, pem) +} + +func createAgentCertSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificateSecretNamespacedName().Name, crt, key, pem) +} + +func createAgentCertPemSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.AgentCertificatePemSecretNamespacedName().Name, crt, key, pem) +} + +func createPrometheusTLSSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, crt string, key string, pem string) error { + return createTLSSecretWithNamespaceAndName(ctx, c, mdb.Namespace, mdb.Spec.Prometheus.TLSSecretRef.Name, crt, key, pem) +} + +func createUserPasswordSecret(ctx context.Context, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, userPasswordSecretName string, password string) error { + sBuilder := secret.Builder(). + SetName(userPasswordSecretName). + SetNamespace(mdb.Namespace). 
+ SetField("password", password) - return nil + s := sBuilder.Build() + return c.Create(ctx, &s) } diff --git a/controllers/mongodb_users.go b/controllers/mongodb_users.go new file mode 100644 index 000000000..cd99734ba --- /dev/null +++ b/controllers/mongodb_users.go @@ -0,0 +1,91 @@ +package controllers + +import ( + "context" + "fmt" + + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" +) + +// ensureUserResources will check that the configured user password secrets can be found +// and will start monitor them so that the reconcile process is triggered every time these secrets are updated +func (r ReplicaSetReconciler) ensureUserResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + for _, user := range mdb.GetAuthUsers() { + if user.Database != constants.ExternalDB { + secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace} + if _, err := secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName); err != nil { + if apiErrors.IsNotFound(err) { + // check for SCRAM secret as well + scramSecretName := types.NamespacedName{Name: user.ScramCredentialsSecretName, Namespace: mdb.Namespace} + _, err = r.client.GetSecret(ctx, scramSecretName) + if apiErrors.IsNotFound(err) { + return fmt.Errorf(`user password secret: %s and scram secret: %s not found`, secretNamespacedName, scramSecretName) + } + r.log.Errorf(`user password secret "%s" not found: %s`, secretNamespacedName, err) + continue + } + return err + } + r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName()) + } + } + + return nil +} + +// updateConnectionStringSecrets updates secrets where user specific connection strings are stored. +// The client applications can mount these secrets and connect to the mongodb cluster +func (r ReplicaSetReconciler) updateConnectionStringSecrets(ctx context.Context, mdb mdbv1.MongoDBCommunity, clusterDomain string) error { + for _, user := range mdb.GetAuthUsers() { + secretName := user.ConnectionStringSecretName + + secretNamespace := mdb.Namespace + if user.ConnectionStringSecretNamespace != "" { + secretNamespace = user.ConnectionStringSecretNamespace + } + + existingSecret, err := r.client.GetSecret(ctx, types.NamespacedName{ + Name: secretName, + Namespace: secretNamespace, + }) + if err != nil && !apiErrors.IsNotFound(err) { + return err + } + if err == nil && !secret.HasOwnerReferences(existingSecret, mdb.GetOwnerReferences()) { + return fmt.Errorf("connection string secret %s already exists and is not managed by the operator", secretName) + } + + pwd := "" + + if user.Database != constants.ExternalDB { + secretNamespacedName := types.NamespacedName{Name: user.PasswordSecretName, Namespace: mdb.Namespace} + pwd, err = secret.ReadKey(ctx, r.client, user.PasswordSecretKey, secretNamespacedName) + if err != nil { + return err + } + } + + connectionStringSecret := secret.Builder(). + SetName(secretName). + SetNamespace(secretNamespace). + SetField("connectionString.standard", mdb.MongoAuthUserURI(user, pwd, clusterDomain)). + SetField("connectionString.standardSrv", mdb.MongoAuthUserSRVURI(user, pwd, clusterDomain)). + SetField("username", user.Username). + SetField("password", pwd). + SetOwnerReferences(mdb.GetOwnerReferences()). 
+ Build()
+
+ if err := secret.CreateOrUpdate(ctx, r.client, connectionStringSecret); err != nil {
+ return err
+ }
+
+ secretNamespacedName := types.NamespacedName{Name: connectionStringSecret.Name, Namespace: connectionStringSecret.Namespace}
+ r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName())
+ }
+
+ return nil
+}
diff --git a/controllers/prometheus.go b/controllers/prometheus.go
new file mode 100644
index 000000000..cebe939fe
--- /dev/null
+++ b/controllers/prometheus.go
@@ -0,0 +1,79 @@
+package controllers
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+
+ mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1"
+ "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig"
+ "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret"
+
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+ // Keep in sync with api/v1/mongodbcommunity_types.go
+ DefaultPrometheusPort = 9216
+ ListenAddress = "0.0.0.0"
+)
+
+// getPrometheusModification returns a Modification that adds the Prometheus
+// configuration to the automation config.
+func getPrometheusModification(ctx context.Context, getUpdateCreator secret.GetUpdateCreator, mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) {
+ if mdb.Spec.Prometheus == nil {
+ return automationconfig.NOOP(), nil
+ }
+
+ secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace}
+ password, err := secret.ReadKey(ctx, getUpdateCreator, mdb.Spec.Prometheus.GetPasswordKey(), secretNamespacedName)
+ if err != nil {
+ return automationconfig.NOOP(), fmt.Errorf("could not configure Prometheus modification: %s", err)
+ }
+
+ var certKey string
+ var tlsPEMPath string
+ var scheme string
+
+ if mdb.Spec.Prometheus.TLSSecretRef.Name != "" {
+ certKey, err = getPemOrConcatenatedCrtAndKey(ctx, getUpdateCreator, mdb.PrometheusTLSSecretNamespacedName())
+ if err != nil {
+ return automationconfig.NOOP(), err
+ }
+ tlsPEMPath = tlsPrometheusSecretMountPath + tlsOperatorSecretFileName(certKey)
+ scheme = "https"
+ } else {
+ scheme = "http"
+ }
+
+ return func(config *automationconfig.AutomationConfig) {
+ promConfig := automationconfig.NewDefaultPrometheus(mdb.Spec.Prometheus.Username)
+
+ promConfig.TLSPemPath = tlsPEMPath
+ promConfig.Scheme = scheme
+ promConfig.Password = password
+
+ if mdb.Spec.Prometheus.Port > 0 {
+ promConfig.ListenAddress = fmt.Sprintf("%s:%d", ListenAddress, mdb.Spec.Prometheus.Port)
+ }
+
+ if mdb.Spec.Prometheus.MetricsPath != "" {
+ promConfig.MetricsPath = mdb.Spec.Prometheus.MetricsPath
+ }
+
+ config.Prometheus = &promConfig
+ }, nil
+}
+
+// prometheusPort returns a `corev1.ServicePort` to be configured in the StatefulSet
+// for the Prometheus endpoint. This function will only return a new Port when
+// Prometheus has been configured, and nil otherwise.
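
One behavior worth highlighting before prometheusPort below: the listener address. The modification only overrides the listen address when spec.prometheus.port is positive, so the default presumably comes pre-filled by NewDefaultPrometheus (an assumption on my part); 0.0.0.0 and 9216 are the constants declared at the top of this file. A sketch of the effective binding:

package main

import "fmt"

// listenAddress reproduces the effective binding: the CRD port when set,
// otherwise the package default, always bound on all interfaces.
func listenAddress(port int) string {
    if port <= 0 {
        port = 9216 // DefaultPrometheusPort
    }
    return fmt.Sprintf("0.0.0.0:%d", port)
}

func main() {
    fmt.Println(listenAddress(4321)) // 0.0.0.0:4321, the port used in the Prometheus TLS test
    fmt.Println(listenAddress(0))    // 0.0.0.0:9216
}
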
+func prometheusPort(mdb mdbv1.MongoDBCommunity) *corev1.ServicePort { + if mdb.Spec.Prometheus != nil { + return &corev1.ServicePort{ + Port: int32(mdb.Spec.Prometheus.GetPort()), + Name: "prometheus", + } + } + return nil +} diff --git a/controllers/replica_set_controller.go b/controllers/replica_set_controller.go index c97ba8217..cf3e9d526 100644 --- a/controllers/replica_set_controller.go +++ b/controllers/replica_set_controller.go @@ -5,55 +5,51 @@ import ( "encoding/json" "fmt" "os" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/functions" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/agent" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" - - "github.com/pkg/errors" + "strconv" + "strings" "github.com/imdario/mergo" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram" - "github.com/stretchr/objx" - + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" "github.com/mongodb/mongodb-kubernetes-operator/controllers/construct" + "github.com/mongodb/mongodb-kubernetes-operator/controllers/predicates" "github.com/mongodb/mongodb-kubernetes-operator/controllers/validation" "github.com/mongodb/mongodb-kubernetes-operator/controllers/watch" - - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" - - mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/agent" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication" "github.com/mongodb/mongodb-kubernetes-operator/pkg/automationconfig" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" kubernetesClient "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/client" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/podtemplatespec" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/service" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/functions" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/merge" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/result" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/scale" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/status" + "github.com/stretchr/objx" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( - clusterDNSName = "CLUSTER_DNS_NAME" + clusterDomain = "CLUSTER_DOMAIN" lastSuccessfulConfiguration = "mongodb.com/v1.lastSuccessfulConfiguration" + lastAppliedMongoDBVersion = "mongodb.com/v1.lastAppliedMongoDBVersion" ) func init() { @@ -64,22 +60,34 @@ func 
init() { zap.ReplaceGlobals(logger) } -func NewReconciler(mgr manager.Manager) *ReplicaSetReconciler { +func NewReconciler(mgr manager.Manager, mongodbRepoUrl, mongodbImage, mongodbImageType, agentImage, versionUpgradeHookImage, readinessProbeImage string) *ReplicaSetReconciler { mgrClient := mgr.GetClient() secretWatcher := watch.New() - + configMapWatcher := watch.New() return &ReplicaSetReconciler{ - client: kubernetesClient.NewClient(mgrClient), - scheme: mgr.GetScheme(), - log: zap.S(), - secretWatcher: &secretWatcher, + client: kubernetesClient.NewClient(mgrClient), + scheme: mgr.GetScheme(), + log: zap.S(), + secretWatcher: &secretWatcher, + configMapWatcher: &configMapWatcher, + + mongodbRepoUrl: mongodbRepoUrl, + mongodbImage: mongodbImage, + mongodbImageType: mongodbImageType, + agentImage: agentImage, + versionUpgradeHookImage: versionUpgradeHookImage, + readinessProbeImage: readinessProbeImage, } } // SetupWithManager sets up the controller with the Manager and configures the necessary watches. func (r *ReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&mdbv1.MongoDBCommunity{}). + WithOptions(controller.Options{MaxConcurrentReconciles: 3}). + For(&mdbv1.MongoDBCommunity{}, builder.WithPredicates(predicates.OnlyOnSpecChange())). + Watches(&corev1.Secret{}, r.secretWatcher). + Watches(&corev1.ConfigMap{}, r.configMapWatcher). + Owns(&appsv1.StatefulSet{}). Complete(r) } @@ -87,10 +95,18 @@ func (r *ReplicaSetReconciler) SetupWithManager(mgr ctrl.Manager) error { type ReplicaSetReconciler struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver - client kubernetesClient.Client - scheme *runtime.Scheme - log *zap.SugaredLogger - secretWatcher *watch.ResourceWatcher + client kubernetesClient.Client + scheme *runtime.Scheme + log *zap.SugaredLogger + secretWatcher *watch.ResourceWatcher + configMapWatcher *watch.ResourceWatcher + + mongodbRepoUrl string + mongodbImage string + mongodbImageType string + agentImage string + versionUpgradeHookImage string + readinessProbeImage string } // +kubebuilder:rbac:groups=mongodbcommunity.mongodb.com,resources=mongodbcommunity,verbs=get;list;watch;create;update;patch;delete @@ -109,7 +125,7 @@ func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.R // TODO: generalize preparation for resource // Fetch the MongoDB instance mdb := mdbv1.MongoDBCommunity{} - err := r.client.Get(context.TODO(), request.NamespacedName, &mdb) + err := r.client.Get(ctx, request.NamespacedName, &mdb) if err != nil { if apiErrors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. @@ -123,120 +139,124 @@ func (r ReplicaSetReconciler) Reconcile(ctx context.Context, request reconcile.R } r.log = zap.S().With("ReplicaSet", request.NamespacedName) - r.log.Infow("Reconciling MongoDB", "MongoDB.Spec", mdb.Spec, "MongoDB.Status", mdb.Status) + r.log.Infof("Reconciling MongoDB") r.log.Debug("Validating MongoDB.Spec") - if err := r.validateUpdate(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("error validating new Spec: %s", err)). - withFailedPhase(), - ) + lastAppliedSpec, err := r.validateSpec(mdb) + if err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("error validating new Spec: %s", err)). 
+ withFailedPhase()) } r.log.Debug("Ensuring the service exists") - if err := r.ensureService(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring the service exists: %s", err)). - withFailedPhase(), - ) + if err := r.ensureService(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring the service (members) exists: %s", err)). + withFailedPhase()) } - isTLSValid, err := r.validateTLSConfig(mdb) + isTLSValid, err := r.validateTLSConfig(ctx, mdb) if err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)). - withFailedPhase(), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error validating TLS config: %s", err)). + withFailedPhase()) } if !isTLSValid { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds"). - withPendingPhase(10), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Info, "TLS config is not yet valid, retrying in 10 seconds"). + withPendingPhase(10)) + } + + if err := r.ensureTLSResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). + withFailedPhase()) + } + + if err := r.ensurePrometheusTLSResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). + withFailedPhase()) } - if err := r.ensureTLSResources(mdb); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error ensuring TLS resources: %s", err)). - withFailedPhase(), - ) + if err := r.ensureUserResources(ctx, mdb); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error ensuring User config: %s", err)). + withFailedPhase()) } - ready, err := r.deployMongoDBReplicaSet(mdb) + ready, err := r.deployMongoDBReplicaSet(ctx, mdb, lastAppliedSpec) if err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error deploying MongoDB ReplicaSet: %s", err)). - withFailedPhase(), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error deploying MongoDB ReplicaSet: %s", err)). + withFailedPhase()) } if !ready { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Info, "ReplicaSet is not yet ready, retrying in 10 seconds"). - withPendingPhase(10), - ) + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Info, "ReplicaSet is not yet ready, retrying in 10 seconds"). + withPendingPhase(10)) } r.log.Debug("Resetting StatefulSet UpdateStrategy to RollingUpdate") - if err := statefulset.ResetUpdateStrategy(&mdb, r.client); err != nil { - return status.Update(r.client.Status(), &mdb, - statusOptions(). - withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). 
- withFailedPhase(), - ) + if err := statefulset.ResetUpdateStrategy(ctx, &mdb, r.client); err != nil { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMessage(Error, fmt.Sprintf("Error resetting StatefulSet UpdateStrategyType: %s", err)). + withFailedPhase()) } - if scale.IsStillScaling(mdb) { - return status.Update(r.client.Status(), &mdb, statusOptions(). + if mdb.IsStillScaling() { + return status.Update(ctx, r.client.Status(), &mdb, statusOptions(). withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). withMessage(Info, fmt.Sprintf("Performing scaling operation, currentMembers=%d, desiredMembers=%d", mdb.CurrentReplicas(), mdb.DesiredReplicas())). withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). - withPendingPhase(10), - ) - } - - res, err := status.Update(r.client.Status(), &mdb, - statusOptions(). - withMongoURI(mdb.MongoURI()). - withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). - withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). - withMessage(None, ""). - withRunningPhase(), - ) + withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()). + withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()). + withPendingPhase(10)) + } + + res, err := status.Update(ctx, r.client.Status(), &mdb, statusOptions(). + withMongoURI(mdb.MongoURI(os.Getenv(clusterDomain))). // nolint:forbidigo + withMongoDBMembers(mdb.AutomationConfigMembersThisReconciliation()). + withStatefulSetReplicas(mdb.StatefulSetReplicasThisReconciliation()). + withStatefulSetArbiters(mdb.StatefulSetArbitersThisReconciliation()). + withMongoDBArbiters(mdb.AutomationConfigArbitersThisReconciliation()). + withMessage(None, ""). + withRunningPhase(). + withVersion(mdb.GetMongoDBVersion())) if err != nil { r.log.Errorf("Error updating the status of the MongoDB resource: %s", err) return res, err } - // the last version will be duplicated in two annotations. 
- // This is needed to reuse the update strategy logic in enterprise - if err := annotations.UpdateLastAppliedMongoDBVersion(&mdb, r.client); err != nil { - r.log.Errorf("Could not save current version as an annotation: %s", err) + if err := r.updateConnectionStringSecrets(ctx, mdb, os.Getenv(clusterDomain)); err != nil { // nolint:forbidigo + r.log.Errorf("Could not update connection string secrets: %s", err) } - if err := r.updateLastSuccessfulConfiguration(mdb); err != nil { + + if lastAppliedSpec != nil { + r.cleanupScramSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace) + r.cleanupPemSecret(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace) + r.cleanupConnectionStringSecrets(ctx, mdb.Spec, *lastAppliedSpec, mdb.Namespace, mdb.Name) + } + + if err := r.updateLastSuccessfulConfiguration(ctx, mdb); err != nil { r.log.Errorf("Could not save current spec as an annotation: %s", err) } if res.RequeueAfter > 0 || res.Requeue { - r.log.Infow("Requeuing reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status:", mdb.Status) + r.log.Info("Requeuing reconciliation") return res, nil } - r.log.Infow("Successfully finished reconciliation", "MongoDB.Spec:", mdb.Spec, "MongoDB.Status:", mdb.Status) + r.log.Infof("Successfully finished reconciliation, MongoDB.Spec: %+v, MongoDB.Status: %+v", mdb.Spec, mdb.Status) return res, err } // updateLastSuccessfulConfiguration annotates the MongoDBCommunity resource with the latest configuration -func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(mdb mdbv1.MongoDBCommunity) error { +func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { currentSpec, err := json.Marshal(mdb.Spec) if err != nil { return err @@ -244,67 +264,97 @@ func (r *ReplicaSetReconciler) updateLastSuccessfulConfiguration(mdb mdbv1.Mongo specAnnotations := map[string]string{ lastSuccessfulConfiguration: string(currentSpec), + // the last version will be duplicated in two annotations. + // This is needed to reuse the update strategy logic in enterprise + lastAppliedMongoDBVersion: mdb.Spec.Version, } - return annotations.SetAnnotations(&mdb, specAnnotations, r.client) + return annotations.SetAnnotations(ctx, &mdb, specAnnotations, r.client) } // ensureTLSResources creates any required TLS resources that the MongoDBCommunity // requires for TLS configuration. -func (r *ReplicaSetReconciler) ensureTLSResources(mdb mdbv1.MongoDBCommunity) error { +func (r *ReplicaSetReconciler) ensureTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { if !mdb.Spec.Security.TLS.Enabled { return nil } // the TLS secret needs to be created beforehand, as both the StatefulSet and AutomationConfig // require the contents. 
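// --- Hedged sketch (not part of the PR) of the "PEM or concatenated crt and key" convention
// that getPemOrConcatenatedCrtAndKey, used earlier for the Prometheus TLS secret, refers to:
// the operator can consume either a ready-made PEM entry or the standard kubernetes.io/tls
// "tls.crt"/"tls.key" pair and join the two itself. The key names below are illustrative
// assumptions, not the PR's constants.
func pemFromSecretData(data map[string][]byte) (string, bool) {
	if pem, ok := data["tls.pem"]; ok {
		return string(pem), true
	}
	crt, okCrt := data["tls.crt"]
	key, okKey := data["tls.key"]
	if okCrt && okKey {
		// a PEM bundle is simply the certificate followed by the private key
		return string(crt) + string(key), true
	}
	return "", false
}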
if mdb.Spec.Security.TLS.Enabled { + r.log.Infof("TLS is enabled, creating/updating CA secret") + if err := ensureCASecret(ctx, r.client, r.client, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure CA secret: %s", err) + } r.log.Infof("TLS is enabled, creating/updating TLS secret") - if err := ensureTLSSecret(r.client, mdb); err != nil { - return errors.Errorf("could not ensure TLS secret: %s", err) + if err := ensureTLSSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure TLS secret: %s", err) } + if mdb.Spec.IsAgentX509() { + r.log.Infof("Agent X509 authentication is enabled, creating/updating agent certificate secret") + if err := ensureAgentCertSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure Agent Certificate secret: %s", err) + } + } + } + return nil +} + +// ensurePrometheusTLSResources creates any required TLS resources that the MongoDBCommunity +// requires for its Prometheus TLS configuration. +func (r *ReplicaSetReconciler) ensurePrometheusTLSResources(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.Prometheus == nil || mdb.Spec.Prometheus.TLSSecretRef.Name == "" { + return nil + } + + // the TLS secret needs to be created beforehand, as both the StatefulSet and AutomationConfig + // require the contents. + r.log.Infof("Prometheus TLS is enabled, creating/updating TLS secret") + if err := ensurePrometheusTLSSecret(ctx, r.client, mdb); err != nil { + return fmt.Errorf("could not ensure Prometheus TLS secret: %s", err) } + return nil } // deployStatefulSet deploys the backing StatefulSet of the MongoDBCommunity resource. +// +// When `Spec.Arbiters` > 0, a second StatefulSet will be created, with the number +// of Pods corresponding to the number of expected arbiters. +// // The returned boolean indicates whether the StatefulSet is ready. -func (r *ReplicaSetReconciler) deployStatefulSet(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) deployStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity) (bool, error) { r.log.Info("Creating/Updating StatefulSet") - if err := r.createOrUpdateStatefulSet(mdb); err != nil { - return false, errors.Errorf("error creating/updating StatefulSet: %s", err) + if err := r.createOrUpdateStatefulSet(ctx, mdb, false); err != nil { + return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) + } + + r.log.Info("Creating/Updating StatefulSet for Arbiters") + if err := r.createOrUpdateStatefulSet(ctx, mdb, true); err != nil { + return false, fmt.Errorf("error creating/updating StatefulSet: %s", err) } - currentSts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + currentSts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) if err != nil { - return false, errors.Errorf("error getting StatefulSet: %s", err) + return false, fmt.Errorf("error getting StatefulSet: %s", err) } r.log.Debugf("Ensuring StatefulSet is ready, with type: %s", mdb.GetUpdateStrategyType()) isReady := statefulset.IsReady(currentSts, mdb.StatefulSetReplicasThisReconciliation()) - if isReady { - r.log.Infow("StatefulSet is ready", - "replicas", currentSts.Spec.Replicas, - "generation", currentSts.Generation, - "observedGeneration", currentSts.Status.ObservedGeneration, - "updateStrategy", currentSts.Spec.UpdateStrategy.Type, - ) - } - return isReady || currentSts.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType, nil } // deployAutomationConfig deploys the AutomationConfig for the MongoDBCommunity resource.
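// --- Hedged sketch (illustrative, not the library's code) of what a readiness check such as
// statefulset.IsReady, used above, has to cover: the controller must have observed the latest
// spec, and every expected replica must be both updated and ready.
func isStsReady(sts appsv1.StatefulSet, expectedReplicas int) bool {
	allUpdated := int32(expectedReplicas) == sts.Status.UpdatedReplicas
	allReady := int32(expectedReplicas) == sts.Status.ReadyReplicas
	return allUpdated && allReady && sts.Generation == sts.Status.ObservedGeneration
}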
// The returned boolean indicates whether the Agents have all reached goal state. -func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity) (bool, error) { +func (r *ReplicaSetReconciler) deployAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { r.log.Infof("Creating/Updating AutomationConfig") - sts, err := r.client.GetStatefulSet(mdb.NamespacedName()) + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) if err != nil && !apiErrors.IsNotFound(err) { return false, fmt.Errorf("failed to get StatefulSet: %s", err) } - ac, err := r.ensureAutomationConfig(mdb) + ac, err := r.ensureAutomationConfig(mdb, ctx, lastAppliedSpec) if err != nil { return false, fmt.Errorf("failed to ensure AutomationConfig: %s", err) } @@ -323,7 +373,7 @@ func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity r.log.Debugf("Waiting for agents to reach version %d", ac.Version) // Note: we pass in the expected number of replicas this reconciliation as we scale members one at a time. If we were // to pass in the final member count, we would be waiting for agents that do not exist yet to be ready. - ready, err := agent.AllReachedGoalState(sts, r.client, mdb.StatefulSetReplicasThisReconciliation(), ac.Version, r.log) + ready, err := agent.AllReachedGoalState(ctx, sts, r.client, mdb.StatefulSetReplicasThisReconciliation(), ac.Version, r.log) if err != nil { return false, fmt.Errorf("failed to ensure agents have reached goal state: %s", err) } @@ -333,21 +383,29 @@ func (r *ReplicaSetReconciler) deployAutomationConfig(mdb mdbv1.MongoDBCommunity // shouldRunInOrder returns true if the AutomationConfig and StatefulSet deployment functions // should run in the given order, and false if they should run in reversed order. -func (r *ReplicaSetReconciler) shouldRunInOrder(mdb mdbv1.MongoDBCommunity) bool { +func (r *ReplicaSetReconciler) shouldRunInOrder(ctx context.Context, mdb mdbv1.MongoDBCommunity) bool { // The only case when we push the StatefulSet first is when we are ensuring TLS for the already existing ReplicaSet - _, err := r.client.GetStatefulSet(mdb.NamespacedName()) + sts, err := r.client.GetStatefulSet(ctx, mdb.NamespacedName()) + if !statefulset.IsReady(sts, mdb.StatefulSetReplicasThisReconciliation()) && mdb.Spec.Security.TLS.Enabled { + r.log.Debug("Enabling TLS on a deployment with a StatefulSet that is not Ready, the Automation Config must be updated first") + return true + } if err == nil && mdb.Spec.Security.TLS.Enabled { r.log.Debug("Enabling TLS on an existing deployment, the StatefulSet must be updated first") return false } // if we are scaling up, we need to make sure the StatefulSet is scaled up first.
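// --- Hedged sketch (illustrative only) of the goal-state check behind agent.AllReachedGoalState:
// each pod advertises the automation config version its agent has reached via the
// "agent.mongodb.com/version" pod annotation (the tests further down create pods with exactly
// this annotation), and the reconciler waits until every expected pod reports the target version.
func allPodsAtGoalState(pods []corev1.Pod, targetVersion int) bool {
	for _, pod := range pods {
		v, err := strconv.Atoi(pod.Annotations["agent.mongodb.com/version"])
		if err != nil || v < targetVersion {
			return false
		}
	}
	return true
}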
- if scale.IsScalingUp(mdb) { + if scale.IsScalingUp(&mdb) || mdb.CurrentArbiters() < mdb.DesiredArbiters() { + if scale.HasZeroReplicas(&mdb) { + r.log.Debug("Scaling up the ReplicaSet when there are no replicas, the Automation Config must be updated first") + return true + } r.log.Debug("Scaling up the ReplicaSet, the StatefulSet must be updated first") return false } - if scale.IsScalingDown(mdb) { + if scale.IsScalingDown(&mdb) { r.log.Debug("Scaling down the ReplicaSet, the Automation Config must be updated first") return true } @@ -365,112 +423,211 @@ func (r *ReplicaSetReconciler) shouldRunInOrder(mdb mdbv1.MongoDBCommunity) bool // deployMongoDBReplicaSet will ensure that both the AutomationConfig secret and backing StatefulSet // have been successfully created. A boolean is returned indicating if the process is complete // and an error if there was one. -func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(mdb mdbv1.MongoDBCommunity) (bool, error) { - return functions.RunSequentially(r.shouldRunInOrder(mdb), +func (r *ReplicaSetReconciler) deployMongoDBReplicaSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (bool, error) { + return functions.RunSequentially(r.shouldRunInOrder(ctx, mdb), func() (bool, error) { - return r.deployAutomationConfig(mdb) + return r.deployAutomationConfig(ctx, mdb, lastAppliedSpec) }, func() (bool, error) { - return r.deployStatefulSet(mdb) + return r.deployStatefulSet(ctx, mdb) }) } -func (r *ReplicaSetReconciler) ensureService(mdb mdbv1.MongoDBCommunity) error { - svc := buildService(mdb) - err := r.client.Create(context.TODO(), &svc) - if err != nil && apiErrors.IsAlreadyExists(err) { - r.log.Infof("The service already exists... moving forward: %s", err) +// ensureService creates the Service if it does not yet exist and otherwise updates it in place. +// +// The Service definition is built from the `mdb` resource together with the per-process ports +// reported by the ReplicaSetPortManager. +func (r *ReplicaSetReconciler) ensureService(ctx context.Context, mdb mdbv1.MongoDBCommunity) error { + processPortManager, err := r.createProcessPortManager(ctx, mdb) + if err != nil { + return err + } + + svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: mdb.ServiceName(), Namespace: mdb.Namespace}} + op, err := controllerutil.CreateOrUpdate(ctx, r.client, svc, func() error { + resourceVersion := svc.ResourceVersion // Save resourceVersion for later + *svc = r.buildService(mdb, processPortManager) + svc.ResourceVersion = resourceVersion + return nil + }) + if err != nil { + r.log.Errorf("Could not create or patch the service: %s", err) + return nil + } + + r.log.Infow("Create/Update operation succeeded", "operation", op) + return err } -func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(mdb mdbv1.MongoDBCommunity) error { +// createProcessPortManager is a helper method for creating a new ReplicaSetPortManager. +// The ReplicaSetPortManager needs the current automation config and the current pod state; the code +// for getting them was extracted here because it is used in both ensureService and buildAutomationConfig. +// (An illustrative sketch of the port rollout it manages follows.)
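// --- Hedged sketch (illustrative, not the PR's implementation) of the one-port-at-a-time rollout
// that the ReplicaSetPortManager performs; the port-change tests below exercise this behaviour.
// Moving at most one process per reconcile keeps a majority of members reachable throughout.
func nextPortChange(currentPorts []int, desiredPort int) []int {
	next := append([]int{}, currentPorts...)
	for i, port := range next {
		if port != desiredPort {
			next[i] = desiredPort // switch exactly one process, then wait for agents to reach goal state
			break
		}
	}
	return next
}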
+func (r *ReplicaSetReconciler) createProcessPortManager(ctx context.Context, mdb mdbv1.MongoDBCommunity) (*agent.ReplicaSetPortManager, error) { + currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + if err != nil { + return nil, fmt.Errorf("could not read existing automation config: %s", err) + } + + currentPodStates, err := agent.GetAllDesiredMembersAndArbitersPodState(ctx, mdb.NamespacedName(), r.client, mdb.StatefulSetReplicasThisReconciliation(), mdb.StatefulSetArbitersThisReconciliation(), currentAC.Version, r.log) + if err != nil { + return nil, fmt.Errorf("cannot get all pods goal state: %w", err) + } + + return agent.NewReplicaSetPortManager(r.log, mdb.Spec.AdditionalMongodConfig.GetDBPort(), currentPodStates, currentAC.Processes), nil +} + +func (r *ReplicaSetReconciler) createOrUpdateStatefulSet(ctx context.Context, mdb mdbv1.MongoDBCommunity, isArbiter bool) error { set := appsv1.StatefulSet{} - err := r.client.Get(context.TODO(), mdb.NamespacedName(), &set) + + name := mdb.NamespacedName() + if isArbiter { + name = mdb.ArbiterNamespacedName() + } + + err := r.client.Get(ctx, name, &set) err = k8sClient.IgnoreNotFound(err) if err != nil { - return errors.Errorf("error getting StatefulSet: %s", err) + return fmt.Errorf("error getting StatefulSet: %s", err) } - buildStatefulSetModificationFunction(mdb)(&set) - if _, err = statefulset.CreateOrUpdate(r.client, set); err != nil { - return errors.Errorf("error creating/updating StatefulSet: %s", err) + + mongodbImage := getMongoDBImage(r.mongodbRepoUrl, r.mongodbImage, r.mongodbImageType, mdb.GetMongoDBVersion()) + buildStatefulSetModificationFunction(mdb, mongodbImage, r.agentImage, r.versionUpgradeHookImage, r.readinessProbeImage)(&set) + if isArbiter { + buildArbitersModificationFunction(mdb)(&set) + } + + if _, err = statefulset.CreateOrUpdate(ctx, r.client, set); err != nil { + return fmt.Errorf("error creating/updating StatefulSet: %s", err) } return nil } // ensureAutomationConfig makes sure the AutomationConfig secret has been successfully created. The automation config // that was updated/created is returned. 
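// --- Hedged sketch (an illustrative assumption, not the library's code) of the versioning
// behaviour that automationconfig.EnsureSecret provides and the tests below rely on: the stored
// automation config version is bumped only when the newly built config actually differs, so an
// unchanged reconcile keeps the same version.
func nextACVersion(stored, built automationconfig.AutomationConfig) int {
	built.Version = stored.Version // ignore the version field itself when comparing
	if reflect.DeepEqual(stored, built) {
		return stored.Version
	}
	return stored.Version + 1
}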
-func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDBCommunity) (automationconfig.AutomationConfig, error) { - ac, err := r.buildAutomationConfig(mdb) +func (r ReplicaSetReconciler) ensureAutomationConfig(mdb mdbv1.MongoDBCommunity, ctx context.Context, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) { + ac, err := r.buildAutomationConfig(ctx, mdb, lastAppliedSpec) if err != nil { - return automationconfig.AutomationConfig{}, errors.Errorf("could not build automation config: %s", err) + return automationconfig.AutomationConfig{}, fmt.Errorf("could not build automation config: %s", err) } - return automationconfig.EnsureSecret( - r.client, - types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, - []metav1.OwnerReference{getOwnerReference(mdb)}, - ac, - ) - + return automationconfig.EnsureSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, mdb.GetOwnerReferences(), ac) } -func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, auth automationconfig.Auth, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) { - domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDNSName)) +func buildAutomationConfig(mdb mdbv1.MongoDBCommunity, isEnterprise bool, auth automationconfig.Auth, currentAc automationconfig.AutomationConfig, modifications ...automationconfig.Modification) (automationconfig.AutomationConfig, error) { + domain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo + arbiterDomain := getDomain(mdb.ServiceName(), mdb.Namespace, os.Getenv(clusterDomain)) // nolint:forbidigo + zap.S().Debugw("AutomationConfigMembersThisReconciliation", "mdb.AutomationConfigMembersThisReconciliation()", mdb.AutomationConfigMembersThisReconciliation()) + arbitersCount := mdb.AutomationConfigArbitersThisReconciliation() + if mdb.AutomationConfigMembersThisReconciliation() < mdb.Spec.Members { + // Have not reached desired amount of members yet, should not scale arbiters + arbitersCount = mdb.Status.CurrentMongoDBArbiters + } + + var acOverrideSettings map[string]interface{} + var acReplicaSetId *string + if mdb.Spec.AutomationConfigOverride != nil { + acOverrideSettings = mdb.Spec.AutomationConfigOverride.ReplicaSet.Settings.Object + acReplicaSetId = mdb.Spec.AutomationConfigOverride.ReplicaSet.Id + } + return automationconfig.NewBuilder(). + IsEnterprise(isEnterprise). SetTopology(automationconfig.ReplicaSetTopology). SetName(mdb.Name). SetDomain(domain). + SetArbiterDomain(arbiterDomain). SetMembers(mdb.AutomationConfigMembersThisReconciliation()). + SetArbiters(arbitersCount). SetReplicaSetHorizons(mdb.Spec.ReplicaSetHorizons). SetPreviousAutomationConfig(currentAc). SetMongoDBVersion(mdb.Spec.Version). SetFCV(mdb.Spec.FeatureCompatibilityVersion). SetOptions(automationconfig.Options{DownloadBase: "/var/lib/mongodb-mms-automation"}). SetAuth(auth). + SetReplicaSetId(acReplicaSetId). + SetSettings(acOverrideSettings). + SetMemberOptions(mdb.Spec.MemberConfig). + SetDataDir(mdb.GetMongodConfiguration().GetDBDataDir()). AddModifications(getMongodConfigModification(mdb)). AddModifications(modifications...). 
+ AddProcessModification(func(_ int, p *automationconfig.Process) { + automationconfig.ConfigureAgentConfiguration(mdb.Spec.AgentConfiguration.SystemLog, mdb.Spec.AgentConfiguration.LogRotate, mdb.Spec.AgentConfiguration.AuditLogRotate, p) + }). Build() } +// guessEnterprise determines whether the operator should treat the deployed image as an enterprise +// MongoDB image: an explicit env override wins, then any mongod container image override from the +// StatefulSet spec, then the operator-wide configured image name. +func guessEnterprise(mdb mdbv1.MongoDBCommunity, mongodbImage string) bool { + overrideAssumption, err := strconv.ParseBool(os.Getenv(construct.MongoDBAssumeEnterpriseEnv)) // nolint:forbidigo + if err == nil { + return overrideAssumption + } + + var overriddenImage string + containers := mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Template.Spec.Containers + if len(containers) > 0 { + for _, c := range containers { + if c.Name == construct.MongodbName { + if len(c.Image) > 0 { + overriddenImage = c.Image + } + } + } + } + if len(overriddenImage) > 0 { + return strings.Contains(overriddenImage, construct.OfficialMongodbEnterpriseServerImageName) + } + return mongodbImage == construct.OfficialMongodbEnterpriseServerImageName +} + // buildService creates a Service that will be used for the Replica Set StatefulSet // that allows all the members of the STS to see each other. -// TODO: Make sure this Service is as minimal as possible, to not interfere with -// future implementations and Service Discovery mechanisms we might implement. -func buildService(mdb mdbv1.MongoDBCommunity) corev1.Service { +func (r *ReplicaSetReconciler) buildService(mdb mdbv1.MongoDBCommunity, portManager *agent.ReplicaSetPortManager) corev1.Service { label := make(map[string]string) - label["app"] = mdb.ServiceName() - return service.Builder(). - SetName(mdb.ServiceName()). + name := mdb.ServiceName() + + label["app"] = name + + serviceBuilder := service.Builder(). + SetName(name). SetNamespace(mdb.Namespace). SetSelector(label). + SetLabels(label). SetServiceType(corev1.ServiceTypeClusterIP). SetClusterIP("None"). - SetPort(27017). SetPublishNotReadyAddresses(true). - Build() + SetOwnerReferences(mdb.GetOwnerReferences()) + + for _, servicePort := range portManager.GetServicePorts() { + tmpServicePort := servicePort + serviceBuilder.AddPort(&tmpServicePort) + } + + serviceBuilder.AddPort(prometheusPort(mdb)) + + return serviceBuilder.Build() } -// validateUpdate validates that the new Spec, corresponding to the existing one -// is still valid. If there is no a previous Spec, then the function assumes this is -// the first version of the MongoDB resource and skips. -func (r ReplicaSetReconciler) validateUpdate(mdb mdbv1.MongoDBCommunity) error { +// validateSpec checks if the MongoDB resource Spec is valid. +// If there has not yet been a successful configuration, the function runs the initial Spec validations. Otherwise, +// it checks that the attempted Spec is valid in relation to the Spec that resulted from that last successful configuration. +// The validation also returns the last successful configuration Spec, decoded as a mdbv1.MongoDBCommunitySpec. +// (An illustrative sketch of this annotation round-trip follows.)
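// --- Hedged sketch (lastAppliedFromAnnotations is illustrative, not the PR's code) of the
// annotation round-trip that validateSpec relies on: the spec that last reconciled successfully
// is stored as JSON under the "mongodb.com/v1.lastSuccessfulConfiguration" annotation and decoded
// again on the next reconcile so the attempted update can be validated against it.
func lastAppliedFromAnnotations(ann map[string]string) (*mdbv1.MongoDBCommunitySpec, error) {
	raw, ok := ann[lastSuccessfulConfiguration]
	if !ok {
		return nil, nil // first reconcile: nothing has been saved yet
	}
	spec := mdbv1.MongoDBCommunitySpec{}
	if err := json.Unmarshal([]byte(raw), &spec); err != nil {
		return nil, err
	}
	return &spec, nil
}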
+func (r ReplicaSetReconciler) validateSpec(mdb mdbv1.MongoDBCommunity) (*mdbv1.MongoDBCommunitySpec, error) { lastSuccessfulConfigurationSaved, ok := mdb.Annotations[lastSuccessfulConfiguration] if !ok { - // First version of Spec, no need to validate - return nil + // First version of Spec + return nil, validation.ValidateInitialSpec(mdb, r.log) } - prevSpec := mdbv1.MongoDBCommunitySpec{} - err := json.Unmarshal([]byte(lastSuccessfulConfigurationSaved), &prevSpec) + lastSpec := mdbv1.MongoDBCommunitySpec{} + err := json.Unmarshal([]byte(lastSuccessfulConfigurationSaved), &lastSpec) if err != nil { - return err + return &lastSpec, err } - return validation.Validate(prevSpec, mdb.Spec) + return &lastSpec, validation.ValidateUpdate(mdb, lastSpec, r.log) } func getCustomRolesModification(mdb mdbv1.MongoDBCommunity) (automationconfig.Modification, error) { @@ -484,34 +641,90 @@ func getCustomRolesModification(mdb mdbv1.MongoDBCommunity) (automationconfig.Mo }, nil } -func (r ReplicaSetReconciler) buildAutomationConfig(mdb mdbv1.MongoDBCommunity) (automationconfig.AutomationConfig, error) { - tlsModification, err := getTLSConfigModification(r.client, mdb) +func (r ReplicaSetReconciler) buildAutomationConfig(ctx context.Context, mdb mdbv1.MongoDBCommunity, lastAppliedSpec *mdbv1.MongoDBCommunitySpec) (automationconfig.AutomationConfig, error) { + tlsModification, err := getTLSConfigModification(ctx, r.client, r.client, mdb) if err != nil { - return automationconfig.AutomationConfig{}, errors.Errorf("could not configure TLS modification: %s", err) + return automationconfig.AutomationConfig{}, fmt.Errorf("could not configure TLS modification: %s", err) } customRolesModification, err := getCustomRolesModification(mdb) if err != nil { - return automationconfig.AutomationConfig{}, errors.Errorf("could not configure custom roles: %s", err) + return automationconfig.AutomationConfig{}, fmt.Errorf("could not configure custom roles: %s", err) } - currentAC, err := automationconfig.ReadFromSecret(r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAC, err := automationconfig.ReadFromSecret(ctx, r.client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) if err != nil { - return automationconfig.AutomationConfig{}, errors.Errorf("could not read existing automation config: %s", err) + return automationconfig.AutomationConfig{}, fmt.Errorf("could not read existing automation config: %s", err) } auth := automationconfig.Auth{} - if err := scram.Enable(&auth, r.client, mdb); err != nil { - return automationconfig.AutomationConfig{}, errors.Errorf("could not configure scram authentication: %s", err) + if err := authentication.Enable(ctx, &auth, r.client, &mdb, mdb.AgentCertificateSecretNamespacedName()); err != nil { + return automationconfig.AutomationConfig{}, err } - return buildAutomationConfig( + if lastAppliedSpec != nil { + authentication.AddRemovedUsers(&auth, mdb, lastAppliedSpec) + } + + prometheusModification := automationconfig.NOOP() + if mdb.Spec.Prometheus != nil { + secretNamespacedName := types.NamespacedName{Name: mdb.Spec.Prometheus.PasswordSecretRef.Name, Namespace: mdb.Namespace} + r.secretWatcher.Watch(ctx, secretNamespacedName, mdb.NamespacedName()) + + prometheusModification, err = getPrometheusModification(ctx, r.client, mdb) + if err != nil { + return automationconfig.AutomationConfig{}, fmt.Errorf("could not enable TLS on Prometheus endpoint: %s", err) + } + } + + if mdb.Spec.IsAgentX509() { 
+ r.secretWatcher.Watch(ctx, mdb.AgentCertificateSecretNamespacedName(), mdb.NamespacedName()) + r.secretWatcher.Watch(ctx, mdb.AgentCertificatePemSecretNamespacedName(), mdb.NamespacedName()) + } + + processPortManager, err := r.createProcessPortManager(ctx, mdb) + if err != nil { + return automationconfig.AutomationConfig{}, err + } + + automationConfig, err := buildAutomationConfig( mdb, + guessEnterprise(mdb, r.mongodbImage), auth, currentAC, tlsModification, customRolesModification, + prometheusModification, + processPortManager.GetPortsModification(), ) + + if err != nil { + return automationconfig.AutomationConfig{}, fmt.Errorf("could not create an automation config: %s", err) + } + + if mdb.Spec.AutomationConfigOverride != nil { + automationConfig = merge.AutomationConfigs(automationConfig, OverrideToAutomationConfig(*mdb.Spec.AutomationConfigOverride)) + } + + return automationConfig, nil +} + +// OverrideToAutomationConfig turns an automation config override from the resource spec into an automation config +// which can be used to merge. +func OverrideToAutomationConfig(override mdbv1.AutomationConfigOverride) automationconfig.AutomationConfig { + var processes []automationconfig.Process + for _, o := range override.Processes { + p := automationconfig.Process{ + Name: o.Name, + Disabled: o.Disabled, + LogRotate: automationconfig.ConvertCrdLogRotateToAC(o.LogRotate), + } + processes = append(processes, p) + } + + return automationconfig.AutomationConfig{ + Processes: processes, + } } // getMongodConfigModification will merge the additional configuration in the CRD @@ -526,35 +739,35 @@ func getMongodConfigModification(mdb mdbv1.MongoDBCommunity) automationconfig.Mo } } -// buildStatefulSet takes a MongoDB resource and converts it into +// buildStatefulSetModificationFunction takes a MongoDB resource and converts it into // the corresponding stateful set -func buildStatefulSet(mdb mdbv1.MongoDBCommunity) (appsv1.StatefulSet, error) { - sts := appsv1.StatefulSet{} - buildStatefulSetModificationFunction(mdb)(&sts) - return sts, nil -} - -func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity) statefulset.Modification { - commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, mdb) +func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string) statefulset.Modification { + commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage, true) return statefulset.Apply( commonModification, - statefulset.WithOwnerReference([]metav1.OwnerReference{getOwnerReference(mdb)}), + statefulset.WithOwnerReference(mdb.GetOwnerReferences()), statefulset.WithPodSpecTemplate( podtemplatespec.Apply( buildTLSPodSpecModification(mdb), + buildTLSPrometheus(mdb), + buildAgentX509(mdb), ), ), statefulset.WithCustomSpecs(mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec), + statefulset.WithObjectMetadata( + mdb.Spec.StatefulSetConfiguration.MetadataWrapper.Labels, + mdb.Spec.StatefulSetConfiguration.MetadataWrapper.Annotations, + ), ) } -func getOwnerReference(mdb mdbv1.MongoDBCommunity) metav1.OwnerReference { - return *metav1.NewControllerRef(&mdb, schema.GroupVersionKind{ - Group: mdbv1.GroupVersion.Group, - Version: mdbv1.GroupVersion.Version, - Kind: mdb.Kind, - }) +func buildArbitersModificationFunction(mdb mdbv1.MongoDBCommunity) statefulset.Modification { + 
return statefulset.Apply( + statefulset.WithReplicas(mdb.StatefulSetArbitersThisReconciliation()), + statefulset.WithServiceName(mdb.ServiceName()), + statefulset.WithName(mdb.ArbiterNamespacedName().Name), + ) } func getDomain(service, namespace, clusterName string) string { @@ -569,3 +782,18 @@ func getDomain(service, namespace, clusterName string) string { func isPreReadinessInitContainerStatefulSet(sts appsv1.StatefulSet) bool { return container.GetByName(construct.ReadinessProbeContainerName, sts.Spec.Template.Spec.InitContainers) == nil } + +func getMongoDBImage(repoUrl, mongodbImage, mongodbImageType, version string) string { + if strings.HasSuffix(repoUrl, "/") { + repoUrl = strings.TrimRight(repoUrl, "/") + } + mongoImageName := mongodbImage + for _, officialUrl := range construct.OfficialMongodbRepoUrls { + if repoUrl == officialUrl { + return fmt.Sprintf("%s/%s:%s-%s", repoUrl, mongoImageName, version, mongodbImageType) + } + } + + // This is the old images backwards compatibility code path. + return fmt.Sprintf("%s/%s:%s", repoUrl, mongoImageName, version) +} diff --git a/controllers/replicaset_controller_test.go b/controllers/replicaset_controller_test.go index 9b612043b..d7f2eb8da 100644 --- a/controllers/replicaset_controller_test.go +++ b/controllers/replicaset_controller_test.go @@ -4,22 +4,29 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "reflect" "testing" "time" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/x509" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/statefulset" + "github.com/stretchr/testify/require" + apiErrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/container" + "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/yaml" - "github.com/pkg/errors" - "github.com/stretchr/objx" k8sClient "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/scram" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/annotations" "github.com/mongodb/mongodb-kubernetes-operator/pkg/kube/secret" @@ -39,9 +46,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func init() { - os.Setenv(construct.AgentImageEnv, "agent-image") -} +const ( + AgentImage = "fake-agentImage" +) func newTestReplicaSet() mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ @@ -52,7 +59,43 @@ func newTestReplicaSet() mdbv1.MongoDBCommunity { }, Spec: mdbv1.MongoDBCommunitySpec{ Members: 3, - Version: "4.2.2", + Version: "6.0.5", + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + }, + } +} + +func newTestReplicaSetWithSystemLogAndLogRotate() mdbv1.MongoDBCommunity { + return mdbv1.MongoDBCommunity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-rs", + Namespace: "my-ns", + Annotations: map[string]string{}, + }, + Spec: mdbv1.MongoDBCommunitySpec{ + Members: 3, + Version: "6.0.5", + Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, + }, + AgentConfiguration: mdbv1.AgentConfiguration{ + LogRotate: &automationconfig.CrdLogRotate{ + SizeThresholdMB: "1", + }, + AuditLogRotate: &automationconfig.CrdLogRotate{ + SizeThresholdMB: "1", + }, + SystemLog: &automationconfig.SystemLog{ + Destination: automationconfig.File, + Path: "/tmp/test", + }, + }, }, } } @@ -78,6 +121,15 @@ func 
newScramReplicaSet(users ...mdbv1.MongoDBUser) mdbv1.MongoDBCommunity { } func newTestReplicaSetWithTLS() mdbv1.MongoDBCommunity { + return newTestReplicaSetWithTLSCaCertificateReferences(&corev1.LocalObjectReference{ + Name: "caConfigMap", + }, + &corev1.LocalObjectReference{ + Name: "certificateKeySecret", + }) +} + +func newTestReplicaSetWithTLSCaCertificateReferences(caConfigMap, caCertificateSecret *corev1.LocalObjectReference) mdbv1.MongoDBCommunity { return mdbv1.MongoDBCommunity{ ObjectMeta: metav1.ObjectMeta{ Name: "my-rs", @@ -88,12 +140,14 @@ func newTestReplicaSetWithTLS() mdbv1.MongoDBCommunity { Members: 3, Version: "4.2.2", Security: mdbv1.Security{ + Authentication: mdbv1.Authentication{ + Modes: []mdbv1.AuthMode{"SCRAM"}, + }, TLS: mdbv1.TLS{ - Enabled: true, - CaConfigMap: mdbv1.LocalObjectReference{ - Name: "caConfigMap", - }, - CertificateKeySecret: mdbv1.LocalObjectReference{ + Enabled: true, + CaConfigMap: caConfigMap, + CaCertificateSecret: caCertificateSecret, + CertificateKeySecret: corev1.LocalObjectReference{ Name: "certificateKeySecret", }, }, @@ -103,17 +157,18 @@ func newTestReplicaSetWithTLS() mdbv1.MongoDBCommunity { } func TestKubernetesResources_AreCreated(t *testing.T) { + ctx := context.Background() // TODO: Create builder/yaml fixture of some type to construct MDB objects for unit tests mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) s := corev1.Secret{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &s) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}, &s) assert.NoError(t, err) assert.Equal(t, mdb.Namespace, s.Namespace) assert.Equal(t, mdb.AutomationConfigSecretName(), s.Name) @@ -122,30 +177,29 @@ func TestKubernetesResources_AreCreated(t *testing.T) { } func TestStatefulSet_IsCorrectlyConfigured(t *testing.T) { - _ = os.Setenv(construct.MongodbRepoUrl, "repo") - _ = os.Setenv(construct.MongodbImageEnv, "mongo") + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "docker.io/mongodb", "mongodb-community-server", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts := appsv1.StatefulSet{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.Containers, 2) agentContainer := 
sts.Spec.Template.Spec.Containers[1] assert.Equal(t, construct.AgentName, agentContainer.Name) - assert.Equal(t, os.Getenv(construct.AgentImageEnv), agentContainer.Image) + assert.Equal(t, AgentImage, agentContainer.Image) expectedProbe := probes.New(construct.DefaultReadiness()) assert.True(t, reflect.DeepEqual(&expectedProbe, agentContainer.ReadinessProbe)) mongodbContainer := sts.Spec.Template.Spec.Containers[0] assert.Equal(t, construct.MongodbName, mongodbContainer.Name) - assert.Equal(t, "repo/mongo:4.2.2", mongodbContainer.Image) + assert.Equal(t, "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", mongodbContainer.Image) assert.Equal(t, resourcerequirements.Defaults(), agentContainer.Resources) @@ -153,7 +207,105 @@ func TestStatefulSet_IsCorrectlyConfigured(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, acVolume.Secret, "automation config should be stored in a secret!") assert.Nil(t, acVolume.ConfigMap, "automation config should be stored in a secret, not a config map!") +} +func TestGuessEnterprise(t *testing.T) { + type testConfig struct { + setArgs func(t *testing.T) + mdb mdbv1.MongoDBCommunity + mongodbImage string + expectedEnterprise bool + } + tests := map[string]testConfig{ + "No override and Community image": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-community-server", + expectedEnterprise: false, + }, + "No override and Enterprise image": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-enterprise-server", + expectedEnterprise: true, + }, + "Assuming enterprise manually": { + setArgs: func(t *testing.T) { + t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "true") + }, + mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-community-server", + expectedEnterprise: true, + }, + "Assuming community manually": { + setArgs: func(t *testing.T) { + t.Setenv(construct.MongoDBAssumeEnterpriseEnv, "false") + }, + mdb: mdbv1.MongoDBCommunity{}, + mongodbImage: "mongodb-enterprise-server", + expectedEnterprise: false, + }, + // This one is a corner case. We don't expect users to fall here very often as there are + // dedicated variables to control this type of behavior. 
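// --- Hedged condensation (looksEnterprise is illustrative, not the PR's code) of the precedence
// that guessEnterprise applies and the test table here exercises: an explicit env override wins,
// then a mongod container image override from the StatefulSet spec, then the operator-wide image name.
func looksEnterprise(envOverride, containerOverride, configuredImage string) bool {
	if v, err := strconv.ParseBool(envOverride); err == nil {
		return v // explicit override, e.g. via construct.MongoDBAssumeEnterpriseEnv
	}
	if containerOverride != "" {
		return strings.Contains(containerOverride, "mongodb-enterprise-server")
	}
	return configuredImage == "mongodb-enterprise-server"
}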
+ "Enterprise with StatefulSet override": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{ + Spec: mdbv1.MongoDBCommunitySpec{ + StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ + SpecWrapper: mdbv1.StatefulSetSpecWrapper{ + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: construct.MongodbName, + Image: "another_repo.com/another_org/mongodb-enterprise-server", + }, + }, + }, + }, + }, + }, + }, + }, + }, + mongodbImage: "mongodb-community-server", + expectedEnterprise: true, + }, + "Enterprise with StatefulSet override to Community": { + setArgs: func(t *testing.T) {}, + mdb: mdbv1.MongoDBCommunity{ + Spec: mdbv1.MongoDBCommunitySpec{ + StatefulSetConfiguration: mdbv1.StatefulSetConfiguration{ + SpecWrapper: mdbv1.StatefulSetSpecWrapper{ + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: construct.MongodbName, + Image: "another_repo.com/another_org/mongodb-community-server", + }, + }, + }, + }, + }, + }, + }, + }, + }, + mongodbImage: "mongodb-enterprise-server", + expectedEnterprise: false, + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + testConfig.setArgs(t) + calculatedEnterprise := guessEnterprise(testConfig.mdb, testConfig.mongodbImage) + assert.Equal(t, testConfig.expectedEnterprise, calculatedEnterprise) + }) + } } func getVolumeByName(sts appsv1.StatefulSet, volumeName string) (corev1.Volume, error) { @@ -162,43 +314,44 @@ func getVolumeByName(sts appsv1.StatefulSet, volumeName string) (corev1.Volume, return v, nil } } - return corev1.Volume{}, errors.Errorf("volume with name %s, not found", volumeName) + return corev1.Volume{}, fmt.Errorf("volume with name %s, not found", volumeName) } func TestChangingVersion_ResultsInRollingUpdateStrategyType(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) + mgr := client.NewManager(ctx, &mdb) mgrClient := mgr.GetClient() - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assertReconciliationSuccessful(t, res, err) // fetch updated resource after first reconciliation - _ = mgrClient.Get(context.TODO(), mdb.NamespacedName(), &mdb) + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &mdb) sts := appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) mdbRef := &mdb mdbRef.Spec.Version = "4.2.3" - _ = mgrClient.Update(context.TODO(), &mdb) + _ = mgrClient.Update(ctx, &mdb) // agents start the upgrade, they are not all ready sts.Status.UpdatedReplicas = 1 sts.Status.ReadyReplicas = 2 - err = mgrClient.Update(context.TODO(), &sts) + err = mgrClient.Update(ctx, &sts) assert.NoError(t, err) - _ = mgrClient.Get(context.TODO(), mdb.NamespacedName(), &sts) + _ = mgrClient.Get(ctx, mdb.NamespacedName(), &sts) // reconcilliation is successful - res, err = 
r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) sts = appsv1.StatefulSet{} - err = mgrClient.Get(context.TODO(), types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) + err = mgrClient.Get(ctx, types.NamespacedName{Name: mdb.Name, Namespace: mdb.Namespace}, &sts) assert.NoError(t, err) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type, @@ -210,16 +363,16 @@ func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { mdb := newTestReplicaSet() mdb.Spec.Version = "4.0.0" mdb.Annotations[annotations.LastAppliedMongoDBVersion] = "4.0.0" - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) t.Run("On No Version Change, First Version", func(t *testing.T) { mdb := newTestReplicaSet() mdb.Spec.Version = "4.0.0" delete(mdb.Annotations, annotations.LastAppliedMongoDBVersion) - sts, err := buildStatefulSet(mdb) - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.RollingUpdateStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) t.Run("On Version Change", func(t *testing.T) { @@ -235,85 +388,449 @@ func TestBuildStatefulSet_ConfiguresUpdateStrategyCorrectly(t *testing.T) { assert.NoError(t, err) mdb.Annotations[annotations.LastAppliedMongoDBVersion] = string(bytes) - sts, err := buildStatefulSet(mdb) - - assert.NoError(t, err) + sts := appsv1.StatefulSet{} + buildStatefulSetModificationFunction(mdb, "fake-mongodbImage", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage")(&sts) assert.Equal(t, appsv1.OnDeleteStatefulSetStrategyType, sts.Spec.UpdateStrategy.Type) }) } func TestService_isCorrectlyCreatedAndUpdated(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"}) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) +} + +func TestService_usesCustomMongodPortWhenSpecified(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := 
r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mongodConfig := objx.New(map[string]interface{}{}) + mongodConfig.Set("net.port", 1000.) + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) svc := corev1.Service{} - err = mgr.GetClient().Get(context.TODO(), types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) assert.NoError(t, err) assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) assert.Len(t, svc.Spec.Ports, 1) - assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017}) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"}) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) } +func createOrUpdatePodsWithVersions(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versions []string) { + for i, version := range versions { + createPodWithAgentAnnotation(ctx, t, c, types.NamespacedName{ + Namespace: name.Namespace, + Name: fmt.Sprintf("%s-%d", name.Name, i), + }, version) + } +} + +func createPodWithAgentAnnotation(ctx context.Context, t *testing.T, c k8sClient.Client, name types.NamespacedName, versionStr string) { + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name.Name, + Namespace: name.Namespace, + Annotations: map[string]string{ + "agent.mongodb.com/version": versionStr, + }, + }, + } + + err := c.Create(ctx, &pod) + + if err != nil && apiErrors.IsAlreadyExists(err) { + err = c.Update(ctx, &pod) + assert.NoError(t, err) + } + + assert.NoError(t, err) +} + +func TestService_changesMongodPortOnRunningClusterWithArbiters(t *testing.T) { + ctx := context.Background() + mdb := newScramReplicaSet(mdbv1.MongoDBUser{ + Name: "testuser", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "password-secret-name", + }, + ScramCredentialsSecretName: "scram-credentials", + }) + + namespacedName := mdb.NamespacedName() + arbiterNamespacedName := mdb.ArbiterNamespacedName() + + const oldPort = automationconfig.DefaultDBPort + const newPort = 8000 + + mgr := client.NewManager(ctx, &mdb) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + + t.Run("Prepare cluster with arbiters and change port", func(t *testing.T) { + err := createUserPasswordSecret(ctx, mgr.Client, mdb, "password-secret-name", "pass") + assert.NoError(t, err) + + mdb.Spec.Arbiters = 1 + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assertReconciliationSuccessful(t, res, err) + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + }) + _ = assertAutomationConfigVersion(ctx, t, 
mgr.Client, mdb, 1) + + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"1", "1", "1"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"1"}) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assertReconciliationSuccessful(t, res, err) + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + }) + _ = assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 1) + assertStatefulsetReady(ctx, t, mgr, namespacedName, 3) + assertStatefulsetReady(ctx, t, mgr, arbiterNamespacedName, 1) + + mdb.Spec.AdditionalMongodConfig = mdbv1.NewMongodConfiguration() + mdb.Spec.AdditionalMongodConfig.SetDBPort(newPort) + + err = mgr.GetClient().Update(ctx, &mdb) + assert.NoError(t, err) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Port should be changed only in the process #0", func(t *testing.T) { + // port changes should be performed one at a time + // should set port #0 to new one + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 2) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in processes #0,#1", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"2", "2", "2"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"2"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 3) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in processes #0,#1,#2", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"3", "3", "3"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), 
+ t.Run("Ports should be changed in processes #0,#1,#2", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"3", "3", "3"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"3"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + require.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 4) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, oldPort, currentAc.Processes[3].GetPort()) + + // not all ports are changed, so there are still two ports in the service + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("Ports should be changed in all processes", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"4", "4", "4"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"4"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assert.NoError(t, err) + assert.True(t, res.Requeue) + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) + + // all the ports are changed, but the service still exposes both the old and the new port until the next reconcile + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + oldPort: "mongodb", + newPort: "mongodb-new", + }) + + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, oldPort, newPort) + }) + + t.Run("At the end there should be only the new port in the service", func(t *testing.T) { + setStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 3) + setArbiterStatefulSetReadyReplicas(ctx, t, mgr.GetClient(), mdb, 1) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), namespacedName, []string{"5", "5", "5"}) + createOrUpdatePodsWithVersions(ctx, t, mgr.GetClient(), arbiterNamespacedName, []string{"5"}) + + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: namespacedName}) + assert.NoError(t, err) + // no need to requeue, the port change is finished + assert.False(t, res.Requeue) + // there should not be any changes in the config anymore + currentAc := assertAutomationConfigVersion(ctx, t, mgr.Client, mdb, 5) + require.Len(t, currentAc.Processes, 4) + assert.Equal(t, newPort, currentAc.Processes[0].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[1].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[2].GetPort()) + assert.Equal(t, newPort, currentAc.Processes[3].GetPort()) + + assertServicePorts(ctx, t, mgr.Client, mdb, map[int]string{ + newPort: "mongodb", + }) + + // only at the end, when all pods are ready, are the connection strings updated + assertConnectionStringSecretPorts(ctx, t, mgr.GetClient(), mdb, newPort, oldPort) + }) +} + +// assertConnectionStringSecretPorts checks that the connection string secret contains expectedPort and does not contain notExpectedPort.
+func assertConnectionStringSecretPorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedPort int, notExpectedPort int) { + connectionStringSecret := corev1.Secret{} + scramUsers := mdb.GetAuthUsers() + require.Len(t, scramUsers, 1) + secretNamespacedName := types.NamespacedName{Name: scramUsers[0].ConnectionStringSecretName, Namespace: scramUsers[0].ConnectionStringSecretNamespace} + err := c.Get(ctx, secretNamespacedName, &connectionStringSecret) + require.NoError(t, err) + require.Contains(t, connectionStringSecret.Data, "connectionString.standard") + assert.Contains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", expectedPort)) + assert.NotContains(t, string(connectionStringSecret.Data["connectionString.standard"]), fmt.Sprintf("%d", notExpectedPort)) +} + +func assertServicePorts(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, expectedServicePorts map[int]string) { + svc := corev1.Service{} + + err := c.Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + require.NoError(t, err) + assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) + assert.Equal(t, mdb.ServiceName(), svc.Spec.Selector["app"]) + assert.Len(t, svc.Spec.Ports, len(expectedServicePorts)) + + actualServicePorts := map[int]string{} + for _, servicePort := range svc.Spec.Ports { + actualServicePorts[int(servicePort.Port)] = servicePort.Name + } + + assert.Equal(t, expectedServicePorts, actualServicePorts) +} + +func assertAutomationConfigVersion(ctx context.Context, t *testing.T, c client.Client, mdb mdbv1.MongoDBCommunity, expectedVersion int) automationconfig.AutomationConfig { + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + require.NoError(t, err) + assert.Equal(t, expectedVersion, ac.Version) + return ac +} + +func assertStatefulsetReady(ctx context.Context, t *testing.T, mgr manager.Manager, name types.NamespacedName, expectedReplicas int) { + sts := appsv1.StatefulSet{} + err := mgr.GetClient().Get(ctx, name, &sts) + require.NoError(t, err) + assert.True(t, statefulset.IsReady(sts, expectedReplicas)) +} + +func TestService_configuresPrometheusCustomPorts(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "secret", + }, + Port: 4321, + } + + mongodConfig := objx.New(map[string]interface{}{}) + mongodConfig.Set("net.port", 1000.) + mdb.Spec.AdditionalMongodConfig.Object = mongodConfig + + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). 
+ Build()) + + assert.NoError(t, err) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + assert.Equal(t, svc.Spec.Type, corev1.ServiceTypeClusterIP) + assert.Equal(t, svc.Spec.Selector["app"], mdb.ServiceName()) + assert.Len(t, svc.Spec.Ports, 2) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 1000, Name: "mongodb"}) + assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 4321, Name: "prometheus"}) + + assert.Equal(t, svc.Labels["app"], mdb.ServiceName()) + + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) +} + +func TestService_configuresPrometheus(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + mdb.Spec.Prometheus = &mdbv1.Prometheus{ + Username: "username", + PasswordSecretRef: mdbv1.SecretKeyReference{ + Name: "secret", + }, + } + + mgr := client.NewManager(ctx, &mdb) + err := secret.CreateOrUpdate(ctx, mgr.Client, secret.Builder(). + SetName("secret"). + SetNamespace(mdb.Namespace). + SetField("password", "my-password"). + Build()) + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + svc := corev1.Service{} + err = mgr.GetClient().Get(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}, &svc) + assert.NoError(t, err) + + assert.Len(t, svc.Spec.Ports, 2) + assert.Equal(t, svc.Spec.Ports[0], corev1.ServicePort{Port: 27017, Name: "mongodb"}) + assert.Equal(t, svc.Spec.Ports[1], corev1.ServicePort{Port: 9216, Name: "prometheus"}) +} + +func TestCustomNetPort_Configuration(t *testing.T) { + ctx := context.Background() + svc, _ := performReconciliationAndGetService(ctx, t, "specify_net_port.yaml") + assert.Equal(t, corev1.ServiceTypeClusterIP, svc.Spec.Type) + assert.Len(t, svc.Spec.Ports, 1) + assert.Equal(t, corev1.ServicePort{Port: 40333, Name: "mongodb"}, svc.Spec.Ports[0]) +} + func TestAutomationConfig_versionIsBumpedOnChange(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, 
mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, 1, currentAc.Version) mdb.Spec.Members++ - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - _ = mgr.GetClient().Update(context.TODO(), &mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + _ = mgr.GetClient().Update(ctx, &mdb) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, 2, currentAc.Version) } func TestAutomationConfig_versionIsNotBumpedWithNoChanges(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, currentAc.Version, 1) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Equal(t, currentAc.Version, 1) } func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mdb.Spec.Version = "4.2.2" + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := 
automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Len(t, currentAc.Processes, 3) assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") @@ -321,11 +838,11 @@ func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T // Upgrading minor version does not change the FCV on the automationConfig mdbRef := &mdb mdbRef.Spec.Version = "4.4.0" - _ = mgr.Client.Update(context.TODO(), mdbRef) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + _ = mgr.Client.Update(ctx, mdbRef) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err = automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err = automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.Len(t, currentAc.Processes, 3) assert.Equal(t, currentAc.Processes[0].FeatureCompatibilityVersion, "4.2") @@ -333,20 +850,21 @@ func TestAutomationConfigFCVIsNotIncreasedWhenUpgradingMinorVersion(t *testing.T } func TestAutomationConfig_CustomMongodConfig(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mongodConfig := objx.New(map[string]interface{}{}) - mongodConfig.Set("net.port", 1000) + mongodConfig.Set("net.port", float64(1000)) mongodConfig.Set("storage.other", "value") mongodConfig.Set("arbitrary.config.path", "value") mdb.Spec.AdditionalMongodConfig.Object = mongodConfig - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) for _, p := range currentAc.Processes { @@ -364,36 +882,33 @@ func TestAutomationConfig_CustomMongodConfig(t *testing.T) { } func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { + ctx := context.Background() mdb := newScramReplicaSet() - mgr := client.NewManager(&mdb) + mgr := client.NewManager(ctx, &mdb) c := mgr.Client keyFileNsName := mdb.GetAgentKeyfileSecretNamespacedName() - err := secret.CreateOrUpdate(c, - secret.Builder(). - SetName(keyFileNsName.Name). - SetNamespace(keyFileNsName.Namespace). - SetField(scram.AgentKeyfileKey, "my-keyfile"). - Build(), - ) + err := secret.CreateOrUpdate(ctx, c, secret.Builder(). 
+ SetName(keyFileNsName.Name). + SetNamespace(keyFileNsName.Namespace). + SetField(constants.AgentKeyfileKey, "my-keyfile"). + Build()) assert.NoError(t, err) passwordNsName := mdb.GetAgentPasswordSecretNamespacedName() - err = secret.CreateOrUpdate(c, - secret.Builder(). - SetName(passwordNsName.Name). - SetNamespace(passwordNsName.Namespace). - SetField(scram.AgentPasswordKey, "my-pass"). - Build(), - ) + err = secret.CreateOrUpdate(ctx, c, secret.Builder(). + SetName(passwordNsName.Name). + SetNamespace(passwordNsName.Namespace). + SetField(constants.AgentPasswordKey, "my-pass"). + Build()) assert.NoError(t, err) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) assert.False(t, currentAc.Auth.Disabled) @@ -405,23 +920,26 @@ func TestExistingPasswordAndKeyfile_AreUsedWhenTheSecretExists(t *testing.T) { } func TestScramIsConfigured(t *testing.T) { - assertReplicaSetIsConfiguredWithScram(t, newScramReplicaSet()) + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newScramReplicaSet()) } func TestScramIsConfiguredWhenNotSpecified(t *testing.T) { - assertReplicaSetIsConfiguredWithScram(t, newTestReplicaSet()) + ctx := context.Background() + assertReplicaSetIsConfiguredWithScram(ctx, t, newTestReplicaSet()) } func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() mdb.Spec.Members = 5 - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 5, mdb.Status.CurrentMongoDBMembers) @@ -429,73 +947,74 @@ func TestReplicaSet_IsScaledDown_OneMember_AtATime_WhenItAlreadyExists(t *testin // scale members from five to three mdb.Spec.Members = 3 - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: 
mdb.NamespacedName()}) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) // scale members from three to five mdb.Spec.Members = 5 - err = mgr.GetClient().Update(context.TODO(), &mdb) + err = mgr.GetClient().Update(ctx, &mdb) assert.NoError(t, err) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, true, res.Requeue) assert.Equal(t, 4, mdb.Status.CurrentMongoDBMembers) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - makeStatefulSetReady(t, mgr.GetClient(), mdb) + makeStatefulSetReady(ctx, t, mgr.GetClient(), mdb) - res, err = r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + res, err = r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assert.NoError(t, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, false, res.Requeue) @@ -503,38 +1022,55 @@ func TestReplicaSet_IsScaledUp_OneMember_AtATime_WhenItAlreadyExists(t *testing. 
} func TestIgnoreUnknownUsers(t *testing.T) { + ctx := context.Background() t.Run("IgnoreUnknownUsers set to true", func(t *testing.T) { mdb := newTestReplicaSet() ignoreUnknownUsers := true mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers - assertAuthoritativeSet(t, mdb, false) + assertAuthoritativeSet(ctx, t, mdb, false) }) t.Run("IgnoreUnknownUsers is not set", func(t *testing.T) { mdb := newTestReplicaSet() mdb.Spec.Security.Authentication.IgnoreUnknownUsers = nil - assertAuthoritativeSet(t, mdb, false) + assertAuthoritativeSet(ctx, t, mdb, false) }) t.Run("IgnoreUnknownUsers set to false", func(t *testing.T) { mdb := newTestReplicaSet() ignoreUnknownUsers := false mdb.Spec.Security.Authentication.IgnoreUnknownUsers = &ignoreUnknownUsers - assertAuthoritativeSet(t, mdb, true) + assertAuthoritativeSet(ctx, t, mdb, true) }) +} +func TestAnnotationsAreAppliedToResource(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) + assert.NoError(t, err) + + assert.NotNil(t, mdb.Annotations) + assert.NotEmpty(t, mdb.Annotations[lastSuccessfulConfiguration], "last successful spec should have been saved as an annotation but was not") + assert.Equal(t, mdb.Annotations[lastAppliedMongoDBVersion], mdb.Spec.Version, "last version should have been saved as an annotation but was not") } // assertAuthoritativeSet asserts that a reconciliation of the given MongoDBCommunity resource // results in the AuthoritativeSet of the created AutomationConfig having the provided expectedValue.
-func assertAuthoritativeSet(t *testing.T, mdb mdbv1.MongoDBCommunity, expectedValue bool) { - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) +func assertAuthoritativeSet(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity, expectedValue bool) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - s, err := mgr.Client.GetSecret(types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + s, err := mgr.Client.GetSecret(ctx, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) assert.NoError(t, err) bytes := s.Data[automationconfig.ConfigKey] @@ -544,14 +1080,49 @@ func assertAuthoritativeSet(t *testing.T, mdb mdbv1.MongoDBCommunity, expectedVa assert.Equal(t, expectedValue, ac.Auth.AuthoritativeSet) } -func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDBCommunity) { - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) +func assertReplicaSetIsConfiguredWithScram(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { + assert.NotEmpty(t, currentAc.Auth.Key) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.NotEmpty(t, currentAc.Auth.AutoPwd) + assert.False(t, currentAc.Auth.Disabled) + }) + t.Run("Secret with password was created", func(t *testing.T) { + secretNsName := mdb.GetAgentPasswordSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) + }) + + t.Run("Secret with keyfile was created", func(t *testing.T) { + secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + }) +} + +func assertReplicaSetIsConfiguredWithScramTLS(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + newClient := client.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, newClient, mdb) + assert.NoError(t, err) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + 
res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - currentAc, err := automationconfig.ReadFromSecret(mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) t.Run("Automation Config is configured with SCRAM", func(t *testing.T) { + assert.Empty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) assert.NotEmpty(t, currentAc.Auth.Key) assert.NoError(t, err) assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) @@ -560,35 +1131,104 @@ func assertReplicaSetIsConfiguredWithScram(t *testing.T, mdb mdbv1.MongoDBCommun }) t.Run("Secret with password was created", func(t *testing.T) { secretNsName := mdb.GetAgentPasswordSecretNamespacedName() - s, err := mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) - assert.Equal(t, s.Data[scram.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) + assert.Equal(t, s.Data[constants.AgentPasswordKey], []byte(currentAc.Auth.AutoPwd)) }) t.Run("Secret with keyfile was created", func(t *testing.T) { secretNsName := mdb.GetAgentKeyfileSecretNamespacedName() - s, err := mgr.Client.GetSecret(secretNsName) + s, err := mgr.Client.GetSecret(ctx, secretNsName) assert.NoError(t, err) - assert.Equal(t, s.Data[scram.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) }) } +func assertReplicaSetIsConfiguredWithX509(ctx context.Context, t *testing.T, mdb mdbv1.MongoDBCommunity) { + mgr := client.NewManager(ctx, &mdb) + newClient := client.NewClient(mgr.GetClient()) + err := createTLSSecret(ctx, newClient, mdb, "CERT", "KEY", "") + assert.NoError(t, err) + err = createTLSConfigMap(ctx, newClient, mdb) + assert.NoError(t, err) + crt, key, err := x509.CreateAgentCertificate() + assert.NoError(t, err) + err = createAgentCertSecret(ctx, newClient, mdb, crt, key, "") + assert.NoError(t, err) + + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assertReconciliationSuccessful(t, res, err) + + currentAc, err := automationconfig.ReadFromSecret(ctx, mgr.Client, types.NamespacedName{Name: mdb.AutomationConfigSecretName(), Namespace: mdb.Namespace}) + + t.Run("Automation Config is configured with X509", func(t *testing.T) { + assert.NotEmpty(t, currentAc.TLSConfig.AutoPEMKeyFilePath) + assert.Equal(t, automationAgentPemMountPath+"/"+mdb.AgentCertificatePemSecretNamespacedName().Name, currentAc.TLSConfig.AutoPEMKeyFilePath) + assert.NotEmpty(t, currentAc.Auth.Key) + assert.NoError(t, err) + assert.NotEmpty(t, currentAc.Auth.KeyFileWindows) + assert.Empty(t, currentAc.Auth.AutoPwd) + assert.False(t, currentAc.Auth.Disabled) + assert.Equal(t, "CN=mms-automation-agent,OU=ENG,O=MongoDB,C=US", currentAc.Auth.AutoUser) + }) + t.Run("Secret with password was not created", func(t *testing.T) { + secretNsName := mdb.GetAgentPasswordSecretNamespacedName() + _, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.Error(t, err) + }) + t.Run("Secret with keyfile was created", func(t *testing.T) { + secretNsName := 
mdb.GetAgentKeyfileSecretNamespacedName() + s, err := mgr.Client.GetSecret(ctx, secretNsName) + assert.NoError(t, err) + assert.Equal(t, s.Data[constants.AgentKeyfileKey], []byte(currentAc.Auth.Key)) + }) +} + +func TestX509andSCRAMIsConfiguredWithX509Agent(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} + mdb.Spec.Security.Authentication.AgentMode = "X509" + + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) +} + +func TestX509andSCRAMIsConfiguredWithSCRAMAgent(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509", "SCRAM"} + mdb.Spec.Security.Authentication.AgentMode = "SCRAM" + + assertReplicaSetIsConfiguredWithScramTLS(ctx, t, mdb) +} + +func TestX509IsConfigured(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSetWithTLS() + mdb.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"} + + assertReplicaSetIsConfiguredWithX509(ctx, t, mdb) +} + func TestReplicaSet_IsScaledUpToDesiredMembers_WhenFirstCreated(t *testing.T) { + ctx := context.Background() mdb := newTestReplicaSet() - mgr := client.NewManager(&mdb) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) assertReconciliationSuccessful(t, res, err) - err = mgr.GetClient().Get(context.TODO(), mdb.NamespacedName(), &mdb) + err = mgr.GetClient().Get(ctx, mdb.NamespacedName(), &mdb) assert.NoError(t, err) assert.Equal(t, 3, mdb.Status.CurrentMongoDBMembers) } func TestVolumeClaimTemplates_Configuration(t *testing.T) { - sts := performReconciliationAndGetStatefulSet(t, "volume_claim_templates_mdb.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "volume_claim_templates_mdb.yaml") assert.Len(t, sts.Spec.VolumeClaimTemplates, 3) @@ -603,7 +1243,8 @@ func TestVolumeClaimTemplates_Configuration(t *testing.T) { } func TestChangeDataVolume_Configuration(t *testing.T) { - sts := performReconciliationAndGetStatefulSet(t, "change_data_volume.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "change_data_volume.yaml") assert.Len(t, sts.Spec.VolumeClaimTemplates, 2) dataVolume := sts.Spec.VolumeClaimTemplates[0] @@ -616,7 +1257,8 @@ func TestChangeDataVolume_Configuration(t *testing.T) { } func TestCustomStorageClass_Configuration(t *testing.T) { - sts := performReconciliationAndGetStatefulSet(t, "custom_storage_class.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "custom_storage_class.yaml") dataVolume := sts.Spec.VolumeClaimTemplates[0] @@ -632,7 +1274,8 @@ func TestCustomStorageClass_Configuration(t *testing.T) { } func TestCustomTaintsAndTolerations_Configuration(t *testing.T) { - sts := performReconciliationAndGetStatefulSet(t, "tolerations_example.yaml") + ctx := context.Background() + sts, _ := performReconciliationAndGetStatefulSet(ctx, t, "tolerations_example.yaml") assert.Len(t, sts.Spec.Template.Spec.Tolerations, 2) assert.Equal(t, 
"example-key", sts.Spec.Template.Spec.Tolerations[0].Key) @@ -644,21 +1287,81 @@ func TestCustomTaintsAndTolerations_Configuration(t *testing.T) { assert.Equal(t, corev1.TaintEffectNoExecute, sts.Spec.Template.Spec.Tolerations[1].Effect) } -func performReconciliationAndGetStatefulSet(t *testing.T, filePath string) appsv1.StatefulSet { +func TestCustomDataDir_Configuration(t *testing.T) { + ctx := context.Background() + sts, c := performReconciliationAndGetStatefulSet(ctx, t, "specify_data_dir.yaml") + + agentContainer := container.GetByName("mongodb-agent", sts.Spec.Template.Spec.Containers) + assert.NotNil(t, agentContainer) + assertVolumeMountPath(t, agentContainer.VolumeMounts, "data-volume", "/some/path/db") + + mongoContainer := container.GetByName("mongod", sts.Spec.Template.Spec.Containers) + assert.NotNil(t, mongoContainer) + + lastCommand := mongoContainer.Command[len(agentContainer.Command)-1] + assert.Contains(t, lastCommand, "/some/path/db", "startup command should be using the newly specified path") + + ac, err := automationconfig.ReadFromSecret(ctx, c, types.NamespacedName{Name: "example-mongodb-config", Namespace: "test-ns"}) + assert.NoError(t, err) + + for _, p := range ac.Processes { + actualStoragePath := p.Args26.Get("storage.dbPath").String() + assert.Equal(t, "/some/path/db", actualStoragePath, "process dbPath should have been set") + } +} + +func TestInconsistentReplicas(t *testing.T) { + ctx := context.Background() + mdb := newTestReplicaSet() + stsReplicas := new(int32) + *stsReplicas = 3 + mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Replicas = stsReplicas + mdb.Spec.Members = 4 + + mgr := client.NewManager(ctx, &mdb) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: mdb.Namespace, Name: mdb.Name}}) + assert.NoError(t, err) +} + +func assertVolumeMountPath(t *testing.T, mounts []corev1.VolumeMount, name, path string) { + for _, v := range mounts { + if v.Name == name { + assert.Equal(t, path, v.MountPath) + return + } + } + t.Fatalf("volume with name %s was not present!", name) +} + +func performReconciliationAndGetStatefulSet(ctx context.Context, t *testing.T, filePath string) (appsv1.StatefulSet, client.Client) { mdb, err := loadTestFixture(filePath) assert.NoError(t, err) - mgr := client.NewManager(&mdb) - assert.NoError(t, generatePasswordsForAllUsers(mdb, mgr.Client)) - r := NewReconciler(mgr) - res, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: mdb.NamespacedName()}) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) assertReconciliationSuccessful(t, res, err) - sts, err := mgr.Client.GetStatefulSet(mdb.NamespacedName()) + sts, err := mgr.Client.GetStatefulSet(ctx, mdb.NamespacedName()) assert.NoError(t, err) - return sts + return sts, mgr.Client } -func generatePasswordsForAllUsers(mdb mdbv1.MongoDBCommunity, c client.Client) error { +func performReconciliationAndGetService(ctx context.Context, t *testing.T, filePath string) (corev1.Service, client.Client) { + mdb, err := loadTestFixture(filePath) + assert.NoError(t, err) + mgr := 
+func performReconciliationAndGetService(ctx context.Context, t *testing.T, filePath string) (corev1.Service, client.Client) { + mdb, err := loadTestFixture(filePath) + assert.NoError(t, err) + mgr := client.NewManager(ctx, &mdb) + assert.NoError(t, generatePasswordsForAllUsers(ctx, mdb, mgr.Client)) + r := NewReconciler(mgr, "fake-mongodbRepoUrl", "fake-mongodbImage", "ubi8", AgentImage, "fake-versionUpgradeHookImage", "fake-readinessProbeImage") + res, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: mdb.NamespacedName()}) + assertReconciliationSuccessful(t, res, err) + svc, err := mgr.Client.GetService(ctx, types.NamespacedName{Name: mdb.ServiceName(), Namespace: mdb.Namespace}) + assert.NoError(t, err) + return svc, mgr.Client +} + +func generatePasswordsForAllUsers(ctx context.Context, mdb mdbv1.MongoDBCommunity, c client.Client) error { for _, user := range mdb.Spec.Users { key := "password" @@ -672,7 +1375,7 @@ func generatePasswordsForAllUsers(mdb mdbv1.MongoDBCommunity, c client.Client) e SetField(key, "GAGTQK2ccRRaxJFudI5y"). Build() - if err := c.CreateSecret(passwordSecret); err != nil { + if err := c.CreateSecret(ctx, passwordSecret); err != nil { return err } } @@ -688,17 +1391,27 @@ func assertReconciliationSuccessful(t *testing.T, result reconcile.Result, err e // makeStatefulSetReady updates the StatefulSet corresponding to the // provided MongoDB resource to mark it as ready for the case of `statefulset.IsReady` -func makeStatefulSetReady(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) { - setStatefulSetReadyReplicas(t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) +func makeStatefulSetReady(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity) { + setStatefulSetReadyReplicas(ctx, t, c, mdb, mdb.StatefulSetReplicasThisReconciliation()) } -func setStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { +func setStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { sts := appsv1.StatefulSet{} - err := c.Get(context.TODO(), mdb.NamespacedName(), &sts) + err := c.Get(ctx, mdb.NamespacedName(), &sts) assert.NoError(t, err) sts.Status.ReadyReplicas = int32(readyReplicas) sts.Status.UpdatedReplicas = int32(mdb.StatefulSetReplicasThisReconciliation()) - err = c.Update(context.TODO(), &sts) + err = c.Update(ctx, &sts) + assert.NoError(t, err) +} + +func setArbiterStatefulSetReadyReplicas(ctx context.Context, t *testing.T, c k8sClient.Client, mdb mdbv1.MongoDBCommunity, readyReplicas int) { + sts := appsv1.StatefulSet{} + err := c.Get(ctx, mdb.ArbiterNamespacedName(), &sts) + assert.NoError(t, err) + sts.Status.ReadyReplicas = int32(readyReplicas) + sts.Status.UpdatedReplicas = int32(mdb.StatefulSetArbitersThisReconciliation()) + err = c.Update(ctx, &sts) assert.NoError(t, err) } @@ -706,13 +1419,13 @@ func setStatefulSetReadyReplicas(t *testing.T, c k8sClient.Client, mdb mdbv1.Mon func loadTestFixture(yamlFileName string) (mdbv1.MongoDBCommunity, error) { testPath := fmt.Sprintf("testdata/%s", yamlFileName) mdb := mdbv1.MongoDBCommunity{} - data, err := ioutil.ReadFile(testPath) + data, err := os.ReadFile(testPath) if err != nil { - return mdb, errors.Errorf("error reading file: %s", err) + return mdb, fmt.Errorf("error reading file: %s", err) } if err := marshalRuntimeObjectFromYAMLBytes(data, &mdb); err != nil { - return mdb, errors.Errorf("error converting yaml bytes to service account: %s", err) + return mdb, fmt.Errorf("error converting yaml bytes to MongoDBCommunity: %s", err) } return mdb, nil @@ -727,3 +1440,74 @@ func marshalRuntimeObjectFromYAMLBytes(bytes []byte, obj runtime.Object)
error { } return json.Unmarshal(jsonBytes, &obj) } + +func TestGetMongoDBImage(t *testing.T) { + type testConfig struct { + mongodbRepoUrl string + mongodbImage string + mongodbImageType string + version string + expectedImage string + } + tests := map[string]testConfig{ + "Default UBI8 Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden UBI8 Enterprise image from Quay": { + mongodbRepoUrl: "quay.io/mongodb", + mongodbImage: "mongodb-enterprise-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "quay.io/mongodb/mongodb-enterprise-server:6.0.5-ubi8", + }, + "Overridden Ubuntu Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubuntu2204", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubuntu2204", + }, + "Overridden UBI Community image": { + mongodbRepoUrl: "docker.io/mongodb", + mongodbImage: "mongodb-community-server", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongodb/mongodb-community-server:6.0.5-ubi8", + }, + "Docker Inc images": { + mongodbRepoUrl: "docker.io", + mongodbImage: "mongo", + mongodbImageType: "ubi8", + version: "6.0.5", + expectedImage: "docker.io/mongo:6.0.5", + }, + "Deprecated AppDB images defined the old way": { + mongodbRepoUrl: "quay.io", + mongodbImage: "mongodb/mongodb-enterprise-appdb-database-ubi", + // In this example, we intentionally don't use the suffix from the env. variable and let users + // define it in the version instead. There are some known customers who do this. + // This is a backwards compatibility case. 
+ mongodbImageType: "will-be-ignored", + version: "5.0.14-ent", + expectedImage: "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi:5.0.14-ent", + }, + } + for testName := range tests { + t.Run(testName, func(t *testing.T) { + testConfig := tests[testName] + image := getMongoDBImage(testConfig.mongodbRepoUrl, testConfig.mongodbImage, testConfig.mongodbImageType, testConfig.version) + assert.Equal(t, testConfig.expectedImage, image) + }) + } +} diff --git a/controllers/testdata/change_data_volume.yaml b/controllers/testdata/change_data_volume.yaml index 58cdabb9f..0ab77019c 100644 --- a/controllers/testdata/change_data_volume.yaml +++ b/controllers/testdata/change_data_volume.yaml @@ -6,7 +6,6 @@ spec: members: 3 type: ReplicaSet version: "4.2.6" - persistent: true security: authentication: modes: ["SCRAM"] diff --git a/controllers/testdata/custom_storage_class.yaml b/controllers/testdata/custom_storage_class.yaml index 4579c29ba..9740ce4ec 100644 --- a/controllers/testdata/custom_storage_class.yaml +++ b/controllers/testdata/custom_storage_class.yaml @@ -6,7 +6,9 @@ spec: members: 3 type: ReplicaSet version: "4.2.6" - persistent: true + security: + authentication: + modes: ["SCRAM"] statefulSet: spec: volumeClaimTemplates: diff --git a/controllers/testdata/specify_data_dir.yaml b/controllers/testdata/specify_data_dir.yaml new file mode 100644 index 000000000..d2b80012c --- /dev/null +++ b/controllers/testdata/specify_data_dir.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb + namespace: test-ns +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + storage.dbPath: /some/path/db diff --git a/controllers/testdata/specify_net_port.yaml b/controllers/testdata/specify_net_port.yaml new file mode 100644 index 000000000..f57d367dd --- /dev/null +++ b/controllers/testdata/specify_net_port.yaml @@ -0,0 +1,26 @@ +--- +apiVersion: mongodbcommunity.mongodb.com/v1 +kind: MongoDBCommunity +metadata: + name: example-mongodb + namespace: test-ns +spec: + members: 3 + type: ReplicaSet + version: "4.2.6" + security: + authentication: + modes: ["SCRAM"] + users: + - name: my-user + db: admin + passwordSecretRef: + name: my-user-password + roles: + - name: clusterAdmin + db: admin + - name: userAdminAnyDatabase + db: admin + scramCredentialsSecretName: my-scram + additionalMongodConfig: + net.port: 40333 diff --git a/controllers/testdata/tolerations_example.yaml b/controllers/testdata/tolerations_example.yaml index 9d02db09c..b8c303ae7 100644 --- a/controllers/testdata/tolerations_example.yaml +++ b/controllers/testdata/tolerations_example.yaml @@ -6,7 +6,9 @@ spec: members: 3 type: ReplicaSet version: "4.2.6" - persistent: true + security: + authentication: + modes: ["SCRAM"] statefulSet: spec: template: diff --git a/controllers/testdata/volume_claim_templates_mdb.yaml b/controllers/testdata/volume_claim_templates_mdb.yaml index 0403e6525..1e20915b9 100644 --- a/controllers/testdata/volume_claim_templates_mdb.yaml +++ b/controllers/testdata/volume_claim_templates_mdb.yaml @@ -6,7 +6,9 @@ spec: members: 3 type: ReplicaSet version: "4.2.6" - persistent: true + security: + authentication: + modes: ["SCRAM"] statefulSet: spec: 
volumeClaimTemplates: diff --git a/controllers/validation/validation.go b/controllers/validation/validation.go index bd67fbcb9..3d84cc1c0 100644 --- a/controllers/validation/validation.go +++ b/controllers/validation/validation.go @@ -1,14 +1,207 @@ package validation import ( + "errors" + "fmt" + "strings" + + "go.uber.org/zap" + mdbv1 "github.com/mongodb/mongodb-kubernetes-operator/api/v1" - "github.com/pkg/errors" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/authentication/authtypes" + "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/constants" ) -func Validate(oldSpec, newSpec mdbv1.MongoDBCommunitySpec) error { - if oldSpec.Security.TLS.Enabled && !newSpec.Security.TLS.Enabled { +// ValidateInitialSpec checks if the resource's initial Spec is valid. +func ValidateInitialSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + return validateSpec(mdb, log) +} + +// ValidateUpdate validates that the new Spec, corresponding to the existing one, is still valid. +func ValidateUpdate(mdb mdbv1.MongoDBCommunity, oldSpec mdbv1.MongoDBCommunitySpec, log *zap.SugaredLogger) error { + if oldSpec.Security.TLS.Enabled && !mdb.Spec.Security.TLS.Enabled { return errors.New("TLS can't be set to disabled after it has been enabled") } + return validateSpec(mdb, log) +} + +// validateSpec validates the specs of the given resource definition. +func validateSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + if err := validateUsers(mdb); err != nil { + return err + } + + if err := validateArbiterSpec(mdb); err != nil { + return err + } + + if err := validateAuthModeSpec(mdb, log); err != nil { + return err + } + + if err := validateAgentCertSecret(mdb, log); err != nil { + return err + } + + if err := validateStatefulSet(mdb); err != nil { + return err + } + + return nil +} + +// validateUsers checks if the users configuration is valid +func validateUsers(mdb mdbv1.MongoDBCommunity) error { + connectionStringSecretNameMap := map[string]authtypes.User{} + nameCollisions := []string{} + + scramSecretNameMap := map[string]authtypes.User{} + scramSecretNameCollisions := []string{} + expectedAuthMethods := map[string]struct{}{} + + if len(mdb.Spec.Security.Authentication.Modes) == 0 { + expectedAuthMethods[constants.Sha256] = struct{}{} + } + + for _, auth := range mdb.Spec.Security.Authentication.Modes { + expectedAuthMethods[mdbv1.ConvertAuthModeToAuthMechanism(auth)] = struct{}{} + } + + for _, user := range mdb.GetAuthUsers() { + + // Ensure no collisions in the connection string secret names + connectionStringSecretName := user.ConnectionStringSecretName + if previousUser, exists := connectionStringSecretNameMap[connectionStringSecretName]; exists { + nameCollisions = append(nameCollisions, + fmt.Sprintf(`[connection string secret name: "%s" for user: "%s", db: "%s" and user: "%s", db: "%s"]`, + connectionStringSecretName, + previousUser.Username, + previousUser.Database, + user.Username, + user.Database)) + } else { + connectionStringSecretNameMap[connectionStringSecretName] = user + } + + // Ensure no collisions in the secret holding scram credentials + scramSecretName := user.ScramCredentialsSecretName + if previousUser, exists := scramSecretNameMap[scramSecretName]; exists { + scramSecretNameCollisions = append(scramSecretNameCollisions, + fmt.Sprintf(`[scram secret name: "%s" for user: "%s" and user: "%s"]`, + scramSecretName, + previousUser.Username, + user.Username)) + } else { + scramSecretNameMap[scramSecretName] = user + } + + if user.Database == 
constants.ExternalDB { + if _, ok := expectedAuthMethods[constants.X509]; !ok { + return fmt.Errorf("X.509 user %s present but X.509 is not enabled", user.Username) + } + if user.PasswordSecretKey != "" { + return fmt.Errorf("X.509 user %s should not have a password secret key", user.Username) + } + if user.PasswordSecretName != "" { + return fmt.Errorf("X.509 user %s should not have a password secret name", user.Username) + } + if user.ScramCredentialsSecretName != "" { + return fmt.Errorf("X.509 user %s should not have a scram credentials secret name", user.Username) + } + } else { + _, sha1 := expectedAuthMethods[constants.Sha1] + _, sha256 := expectedAuthMethods[constants.Sha256] + if !sha1 && !sha256 { + return fmt.Errorf("SCRAM user %s present but SCRAM is not enabled", user.Username) + } + if user.PasswordSecretKey == "" { + return fmt.Errorf("SCRAM user %s is missing a password secret key", user.Username) + } + if user.PasswordSecretName == "" { + return fmt.Errorf("SCRAM user %s is missing a password secret name", user.Username) + } + if user.ScramCredentialsSecretName == "" { + return fmt.Errorf("SCRAM user %s is missing a scram credentials secret name", user.Username) + } + } + } + if len(nameCollisions) > 0 { + return fmt.Errorf("connection string secret names collision, update at least one of the users so that the resulting secret names (<metadata.name>-<db>-<username>) are unique: %s", + strings.Join(nameCollisions, ", ")) + } + + if len(scramSecretNameCollisions) > 0 { + return fmt.Errorf("scram credential secret names collision, update at least one of the users: %s", + strings.Join(scramSecretNameCollisions, ", ")) + } + + return nil +} + +// validateArbiterSpec checks whether the arbiter configuration is valid. +func validateArbiterSpec(mdb mdbv1.MongoDBCommunity) error { + if mdb.Spec.Arbiters < 0 { + return fmt.Errorf("number of arbiters must be greater than or equal to 0") + } + if mdb.Spec.Arbiters >= mdb.Spec.Members { + return fmt.Errorf("number of arbiters specified (%v) is greater than or equal to the number of members in the replica set (%v); at least one member must not be an arbiter", mdb.Spec.Arbiters, mdb.Spec.Members) + } + + return nil +} + +// validateAuthModeSpec checks that the list of modes does not contain duplicates. +func validateAuthModeSpec(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + allModes := mdb.Spec.Security.Authentication.Modes + mapMechanisms := make(map[string]struct{}) + + // Issue a warning if the Modes array is empty + if len(allModes) == 0 { + mapMechanisms[constants.Sha256] = struct{}{} + log.Warnf("An empty Modes array has been provided. The default mode (SCRAM-SHA-256) will be used.") + } +
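+ // Converting each mode to its canonical mechanism means duplicate or aliased modes collapse in the map; comparing the map size with len(allModes) below detects them.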
+ // Check that no auth mode is defined more than once + for _, mode := range allModes { + if value := mdbv1.ConvertAuthModeToAuthMechanism(mode); value == "" { + return fmt.Errorf("unexpected value (%q) defined for supported authentication modes", mode) + } else if value == constants.X509 && !mdb.Spec.Security.TLS.Enabled { + return fmt.Errorf("TLS must be enabled when using X.509 authentication") + } + mapMechanisms[mdbv1.ConvertAuthModeToAuthMechanism(mode)] = struct{}{} + } + + if len(mapMechanisms) < len(allModes) { + return fmt.Errorf("some authentication modes are declared more than once") + } + + agentMode := mdb.Spec.GetAgentAuthMode() + if agentMode == "" && len(allModes) > 1 { + return fmt.Errorf("if spec.security.authentication.modes contains different authentication modes, the agent mode must be specified") + } + if _, present := mapMechanisms[mdbv1.ConvertAuthModeToAuthMechanism(agentMode)]; !present { + return fmt.Errorf("agent authentication mode %s must be part of spec.security.authentication.modes", agentMode) + } + + return nil +} + +func validateAgentCertSecret(mdb mdbv1.MongoDBCommunity, log *zap.SugaredLogger) error { + agentMode := mdb.Spec.GetAgentAuthMode() + if agentMode != "X509" && + mdb.Spec.Security.Authentication.AgentCertificateSecret != nil && + mdb.Spec.Security.Authentication.AgentCertificateSecret.Name != "" { + log.Warnf("Agent authentication is not X.509, but the agent certificate secret is configured; it will be ignored") + } + return nil +} + +func validateStatefulSet(mdb mdbv1.MongoDBCommunity) error { + stsReplicas := mdb.Spec.StatefulSetConfiguration.SpecWrapper.Spec.Replicas + + if stsReplicas != nil && *stsReplicas != int32(mdb.Spec.Members) { + return fmt.Errorf("spec.statefulset.spec.replicas has to be equal to spec.members") + } return nil } diff --git a/controllers/watch/watch.go b/controllers/watch/watch.go index 376bbfcf2..9522c53c3 100644 --- a/controllers/watch/watch.go +++ b/controllers/watch/watch.go @@ -1,11 +1,13 @@ package watch import ( + "context" "github.com/mongodb/mongodb-kubernetes-operator/pkg/util/contains" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -16,6 +18,8 @@ type ResourceWatcher struct { watched map[types.NamespacedName][]types.NamespacedName } +var _ handler.EventHandler = &ResourceWatcher{} + // New will create a new ResourceWatcher with no watched objects. func New() ResourceWatcher { return ResourceWatcher{ @@ -24,7 +28,7 @@ func New() ResourceWatcher { } // Watch will add a new object to watch.
-func (w ResourceWatcher) Watch(watchedName, dependentName types.NamespacedName) { +func (w ResourceWatcher) Watch(ctx context.Context, watchedName, dependentName types.NamespacedName) { existing, hasExisting := w.watched[watchedName] if !hasExisting { existing = []types.NamespacedName{} @@ -38,19 +42,19 @@ func (w ResourceWatcher) Watch(watchedName, dependentName types.NamespacedName) w.watched[watchedName] = append(existing, dependentName) } -func (w ResourceWatcher) Create(event event.CreateEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } -func (w ResourceWatcher) Update(event event.UpdateEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.ObjectOld, queue) } -func (w ResourceWatcher) Delete(event event.DeleteEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } -func (w ResourceWatcher) Generic(event event.GenericEvent, queue workqueue.RateLimitingInterface) { +func (w ResourceWatcher) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { w.handleEvent(event.Object, queue) } diff --git a/controllers/watch/watch_test.go b/controllers/watch/watch_test.go index ad4202445..ab8c522be 100644 --- a/controllers/watch/watch_test.go +++ b/controllers/watch/watch_test.go @@ -1,6 +1,7 @@ package watch import ( + "context" "testing" "k8s.io/apimachinery/pkg/types" @@ -19,6 +20,7 @@ import ( ) func TestWatcher(t *testing.T) { + ctx := context.Background() obj := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "pod", @@ -45,23 +47,23 @@ func TestWatcher(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) // Ensure no reconciliation is queued if object is not watched. assert.Equal(t, 0, queue.Len()) }) - t.Run("Multiple objects to reconile", func(t *testing.T) { + t.Run("Multiple objects to reconcile", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) - watcher.Watch(objNsName, mdb2.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb2.NamespacedName()) - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) // Ensure multiple reconciliations are enqueued. 
assert.Equal(t, 2, queue.Len()) @@ -70,11 +72,11 @@ func TestWatcher(t *testing.T) { t.Run("Create event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Create(event.CreateEvent{ + watcher.Create(ctx, event.CreateEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -82,12 +84,12 @@ func TestWatcher(t *testing.T) { t.Run("Update event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Update(event.UpdateEvent{ + watcher.Update(ctx, event.UpdateEvent{ ObjectOld: obj, ObjectNew: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -95,11 +97,11 @@ func TestWatcher(t *testing.T) { t.Run("Delete event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Delete(event.DeleteEvent{ + watcher.Delete(ctx, event.DeleteEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) @@ -107,17 +109,18 @@ func TestWatcher(t *testing.T) { t.Run("Generic event", func(t *testing.T) { watcher := New() queue := controllertest.Queue{Interface: workqueue.New()} - watcher.Watch(objNsName, mdb1.NamespacedName()) + watcher.Watch(ctx, objNsName, mdb1.NamespacedName()) - watcher.Generic(event.GenericEvent{ + watcher.Generic(ctx, event.GenericEvent{ Object: obj, - }, queue) + }, &queue) assert.Equal(t, 1, queue.Len()) }) } func TestWatcherAdd(t *testing.T) { + ctx := context.Background() watcher := New() assert.Empty(t, watcher.watched) @@ -137,17 +140,17 @@ func TestWatcherAdd(t *testing.T) { } // Ensure single object can be added to empty watchlist. - watcher.Watch(watchedName, mdb1.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) // Ensure object can only be watched once. - watcher.Watch(watchedName, mdb1.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb1.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{mdb1.NamespacedName()}, watcher.watched[watchedName]) // Ensure a single object can be watched for multiple reconciliations. 
- watcher.Watch(watchedName, mdb2.NamespacedName()) + watcher.Watch(ctx, watchedName, mdb2.NamespacedName()) assert.Len(t, watcher.watched, 1) assert.Equal(t, []types.NamespacedName{ mdb1.NamespacedName(), diff --git a/deploy/clusterwide/role.yaml b/deploy/clusterwide/cluster_role.yaml similarity index 58% rename from deploy/clusterwide/role.yaml rename to deploy/clusterwide/cluster_role.yaml index 6a134d463..de8abc63c 100644 --- a/deploy/clusterwide/role.yaml +++ b/deploy/clusterwide/cluster_role.yaml @@ -1,18 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: mongodb-kubernetes-operator rules: - apiGroups: - "" resources: - - pods - services - - services/finalizers - - endpoints - - persistentvolumeclaims - - events - configmaps - secrets verbs: @@ -26,9 +20,6 @@ rules: - apiGroups: - apps resources: - - deployments - - daemonsets - - replicasets - statefulsets verbs: - create @@ -38,45 +29,27 @@ rules: - patch - update - watch -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create -- apiGroups: - - apps - resourceNames: - - mongodb-kubernetes-operator - resources: - - deployments/finalizers - verbs: - - update - apiGroups: - "" resources: - pods verbs: + - delete - get -- apiGroups: - - apps - resources: - - replicasets - - deployments - verbs: - - get + - list + - patch + - update + - watch - apiGroups: - mongodbcommunity.mongodb.com resources: - mongodbcommunity - mongodbcommunity/status - mongodbcommunity/spec + - mongodbcommunity/finalizers verbs: - - create - - delete - get - - list - patch + - list - update - watch diff --git a/deploy/clusterwide/role_binding.yaml b/deploy/clusterwide/cluster_role_binding.yaml similarity index 89% rename from deploy/clusterwide/role_binding.yaml rename to deploy/clusterwide/cluster_role_binding.yaml index 2096a8b65..7617ec02d 100644 --- a/deploy/clusterwide/role_binding.yaml +++ b/deploy/clusterwide/cluster_role_binding.yaml @@ -4,8 +4,8 @@ metadata: name: mongodb-kubernetes-operator subjects: - kind: ServiceAccount + # namespace: name: mongodb-kubernetes-operator - namespace: default roleRef: kind: ClusterRole name: mongodb-kubernetes-operator diff --git a/deploy/clusterwide/role-for-binding.yaml b/deploy/clusterwide/role-for-binding.yaml new file mode 100644 index 000000000..8bc7daaed --- /dev/null +++ b/deploy/clusterwide/role-for-binding.yaml @@ -0,0 +1,10 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: read-access-for-service-binding + labels: + servicebinding.io/controller: "true" +rules: + - apiGroups: ["mongodbcommunity.mongodb.com"] + resources: ["mongodbcommunity", "mongodbcommunity/status"] + verbs: ["get", "list", "watch"] diff --git a/deploy/e2e/role.yaml b/deploy/e2e/role.yaml index 0f2178e42..b11b12dd1 100644 --- a/deploy/e2e/role.yaml +++ b/deploy/e2e/role.yaml @@ -25,6 +25,13 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - create + - list - apiGroups: - apps resources: @@ -61,6 +68,12 @@ rules: - pods verbs: - get +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create - apiGroups: - apps resources: @@ -98,3 +111,187 @@ rules: - patch - update - watch +# needed for cert-manager integration +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - 
mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - delete + - patch + - update +- apiGroups: + - acme.cert-manager.io + resources: + - challenges + - challenges/finalizers + - challenges/status + - orders + - orders/finalizers + - orders/status + verbs: + - create + - delete + - update + - get + - list + - watch + - patch + - deletecollection +- apiGroups: + - cert-manager.io + resources: + - clusterissuers + - clusterissuers/status + - issuers + - issuers/status + - certificates + - certificaterequests + - certificaterequests/finalizers + - certificaterequests/status + - certificates/finalizers + - certificates/status + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - deletecollection +- apiGroups: + - cert-manager.io + resources: + - signers + resourceNames: + - clusterissuers.cert-manager.io/* + - issuers.cert-manager.io/* + verbs: + - approve +- apiGroups: + - networking.k8s.io + resources: + - ingresses + - ingresses/finalizers + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - networking.x-k8s.io + resources: + - httproutes + - gateways + - gateways/finalizers + - httproutes/finalizers + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - route.openshift.io + resources: + - routes/custom-host + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +- apiGroups: + - apiregistration.k8s.io + resources: + - apiservices + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - auditregistration.k8s.io + resources: + - auditsinks + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/status + verbs: + - get + - list + - watch + - create + - delete + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - clusterissuers.cert-manager.io/* + - issuers.cert-manager.io/* + verbs: + - sign +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - cert-manager-controller + - cert-manager-cainjector-leader-election + - cert-manager-cainjector-leader-election-core + verbs: + - get + - update + - patch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - update diff --git a/deploy/e2e/role_binding.yaml b/deploy/e2e/role_binding.yaml index f9a643252..17a3828b3 100644 --- a/deploy/e2e/role_binding.yaml +++ b/deploy/e2e/role_binding.yaml @@ -5,7 +5,7 @@ metadata: subjects: - kind: ServiceAccount name: e2e-test - namespace: default + namespace: mongodb roleRef: kind: ClusterRole name: e2e-test diff --git a/deploy/e2e/service_account.yaml b/deploy/e2e/service_account.yaml index 07bb7f0f4..84fea363b 100644 --- a/deploy/e2e/service_account.yaml +++ b/deploy/e2e/service_account.yaml @@ -2,3 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: e2e-test + namespace: mongodb diff --git a/deploy/openshift/operator_openshift.yaml b/deploy/openshift/operator_openshift.yaml index 89503d558..b7011a1cc 100644 --- a/deploy/openshift/operator_openshift.yaml +++ b/deploy/openshift/operator_openshift.yaml @@ -1,44 +1,69 @@ apiVersion: apps/v1 kind: Deployment metadata: + annotations: + email: support@mongodb.com + labels: + owner: mongodb
name: mongodb-kubernetes-operator spec: replicas: 1 selector: matchLabels: name: mongodb-kubernetes-operator + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate template: metadata: labels: name: mongodb-kubernetes-operator spec: - serviceAccountName: mongodb-kubernetes-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: name + operator: In + values: + - mongodb-kubernetes-operator + topologyKey: kubernetes.io/hostname containers: - - name: mongodb-kubernetes-operator - image: quay.io/mongodb/mongodb-kubernetes-operator:0.6.0 - command: - - mongodb-kubernetes-operator - imagePullPolicy: Always - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MANAGED_SECURITY_CONTEXT - value: 'true' - - name: OPERATOR_NAME - value: "mongodb-kubernetes-operator" - - name: AGENT_IMAGE # The MongoDB Agent the operator will deploy to manage MongoDB deployments - value: quay.io/mongodb/mongodb-agent:10.29.0.6830-1 - - name: READINESS_PROBE_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3 - - name: VERSION_UPGRADE_HOOK_IMAGE - value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2 - - name: MONGODB_IMAGE - value: "library/mongo" - - name: MONGODB_REPO_URL - value: "registry.hub.docker.com" + - command: + - /usr/local/bin/entrypoint + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MANAGED_SECURITY_CONTEXT + value: 'true' + - name: OPERATOR_NAME + value: mongodb-kubernetes-operator + - name: AGENT_IMAGE + value: quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1 + - name: READINESS_PROBE_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.23 + - name: VERSION_UPGRADE_HOOK_IMAGE + value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.10 + - name: MONGODB_IMAGE + value: mongo + - name: MONGODB_REPO_URL + value: quay.io + image: quay.io/mongodb/mongodb-kubernetes-operator:0.13.0 + imagePullPolicy: Always + name: mongodb-kubernetes-operator + resources: + limits: + cpu: 1100m + memory: 1Gi + requests: + cpu: 500m + memory: 200Mi + serviceAccountName: mongodb-kubernetes-operator diff --git a/dev_notes/RELEASE_NOTES.md b/dev_notes/RELEASE_NOTES.md deleted file mode 100644 index 603818df9..000000000 --- a/dev_notes/RELEASE_NOTES.md +++ /dev/null @@ -1,28 +0,0 @@ -# MongoDB Kubernetes Operator 0.6.0 -## Kubernetes Operator - -* Breaking Changes - * A new VolumeClaimTemplate has been added `logs-volume`. When you deploy the operator, if there is an existing StatefulSet the operator will attempt to perform an invalid update. The existing StatefulSet must be deleted before upgrading the operator. - - * The user of the mongod and mongodb-agent containers has changed. This means that there will be permissions - issues when upgrading from an earlier version of the operator. In order to update the permissions in the volume, you can use an init container. - -* Upgrade instructions - - Remove the current operator deployment - - `kubectl delete deployment ` - Delete the existing StatefulSet for the MongoDBCommunity resource - Note: to ensure existing data is not lost, ensure that the retain policy of your Persistent Volumes is configured correctly. 
Please reference the [official docs](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming) for these configuration options. - - `kubectl delete statefulset ` - Install the new operator - - follow the regular [installation instruction](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md) - Patch the StatefulSet once it has been created. This will add an init container that will update the permissions of the existing volume. - - `kubectl patch statefulset --type='json' --patch '[ {"op":"add","path":"/spec/template/spec/initContainers/-", "value": { "name": "change-data-dir-permissions", "image": "busybox", "command": [ "chown", "-R", "2000", "/data" ], "securityContext": { "runAsNonRoot": false, "runAsUser": 0, "runAsGroup":0 }, "volumeMounts": [ { "mountPath": "/data", "name" : "data-volume" } ] } } ]'` - -* Bug fixes - * Fixes an issue that prevented the agents from reaching goal state when upgrading minor version of MongoDB. - - ## Updated Image Tags - * mongodb-kubernetes-operator:0.6.0 - * mongodb-agent:0.29.0.6830-1 - * mongodb-kubernetes-readinessprobe:1.0.3 diff --git a/dev_notes/dev-quick-start.md b/dev_notes/dev-quick-start.md deleted file mode 100644 index 0cfcf7324..000000000 --- a/dev_notes/dev-quick-start.md +++ /dev/null @@ -1,69 +0,0 @@ - -#### Prerequisites - -* install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -* create a python virtual enironment - -```bash -python3 -m venv /path/to/new/virtual/environment -source path/to/new/virtual/environment/bin/activate -``` -* install python dependencies ```pip install -r requirements.txt``` - - -#### Create a Kind cluster and a local registry -```bash -./scripts/dev/setup_kind_cluster.sh -``` - -#### set the kind kubernetes context -```bash -export KUBECONFIG=~/.kube/kind -``` - -#### get kind credentials -```bash -kind export kubeconfig - -# check it worked by running: -kubectl cluster-info --context kind-kind --kubeconfig $KUBECONFIG -``` - -#### create the namespace to work in -```bash -kubectl create namespace mongodb - -# optionally set it as the default -kubectl config set-context --current --namespace=mongodb -``` - -#### create a config file for the dev environment -```bash -cat > ~/.community-operator-dev/config.json << EOL -{ - "namespace": "mongodb", - "repo_url": "localhost:5000", - "operator_image": "mongodb-kubernetes-operator", - "e2e_image": "e2e", - "prestop_hook_image": "prehook", - "testrunner_image": "test-runner", - "version_upgrade_hook_image": "community-operator-version-upgrade-post-start-hook" -} -EOL -``` - -#### build and deploy the operator to the cluster -```bash -python scripts/dev/build_and_deploy_operator.py -``` - - -#### See the operator deployment -```bash -kubectl get pods -``` - -#### Deploy a Replica Set -```bash -kubectl apply -f deploy/crds/mongodb.com_v1_mongodbcommunity_cr.yaml -``` diff --git a/dev_notes/how-to-release.md b/dev_notes/how-to-release.md deleted file mode 100644 index 19205d2a0..000000000 --- a/dev_notes/how-to-release.md +++ /dev/null @@ -1,17 +0,0 @@ - -## How to Release - -* Update any finished tickets in [kube-community-next](https://jira.mongodb.org/issues?jql=project%20%3D%20CLOUDP%20AND%20component%20%3D%20%22Kubernetes%20Community%22%20%20AND%20status%20in%20(Resolved%2C%20Closed)%20and%20fixVersion%3D%20kube-community-next%20%20ORDER%20BY%20resolved) to have the version of the release you're doing (kube-community-x.y) - -* Prepare the release PR - 1. 
Increment any image version changes. - 2. Create a github draft release `./scripts/dev/create_github_release.sh`. - 3. Commit changes. - -* Create release PR - 1. Reconfigure the Evergreen run to add the relevant release task(s). - - -* Unblock release task once everything is green - -Once the images are released, merge release PR & publish github release \ No newline at end of file diff --git a/dev_notes/past_release_notes/v0.5.2.md b/dev_notes/past_release_notes/v0.5.2.md deleted file mode 100644 index f333ce5fa..000000000 --- a/dev_notes/past_release_notes/v0.5.2.md +++ /dev/null @@ -1,27 +0,0 @@ -# MongoDB Kubernetes Operator 0.5.2 -## Kubernetes Operator -* Changes - * Readiness probe has been moved into an init container from the Agent image. - * Security context is now added when the `MANAGED_SECURITY_CONTEXT` environment variable is not set. -* Bug fixes - * Removed unnecessary environment variable configuration in the openshift samples. - * Fixed an issue where the operator would perform unnecessary reconcilliations when Secrets were modified. - * Fixed an issue where a race condition could cause the deployment to get into a bad state when TLS - settings when being changed at the same time as a scaling operation was happening. - * Fixed an issue where the agent pod would panic when running as a non-root user. - -## MongoDBCommunity Resource -* Changes - * Added `spec.security.authentication.ignoreUnknownUsers` field. This value defaults to `true`. When enabled, - any MongoDB users added through external sources will not be removed. - - -## Miscellaneous -* Changes - * Internal code refactorings to allow libraries to be imported into other projects. - - - ## Updated Image Tags - * mongodb-kubernetes-operator:0.5.2 - * mongodb-agent:10.27.0.6772-1 - * mongodb-kubernetes-readinessprobe:1.0.1 [new image] diff --git a/dev_notes/past_release_notes/v0.6.0.md b/dev_notes/past_release_notes/v0.6.0.md deleted file mode 100644 index 33adf0f8a..000000000 --- a/dev_notes/past_release_notes/v0.6.0.md +++ /dev/null @@ -1,28 +0,0 @@ -# MongoDB Kubernetes Operator 0.6.0 -## Kubernetes Operator - -* Breaking Changes - * A new VolumeClaimTemplate has been added `logs-volume`. When you deploy the operator, if there is an existing StatefulSet the operator will attempt to perform an invalid update. The existing StatefulSet must be deleted before upgrading the operator. - - * The user of the mongod and mongodb-agent containers has changed. This means that there will be permissions - issues when upgrading from an earlier version of the operator. In order to update the permissions in the volume, you can use an init container. - -* Upgrade instructions - - Remove the current operator deployment - - `kubectl delete deployment ` - Delete the existing StatefulSet for the MongoDBCommunity resource - Note: to ensure existing data is not lost, ensure that the retain policy of your Persistent Volumes is configured correctly. - - `kubectl delete statefulset ` - Install the new operator - - follow the regular [installation instruction](https://github.com/mongodb/mongodb-kubernetes-operator/blob/master/docs/install-upgrade.md) - Patch the StatefulSet once it has been created. This will add an init container that will update the permissions of the existing volume. 
- `kubectl patch statefulset --type='json' --patch '[ {"op":"add","path":"/spec/template/spec/initContainers/-", "value": { "name": "change-data-dir-permissions", "image": "busybox", "command": [ "chown", "-R", "2000", "/data" ], "securityContext": { "runAsNonRoot": false, "runAsUser": 0, "runAsGroup":0 }, "volumeMounts": [ { "mountPath": "/data", "name" : "data-volume" } ] } } ]'` - -* Bug fixes - * Fixes an issue that prevented the agents from reaching goal state when upgrading minor version of MongoDB. - - ## Updated Image Tags - * mongodb-kubernetes-operator:0.6.0 - * mongodb-agent:0.29.0.6830-1 - * mongodb-kubernetes-readinessprobe:1.0.3 diff --git a/docs/README.md b/docs/README.md index 58e3d56d5..7475a0d10 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,10 @@ ## Table of Contents -- [Contribute to the MongoDB Kubernetes Operator](/docs/contributing.md) -- [MongoDB Community Kubernetes Operator Architecture](/docs/architecture.md) -- [Install and Upgrade the Community Kubernetes Operator](/docs/install-upgrade.md) -- [Deploy and Configure MongoDB Resources](/docs/deploy-configure.md) -- [Create Database Users](/docs/users.md) -- [Secure MongoDB Resources](/docs/secure.md) +- [Contribute to the MongoDB Kubernetes Operator](contributing.md) +- [MongoDB Community Kubernetes Operator Architecture](architecture.md) +- [Install and Upgrade the Community Kubernetes Operator](install-upgrade.md) +- [Deploy and Configure MongoDBCommunity Resources](deploy-configure.md) +- [Configure Logging of the MongoDB components](logging.md) +- [Create Database Users](users.md) +- [Secure MongoDBCommunity Resources](secure.md) diff --git a/docs/RELEASE_NOTES.md b/docs/RELEASE_NOTES.md new file mode 100644 index 000000000..6109fac02 --- /dev/null +++ b/docs/RELEASE_NOTES.md @@ -0,0 +1,14 @@ +# MongoDB Kubernetes Operator 0.13.0 + +## Dependency updates + - Updated Kubernetes dependencies to 1.30 + - Bumped Go dependency to 1.24 + - Updated packages `crypto`, `net` and `oauth2` to remediate multiple CVEs + +## MongoDBCommunity Resource + - Added support for overriding the ReplicaSet ID ([#1656](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1656)). + +## Improvements + - Refactored environment variable propagation ([#1676](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1676)). + - Introduced a linter to limit inappropriate usage of environment variables within the codebase ([#1690](https://github.com/mongodb/mongodb-kubernetes-operator/pull/1690)). + diff --git a/docs/architecture.md b/docs/architecture.md index 71db0d3a8..5e11baf6e 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -10,25 +10,25 @@ The MongoDB Community Kubernetes Operator is a [Custom Resource Definition](http ## Cluster Configuration -You create and update MongoDB resources by defining a MongoDB resource definition. When you apply the MongoDB resource definition to your Kubernetes environment, the Operator: +You create and update MongoDBCommunity resources by defining a MongoDBCommunity resource definition. When you apply the MongoDBCommunity resource definition to your Kubernetes environment, the Operator: -1.
Creates a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) that contains one [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) for each [replica set](https://docs.mongodb.com/manual/replication/) member. -1. Writes the Automation configuration as a [ConfigMap](https://kubernetes.io/docs/concepts/configuration/configmap/) and mounts it to each pod. +1. Creates a [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) that contains one [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) for each [replica set](https://www.mongodb.com/docs/manual/replication/) member. +1. Writes the Automation configuration as a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/) and mounts it to each pod. 1. Creates one [init container](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) and two [containers](https://kubernetes.io/docs/concepts/containers/overview/) in each pod: - An init container which copies the `cmd/versionhook` binary to the main `mongod` container. This is run before `mongod` starts to handle [version upgrades](#example-mongodb-version-upgrade). - - A container for the [`mongod`](https://docs.mongodb.com/manual/reference/program/mongod/index.html) process binary. `mongod` is the primary daemon process for the MongoDB system. It handles data requests, manages data access, and performs background management operations. + - A container for the [`mongod`](https://www.mongodb.com/docs/manual/reference/program/mongod/index.html) process binary. `mongod` is the primary daemon process for the MongoDB system. It handles data requests, manages data access, and performs background management operations. - A container for the MongoDB Agent. The Automation function of the MongoDB Agent handles configuring, stopping, and restarting the `mongod` process. The MongoDB Agent periodically polls the `mongod` to determine status and can deploy changes as needed. 1. Creates several volumes: - `data-volume` which is [persistent](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) and mounts to `/data` on both the server and agent containers. Stores server data as well as `automation-mongod.conf` written by the agent and some locks the agent needs. - - `automation-config` which is mounted from the previously generated ConfigMap to both the server and agent. Only lives as long as the pod. + - `automation-config` which is mounted from the previously generated `Secret` to both the server and agent. Only lives as long as the pod. - `healthstatus` which contains the agent's current status. This is shared with the `mongod` container where it's used by the pre-stop hook. Only lives as long as the pod. -1. Initiates the MongoDB Agent, which in turn creates the database configuration and launches the `mongod` process according to your [MongoDB resource definition](../deploy/crds/mongodb.com_v1_mongodbcommunity_cr.yaml). +1. Initiates the MongoDB Agent, which in turn creates the database configuration and launches the `mongod` process according to your [MongoDBCommunity resource definition](../config/crd/bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml).
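For reference, the flow above is driven by a MongoDBCommunity resource definition such as the following minimal sketch; the names, MongoDB version, and user block are illustrative placeholders rather than values required by the operator.

```yaml
apiVersion: mongodbcommunity.mongodb.com/v1
kind: MongoDBCommunity
metadata:
  name: example-mongodb        # illustrative name
  namespace: mongodb
spec:
  members: 3                   # one pod per replica set member
  type: ReplicaSet
  version: "6.0.5"             # illustrative MongoDB version
  security:
    authentication:
      modes: ["SCRAM"]
  users:
    - name: my-user
      db: admin
      passwordSecretRef:
        name: my-user-password # Secret holding this user's password
      roles:
        - name: clusterAdmin
          db: admin
      scramCredentialsSecretName: my-scram
```

Applying a manifest like this is what triggers the steps above: the Operator creates the StatefulSet, writes the Automation configuration Secret, and the MongoDB Agents bring up and manage the `mongod` processes.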