diff --git a/e2e-tests/arbiter/run b/e2e-tests/arbiter/run
index 2721feae74..83af287fd8 100755
--- a/e2e-tests/arbiter/run
+++ b/e2e-tests/arbiter/run
@@ -74,9 +74,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'check arbiter without service-per-pod'
 	check_cr_config "arbiter-rs0"
diff --git a/e2e-tests/balancer/run b/e2e-tests/balancer/run
index d989f73562..06ebc8dfb7 100755
--- a/e2e-tests/balancer/run
+++ b/e2e-tests/balancer/run
@@ -69,9 +69,9 @@ main() {
 	desc 'create first PSMDB cluster'
 	cluster="some-name"
-	kubectl_bin apply \
-		-f "$conf_dir/secrets.yml" \
-		-f "$conf_dir/client-70.yml"
+
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client-70.yml

 	if version_gt "1.19" && [ $EKS -ne 1 ]; then
 		$sed 's/docker/runc/g' "$conf_dir/container-rc.yaml" | kubectl_bin apply -f -
diff --git a/e2e-tests/conf/client.yml b/e2e-tests/conf/client.yml
index cc2a6b1e5f..1449675378 100644
--- a/e2e-tests/conf/client.yml
+++ b/e2e-tests/conf/client.yml
@@ -15,7 +15,7 @@ spec:
       terminationGracePeriodSeconds: 10
       containers:
         - name: psmdb-client
-          image: percona/percona-server-mongodb:4.4
+          image: percona/percona-server-mongodb:4.4-multi
           imagePullPolicy: Always
           command:
             - sleep
diff --git a/e2e-tests/conf/client_with_tls.yml b/e2e-tests/conf/client_with_tls.yml
index 4b6f5e829e..bd259c26ea 100644
--- a/e2e-tests/conf/client_with_tls.yml
+++ b/e2e-tests/conf/client_with_tls.yml
@@ -15,7 +15,7 @@ spec:
       terminationGracePeriodSeconds: 10
      containers:
        - name: psmdb-client
-          image: percona/percona-server-mongodb:4.4
+          image: percona/percona-server-mongodb:4.4-multi
           imagePullPolicy: Always
           command: ["/bin/bash","-c","cat /etc/mongodb-ssl/tls.key /etc/mongodb-ssl/tls.crt > /tmp/tls.pem && sleep 100500"]
           volumeMounts:
diff --git a/e2e-tests/cross-site-sharded/run b/e2e-tests/cross-site-sharded/run
index 14a3c99765..f8d36a143b 100755
--- a/e2e-tests/cross-site-sharded/run
+++ b/e2e-tests/cross-site-sharded/run
@@ -39,9 +39,8 @@ desc "create main cluster"
 create_infra "$namespace"

 desc 'create secrets and start client'
-kubectl_bin apply \
-    -f "$conf_dir/client.yml" \
-    -f "$test_dir/conf/secrets.yml"
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml

 desc "create main PSMDB cluster $main_cluster."
 apply_cluster "$test_dir/conf/$main_cluster.yml"
@@ -112,8 +111,7 @@ create_namespace $replica_namespace 0
 deploy_operator

 desc 'start client'
-kubectl_bin apply \
-    -f "$conf_dir/client.yml"
+apply_client $conf_dir/client.yml

 desc "copy secrets from main to replica namespace and create all of them"
 kubectl get secret ${main_cluster}-secrets -o yaml -n ${namespace} \
diff --git a/e2e-tests/custom-replset-name/conf/some-name.yml b/e2e-tests/custom-replset-name/conf/some-name.yml
index c34b9de33d..6f3b63c875 100644
--- a/e2e-tests/custom-replset-name/conf/some-name.yml
+++ b/e2e-tests/custom-replset-name/conf/some-name.yml
@@ -6,7 +6,7 @@ spec:
   crVersion: 1.18.0
   backup:
     enabled: true
-    image: percona/percona-backup-mongodb:2.0.4
+    image:
     pitr:
       enabled: false
     serviceAccountName: percona-server-mongodb-operator
@@ -33,7 +33,7 @@ spec:
       bucket: operator-testing
       prefix: psmdb
       endpointUrl: https://storage.googleapis.com
-  image: percona/percona-server-mongodb:4.4.10-11
+  image:
   imagePullPolicy: Always
   pmm:
     enabled: false
diff --git a/e2e-tests/custom-replset-name/run b/e2e-tests/custom-replset-name/run
index c50d924433..9808298ab2 100755
--- a/e2e-tests/custom-replset-name/run
+++ b/e2e-tests/custom-replset-name/run
@@ -10,7 +10,11 @@ create_infra $namespace
 apply_s3_storage_secrets
 deploy_minio

-kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $conf_dir/minio-secret.yml
+apply_client $conf_dir/client.yml
+
 cluster="some-name"

 desc 'create first PSMDB cluster'
diff --git a/e2e-tests/custom-tls/run b/e2e-tests/custom-tls/run
index 9e39c37357..d4d8e91a7f 100755
--- a/e2e-tests/custom-tls/run
+++ b/e2e-tests/custom-tls/run
@@ -32,8 +32,8 @@ main() {
 	destroy_cert_manager || true # We need to be sure that we are getting certificates created by the operator, not by cert-manager

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "$conf_dir/secrets.yml"
-	kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client_with_tls.yml

 	cluster="some-name"
 	desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/custom-users-roles-sharded/run b/e2e-tests/custom-users-roles-sharded/run
index 5e6988a69f..1a3b332374 100755
--- a/e2e-tests/custom-users-roles-sharded/run
+++ b/e2e-tests/custom-users-roles-sharded/run
@@ -79,10 +79,9 @@ create_infra "$namespace"
 mongosUri="userAdmin:userAdmin123456@$cluster-mongos.$namespace"

 desc 'create secrets and start client'
-kubectl_bin apply -f "${conf_dir}/client.yml" \
-    -f "${conf_dir}/secrets.yml" \
-    -f "${test_dir}/conf/app-user-secrets.yml"
-
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml
+apply_client $conf_dir/client.yml
 apply_s3_storage_secrets

 if version_gt "1.19" && [ $EKS -ne 1 ]; then
@@ -135,7 +134,7 @@ kubectl_bin patch psmdb ${cluster} --type=merge --patch '{
            "key": "userTwoPassKey"
        },
        "roles": [
-            {"db":"admin","name":"userAdminAnyDatabase"}, 
+            {"db":"admin","name":"userAdminAnyDatabase"},
             {"db":"admin","name":"clusterAdmin"}
        ]
    }
diff --git a/e2e-tests/custom-users-roles/run b/e2e-tests/custom-users-roles/run
index ae7ce54f31..9999d3c50c 100755
--- a/e2e-tests/custom-users-roles/run
+++ b/e2e-tests/custom-users-roles/run
@@ -68,9 +68,9 @@ cluster="some-name-rs0"
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply -f "${conf_dir}/client.yml" \
"${conf_dir}/client.yml" \ - -f "${conf_dir}/secrets.yml" \ - -f "${test_dir}/conf/app-user-secrets.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +kubectl_bin apply -f $test_dir/conf/app-user-secrets.yml +apply_client $conf_dir/client.yml mongoUri="userAdmin:userAdmin123456@$cluster.$namespace" @@ -107,7 +107,7 @@ kubectl_bin patch psmdb ${psmdb} --type=merge --patch '{ "key": "userTwoPassKey" }, "roles": [ - {"db":"admin","name":"userAdminAnyDatabase"}, + {"db":"admin","name":"userAdminAnyDatabase"}, {"db":"admin","name":"clusterAdmin"} ] } diff --git a/e2e-tests/data-at-rest-encryption/run b/e2e-tests/data-at-rest-encryption/run index 44982a0247..54901f7c06 100755 --- a/e2e-tests/data-at-rest-encryption/run +++ b/e2e-tests/data-at-rest-encryption/run @@ -13,7 +13,8 @@ deploy_minio apply_s3_storage_secrets desc 'create secrets and start client' -kubectl_bin apply -f "$conf_dir/secrets.yml" -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster='some-name' desc "create PSMDB cluster $cluster" @@ -57,10 +58,7 @@ sleep 5 desc "check backup and restore -- minio" backup_dest_minio=$(get_backup_dest "$backup_name_minio") -retry 3 8 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz +retry 3 8 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster-mongos.$namespace" compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" run_restore "$backup_name_minio" diff --git a/e2e-tests/data-sharded/run b/e2e-tests/data-sharded/run index 5c77ed5a78..07e40d9ac6 100755 --- a/e2e-tests/data-sharded/run +++ b/e2e-tests/data-sharded/run @@ -39,8 +39,8 @@ main() { deploy_cert_manager desc 'create secrets and start client' - kubectl_bin apply -f "$conf_dir/secrets.yml" - kubectl_bin apply -f "$conf_dir/client_with_tls.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client_with_tls.yml cluster="some-name" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/default-cr/run b/e2e-tests/default-cr/run index a0c80c3b78..76d4947258 100755 --- a/e2e-tests/default-cr/run +++ b/e2e-tests/default-cr/run @@ -48,26 +48,46 @@ function main() { desc 'create secrets and start client' kubectl_bin apply -f $deploy_dir/secrets.yaml - kubectl_bin apply -f $conf_dir/client.yml + apply_client $conf_dir/client.yml desc "create first PSMDB cluster $cluster" kubectl_bin apply ${OPERATOR_NS:+-n $OPERATOR_NS} --server-side --force-conflicts -f $deploy_dir/crd.yaml + + + local temp_operator_yaml="$(mktemp)" + if [ -n "$OPERATOR_NS" ]; then apply_rbac cw-rbac kubectl_bin apply -n ${OPERATOR_NS} -f $deploy_dir/cw-operator.yaml else apply_rbac rbac - yq eval '((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" \ - | kubectl_bin apply -f - + yq eval '((.. 
| select(.[] == "DISABLE_TELEMETRY")) |= .value="true")' "$deploy_dir/operator.yaml" > $temp_operator_yaml + + if [[ $ARCH == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += '"$TOLERATIONS_ARM64"')' $temp_operator_yaml + fi + + kubectl_bin apply -f $temp_operator_yaml fi + local temp_cr="$(mktemp)" yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com" | .spec.replsets[].affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].nonvoting.affinity.antiAffinityTopologyKey = "none" | .spec.replsets[].arbiter.affinity.antiAffinityTopologyKey = "none" | .spec.sharding.configsvrReplSet.affinity.antiAffinityTopologyKey = "none" | - .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml \ - | kubectl_bin apply -f - + .spec.sharding.mongos.affinity.antiAffinityTopologyKey = "none"' $deploy_dir/cr.yaml > $temp_cr + + if [[ $ARCH == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += '"$TOLERATIONS_ARM64"'' $temp_cr | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr + fi desc 'check if all 3 Pods started' wait_cluster_consistency $cluster 70 @@ -137,7 +157,20 @@ function main() { cluster="minimal-cluster" yq eval '.metadata.name = "'${cluster}'"' $deploy_dir/secrets.yaml | kubectl_bin apply -f - - yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml | kubectl_bin apply -f - + local temp_cr_minimal="$(mktemp)" + yq eval '.spec.upgradeOptions.versionServiceEndpoint = "https://check-dev.percona.com"' $deploy_dir/cr-minimal.yaml > $temp_cr_minimal + + if [[ $ARCH == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += '"$TOLERATIONS_ARM64"'' $temp_cr_minimal | + kubectl_bin apply -f - + else + kubectl_bin apply -f $temp_cr_minimal + fi + desc 'check if all Pods started' wait_cluster_consistency "${cluster}" diff --git a/e2e-tests/demand-backup-eks-credentials/run b/e2e-tests/demand-backup-eks-credentials/run index 96ffed793e..4a59214f9c 100755 --- a/e2e-tests/demand-backup-eks-credentials/run +++ b/e2e-tests/demand-backup-eks-credentials/run @@ -14,9 +14,8 @@ fi create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $conf_dir/secrets.yml +apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/demand-backup-physical-sharded/run b/e2e-tests/demand-backup-physical-sharded/run index 4cfb61bc15..afb79a8716 100755 --- a/e2e-tests/demand-backup-physical-sharded/run +++ b/e2e-tests/demand-backup-physical-sharded/run @@ -66,11 +66,13 @@ apply_s3_storage_secrets ### Case 1: Backup and restore on sharded 
 desc 'Testing on sharded cluster'

-echo "Creating PSMDB cluster"
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client_with_tls.yml
+
 cluster="some-name"
-kubectl_bin apply -f "${conf_dir}/secrets.yml"
-apply_cluster "${test_dir}/conf/${cluster}-sharded.yml"
-kubectl_bin apply -f "${conf_dir}/client_with_tls.yml"
+desc "create first PSMDB cluster $cluster"
+apply_cluster $test_dir/conf/$cluster-sharded.yml

 echo "check if all pods started"
 wait_for_running ${cluster}-rs0 3
diff --git a/e2e-tests/demand-backup-physical/run b/e2e-tests/demand-backup-physical/run
index e633401f50..0f8a9e2002 100755
--- a/e2e-tests/demand-backup-physical/run
+++ b/e2e-tests/demand-backup-physical/run
@@ -58,11 +58,13 @@ apply_s3_storage_secrets

 desc 'Testing on not sharded cluster'

+desc 'create secrets and start client'
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client_with_tls.yml
+
 echo "Creating PSMDB cluster"
 cluster="some-name"
-kubectl_bin apply -f "${test_dir}/conf/secrets.yml"
-apply_cluster "${test_dir}/conf/${cluster}.yml"
-kubectl_bin apply -f "${conf_dir}/client_with_tls.yml"
+apply_cluster $test_dir/conf/$cluster.yml

 echo "check if all pods started"
 wait_for_running ${cluster}-rs0 3
diff --git a/e2e-tests/demand-backup-sharded/run b/e2e-tests/demand-backup-sharded/run
index 94456ba08a..756ad88e8f 100755
--- a/e2e-tests/demand-backup-sharded/run
+++ b/e2e-tests/demand-backup-sharded/run
@@ -19,11 +19,9 @@ create_infra "$namespace"

 deploy_minio

-desc 'create first PSMDB cluster'
-cluster="some-name"
-kubectl_bin apply \
-    -f "$conf_dir/secrets.yml" \
-    -f "$conf_dir/client.yml"
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 apply_s3_storage_secrets
 if version_gt "1.19" && [ $EKS -ne 1 ]; then
@@ -34,6 +32,8 @@ else
 	kubectl_bin apply -f "$conf_dir/container-rc.yaml"
 fi

+desc 'create first PSMDB cluster'
+cluster="some-name"
 apply_cluster "$test_dir/conf/$cluster-rs0.yml"
 desc 'check if all 3 Pods started'
 wait_for_running $cluster-rs0 3
@@ -146,10 +146,18 @@ fi
 desc 'check backup and restore -- minio'
 backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \
-    | grep "myApp.test.gz"
+
+retry=0
+until aws_cli "s3 ls s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do
+    if [[ $retry -ge 10 ]]; then
+        echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/"
+        exit 1
+    fi
+    ((retry += 1))
+    echo -n .
+    sleep 5
+done
+
 insert_data_mongos "100501" "myApp"
 insert_data_mongos "100501" "myApp1"
 insert_data_mongos "100501" "myApp2"
@@ -161,10 +169,7 @@ check_data
 desc 'delete backup and check if it is removed from bucket -- minio'
 kubectl_bin delete psmdb-backup --all

-backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \
-    | grep -c ${backup_dest_minio}_ | cat)
+backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio}_ | cat)
 if [[ $backup_exists -eq 1 ]]; then
     echo "Backup was not removed from bucket -- minio"
     exit 1
diff --git a/e2e-tests/demand-backup/run b/e2e-tests/demand-backup/run
index 82d852eace..04af446b69 100755
--- a/e2e-tests/demand-backup/run
+++ b/e2e-tests/demand-backup/run
@@ -116,11 +116,11 @@ create_infra $namespace

 deploy_minio

-desc 'create secrets and start client'
 cluster="some-name-rs0"
-kubectl_bin apply \
-    -f "$test_dir/conf/secrets.yml" \
-    -f "$conf_dir/client.yml"
+
+desc 'create secrets and start client'
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml

 apply_s3_storage_secrets
@@ -215,10 +215,7 @@ fi
 desc 'check backup and restore -- minio'
 backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \
-    | grep myApp.test.gz
+aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz"
 run_recovery_check "$backup_name_minio" "$cluster"

 run_mongo \
@@ -250,10 +247,7 @@ run_recovery_check_bkp_source "$backup_name_minio" "$backup_dest_minio" "$cluste
 desc 'delete backup and check if it is removed from bucket -- minio'
 kubectl_bin delete psmdb-backup --all

-backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \
-    | grep -c ${backup_dest_minio} | cat)
+backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat)
 if [[ $backup_exists -eq 1 ]]; then
     echo "Backup was not removed from bucket -- minio"
     exit 1
@@ -285,10 +279,7 @@ sleep 60
 desc 'delete backup and check if it is removed from bucket -- minio'
 kubectl_bin delete psmdb-backup --all

-backup_exists=$(kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://operator-testing/ \
-    | grep -c ${backup_dest_minio} | cat)
+backup_exists=$(aws_cli "s3 ls s3://operator-testing/" | grep -c ${backup_dest_minio} | cat)
 if [[ $backup_exists -eq 1 ]]; then
     echo "Backup was not removed from bucket -- minio"
     exit 1
diff --git a/e2e-tests/expose-sharded/run b/e2e-tests/expose-sharded/run
index 8932032b60..e9f53631e4 100755
--- a/e2e-tests/expose-sharded/run
+++ b/e2e-tests/expose-sharded/run
@@ -91,12 +91,10 @@ function expose_cluster() {

 function main() {
create_infra "$namespace" - desc 'create first PSMDB cluster' - cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml apply_s3_storage_secrets if version_gt "1.19" && [ $EKS -ne 1 ]; then @@ -107,6 +105,8 @@ function main() { kubectl_bin apply -f "$conf_dir/container-rc.yaml" fi + desc 'create first PSMDB cluster' + cluster="some-name" apply_cluster "$test_dir/conf/$cluster-rs0.yml" desc 'check if all 3 Pods started' wait_for_running $cluster-rs0 3 diff --git a/e2e-tests/finalizer/run b/e2e-tests/finalizer/run index 2bd0acf3d9..ad8bfe5b6e 100755 --- a/e2e-tests/finalizer/run +++ b/e2e-tests/finalizer/run @@ -7,13 +7,12 @@ test_dir=$(realpath "$(dirname "$0")") . "${test_dir}/../functions" create_infra "$namespace" -cluster="some-name" desc 'create secrets and start client' -kubectl_bin apply \ - -f $conf_dir/secrets_with_tls.yml \ - -f $conf_dir/client.yml +kubectl_bin apply -f $conf_dir/secrets_with_tls.yml +apply_client $conf_dir/client.yml +cluster="some-name" apply_cluster "$test_dir/conf/$cluster.yml" desc 'check if all 3 Pods started' wait_for_running "$cluster-rs0" 3 diff --git a/e2e-tests/functions b/e2e-tests/functions index 26aa87ed9f..699f831f80 100755 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -16,6 +16,7 @@ SKIP_BACKUPS_TO_AWS_GCP_AZURE=${SKIP_BACKUPS_TO_AWS_GCP_AZURE:-1} PMM_SERVER_VER=${PMM_SERVER_VER:-"9.9.9"} IMAGE_PMM_CLIENT=${IMAGE_PMM_CLIENT:-"perconalab/pmm-client:dev-latest"} IMAGE_PMM_SERVER=${IMAGE_PMM_SERVER:-"perconalab/pmm-server:dev-latest"} +TOLERATIONS_ARM64='[{"key": "kubernetes.io/arch", "operator": "Equal", "value": "arm64", "effect": "NoSchedule"}]' CERT_MANAGER_VER="1.16.1" tmp_dir=$(mktemp -d) sed=$(which gsed || which sed) @@ -28,6 +29,24 @@ conf_dir=$(realpath $test_dir/../conf || :) src_dir=$(realpath $test_dir/../..) logs_dir=$(realpath $test_dir/../logs || :) +if archs=$(kubectl get nodes -o jsonpath='{range .items[*]}{.status.nodeInfo.architecture}{" "}{end}'); then + first_arch=$(echo $archs | awk '{print $1}') + + if [[ $first_arch =~ ^(amd64|arm64)$ ]]; then + for arch in $archs; do + if [[ "$arch" != "$first_arch" ]]; then + echo "All nodes in the cluster must be equal to the 1-st one: $first_arch !" + exit 1 + fi + done + ARCH="$first_arch" + echo "================================== Using ARCH=$ARCH ==================================" + else + echo "Unsupported architecture: $first_arch" + exit 1 + fi +fi + if [[ ${ENABLE_LOGGING} == "true" ]]; then if [ ! -d "${logs_dir}" ]; then mkdir "${logs_dir}" @@ -386,6 +405,8 @@ deploy_operator() { desc 'start PSMDB operator' local cr_file + local temp_operator_yaml="$(mktemp)" + if [ -f "${test_dir}/conf/crd.yaml" ]; then cr_file="${test_dir}/conf/crd.yaml" else @@ -393,21 +414,27 @@ deploy_operator() { fi kubectl_bin apply --server-side --force-conflicts -f "${cr_file}" - if [ -n "$OPERATOR_NS" ]; then + + if [[ "$OPERATOR_NS" ]]; then apply_rbac cw-rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. | select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml \ - | kubectl_bin apply -f - + ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/cw-operator.yaml > $temp_operator_yaml else apply_rbac rbac yq eval ' (.spec.template.spec.containers[].image = "'${IMAGE}'") | ((.. 
| select(.[] == "DISABLE_TELEMETRY")) |= .value="true") | - ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/operator.yaml \ - | kubectl_bin apply -f - + ((.. | select(.[] == "LOG_LEVEL")) |= .value="DEBUG")' ${src_dir}/deploy/operator.yaml > $temp_operator_yaml fi + + if [[ $ARCH == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += '"$TOLERATIONS_ARM64"')' $temp_operator_yaml + fi + + kubectl_bin apply -f $temp_operator_yaml + sleep 2 wait_pod $(get_operator_pod) } @@ -427,12 +454,29 @@ deploy_operator_gh() { curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${git_tag}/deploy/${operator_yaml}.yaml" >"${tmp_dir}/${operator_yaml}_${git_tag}.yaml" $sed -i -e "s^image: .*^image: ${IMAGE}^" "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + + if [[ $ARCH == "arm64" ]]; then + yq eval -i '(.spec.template.spec.tolerations += '"$TOLERATIONS_ARM64"')' "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" + fi + kubectl_bin apply -f "${tmp_dir}/${operator_yaml}_${git_tag}.yaml" sleep 2 wait_pod "$(get_operator_pod)" } +aws_cli() { + local cmd=$1 + + kubectl_bin run -i --rm aws-cli --image=amazon/aws-cli \ + --restart=Never \ + --env=AWS_ACCESS_KEY_ID=some-access-key \ + --env=AWS_SECRET_ACCESS_KEY=some-secret-key \ + --env=AWS_DEFAULT_REGION=us-east-1 \ + --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' \ + -- --endpoint-url http://minio-service:9000 $cmd +} + deploy_minio() { desc 'install Minio' helm uninstall minio-service || : @@ -453,7 +497,16 @@ deploy_minio() { --set configPathmc=/tmp/.minio/ \ --set persistence.size=2G \ --set securityContext.enabled=false \ + --set 'tolerations[0].key=kubernetes.io/arch' \ + --set 'tolerations[0].operator=Equal' \ + --set 'tolerations[0].value=arm64' \ + --set 'tolerations[0].effect=NoSchedule' \ + --set 'postJob.tolerations[0].key=kubernetes.io/arch' \ + --set 'postJob.tolerations[0].operator=Equal' \ + --set 'postJob.tolerations[0].value=arm64' \ + --set 'postJob.tolerations[0].effect=NoSchedule' \ minio/minio + MINIO_POD=$(kubectl_bin get pods --selector=release=minio-service -o 'jsonpath={.items[].metadata.name}') wait_pod $MINIO_POD @@ -462,9 +515,7 @@ deploy_minio() { fi # create bucket - kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - bash -c 'AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 mb s3://operator-testing' + aws_cli "s3 mb s3://operator-testing" } deploy_vault() { @@ -488,9 +539,23 @@ deploy_vault() { --set injector.agentImage.repository="docker.io/hashicorp/vault" \ --set server.image.repository="docker.io/hashicorp/vault" else - retry 10 60 helm install $name hashicorp/vault \ - --disable-openapi-validation \ - --set dataStorage.enabled=false + if [[ $ARCH == "arm64" ]]; then + helm install vault-service hashicorp/vault \ + --disable-openapi-validation \ + --set dataStorage.enabled=false \ + --set server.tolerations[0].key=kubernetes.io/arch \ + --set server.tolerations[0].operator=Equal \ + --set server.tolerations[0].value=arm64 \ + --set server.tolerations[0].effect=NoSchedule \ + --set injector.tolerations[0].key=kubernetes.io/arch \ + --set injector.tolerations[0].operator=Equal \ + --set injector.tolerations[0].value=arm64 \ + --set injector.tolerations[0].effect=NoSchedule + else + retry 10 60 helm install $name 
hashicorp/vault \ + --disable-openapi-validation \ + --set dataStorage.enabled=false + fi fi until kubectl_bin get pod/vault-service-0 -o jsonpath='{.status.phase}' 2>/dev/null | grep 'Running'; do @@ -529,7 +594,18 @@ deploy_chaos_mesh() { desc 'install chaos-mesh' helm repo add chaos-mesh https://charts.chaos-mesh.org - helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + # helm install chaos-mesh chaos-mesh/chaos-mesh --namespace=${chaos_mesh_ns} --set chaosDaemon.runtime=containerd --set chaosDaemon.socketPath=/run/containerd/containerd.sock --set dashboard.create=false --version 2.5.1 + helm install chaos-mesh chaos-mesh/chaos-mesh \ + --namespace=${chaos_mesh_ns} \ + --set chaosDaemon.runtime=containerd \ + --set chaosDaemon.socketPath=/run/containerd/containerd.sock \ + --set dashboard.create=false \ + --set controllerManager.tolerations[0].key="kubernetes.io/arch" \ + --set controllerManager.tolerations[0].operator="Equal" \ + --set controllerManager.tolerations[0].value="arm64" \ + --set controllerManager.tolerations[0].effect="NoSchedule" \ + --version 2.5.1 + sleep 10 } @@ -709,6 +785,10 @@ compare_kubectl() { (.. | select(. == "extensions/v1beta1")) = "apps/v1" | (.. | select(. == "batch/v1beta1")) = "batch/v1" ' - >${new_result} + if [[ $ARCH == "arm64" ]]; then + yq -i eval 'del(.spec.template.spec.tolerations)' ${new_result} + fi + yq -i eval 'del(.spec.persistentVolumeClaimRetentionPolicy)' ${new_result} if version_gt "1.22"; then @@ -937,9 +1017,44 @@ deploy_cert_manager() { kubectl_bin create namespace cert-manager || : kubectl_bin label namespace cert-manager certmanager.k8s.io/disable-validation=true || : - kubectl_bin apply -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" --validate=false || : 2>/dev/null + kubectl_bin apply -f "https://github.com/cert-manager/cert-manager/releases/download/v${CERT_MANAGER_VER}/cert-manager.yaml" --validate=false || : + + if [[ $ARCH == "arm64" ]]; then + kubectl patch deployment cert-manager -n cert-manager --type='json' -p='[{ + "op": "add", + "path": "/spec/template/spec/tolerations", + "value": [{ + "key": "kubernetes.io/arch", + "operator": "Equal", + "value": "arm64", + "effect": "NoSchedule" + }] + }]' + + kubectl patch deployment cert-manager-cainjector -n cert-manager --type='json' -p='[{ + "op": "add", + "path": "/spec/template/spec/tolerations", + "value": [{ + "key": "kubernetes.io/arch", + "operator": "Equal", + "value": "arm64", + "effect": "NoSchedule" + }] + }]' + + kubectl patch deployment cert-manager-webhook -n cert-manager --type='json' -p='[{ + "op": "add", + "path": "/spec/template/spec/tolerations", + "value": [{ + "key": "kubernetes.io/arch", + "operator": "Equal", + "value": "arm64", + "effect": "NoSchedule" + }] + }]' + fi + kubectl_bin -n cert-manager wait pod -l app.kubernetes.io/instance=cert-manager --for=condition=ready - sleep 120 } delete_crd() { @@ -1120,25 +1235,39 @@ EOF } cat_config() { - cat "$1" \ - | yq eval '(.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'"' \ - | yq eval '(.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'"' \ - | yq eval '(.spec | select(has("initImage"))).initImage = "'"$IMAGE"'"' \ - | yq eval '(.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'"' \ - | yq eval '.spec.upgradeOptions.apply="Never"' + local 
temp_cr="$(mktemp)" + + yq eval ' + (.spec | select(.image == null)).image = "'"$IMAGE_MONGOD"'" | + (.spec | select(has("pmm"))).pmm.image = "'"$IMAGE_PMM_CLIENT"'" | + (.spec | select(has("initImage"))).initImage = "'"$IMAGE"'" | + (.spec | select(has("backup"))).backup.image = "'"$IMAGE_BACKUP"'" | + .spec.upgradeOptions.apply="Never"' "$1" > $temp_cr + + if [[ $ARCH == "arm64" ]]; then + yq eval '.spec.replsets[].tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.configsvrReplSet.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec | select(has("sharding"))).sharding.mongos.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("arbiter"))).arbiter.tolerations += '"$TOLERATIONS_ARM64"' | + (.spec.replsets[] | select(has("nonvoting"))).nonvoting.tolerations += '"$TOLERATIONS_ARM64"'' $temp_cr + else + cat $temp_cr + fi +} + +apply_client() { + if [[ $ARCH == "arm64" ]]; then + yq eval '.spec.template.spec.tolerations += '"$TOLERATIONS_ARM64"'' "$1" | kubectl_bin apply -f - + else + kubectl_bin apply -f "$1" + fi } apply_cluster() { - if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then - cat_config "$1" \ - | kubectl_bin apply -f - + if [[ "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]]; then + cat_config $1 | yq eval 'del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1]) | del(.spec.backup.tasks.[1])' - | kubectl_bin apply -f - else - cat_config "$1" \ - | yq eval ' - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1]) | - del(.spec.backup.tasks.[1])' - \ - | kubectl_bin apply -f - + cat_config $1 | kubectl_bin apply -f - fi } @@ -1338,7 +1467,7 @@ function run_simple_cli_inside_image() { local cli=${2} local pod_name=${RANDOM} - kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --command -- sleep infinity >/dev/null + kubectl_bin -n default run ${pod_name} --image=${image} --restart=Never --overrides='{"apiVersion": "v1","spec": {"tolerations": [{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' --command -- sleep infinity >/dev/null kubectl_bin -n default wait --for=condition=Ready pod/${pod_name} >/dev/null local output=$(kubectl_bin -n default exec ${pod_name} -- ${cli}) kubectl_bin -n default delete pod/${pod_name} --grace-period=0 --force >/dev/null diff --git a/e2e-tests/ignore-labels-annotations/run b/e2e-tests/ignore-labels-annotations/run index ffda3d1151..cc90333f52 100755 --- a/e2e-tests/ignore-labels-annotations/run +++ b/e2e-tests/ignore-labels-annotations/run @@ -45,16 +45,16 @@ check_service() { # `notIgnoredLabel` and `notIgnoredAnnotation` should be deleted kubectl_bin patch "service/$svc_name" --type=json --patch '[ { - "op": "add", - "path": "/metadata/labels", + "op": "add", + "path": "/metadata/labels", "value": { "notIgnoredLabel": "true", "ignoredLabel": "true" } }, { - "op": "add", - "path": "/metadata/annotations", + "op": "add", + "path": "/metadata/annotations", "value": { "notIgnoredAnnotation": "true", "ignoredAnnotation": "true" @@ -85,15 +85,15 @@ check_service() { desc "adding labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "crLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "crAnnotation": "true", } @@ -105,11 +105,11 @@ check_service() { desc "removing labels and annotations 
from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "'$expose_path'/labels" }, { - "op": "remove", + "op": "remove", "path": "'$expose_path'/annotations" }]' sleep 5 @@ -119,16 +119,16 @@ check_service() { desc "adding other labels and annotations to $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", "secondCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", "secondCrAnnotation": "true", @@ -141,15 +141,15 @@ check_service() { desc "adding removing one labels from $expose_path" kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "replace", - "path": "'$expose_path'/labels", + "op": "replace", + "path": "'$expose_path'/labels", "value": { "otherCrLabel": "true", } }, { - "op": "replace", - "path": "'$expose_path'/annotations", + "op": "replace", + "path": "'$expose_path'/annotations", "value": { "otherCrAnnotation": "true", } @@ -162,11 +162,11 @@ check_service() { # When `labels` and `annotations` are not set, old metadata should stay kubectl_bin patch psmdb ${cluster} --type=json --patch '[ { - "op": "remove", + "op": "remove", "path": "/spec/ignoreAnnotations", }, { - "op": "remove", + "op": "remove", "path": "/spec/ignoreLabels", }]' diff --git a/e2e-tests/init-deploy/run b/e2e-tests/init-deploy/run index 014cc392bf..b3593aa888 100755 --- a/e2e-tests/init-deploy/run +++ b/e2e-tests/init-deploy/run @@ -11,9 +11,8 @@ max_conn=16 create_infra $namespace desc 'create secrets and start client' -kubectl_bin apply \ - -f "$test_dir/conf/secrets_with_tls.yml" \ - -f "$conf_dir/client.yml" +kubectl_bin apply -f $test_dir/conf/secrets_with_tls.yml +apply_client $conf_dir/client.yml desc 'create custom RuntimeClass' if version_gt "1.19" && [ $EKS -ne 1 ]; then diff --git a/e2e-tests/ldap-tls/run b/e2e-tests/ldap-tls/run index 0742cf71f6..83268f743d 100755 --- a/e2e-tests/ldap-tls/run +++ b/e2e-tests/ldap-tls/run @@ -13,8 +13,13 @@ deploy_openldap() { select(.metadata.name == "ldap-ca").spec.dnsNames[0]="openldap.'$namespace'.svc.cluster.local"' "$test_dir/conf/openldap.yaml" \ | kubectl_bin apply -f - else - yq 'select(.metadata.name == "ldap-ca").spec.dnsNames[0]="openldap.'$namespace'.svc.cluster.local"' "$test_dir/conf/openldap.yaml" \ - | kubectl_bin apply -f - + if [[ $ARCH == "arm64" ]]; then + yq 'select(.metadata.name == "ldap-ca").spec.dnsNames[0]="openldap.'$namespace'.svc.cluster.local"' "$test_dir/conf/openldap.yaml" | + yq eval '(select(.kind == "Deployment") | .spec.template.spec.tolerations) += '"$TOLERATIONS_ARM64"'' $test_dir/conf/openldap.yaml | + kubectl_bin apply -f - + else + yq 'select(.metadata.name == "ldap-ca").spec.dnsNames[0]="openldap.'$namespace'.svc.cluster.local"' "$test_dir/conf/openldap.yaml" | kubectl_bin apply -f - + fi fi kubectl rollout status deployment/openldap --timeout=120s @@ -151,10 +156,8 @@ main() { deploy_openldap desc 'create secrets and start client' - cluster="some-name" - kubectl_bin apply \ - -f "$conf_dir/secrets.yml" \ - -f "$conf_dir/client.yml" + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml test_mongod_openldap test_sharded_openldap diff --git a/e2e-tests/ldap/run b/e2e-tests/ldap/run index 529622e90e..f42bca8617 100755 --- 
+++ b/e2e-tests/ldap/run
@@ -12,7 +12,11 @@ deploy_openldap() {
 			select(.kind=="Deployment").spec.template.spec.containers[0].securityContext.capabilities.add[0]="NET_BIND_SERVICE"' "$test_dir/conf/openldap.yaml" \
 			| kubectl_bin apply -f -
 	else
-		kubectl_bin apply -f "$test_dir/conf/openldap.yaml"
+		if [[ $ARCH == "arm64" ]]; then
+			yq eval '(select(.kind == "Deployment") | .spec.template.spec.tolerations) += '"$TOLERATIONS_ARM64"'' $test_dir/conf/openldap.yaml | kubectl_bin apply -f -
+		else
+			kubectl_bin apply -f "$test_dir/conf/openldap.yaml"
+		fi
 	fi

 	kubectl rollout status deployment/openldap --timeout=120s
@@ -147,10 +151,8 @@ main() {
 	deploy_openldap

 	desc 'create secrets and start client'
-	cluster="some-name"
-	kubectl_bin apply \
-		-f "$conf_dir/secrets.yml" \
-		-f "$conf_dir/client.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	test_mongod_openldap
 	test_sharded_openldap
diff --git a/e2e-tests/liveness/run b/e2e-tests/liveness/run
index c888b037a0..cba0acba8a 100755
--- a/e2e-tests/liveness/run
+++ b/e2e-tests/liveness/run
@@ -9,7 +9,9 @@ set_debug
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $conf_dir/minio-secret.yml
+apply_client $conf_dir/client.yml

 cluster="liveness"
 desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/mongod-major-upgrade-sharded/run b/e2e-tests/mongod-major-upgrade-sharded/run
index e4378d70c6..5a9773d21f 100755
--- a/e2e-tests/mongod-major-upgrade-sharded/run
+++ b/e2e-tests/mongod-major-upgrade-sharded/run
@@ -17,8 +17,9 @@ function main() {

 	apply_s3_storage_secrets

-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${conf_dir}/secrets.yml"
+	desc 'create secrets and start client'
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'install version service'
diff --git a/e2e-tests/mongod-major-upgrade/run b/e2e-tests/mongod-major-upgrade/run
index 8cb58e23fc..e688b82791 100755
--- a/e2e-tests/mongod-major-upgrade/run
+++ b/e2e-tests/mongod-major-upgrade/run
@@ -16,8 +16,9 @@ function main() {

 	create_infra "${namespace}"

-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${conf_dir}/secrets.yml"
+	desc 'create secrets and start client'
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'install version service'
diff --git a/e2e-tests/multi-cluster-service/run b/e2e-tests/multi-cluster-service/run
index 4dc5b7560d..1a0c059927 100755
--- a/e2e-tests/multi-cluster-service/run
+++ b/e2e-tests/multi-cluster-service/run
@@ -69,20 +69,19 @@ wait_service_export() {
 }

 desc "Register Kubernetes cluster"
-k8s_cluster_name=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name)
-k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=appropriate/curl -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location)
+kubectl -n default delete pod curl || :
+k8s_cluster_name=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name)
+k8s_cluster_region=$(kubectl -n default run --quiet curl --rm --restart=Never -it --image=alpine/curl --overrides='{"spec": {"tolerations":[{"key": "kubernetes.io/arch","operator": "Equal","value": "arm64","effect": "NoSchedule"}]}}' -- -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-location)

 gcloud container hub memberships register ${k8s_cluster_name} --gke-cluster ${k8s_cluster_region}/${k8s_cluster_name} --enable-workload-identity

 wait_mcs_api

 create_infra "$namespace"

-desc 'create first PSMDB cluster'
-cluster="some-name"
-kubectl_bin apply \
-    -f "$conf_dir/secrets.yml" \
-    -f "$conf_dir/client.yml"
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 apply_s3_storage_secrets
 if version_gt "1.19" && [ $EKS -ne 1 ]; then
@@ -93,7 +92,9 @@ else
 	kubectl_bin apply -f "$conf_dir/container-rc.yaml"
 fi

-apply_cluster "$test_dir/conf/$cluster.yml"
+desc 'create first PSMDB cluster'
+cluster="some-name"
+apply_cluster $test_dir/conf/$cluster.yml
 desc 'check if all 3 Pods started'
 wait_for_running $cluster-rs0 3
 wait_for_running $cluster-cfg 3 "false"
diff --git a/e2e-tests/non-voting/run b/e2e-tests/non-voting/run
index 088df33f7c..9ee7f41b8c 100755
--- a/e2e-tests/non-voting/run
+++ b/e2e-tests/non-voting/run
@@ -39,9 +39,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc "check non-voting members"
 	spinup_psmdb "$cluster" "$test_dir/conf/$cluster.yml"
diff --git a/e2e-tests/one-pod/run b/e2e-tests/one-pod/run
index 9d8266fe37..51f62d21fd 100755
--- a/e2e-tests/one-pod/run
+++ b/e2e-tests/one-pod/run
@@ -28,9 +28,9 @@ main() {
 	create_infra $namespace

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${conf_dir}/secrets.yml" \
-		-f "${conf_dir}/minio-secret.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	kubectl_bin apply -f $conf_dir/minio-secret.yml
+	apply_client $conf_dir/client.yml

 	deploy_minio
diff --git a/e2e-tests/operator-self-healing-chaos/run b/e2e-tests/operator-self-healing-chaos/run
index 8cd344ad92..d380cbc6b1 100755
--- a/e2e-tests/operator-self-healing-chaos/run
+++ b/e2e-tests/operator-self-healing-chaos/run
@@ -9,9 +9,8 @@ set_debug
 cluster="some-name-rs0"

 setup_cluster() {
-	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/secrets.yml
+	desc 'create secrets'
+	kubectl_bin apply -f $conf_dir/secrets.yml

 	desc "create first PSMDB cluster $cluster"
 	apply_cluster $conf_dir/$cluster.yml
diff --git a/e2e-tests/pitr-physical/run b/e2e-tests/pitr-physical/run
index 49ae125d4a..249c60d2e2 100755
--- a/e2e-tests/pitr-physical/run
+++ b/e2e-tests/pitr-physical/run
@@ -121,10 +121,9 @@ main() {
 	deploy_minio

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f "$conf_dir/secrets.yml" \
-		-f "$conf_dir/client.yml" \
-		-f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	cluster="some-name"
 	desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/pitr-sharded/run b/e2e-tests/pitr-sharded/run
index 94638aed9a..858ffda293 100755
--- a/e2e-tests/pitr-sharded/run
+++ b/e2e-tests/pitr-sharded/run
@@ -79,10 +79,9 @@ main() {
 	deploy_minio

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f "$conf_dir/secrets.yml" \
-		-f "$conf_dir/client.yml" \
-		-f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'create custom RuntimeClass'
 	if version_gt "1.19" && [ $EKS -ne 1 ]; then
diff --git a/e2e-tests/pitr/run b/e2e-tests/pitr/run
index 6e839e27ae..a035958b1b 100755
--- a/e2e-tests/pitr/run
+++ b/e2e-tests/pitr/run
@@ -112,10 +112,9 @@ main() {
 	deploy_minio

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f "$conf_dir/secrets.yml" \
-		-f "$conf_dir/client.yml" \
-		-f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/minio-secret.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	cluster="some-name-rs0"
 	desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/pvc-resize/run b/e2e-tests/pvc-resize/run
index 7689e35f48..a454be8268 100755
--- a/e2e-tests/pvc-resize/run
+++ b/e2e-tests/pvc-resize/run
@@ -115,10 +115,9 @@ fi
 create_infra "${namespace}"

-desc 'create secrets and psmdb client'
-kubectl_bin apply \
-    -f "$conf_dir/secrets.yml" \
-    -f "$conf_dir/client.yml"
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 desc 'create PSMDB cluster'
 cluster="some-name"
diff --git a/e2e-tests/recover-no-primary/run b/e2e-tests/recover-no-primary/run
index 2ca5bff53b..9ba0a3f2a7 100755
--- a/e2e-tests/recover-no-primary/run
+++ b/e2e-tests/recover-no-primary/run
@@ -6,12 +6,13 @@ set -o xtrace
 test_dir=$(realpath $(dirname $0))
 . ${test_dir}/../functions

-create_infra ${namespace}
+create_infra $namespace
+
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets_with_tls.yml
+apply_client $conf_dir/client.yml

 cluster="some-name"
-kubectl_bin apply \
-    -f ${conf_dir}/secrets_with_tls.yml \
-    -f ${conf_dir}/client.yml

 function test_single_replset() {
 	apply_cluster ${test_dir}/conf/${cluster}.yml
diff --git a/e2e-tests/replset-overrides/conf/some-name-overridden.yml b/e2e-tests/replset-overrides/conf/some-name-overridden.yml
index f477c9fbdc..367fafdc0b 100644
--- a/e2e-tests/replset-overrides/conf/some-name-overridden.yml
+++ b/e2e-tests/replset-overrides/conf/some-name-overridden.yml
@@ -5,11 +5,11 @@ metadata:
     - percona.com/delete-psmdb-pvc
   name: some-name
 spec:
-  image: perconalab/percona-server-mongodb-operator:main-mongod7.0
+  image:
   imagePullPolicy: Always
   backup:
     enabled: true
-    image: perconalab/percona-server-mongodb-operator:main-backup
+    image:
     storages:
       minio:
         type: s3
diff --git a/e2e-tests/replset-overrides/conf/some-name.yml b/e2e-tests/replset-overrides/conf/some-name.yml
index 393a5e35b4..1e3fc125c8 100644
--- a/e2e-tests/replset-overrides/conf/some-name.yml
+++ b/e2e-tests/replset-overrides/conf/some-name.yml
@@ -5,11 +5,11 @@ metadata:
     - percona.com/delete-psmdb-pvc
   name: some-name
 spec:
-  image: perconalab/percona-server-mongodb-operator:main-mongod7.0
+  image:
   imagePullPolicy: Always
   backup:
     enabled: true
-    image: perconalab/percona-server-mongodb-operator:main-backup
+    image:
     storages:
       minio:
         type: s3
diff --git a/e2e-tests/replset-overrides/run b/e2e-tests/replset-overrides/run
index 79419b9621..d8fe5219b1 100755
--- a/e2e-tests/replset-overrides/run
+++ b/e2e-tests/replset-overrides/run
@@ -126,7 +126,7 @@ test_override_priority() {
 main() {
 	create_infra ${namespace}

-	kubectl_bin apply -f ${conf_dir}/client.yml
+	apply_client $conf_dir/client.yml

 	deploy_minio
diff --git a/e2e-tests/rs-shard-migration/run b/e2e-tests/rs-shard-migration/run
index 7020b091a6..6507824459 100755
--- a/e2e-tests/rs-shard-migration/run
+++ b/e2e-tests/rs-shard-migration/run
@@ -18,7 +18,9 @@ function main() {
 	create_infra $namespace

 	desc 'create secrets and start client'
-	kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml
+
 	cluster="some-name"
 	CLUSTER_SIZE=3
@@ -31,8 +33,47 @@ function main() {
 	simple_data_check "${cluster}-rs0" ${CLUSTER_SIZE}

 	desc 'initiate migration from replicaset to sharded cluster'
-	kubectl_bin patch psmdb/${cluster} --type json -p='[{"op":"add","path":"/spec/sharding","value":{"configsvrReplSet":{"size":'${CLUSTER_SIZE}',"volumeSpec":{"persistentVolumeClaim":{"resources":{"requests":{"storage":"3Gi"}}}}},"enabled":true,"mongos":{"size":1}}}]'
+
+	kubectl patch psmdb/${cluster} --type json '-p=[{
+		"op": "add",
+		"path": "/spec/sharding",
+		"value": {
+			"configsvrReplSet": {
+				"size": 3,
+				"volumeSpec": {
+					"persistentVolumeClaim": {
+						"resources": {
+							"requests": {
+								"storage": "3Gi"
+							}
+						}
+					}
+				},
+				"tolerations": [
+					{
+						"key": "kubernetes.io/arch",
+						"operator": "Equal",
+						"value": "arm64",
+						"effect": "NoSchedule"
+					}
+				]
+			},
+			"enabled": true,
+			"mongos": {
+				"size": 1,
+				"tolerations": [
+					{
+						"key": "kubernetes.io/arch",
+						"operator": "Equal",
+						"value": "arm64",
+						"effect": "NoSchedule"
+					}
+				]
+			}
+		}
+	}]'
 	sleep 10
+
 	wait_for_running "${cluster}-rs0" "${CLUSTER_SIZE}" "false"
 	wait_for_running "${cluster}-cfg" "${CLUSTER_SIZE}" "false"
 	wait_cluster_consistency "${cluster}"
diff --git a/e2e-tests/run-release-arm64.csv b/e2e-tests/run-release-arm64.csv
new file mode 100644
index 0000000000..bd7c2c7f9d
--- /dev/null
+++ b/e2e-tests/run-release-arm64.csv
@@ -0,0 +1,46 @@
+arbiter
+balancer
+custom-replset-name
+custom-tls
+custom-users-roles
+custom-users-roles-sharded
+cross-site-sharded
+data-at-rest-encryption
+data-sharded
+default-cr
+demand-backup
+demand-backup-eks-credentials
+demand-backup-physical
+demand-backup-physical-sharded
+demand-backup-sharded
+expose-sharded
+ignore-labels-annotations
+init-deploy
+finalizer
+ldap
+ldap-tls
+limits
+liveness
+multi-cluster-service
+non-voting
+one-pod
+operator-self-healing-chaos
+pitr
+pitr-sharded
+pitr-physical
+pvc-resize
+recover-no-primary
+replset-overrides
+rs-shard-migration
+scaling
+scheduled-backup
+security-context
+self-healing-chaos
+service-per-pod
+serviceless-external-nodes
+split-horizon
+storage
+tls-issue-cert-manager
+upgrade
+upgrade-consistency
+users
diff --git a/e2e-tests/scaling/run b/e2e-tests/scaling/run
index 4246858b75..56ce1d5de8 100755
--- a/e2e-tests/scaling/run
+++ b/e2e-tests/scaling/run
@@ -9,9 +9,8 @@ set_debug
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply \
-    -f $conf_dir/secrets.yml \
-    -f $conf_dir/client.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 cluster='some-name-rs0'
 desc "create first PSMDB cluster $cluster"
diff --git a/e2e-tests/scheduled-backup/run b/e2e-tests/scheduled-backup/run
index 8cb5a463c8..dde0a5bb43 100755
--- a/e2e-tests/scheduled-backup/run
+++ b/e2e-tests/scheduled-backup/run
@@ -47,9 +47,8 @@ cat - <<-EOF | kubectl apply -f -
 EOF

 desc 'create secrets and start client'
-kubectl_bin apply \
-    -f "$conf_dir/secrets.yml" \
-    -f "$conf_dir/client.yml"
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 apply_s3_storage_secrets
@@ -88,16 +87,7 @@ sleep 55
 desc 'disable backups schedule'
 apply_cluster "$test_dir/conf/$cluster.yml"

-if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
-    backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1)
-    backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1)
-    backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1)
-    wait_backup "$backup_name_aws"
-    wait_backup "$backup_name_gcp"
-    wait_backup "$backup_name_azure"
-fi
-
-backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print$1}' | head -1)
+backup_name_minio=$(kubectl_bin get psmdb-backup | grep minio | awk '{print $1}' | head -1)
 wait_backup "$backup_name_minio"

 sleep 5
@@ -106,24 +96,41 @@ echo -n "checking backup count for every-min-minio..."
 check_backup_count every-min-minio 1
 echo "OK"

-echo -n "checking backup count for every-min-aws-s3..."
-check_backup_count every-min-aws-s3 1
-echo "OK"
+if [ -z "$SKIP_BACKUPS_TO_AWS_GCP_AZURE" ]; then
+    backup_name_aws=$(kubectl_bin get psmdb-backup | grep aws-s3 | awk '{print$1}' | head -1)
+    backup_name_gcp=$(kubectl_bin get psmdb-backup | grep gcp-cs | awk '{print$1}' | head -1)
+    backup_name_azure=$(kubectl_bin get psmdb-backup | grep azure-blob | awk '{print$1}' | head -1)
+    wait_backup "$backup_name_aws"
+    wait_backup "$backup_name_gcp"
+    wait_backup "$backup_name_azure"

-echo -n "checking backup count for every-min-gcp-cs..."
-check_backup_count every-min-gcp-cs 1
-echo "OK"
+    echo -n "checking backup count for every-min-aws-s3..."
+    check_backup_count every-min-aws-s3 1
+    echo "OK"

-echo -n "checking backup count for every-min-azure-blob..."
-check_backup_count every-min-azure-blob 1
-echo "OK"
+    echo -n "checking backup count for every-min-gcp-cs..."
+    check_backup_count every-min-gcp-cs 1
+    echo "OK"
+
+    echo -n "checking backup count for every-min-azure-blob..."
+    check_backup_count every-min-azure-blob 1
+    echo "OK"
+fi

 desc 'check backup and restore -- minio'
 backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-    /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-    /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \
-    | grep "myApp.test.gz"
+
+retry=0
+until aws_cli "s3 ls s3://$backup_dest_minio/rs0/" | grep "myApp.test.gz"; do
+    if [[ $retry -ge 10 ]]; then
+        echo "Max retry count $retry reached. File myApp.test.gz wasn't found on s3://$backup_dest_minio/rs0/"
+        exit 1
+    fi
+    ((retry += 1))
+    echo -n .
+    sleep 5
+done
+
 run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@$cluster.$namespace"
 compare_mongo_cmd "find" "myApp:myPass@$cluster-0.$cluster.$namespace" "-2nd"
 compare_mongo_cmd "find" "myApp:myPass@$cluster-1.$cluster.$namespace" "-2nd"
diff --git a/e2e-tests/security-context/run b/e2e-tests/security-context/run
index fd1f06ba9d..2e166af207 100755
--- a/e2e-tests/security-context/run
+++ b/e2e-tests/security-context/run
@@ -9,7 +9,9 @@ set_debug
 create_infra $namespace

 desc 'create secrets and start client'
-kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml -f $conf_dir/minio-secret.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+kubectl_bin apply -f $conf_dir/minio-secret.yml
+apply_client $conf_dir/client.yml

 desc 'create additional service account'
 kubectl_bin apply -f "$test_dir/conf/service-account.yml"
diff --git a/e2e-tests/self-healing-chaos/run b/e2e-tests/self-healing-chaos/run
index c6b09170bc..229136cb30 100755
--- a/e2e-tests/self-healing-chaos/run
+++ b/e2e-tests/self-healing-chaos/run
@@ -23,9 +23,8 @@ check_pod_restarted() {

 setup_cluster() {
 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/secrets.yml \
-		-f $conf_dir/client.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster $cluster"
 	apply_cluster $conf_dir/$cluster.yml
@@ -76,11 +75,8 @@ kill_pod() {
 	local pod=$1
 	local old_resourceVersion=$(kubectl get pod $pod -ojson | jq '.metadata.resourceVersion' | tr -d '"')

-	yq eval '
-		.metadata.name = "chaos-cluster-pod-kill" |
-		del(.spec.selector.pods.test-namespace) |
-		.spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml \
-		| kubectl apply -f -
+	yq eval '.metadata.name = "chaos-cluster-pod-kill" | del(.spec.selector.pods.test-namespace) | .spec.selector.pods.'$namespace'[0] = "'$pod'"' $conf_dir/chaos-pod-kill.yml | kubectl apply -f -
+
 	sleep 5

 	# check if all 3 Pods started
diff --git a/e2e-tests/service-per-pod/run b/e2e-tests/service-per-pod/run
index 382270c410..455ca49539 100755
--- a/e2e-tests/service-per-pod/run
+++ b/e2e-tests/service-per-pod/run
@@ -109,9 +109,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'check ClusterIP'
 	check_cr_config "cluster-ip-rs0"
diff --git a/e2e-tests/serviceless-external-nodes/conf/external.yml b/e2e-tests/serviceless-external-nodes/conf/external.yml
index 96f7840e55..f48b3388cf 100644
--- a/e2e-tests/serviceless-external-nodes/conf/external.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/external.yml
@@ -10,7 +10,7 @@ spec:
     replsetSize: true
     mongosSize: true
   clusterServiceDNSMode: "Internal"
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users
diff --git a/e2e-tests/serviceless-external-nodes/conf/main.yml b/e2e-tests/serviceless-external-nodes/conf/main.yml
index 4a9b7e3942..af55e1a35a 100644
--- a/e2e-tests/serviceless-external-nodes/conf/main.yml
+++ b/e2e-tests/serviceless-external-nodes/conf/main.yml
@@ -9,7 +9,7 @@ spec:
   clusterServiceDNSMode: "Internal"
   tls:
     mode: allowTLS
-  image: percona/percona-server-mongodb:6.0.4-3
+  image:
   imagePullPolicy: Always
   secrets:
     users: mydb-custom-users
diff --git a/e2e-tests/serviceless-external-nodes/run b/e2e-tests/serviceless-external-nodes/run
index b7a1272737..4b3a98ac3c 100755
--- a/e2e-tests/serviceless-external-nodes/run
+++ b/e2e-tests/serviceless-external-nodes/run
@@ -14,10 +14,9 @@ unset OPERATOR_NS
 desc "Create main cluster"
 create_infra "$namespace"

-kubectl_bin apply \
-    -f "$conf_dir/client.yml" \
-    -f "$test_dir/conf/secrets.yml"
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml

 apply_cluster "$test_dir/conf/main.yml"
 wait_for_running "$cluster-rs0" 1
 compare_kubectl statefulset/mydb-rs0
@@ -33,13 +32,12 @@ kubectl_bin config set-context $(kubectl_bin config current-context) --namespace
 create_namespace $replica_namespace 0
 deploy_operator

-kubectl_bin apply \
-    -f "$conf_dir/client.yml" \
-    -f "$test_dir/conf/secrets.yml"
+desc 'create secrets and start client'
+kubectl_bin apply -f $test_dir/conf/secrets.yml
+apply_client $conf_dir/client.yml

 apply_cluster "$test_dir/conf/external.yml"
-
-wait_pod ${cluster}-rs0-0
-wait_pod ${cluster}-rs0-1
+wait_pod $cluster-rs0-0
+wait_pod $cluster-rs0-1

 secrets_count=$(kubectl_bin get secret -o json | jq --arg pattern "$cluster" '[.items[] | select(.metadata.name | test($pattern))] | length')
 if [[ $secrets_count != 6 ]]; then
diff --git a/e2e-tests/smart-update/run b/e2e-tests/smart-update/run
index 015a4d0b19..e933d296f5 100755
--- a/e2e-tests/smart-update/run
+++ b/e2e-tests/smart-update/run
@@ -35,7 +35,8 @@ cluster="smart-update"
 create_infra ${namespace}

 desc 'create secrets and start client'
-kubectl_bin apply -f ${conf_dir}/secrets.yml -f ${conf_dir}/client.yml
+kubectl_bin apply -f $conf_dir/secrets.yml
+apply_client $conf_dir/client.yml

 IMAGE_MONGOD_TO_UPDATE=${IMAGE_MONGOD}
 if [[ ${IMAGE_MONGOD} == *"percona-server-mongodb-operator"* ]]; then
diff --git a/e2e-tests/split-horizon/run b/e2e-tests/split-horizon/run
index 20a903c1a2..1abf0eb3b8 100755
--- a/e2e-tests/split-horizon/run
+++ b/e2e-tests/split-horizon/run
@@ -25,13 +25,13 @@ configure_client_hostAliases() {
 	wait_pod $(kubectl_bin get pods --selector=name=psmdb-client -o 'jsonpath={.items[].metadata.name}')
 }

-create_infra ${namespace}
+create_infra $namespace

-cluster="some-name"
-kubectl_bin apply \
-    -f ${conf_dir}/secrets_with_tls.yml \
-    -f ${conf_dir}/client_with_tls.yml
+desc 'create secrets and start client'
+kubectl_bin apply -f $conf_dir/secrets_with_tls.yml
+apply_client $conf_dir/client_with_tls.yml

+cluster="some-name"
 apply_cluster ${test_dir}/conf/${cluster}-3horizons.yml
 wait_for_running "${cluster}-rs0" 3
 wait_cluster_consistency ${cluster}
diff --git a/e2e-tests/storage/run b/e2e-tests/storage/run
index 4dff4779f6..f4d2d27289 100755
--- a/e2e-tests/storage/run
+++ b/e2e-tests/storage/run
@@ -47,14 +47,19 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply \
-		-f $conf_dir/client.yml \
-		-f $conf_dir/secrets.yml \
-		-f $test_dir/conf/hostpath-helper.yml
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc 'check emptydir'
 	check_cr_config "emptydir-rs0"

+	if [[ $ARCH == "arm64" ]]; then
+		yq eval '.spec.template.spec.tolerations += '"$TOLERATIONS_ARM64"'' \
+			$test_dir/conf/hostpath-helper.yml | kubectl_bin apply -f -
+	else
+		kubectl_bin apply -f $test_dir/conf/hostpath-helper.yml
+	fi
+
 	desc 'check hostpath'
 	check_cr_config "hostpath-rs0"
diff --git a/e2e-tests/tls-issue-cert-manager/run b/e2e-tests/tls-issue-cert-manager/run
index 0b10d74b75..bfa6071766 100755
--- a/e2e-tests/tls-issue-cert-manager/run
+++ b/e2e-tests/tls-issue-cert-manager/run
@@ -29,8 +29,8 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "$conf_dir/secrets.yml"
-	kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client_with_tls.yml

 	desc 'create custom cert-manager issuers and certificates'
 	kubectl_bin apply -f "$test_dir/conf/some-name-psmdb-ca-issuer.yml"
diff --git a/e2e-tests/upgrade-consistency-sharded-tls/run b/e2e-tests/upgrade-consistency-sharded-tls/run
index 4f7a0a88cf..f0dc59e78b 100755
--- a/e2e-tests/upgrade-consistency-sharded-tls/run
+++ b/e2e-tests/upgrade-consistency-sharded-tls/run
@@ -20,8 +20,9 @@ main() {
 	deploy_cert_manager

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "$conf_dir/secrets.yml"
-	kubectl_bin apply -f "$conf_dir/client_with_tls.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client_with_tls.yml
+
 	deploy_cmctl

 	desc "create first PSMDB cluster 1.17.0 $CLUSTER"
diff --git a/e2e-tests/upgrade-consistency/run b/e2e-tests/upgrade-consistency/run
index b2021a2184..e3323be828 100755
--- a/e2e-tests/upgrade-consistency/run
+++ b/e2e-tests/upgrade-consistency/run
@@ -12,7 +12,8 @@ main() {
 	create_infra $namespace

 	desc 'create secrets and start client'
-	kubectl_bin apply -f "${conf_dir}/client.yml" -f "${conf_dir}/secrets.yml"
+	kubectl_bin apply -f $conf_dir/secrets.yml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster 1.17.0 $CLUSTER"
 	apply_cluster "$test_dir/conf/${CLUSTER}-rs0.yml"
diff --git a/e2e-tests/upgrade-sharded/run b/e2e-tests/upgrade-sharded/run
index 08db6b2323..a8b4cc5b24 100755
--- a/e2e-tests/upgrade-sharded/run
+++ b/e2e-tests/upgrade-sharded/run
@@ -161,15 +161,15 @@ function main() {
 	if [ -n "$OPERATOR_NS" ]; then
 		rbac="cw-rbac"
 	fi
-	create_infra_gh "${namespace}" "${GIT_TAG}"
+	create_infra_gh $namespace $GIT_TAG
 	deploy_cert_manager
 	apply_s3_storage_secrets
 	deploy_minio

 	desc 'create secrets and start client'
-	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml"
-	kubectl_bin apply -f "${conf_dir}/client.yml" \
-		-f "${tmp_dir}/secrets.yaml"
+	curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml
+	kubectl_bin apply -f $tmp_dir/secrets.yaml
+	apply_client $conf_dir/client.yml

 	desc "create first PSMDB cluster $cluster"
 	local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml"
@@ -263,11 +263,7 @@ function main() {
 	run_mongos 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-mongos.${namespace}"
 	backup_dest_minio=$(get_backup_dest "$backup_name_minio")
-	retry 3 5 kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \
-		/usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \
-		/usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls "s3://${backup_dest_minio}/rs0/" \
-		| grep myApp.test.gz
-
+	retry 3 5 aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz"
 	run_mongos 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-mongos.${namespace}"
 	compare_mongos_cmd "find" "myApp:myPass@$cluster-mongos.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test"
 	run_restore "$backup_name_minio"
diff --git a/e2e-tests/upgrade/run b/e2e-tests/upgrade/run
index 78df239e26..a4589aa121 100755
--- a/e2e-tests/upgrade/run
+++ b/e2e-tests/upgrade/run
@@ -146,9 +146,9 @@ function main() {
 	deploy_minio

 	desc 'create secrets and start client'
-	curl -s
"https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" >"${tmp_dir}/secrets.yaml" - kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${tmp_dir}/secrets.yaml" + curl -s "https://raw.githubusercontent.com/percona/percona-server-mongodb-operator/${GIT_TAG}/deploy/secrets.yaml" > $tmp_dir/secrets.yaml + kubectl_bin apply -f $tmp_dir/secrets.yaml + apply_client $conf_dir/client.yml local cr_yaml="${tmp_dir}/cr_${GIT_TAG}.yaml" prepare_cr_yaml "${cr_yaml}" @@ -219,11 +219,7 @@ function main() { run_mongo 'use myApp\n db.test.drop()' "myApp:myPass@${cluster}-rs0.${namespace}" backup_dest_minio=$(get_backup_dest "$backup_name_minio") - kubectl_bin run -i --rm aws-cli --image=perconalab/awscli --restart=Never -- \ - /usr/bin/env AWS_ACCESS_KEY_ID=some-access-key AWS_SECRET_ACCESS_KEY=some-secret-key AWS_DEFAULT_REGION=us-east-1 \ - /usr/bin/aws --endpoint-url http://minio-service:9000 s3 ls s3://${backup_dest_minio}/rs0/ \ - | grep myApp.test.gz - + aws_cli "s3 ls s3://${backup_dest_minio}/rs0/" | grep "myApp.test.gz" run_mongo 'use myApp\n db.test.insert({ x: 100501 })' "myApp:myPass@${cluster}-rs0.${namespace}" compare_mongo_cmd "find" "myApp:myPass@$cluster-rs0.$namespace" "-2nd" ".svc.cluster.local" "myApp" "test" run_restore "$backup_name_minio" diff --git a/e2e-tests/users/run b/e2e-tests/users/run index 39d32fe91d..79fc2c699c 100755 --- a/e2e-tests/users/run +++ b/e2e-tests/users/run @@ -14,9 +14,9 @@ create_infra "$namespace" deploy_minio desc 'create secrets and start client' -kubectl_bin apply -f "${conf_dir}/client.yml" \ - -f "${test_dir}/conf/secrets.yml" \ - -f "${conf_dir}/minio-secret.yml" +kubectl_bin apply -f $test_dir/conf/secrets.yml +kubectl_bin apply -f $conf_dir/minio-secret.yml +apply_client $conf_dir/client.yml cluster="some-name-rs0" desc "create first PSMDB cluster $cluster" diff --git a/e2e-tests/version-service/run b/e2e-tests/version-service/run index fbef8220bd..ae594b550e 100755 --- a/e2e-tests/version-service/run +++ b/e2e-tests/version-service/run @@ -11,11 +11,11 @@ function check_telemetry_transfer() { local cr_vs_uri=${1} local cr_vs_channel=${2:-"disabled"} local telemetry_state=${3:-"enabled"} + local cluster="minimal-cluster" - cluster="minimal-cluster" desc 'create secrets and start client' - kubectl_bin apply -f $conf_dir/client.yml - yq eval '.metadata.name = "'${cluster}'"' $conf_dir/secrets.yml | kubectl_bin apply -f - + apply_client $conf_dir/client.yml + yq eval '.metadata.name = "'$cluster'"' $conf_dir/secrets.yml | kubectl_bin apply -f - desc "create PSMDB minimal cluster $cluster" yq eval ' @@ -151,7 +151,9 @@ for i in "${!cases[@]}"; do cluster="${cases[$i]}" expected_image="${expected_images[$i]}" - kubectl_bin apply -f $conf_dir/secrets.yml -f $conf_dir/client.yml + desc 'create secrets and start client' + kubectl_bin apply -f $conf_dir/secrets.yml + apply_client $conf_dir/client.yml desc 'create PSMDB cluster' tmp_file=$(mktemp)