diff --git a/.github/workflows/azure-experiment-pipeline.yml b/.github/workflows/azure-experiment-pipeline.yml
new file mode 100644
index 000000000..98ada5da4
--- /dev/null
+++ b/.github/workflows/azure-experiment-pipeline.yml
@@ -0,0 +1,203 @@
+---
+name: Azure-Experiment-Pipeline
+on:
+  workflow_dispatch:
+    inputs:
+      goExperimentImage:
+        description: "Go Experiment Image"
+        default: "litmuschaos/go-runner:ci"
+        required: true
+      operatorImage:
+        description: "Operator Image"
+        default: "litmuschaos/chaos-operator:ci"
+        required: true
+      runnerImage:
+        description: "Runner Image"
+        default: "litmuschaos/chaos-runner:ci"
+        required: true
+      chaosNamespace:
+        description: "Chaos Namespace"
+        default: "default"
+        required: true
+      experimentImagePullPolicy:
+        description: "Experiment Image Pull Policy"
+        default: "Always"
+        required: true
+
+jobs:
+  Azure_VM_Instance_Stop:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install and configure a k3s cluster
+      - name: Installing Prerequisites (K3S Cluster)
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          curl -sfL https://get.k3s.io | sh -s - --docker --write-kubeconfig-mode 664
+          kubectl wait node --all --for condition=ready --timeout=90s
+          mkdir -p $HOME/.kube && cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
+          kubectl get nodes
+
+      - uses: actions/checkout@v2
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '1.16'
+
+      - name: Create azure auth file from secrets
+        run: echo "${{ secrets.AZURE_AUTH_FILE }}" > azure.auth
+
+      - name: Create Kubernetes secret for azure experiment
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          kubectl create secret generic cloud-secret --from-file=azure.auth
+
+      - name: Azure Login
+        uses: azure/login@v1
+        with:
+          creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+      - name: Create target Azure VM Instances
+        if: always()
+        run: |
+          az group create -n ${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }} -l eastus && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }} -n litmus-e2e-instance-one --image UbuntuLTS --generate-ssh-keys && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }} -n litmus-e2e-instance-two --image UbuntuLTS --generate-ssh-keys
+
+      - name: Litmus Infra Setup
+        if: always()
+        run: make build-litmus
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          OPERATOR_IMAGE: "${{ github.event.inputs.operatorImage }}"
+          RUNNER_IMAGE: "${{ github.event.inputs.runnerImage }}"
+
+      - name: Run Azure VM Instance Stop experiment in serial & parallel mode
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          AZURE_INSTANCE_NAME: "litmus-e2e-instance-one,litmus-e2e-instance-two"
+          AZURE_RESOURCE_GROUP: "${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }}"
+          GO_EXPERIMENT_IMAGE: "${{ github.event.inputs.goExperimentImage }}"
+          EXPERIMENT_IMAGE_PULL_POLICY: "${{ github.event.inputs.experimentImagePullPolicy }}"
+          CHAOS_NAMESPACE: "${{ github.event.inputs.chaosNamespace }}"
+        run: make azure-instance-stop
+
+      - name: Delete target Azure VM Instances
+        if: always()
+        run: |
+          az vm delete --ids $(az vm list -g ${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }} --query "[].id" -o tsv) --yes
+          az group delete -n ${{ secrets.AZURE_RESOURCE_GROUP }}_instance_${{ github.run_number }} --yes
+
+      - name: "[Debug]: check chaos resources"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          bash <(curl -s https://raw.githubusercontent.com/litmuschaos/litmus-e2e/master/build/debug.sh)
+
+      - name: "[Debug]: check operator logs"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          operator_name=$(kubectl get pods -n litmus -l app.kubernetes.io/component=operator --no-headers | awk '{print$1}')
+          kubectl logs $operator_name -n litmus > logs.txt
+          cat logs.txt
+
+      - name: Deleting K3S cluster
+        if: always()
+        run: /usr/local/bin/k3s-uninstall.sh
+
+  Azure_VM_Disk_Loss:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install and configure a k3s cluster
+      - name: Installing Prerequisites (K3S Cluster)
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          curl -sfL https://get.k3s.io | sh -s - --docker --write-kubeconfig-mode 664
+          kubectl wait node --all --for condition=ready --timeout=90s
+          mkdir -p $HOME/.kube && cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
+          kubectl get nodes
+
+      - uses: actions/checkout@v2
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '1.16'
+
+      - name: Create azure auth file from secrets
+        run: echo "${{ secrets.AZURE_AUTH_FILE }}" > azure.auth
+
+      - name: Create Kubernetes secret for azure experiment
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          kubectl create secret generic cloud-secret --from-file=azure.auth
+
+      - name: Azure Login
+        uses: azure/login@v1
+        with:
+          creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+      - name: Create an Azure VM Instance with target Disk Volumes
+        if: always()
+        run: |
+          az group create -n ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} -l eastus && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} -n litmus-e2e-instance-one --image UbuntuLTS --generate-ssh-keys && \
+          az disk create -n litmus-e2e-disk-one -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --size-gb=4 && \
+          az disk create -n litmus-e2e-disk-two -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --size-gb=4 && \
+          az vm disk attach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-one && \
+          az vm disk attach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-two
+      - name: Litmus Infra Setup
+        if: always()
+        run: make build-litmus
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          OPERATOR_IMAGE: "${{ github.event.inputs.operatorImage }}"
+          RUNNER_IMAGE: "${{ github.event.inputs.runnerImage }}"
+
+      - name: Run Azure VM Disk Loss experiment in serial & parallel mode
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          AZURE_INSTANCE_NAME: "litmus-e2e-instance-one"
+          AZURE_DISK_NAME: "litmus-e2e-disk-one,litmus-e2e-disk-two"
+          AZURE_RESOURCE_GROUP: "${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }}"
+          GO_EXPERIMENT_IMAGE: "${{ github.event.inputs.goExperimentImage }}"
+          EXPERIMENT_IMAGE_PULL_POLICY: "${{ github.event.inputs.experimentImagePullPolicy }}"
+          CHAOS_NAMESPACE: "${{ github.event.inputs.chaosNamespace }}"
+        run: make azure-disk-loss
+
+      - name: Delete the VM Instance and target Disk Volumes
+        if: always()
+        run: |
+          az vm disk detach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-one && \
+          az vm disk detach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-two && \
+          az disk delete -n litmus-e2e-disk-one -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --yes && \
+          az disk delete -n litmus-e2e-disk-two -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --yes && \
+          az vm delete --ids $(az vm list -g ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --query "[].id" -o tsv) --yes
+          az group delete -n ${{ secrets.AZURE_RESOURCE_GROUP }}_disk_${{ github.run_number }} --yes
+
+      - name: "[Debug]: check chaos resources"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          bash <(curl -s https://raw.githubusercontent.com/litmuschaos/litmus-e2e/master/build/debug.sh)
+
+      - name: "[Debug]: check operator logs"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          operator_name=$(kubectl get pods -n litmus -l app.kubernetes.io/component=operator --no-headers | awk '{print$1}')
+          kubectl logs $operator_name -n litmus > logs.txt
+          cat logs.txt
+
+      - name: Deleting K3S cluster
+        if: always()
+        run: /usr/local/bin/k3s-uninstall.sh
diff --git a/.github/workflows/nightly-azure-experiment-pipeline.yml b/.github/workflows/nightly-azure-experiment-pipeline.yml
new file mode 100644
index 000000000..0fb1df1bf
--- /dev/null
+++ b/.github/workflows/nightly-azure-experiment-pipeline.yml
@@ -0,0 +1,183 @@
+---
+name: Scheduled-Azure-Experiment-Pipeline
+on:
+  schedule:
+    - cron: "30 22 * * *"  # Daily at 22:30 UTC
+
+jobs:
+  Azure_VM_Instance_Stop:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install and configure a k3s cluster
+      - name: Installing Prerequisites (K3S Cluster)
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          curl -sfL https://get.k3s.io | sh -s - --docker --write-kubeconfig-mode 664
+          kubectl wait node --all --for condition=ready --timeout=90s
+          mkdir -p $HOME/.kube && cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
+          kubectl get nodes
+
+      - uses: actions/checkout@v2
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '1.16'
+
+      - name: Create azure auth file from secrets
+        run: echo "${{ secrets.AZURE_AUTH_FILE }}" > azure.auth
+
+      - name: Create Kubernetes secret for azure experiment
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          kubectl create secret generic cloud-secret --from-file=azure.auth
+
+      - name: Azure Login
+        uses: azure/login@v1
+        with:
+          creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+      - name: Create target Azure VM Instances
+        if: always()
+        run: |
+          az group create -n ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }} -l eastus && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }} -n litmus-e2e-instance-one --image UbuntuLTS --generate-ssh-keys && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }} -n litmus-e2e-instance-two --image UbuntuLTS --generate-ssh-keys
+
+      - name: Litmus Infra Setup
+        if: always()
+        run: make build-litmus
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          # schedule events carry no workflow_dispatch inputs, so the image
+          # references must be concrete values here
+          OPERATOR_IMAGE: "litmuschaos/chaos-operator:ci"
+          RUNNER_IMAGE: "litmuschaos/chaos-runner:ci"
+
+      - name: Run Azure VM Instance Stop experiment in serial & parallel mode
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          AZURE_INSTANCE_NAME: "litmus-e2e-instance-one,litmus-e2e-instance-two"
+          AZURE_RESOURCE_GROUP: "${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }}"
+          GO_EXPERIMENT_IMAGE: "litmuschaos/go-runner:ci"
+          EXPERIMENT_IMAGE_PULL_POLICY: "Always"
+          CHAOS_NAMESPACE: "default"
+        run: make azure-instance-stop
+
+      - name: Delete target Azure VM Instances
+        if: always()
+        run: |
+          az vm delete --ids $(az vm list -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }} --query "[].id" -o tsv) --yes
+          az group delete -n ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_instance_${{ github.run_number }} --yes
+
+      - name: "[Debug]: check chaos resources"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          bash <(curl -s https://raw.githubusercontent.com/litmuschaos/litmus-e2e/master/build/debug.sh)
+
+      - name: "[Debug]: check operator logs"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          operator_name=$(kubectl get pods -n litmus -l app.kubernetes.io/component=operator --no-headers | awk '{print$1}')
+          kubectl logs $operator_name -n litmus > logs.txt
+          cat logs.txt
+
+      - name: Deleting K3S cluster
+        if: always()
+        run: /usr/local/bin/k3s-uninstall.sh
+
+  Azure_VM_Disk_Loss:
+    runs-on: ubuntu-latest
+    steps:
+
+      # Install and configure a k3s cluster
+      - name: Installing Prerequisites (K3S Cluster)
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          curl -sfL https://get.k3s.io | sh -s - --docker --write-kubeconfig-mode 664
+          kubectl wait node --all --for condition=ready --timeout=90s
+          mkdir -p $HOME/.kube && cat /etc/rancher/k3s/k3s.yaml > $HOME/.kube/config
+          kubectl get nodes
+
+      - uses: actions/checkout@v2
+
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '1.16'
+
+      - name: Create azure auth file from secrets
+        run: echo "${{ secrets.AZURE_AUTH_FILE }}" > azure.auth
+
+      - name: Create Kubernetes secret for azure experiment
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+        run: |
+          kubectl create secret generic cloud-secret --from-file=azure.auth
+
+      - name: Azure Login
+        uses: azure/login@v1
+        with:
+          creds: ${{ secrets.AZURE_CREDENTIALS }}
+
+      - name: Create an Azure VM Instance with target Disk Volumes
+        if: always()
+        run: |
+          az group create -n ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} -l eastus && \
+          az vm create -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} -n litmus-e2e-instance-one --image UbuntuLTS --generate-ssh-keys && \
+          az disk create -n litmus-e2e-disk-one -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --size-gb=4 && \
+          az disk create -n litmus-e2e-disk-two -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --size-gb=4 && \
+          az vm disk attach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-one && \
+          az vm disk attach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-two
+      - name: Litmus Infra Setup
+        if: always()
+        run: make build-litmus
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          # schedule events carry no workflow_dispatch inputs, so the image
+          # references must be concrete values here
+          OPERATOR_IMAGE: "litmuschaos/chaos-operator:ci"
+          RUNNER_IMAGE: "litmuschaos/chaos-runner:ci"
+
+      - name: Run Azure VM Disk Loss experiment in serial & parallel mode
+        if: always()
+        env:
+          KUBECONFIG: /etc/rancher/k3s/k3s.yaml
+          AZURE_INSTANCE_NAME: "litmus-e2e-instance-one"
+          AZURE_DISK_NAME: "litmus-e2e-disk-one,litmus-e2e-disk-two"
+          AZURE_RESOURCE_GROUP: "${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }}"
+          GO_EXPERIMENT_IMAGE: "litmuschaos/go-runner:ci"
+          EXPERIMENT_IMAGE_PULL_POLICY: "Always"
+          CHAOS_NAMESPACE: "default"
+        run: make azure-disk-loss
+
+      - name: Delete the VM Instance and target Disk Volumes
+        if: always()
+        run: |
+          az vm disk detach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-one && \
+          az vm disk detach -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --vm-name litmus-e2e-instance-one -n litmus-e2e-disk-two && \
+          az disk delete -n litmus-e2e-disk-one -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --yes && \
+          az disk delete -n litmus-e2e-disk-two -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --yes && \
+          az vm delete --ids $(az vm list -g ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --query "[].id" -o tsv) --yes
+          az group delete -n ${{ secrets.AZURE_RESOURCE_GROUP }}_schedule_disk_${{ github.run_number }} --yes
+
+      - name: "[Debug]: check chaos resources"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          bash <(curl -s https://raw.githubusercontent.com/litmuschaos/litmus-e2e/master/build/debug.sh)
+
+      - name: "[Debug]: check operator logs"
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: |
+          operator_name=$(kubectl get pods -n litmus -l app.kubernetes.io/component=operator --no-headers | awk '{print$1}')
+          kubectl logs $operator_name -n litmus > logs.txt
+          cat logs.txt
+
+      - name: Deleting K3S cluster
+        if: always()
+        run: /usr/local/bin/k3s-uninstall.sh
diff --git a/.master-plan.yml b/.master-plan.yml
index b6bde1d3d..b96de9bd3 100644
--- a/.master-plan.yml
+++ b/.master-plan.yml
@@ -230,6 +230,28 @@ spec:
     git/location: ""
     test/status: "Not Done"
+
+  # -------------------------------------------------
+  # Kubernetes Azure chaos experiment BDD Tests
+  # -------------------------------------------------
+
+  - tcid: TCID-AZURE-INFRA-INSTANCE-STOP
+    name: "TCID-AZURE-INFRA-INSTANCE-STOP"
+    description: "Stops an Azure VM instance"
+    labels:
+      test/feature: "azure"
+      test/tags: "azure, instance stop"
+    git/location: ""
+    test/status: "Done"
+
+  - tcid: TCID-AZURE-INFRA-DISK-LOSS
+    name: "TCID-AZURE-INFRA-DISK-LOSS"
+    description: "Detach a VM Disk"
+    labels:
+      test/feature: "azure"
+      test/tags: "azure, disk loss"
+    git/location: ""
+    test/status: "Done"

   # -------------------------------------------------
   # Kubernetes GCP chaos experiment BDD Tests
   # -------------------------------------------------
diff --git a/Makefile b/Makefile
index 2e0f6e0a0..21acff37a 100644
--- a/Makefile
+++ b/Makefile
@@ -223,7 +223,23 @@ ebs-loss-by-tag:
 	@echo "------------------------------------------"
 	@echo "Running ebs-loss-by-tag experiment"
 	@echo "------------------------------------------"
-	@go test platform/aws/ebs-loss-by-tag_test.go -v -count=1 -timeout=20m
+	@go test platform/aws/ebs-loss-by-tag_test.go -v -count=1 -timeout=20m
+
+.PHONY: azure-instance-stop
+azure-instance-stop:
+
+	@echo "------------------------------------------"
+	@echo "Running azure-instance-stop experiment"
+	@echo "------------------------------------------"
+	@go test platform/azure/instance-stop_test.go -v -count=1 -timeout=20m
+
+.PHONY: azure-disk-loss
+azure-disk-loss:
+
+	@echo "------------------------------------------"
+	@echo "Running azure-disk-loss experiment"
+	@echo "------------------------------------------"
+	@go test platform/azure/disk-loss_test.go -v -count=1 -timeout=20m
 
 .PHONY: gcp-vm-instance-stop
 gcp-vm-instance-stop:
diff --git a/pkg/environment/environment.go b/pkg/environment/environment.go
index f0221c059..25fc7d37c 100644
--- a/pkg/environment/environment.go
+++ b/pkg/environment/environment.go
@@ -44,6 +44,10 @@ func GetENV(testDetails *types.TestDetails, expName, engineName string) {
 	testDetails.UpdateWebsite = Getenv("UPDATE_WEBSITE", "false")
 	testDetails.TargetNodes = Getenv("TARGET_NODES", "")
 	testDetails.NodeLabel = Getenv("NODE_LABEL", "")
+	testDetails.AzureResourceGroup = Getenv("AZURE_RESOURCE_GROUP", "")
+	testDetails.AzureInstanceName = Getenv("AZURE_INSTANCE_NAME", "")
+	testDetails.AzureDiskName = Getenv("AZURE_DISK_NAME", "")
+	testDetails.AzureScaleSet = Getenv("AZURE_SCALE_SET", "")
 	testDetails.Args = Getenv("ARGS", "")
 	testDetails.Command = Getenv("COMMAND", "")
 
diff --git a/pkg/install.go b/pkg/install.go
index 818ca7b9d..0878cfa0c 100644
--- a/pkg/install.go
+++ b/pkg/install.go
@@ -220,7 +220,7 @@ func InstallGoChaosExperiment(testsDetails *types.TestDetails, chaosExperiment *
 	//Fetch Experiment file
 	res, err := http.Get(testsDetails.ExperimentPath)
 	if err != nil {
-		return errors.Errorf("Fail to fetch the rbac file, due to %v", err)
+		return errors.Errorf("Fail to fetch the experiment file, due to %v", err)
 	}
 
 	// ReadAll reads from response until an error or EOF and returns the data it read.
@@ -319,6 +319,14 @@ func setEngineVar(chaosEngine *v1alpha1.ChaosEngine, testsDetails *types.TestDet
 			Name:  "CHAOS_KILL_COMMAND",
 			Value: testsDetails.MemoryKillCommand,
 		})
+	case "azure-instance-stop":
+		envDetails.SetEnv("RESOURCE_GROUP", testsDetails.AzureResourceGroup).
+			SetEnv("AZURE_INSTANCE_NAME", testsDetails.AzureInstanceName).
+			SetEnv("AZURE_SCALE_SET", testsDetails.AzureScaleSet)
+	case "azure-disk-loss":
+		envDetails.SetEnv("RESOURCE_GROUP", testsDetails.AzureResourceGroup).
+			SetEnv("AZURE_SCALE_SET", testsDetails.AzureScaleSet).
+			SetEnv("VIRTUAL_DISK_NAMES", testsDetails.AzureDiskName)
 	case "gcp-vm-instance-stop":
 		envDetails.SetEnv("GCP_PROJECT_ID", testsDetails.GCPProjectID).
 			SetEnv("VM_INSTANCE_NAMES", testsDetails.VMInstanceNames).
diff --git a/pkg/types/types.go b/pkg/types/types.go
index cc91c0eea..65b77bcbe 100644
--- a/pkg/types/types.go
+++ b/pkg/types/types.go
@@ -56,6 +56,10 @@ type TestDetails struct {
 	Version            string
 	TargetNodes        string
 	NodeLabel          string
+	AzureResourceGroup string
+	AzureInstanceName  string
+	AzureDiskName      string
+	AzureScaleSet      string
 	Args               string
 	Command            string
 }
diff --git a/platform/azure/disk-loss_test.go b/platform/azure/disk-loss_test.go
new file mode 100644
index 000000000..f4a3ef304
--- /dev/null
+++ b/platform/azure/disk-loss_test.go
@@ -0,0 +1,150 @@
+package test
+
+import (
+	"testing"
+
+	"github.com/litmuschaos/chaos-operator/pkg/apis/litmuschaos/v1alpha1"
+	"github.com/litmuschaos/litmus-e2e/pkg"
+	"github.com/litmuschaos/litmus-e2e/pkg/environment"
+	"github.com/litmuschaos/litmus-e2e/pkg/log"
+	"github.com/litmuschaos/litmus-e2e/pkg/types"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/klog"
+)
+
+func TestGoAzureDiskLoss(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "BDD test")
+}
+
+var _ = Describe("BDD of azure-disk-loss experiment", func() {
+	// BDD TEST CASE 1 - azure-disk-loss in parallel mode
+	Context("Check for azure-disk-loss in parallel mode experiment", func() {
+		It("Should check for the azure disk loss in parallel", func() {
+
+			testsDetails := types.TestDetails{}
+			clients := environment.ClientSets{}
+			chaosExperiment := v1alpha1.ChaosExperiment{}
+			chaosEngine := v1alpha1.ChaosEngine{}
+
+			//Getting kubeConfig and Generate ClientSets
+			By("[PreChaos]: Getting kubeconfig and generate clientset")
+			err := clients.GenerateClientSetFromKubeConfig()
+			Expect(err).To(BeNil(), "Unable to Get the kubeconfig, due to {%v}", err)
+
+			//Fetching all the default ENV
+			By("[PreChaos]: Fetching all default ENVs")
+			klog.Infof("[PreReq]: Getting the ENVs for the %v test", testsDetails.ExperimentName)
+			environment.GetENV(&testsDetails, "azure-disk-loss", "az-en-par")
+			log.Infof("[Info]: The target disks are: %v", testsDetails.AzureDiskName)
+
+			testsDetails.RbacPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/rbac.yaml"
+			testsDetails.ExperimentPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/experiment.yaml"
+			testsDetails.EnginePath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/engine.yaml"
+			testsDetails.ChaosNamespace = "default"
+			testsDetails.AppNS = "default"
+
+			// Checking the chaos operator running status
+			By("[Status]: Checking chaos operator status")
+			err = pkg.OperatorStatusCheck(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Operator status check failed, due to {%v}", err)
+
+			// Prepare Chaos Execution
+			By("[Prepare]: Prepare Chaos Execution")
+			err = pkg.PrepareChaos(&testsDetails, &chaosExperiment, &chaosEngine, clients, false)
+			Expect(err).To(BeNil(), "fail to prepare chaos, due to {%v}", err)
+
+			//Checking runner pod running state
+			By("[Status]: Runner pod running status check")
+			err = pkg.RunnerPodStatus(&testsDetails, testsDetails.AppNS, clients)
+			Expect(err).To(BeNil(), "Runner pod status check failed, due to {%v}", err)
+
+			//Chaos pod running status check
+			err = pkg.ChaosPodStatus(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Chaos pod status check failed, due to {%v}", err)
+
+			//Waiting for chaos pod to get completed
+			//And Print the logs of the chaos pod
+			By("[Status]: Wait for chaos pod completion and then print logs")
+			err = pkg.ChaosPodLogs(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Fail to get the experiment chaos pod logs, due to {%v}", err)
+
+			//Checking the chaosresult verdict
+			By("[Verdict]: Checking the chaosresult verdict")
+			err = pkg.ChaosResultVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosResult Verdict check failed, due to {%v}", err)
+
+			//Checking chaosengine verdict
+			By("Checking the Verdict of Chaos Engine")
+			err = pkg.ChaosEngineVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosEngine Verdict check failed, due to {%v}", err)
+		})
+	})
+
+	// BDD TEST CASE 2 - azure-disk-loss in serial mode
+	Context("Check for azure-disk-loss experiment", func() {
+
+		It("Should check for the azure disk loss in serial", func() {
+
+			testsDetails := types.TestDetails{}
+			clients := environment.ClientSets{}
+			chaosExperiment := v1alpha1.ChaosExperiment{}
+			chaosEngine := v1alpha1.ChaosEngine{}
+
+			//Getting kubeConfig and Generate ClientSets
+			By("[PreChaos]: Getting kubeconfig and generate clientset")
+			err := clients.GenerateClientSetFromKubeConfig()
+			Expect(err).To(BeNil(), "Unable to Get the kubeconfig, due to {%v}", err)
+
+			//Fetching all the default ENV
+			By("[PreChaos]: Fetching all default ENVs")
+			klog.Infof("[PreReq]: Getting the ENVs for the %v test", testsDetails.ExperimentName)
+			environment.GetENV(&testsDetails, "azure-disk-loss", "az-en-ser")
+			log.Infof("[Info]: The target disks are: %v", testsDetails.AzureDiskName)
+
+			testsDetails.RbacPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/rbac.yaml"
+			testsDetails.ExperimentPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/experiment.yaml"
+			testsDetails.EnginePath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-disk-loss/engine.yaml"
+			testsDetails.Sequence = "serial"
+			testsDetails.ChaosNamespace = "default"
+			testsDetails.AppNS = "default"
+
+			// Checking the chaos operator running status
+			By("[Status]: Checking chaos operator status")
+			err = pkg.OperatorStatusCheck(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Operator status check failed, due to {%v}", err)
+
+			// Prepare Chaos Execution
+			By("[Prepare]: Prepare Chaos Execution")
+			err = pkg.PrepareChaos(&testsDetails, &chaosExperiment, &chaosEngine, clients, false)
+			Expect(err).To(BeNil(), "fail to prepare chaos, due to {%v}", err)
+
+			//Checking runner pod running state
+			By("[Status]: Runner pod running status check")
+			err = pkg.RunnerPodStatus(&testsDetails, testsDetails.AppNS, clients)
+			Expect(err).To(BeNil(), "Runner pod status check failed, due to {%v}", err)
+
+			//Chaos pod running status check
+			err = pkg.ChaosPodStatus(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Chaos pod status check failed, due to {%v}", err)
+
+			//Waiting for chaos pod to get completed
+			//And Print the logs of the chaos pod
+			By("[Status]: Wait for chaos pod completion and then print logs")
+			err = pkg.ChaosPodLogs(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Fail to get the experiment chaos pod logs, due to {%v}", err)
+
+			//Checking the chaosresult verdict
+			By("[Verdict]: Checking the chaosresult verdict")
+			err = pkg.ChaosResultVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosResult Verdict check failed, due to {%v}", err)
+
+			//Checking chaosengine verdict
+			By("Checking the Verdict of Chaos Engine")
+			err = pkg.ChaosEngineVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosEngine Verdict check failed, due to {%v}", err)
+
+		})
+	})
+})
diff --git a/platform/azure/instance-stop_test.go b/platform/azure/instance-stop_test.go
new file mode 100644
index 000000000..6d5d409b6
--- /dev/null
+++ b/platform/azure/instance-stop_test.go
@@ -0,0 +1,150 @@
+package test
+
+import (
+	"testing"
+
+	"github.com/litmuschaos/chaos-operator/pkg/apis/litmuschaos/v1alpha1"
+	"github.com/litmuschaos/litmus-e2e/pkg"
+	"github.com/litmuschaos/litmus-e2e/pkg/environment"
+	"github.com/litmuschaos/litmus-e2e/pkg/log"
+	"github.com/litmuschaos/litmus-e2e/pkg/types"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"k8s.io/klog"
+)
+
+func TestGoAzureInstanceStop(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "BDD test")
+}
+
+var _ = Describe("BDD of azure-instance-stop experiment", func() {
+	// BDD TEST CASE 1 - azure-instance-stop in parallel mode
+	Context("Check for azure-instance-stop in parallel mode experiment", func() {
+		It("Should check for the azure instance stop in parallel", func() {
+
+			testsDetails := types.TestDetails{}
+			clients := environment.ClientSets{}
+			chaosExperiment := v1alpha1.ChaosExperiment{}
+			chaosEngine := v1alpha1.ChaosEngine{}
+
+			//Getting kubeConfig and Generate ClientSets
+			By("[PreChaos]: Getting kubeconfig and generate clientset")
+			err := clients.GenerateClientSetFromKubeConfig()
+			Expect(err).To(BeNil(), "Unable to Get the kubeconfig, due to {%v}", err)
+
+			//Fetching all the default ENV
+			By("[PreChaos]: Fetching all default ENVs")
+			klog.Infof("[PreReq]: Getting the ENVs for the %v test", testsDetails.ExperimentName)
+			environment.GetENV(&testsDetails, "azure-instance-stop", "az-en-par")
+			log.Infof("[Info]: The target instances are: %v", testsDetails.AzureInstanceName)
+
+			testsDetails.RbacPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/rbac.yaml"
+			testsDetails.ExperimentPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/experiment.yaml"
+			testsDetails.EnginePath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/engine.yaml"
+			testsDetails.ChaosNamespace = "default"
+			testsDetails.AppNS = "default"
+
+			// Checking the chaos operator running status
+			By("[Status]: Checking chaos operator status")
+			err = pkg.OperatorStatusCheck(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Operator status check failed, due to {%v}", err)
+
+			// Prepare Chaos Execution
+			By("[Prepare]: Prepare Chaos Execution")
+			err = pkg.PrepareChaos(&testsDetails, &chaosExperiment, &chaosEngine, clients, false)
+			Expect(err).To(BeNil(), "fail to prepare chaos, due to {%v}", err)
+
+			//Checking runner pod running state
+			By("[Status]: Runner pod running status check")
+			err = pkg.RunnerPodStatus(&testsDetails, testsDetails.AppNS, clients)
+			Expect(err).To(BeNil(), "Runner pod status check failed, due to {%v}", err)
+
+			//Chaos pod running status check
+			err = pkg.ChaosPodStatus(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Chaos pod status check failed, due to {%v}", err)
+
+			//Waiting for chaos pod to get completed
+			//And Print the logs of the chaos pod
+			By("[Status]: Wait for chaos pod completion and then print logs")
+			err = pkg.ChaosPodLogs(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Fail to get the experiment chaos pod logs, due to {%v}", err)
+
+			//Checking the chaosresult verdict
+			By("[Verdict]: Checking the chaosresult verdict")
+			err = pkg.ChaosResultVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosResult Verdict check failed, due to {%v}", err)
+
+			//Checking chaosengine verdict
+			By("Checking the Verdict of Chaos Engine")
+			err = pkg.ChaosEngineVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosEngine Verdict check failed, due to {%v}", err)
+		})
+	})
+
+	// BDD TEST CASE 2 - azure-instance-stop in serial mode
+	Context("Check for azure-instance-stop experiment", func() {
+
+		It("Should check for the azure instance stop in serial", func() {
+
+			testsDetails := types.TestDetails{}
+			clients := environment.ClientSets{}
+			chaosExperiment := v1alpha1.ChaosExperiment{}
+			chaosEngine := v1alpha1.ChaosEngine{}
+
+			//Getting kubeConfig and Generate ClientSets
+			By("[PreChaos]: Getting kubeconfig and generate clientset")
+			err := clients.GenerateClientSetFromKubeConfig()
+			Expect(err).To(BeNil(), "Unable to Get the kubeconfig, due to {%v}", err)
+
+			//Fetching all the default ENV
+			By("[PreChaos]: Fetching all default ENVs")
+			klog.Infof("[PreReq]: Getting the ENVs for the %v test", testsDetails.ExperimentName)
+			environment.GetENV(&testsDetails, "azure-instance-stop", "az-en-ser")
+			log.Infof("[Info]: The target instances are: %v", testsDetails.AzureInstanceName)
+
+			testsDetails.RbacPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/rbac.yaml"
+			testsDetails.ExperimentPath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/experiment.yaml"
+			testsDetails.EnginePath = "https://hub.litmuschaos.io/api/chaos/master?file=charts/azure/azure-instance-stop/engine.yaml"
+			testsDetails.Sequence = "serial"
+			testsDetails.ChaosNamespace = "default"
+			testsDetails.AppNS = "default"
+
+			// Checking the chaos operator running status
+			By("[Status]: Checking chaos operator status")
+			err = pkg.OperatorStatusCheck(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Operator status check failed, due to {%v}", err)
+
+			// Prepare Chaos Execution
+			By("[Prepare]: Prepare Chaos Execution")
+			err = pkg.PrepareChaos(&testsDetails, &chaosExperiment, &chaosEngine, clients, false)
+			Expect(err).To(BeNil(), "fail to prepare chaos, due to {%v}", err)
+
+			//Checking runner pod running state
+			By("[Status]: Runner pod running status check")
+			err = pkg.RunnerPodStatus(&testsDetails, testsDetails.AppNS, clients)
+			Expect(err).To(BeNil(), "Runner pod status check failed, due to {%v}", err)
+
+			//Chaos pod running status check
+			err = pkg.ChaosPodStatus(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Chaos pod status check failed, due to {%v}", err)
+
+			//Waiting for chaos pod to get completed
+			//And Print the logs of the chaos pod
+			By("[Status]: Wait for chaos pod completion and then print logs")
+			err = pkg.ChaosPodLogs(&testsDetails, clients)
+			Expect(err).To(BeNil(), "Fail to get the experiment chaos pod logs, due to {%v}", err)
+
+			//Checking the chaosresult verdict
+			By("[Verdict]: Checking the chaosresult verdict")
+			err = pkg.ChaosResultVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosResult Verdict check failed, due to {%v}", err)
+
+			//Checking chaosengine verdict
+			By("Checking the Verdict of Chaos Engine")
+			err = pkg.ChaosEngineVerdict(&testsDetails, clients)
+			Expect(err).To(BeNil(), "ChaosEngine Verdict check failed, due to {%v}", err)
+
+		})
+	})
+})