# experiment.yaml
# Argo Workflow for the LitmusChaos pod-memory-hog experiment
# (forked from litmuschaos/chaos-charts)
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  name: argowf-chaos-pod-memory-hog
  namespace: litmus
  labels:
    subject: "{{workflow.parameters.appNamespace}}_kube-proxy"
spec:
  entrypoint: argowf-chaos
  serviceAccountName: argo-chaos
  securityContext:
    runAsUser: 1000
    runAsNonRoot: true
  arguments:
    parameters:
      - name: adminModeNamespace
        value: "litmus"
      - name: appNamespace
        value: "kube-system"
  templates:
    - name: argowf-chaos
      steps:
        - - name: install-chaos-faults
            template: install-chaos-faults
        - - name: run-chaos
            template: run-chaos
        - - name: cleanup-chaos-resources
            template: cleanup-chaos-resources
    - name: install-chaos-faults
      inputs:
        artifacts:
          - name: install-chaos-faults
            path: /tmp/pod-memory-hog.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                description:
                  message: |
                    Injects memory consumption on pods belonging to an app deployment
                kind: ChaosExperiment
                metadata:
                  name: pod-memory-hog
                spec:
                  definition:
                    scope: Namespaced
                    permissions:
                      - apiGroups:
                          - ""
                          - "batch"
                          - "litmuschaos.io"
                        resources:
                          - "jobs"
                          - "pods"
                          - "pods/log"
                          - "events"
                          - "chaosengines"
                          - "chaosexperiments"
                          - "chaosresults"
                        verbs:
                          - "create"
                          - "list"
                          - "get"
                          - "patch"
                          - "update"
                          - "delete"
                    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.11.0"
                    args:
                      - -c
                      - ./experiments -name pod-memory-hog
                    command:
                      - /bin/bash
                    env:
                      - name: TOTAL_CHAOS_DURATION
                        value: '30'
                      - name: CHAOS_INTERVAL
                        value: '10'
                      ## Amount of memory, in megabytes, to be consumed by the application pod
                      - name: MEMORY_CONSUMPTION
                        value: '500'
                      ## Percentage of total pods to target
                      - name: PODS_AFFECTED_PERC
                        value: ''
                      ## Period to wait before and after chaos injection, in seconds
                      - name: RAMP_TIME
                        value: ''
                      - name: TARGET_POD
                        value: ''
                    labels:
                      name: pod-memory-hog
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          [
            "kubectl apply -f /tmp/pod-memory-hog.yaml -n {{workflow.parameters.adminModeNamespace}}",
          ]
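    # Step 2: apply the ChaosEngine below and block until it completes;
    # litmus-checker watches the engine and fails this step if the
    # experiment verdict is not Pass.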
    - name: run-chaos
      inputs:
        artifacts:
          - name: run-chaos
            path: /tmp/chaosengine.yaml
            raw:
              data: |
                apiVersion: litmuschaos.io/v1alpha1
                kind: ChaosEngine
                metadata:
                  name: kube-proxy-pod-memory-hog-chaos
                  namespace: "{{workflow.parameters.adminModeNamespace}}"
                  labels:
                    context: "{{workflow.parameters.appNamespace}}_kube-proxy"
                  annotations: {}
                spec:
                  appinfo:
                    appns: kube-system
                    applabel: "k8s-app=kube-proxy"
                    appkind: daemonset
                  jobCleanUpPolicy: retain
                  engineState: 'active'
                  chaosServiceAccount: litmus-admin
                  experiments:
                    - name: pod-memory-hog
                      spec:
                        components:
                          env:
                            - name: TARGET_CONTAINER
                              value: 'kube-proxy'
                            - name: MEMORY_CONSUMPTION
                              value: '500'
                            - name: TOTAL_CHAOS_DURATION
                              value: '60' # in seconds
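                            # Kills the dd worker spawned by the experiment;
                            # the bracketed [dd] keeps grep from matching its
                            # own process entry.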
                            - name: CHAOS_KILL_COMMAND
                              value: "kill -9 $(ps afx | grep \"[dd] if=/dev/zero\" | awk '{print $1}' | tr '\n' ' ')"
      container:
        image: litmuschaos/litmus-checker:latest
        args: ["-file=/tmp/chaosengine.yaml", "-saveName=/tmp/engine-name"]
    - name: cleanup-chaos-resources
      container:
        image: litmuschaos/k8s:latest
        command: [sh, -c]
        args:
          [
            "kubectl delete chaosengine kube-proxy-pod-memory-hog-chaos -n {{workflow.parameters.adminModeNamespace}}",
          ]
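
# A minimal sketch of how this workflow might be submitted, assuming the Argo
# CLI is installed and that the litmus namespace, the argo-chaos
# ServiceAccount, and the litmus-admin chaos service account already exist.
# The parameter values shown are the defaults declared above:
#
#   argo submit experiment.yaml -n litmus \
#     -p adminModeNamespace=litmus \
#     -p appNamespace=kube-system
#
#   argo watch @latest -n litmus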