Diffstat (limited to 'roles/onap-chaos-tests')
21 files changed, 1061 insertions, 0 deletions
diff --git a/roles/onap-chaos-tests/tasks/cassandra.yaml b/roles/onap-chaos-tests/tasks/cassandra.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/cassandra.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/tasks/kafka.yaml b/roles/onap-chaos-tests/tasks/kafka.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/kafka.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/tasks/main.yaml b/roles/onap-chaos-tests/tasks/main.yaml
new file mode 100644
index 0000000..2fc48d2
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/main.yaml
@@ -0,0 +1,22 @@
+---
+# - name: Check the chaos target is defined
+#   ansible.builtin.fail:
+#     msg: "You must specify a chaos target (node_drain,...)"
+#   when: experiment_name is not defined
+#   tags: init_check
+
+- import_tasks: prepare.yaml
+  tags: prepare
+
+- import_tasks: node-drain.yaml
+  tags: node-drain
+
+- import_tasks: node-cpu-hog.yaml
+  tags: node-cpu-hog
+
+- import_tasks: node-memory-hog.yaml
+  tags: node-memory-hog
+
+- import_tasks: pod-delete-aai.yaml
+  tags: aai
+
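Note: main.yaml imports every scenario unconditionally, so the tags are the only switch for running one experiment at a time. A minimal invocation sketch, assuming a playbook named chaos.yaml that applies this role (the playbook and inventory names are hypothetical):

    $ ansible-playbook -i inventory chaos.yaml --tags "prepare,node-drain"
    $ ansible-playbook -i inventory chaos.yaml --tags node-cpu-hog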
diff --git a/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml b/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml
new file mode 100644
index 0000000..c70f339
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml
@@ -0,0 +1,86 @@
+---
+- name: create directory for cpu hog scenario
+  ansible.builtin.file:
+    path: /tmp/resiliency/node-cpu-hog
+    state: directory
+    mode: '0755'
+  tags: prepare
+
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+  tags: prepare
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+  tags: prepare
+
+- name: Set Compute for the node cpu hog
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+  tags: prepare
+
+- name: Prepare rbac file for node cpu hog experiment
+  ansible.builtin.template:
+    src: node-cpu-hog-rbac.yaml.j2
+    dest: /tmp/resiliency/node-cpu-hog/node-cpu-hog-rbac.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Prepare chaos file for node cpu hog experiment
+  ansible.builtin.template:
+    src: node-cpu-hog-chaos.yaml.j2
+    dest: /tmp/resiliency/node-cpu-hog/node-cpu-hog-chaos.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Apply node cpu hog rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-cpu-hog/node-cpu-hog-rbac.yaml
+
+- name: Apply node cpu hog experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-cpu-hog/node-cpu-hog-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-cpu
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-cpu-node-cpu-hog
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_cpu
+  changed_when:
+    &chaos_result chaosresult_cpu is defined and
+    (chaosresult_cpu.resources[0].status.experimentStatus.verdict == "Pass" or
+     chaosresult_cpu.resources[0].status.experimentStatus.verdict == "Fail")
+  until: *chaos_result
+  retries: 5
+  delay: 20
+
+- name: Print the chaos result verdict
+  ansible.builtin.debug:
+    msg: "The test is {{ chaosresult_cpu.resources[0].status.experimentStatus.verdict }}-ed"
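The changed_when/until pair above relies on a YAML anchor: &chaos_test names the completion expression once, and until: *chaos_test re-evaluates it on each retry, so the k8s_info lookup polls the ChaosEngine up to 5 times, 120 s apart. The same objects can be followed by hand; a sketch assuming onap_namespace resolves to onap:

    $ kubectl get chaosengine onap-chaos-cpu -n onap -o jsonpath='{.status.engineStatus}'
    $ kubectl get chaosresult onap-chaos-cpu-node-cpu-hog -n onap -o jsonpath='{.status.experimentStatus.verdict}'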
diff --git a/roles/onap-chaos-tests/tasks/node-drain.yaml b/roles/onap-chaos-tests/tasks/node-drain.yaml
new file mode 100644
index 0000000..84d53c4
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-drain.yaml
@@ -0,0 +1,105 @@
+---
+- name: create directory for drain scenario
+  ansible.builtin.file:
+    path: /tmp/resiliency/node-drain
+    state: directory
+    mode: '0755'
+  tags: prepare
+
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+  tags: prepare
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+  tags: prepare
+
+- name: Set Compute for the drain chaos
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+  tags: prepare
+
+- name: Prepare rbac file for drain experiment
+  ansible.builtin.template:
+    src: node-drain-rbac.yaml.j2
+    dest: /tmp/resiliency/node-drain/node-drain-rbac.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Prepare chaos file for drain experiment
+  ansible.builtin.template:
+    src: node-drain-chaos.yaml.j2
+    dest: /tmp/resiliency/node-drain/node-drain-chaos.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Apply drain rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-drain/node-drain-rbac.yaml
+  tags: apply
+
+- name: Cordon the Chosen node
+  ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-drain/node-drain-chaos.yaml
+  tags: apply
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-drain
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+  tags: wait
+# - name: Print the chaos engine object
+#   ansible.builtin.debug:
+#     msg: "{{ chaosengine_status.resources[0].status.engineStatus }}"
+
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-drain-node-drain
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_drain
+  changed_when:
+    &chaos_result chaosresult_drain is defined and
+    (chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass" or
+     chaosresult_drain.resources[0].status.experimentStatus.verdict == "Fail")
+  until: *chaos_result
+  retries: 5
+  delay: 10
+  tags: wait
+
+- name: Print the chaos result object
+  ansible.builtin.debug:
+    msg: "{{ chaosresult_drain.resources[0].status.experimentStatus.verdict }}"
+
+- name: Print the chaos result verdict
+  ansible.builtin.debug:
+    msg: "The test is {{ chaosresult_drain.resources[0].status.experimentStatus.verdict }}-ed"
+
+- name: Uncordon the Chosen node
+  ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
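Because compute_chaos falls back to the first compute node only when it is undefined, the drained node can be chosen per run. A sketch, using the same hypothetical playbook as above and a hypothetical node name:

    $ ansible-playbook -i inventory chaos.yaml --tags node-drain -e compute_chaos=compute-03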
diff --git a/roles/onap-chaos-tests/tasks/node-memory-hog.yaml b/roles/onap-chaos-tests/tasks/node-memory-hog.yaml
new file mode 100644
index 0000000..82ad014
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-memory-hog.yaml
@@ -0,0 +1,86 @@
+---
+- name: create directory for memory hog scenario
+  ansible.builtin.file:
+    path: /tmp/resiliency/node-memory-hog
+    state: directory
+    mode: '0755'
+  tags: prepare
+
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+  tags: prepare
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+  tags: prepare
+
+- name: Set Compute for the node memory hog
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+  tags: prepare
+
+- name: Prepare rbac file for node memory hog experiment
+  ansible.builtin.template:
+    src: node-memory-hog-rbac.yaml.j2
+    dest: /tmp/resiliency/node-memory-hog/node-memory-hog-rbac.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Prepare chaos file for node memory hog experiment
+  ansible.builtin.template:
+    src: node-memory-hog-chaos.yaml.j2
+    dest: /tmp/resiliency/node-memory-hog/node-memory-hog-chaos.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Apply node memory hog rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-memory-hog/node-memory-hog-rbac.yaml
+
+- name: Apply node memory hog experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/node-memory-hog/node-memory-hog-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-memory
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-memory-node-memory-hog
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_memory
+  changed_when:
+    &chaos_result chaosresult_memory is defined and
+    (chaosresult_memory.resources[0].status.experimentStatus.verdict == "Pass" or
+     chaosresult_memory.resources[0].status.experimentStatus.verdict == "Fail")
+  until: *chaos_result
+  retries: 5
+  delay: 20
+
+- name: Print the chaos result verdict
+  ansible.builtin.debug:
+    msg: "The test is {{ chaosresult_memory.resources[0].status.experimentStatus.verdict }}-ed"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml b/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml
new file mode 100644
index 0000000..87fbb17
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml
@@ -0,0 +1,103 @@
+---
+- name: create directory for pod delete aai scenario
+  ansible.builtin.file:
+    path: /tmp/resiliency/pod-delete-aai
+    state: directory
+    mode: '0755'
+  tags: prepare
+
+- name: Get deployments name
+  ansible.builtin.shell: kubectl get deployments -n {{ onap_namespace }} | grep aai | awk '{print $1}'
+  register: deployments
+  tags: prepare
+
+- name: print grep output
+  ansible.builtin.debug:
+    msg: "{{ deployments.stdout_lines }}"
+
+
+- name: add labels to deployments
+  community.kubernetes.k8s:
+    state: present
+    definition:
+      apiVersion: apps/v1
+      kind: Deployment
+      metadata:
+        name: "{{ item }}"
+        namespace: "{{ onap_namespace }}"
+        labels:
+          component: onap-aai
+      spec:
+        template:
+          metadata:
+            labels:
+              component: onap-aai
+  loop: "{{ deployments.stdout_lines }}"
+  tags: prepare
+
+- name: get AAI pod name
+  ansible.builtin.shell: kubectl get pods -n {{ onap_namespace }} | grep aai | awk '{print $1}'
+  register: pod_list
+  tags: prepare
+
+- name: print pod list
+  ansible.builtin.debug:
+    msg: "{{ pod_list.stdout_lines | join(', ') }}"
+
+- name: Prepare rbac file for pod delete aai experiment
+  ansible.builtin.template:
+    src: pod-delete-rbac.yaml.j2
+    dest: /tmp/resiliency/pod-delete-aai/pod-delete-aai-rbac.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Prepare chaos file for pod delete aai experiment
+  ansible.builtin.template:
+    src: pod-delete-aai-chaos.yaml.j2
+    dest: /tmp/resiliency/pod-delete-aai/pod-delete-aai-chaos.yaml
+    mode: '0600'
+  tags: prepare
+
+- name: Apply pod delete aai rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/pod-delete-aai/pod-delete-aai-rbac.yaml
+
+- name: Apply pod delete aai experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/pod-delete-aai/pod-delete-aai-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: aai-chaos
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: aai-chaos-pod-delete
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_aai
+  changed_when:
+    &chaos_result chaosresult_aai is defined and
+    (chaosresult_aai.resources[0].status.experimentStatus.verdict == "Pass" or
+     chaosresult_aai.resources[0].status.experimentStatus.verdict == "Fail")
+  until: *chaos_result
+  retries: 5
+  delay: 20
+
+- name: Print the chaos result verdict
+  ansible.builtin.debug:
+    msg: "The test is {{ chaosresult_aai.resources[0].status.experimentStatus.verdict }}-ed"
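The pod-delete experiment selects its victims through the component=onap-aai label patched onto the deployments above, so it is worth confirming the label took effect before the chaos starts. A sketch, again assuming onap_namespace resolves to onap:

    $ kubectl get deployments -n onap -l component=onap-aai
    $ kubectl get pods -n onap -l component=onap-aai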
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml b/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+  ansible.builtin.template:
+    src: drain-rbac.yaml.j2
+    dest: /tmp/drain-rbac.yaml
+    mode: '0600'
+
+- name: Prepare chaos file for drain experiment
+  ansible.builtin.template:
+    src: drain-chaos.yaml.j2
+    dest: /tmp/drain-chaos.yaml
+    mode: '0600'
+
+- name: Apply drain rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+  ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-node-drain
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_drain
+  changed_when:
+    &chaos_result chaosresult_drain is defined and
+    chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+  until: *chaos_result
+  retries: 5
+  delay: 10
+
+- name: Uncordon the Chosen node
+  ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml b/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+  ansible.builtin.template:
+    src: drain-rbac.yaml.j2
+    dest: /tmp/drain-rbac.yaml
+    mode: '0600'
+
+- name: Prepare chaos file for drain experiment
+  ansible.builtin.template:
+    src: drain-chaos.yaml.j2
+    dest: /tmp/drain-chaos.yaml
+    mode: '0600'
+
+- name: Apply drain rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+  ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-node-drain
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_drain
+  changed_when:
+    &chaos_result chaosresult_drain is defined and
+    chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+  until: *chaos_result
+  retries: 5
+  delay: 10
+
+- name: Uncordon the Chosen node
+  ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-so.yaml b/roles/onap-chaos-tests/tasks/pod-delete-so.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-so.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+  community.kubernetes.k8s_info:
+    kind: Node
+    label_selectors: "! node-role.kubernetes.io/master"
+  register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ item.address }}"
+  when: "'Hostname' in item.type"
+  with_items:
+    "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+  ansible.builtin.set_fact:
+    compute_chaos: "{{ first_node_ip }}"
+  when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+  ansible.builtin.template:
+    src: drain-rbac.yaml.j2
+    dest: /tmp/drain-rbac.yaml
+    mode: '0600'
+
+- name: Prepare chaos file for drain experiment
+  ansible.builtin.template:
+    src: drain-chaos.yaml.j2
+    dest: /tmp/drain-chaos.yaml
+    mode: '0600'
+
+- name: Apply drain rbac
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+  ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosEngine
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos
+    namespace: "{{ onap_namespace }}"
+  register: chaosengine_status
+  changed_when:
+    &chaos_test chaosengine_status is defined and
+    chaosengine_status.resources[0].status.engineStatus == "completed"
+  until: *chaos_test
+  retries: 5
+  delay: 120
+
+- name: get results
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: ChaosResult
+    api_version: litmuschaos.io/v1alpha1
+    name: onap-chaos-node-drain
+    namespace: "{{ onap_namespace }}"
+  register: chaosresult_drain
+  changed_when:
+    &chaos_result chaosresult_drain is defined and
+    chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+  until: *chaos_result
+  retries: 5
+  delay: 10
+
+- name: Uncordon the Chosen node
+  ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/prepare.yaml b/roles/onap-chaos-tests/tasks/prepare.yaml
new file mode 100644
index 0000000..827156e
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/prepare.yaml
@@ -0,0 +1,38 @@
+---
+- name: create directory for resiliency
+  ansible.builtin.file:
+    path: /tmp/resiliency
+    state: directory
+    mode: '0755'
+
+- name: Download Litmus manifest
+  ansible.builtin.get_url:
+    url: https://litmuschaos.github.io/litmus/litmus-operator-v1.13.5.yaml
+    dest: /tmp/resiliency/litmus_manifest.yaml
+    mode: '0664'
+
+- name: Apply Litmus manifest
+  community.kubernetes.k8s:
+    state: present
+    src: /tmp/resiliency/litmus_manifest.yaml
+
+- name: Ensure litmus Chaos Operator is running
+  ansible.builtin.shell: "kubectl get pods -n litmus"
+
+- name: Download generic experiments manifest
+  ansible.builtin.get_url:
+    url: https://hub.litmuschaos.io/api/chaos/1.13.5?file=charts/generic/experiments.yaml
+    dest: /tmp/resiliency/litmus_experiments_manifest.yaml
+    mode: '0664'
+
+- name: Apply Litmus Experiment manifest
+  community.kubernetes.k8s:
+    namespace: "{{ onap_namespace }}"
+    state: present
+    src: /tmp/resiliency/litmus_experiments_manifest.yaml
+
+- name: "Copy resiliency script"
+  ansible.builtin.copy:
+    src: scripts/run_chaos_tests.sh
+    dest: /tmp/resiliency
+    mode: '0755'
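The "Ensure litmus Chaos Operator is running" task only lists the litmus pods; it does not gate on readiness. A stricter manual check, as a sketch (the name=chaos-operator label is assumed from the v1.13.5 operator manifest):

    $ kubectl wait --for=condition=Ready pod -l name=chaos-operator -n litmus --timeout=120s
    $ kubectl get chaosexperiments -n onap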
diff --git a/roles/onap-chaos-tests/tasks/reporting.yaml b/roles/onap-chaos-tests/tasks/reporting.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/reporting.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2
new file mode 100644
index 0000000..ce72420
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2
@@ -0,0 +1,30 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: node-cpu-hog
+  namespace: {{ onap_namespace }}
+spec:
+  # It can be true/false
+  annotationCheck: 'false'
+  # It can be active/stop
+  engineState: 'active'
+  # ex. values: ns1:name=percona,ns2:run=nginx
+  auxiliaryAppInfo: ''
+  chaosServiceAccount: node-cpu-hog-sa
+  # It can be delete/retain
+  jobCleanUpPolicy: 'delete'
+  experiments:
+    - name: node-cpu-hog
+      spec:
+        components:
+          env:
+            # set chaos duration (in sec) as desired
+            - name: TOTAL_CHAOS_DURATION
+              value: '120'
+
+            - name: NODE_CPU_CORE
+              value: '6'
+
+            # ENTER THE COMMA SEPARATED TARGET NODES NAME
+            - name: TARGET_NODES
+              value: {{ compute_chaos }}
diff --git a/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2
new file mode 100644
index 0000000..e240b5e
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2
@@ -0,0 +1,49 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-cpu-hog-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: node-cpu-hog-sa
+    app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: node-cpu-hog-sa
+  labels:
+    name: node-cpu-hog-sa
+    app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+  resources: ["pods","events"]
+  verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+  resources: ["pods/exec","pods/log"]
+  verbs: ["list","get","create"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-cpu-hog-sa
+  labels:
+    name: node-cpu-hog-sa
+    app.kubernetes.io/part-of: litmus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: node-cpu-hog-sa
+subjects:
+- kind: ServiceAccount
+  name: node-cpu-hog-sa
+  namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2
new file mode 100644
index 0000000..a90a5f1
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2
@@ -0,0 +1,28 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: node-drain
+  namespace: {{ onap_namespace }}
+spec:
+  # It can be true/false
+  annotationCheck: 'false'
+  # It can be active/stop
+  engineState: 'active'
+  # ex. values: ns1:name=percona,ns2:run=onap
+  auxiliaryAppInfo: ''
+  chaosServiceAccount: node-drain-sa
+  # It can be delete/retain
+  jobCleanUpPolicy: 'delete'
+  experiments:
+    - name: node-drain
+      spec:
+        components:
+          # nodeSelector:
+          #   # provide the node labels
+          #   kubernetes.io/hostname: 'node02'
+          env:
+            # enter the target node name
+            - name: TARGET_NODE
+              value: {{ compute_chaos }}
+            # set chaos duration (in sec) as desired
+            - name: TOTAL_CHAOS_DURATION
+              value: '120'
diff --git a/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2
new file mode 100644
index 0000000..d7e4a78
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2
@@ -0,0 +1,53 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-drain-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: node-drain-sa
+    app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: node-drain-sa
+  labels:
+    name: node-drain-sa
+    app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+  resources: ["pods","events"]
+  verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+  resources: ["pods/exec","pods/log","pods/eviction"]
+  verbs: ["list","get","create"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+  resources: ["daemonsets"]
+  verbs: ["list","get","delete"]
+- apiGroups: ["litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["patch","get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-drain-sa
+  labels:
+    name: node-drain-sa
+    app.kubernetes.io/part-of: litmus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: node-drain-sa
+subjects:
+- kind: ServiceAccount
+  name: node-drain-sa
+  namespace: {{ onap_namespace }}
+
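While the drain experiment runs, the cordoned node should report SchedulingDisabled and its ONAP pods should be evicted and rescheduled elsewhere. A hedged way to watch both from a second terminal (<target-node> stands for the rendered compute_chaos value):

    $ kubectl get nodes -w
    $ kubectl get pods -n onap -o wide --field-selector spec.nodeName=<target-node>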
diff --git a/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2
new file mode 100644
index 0000000..fb39b9b
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2
@@ -0,0 +1,32 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: node-memory-hog
+  namespace: {{ onap_namespace }}
+spec:
+  # It can be true/false
+  annotationCheck: 'false'
+  # It can be active/stop
+  engineState: 'active'
+  # ex. values: ns1:name=percona,ns2:run=nginx
+  auxiliaryAppInfo: ''
+  chaosServiceAccount: node-memory-hog-sa
+  # It can be delete/retain
+  jobCleanUpPolicy: 'delete'
+  experiments:
+    - name: node-memory-hog
+      spec:
+        components:
+          env:
+            # set chaos duration (in sec) as desired
+            - name: TOTAL_CHAOS_DURATION
+              value: '120'
+
+            ## Specify the size as percent of total node capacity Ex: '30'
+            ## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
+            - name: MEMORY_CONSUMPTION_PERCENTAGE
+              value: '30'
+
+            # ENTER THE COMMA SEPARATED TARGET NODES NAME
+            - name: TARGET_NODES
+              value: {{ compute_chaos }}
diff --git a/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2
new file mode 100644
index 0000000..9b21e05
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2
@@ -0,0 +1,49 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: node-memory-hog-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: node-memory-hog-sa
+    app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: node-memory-hog-sa
+  labels:
+    name: node-memory-hog-sa
+    app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+  resources: ["pods","events"]
+  verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+  resources: ["pods/exec","pods/log"]
+  verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+  resources: ["nodes"]
+  verbs: ["get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: node-memory-hog-sa
+  labels:
+    name: node-memory-hog-sa
+    app.kubernetes.io/part-of: litmus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: node-memory-hog-sa
+subjects:
+- kind: ServiceAccount
+  name: node-memory-hog-sa
+  namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2
new file mode 100644
index 0000000..1c6eb01
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2
@@ -0,0 +1,35 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+  name: aai-chaos
+  namespace: {{ onap_namespace }}
+spec:
+  appinfo:
+    appns: {{ onap_namespace }}
+    applabel: 'component=onap-aai'
+    appkind: 'deployment'
+  # It can be active/stop
+  engineState: 'active'
+  chaosServiceAccount: pod-delete-sa
+  experiments:
+    - name: pod-delete
+      spec:
+        components:
+          env:
+            - name: TARGET_PODS
+              value: {{ pod_list.stdout_lines | join(',') }}
+            # set chaos duration (in sec) as desired
+            - name: TOTAL_CHAOS_DURATION
+              value: '30'
+
+            # set chaos interval (in sec) as desired
+            - name: CHAOS_INTERVAL
+              value: '10'
+
+            # pod failures without '--force' & default terminationGracePeriodSeconds
+            - name: FORCE
+              value: 'true'
+
+            ## percentage of total pods to target
+            - name: PODS_AFFECTED_PERC
+              value: '100'
diff --git a/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2
new file mode 100644
index 0000000..2b85d42
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+  resources: ["pods","events"]
+  verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+  resources: ["pods/exec","pods/log","replicationcontrollers"]
+  verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+  resources: ["deployments","statefulsets","daemonsets","replicasets"]
+  verbs: ["list","get"]
+- apiGroups: ["apps.openshift.io"]
+  resources: ["deploymentconfigs"]
+  verbs: ["list","get"]
+- apiGroups: ["argoproj.io"]
+  resources: ["rollouts"]
+  verbs: ["list","get"]
+- apiGroups: ["litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["create","list","get","patch","update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: pod-delete-sa
+subjects:
+- kind: ServiceAccount
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2
new file mode 100644
index 0000000..2b85d42
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+  resources: ["pods","events"]
+  verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+  resources: ["pods/exec","pods/log","replicationcontrollers"]
+  verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+  resources: ["jobs"]
+  verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+  resources: ["deployments","statefulsets","daemonsets","replicasets"]
+  verbs: ["list","get"]
+- apiGroups: ["apps.openshift.io"]
+  resources: ["deploymentconfigs"]
+  verbs: ["list","get"]
+- apiGroups: ["argoproj.io"]
+  resources: ["rollouts"]
+  verbs: ["list","get"]
+- apiGroups: ["litmuschaos.io"]
+  resources: ["chaosengines","chaosexperiments","chaosresults"]
+  verbs: ["create","list","get","patch","update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
+  labels:
+    name: pod-delete-sa
+    app.kubernetes.io/part-of: litmus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: pod-delete-sa
+subjects:
+- kind: ServiceAccount
+  name: pod-delete-sa
+  namespace: {{ onap_namespace }}
\ No newline at end of file