Diffstat (limited to 'roles')
-rw-r--r-- roles/legal-tern/tasks/main.yaml | 90
-rw-r--r-- roles/legal-tern/templates/.netrc.j2 | 3
-rw-r--r-- roles/onap-chaos-tests/tasks/cassandra.yaml | 1
-rw-r--r-- roles/onap-chaos-tests/tasks/kafka.yaml | 1
-rw-r--r-- roles/onap-chaos-tests/tasks/main.yaml | 22
-rw-r--r-- roles/onap-chaos-tests/tasks/node-cpu-hog.yaml | 86
-rw-r--r-- roles/onap-chaos-tests/tasks/node-drain.yaml | 105
-rw-r--r-- roles/onap-chaos-tests/tasks/node-memory-hog.yaml | 86
-rw-r--r-- roles/onap-chaos-tests/tasks/pod-delete-aai.yaml | 103
-rw-r--r-- roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml | 76
-rw-r--r-- roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml | 76
-rw-r--r-- roles/onap-chaos-tests/tasks/pod-delete-so.yaml | 76
-rw-r--r-- roles/onap-chaos-tests/tasks/prepare.yaml | 38
-rw-r--r-- roles/onap-chaos-tests/tasks/reporting.yaml | 1
-rw-r--r-- roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2 | 30
-rw-r--r-- roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2 | 49
-rw-r--r-- roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2 | 28
-rw-r--r-- roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2 | 53
-rw-r--r-- roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2 | 32
-rw-r--r-- roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2 | 49
-rw-r--r-- roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2 | 35
-rw-r--r-- roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2 | 57
-rw-r--r-- roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2 | 57
-rw-r--r-- roles/onap-stability-tests/tasks/main.yaml | 61
-rw-r--r-- roles/onap-stability-tests/templates/.netrc.j2 | 3
-rw-r--r-- roles/xtesting-healthcheck-k8s-job/defaults/main.yaml | 47
-rw-r--r-- roles/xtesting-healthcheck-k8s-job/tasks/main.yaml | 51
-rw-r--r-- roles/xtesting-healthcheck-k8s/defaults/main.yaml | 9
-rw-r--r-- roles/xtesting-healthcheck-k8s/tasks/main.yaml | 126
-rw-r--r-- roles/xtesting-healthcheck-k8s/templates/env-os.j2 | 17
-rw-r--r-- roles/xtesting-healthcheck/defaults/main.yaml | 130
-rw-r--r-- roles/xtesting-healthcheck/tasks/launch.yaml | 50
-rw-r--r-- roles/xtesting-healthcheck/tasks/main.yaml | 5
-rw-r--r-- roles/xtesting-healthcheck/tasks/prepare.yaml | 52
-rw-r--r-- roles/xtesting-jumphost/tasks/main.yaml | 101
-rw-r--r-- roles/xtesting-onap-security/tasks/main.yaml | 88
-rw-r--r-- roles/xtesting-onap-security/templates/env-os.j2 | 11
-rw-r--r-- roles/xtesting-onap-vnf/tasks/launch.yaml | 75
-rw-r--r-- roles/xtesting-onap-vnf/tasks/main.yaml | 5
-rw-r--r-- roles/xtesting-onap-vnf/tasks/prepare.yaml | 57
-rw-r--r-- roles/xtesting-onap-vnf/tasks/prepare_cnf_test.yaml | 86
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm-service-istanbul.yaml.j2 | 2
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm-service-jakarta.yaml.j2 | 40
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm-service-master.yaml.j2 | 40
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm_macro-service-istanbul.yaml.j2 | 2
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm_macro-service-jakarta.yaml.j2 | 55
-rw-r--r-- roles/xtesting-onap-vnf/templates/basic_vm_macro-service-master.yaml.j2 | 55
-rw-r--r-- roles/xtesting-onap-vnf/templates/clearwater-ims-service.yaml.j2 | 55
-rw-r--r-- roles/xtesting-onap-vnf/templates/env-os.j2 | 14
-rw-r--r-- roles/xtesting-onap-vnf/templates/settings.py.j2 | 63
-rw-r--r-- roles/xtesting-pages/tasks/main.yaml | 69
51 files changed, 2523 insertions(+), 0 deletions(-)
diff --git a/roles/legal-tern/tasks/main.yaml b/roles/legal-tern/tasks/main.yaml
new file mode 100644
index 0000000..534672f
--- /dev/null
+++ b/roles/legal-tern/tasks/main.yaml
@@ -0,0 +1,90 @@
+---
+- name: "Install tern prerequisites"
+ become: yes
+ ansible.builtin.apt:
+ name:
+ - attr
+ - fuse-overlayfs
+ - python3-venv
+ - jq
+ state: latest
+
+- name: "Install pip dependencies"
+ become: yes
+ ansible.builtin.pip:
+ name:
+ - wheel
+ - lftools
+ state: latest
+
+- name: "Set variables for tern run"
+ ansible.builtin.set_fact:
+ tern_output: "/tmp/tern/archives/{{ run_tiers }}/{{ run_type }}"
+ tern_archives: "/tmp/tern"
+ tern_k8_namespace: "onap"
+
+- name: "Delete directory with/for results"
+ become: yes
+ ansible.builtin.file:
+ path: "{{ tern_output }}"
+ state: absent
+
+- name: "Copy tern script"
+ ansible.builtin.copy:
+ src: scripts/run_tern.sh
+ dest: "{{ ansible_user_dir }}/run_tern.sh"
+ mode: '500'
+
+- name: "Copy netrc for lftool"
+ ansible.builtin.template:
+ src: .netrc.j2
+ dest: "{{ ansible_user_dir }}/.netrc"
+ mode: 0600
+
+- name: "Create directory for results"
+ become: yes
+ ansible.builtin.file:
+ path: "{{ tern_output }}"
+ state: directory
+ mode: '0700'
+ recurse: yes
+ owner: "{{ ansible_user }}"
+
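+# Assumption: tern uses fuse-overlayfs (installed above) to unpack container
+# image layers without root, so the CI user is added to a dedicated fuse group.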
+- name: "Add fuse group"
+ become: yes
+ ansible.builtin.group:
+ name: "fuse"
+ state: present
+ system: yes
+
+- name: "Add user to fuse group"
+ become: yes
+ ansible.builtin.user:
+ append: yes
+ groups: fuse
+ user: "{{ ansible_user }}"
+
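+# The analysis can run for days, so it is launched fire-and-forget
+# (async + poll: 0); the async_status task below only checks that the job
+# actually started, it does not wait for completion.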
+- name: "Launch tern analysis & push artifacts"
+ become: no
+ ansible.builtin.shell:
+ cmd: "{{ ansible_user_dir }}/run_tern.sh > {{ tern_output }}/run_tern.log"
+ chdir: "{{ tern_output }}"
+ environment:
+ LF_RESULTS_BACKUP: '{{ lf_results_backup }}'
+ POD: '{{ pod }}'
+ CI_PIPELINE_CREATED_AT: '{{ ci_pipeline_created_at }}'
+ TERN_LOCATION: '{{ ansible_user_dir }}'
+ K8NAMESPACE: '{{ tern_k8_namespace }}'
+ ARCHIVES_LOCATION: '{{ tern_archives }}'
+ async: 259200 # 60*60*24*3 = 3 days
+ poll: 0 # don't wait for it
+ register: tern_analysis
+
+- name: "Check if tern analysis is running"
+ become: no
+ async_status:
+ jid: "{{ tern_analysis.ansible_job_id }}"
+ register: tern_result
+ until: tern_result.started
+ retries: 10
+ delay: 10
diff --git a/roles/legal-tern/templates/.netrc.j2 b/roles/legal-tern/templates/.netrc.j2
new file mode 100644
index 0000000..e4c22e3
--- /dev/null
+++ b/roles/legal-tern/templates/.netrc.j2
@@ -0,0 +1,3 @@
+machine nexus.onap.org
+login onap-integration
+password {{ lf_it_nexus_pwd }}
diff --git a/roles/onap-chaos-tests/tasks/cassandra.yaml b/roles/onap-chaos-tests/tasks/cassandra.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/cassandra.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/tasks/kafka.yaml b/roles/onap-chaos-tests/tasks/kafka.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/kafka.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/tasks/main.yaml b/roles/onap-chaos-tests/tasks/main.yaml
new file mode 100644
index 0000000..2fc48d2
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/main.yaml
@@ -0,0 +1,22 @@
+---
+# - name: Check the chaos target is defined
+# ansible.builtin.fail:
+# msg: "You must specify a chaos target (node_drain,...)"
+# when: experiment_name is not defined
+# tags: init_check
+
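+# Every scenario is tagged so a single experiment can be selected at run time,
+# e.g. (hypothetical invocation): ansible-playbook <playbook> --tags node-drain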
+- import_tasks: prepare.yaml
+ tags: prepare
+
+- import_tasks: node-drain.yaml
+ tags: node-drain
+
+- import_tasks: node-cpu-hog.yaml
+ tags: node-cpu-hog
+
+- import_tasks: node-memory-hog.yaml
+ tags: node-memory-hog
+
+- import_tasks: pod-delete-aai.yaml
+ tags: aai
+
diff --git a/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml b/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml
new file mode 100644
index 0000000..c70f339
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-cpu-hog.yaml
@@ -0,0 +1,86 @@
+---
+- name: create directory for cpu hog scenario
+ ansible.builtin.file:
+ path: /tmp/resiliency/node-cpu-hog
+ state: directory
+ mode: '0755'
+ tags: prepare
+
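+# The "! <label>" selector matches nodes that do not carry the master role
+# label, i.e. the worker (compute) nodes.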
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+ tags: prepare
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+ tags: prepare
+
+- name: Set Compute for the node cpu hog
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+ tags: prepare
+
+- name: Prepare rbac file for node cpu hog experiment
+ ansible.builtin.template:
+ src: node-cpu-hog-rbac.yaml.j2
+ dest: /tmp/resiliency/node-cpu-hog/node-cpu-hog-rbac.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Prepare chaos file for node cpu hog experiment
+ ansible.builtin.template:
+ src: node-cpu-hog-chaos.yaml.j2
+ dest: /tmp/resiliency/node-cpu-hog/node-cpu-hog-chaos.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Apply node cpu hog rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-cpu-hog/node-cpu-hog-rbac.yaml
+
+- name: Apply node cpu hog experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-cpu-hog/node-cpu-hog-chaos.yaml
+
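+# The YAML anchors (&chaos_test / &chaos_result) reuse the same condition for
+# changed_when and until, so each task polls the ChaosEngine/ChaosResult
+# object until the experiment reports a final status.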
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-cpu
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-cpu-node-cpu-hog
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ (chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass" or
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Fail" )
+ until: *chaos_result
+ retries: 5
+ delay: 20
+- name: Print the chaos result verdict
+ ansible.builtin.debug:
+ msg: " The test is {{ chaosresult_drain.resources[0].status.experimentStatus.verdict}}-ed"
diff --git a/roles/onap-chaos-tests/tasks/node-drain.yaml b/roles/onap-chaos-tests/tasks/node-drain.yaml
new file mode 100644
index 0000000..84d53c4
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-drain.yaml
@@ -0,0 +1,105 @@
+---
+- name: create directory for drain scenario
+ ansible.builtin.file:
+ path: /tmp/resiliency/node-drain
+ state: directory
+ mode: '0755'
+ tags: prepare
+
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+ tags: prepare
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+ tags: prepare
+
+- name: Set Compute for the drain chaos
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+ tags: prepare
+
+- name: Prepare rbac file for drain experiment
+ ansible.builtin.template:
+ src: node-drain-rbac.yaml.j2
+ dest: /tmp/resiliency/node-drain/node-drain-rbac.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Prepare chaos file for drain experiment
+ ansible.builtin.template:
+ src: node-drain-chaos.yaml.j2
+ dest: /tmp/resiliency/node-drain/node-drain-chaos.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Apply drain rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-drain/node-drain-rbac.yaml
+ tags: apply
+
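+# Cordon the target node before the experiment so no new pods are scheduled on
+# it during the drain; it is uncordoned again at the end of this tasks file.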
+- name: Cordon the Chosen node
+ ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-drain/node-drain-chaos.yaml
+ tags: apply
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-drain
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+ tags: wait
+# - name: Print the chaos engine object
+# ansible.builtin.debug:
+# msg: "{{chaosengine_status.resources[0].status.engineStatus }}"
+
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-drain-node-drain
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ (chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass" or
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Fail" )
+ until: *chaos_result
+ retries: 5
+ delay: 10
+ tags: wait
+
+- name: Print the chaos result object
+ ansible.builtin.debug:
+ msg: "{{ chaosresult_drain.resources[0].status.experimentStatus.verdict}}"
+
+- name: Print the chaos result verdict
+ ansible.builtin.debug:
+ msg: " The test is {{ chaosresult_drain.resources[0].status.experimentStatus.verdict}}-ed"
+
+- name: Uncordon the Chosen node
+ ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/node-memory-hog.yaml b/roles/onap-chaos-tests/tasks/node-memory-hog.yaml
new file mode 100644
index 0000000..82ad014
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/node-memory-hog.yaml
@@ -0,0 +1,86 @@
+---
+- name: create directory for memory hog scenario
+ ansible.builtin.file:
+ path: /tmp/resiliency/node-memory-hog
+ state: directory
+ mode: '0755'
+ tags: prepare
+
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+ tags: prepare
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+ tags: prepare
+
+- name: Set Compute for the node memory hog
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+ tags: prepare
+
+- name: Prepare rbac file for node memory hog experiment
+ ansible.builtin.template:
+ src: node-memory-hog-rbac.yaml.j2
+ dest: /tmp/resiliency/node-memory-hog/node-memory-hog-rbac.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Prepare chaos file for node memory hog experiment
+ ansible.builtin.template:
+ src: node-memory-hog-chaos.yaml.j2
+ dest: /tmp/resiliency/node-memory-hog/node-memory-hog-chaos.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Apply node memory hog rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-memory-hog/node-memory-hog-rbac.yaml
+
+- name: Apply node memory hog experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/node-memory-hog/node-memory-hog-chaos.yaml
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-memory
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-memory-node-memory-hog
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ (chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass" or
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Fail" )
+ until: *chaos_result
+ retries: 5
+ delay: 20
+- name: Print the chaos result verdict
+ ansible.builtin.debug:
+ msg: " The test is {{ chaosresult_drain.resources[0].status.experimentStatus.verdict}}-ed"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml b/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml
new file mode 100644
index 0000000..87fbb17
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-aai.yaml
@@ -0,0 +1,103 @@
+---
+- name: create directory for pod delete aai scenario
+ ansible.builtin.file:
+ path: /tmp/resiliency/pod-delete-aai
+ state: directory
+ mode: '0755'
+ tags: prepare
+
+- name: Get AAI deployment names
+ ansible.builtin.shell: kubectl get deployments -n {{ onap_namespace }} | grep aai | awk '{print $1}'
+ register: deployments
+ tags: prepare
+
+- name: print grep output
+ ansible.builtin.debug:
+ msg: "{{deployments.stdout_lines}}"
+
+
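+# The ChaosEngine selects its targets via the component=onap-aai label, so that
+# label is first added to every AAI deployment and its pod template.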
+- name: add labels to deployments
+ community.kubernetes.k8s:
+ state: present
+ definition:
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: "{{ item }}"
+ namespace: "{{ onap_namespace }}"
+ labels:
+ component: onap-aai
+ spec:
+ template:
+ metadata:
+ labels:
+ component: onap-aai
+ loop: "{{deployments.stdout_lines}}"
+ tags: prepare
+
+- name: Get AAI pod names
+ ansible.builtin.shell: kubectl get pods -n {{ onap_namespace }} | grep aai | awk '{print $1}'
+ register: pod_list
+ tags: prepare
+
+- name: print pod list
+ ansible.builtin.debug:
+ msg: "{{pod_list.stdout_lines | join(', ') }}"
+
+- name: Prepare rbac file for pod delete aai experiment
+ ansible.builtin.template:
+ src: pod-delete-rbac.yaml.j2
+ dest: /tmp/resiliency/pod-delete-aai/pod-delete-aai-rbac.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Prepare chaos file for pod delete aai experiment
+ ansible.builtin.template:
+ src: pod-delete-aai-chaos.yaml.j2
+ dest: /tmp/resiliency/pod-delete-aai/pod-delete-aai-chaos.yaml
+ mode: 0600
+ tags: prepare
+
+- name: Apply pod delete aai rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/pod-delete-aai/pod-delete-aai-rbac.yaml
+
+- name: Apply pod delete aai experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/pod-delete-aai/pod-delete-aai-chaos.yaml
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: aai-chaos
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: aai-chaos-pod-delete
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_aai
+ changed_when:
+ &chaos_result chaosresult_aai is defined and
+ (chaosresult_aai.resources[0].status.experimentStatus.verdict == "Pass" or
+ chaosresult_aai.resources[0].status.experimentStatus.verdict == "Fail" )
+ until: *chaos_result
+ retries: 5
+ delay: 20
+- name: Print the chaos result verdict
+ ansible.builtin.debug:
+ msg: " The test is {{ chaosresult_aai.resources[0].status.experimentStatus.verdict}}-ed"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml b/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-sdc.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+ ansible.builtin.template:
+ src: drain-rbac.yaml.j2
+ dest: /tmp/drain-rbac.yaml
+ mode: 0600
+
+- name: Prepare chaos file for drain experiment
+ ansible.builtin.template:
+ src: drain-chaos.yaml.j2
+ dest: /tmp/drain-chaos.yaml
+ mode: 0600
+
+- name: Apply drain rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+ ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-node-drain
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+ until: *chaos_result
+ retries: 5
+ delay: 10
+
+- name: Uncordon the Chosen node
+ ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml b/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-sdnc.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+ ansible.builtin.template:
+ src: drain-rbac.yaml.j2
+ dest: /tmp/drain-rbac.yaml
+ mode: 0600
+
+- name: Prepare chaos file for drain experiment
+ ansible.builtin.template:
+ src: drain-chaos.yaml.j2
+ dest: /tmp/drain-chaos.yaml
+ mode: 0600
+
+- name: Apply drain rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+ ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-node-drain
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+ until: *chaos_result
+ retries: 5
+ delay: 10
+
+- name: Uncordon the Chosen node
+ ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/pod-delete-so.yaml b/roles/onap-chaos-tests/tasks/pod-delete-so.yaml
new file mode 100644
index 0000000..d46fdd1
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/pod-delete-so.yaml
@@ -0,0 +1,76 @@
+---
+- name: Get compute node list
+ community.kubernetes.k8s_info:
+ kind: Node
+ label_selectors: "! node-role.kubernetes.io/master"
+ register: kubernetes_computer_node_list
+
+- name: Set Fact first compute node Internal IP
+ ansible.builtin.set_fact:
+ first_node_ip: "{{ item.address }}"
+ when: "'Hostname' in item.type"
+ with_items:
+ "{{ (kubernetes_computer_node_list.resources | first).status.addresses }}"
+
+- name: Set Compute for the drain chaos
+ ansible.builtin.set_fact:
+ compute_chaos: "{{ first_node_ip }}"
+ when: compute_chaos is not defined
+
+- name: Prepare rbac file for drain experiment
+ ansible.builtin.template:
+ src: drain-rbac.yaml.j2
+ dest: /tmp/drain-rbac.yaml
+ mode: 0600
+
+- name: Prepare chaos file for drain experiment
+ ansible.builtin.template:
+ src: drain-chaos.yaml.j2
+ dest: /tmp/drain-chaos.yaml
+ mode: 0600
+
+- name: Apply drain rbac
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-rbac.yaml
+
+- name: Cordon the Chosen node
+ ansible.builtin.shell: "kubectl cordon {{ compute_chaos }}"
+
+- name: Apply chaos drain experiment
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/drain-chaos.yaml
+
+- name: wait for the end of chaos
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosEngine
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos
+ namespace: "{{ onap_namespace }}"
+ register: chaosengine_status
+ changed_when:
+ &chaos_test chaosengine_status is defined and
+ chaosengine_status.resources[0].status.engineStatus == "completed"
+ until: *chaos_test
+ retries: 5
+ delay: 120
+
+- name: get results
+ run_once: true
+ community.kubernetes.k8s_info:
+ kind: ChaosResult
+ api_version: litmuschaos.io/v1alpha1
+ name: onap-chaos-node-drain
+ namespace: "{{ onap_namespace }}"
+ register: chaosresult_drain
+ changed_when:
+ &chaos_result chaosresult_drain is defined and
+ chaosresult_drain.resources[0].status.experimentStatus.verdict == "Pass"
+ until: *chaos_result
+ retries: 5
+ delay: 10
+
+- name: Uncordon the Chosen node
+ ansible.builtin.shell: "kubectl uncordon {{ compute_chaos }}"
diff --git a/roles/onap-chaos-tests/tasks/prepare.yaml b/roles/onap-chaos-tests/tasks/prepare.yaml
new file mode 100644
index 0000000..827156e
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/prepare.yaml
@@ -0,0 +1,38 @@
+---
+- name: create directory for resiliency
+ ansible.builtin.file:
+ path: /tmp/resiliency
+ state: directory
+ mode: '0755'
+
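+# Install the Litmus chaos operator and the generic experiment definitions that
+# the individual chaos scenarios rely on.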
+- name: Download Litmus manifest.
+ ansible.builtin.get_url:
+ url: https://litmuschaos.github.io/litmus/litmus-operator-v1.13.5.yaml
+ dest: /tmp/resiliency/litmus_manifest.yaml
+ mode: '0664'
+
+- name: Apply Litmus manifest
+ community.kubernetes.k8s:
+ state: present
+ src: /tmp/resiliency/litmus_manifest.yaml
+
+- name: Ensure litmus Chaos Operator is running
+ ansible.builtin.shell: "kubectl get pods -n litmus"
+
+- name: Download generic experiments manifest
+ ansible.builtin.get_url:
+ url: https://hub.litmuschaos.io/api/chaos/1.13.5?file=charts/generic/experiments.yaml
+ dest: /tmp/resiliency/litmus_experiments_manifest.yaml
+ mode: '0664'
+
+- name: Apply Litmus Experiment manifest
+ community.kubernetes.k8s:
+ namespace: "{{ onap_namespace }}"
+ state: present
+ src: /tmp/resiliency/litmus_experiments_manifest.yaml
+
+- name: "Copy resiliency script"
+ ansible.builtin.copy:
+ src: scripts/run_chaos_tests.sh
+ dest: /tmp/resiliency
+ mode: '755'
diff --git a/roles/onap-chaos-tests/tasks/reporting.yaml b/roles/onap-chaos-tests/tasks/reporting.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/onap-chaos-tests/tasks/reporting.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2
new file mode 100644
index 0000000..ce72420
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-cpu-hog-chaos.yaml.j2
@@ -0,0 +1,30 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+ name: node-cpu-hog
+ namespace: {{ onap_namespace }}
+spec:
+ # It can be true/false
+ annotationCheck: 'false'
+ # It can be active/stop
+ engineState: 'active'
+ #ex. values: ns1:name=percona,ns2:run=nginx
+ auxiliaryAppInfo: ''
+ chaosServiceAccount: node-cpu-hog-sa
+ # It can be delete/retain
+ jobCleanUpPolicy: 'delete'
+ experiments:
+ - name: node-cpu-hog
+ spec:
+ components:
+ env:
+ # set chaos duration (in sec) as desired
+ - name: TOTAL_CHAOS_DURATION
+ value: '120'
+
+ - name: NODE_CPU_CORE
+ value: '6'
+
+ # ENTER THE COMMA SEPARATED TARGET NODES NAME
+ - name: TARGET_NODES
+ value: {{ compute_chaos }}
diff --git a/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2
new file mode 100644
index 0000000..e240b5e
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-cpu-hog-rbac.yaml.j2
@@ -0,0 +1,49 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-cpu-hog-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: node-cpu-hog-sa
+ app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: node-cpu-hog-sa
+ labels:
+ name: node-cpu-hog-sa
+ app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+ resources: ["pods","events"]
+ verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+ resources: ["pods/exec","pods/log"]
+ verbs: ["list","get","create"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["litmuschaos.io"]
+ resources: ["chaosengines","chaosexperiments","chaosresults"]
+ verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-cpu-hog-sa
+ labels:
+ name: node-cpu-hog-sa
+ app.kubernetes.io/part-of: litmus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: node-cpu-hog-sa
+subjects:
+- kind: ServiceAccount
+ name: node-cpu-hog-sa
+ namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2
new file mode 100644
index 0000000..a90a5f1
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-drain-chaos.yaml.j2
@@ -0,0 +1,28 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+ name: node-drain
+ namespace: {{ onap_namespace }}
+spec:
+ # It can be true/false
+ annotationCheck: 'false'
+ # It can be active/stop
+ engineState: 'active'
+ #ex. values: ns1:name=percona,ns2:run=onap
+ auxiliaryAppInfo: ''
+ chaosServiceAccount: node-drain-sa
+ # It can be delete/retain
+ jobCleanUpPolicy: 'delete'
+ experiments:
+ - name: node-drain
+ spec:
+ components:
+ # nodeSelector:
+ # # provide the node labels
+ # kubernetes.io/hostname: 'node02'
+ env:
+ # enter the target node name
+ - name: TARGET_NODE
+ value: {{ compute_chaos }}
+ - name: TOTAL_CHAOS_DURATION
+ value: 120s
diff --git a/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2
new file mode 100644
index 0000000..d7e4a78
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-drain-rbac.yaml.j2
@@ -0,0 +1,53 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-drain-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: node-drain-sa
+ app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: node-drain-sa
+ labels:
+ name: node-drain-sa
+ app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+ resources: ["pods","events"]
+ verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+ resources: ["pods/exec","pods/log","pods/eviction"]
+ verbs: ["list","get","create"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets"]
+ verbs: ["list","get","delete"]
+- apiGroups: ["litmuschaos.io"]
+ resources: ["chaosengines","chaosexperiments","chaosresults"]
+ verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["patch","get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-drain-sa
+ labels:
+ name: node-drain-sa
+ app.kubernetes.io/part-of: litmus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: node-drain-sa
+subjects:
+- kind: ServiceAccount
+ name: node-drain-sa
+ namespace: {{ onap_namespace }}
+
diff --git a/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2
new file mode 100644
index 0000000..fb39b9b
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-memory-hog-chaos.yaml.j2
@@ -0,0 +1,32 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+ name: node-memory-hog
+ namespace: {{ onap_namespace }}
+spec:
+ # It can be true/false
+ annotationCheck: 'false'
+ # It can be active/stop
+ engineState: 'active'
+ #ex. values: ns1:name=percona,ns2:run=nginx
+ auxiliaryAppInfo: ''
+ chaosServiceAccount: node-memory-hog-sa
+ # It can be delete/retain
+ jobCleanUpPolicy: 'delete'
+ experiments:
+ - name: node-memory-hog
+ spec:
+ components:
+ env:
+ # set chaos duration (in sec) as desired
+ - name: TOTAL_CHAOS_DURATION
+ value: '120'
+
+ ## Specify the size as percent of total node capacity Ex: '30'
+ ## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
+ - name: MEMORY_CONSUMPTION_PERCENTAGE
+ value: '30'
+
+ # ENTER THE COMMA SEPARATED TARGET NODES NAME
+ - name: TARGET_NODES
+ value: {{ compute_chaos }}
diff --git a/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2
new file mode 100644
index 0000000..9b21e05
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/node-memory-hog-rbac.yaml.j2
@@ -0,0 +1,49 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: node-memory-hog-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: node-memory-hog-sa
+ app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: node-memory-hog-sa
+ labels:
+ name: node-memory-hog-sa
+ app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+ resources: ["pods","events"]
+ verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+ resources: ["pods/exec","pods/log"]
+ verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["litmuschaos.io"]
+ resources: ["chaosengines","chaosexperiments","chaosresults"]
+ verbs: ["create","list","get","patch","update"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get","list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: node-memory-hog-sa
+ labels:
+ name: node-memory-hog-sa
+ app.kubernetes.io/part-of: litmus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: node-memory-hog-sa
+subjects:
+- kind: ServiceAccount
+ name: node-memory-hog-sa
+ namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2
new file mode 100644
index 0000000..1c6eb01
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-aai-chaos.yaml.j2
@@ -0,0 +1,35 @@
+apiVersion: litmuschaos.io/v1alpha1
+kind: ChaosEngine
+metadata:
+ name: aai-chaos
+ namespace: {{ onap_namespace }}
+spec:
+ appinfo:
+ appns: {{ onap_namespace }}
+ applabel: 'component=onap-aai'
+ appkind: 'deployment'
+ # It can be active/stop
+ engineState: 'active'
+ chaosServiceAccount: pod-delete-sa
+ experiments:
+ - name: pod-delete
+ spec:
+ components:
+ env:
+ - name: TARGET_PODS
+ value: {{ pod_list.stdout_lines | join(', ') }}
+ # set chaos duration (in sec) as desired
+ - name: TOTAL_CHAOS_DURATION
+ value: '30'
+
+ # set chaos interval (in sec) as desired
+ - name: CHAOS_INTERVAL
+ value: '10'
+
+ # pod failures without '--force' & default terminationGracePeriodSeconds
+ - name: FORCE
+ value: 'true'
+
+ ## percentage of total pods to target
+ - name: PODS_AFFECTED_PERC
+ value: '100'
diff --git a/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2
new file mode 100644
index 0000000..2b85d42
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-aai-rbac.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+ resources: ["pods","events"]
+ verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+ resources: ["pods/exec","pods/log","replicationcontrollers"]
+ verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+ resources: ["deployments","statefulsets","daemonsets","replicasets"]
+ verbs: ["list","get"]
+- apiGroups: ["apps.openshift.io"]
+ resources: ["deploymentconfigs"]
+ verbs: ["list","get"]
+- apiGroups: ["argoproj.io"]
+ resources: ["rollouts"]
+ verbs: ["list","get"]
+- apiGroups: ["litmuschaos.io"]
+ resources: ["chaosengines","chaosexperiments","chaosresults"]
+ verbs: ["create","list","get","patch","update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: pod-delete-sa
+subjects:
+- kind: ServiceAccount
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2 b/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2
new file mode 100644
index 0000000..2b85d42
--- /dev/null
+++ b/roles/onap-chaos-tests/templates/pod-delete-rbac.yaml.j2
@@ -0,0 +1,57 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+rules:
+- apiGroups: [""]
+ resources: ["pods","events"]
+ verbs: ["create","list","get","patch","update","delete","deletecollection"]
+- apiGroups: [""]
+ resources: ["pods/exec","pods/log","replicationcontrollers"]
+ verbs: ["create","list","get"]
+- apiGroups: ["batch"]
+ resources: ["jobs"]
+ verbs: ["create","list","get","delete","deletecollection"]
+- apiGroups: ["apps"]
+ resources: ["deployments","statefulsets","daemonsets","replicasets"]
+ verbs: ["list","get"]
+- apiGroups: ["apps.openshift.io"]
+ resources: ["deploymentconfigs"]
+ verbs: ["list","get"]
+- apiGroups: ["argoproj.io"]
+ resources: ["rollouts"]
+ verbs: ["list","get"]
+- apiGroups: ["litmuschaos.io"]
+ resources: ["chaosengines","chaosexperiments","chaosresults"]
+ verbs: ["create","list","get","patch","update"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
+ labels:
+ name: pod-delete-sa
+ app.kubernetes.io/part-of: litmus
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: pod-delete-sa
+subjects:
+- kind: ServiceAccount
+ name: pod-delete-sa
+ namespace: {{ onap_namespace }}
\ No newline at end of file
diff --git a/roles/onap-stability-tests/tasks/main.yaml b/roles/onap-stability-tests/tasks/main.yaml
new file mode 100644
index 0000000..09c03f5
--- /dev/null
+++ b/roles/onap-stability-tests/tasks/main.yaml
@@ -0,0 +1,61 @@
+---
+- name: "Install stability tests prerequisites"
+ become: yes
+ ansible.builtin.apt:
+ name:
+ - python3-venv
+ - libssl-dev
+ state: latest
+
+- name: "Set variables for stability tests run"
+ ansible.builtin.set_fact:
+ stability_tests_output: "/tmp/stability/archives"
+ stability_tests_archives: "/tmp/stability"
+
+- name: "Delete directory with/for results"
+ ansible.builtin.file:
+ path: "{{ stability_tests_output }}"
+ state: absent
+
+- name: "Copy stability tests script"
+ ansible.builtin.copy:
+ src: scripts/run_stability_tests.sh
+ dest: "{{ ansible_user_dir }}/run_stability_tests.sh"
+ mode: '500'
+
+- name: "Copy netrc for lftool"
+ ansible.builtin.template:
+ src: .netrc.j2
+ dest: "{{ ansible_user_dir }}/.netrc"
+ mode: 0600
+
+- name: "Create directory for stability test execution"
+ ansible.builtin.file:
+ path: "{{ stability_tests_output }}"
+ state: directory
+ mode: '0755'
+ recurse: yes
+ owner: "{{ ansible_user }}"
+
+- name: "Launch stability tests & push artifacts"
+ ansible.builtin.shell:
+ cmd: "{{ ansible_user_dir }}/run_stability_tests.sh > {{ stability_tests_output }}/run_stability.log"
+ chdir: "{{ ansible_user_dir }}"
+ environment:
+ LF_RESULTS_BACKUP: '{{ lf_results_backup }}'
+ POD: '{{ pod }}'
+ CI_PIPELINE_CREATED_AT: '{{ ci_pipeline_created_at }}'
+ STABILITY_TESTS_LOCATION: '{{ stability_tests_output }}'
+ ARCHIVES_LOCATION: '{{ stability_tests_archives }}'
+ async: 259200 # 60*60*24*3 = 3 days
+ poll: 0 # don't wait for it
+ register: stability_tests
+
+- name: "Check if stability tests are running"
+ become: no
+ async_status:
+ jid: "{{ stability_tests.ansible_job_id }}"
+ register: stability_tests_result
+ until: stability_tests_result.started
+ retries: 10
+ delay: 10
diff --git a/roles/onap-stability-tests/templates/.netrc.j2 b/roles/onap-stability-tests/templates/.netrc.j2
new file mode 100644
index 0000000..e4c22e3
--- /dev/null
+++ b/roles/onap-stability-tests/templates/.netrc.j2
@@ -0,0 +1,3 @@
+machine nexus.onap.org
+login onap-integration
+password {{ lf_it_nexus_pwd }}
diff --git a/roles/xtesting-healthcheck-k8s-job/defaults/main.yaml b/roles/xtesting-healthcheck-k8s-job/defaults/main.yaml
new file mode 100644
index 0000000..7b9bf68
--- /dev/null
+++ b/roles/xtesting-healthcheck-k8s-job/defaults/main.yaml
@@ -0,0 +1,47 @@
+---
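+# Job manifest rendered to a file and applied by this role's tasks: it runs the
+# internal_check_certs xtesting suite once (backoffLimit: 0) and stores results
+# on a hostPath volume.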
+internal_check_certs_deployment:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ name: "integration-onap-internal-check-certs"
+ namespace: "{{ onap_namespace }}"
+ spec:
+ template:
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: xtesting-onap
+ image: "{{ docker_health_k8s_image }}:{{ testing_container_tag }}"
+ imagePullPolicy: Always
+ command: ["run_tests","-t","internal_check_certs","-r"]
+ env:
+ - name: INSTALLER_TYPE
+ value: "{{ deployment_name }}"
+ - name: TEST_ENVIRONMENT
+ value: internal_job
+ - name: DEPLOY_SCENARIO
+ value: "{{ deploy_scenario }}"
+ - name: NODE_NAME
+ value: "{{ node_name }}"
+ - name: TEST_DB_URL
+ value:
+ "{{ test_result_url }}"
+ - name: BUILD_TAG
+ value: "{{ build_tag }}"
+ - name: TAG
+ value: "{{ run_type }}"
+ volumeMounts:
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: job-save-results
+ mountPath:
+ /var/lib/xtesting/results/
+ volumes:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: job-save-results
+ hostPath:
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ backoffLimit: 0
diff --git a/roles/xtesting-healthcheck-k8s-job/tasks/main.yaml b/roles/xtesting-healthcheck-k8s-job/tasks/main.yaml
new file mode 100644
index 0000000..6ed429a
--- /dev/null
+++ b/roles/xtesting-healthcheck-k8s-job/tasks/main.yaml
@@ -0,0 +1,51 @@
+---
+# tasks file for functest (tests)
+
+##
+- block:
+ - name: create directories as root
+ become: yes
+ file:
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ state: directory
+ mode: 0755
+
+ - name: Delete old logs
+ become: yes
+ file:
+ state: absent
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+ - name: Delete healthcheck job
+ k8s:
+ state: absent
+ api: batch/v1
+ kind: Job
+ namespace: "{{ onap_namespace }}"
+ name: "integration-onap-internal-check-certs"
+ kubeconfig: "{{ global_local_path }}/{{ kube_conf_file }}"
+
+ - name: save internal check certs deployment to file
+ copy:
+ content: "{{ internal_check_certs_deployment | to_nice_yaml }}"
+ dest:
+ "{{ k8s_job__dir_path }}/healthcheck-internal-check-certs.yaml"
+
+ - name: start internal check certs job
+ k8s:
+ state: present
+ src: "{{ k8s_job__dir_path }}/healthcheck-internal-check-certs.yaml"
+ kubeconfig: "{{ global_local_path }}/{{ kube_conf_file }}"
+ - name: Wait until the healthcheck test is "completed"
+ wait_for:
+ timeout: "{{ run_timeout }}"
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}/xtesting.log"
+ search_regex: Result.EX_
+ always:
+ - name: "save healthcheck {{ run_type }} results for artifacts"
+ synchronize:
+ src: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ use_ssh_args: true
+ dest: "./results/{{ run_tiers }}"
+ mode: pull
+ ignore_errors: True
diff --git a/roles/xtesting-healthcheck-k8s/defaults/main.yaml b/roles/xtesting-healthcheck-k8s/defaults/main.yaml
new file mode 100644
index 0000000..02190ea
--- /dev/null
+++ b/roles/xtesting-healthcheck-k8s/defaults/main.yaml
@@ -0,0 +1,9 @@
+---
+postgres_secret_name: postgres-postgresql
+postgres_user: helm
+postgres_db: helm
+postgres_port: 30347
+postgres_url: "postgresql://{{
+ postgres_svc }}.{{ postgres_namespace }}:{{ postgres_port }}/{{
+ postgres_db }}?user={{ postgres_user }}&password={{
+ postgres_password }}&sslmode=disable" \ No newline at end of file
diff --git a/roles/xtesting-healthcheck-k8s/tasks/main.yaml b/roles/xtesting-healthcheck-k8s/tasks/main.yaml
new file mode 100644
index 0000000..2f727e9
--- /dev/null
+++ b/roles/xtesting-healthcheck-k8s/tasks/main.yaml
@@ -0,0 +1,126 @@
+---
+# tasks file for functest (tests)
+
+##
+- block:
+ - name: Clean directory
+ ansible.builtin.file:
+ path: "{{ exec_local_path }}"
+ state: absent
+
+ - name: Create directory
+ ansible.builtin.file:
+ path: "{{ exec_local_path }}"
+ state: directory
+ mode: 0755
+
+ - name: check helm version
+ command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+ register: helm_version
+
+ # Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+ # for version 2.
+ - name: store helm version
+ ansible.builtin.set_fact:
+ helmv3: "{{ ('<no' in helm_version.stdout) | ternary(false, true) }}"
+
+ - name: retrieve helm postgres secret
+ community.kubernetes.k8s_info:
+ api_version: v1
+ kind: Secret
+ name: "{{ postgres_secret_name }}"
+ namespace: "{{ postgres_namespace }}"
+ register: postgres_secrets
+ when: helmv3 | bool and helmv3_use_sql | bool
+
+ - name: retrieve helm postgres password
+ set_fact:
+ postgres_password: "{{
+ postgres_secrets.resources[0].data['postgresql-password'] | b64decode }}"
+ when: helmv3 | bool and helmv3_use_sql | bool
+
+ - name: generate fake postgres_url
+ set_fact:
+ postgres_url: ""
+ when: not helmv3_use_sql | bool
+
+ - name: Create env file
+ ansible.builtin.template:
+ src: env-os.j2
+ dest: "{{ exec_local_path }}/env"
+ mode: "0644"
+
+ - name: create directories as root
+ become: yes
+ ansible.builtin.file:
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ state: directory
+ mode: 0755
+
+ - name: Delete old logs
+ become: yes
+ ansible.builtin.file:
+ state: absent
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+ - name: set helm deploy log folder
+ ansible.builtin.set_fact:
+ helm_deploy_logs_path: "{{ helmv3 | bool | ternary(
+ helm3_deploy_logs_path, helm2_deploy_logs_path) }}"
+
+ - name: "clean {{ docker_healthcheck_k8s_name }} docker"
+ community.general.docker_container:
+ name: "{{ docker_healthcheck_k8s_name }}"
+ state: absent
+ force_kill: yes
+
+ - name: generate pre command to run
+ ansible.builtin.set_fact:
+ command: chmod 700 /root/.kube && chmod 600 /root/.kube/config
+
+ - name: generate command to run
+ ansible.builtin.set_fact:
+ command: "{{ command }} && run_tests --test all --report"
+
+ - name: "launch {{ docker_healthcheck_k8s_name }} docker"
+ community.general.docker_container:
+ container_default_behavior: no_defaults
+ name: "{{ docker_healthcheck_k8s_name }}"
+ image: "{{ docker_health_k8s_image }}:{{ docker_health_k8s_version }}"
+ env_file: "{{ exec_local_path }}/env"
+ state: started
+ command: "/bin/bash -c '{{ command }}'"
+ recreate: yes
+ volumes: "{{ volumes_healthcheck_k8s }}"
+ etc_hosts: "{{ etc_hosts }}"
+ detach: yes
+ pull: yes
+ keep_volumes: no
+
+ - name: wait for test docker to be finished
+ community.docker.docker_container_info:
+ name: "{{ docker_healthcheck_k8s_name }}"
+ register: container_info
+ until: container_info.container.State.Status == "exited"
+ retries: "{{ run_timeout }}"
+ delay: 1
+
+ - name: "{{ docker_healthcheck_k8s_name }} has failed"
+ ansible.builtin.fail:
+ msg: "The test {{ docker_healthcheck_k8s_name }} has failed"
+ when: container_info.container.State.ExitCode != 0
+ always:
+ - name: "save {{ docker_healthcheck_k8s_name }} results for artifacts"
+ ansible.posix.synchronize:
+ src: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ dest: "./results/{{ run_tiers }}"
+ use_ssh_args: true
+ mode: pull
+ ignore_errors: True
+ when: not use_s3 | bool
+ # temporary workaround until xtesting can handle uploading thousands of
+ # files
+ - name: "push {{ docker_healthcheck_k8s_name }} results to S3"
+ command: "mc cp --recursive {{ res_local_path }}/{{ run_tiers }}/{{
+ run_type }} s3/{{ s3_raw_dst }}"
+ when: use_s3 | bool
diff --git a/roles/xtesting-healthcheck-k8s/templates/env-os.j2 b/roles/xtesting-healthcheck-k8s/templates/env-os.j2
new file mode 100644
index 0000000..eeb2bf2
--- /dev/null
+++ b/roles/xtesting-healthcheck-k8s/templates/env-os.j2
@@ -0,0 +1,17 @@
+INSTALLER_TYPE={{ deployment_name }}
+DEPLOY_SCENARIO={{ scenario }}
+TEST_DB_URL={{ test_result_url }}
+NODE_NAME={{ node_name }}
+BUILD_TAG={{ build_tag }}
+ONAP_IP={{ onap_ip }}
+{% if project != 'oom' %}
+DEPLOY_ENVIRONMENT='gating_component'
+CHART={{ project }}
+{% endif %}
+{% if helmv3 | bool %}
+HELM_BIN=helm3
+{% if helmv3_use_sql | bool %}
+HELM_DRIVER=sql
+HELM_DRIVER_SQL_CONNECTION_STRING={{ postgres_url }}
+{% endif %}
+{% endif %}
\ No newline at end of file
diff --git a/roles/xtesting-healthcheck/defaults/main.yaml b/roles/xtesting-healthcheck/defaults/main.yaml
new file mode 100644
index 0000000..6312a16
--- /dev/null
+++ b/roles/xtesting-healthcheck/defaults/main.yaml
@@ -0,0 +1,130 @@
+---
+tests:
+ - core
+
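+# Kubernetes Secret data must be base64 encoded, hence the b64encode filter on
+# the S3 credentials.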
+healthcheck_secret:
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: s3-keys
+ namespace: "{{ onap_namespace }}"
+ data:
+ access-key: "{{ s3_access_key | string | b64encode }}"
+ secret-key: "{{ s3_secret_key | string | b64encode }}"
+
+healthcheck_deployment:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ name: "integration-onap-{{ run_type }}"
+ namespace: "{{ onap_namespace }}"
+ spec:
+ backoffLimit: 0
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ spec:
+ restartPolicy: Never
+ containers:
+ - name: "functest-onap-{{ run_type }}"
+ image: "{{ testing_container }}:{{ testing_container_tag }}"
+ imagePullPolicy: Always
+ env: "{{ healthcheck_deployment_env }}"
+ volumeMounts:
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: robot-eteshare
+ mountPath: /share/config
+ - name: robot-save-results
+ mountPath: /var/lib/xtesting/results/
+ command:
+ - run_tests
+ args: "{{ args }}"
+ volumes: "{{ job_volumes }}"
+
+healthcheck_deployment_env_legacy:
+ - name: INSTALLER_TYPE
+ value: "{{ deployment_name }}"
+ - name: DEPLOY_SCENARIO
+ value: "{{ deploy_scenario }}"
+ - name: NODE_NAME
+ value: "{{ node_name }}"
+ - name: TEST_DB_URL
+ value: "{{ test_result_url }}"
+ - name: BUILD_TAG
+ value: "{{ build_tag }}"
+ - name: TAG
+ value: "{{ run_type }}"
+
+healthcheck_deployment_env_s3:
+ - name: INSTALLER_TYPE
+ value: "{{ deployment_name }}"
+ - name: DEPLOY_SCENARIO
+ value: "{{ deploy_scenario }}"
+ - name: NODE_NAME
+ value: "{{ node_name }}"
+ - name: TEST_DB_URL
+ value: "{{ test_result_url }}"
+ - name: BUILD_TAG
+ value: "{{ build_tag }}"
+ - name: TAG
+ value: "{{ run_type }}"
+ - name: S3_ENDPOINT_URL
+ value: "{{ s3_internal_url }}"
+ - name: S3_DST_URL
+ value: "{{ s3_dst }}"
+ - name: HTTP_DST_URL
+ value: "{{ s3_http_url_endpoint }}"
+ - name: AWS_ACCESS_KEY_ID
+ valueFrom:
+ secretKeyRef:
+ key: access-key
+ name: s3-keys
+ - name: AWS_SECRET_ACCESS_KEY
+ valueFrom:
+ secretKeyRef:
+ key: secret-key
+ name: s3-keys
+
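+# Depending on use_s3, the job either only reports to the test DB (legacy) or
+# also pushes artifacts to S3-compatible storage; the same switch selects the
+# args and volumes variants below.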
+healthcheck_deployment_env: "{{ use_s3 | bool |
+  ternary(healthcheck_deployment_env_s3, healthcheck_deployment_env_legacy) }}"
+
+args_legacy:
+ - --test
+ - "{{ run_type }}"
+ - --report
+
+args_s3:
+ - --test
+ - "{{ run_type }}"
+ - --push
+ - --report
+
+args: "{{ use_s3 | bool | ternary(args_s3, args_legacy) }}"
+
+volumes_legacy:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: robot-eteshare
+ configMap:
+ name: "{{ onap_namespace }}-{{ robot_configmap }}"
+ defaultMode: 0755
+ - name: robot-save-results
+ hostPath:
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+volumes_s3:
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: robot-eteshare
+ configMap:
+ name: "{{ onap_namespace }}-{{ robot_configmap }}"
+ defaultMode: 0755
+ - name: robot-save-results
+ emptyDir: {}
+
+job_volumes: "{{ use_s3 | bool | ternary(volumes_s3, volumes_legacy) }}"
diff --git a/roles/xtesting-healthcheck/tasks/launch.yaml b/roles/xtesting-healthcheck/tasks/launch.yaml
new file mode 100644
index 0000000..5ec978b
--- /dev/null
+++ b/roles/xtesting-healthcheck/tasks/launch.yaml
@@ -0,0 +1,50 @@
+---
+- block:
+ - name: ensure secret is present
+ community.kubernetes.k8s:
+ state: present
+ src: "{{ k8s_job__dir_path }}/s3-keys-{{ run_type }}.yaml"
+ kubeconfig: "{{ global_local_path }}/{{ kube_conf_file }}"
+ when: use_s3 | bool
+
+ - name: start healthcheck job
+ community.kubernetes.k8s:
+ state: present
+ src: "{{ k8s_job__dir_path }}/healthcheck-{{ run_type }}.yaml"
+ kubeconfig: "{{ global_local_path }}/{{ kube_conf_file }}"
+
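+# Poll the Job status once per second, up to run_timeout times, until it either
+# succeeds or fails; a failure makes the play fail right after.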
+ - name: wait for end of job
+ community.kubernetes.k8s_info:
+ kind: Job
+ name: "integration-onap-{{ run_type }}"
+ namespace: "{{ onap_namespace }}"
+ register: job_info
+ until: (job_info.resources[0].status.succeeded is defined and
+ job_info.resources[0].status.succeeded == 1) or
+ (job_info.resources[0].status.failed is defined and
+ job_info.resources[0].status.failed >= 1)
+ retries: "{{ run_timeout }}"
+ delay: 1
+
+ - name: job has failed
+ ansible.builtin.fail:
+ msg: "The job has failed"
+ when: job_info.resources[0].status.failed is defined and
+ job_info.resources[0].status.failed >= 1
+
+ always:
+ - name: "save healthcheck {{ run_type }} results for artifacts"
+ ansible.posix.synchronize:
+ src: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ use_ssh_args: true
+ dest: "./results/{{ run_tiers }}"
+ mode: pull
+ rsync_opts:
+ - "--exclude=output.xml"
+ ignore_errors: True
+ when: not use_s3 | bool
+
+ - name: remove secret file
+ ansible.builtin.file:
+ path: "{{ k8s_job__dir_path }}/s3-keys-{{ run_type }}.yaml"
+ state: absent
diff --git a/roles/xtesting-healthcheck/tasks/main.yaml b/roles/xtesting-healthcheck/tasks/main.yaml
new file mode 100644
index 0000000..5fb373c
--- /dev/null
+++ b/roles/xtesting-healthcheck/tasks/main.yaml
@@ -0,0 +1,5 @@
+---
+# tasks file for ONAP healthcheck
+
+- import_tasks: prepare.yaml
+- import_tasks: launch.yaml
diff --git a/roles/xtesting-healthcheck/tasks/prepare.yaml b/roles/xtesting-healthcheck/tasks/prepare.yaml
new file mode 100644
index 0000000..712916e
--- /dev/null
+++ b/roles/xtesting-healthcheck/tasks/prepare.yaml
@@ -0,0 +1,52 @@
+---
+- name: create directories
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ mode: 0755
+ loop:
+ - "{{ exec_local_path }}/"
+ - "{{ ansible_user_dir }}/oom/{{ onap_version }}/{{ run_tiers }}/"
+
+- name: create directories as root
+ become: yes
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ mode: 0755
+ loop:
+ - "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+- name: Delete healthcheck job
+ community.kubernetes.k8s:
+ state: absent
+ api: batch/v1
+ kind: Job
+ namespace: "{{ onap_namespace }}"
+ name: "integration-onap-{{ run_type }}"
+ kubeconfig: "{{ global_local_path }}/{{ kube_conf_file }}"
+
+- name: Delete old logs
+ become: yes
+ ansible.builtin.file:
+ state: absent
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+- name: Override docker version for CPS (python3 migration)
+ ansible.builtin.set_fact:
+ testing_container:
+ "nexus3.onap.org:10003/onap/xtesting-smoke-usecases-robot-py3"
+ when: (run_type == "cps-healthcheck") or
+ (run_type == "cps-temporal-healthcheck") or
+ (run_type == "cps-dmi-plugin-healthcheck")
+
+- name: save healthcheck deployment to file
+ ansible.builtin.copy:
+ content: "{{ healthcheck_deployment | to_nice_yaml }}"
+ dest: "{{ k8s_job__dir_path }}/healthcheck-{{ run_type }}.yaml"
+
+- name: save secret for S3 to file
+ ansible.builtin.copy:
+ content: "{{ healthcheck_secret | to_nice_yaml }}"
+ dest: "{{ k8s_job__dir_path }}/s3-keys-{{ run_type }}.yaml"
+ when: use_s3 | bool
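+
+# The manifests rendered above (the healthcheck Job and, when S3 is used, the
+# credentials Secret) are applied by tasks/launch.yaml, which also removes the
+# secret file once the job has finished.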
diff --git a/roles/xtesting-jumphost/tasks/main.yaml b/roles/xtesting-jumphost/tasks/main.yaml
new file mode 100644
index 0000000..0fa5e01
--- /dev/null
+++ b/roles/xtesting-jumphost/tasks/main.yaml
@@ -0,0 +1,101 @@
+---
+# tasks file for roles/xtesting-jumphost
+
+##
+# Install deps and docker
+##
+- name: Install needed packages
+ become: "yes"
+ ansible.builtin.apt:
+ name: "{{ apt_packages }}"
+ state: present
+ update_cache: yes
+
+- name: install pip dependencies
+ become: "yes"
+ ansible.builtin.pip:
+ name: "{{ item }}"
+ state: latest
+ loop: "{{ pip_packages }}"
+
+- name: retrieve linux type
+ ansible.builtin.set_fact:
+ distri: "{{ ansible_distribution.lower() }}"
+
+- name: retrieve docker apt key
+ ansible.builtin.apt_key:
+ url: "https://download.docker.com/linux/{{ distri }}/gpg"
+ id: 0EBFCD88
+ state: present
+
+- name: retrieve linux release
+ ansible.builtin.set_fact:
+ release: "{{ ansible_distribution_release.lower() }}"
+
+- name: add docker repo
+ ansible.builtin.apt_repository:
+ repo: >
+ deb [arch=amd64] https://download.docker.com/linux/{{ distri }}
+ {{ release }} stable
+ state: present
+
+- name: Wait for automatic system updates
+ become: yes
+ shell:
+ "if [ -e /bin/fuser ]; then while sudo fuser /var/lib/dpkg/lock >/dev/null \
+ 2>&1; do sleep 1; done; fi;"
+ changed_when: false
+
+- name: install docker
+ become: yes
+ ansible.builtin.apt:
+ update_cache: "yes"
+ name: "docker-ce"
+ state: present
+
+- name: add login user to docker group
+ become: true
+ ansible.builtin.user:
+ name: "{{ ansible_user }}"
+ groups: docker
+ append: yes
+
+- name: create xtesting directory
+ ansible.builtin.file:
+ path: "{{ global_local_path }}"
+ state: directory
+ mode: 0755
+
+- name: copy kube config file
+ ansible.builtin.copy:
+ src: "vars/kube-config"
+ dest: "{{ global_local_path }}/{{ kube_conf_file }}"
+ mode: 0644
+
+##
+# Setup containers
+##
+- name: remove container
+ community.general.docker_container:
+ name: "{{ docker_base_name }}*"
+ state: absent
+
+##
+# Setup S3
+# This is done as a workaround because xtesting cannot yet handle pushing
+# thousands of files.
+# checksum: sha256:https://dl.min.io/client/mc/release/linux-amd64/mc.sha256sum
+# does not work for now
+##
+- name: "retrieve mc (minio client)"
+ become: "yes"
+ ansible.builtin.get_url:
+ url: https://dl.min.io/client/mc/release/linux-amd64/mc
+ dest: /usr/local/bin/mc
+ mode: "0777"
+ when: use_s3 | bool
+
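+# Illustrative only: with placeholder values (the endpoint and credentials
+# below are examples, not taken from this repository) the alias task expands
+# to something like:
+#   mc alias set s3 https://minio.example.org EXAMPLE_ACCESS_KEY EXAMPLE_SECRET_KEY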
+- name: "set s3 alias"
+ command: "mc alias set s3 {{ s3_endpoint }} {{ s3_access_key }} {{
+ s3_secret_key }}"
+ when: use_s3 | bool
\ No newline at end of file
diff --git a/roles/xtesting-onap-security/tasks/main.yaml b/roles/xtesting-onap-security/tasks/main.yaml
new file mode 100644
index 0000000..8148010
--- /dev/null
+++ b/roles/xtesting-onap-security/tasks/main.yaml
@@ -0,0 +1,88 @@
+---
+# tasks file for ONAP security tests
+
+##
+- block:
+ - name: Create directory
+ ansible.builtin.file:
+ path: "{{ exec_local_path }}"
+ state: directory
+ mode: 0755
+
+ - name: Create env file
+ ansible.builtin.template:
+ src: env-os.j2
+ dest: "{{ exec_local_path }}/env"
+ mode: "0644"
+
+ - name: Delete old logs
+ become: yes
+ ansible.builtin.file:
+ state: absent
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+ - name: "clean {{ docker_onap_security_name }} docker"
+ community.general.docker_container:
+ name: "{{ docker_onap_security_name }}-{{ run_type }}"
+ state: absent
+ force_kill: yes
+
+ - name: generate pre command to run
+ ansible.builtin.set_fact:
+ command: "mkdir -p /var/lib/xtesting/results/{{ run_type }}"
+
+ - name: generate command to run
+ ansible.builtin.set_fact:
+ command: "{{ command }} && run_tests --test {{ run_type }} --report"
+
+ - name: add S3 upload to command
+ ansible.builtin.set_fact:
+ command: "{{ command }} --push"
+ when: use_s3 | bool
+
+ - name: "launch {{ docker_onap_security_name }} docker"
+ community.general.docker_container:
+ container_default_behavior: no_defaults
+ name: "{{ docker_onap_security_name }}-{{ run_type }}"
+ image: "{{ docker_onap_security_image }}:{{ docker_onap_security_version }}"
+ env_file: "{{ exec_local_path }}/env"
+ state: started
+ command: "/bin/bash -c '{{ command }}'"
+ recreate: yes
+ pid_mode: host
+ volumes: "{{ volumes_security }}"
+ detach: true
+ pull: yes
+ keep_volumes: no
+
+ - name: wait for test docker to be finished
+ community.docker.docker_container_info:
+ name: "{{ docker_onap_security_name }}-{{ run_type }}"
+ register: container_info
+ until: container_info.container.State.Status == "exited"
+ retries: "{{ run_timeout }}"
+ delay: 1
+
+ - name: "{{ docker_onap_security_name }} has failed"
+ ansible.builtin.fail:
+ msg: "The test {{ docker_onap_security_name }} has failed"
+ when: container_info.container.State.ExitCode != 0
+ always:
+ - name: retrieve container logs
+ shell: "docker logs {{ docker_onap_security_name }}-{{ run_type }}"
+ register: container_logs
+ ignore_errors: True
+
+ - name: display container logs
+ debug:
+ msg: "{{ container_logs.stdout }}"
+ ignore_errors: True
+
+ - name: "save {{ docker_onap_security_name }} results for artifacts"
+ ansible.posix.synchronize:
+ src: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ dest: "./results/{{ run_tiers }}"
+ use_ssh_args: true
+ mode: pull
+ ignore_errors: True
+ when: not use_s3 | bool
diff --git a/roles/xtesting-onap-security/templates/env-os.j2 b/roles/xtesting-onap-security/templates/env-os.j2
new file mode 100644
index 0000000..06a31a1
--- /dev/null
+++ b/roles/xtesting-onap-security/templates/env-os.j2
@@ -0,0 +1,11 @@
+INSTALLER_TYPE={{ deployment_name }}
+TEST_DB_URL={{ test_result_url }}
+NODE_NAME={{ node_name }}
+BUILD_TAG={{ build_tag }}
+{% if use_s3 | bool %}
+S3_ENDPOINT_URL={{ s3_endpoint }}
+S3_DST_URL={{ s3_dst }}
+HTTP_DST_URL={{ s3_http_url_endpoint }}
+AWS_ACCESS_KEY_ID={{ s3_access_key }}
+AWS_SECRET_ACCESS_KEY={{ s3_secret_key }}
+{% endif %}
\ No newline at end of file
diff --git a/roles/xtesting-onap-vnf/tasks/launch.yaml b/roles/xtesting-onap-vnf/tasks/launch.yaml
new file mode 100644
index 0000000..4220647
--- /dev/null
+++ b/roles/xtesting-onap-vnf/tasks/launch.yaml
@@ -0,0 +1,75 @@
+---
+# tasks file for ONAP VNF tests (launch)
+
+##
+- block:
+ - name: generate pre command to run
+ ansible.builtin.set_fact:
+ command: "mkdir -p /var/lib/xtesting/results/{{ run_type }}"
+
+ - name: generate command to run
+ ansible.builtin.set_fact:
+ command: "{{ command }} && run_tests --test {{ run_type }} --report"
+
+ - name: add S3 upload to command
+ ansible.builtin.set_fact:
+ command: "{{ command }} --push"
+ when: use_s3 | bool
+
+ - name: generate a random number between 0 and 600
+ ansible.builtin.set_fact:
+ before_launch_wait_time: "{{ 600 | random }}"
+ when: random_wait and before_launch_wait_time is not defined
+
+ - name: "wait {{ before_launch_wait_time }}s in order to allow 'sequential' tests"
+ run_once: yes
+ wait_for:
+ timeout: "{{ before_launch_wait_time }}"
+ delegate_to: localhost
+ when: random_wait
+
+ - name: "launch onap-vnf docker for {{ run_type }}"
+ community.general.docker_container:
+ container_default_behavior: no_defaults
+ name: "{{ docker_vnf_name }}-{{ run_type }}"
+ image: "{{ docker_vnf_image }}:{{ docker_vnf_version }}"
+ env_file: "{{ exec_local_path }}/env"
+ state: started
+ command: "/bin/bash -c '{{ command }}'"
+ pull: yes
+ recreate: yes
+ volumes: "{{ volumes }}"
+ etc_hosts: "{{ etc_hosts }}"
+ detach: yes
+ keep_volumes: no
+
+ - name: wait for test docker to be finished
+ community.docker.docker_container_info:
+ name: "{{ docker_vnf_name }}-{{ run_type }}"
+ register: container_info
+ until: container_info.container.State.Status == "exited"
+ retries: "{{ run_timeout }}"
+ delay: 1
+
+ - name: "{{ run_type }} has failed"
+ ansible.builtin.fail:
+ msg: "The test {{ run_type }} has failed"
+ when: container_info.container.State.ExitCode != 0
+ always:
+ - name: retrieve container logs
+ shell: "docker logs {{ docker_vnf_name }}-{{ run_type }}"
+ register: container_logs
+ ignore_errors: True
+
+ - name: display container logs
+ debug:
+ msg: "{{ container_logs.stdout }}"
+ ignore_errors: True
+ - name: "save VNF results for artifacts"
+ ansible.posix.synchronize:
+ src: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ dest: "./results/{{ run_tiers }}"
+ mode: pull
+ use_ssh_args: true
+ ignore_errors: True
+ when: not use_s3 | bool
diff --git a/roles/xtesting-onap-vnf/tasks/main.yaml b/roles/xtesting-onap-vnf/tasks/main.yaml
new file mode 100644
index 0000000..db10573
--- /dev/null
+++ b/roles/xtesting-onap-vnf/tasks/main.yaml
@@ -0,0 +1,5 @@
+---
+- import_tasks: prepare_cnf_test.yaml
+ when: run_type == "basic_cnf"
+- import_tasks: prepare.yaml
+- import_tasks: launch.yaml
diff --git a/roles/xtesting-onap-vnf/tasks/prepare.yaml b/roles/xtesting-onap-vnf/tasks/prepare.yaml
new file mode 100644
index 0000000..addab8c
--- /dev/null
+++ b/roles/xtesting-onap-vnf/tasks/prepare.yaml
@@ -0,0 +1,57 @@
+---
+# tasks file for roles/xtesting-onap-vnf (prepare)
+
+##
+# Prepare config
+##
+- name: Clean directory
+ ansible.builtin.file:
+ path: "{{ exec_local_path }}"
+ state: absent
+
+- name: Create directory
+ ansible.builtin.file:
+ path: "{{ exec_local_path }}/vnf-services"
+ state: directory
+ mode: 0755
+
+- name: Create env file
+ ansible.builtin.template:
+ src: env-os.j2
+ dest: "{{ exec_local_path }}/env"
+ mode: "0644"
+
+- name: copy pythonsdk-tests configuration file
+ ansible.builtin.template:
+ src: settings.py.j2
+ dest: "{{ exec_local_path }}/settings.py"
+ mode: "0644"
+
+- name: create basic_vm configuration file
+ ansible.builtin.template:
+ src: basic_vm-service-{{ onap_version }}.yaml.j2
+ dest: "{{ exec_local_path }}/basic_vm-service.yaml"
+ mode: "0644"
+
+- name: create basic_vm_macro configuration file
+ ansible.builtin.template:
+ src: basic_vm_macro-service-{{ onap_version }}.yaml.j2
+ dest: "{{ exec_local_path }}/basic_vm_macro-service.yaml"
+ mode: "0644"
+
+- name: create directories as root
+ become: yes
+ ansible.builtin.file:
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+ state: directory
+ mode: 0755
+- name: Delete old logs
+ become: yes
+ ansible.builtin.file:
+ state: absent
+ path: "{{ res_local_path }}/{{ run_tiers }}/{{ run_type }}"
+
+- name: clean onap-vnf docker
+ community.general.docker_container:
+ name: "{{ docker_vnf_name }}-{{ run_type }}"
+ state: absent
diff --git a/roles/xtesting-onap-vnf/tasks/prepare_cnf_test.yaml b/roles/xtesting-onap-vnf/tasks/prepare_cnf_test.yaml
new file mode 100644
index 0000000..47ee366
--- /dev/null
+++ b/roles/xtesting-onap-vnf/tasks/prepare_cnf_test.yaml
@@ -0,0 +1,86 @@
+---
+- name: Ensure cloud-site service does not exist
+ community.kubernetes.k8s_service:
+ name: cloud-site
+ namespace: onap
+ state: absent
+
+- name: Expose nodePort with created cloud-site service
+ shell: kubectl -n onap expose svc so-catalog-db-adapter --name cloud-site --type=NodePort
+
+- name: Get nodePort for cloud-site service
+ community.kubernetes.k8s_info:
+ kind: service
+ namespace: onap
+ name: cloud-site
+ register: output
+
+- name: set variable for api port
+ ansible.builtin.set_fact:
+ cloudsite_port: "{{ output.resources[0].spec.ports[0].nodePort }}"
+
+- name: Get DEFAULT Cloud Site
+ ansible.builtin.uri:
+ url: "{{ so_url }}:{{ cloudsite_port }}/\
+ cloudSite/DEFAULT"
+ method: GET
+ status_code: 200
+ body_format: json
+ validate_certs: "no"
+ return_content: "yes"
+ headers:
+ authorization: Basic YnBlbDpwYXNzd29yZDEk
+ Accept: application/json
+ Cache-Control: no-cache
+ register: output
+
+- name: Add cloud site CloudRegion
+ ansible.builtin.uri:
+ url: "{{ so_url }}:{{ cloudsite_port }}/\
+ cloudSite/{{ k8sRegionID }}"
+ method: PUT
+ status_code:
+ - 200
+ - 201
+ body_format: json
+ validate_certs: "no"
+ return_content: "yes"
+ headers:
+ authorization: Basic YnBlbDpwYXNzd29yZDEk
+ Accept: application/json
+ Cache-Control: no-cache
+ body:
+ id: "{{ k8sRegionID }}"
+ region_id: "{{ k8sRegionID }}"
+ aic_version: 2.5
+ clli: "{{ datacenter_id }}"
+ orchestrator: multicloud
+ identity_service_id: DEFAULT_KEYSTONE
+ identityService: "{{ output.json.identityService }}"
+
+- name: Remove created cloud-site service with NodePort
+ community.kubernetes.k8s_service:
+ name: cloud-site
+ namespace: onap
+ state: absent
+
+- name: delete cnf namespace
+ community.kubernetes.k8s:
+ state: absent
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: "{{ cnf_namespace }}"
+
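+# Descriptive note: "grep -c" prints the number of matching lines and exits
+# non-zero when that count is 0, so "|| true" keeps the command from failing
+# while the namespace is still terminating; the loop ends once stdout is '0'.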
+- name: wait for cnf namespace full deletion
+ shell: |
+ set -o pipefail && kubectl get namespace |
+ grep -c {{ cnf_namespace }} || true
+ args:
+ executable: /bin/bash
+ register: kube
+ changed_when: kube.stdout == '0'
+ until: kube.stdout == '0'
+ retries: 600
+ delay: 1
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm-service-istanbul.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm-service-istanbul.yaml.j2
new file mode 100644
index 0000000..53ffc91
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm-service-istanbul.yaml.j2
@@ -0,0 +1,2 @@
+{% extends "basic_vm-service-master.yaml.j2" %}
+{% block heat_file_path %}onaptests/templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
\ No newline at end of file
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm-service-jakarta.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm-service-jakarta.yaml.j2
new file mode 100644
index 0000000..d0ca6f2
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm-service-jakarta.yaml.j2
@@ -0,0 +1,40 @@
+---
+basic_vm:
+ vnfs:
+ - vnf_name: basic_vm
+ heat_files_to_upload: {% block heat_file_path %}templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
+
+ vnf_parameters: [
+ {"name": "ubuntu20_image_name",
+ "value": "Ubuntu_2004"
+ },
+ {"name": "ubuntu20_key_name",
+ "value": "onap_dt"
+ },
+ {"name": "ubuntu20_pub_key",
+ "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAA\
+BAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGx\
+ilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4\
+utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3r\
+h+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBh\
+t+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D imported-openssh-key"
+ },
+ {"name": "ubuntu20_flavor_name",
+ "value": "m1.small"
+ },
+ {"name": "VM_name",
+ "value": "ubuntu20agent-VM-01"
+ },
+ {"name": "vnf_id",
+ "value": "ubuntu20agent-VNF-instance"
+ },
+ {"name": "vf_module_id",
+ "value": "ubuntu20agent-vfmodule-instance"
+ },
+ {"name": "vnf_name",
+ "value": "ubuntu20agent-VNF"
+ },
+ {"name": "admin_plane_net_name",
+ "value": "{{ openstack_public_net_id }}"
+ }
+ ]
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm-service-master.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm-service-master.yaml.j2
new file mode 100644
index 0000000..d0ca6f2
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm-service-master.yaml.j2
@@ -0,0 +1,40 @@
+---
+basic_vm:
+ vnfs:
+ - vnf_name: basic_vm
+ heat_files_to_upload: {% block heat_file_path %}templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
+
+ vnf_parameters: [
+ {"name": "ubuntu20_image_name",
+ "value": "Ubuntu_2004"
+ },
+ {"name": "ubuntu20_key_name",
+ "value": "onap_dt"
+ },
+ {"name": "ubuntu20_pub_key",
+ "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAA\
+BAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGx\
+ilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4\
+utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3r\
+h+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBh\
+t+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D imported-openssh-key"
+ },
+ {"name": "ubuntu20_flavor_name",
+ "value": "m1.small"
+ },
+ {"name": "VM_name",
+ "value": "ubuntu20agent-VM-01"
+ },
+ {"name": "vnf_id",
+ "value": "ubuntu20agent-VNF-instance"
+ },
+ {"name": "vf_module_id",
+ "value": "ubuntu20agent-vfmodule-instance"
+ },
+ {"name": "vnf_name",
+ "value": "ubuntu20agent-VNF"
+ },
+ {"name": "admin_plane_net_name",
+ "value": "{{ openstack_public_net_id }}"
+ }
+ ]
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-istanbul.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-istanbul.yaml.j2
new file mode 100644
index 0000000..ada0289
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-istanbul.yaml.j2
@@ -0,0 +1,2 @@
+{% extends "basic_vm_macro-service-master.yaml.j2" %}
+{% block heat_file_path %}onaptests/templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
\ No newline at end of file
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-jakarta.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-jakarta.yaml.j2
new file mode 100644
index 0000000..c35ca34
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-jakarta.yaml.j2
@@ -0,0 +1,55 @@
+---
+basic_vm_macro:
+ instantiation_type: "Macro"
+ vnfs:
+ - vnf_name: basic_vm_macro
+ properties:
+ controller_actor: "CDS"
+ skip_post_instantiation_configuration: False
+ sdnc_artifact_name: "vnf"
+ sdnc_model_version: "1.0.0"
+ sdnc_model_name: "ubuntu20"
+ vnf_artifact_type: "CONTROLLER_BLUEPRINT_ARCHIVE"
+ vnf_artifact_name: "BASIC_VM_enriched.zip"
+ vnf_artifact_label: "vnfcds"
+ vnf_artifact_file_path: "/tmp/BASIC_VM_enriched.zip"
+ heat_files_to_upload: {% block heat_file_path %}templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
+
+ vf_module_parameters:
+ - vf_module_name: base_ubuntu20
+ parameters: [
+ {"name": "ubuntu20_image_name",
+ "value": "Ubuntu_2004"
+ },
+ {"name": "ubuntu20_key_name",
+ "value": "onap_dt"
+ },
+ {"name": "ubuntu20_pub_key",
+ "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAA\
+ BAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGx\
+ ilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4\
+ utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3r\
+ h+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBh\
+ t+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D imported-openssh-key"
+ },
+ {"name": "ubuntu20_flavor_name",
+ "value": "m1.small"
+ },
+ {"name": "VM_name",
+ "value": "basic_vm_macro-VM-01"
+ },
+ {"name": "vnf_id",
+ "value": "basic_vm_macro-VNF-instance"
+ },
+ {"name": "vf_module_id",
+ "value": "basic_vm_macro-vfmodule-instance"
+ },
+ {"name": "vnf_name",
+ "value": "basic_vm_macro-VNF"
+ },
+ {"name": "admin_plane_net_name",
+ "value": "{{ openstack_public_net_id }}"
+ },
+ {"name": "ubuntu20_name_0",
+ "value": "ubuntu20agent-VNF"}
+ ]
diff --git a/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-master.yaml.j2 b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-master.yaml.j2
new file mode 100644
index 0000000..c35ca34
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/basic_vm_macro-service-master.yaml.j2
@@ -0,0 +1,55 @@
+---
+basic_vm_macro:
+ instantiation_type: "Macro"
+ vnfs:
+ - vnf_name: basic_vm_macro
+ properties:
+ controller_actor: "CDS"
+ skip_post_instantiation_configuration: False
+ sdnc_artifact_name: "vnf"
+ sdnc_model_version: "1.0.0"
+ sdnc_model_name: "ubuntu20"
+ vnf_artifact_type: "CONTROLLER_BLUEPRINT_ARCHIVE"
+ vnf_artifact_name: "BASIC_VM_enriched.zip"
+ vnf_artifact_label: "vnfcds"
+ vnf_artifact_file_path: "/tmp/BASIC_VM_enriched.zip"
+ heat_files_to_upload: {% block heat_file_path %}templates/heat-files/ubuntu20/ubuntu20agent.zip{% endblock %}
+
+ vf_module_parameters:
+ - vf_module_name: base_ubuntu20
+ parameters: [
+ {"name": "ubuntu20_image_name",
+ "value": "Ubuntu_2004"
+ },
+ {"name": "ubuntu20_key_name",
+ "value": "onap_dt"
+ },
+ {"name": "ubuntu20_pub_key",
+ "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAA\
+ BAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGx\
+ ilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4\
+ utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3r\
+ h+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBh\
+ t+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D imported-openssh-key"
+ },
+ {"name": "ubuntu20_flavor_name",
+ "value": "m1.small"
+ },
+ {"name": "VM_name",
+ "value": "basic_vm_macro-VM-01"
+ },
+ {"name": "vnf_id",
+ "value": "basic_vm_macro-VNF-instance"
+ },
+ {"name": "vf_module_id",
+ "value": "basic_vm_macro-vfmodule-instance"
+ },
+ {"name": "vnf_name",
+ "value": "basic_vm_macro-VNF"
+ },
+ {"name": "admin_plane_net_name",
+ "value": "{{ openstack_public_net_id }}"
+ },
+ {"name": "ubuntu20_name_0",
+ "value": "ubuntu20agent-VNF"}
+ ]
diff --git a/roles/xtesting-onap-vnf/templates/clearwater-ims-service.yaml.j2 b/roles/xtesting-onap-vnf/templates/clearwater-ims-service.yaml.j2
new file mode 100644
index 0000000..bf950a7
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/clearwater-ims-service.yaml.j2
@@ -0,0 +1,55 @@
+---
+ims:
+ tosca_file_from_SDC: service-Ims-template
+ version: "1.0"
+ subscription_type: "ims"
+ vnfs:
+ - vnf_name: Clearwater_vnf
+ heat_files_to_upload: onaptests/templates/heat-files/clearwater_ims/clearwater_ims.zip
+ vnf_parameters: [
+ {"vnf-parameter-name":"public_net_id",
+ "vnf-parameter-value":"{{ openstack_public_net_id }}"},
+ {"vnf-parameter-name":"bono_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"bono_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"dime_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"dime_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"dns_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"dns_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"ellis_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"ellis_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"homer_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"homer_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"robot_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"robot_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"sprout_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"sprout_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"vellum_image_name",
+ "vnf-parameter-value":"Ubuntu_1404"},
+ {"vnf-parameter-name":"vellum_flavor_name",
+ "vnf-parameter-value":"m1.medium"},
+ {"vnf-parameter-name":"dns_ip",
+ "vnf-parameter-value":"8.8.8.8"},
+ {"vnf-parameter-name": "clearwater_pub_key",
+ "vnf-parameter-value":"ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAr0If\
+62QHgf/xKzomkwBD9c1ol7edWpyG5+p9UBRE0D/bJcA5lyRpaYlcjxp3pfnN+WiVYfzjwHDjeDlyAO\
+pH2o3yrBCA9U+sU6PjhIH/BXFVkVQBY4xAmtjQnN3QCzjn8BA6PbaEt53OvvaYqtgg0yc5OOA0nyDl\
+cg/FU88I1MnhZvjTU90V4QEYKCMAyYcz6NdjGfC7PmpKIVmSWgHOdR59d5CGIRv6BnIWIBqXy+z+el\
+dbSKBrRVo/dv/H8Q0uwS6/rA2gLXeXhsQmEFZXsxwovypWm6t5hWMK/4cXA88AwylMd9xSroic398R\
+ZiO3QygZ9L7aQN2rnuAQHHOOaw=="
+ },
+ {"vnf-parameter-name":"clearwater_key_name",
+ "vnf-parameter-value":"onap_key"}]
diff --git a/roles/xtesting-onap-vnf/templates/env-os.j2 b/roles/xtesting-onap-vnf/templates/env-os.j2
new file mode 100644
index 0000000..5bbcdd4
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/env-os.j2
@@ -0,0 +1,14 @@
+INSTALLER_TYPE={{ deployment_name }}
+TEST_DB_URL={{ test_result_url }}
+NODE_NAME={{ node_name }}
+BUILD_TAG={{ build_tag }}
+DEBUG=False
+OS_TEST_CLOUD={{ os_test_cloud }}
+ONAP_PYTHON_SDK_SETTINGS={{ vnf_settings }}
+{% if use_s3 | bool %}
+S3_ENDPOINT_URL={{ s3_endpoint }}
+S3_DST_URL={{ s3_dst }}
+HTTP_DST_URL={{ s3_http_url_endpoint }}
+AWS_ACCESS_KEY_ID={{ s3_access_key }}
+AWS_SECRET_ACCESS_KEY={{ s3_secret_key }}
+{% endif %}
diff --git a/roles/xtesting-onap-vnf/templates/settings.py.j2 b/roles/xtesting-onap-vnf/templates/settings.py.j2
new file mode 100644
index 0000000..c5eaba3
--- /dev/null
+++ b/roles/xtesting-onap-vnf/templates/settings.py.j2
@@ -0,0 +1,63 @@
+"""Specific settings module.""" # pylint: disable=bad-whitespace
+
+######################
+# #
+# ONAP INPUTS DATAS #
+# #
+######################
+
+
+# Variables to set logger information
+# Possible values for logging levels in onapsdk: INFO, DEBUG , WARNING, ERROR
+LOG_CONFIG = {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "formatters": {
+ "default": {
+ "class": "logging.Formatter",
+ "format": "%(asctime)s %(levelname)s %(lineno)d:%(filename)s(%(process)d) - %(message)s"
+ }
+ },
+ "handlers": {
+ "console": {
+ "level": "INFO",
+ "class": "logging.StreamHandler",
+ "formatter": "default"
+ },
+ "file": {
+ "level": "DEBUG",
+ "class": "logging.FileHandler",
+ "formatter": "default",
+ "filename": "/var/lib/xtesting/results/{{ run_type }}/pythonsdk.debug.log",
+ "mode": "w"
+ }
+ },
+ "root": {
+ "level": "DEBUG",
+ "handlers": ["console", "file"]
+ }
+}
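+# The mapping above follows the stdlib logging.config.dictConfig schema:
+# console output stays at INFO while full DEBUG logs go to the per-run file
+# under /var/lib/xtesting/results/{{ run_type }}/.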
+CLEANUP_FLAG = False
+
+# SOCK_HTTP = "socks5h://127.0.0.1:8080"
+REPORTING_FILE_PATH = "/var/lib/xtesting/results/{{ run_type }}/reporting.html"
+K8S_REGION_TYPE = "k8s"
+TILLER_HOST = "localhost"
+K8S_CONFIG = None # None means it will use default config (~/.kube/config)
+K8S_NAMESPACE = "onap" # Kubernetes namespace
+K8S_ONAP_NAMESPACE = "onap" # ONAP Kubernetes namespace
+K8S_ADDITIONAL_RESOURCES_NAMESPACE = K8S_ONAP_NAMESPACE # Resources created in the tests namespace
+ORCHESTRATION_REQUEST_TIMEOUT = 60.0 * 30 # 30 minutes in seconds
+{% if use_ingress | bool %}
+AAI_URL = "https://aai-api.simpledemo.onap.org"
+CDS_URL = "https://cds-blueprintsprocessor-api.simpledemo.onap.org"
+MSB_URL = "https://msb-iag-ui.simpledemo.onap.org"
+SDC_BE_URL = "https://sdc-be-api.simpledemo.onap.org"
+SDC_FE_URL = "https://sdc-fe-ui.simpledemo.onap.org"
+SDNC_URL = "https://sdnc-api.simpledemo.onap.org"
+SO_URL = "https://so-api.simpledemo.onap.org"
+CLAMP_URL = "https://policy-ui.simpledemo.onap.org"
+VES_URL = "https://dcae-ves-collector-api.simpledemo.onap.org"
+DMAAP_URL = "https://dmaap-mr-api.simpledemo.onap.org"
+{% endif %}
+
diff --git a/roles/xtesting-pages/tasks/main.yaml b/roles/xtesting-pages/tasks/main.yaml
new file mode 100644
index 0000000..e9fe0d0
--- /dev/null
+++ b/roles/xtesting-pages/tasks/main.yaml
@@ -0,0 +1,69 @@
+---
+- name: install pip dependencies
+ become: "yes"
+ ansible.builtin.pip:
+ name: "{{ item }}"
+ state: latest
+ loop: "{{ pip_packages_pages }}"
+
+- name: Copy template dir to Jumphost to generate pages
+ ansible.builtin.copy:
+ src: "{{ doc_path }}/template"
+ dest: "{{ doc_dir_target }}/{{ doc_path }}"
+
+- name: Copy dashboard page generator to Jumphost
+ ansible.builtin.copy:
+ src: "{{ doc_path }}/generate_status.py"
+ dest: "{{ doc_dir_target }}/{{ doc_path }}"
+
+- name: "[LEGACY] Generate pages"
+ shell: "python3 generate_status.py -p {{ node_name }} -d {{ test_result_url }} -t {{ build_tag }} -m legacy"
+ args:
+ chdir: "{{ doc_dir_target }}/{{ doc_path }}"
+ when: not use_s3 | bool
+
+- name: "[S3] Generate pages"
+ shell: "python3 generate_status.py -p {{ node_name }} -d {{ test_result_url }} -t {{ build_tag }} -m s3"
+ args:
+ chdir: "{{ doc_dir_target }}/{{ doc_path }}"
+ when: use_s3 | bool
+ register: page_generation
+
+- name: Manage additional status pages
+ block:
+ - name: Copy stability page generator to Jumphost
+ ansible.builtin.copy:
+ src: "{{ doc_path }}/generate_stability_graphs.py"
+ dest: "{{ doc_dir_target }}/{{ doc_path }}"
+
+ - name: Copy docker version page generator to Jumphost
+ ansible.builtin.copy:
+ src: "{{ doc_path }}/generate_docker_version.py"
+ dest: "{{ doc_dir_target }}/{{ doc_path }}"
+
+ - name: "Generate stability page"
+ shell: "python3 generate_stability_graphs.py -v {{ onap_version }}"
+ args:
+ chdir: "{{ doc_dir_target }}/{{ doc_path }}"
+
+ - name: "Generate docker version page"
+ shell: "python3 generate_docker_version.py"
+ args:
+ chdir: "{{ doc_dir_target }}/{{ doc_path }}"
+ when: '"daily" in pod'
+ ignore_errors: yes
+
+- name: Fetch pages
+ ansible.posix.synchronize:
+ src: "{{ doc_dir_target }}/{{ doc_path }}/*"
+ dest: "{{ doc_path }}"
+ use_ssh_args: true
+ mode: pull
+ recursive: no
+ rsync_opts:
+ - "--exclude=*.py"
+
+- name: Delete remote doc dir
+ ansible.builtin.file:
+ path: "{{ doc_dir_target }}/{{ doc_path }}"
+ state: absent