Diffstat (limited to 'roles')
 roles/node_prepare/defaults/main.yaml                       |   1 +
 roles/node_prepare/handlers/main.yaml                       |   7 +
 roles/node_prepare/tasks/main.yaml                          |  28 +
 roles/node_prepare/tasks/nfs_client.yaml                    |  35 +
 roles/node_prepare/tasks/nfs_client_DEBIAN.yaml             |   8 +
 roles/node_prepare/tasks/nfs_server.yaml                    |  47 +
 roles/node_prepare/tasks/nfs_server_COREOS.yaml             |   4 +
 roles/node_prepare/tasks/nfs_server_DEBIAN.yaml             |   9 +
 roles/oom_clean/defaults/main.yaml                          |   8 +
 roles/oom_clean/tasks/helm3.yaml                            |  66 +
 roles/oom_clean/tasks/main.yaml                             | 151 +
 roles/oom_configure/defaults/main.yaml                      |  35 +
 roles/oom_configure/tasks/main.yaml                         | 210 +
 roles/oom_configure/templates/components-overrides.yaml.j2  | 153 +
 roles/oom_configure/templates/onap-overrides.yaml.j2        | 202 +
 roles/oom_configure/templates/so-overrides.yaml.j2          |  63 +
 roles/oom_generate_artifacts/defaults/main.yaml             |   7 +
 roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml  |  71 +
 roles/oom_generate_artifacts/tasks/main.yaml                |  82 +
 roles/oom_launch/defaults/main.yaml                         |   7 +
 roles/oom_launch/tasks/main.yaml                            | 199 +
 roles/oom_postconfigure/defaults/main.yaml                  |   7 +
 roles/oom_postconfigure/tasks/main.yaml                     |  52 +
 roles/oom_prepare/defaults/main.yaml                        |   8 +
 roles/oom_prepare/tasks/main.yaml                           | 242 +
 roles/oom_wait/tasks/main.yaml                              |  40 +
 roles/prepare_ci/defaults/main.yaml                         |   6 +
 roles/prepare_ci/tasks/install_DEBIAN.yaml                  |  11 +
 roles/prepare_ci/tasks/main.yaml                            |  57 +
 roles/prepare_ci/vars/debian.yaml                           |  18 +
 30 files changed, 1834 insertions(+), 0 deletions(-)
diff --git a/roles/node_prepare/defaults/main.yaml b/roles/node_prepare/defaults/main.yaml
new file mode 100644
index 0000000..3160f83
--- /dev/null
+++ b/roles/node_prepare/defaults/main.yaml
@@ -0,0 +1 @@
+nfs_daemon: nfs-kernel-server
\ No newline at end of file
diff --git a/roles/node_prepare/handlers/main.yaml b/roles/node_prepare/handlers/main.yaml
new file mode 100644
index 0000000..73aa2ed
--- /dev/null
+++ b/roles/node_prepare/handlers/main.yaml
@@ -0,0 +1,7 @@
+---
+- name: restart nfs server
+  systemd:
+    name: "{{ nfs_daemon }}"
+    state: restarted
+    daemon_reload: "yes"
+  become: "yes"
diff --git a/roles/node_prepare/tasks/main.yaml b/roles/node_prepare/tasks/main.yaml
new file mode 100644
index 0000000..afbaeb4
--- /dev/null
+++ b/roles/node_prepare/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+# TODO: retrieve facts on OpenStack to dynamically find the subnet to use in
+# the /etc/exports entry
+- name: install nfs
+  when: not use_global_storage
+  block:
+    - name: gather facts
+      setup:
+
+    - name: create nfs server on controller
+      import_tasks: nfs_server.yaml
+      when: inventory_hostname in groups['nfs-server']
+
+    - name: flush handlers
+      meta: flush_handlers
+
+    - name: mount dockerdata in nfs
+      import_tasks: nfs_client.yaml
+      when: inventory_hostname in groups['k8s-cluster'] and
+            inventory_hostname not in groups['nfs-server']
+
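+    # vm.max_map_count has to be raised for the Elasticsearch-based ONAP
+    # components (Elasticsearch refuses to start below 262144); the value
+    # below leaves ample headroom.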
+    - name: set vm.max_map_count to the required value
+      become: "yes"
+      ansible.posix.sysctl:
+        name: vm.max_map_count
+        value: 1048575
+        sysctl_set: "yes"
+        state: present
+        reload: "yes"
diff --git a/roles/node_prepare/tasks/nfs_client.yaml b/roles/node_prepare/tasks/nfs_client.yaml
new file mode 100644
index 0000000..315fdab
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_client.yaml
@@ -0,0 +1,35 @@
+---
+- name: install packages
+  import_tasks: nfs_client_DEBIAN.yaml
+  when: ansible_os_family | lower == "debian"
+
+- name: Create mountable dir
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ nfs_folder }}"
+    state: directory
+    mode: 0777
+    owner: root
+    group: root
+
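+# Mount the export from the first NFS server over NFSv4.1 with 128 KiB
+# read/write buffers (rsize/wsize=131072) to limit protocol overhead.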
+- name: set mountpoints
+  become: "yes"
+  ansible.posix.mount:
+    name: "{{ nfs_folder }}"
+    src:
+      "{{ hostvars[groups['nfs-server'][0]].ansible_default_ipv4.address }}\
+      :{{ nfs_folder }}"
+    fstype: nfs4
+    dump: 0
+    passno: 2
+    opts: "nfsvers=4.1,rsize=131072,wsize=131072"
+    state: mounted
+
+- name: Ensure mountable dir is accessible to everyone
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ nfs_folder }}"
+    state: directory
+    mode: 0777
+    owner: root
+    group: root
diff --git a/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml b/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml
new file mode 100644
index 0000000..f3eb6ee
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_client_DEBIAN.yaml
@@ -0,0 +1,8 @@
+---
+- name: "[Debian] Ensure NFS utilities are installed."
+  include_role:
+    name: apt_install
+  vars:
+    environment: "{{ proxy_env }}"
+    packages:
+      - nfs-common
diff --git a/roles/node_prepare/tasks/nfs_server.yaml b/roles/node_prepare/tasks/nfs_server.yaml
new file mode 100644
index 0000000..6179172
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server.yaml
@@ -0,0 +1,47 @@
+---
+- name: apply distribution specific tasks
+  include_tasks: "nfs_server_{{ ansible_os_family | upper }}.yaml"
+  when: >
+    ansible_os_family | lower == "debian" or
+    ansible_os_family | lower == "coreos"
+
+- name: retrieve server pdf information
+  ansible.builtin.set_fact:
+    server_pdf:
+      "{{ nodes | selectattr('name', 'in', inventory_hostname) | first }}"
+
+- name: create nfs volume
+  include_role:
+    name: create_disk
+  vars:
+    disks: "{{ server_pdf.disks }}"
+    disk_purpose: nfs
+    mount_path: "{{ nfs_folder }}"
+    force_full_erase: False
+  when: (server_pdf.disks | selectattr('name', 'eq', 'disk-nfs') |
+        list | length) > 0
+
+- name: Create mountable dir
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ nfs_folder }}"
+    state: directory
+    mode: 0777
+    owner: root
+    group: root
+
+- name: create placeholder file so the nfs export is never empty
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ nfs_folder }}/do_not_remove"
+    state: touch
+
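+# Export the folder to any host: read-write, asynchronous writes, no root
+# squashing (pods may write as root) and no subtree checking.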
+- name: add export entry to /etc/exports
+  become: "yes"
+  ansible.builtin.lineinfile:
+    path: /etc/exports
+    owner: root
+    group: root
+    regexp: "^{{ nfs_folder }}"
+    line: "{{ nfs_folder }} *(rw,async,no_root_squash,no_subtree_check)"
+  notify: restart nfs server
diff --git a/roles/node_prepare/tasks/nfs_server_COREOS.yaml b/roles/node_prepare/tasks/nfs_server_COREOS.yaml
new file mode 100644
index 0000000..ad59414
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server_COREOS.yaml
@@ -0,0 +1,4 @@
+---
+- name: override default nfs daemon name
+  ansible.builtin.set_fact:
+    nfs_daemon: nfsd
diff --git a/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml b/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml
new file mode 100644
index 0000000..38b4d14
--- /dev/null
+++ b/roles/node_prepare/tasks/nfs_server_DEBIAN.yaml
@@ -0,0 +1,9 @@
+---
+- name: "[Debian] Ensure NFS utilities are installed."
+  include_role:
+    name: apt_install
+  vars:
+    environment: "{{ proxy_env }}"
+    packages:
+      - nfs-common
+      - nfs-kernel-server
diff --git a/roles/oom_clean/defaults/main.yaml b/roles/oom_clean/defaults/main.yaml
new file mode 100644
index 0000000..adf0ae3
--- /dev/null
+++ b/roles/oom_clean/defaults/main.yaml
@@ -0,0 +1,8 @@
+faulty_pods:
+  - ejbca
+
+helm_env: {}
+
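+# When helmv3_use_sql is enabled, Helm 3 is pointed at a PostgreSQL backend
+# for its release storage (HELM_DRIVER=sql) instead of Kubernetes secrets.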
+helm_env_postgres:
+  HELM_DRIVER: sql
+  HELM_DRIVER_SQL_CONNECTION_STRING: "{{ postgres_url }}"
\ No newline at end of file
diff --git a/roles/oom_clean/tasks/helm3.yaml b/roles/oom_clean/tasks/helm3.yaml
new file mode 100644
index 0000000..f6eb3a9
--- /dev/null
+++ b/roles/oom_clean/tasks/helm3.yaml
@@ -0,0 +1,66 @@
+---
+- name: "[HELM3] retrieve helm postgres secret"
+  community.kubernetes.k8s_info:
+    api_version: v1
+    kind: Secret
+    name: "{{ postgres_secret_name }}"
+    namespace: "{{ postgres_namespace }}"
+  register: postgres_secrets
+  when: helmv3_use_sql|bool
+
+- name: "[HELM3] retrieve helm postgres password"
+  set_fact:
+    postgres_password: "{{
+      postgres_secrets.resources[0].data['postgresql-password'] | b64decode }}"
+  when: helmv3_use_sql|bool
+
+- name: set helm environment with postgres
+  set_fact:
+    helm_env: "{{ helm_env_postgres }}"
+  when: helmv3_use_sql|bool
+
+- name: "[HELM3] list previously installed components"
+  shell: |
+    set -o pipefail && helm list -a -n {{ onap_namespace }} |
+    awk '{print $1}' | grep {{ chart_name }}- || true
+  args:
+    executable: /bin/bash
+  environment: "{{ helm_env }}"
+  register: components
+  changed_when: "false"
+
+- name: "[HELM3] remove previously installed components"
+  command:
+    "helm uninstall {{ item }} -n {{ onap_namespace }}"
+  loop: "{{ components.stdout_lines }}"
+  environment: "{{ helm_env }}"
+  register: helm_undeploy
+  async: 900
+  poll: 0
+
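+# 'async: 900' with 'poll: 0' above fires every uninstall in the background;
+# async_status then polls the registered job ids until all deletions finish.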
+- name: "[HELM3] Wait for component deletion"
+ ansible.builtin.async_status:
+ jid: "{{ item.ansible_job_id }}"
+ register: _jobs
+ until: _jobs.finished
+ delay: 5
+ retries: 300
+ loop: "{{ helm_undeploy.results }}"
+ loop_control:
+ label: "{{ item.item }}"
+
+- name: "[HELM3] check if an onap installation has been launched before"
+ shell: |
+ set -o pipefail && helm list -a -n {{ onap_namespace }} |
+ awk '{print $1}' | grep -c {{ chart_name }} || true
+ args:
+ executable: /bin/bash
+ environment: "{{ helm_env }}"
+ register: launched
+ changed_when: "false"
+
+- name: "[HELM3] remove previous installation"
+ command:
+ "helm uninstall {{ chart_name }} -n {{ onap_namespace }}"
+ environment: "{{ helm_env }}"
+ when: launched.stdout != '0' \ No newline at end of file
diff --git a/roles/oom_clean/tasks/main.yaml b/roles/oom_clean/tasks/main.yaml
new file mode 100644
index 0000000..4f14200
--- /dev/null
+++ b/roles/oom_clean/tasks/main.yaml
@@ -0,0 +1,151 @@
+---
+- name: check helm version
+  command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+  register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+  ansible.builtin.set_fact:
+    helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+  ansible.builtin.fail:
+    msg: HELM 3 not installed
+  when: not helmv3
+
+- name: "[HELM3] Remove previous installation"
+  include_tasks: helm3.yaml
+  when: helmv3
+
+- name: list remaining pods
+  command: >
+    kubectl get pods --namespace {{ onap_namespace }} --no-headers
+    -o custom-columns=NAME:.metadata.name
+  changed_when: False
+  register: pods
+
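+# A pod name such as 'onap-ejbca-6b9c5f7d4-x2x4z' is reduced to its bare
+# component name ('ejbca') by stripping the release prefix and the
+# replica-set/pod hash suffixes before comparing it to the faulty_pods list.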
+- name: delete remaining faulty pods
+  command: >
+    kubectl delete pods --namespace {{ onap_namespace }} --force
+    --grace-period 0 {{ item }}
+  loop: "{{ pods.stdout_lines }}"
+  when: (pods.stdout_lines | length) <= (faulty_pods | length) and
+        ((item | regex_replace('^[a-zA-Z0-9]+-') |
+        regex_replace('-[0-9a-z]+-[0-9a-z]+$')) in faulty_pods)
+  changed_when: True
+
+- name: list remaining jobs
+  command: >
+    kubectl get jobs --namespace {{ onap_namespace }} --no-headers
+    -o custom-columns=NAME:.metadata.name
+  changed_when: false
+  register: jobs
+
+- name: delete remaining jobs
+  command: >
+    kubectl delete job --namespace {{ onap_namespace }} --force
+    --grace-period 0 {{ item }}
+  loop: "{{ jobs.stdout_lines }}"
+
+- name: list remaining pvcs
+  command: >
+    kubectl get pvc --namespace {{ onap_namespace }} --no-headers
+    -o custom-columns=NAME:.metadata.name
+  changed_when: false
+  register: pvcs
+
+- name: delete remaining pvcs
+  command: >
+    kubectl delete pvc --namespace {{ onap_namespace }} --force
+    --grace-period 0 {{ item }}
+  loop: "{{ pvcs.stdout_lines }}"
+
+- name: check onap namespace status before full deletion
+  shell: |
+    set -o pipefail && kubectl get namespace {{ onap_namespace }}
+    -o jsonpath="{.status.phase}" || true
+  args:
+    executable: /bin/bash
+  register: ns_status
+  ignore_errors: yes
+  changed_when: False
+
+- name: delete onap namespace
+  community.kubernetes.k8s:
+    state: absent
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ onap_namespace }}"
+  when: (not ns_status.failed) and ('Terminating' not in ns_status.stdout)
+
+- name: delete onap tests namespace
+  community.kubernetes.k8s:
+    state: absent
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ onap_namespace }}-tests"
+  when: (not ns_status.failed) and ('Terminating' not in ns_status.stdout)
+
+- name: wait for namespace full deletion
+  shell: |
+    set -o pipefail && kubectl get namespace |
+    grep -c {{ onap_namespace }} || true
+  args:
+    executable: /bin/bash
+  register: kube
+  changed_when:
+    kube.stdout == '0'
+  until:
+    kube.stdout == '0'
+  retries: 600
+  delay: 1
+
+- name: list all remaining persistent volumes
+  shell: |
+    set -o pipefail &&
+    kubectl get pv -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' |
+    grep {{ chart_name }} || true
+  args:
+    executable: /bin/bash
+  register: persistent_volumes
+  changed_when: "false"
+
+- name: remove remaining persistent volumes
+  shell: |
+    set -o pipefail && kubectl delete pv {{ item }} || true
+  args:
+    executable: /bin/bash
+  changed_when: "true"
+  loop: "{{ persistent_volumes.stdout_lines }}"
+
+- name: "list all onap directories in {{ nfs_folder }}"
+  ansible.builtin.find:
+    paths: "{{ nfs_folder }}"
+    recurse: no
+    file_type: directory
+  register: onap_directories
+
+- name: "delete onap directory in {{ nfs_folder }}"
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: absent
+  loop: "{{ onap_directories.files }}"
+  loop_control:
+    label: "{{ item.path }}"
+
+- name: delete component-gating-overrides.yaml if present
+  ansible.builtin.file:
+    path: "{{ override_gating_component }}"
+    state: absent
+
+- name: remove oom directory
+  become: "yes"
+  ansible.builtin.file:
+    path: "{{ oom_path }}"
+    state: absent
diff --git a/roles/oom_configure/defaults/main.yaml b/roles/oom_configure/defaults/main.yaml
new file mode 100644
index 0000000..461e13b
--- /dev/null
+++ b/roles/oom_configure/defaults/main.yaml
@@ -0,0 +1,35 @@
+---
+openstack_tenant_name:
+  "{{ lookup('env','VNFS_TENANT_NAME') |
+  default(os_infra.tenant.name, true) }}"
+openstack_user_name:
+  "{{ lookup('env','VNFS_USER_NAME') |
+  default(os_infra.user.name, true) }}"
+openstack_service_tenant_name: service
+
+component_enabled:
+  a1policymanagement: "{{ medium_onap or ('a1policymanagement' in additional_components) }}"
+  # appc is referenced by components-overrides.yaml.j2 and must be defined
+  appc: "{{ 'appc' in additional_components }}"
+  cli: "{{ small_onap or medium_onap or ('cli' in additional_components) }}"
+  consul: "{{ small_onap or medium_onap or ('consul' in additional_components) }}"
+  cps: "{{ small_onap or medium_onap or ('cps' in additional_components) }}"
+  contrib: "{{ medium_onap or ('contrib' in additional_components) }}"
+  dcaegen2: "{{ medium_onap or ('dcaegen2' in additional_components) }}"
+  dcaegen2_services: "{{ medium_onap or ('dcaegen2_services' in additional_components) }}"
+  dcaemod: "{{ 'dcaemod' in additional_components }}"
+  esr: "{{ small_onap or medium_onap or ('esr' in additional_components) }}"
+  holmes: "{{ medium_onap or ('holmes' in additional_components) }}"
+  log: "{{ small_onap or medium_onap or ('log' in additional_components) }}"
+  msb: "{{ small_onap or medium_onap or ('msb' in additional_components) }}"
+  multicloud: "{{ small_onap or medium_onap or ('multicloud' in additional_components) }}"
+  nbi: "{{ small_onap or medium_onap or ('nbi' in additional_components) }}"
+  oof: "{{ medium_onap or ('oof' in additional_components) }}"
+  policy: "{{ medium_onap or ('policy' in additional_components) }}"
+  pomba: "{{ medium_onap or ('pomba' in additional_components) }}"
+  portal: "{{ portal_enabled }}"
+  sniro_emulator: "{{ 'sniro_emulator' in additional_components }}"
+  uui: "{{ 'uui' in additional_components }}"
+  vfc: "{{ 'vfc' in additional_components }}"
+  vid: "{{ small_onap or medium_onap or ('vid' in additional_components) }}"
+  vnfsdk: "{{ 'vnfsdk' in additional_components }}"
+  modeling: "{{ 'modeling' in additional_components }}"
+  cds: "{{ small_onap or medium_onap or ('cds' in additional_components) }}"
diff --git a/roles/oom_configure/tasks/main.yaml b/roles/oom_configure/tasks/main.yaml
new file mode 100644
index 0000000..dc9e100
--- /dev/null
+++ b/roles/oom_configure/tasks/main.yaml
@@ -0,0 +1,210 @@
+---
+- name: fetch cloud config
+  ansible.builtin.fetch:
+    dest: /tmp/clouds.yaml
+    src: "{{ ansible_user_dir }}/.config/openstack/clouds.yaml"
+    flat: "yes"
+
+- name: load cloud config
+  include_vars: /tmp/clouds.yaml
+
+- name: initialize os_auth_url
+  ansible.builtin.set_fact:
+    os_auth_url: "{{ clouds[openstack_user_name].auth.auth_url }}"
+
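+# Normalize the Keystone URL so it always ends in '/v3' with no trailing
+# slash, whatever form the clouds.yaml auth_url takes ('.../v3', '.../v3/',
+# '...' or '.../').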
+- name: add v3 at end of os_auth_url
+  ansible.builtin.set_fact:
+    os_auth_url:
+      "{{ ((os_auth_url[-3:] == 'v3/') or (os_auth_url[-2:] == 'v3')) |
+      ternary(os_auth_url | regex_replace('/$', ''),
+      (os_auth_url[-1:] == '/') | ternary(
+        os_auth_url ~ 'v3',
+        os_auth_url ~ '/v3')) }}"
+
+- name: set tenant id
+  ansible.builtin.set_fact:
+    tenant_id: "{{ clouds[openstack_user_name].auth.project_id }}"
+  when: clouds[openstack_user_name].auth.project_id is defined
+
+- name: retrieve tenant id
+  block:
+    - name: load cloud config
+      openstack.cloud.os_client_config:
+
+    # - name: retrieve info from VNF tenant
+    #   os_project_facts:
+    #     cloud: "{{ openstack_user_name }}"
+    #     name: "{{ openstack_tenant_name }}"
+    #   register: tenant
+    # ISSUE with shade: You are not authorized to perform the requested action:
+    # identity:list_projects.
+    #
+    # - name: retrieve tenant ID
+    #   set_fact:
+    #     tenant_id: "{{ tenant.ansible_facts.openstack_projects.0.id }}"
+
+    - name: retrieve info from VNF tenant -- bash way
+      shell: >-
+        set -o pipefail && \
+        openstack --os-cloud {{ openstack_user_name }} project list -f json |
+        jq -r '[.[]| select(.Name=="{{ openstack_tenant_name }}") | .ID] |
+        first'
+      args:
+        executable: /bin/bash
+      changed_when: False
+      register: tenant
+
+    - name: retrieve tenant ID -- bash way
+      ansible.builtin.set_fact:
+        tenant_id: "{{ tenant.stdout_lines.0 }}"
+  when: clouds[openstack_user_name].auth.project_id is not defined
+
+- name: generate openstack info file
+  ansible.builtin.copy:
+    content: |
+      openstack_user_name: {{ openstack_user_name }}
+      openstack_tenant_name: {{ openstack_tenant_name }}
+      openstack_tenant_id: {{ tenant_id }}
+    dest: "{{ base_dir }}/vars/openstack_infos.yml"
+  delegate_to: localhost
+
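+# The robot chart expects the OpenStack password encrypted with AES-128-ECB
+# using the hex key stored in so/resources/config/mso/encryption.key; xxd
+# then turns the raw cipher bytes into the hex string the override needs.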
+- name: generate encrypted password for robot
+  shell: |
+    set -o pipefail &&\
+    echo -n '{{ clouds[openstack_user_name].auth.password }}' |
+    openssl aes-128-ecb -e -K `cat encryption.key` -nosalt |
+    xxd -c 256 -p
+  args:
+    chdir: "{{ oom_path }}/kubernetes/so/resources/config/mso"
+    executable: /bin/bash
+  changed_when: false
+  register: shell
+
+- name: save robot encrypted password
+  ansible.builtin.set_fact:
+    robot_encrypted_password: "{{ shell.stdout }}"
+
+- name: "[dockerhub proxy] set so_crypto container name"
+  set_fact:
+    so_crypto: "{{ proxy_for_dockerhub }}/sdesbure/so_crypto"
+  when: proxy_for_dockerhub | bool
+
+- name: "[no dockerhub proxy] set so_crypto container name"
+  set_fact:
+    so_crypto: "sdesbure/so_crypto"
+  when: not proxy_for_dockerhub | bool
+
+- name: generate encrypted password for so
+  shell: >
+    docker run --rm {{ so_crypto }}
+    {{ clouds[openstack_user_name].auth.password }}
+    `cat encryption.key`
+  args:
+    chdir: "{{ oom_path }}/kubernetes/so/resources/config/mso"
+  changed_when: False
+  register: shell
+
+- name: save so encrypted password
+  ansible.builtin.set_fact:
+    encrypted_password: "{{ shell.stdout }}"
+
+- name: create config override directory
+  ansible.builtin.file:
+    path: "{{ oom_etc_path }}"
+    recurse: "yes"
+    state: directory
+
+- name: check if a deployment has already been done
+  ansible.builtin.stat:
+    path: "{{ deployment_file }}"
+  register: deployment_stat
+
+- name: get deployment.yaml
+  when: deployment_stat.stat.exists
+  block:
+    - name: create temporary local file for deployment.yaml
+      ansible.builtin.tempfile:
+        state: file
+        suffix: temp
+      register: tmp_deployment
+      delegate_to: "127.0.0.1"
+
+    - name: fetch deployment info
+      ansible.builtin.fetch:
+        dest: "{{ tmp_deployment.path }}"
+        src: "{{ deployment_file }}"
+        flat: "yes"
+
+    - name: load deployment info
+      include_vars:
+        file: "{{ tmp_deployment.path }}"
+
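+    # Never downgrade an existing deployment: keep the larger of the stored
+    # deployment type and the requested one (micro < small < full).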
+    - name: change deployment type if needed
+      ansible.builtin.set_fact:
+        deployment_type: "{{ deployment }}"
+      when: deployment_type == "micro" or
+            (deployment_type == "small" and deployment != "micro" ) or
+            deployment == "full"
+
+  always:
+    - name: destroy the local tmp_deployment
+      ansible.builtin.file:
+        path: "{{ tmp_deployment.path }}"
+        state: absent
+      delegate_to: "127.0.0.1"
+
+- name: "generate config override template for deployment {{ deployment_type }}"
+  ansible.builtin.template:
+    src: onap-overrides.yaml.j2
+    dest: "{{ override_file }}"
+
+- name: check if pre generated component override file exists
+  ansible.builtin.stat:
+    path: "{{ base_dir }}/vars/components-overrides.yml"
+  delegate_to: localhost
+  register: stat
+
+- name: copy pre generated component override file
+  ansible.builtin.copy:
+    dest: "{{ override_components }}"
+    src: "{{ base_dir }}/vars/components-overrides.yml"
+  when: stat.stat.exists
+
+- name: "generate components override template for deployment {{ deployment_type }}"
+  ansible.builtin.template:
+    src: components-overrides.yaml.j2
+    dest: "{{ override_components }}"
+  when: (not stat.stat.exists) and (core_onap or small_onap or medium_onap)
+
+- name: "generate so override template"
+  ansible.builtin.template:
+    src: so-overrides.yaml.j2
+    dest: "{{ override_gating_component }}"
+  when: project == 'so'
+
+- name: save on which step we are
+  ansible.builtin.copy:
+    content: |
+      ---
+      deployment: {{ deployment_type }}
+    dest: "{{ deployment_file }}"
+
+- name: "[facts retrieved] get first node IP address (case ip not defined)"
+  ansible.builtin.set_fact:
+    first_node_ip: "{{
+      hostvars[groups['kube-node'].0].ansible_default_ipv4.address }}"
+  when: gather_nodes_fact
+
+- name: "[No Facts retrieved] get first node IP address (case ip not defined)"
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ hostvars[groups['kube-node'].0].ip }}"
+  when: not gather_nodes_fact
+
+- name: generate etc/hosts for utilities
+  become: "yes"
+  ansible.builtin.blockinfile:
+    path: /etc/hosts
+    marker: "# {mark} ANSIBLE MANAGED UTILITIES HOSTS"
+    block: |
+      {{ first_node_ip }} minio.minio
+      {{ first_node_ip }} {{ postgres_svc }}.{{ postgres_namespace }}
diff --git a/roles/oom_configure/templates/components-overrides.yaml.j2 b/roles/oom_configure/templates/components-overrides.yaml.j2
new file mode 100644
index 0000000..a58cbac
--- /dev/null
+++ b/roles/oom_configure/templates/components-overrides.yaml.j2
@@ -0,0 +1,153 @@
+---
+aaf:
+  enabled: true
+a1policymanagement:
+{% if component_enabled.a1policymanagement %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+appc:
+{% if component_enabled.appc %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+cli:
+{% if component_enabled.cli %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+consul:
+{% if component_enabled.consul %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+contrib:
+{% if component_enabled.contrib %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+dcaegen2:
+{% if component_enabled.dcaegen2 %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+dcaegen2-services:
+{% if component_enabled.dcaegen2_services %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+dcaemod:
+{% if component_enabled.dcaemod %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+esr:
+{% if component_enabled.esr %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+holmes:
+{% if component_enabled.holmes %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+log:
+{% if component_enabled.log %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+modeling:
+{% if component_enabled.modeling %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+msb:
+{% if component_enabled.msb %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+multicloud:
+{% if component_enabled.multicloud %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+nbi:
+{% if component_enabled.nbi %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+oof:
+{% if component_enabled.oof %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+policy:
+{% if component_enabled.policy %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+pomba:
+{% if component_enabled.pomba %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+portal:
+{% if component_enabled.portal %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+sniro-emulator:
+{% if component_enabled.sniro_emulator %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+uui:
+{% if component_enabled.uui %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+vfc:
+{% if component_enabled.vfc %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+vid:
+{% if component_enabled.vid %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+vnfsdk:
+{% if component_enabled.vnfsdk %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
+cds:
+{% if component_enabled.cds %}
+  enabled: true
+{% else %}
+  enabled: false
+{% endif %}
diff --git a/roles/oom_configure/templates/onap-overrides.yaml.j2 b/roles/oom_configure/templates/onap-overrides.yaml.j2
new file mode 100644
index 0000000..a2bb227
--- /dev/null
+++ b/roles/oom_configure/templates/onap-overrides.yaml.j2
@@ -0,0 +1,202 @@
+---
+global:
+  repository: {{ repository }}
+{% if proxy_for_dockerhub %}
+  dockerHubRepository: "{{ proxy_for_dockerhub }}"
+{% endif %}
+{% if proxy_for_k8s_gcr %}
+  googleK8sRepository: "{{ proxy_for_k8s_gcr }}"
+{% endif %}
+{% if proxy_for_elastic %}
+  elasticRepository: "{{ proxy_for_elastic }}"
+{% endif %}
+  flavor: {{ onap_flavor }}
+  masterPassword: gatingPassword
+{% if use_ingress %}
+  ingress:
+    enabled: true
+{% endif %}
+{% if use_servicemesh %}
+  serviceMesh:
+    enabled: true
+    tls: true
+  aafEnabled: false
+  cmpv2Enabled: false
+  tlsEnabled: false
+  msbEnabled: false
+{% endif %}
+
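+{# When use_global_storage is set, the charts below override their storage
+   class with the fast (RWO) or rwx (RWX) class defined under
+   os_infra.onap.global_storage. #}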
+{% if use_global_storage %}
+  persistence:
+    storageClass: {{ os_infra.onap.global_storage.class | default('-') }}
+{% endif %}
+{% if use_metrics %}
+  metrics:
+    enabled: true
+{% if use_custom_resources_metrics %}
+    custom_resources: true
+{% endif %}
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+aaf:
+  aaf-cass:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  aaf-sms:
+    aaf-sms-quorumclient:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+appc:
+  config:
+    enableClustering: false
+    openStackType: "OpenStackProvider"
+    openStackName: "OpenStack"
+    openStackKeyStoneUrl: {{ os_auth_url }}
+    openStackServiceTenantName: {{ openstack_service_tenant_name }}
+    openStackDomain: {{ clouds[openstack_user_name].auth.user_domain_name | default('Default') }}
+    openStackUserName: {{ openstack_user_name }}
+    openStackEncryptedPassword: "{{ encrypted_password }}"
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+  mariadb-galera:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+cassandra:
+  liveness:
+    initialDelaySeconds: 30
+    timeoutSeconds: 30
+    periodSeconds: 120
+  readiness:
+    initialDelaySeconds: 30
+    timeoutSeconds: 30
+    periodSeconds: 60
+  startup:
+    initialDelaySeconds: 30
+    periodSeconds: 30
+    timeoutSeconds: 120
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+  persistence:
+    storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.rwx_class is defined %}
+cds:
+  cds-blueprints-processor:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+contrib:
+  netbox:
+    netbox-app:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+dcaegen2:
+  dcae-bootstrap:
+    postgres:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  dcae-dashboard:
+    postgres:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  dcae-inventory-api:
+    postgres:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  dcae-redis:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+dcaegen2-services:
+  dcae-datafile-collector:
+    enabled: false
+  dcae-pm-mapper:
+    enabled: false
+{% if use_global_storage and os_infra.onap.global_storage.rwx_class is defined %}
+dcaemod:
+  dcaemod-genprocessor:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.rwx_class }}"
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+dmaap:
+  dmaap-bc:
+    postgres:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  dmaap-dr-prov:
+    mariadb-galera:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  message-router:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+mariadb-galera:
+  persistence:
+    storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+modeling:
+  mariadb-galera:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+nbi:
+  config:
+    openStackRegion: {{ clouds[openstack_user_name].region_name }}
+    openStackVNFTenantId: {{ tenant_id }}
+    cloudOwner: {{ details.pod_owner }}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+oof:
+  music:
+    music-cassandra:
+      persistence:
+        storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+  zookeeper:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+robot:
+  config:
+    openStackEncryptedPasswordHere: "{{ robot_encrypted_password }}"
+{% if use_ingress %}
+  useIngressHost:
+    enabled: true
+{% endif %}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+sdc:
+  sdc-es:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
+so:
+  so-catalog-db-adapter:
+    config:
+      openStackUserName: {{ openstack_user_name }}
+      openStackRegion: {{ clouds[openstack_user_name].region_name }}
+      openStackKeyStoneUrl: {{ os_auth_url }}
+      openStackServiceTenantName: {{ openstack_service_tenant_name }}
+      openStackEncryptedPasswordHere: "{{ encrypted_password }}"
+      openStackTenantId: {{ tenant_id }}
+      openStackKeystoneVersion: "KEYSTONE_V3"
+      openStackProjectDomainName:
+        {{ clouds[openstack_user_name].auth.project_domain_name | default('Default') }}
+      openStackUserDomainName:
+        {{ clouds[openstack_user_name].auth.user_domain_name | default('Default') }}
+  so-mariadb:
+    config:
+      # gerrit branch where the latest heat code is checked in
+      gerritBranch: {{ branch }}
+{% if use_global_storage and os_infra.onap.global_storage.fast_class is defined %}
+vfc:
+  mariadb-galera:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+vid:
+  mariadb-galera:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+vnfsdk:
+  postgres:
+    persistence:
+      storageClassOverride: "{{ os_infra.onap.global_storage.fast_class }}"
+{% endif %}
diff --git a/roles/oom_configure/templates/so-overrides.yaml.j2 b/roles/oom_configure/templates/so-overrides.yaml.j2
new file mode 100644
index 0000000..837f04c
--- /dev/null
+++ b/roles/oom_configure/templates/so-overrides.yaml.j2
@@ -0,0 +1,63 @@
+---
+global:
+  soBaseImage: orange-opensource/lfn/onap/build-so/base-image:1.0
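+# '&gitlabRegistry' and '&soHelpers' below define YAML anchors; each component
+# entry reuses them via '*gitlabRegistry' / '*soHelpers' so the registry and
+# helper overrides are only written once.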
+so:
+  repositoryOverride: &gitlabRegistry registry.gitlab.com
+  image: orange-opensource/lfn/onap/build-so/api-handler-infra:{{ gerrit_review }}-{{ gerrit_patchset }}
+  soHelpers: &soHelpers
+    repositoryOverride: {{ repository }}
+    certInitializer:
+      repositoryOverride: {{ repository }}
+  so-bpmn-infra:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/bpmn-infra:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-catalog-db-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/catalog-db-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-cnf-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/mso-cnf-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-etsi-nfvo-ns-lcm:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/so-etsi-nfvo-ns-lcm:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-monitoring:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/so-monitoring:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-nssmf-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/nssmf-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-oof-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/so-oof-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-openstack-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/openstack-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-request-db-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/request-db-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-sdc-controller:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/sdc-controller:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-sdnc-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/sdnc-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-vfc-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/vfc-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
+  so-vnfm-adapter:
+    repositoryOverride: *gitlabRegistry
+    image: orange-opensource/lfn/onap/build-so/vnfm-adapter:{{ gerrit_review }}-{{ gerrit_patchset }}
+    soHelpers: *soHelpers
diff --git a/roles/oom_generate_artifacts/defaults/main.yaml b/roles/oom_generate_artifacts/defaults/main.yaml
new file mode 100644
index 0000000..2e8b012
--- /dev/null
+++ b/roles/oom_generate_artifacts/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+aai_server: aai.api.sparky.simpledemo.onap.org
+aai_port: 30233
+aai_user: AAI
+aai_password: AAI
+msb_server: msb.api.simpledemo.onap.org
+msb_port: 30280
diff --git a/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml b/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml
new file mode 100644
index 0000000..aa2afcf
--- /dev/null
+++ b/roles/oom_generate_artifacts/tasks/loadbalancer_facts.yaml
@@ -0,0 +1,71 @@
+---
+- name: retrieve istio-ingressgateway device information
+  command: "kubectl get svc -o json -n istio-system istio-ingressgateway"
+  register: ingress_gw
+  changed_when: "false"
+  when: use_servicemesh
+
+- name: get IP of istio ingress gateway loadbalancer
+  ansible.builtin.set_fact:
+    first_node_ip: "{{
+      (ingress_gw.stdout|from_json).status.loadBalancer.ingress.0.ip }}"
+  when: use_servicemesh
+
+- name: retrieve portal device information
+  command: "kubectl get svc -o json -n {{ onap_namespace }} portal-app"
+  register: portal
+  changed_when: "false"
+  when: portal_enabled and not use_servicemesh
+
+- name: get IP of portal loadbalancer
+  ansible.builtin.set_fact:
+    portal_lb: "{{
+      (portal.stdout|from_json).status.loadBalancer.ingress.0.ip }}"
+  ignore_errors: yes
+  register: portal_lb_ip
+  when: portal_enabled and not use_servicemesh
+
+- name: get external IP of portal loadbalancer
+  ansible.builtin.set_fact:
+    portal_lb: "{{ (portal.stdout|from_json).spec.externalIPs.0 }}"
+  ignore_errors: "yes"
+  register: portal_external_ip_check
+  when: portal_enabled and not use_servicemesh and
+        ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0))
+
+- name: "[Facts retrieved] get first node IP address (case ip not defined)"
+  ansible.builtin.set_fact:
+    first_node_ip: "{{
+      hostvars[groups['kube-node'].0].ansible_default_ipv4.address }}"
+  when: gather_nodes_fact and not use_servicemesh
+
+- name: "[No Facts retrieved] get first node IP address (case ip not defined)"
+  ansible.builtin.set_fact:
+    first_node_ip: "{{ hostvars[groups['kube-node'].0].ip }}"
+  when: not gather_nodes_fact and not use_servicemesh
+
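+# When no loadbalancer IP could be determined, pick a free address for the
+# portal service: collect the addresses the cluster nodes already use,
+# enumerate the node network, and choose a random unused one.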
+- block:
+    - name: list all used ips
+      ansible.builtin.set_fact:
+        used_ips: "{{ used_ips|default([]) + [
+          hostvars[item].ansible_default_ipv4.address ~ '/' ~
+          ((hostvars[item].ansible_default_ipv4.network ~ '/' ~
+          hostvars[item].ansible_default_ipv4.netmask) |
+          ipaddr('prefix'))
+        ] }}"
+      loop: "{{ groups['k8s-cluster'] }}"
+
+    - name: generate network in ipaddr type
+      ansible.builtin.set_fact:
+        network: "{{ (ansible_default_ipv4.network ~ '/' ~
+          ansible_default_ipv4.netmask) | ipaddr('net') }}"
+
+    - name: generate the list of addresses in network
+      ansible.builtin.set_fact:
+        addresses: "{{ addresses|default([]) + [network | ipaddr(item)] }}"
+      loop: "{{ range(1, network | ipaddr('size') - 1) | list }}"
+
+    - name: pick a random address for portal
+      ansible.builtin.set_fact:
+        portal_lb: "{{ addresses | difference(used_ips) | random }}"
+  when: gather_nodes_fact and
+        portal_enabled and not use_servicemesh and
+        ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0))
diff --git a/roles/oom_generate_artifacts/tasks/main.yaml b/roles/oom_generate_artifacts/tasks/main.yaml
new file mode 100644
index 0000000..55559ed
--- /dev/null
+++ b/roles/oom_generate_artifacts/tasks/main.yaml
@@ -0,0 +1,82 @@
+---
+- name: generate load balancer facts
+  import_tasks: loadbalancer_facts.yaml
+
+- name: patch portal-app service with the chosen external IP
+  command: |
+    kubectl patch svc portal-app -p \
+    '{"spec":{"externalIPs":["{{ portal_lb | ipaddr('address') }}"] }}' \
+    -n {{ onap_namespace }}
+  when: gather_nodes_fact and
+        portal_enabled and
+        ((portal_lb_ip is not defined) or (portal_lb_ip|length == 0)) and
+        ((portal_external_ip_check is not defined) or
+        portal_external_ip_check.failed)
+
+- name: generate etc/hosts
+  become: "yes"
+  ansible.builtin.blockinfile:
+    path: /etc/hosts
+    marker: "# {mark} ANSIBLE MANAGED OOM HOSTS"
+    block: |
+      {{ first_node_ip }} portal.api.simpledemo.onap.org
+      {{ first_node_ip }} vid.api.simpledemo.onap.org
+      {{ first_node_ip }} sdc.api.fe.simpledemo.onap.org
+      {{ first_node_ip }} sdc.api.be.simpledemo.onap.org
+      {{ first_node_ip }} portal-sdk.simpledemo.onap.org
+      {{ first_node_ip }} policy.api.simpledemo.onap.org
+      {{ first_node_ip }} aai.api.sparky.simpledemo.onap.org
+      {{ first_node_ip }} cli.api.simpledemo.onap.org
+      {{ first_node_ip }} msb.api.simpledemo.onap.org
+      {{ first_node_ip }} so.api.simpledemo.onap.org
+      {{ first_node_ip }} appc.api.simpledemo.onap.org
+      {{ first_node_ip }} sdnc.api.simpledemo.onap.org
+      {{ first_node_ip }} nbi.api.simpledemo.onap.org
+      {{ first_node_ip }} consul.api.simpledemo.onap.org
+      {{ first_node_ip }} kibana.api.simpledemo.onap.org
+      {{ first_node_ip }} mr.api.simpledemo.onap.org
+      {{ first_node_ip }} uui.api.simpledemo.onap.org
+      {{ first_node_ip }} aaf.api.simpledemo.onap.org
+      {{ first_node_ip }} robot.api.simpledemo.onap.org
+      {{ first_node_ip }} dcae.api.simpledemo.onap.org
+      {{ first_node_ip }} sdc.workflow.plugin.simpledemo.onap.org
+      {{ first_node_ip }} sdc.dcae.plugin.simpledemo.onap.org
+
+- name: generate hosts file for Non-Ingress Setup
+  ansible.builtin.copy:
+    dest: "{{ playbook_dir }}/vars/hosts"
+    content: |
+      {{ first_node_ip }} portal.api.simpledemo.onap.org
+      {{ first_node_ip }} vid.api.simpledemo.onap.org
+      {{ first_node_ip }} sdc.api.fe.simpledemo.onap.org
+      {{ first_node_ip }} sdc.api.be.simpledemo.onap.org
+      {{ first_node_ip }} portal-sdk.simpledemo.onap.org
+      {{ first_node_ip }} policy.api.simpledemo.onap.org
+      {{ first_node_ip }} aai.api.sparky.simpledemo.onap.org
+      {{ first_node_ip }} cli.api.simpledemo.onap.org
+      {{ first_node_ip }} msb.api.simpledemo.onap.org
+      {{ first_node_ip }} so.api.simpledemo.onap.org
+      {{ first_node_ip }} appc.api.simpledemo.onap.org
+      {{ first_node_ip }} sdnc.api.simpledemo.onap.org
+      {{ first_node_ip }} nbi.api.simpledemo.onap.org
+      {{ first_node_ip }} consul.api.simpledemo.onap.org
+      {{ first_node_ip }} kibana.api.simpledemo.onap.org
+      {{ first_node_ip }} mr.api.simpledemo.onap.org
+      {{ first_node_ip }} uui.api.simpledemo.onap.org
+      {{ first_node_ip }} aaf.api.simpledemo.onap.org
+      {{ first_node_ip }} robot.api.simpledemo.onap.org
+      {{ first_node_ip }} dcae.api.simpledemo.onap.org
+      {{ first_node_ip }} sdc.workflow.plugin.simpledemo.onap.org
+      {{ first_node_ip }} sdc.dcae.plugin.simpledemo.onap.org
+  delegate_to: localhost
+
+- name: generate cluster config file
+  ansible.builtin.copy:
+    dest: "{{ playbook_dir }}/vars/cluster.yml"
+    content: |
+      oom_cluster_ip: {{ first_node_ip }}
+      onap_namespace: {{ onap_namespace }}
+      openstack_tenant_id: {{ openstack_tenant_id }}
+      openstack_tenant_name: {{ openstack_tenant_name }}
+      deployment_type: {{ deployment_type }}
+  delegate_to: localhost
diff --git a/roles/oom_launch/defaults/main.yaml b/roles/oom_launch/defaults/main.yaml
new file mode 100644
index 0000000..e64f60f
--- /dev/null
+++ b/roles/oom_launch/defaults/main.yaml
@@ -0,0 +1,7 @@
+onap_timeout: 900
+
+helm_env: {}
+
+helm_env_postgres:
+  HELM_DRIVER: sql
+  HELM_DRIVER_SQL_CONNECTION_STRING: "{{ postgres_url }}"
diff --git a/roles/oom_launch/tasks/main.yaml b/roles/oom_launch/tasks/main.yaml
new file mode 100644
index 0000000..9ed4144
--- /dev/null
+++ b/roles/oom_launch/tasks/main.yaml
@@ -0,0 +1,199 @@
+---
+- name: check if onap/Chart.yaml file exists
+  ansible.builtin.stat:
+    path: "{{ onap_chart_path }}"
+  register: onap_chart_stat
+
+- name: load onap/Chart.yaml
+  ansible.builtin.slurp:
+    src: "{{ onap_chart_path }}"
+  register: onap_chart_content
+  when: onap_chart_stat.stat.exists
+
+- name: set version according to release found in onap chart
+  set_fact:
+    onap_version:
+      "{{ (onap_chart_content['content'] | b64decode | from_yaml).version }}"
+  when: onap_chart_stat.stat.exists
+
+- name: show version that will be used
+  debug:
+    msg: "will deploy onap version {{ onap_version }}"
+
+- name: check if an environment.yaml exists
+  ansible.builtin.stat:
+    path: "{{ generic_override_path }}/environment.yaml"
+  register: environment_stat
+
+- name: set environment.yaml override
+  ansible.builtin.set_fact:
+    environment_override: "--values {{ generic_override_path }}/environment.yaml"
+  when: environment_stat.stat.exists
+
+- name: do not set environment.yaml override
+  ansible.builtin.set_fact:
+    environment_override: ""
+  when: not environment_stat.stat.exists
+
+- name: check if an onap-components.yaml exists
+  ansible.builtin.stat:
+    path: "{{ override_components }}"
+  register: component_stat
+
+- name: set onap-components.yaml override
+  ansible.builtin.set_fact:
+    component_override: "--values {{ override_components }}"
+  when: component_stat.stat.exists
+
+- name: do not set onap-components.yaml override
+  ansible.builtin.set_fact:
+    component_override: ""
+  when: not component_stat.stat.exists
+
+- name: check if a component-gating-overrides.yaml exists
+  ansible.builtin.stat:
+    path: "{{ override_gating_component }}"
+  register: gating_stat
+
+- name: set component-gating-overrides.yaml override
+  ansible.builtin.set_fact:
+    so_override: "--values {{ override_gating_component }}"
+  when: gating_stat.stat.exists and project == "so"
+
+- name: do not set component-gating-overrides.yaml override
+  ansible.builtin.set_fact:
+    so_override: ""
+  when: not gating_stat.stat.exists or project != "so"
+
+- name: check helm version
+  command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+  register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+  ansible.builtin.set_fact:
+    helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+  ansible.builtin.fail:
+    msg: HELM 3 not installed
+  when: not helmv3
+
+- name: set timeout
+  set_fact:
+    onap_timeout: "{{ onap_timeout }}s"
+
+- name: retrieve helm postgres secret
+  community.kubernetes.k8s_info:
+    api_version: v1
+    kind: Secret
+    name: "{{ postgres_secret_name }}"
+    namespace: "{{ postgres_namespace }}"
+  register: postgres_secrets
+  when: helmv3_use_sql|bool
+
+- name: retrieve helm postgres password
+  set_fact:
+    postgres_password: "{{
+      postgres_secrets.resources[0].data['postgresql-password'] | b64decode }}"
+  when: helmv3_use_sql|bool
+
+- name: set helm environment with postgres
+  set_fact:
+    helm_env: "{{ helm_env_postgres }}"
+  when: helmv3_use_sql|bool
+
+- name: update helm repo
+  command: "helm repo up"
+
+- name: create ONAP namespace
+  run_once: "yes"
+  community.kubernetes.k8s:
+    state: present
+    definition:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: "{{ onap_namespace }}"
+        labels:
+          istio-injection: "{{ (os_infra.onap.istioEnabled | default(true)) |
+            ternary ('enabled', 'disabled') }}"
+          name: "{{ onap_namespace }}"
+
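+# 'helm deploy' is not a builtin helm command: it is the OOM deploy plugin
+# installed earlier from oom/kubernetes/helm/plugins/deploy, which splits the
+# umbrella chart into per-component releases.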
+- name: generate command line for launch
+  set_fact:
+    helm_launch: >
+      helm deploy {{ chart_name }} local/onap
+      --namespace {{ onap_namespace }}
+      --version {{ onap_version }}
+      --values {{ onap_all_file }}
+      {{ environment_override }}
+      --values {{ override_file }}
+      {{ component_override }}
+      {{ so_override }}
+      --timeout {{ onap_timeout }}
+
+- name: show deploy execution command line
+  debug:
+    var: helm_launch
+
+- name: "[HELMv3] launch installation"
+  command: "{{ helm_launch }}"
+  register: yolo3
+  changed_when: true
+  async: 4800
+  poll: 0
+  when: helmv3
+  environment: "{{ helm_env }}"
+
+- name: "[HELMv3] wait for helm deploy to finish"
+  async_status:
+    jid: "{{ yolo3.ansible_job_id }}"
+  register: job_result3
+  until: job_result3.finished
+  retries: 480
+  delay: 10
+  when: helmv3
+
+- name: "[HELMv3] see output"
+  ansible.builtin.debug:
+    msg: "{{ job_result3.stdout }}"
+  when: helmv3
+
+- name: check if a deployment has already been done
+  ansible.builtin.stat:
+    path: "{{ deployment_file }}"
+  register: deployment_stat
+
+- name: get deployment.yaml
+  when: deployment_stat.stat.exists
+  block:
+    - name: create temporary local file for deployment.yaml
+      ansible.builtin.tempfile:
+        state: file
+        suffix: temp
+      register: tmp_deployment
+      delegate_to: "127.0.0.1"
+
+    - name: fetch deployment info
+      ansible.builtin.fetch:
+        dest: "{{ tmp_deployment.path }}"
+        src: "{{ deployment_file }}"
+        flat: "yes"
+
+    - name: load deployment info
+      include_vars:
+        file: "{{ tmp_deployment.path }}"
+
+  always:
+    - name: destroy the local tmp_deployment
+      ansible.builtin.file:
+        path: "{{ tmp_deployment.path }}"
+        state: absent
+      delegate_to: "127.0.0.1"
+
+- name: grab a beer
+  ansible.builtin.debug:
+    msg: " .:.\n _oOoOo\n \
+      [_|||||\n |||||\n ~~~~~"
diff --git a/roles/oom_postconfigure/defaults/main.yaml b/roles/oom_postconfigure/defaults/main.yaml
new file mode 100644
index 0000000..c164448
--- /dev/null
+++ b/roles/oom_postconfigure/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+aai_server: aai.api.sparky.simpledemo.onap.org
+aai_port: 30233
+aai_user: AAI
+aai_password: AAI
+msb_server: msb.api.simpledemo.onap.org
+msb_port: 30283
diff --git a/roles/oom_postconfigure/tasks/main.yaml b/roles/oom_postconfigure/tasks/main.yaml
new file mode 100644
index 0000000..b00c6b4
--- /dev/null
+++ b/roles/oom_postconfigure/tasks/main.yaml
@@ -0,0 +1,52 @@
+---
+- name: check if chartmuseum script exists
+  ansible.builtin.stat:
+    path: "{{ contrib_path }}/tools/registry-initialize.sh"
+  register: chartmuseum_script
+
+- name: wait for chartmuseum to be up
+  run_once: true
+  community.kubernetes.k8s_info:
+    kind: Deployment
+    wait: true
+    name: "{{ onap_release_name }}-chartmuseum"
+    namespace: "{{ onap_namespace }}"
+    wait_sleep: 10
+    wait_timeout: 600
+  register: chartmuseum_deployment
+
+- name: push base charts to the internal chartmuseum registry
+  run_once: true
+  ansible.builtin.shell: |
+    {{ contrib_path }}/tools/registry-initialize.sh -d {{ charts_path }}
+    for package in certInitializer repositoryGenerator readinessCheck postgres serviceAccount mongo common
+    do
+      {{ contrib_path }}/tools/registry-initialize.sh -d {{ charts_path }} -p $package
+    done
+  when: chartmuseum_script.stat.exists and
+        chartmuseum_deployment.resources|length > 0 and
+        chartmuseum_deployment.resources[0].status.availableReplicas > 0
+
+- name: fetch cloud config
+  ansible.builtin.fetch:
+    dest: /tmp/clouds.yaml
+    src: "{{ ansible_user_dir }}/.config/openstack/clouds.yaml"
+    flat: "yes"
+
+- name: load cloud config
+  include_vars: /tmp/clouds.yaml
+
+- name: initialize os_auth_url
+  ansible.builtin.set_fact:
+    os_auth_url: "{{ clouds[openstack_user_name].auth.auth_url }}"
+
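+# Same Keystone URL normalization as in oom_configure: ensure the URL ends
+# in '/v3' with no trailing slash.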
+- name: add v3 at end of os_auth_url
+  ansible.builtin.set_fact:
+    os_auth_url:
+      "{{ ((os_auth_url[-3:] == 'v3/') or (os_auth_url[-2:] == 'v3')) |
+      ternary(os_auth_url | regex_replace('/$', ''),
+      (os_auth_url[-1:] == '/') | ternary(
+        os_auth_url ~ 'v3',
+        os_auth_url ~ '/v3')) }}"
diff --git a/roles/oom_prepare/defaults/main.yaml b/roles/oom_prepare/defaults/main.yaml
new file mode 100644
index 0000000..89d5539
--- /dev/null
+++ b/roles/oom_prepare/defaults/main.yaml
@@ -0,0 +1,8 @@
+---
+helm_server_port: 8879
+helm_path: /usr/local/bin/helm
+chartmuseum_path: /usr/local/bin/chartmuseum
+# new values can be found here: https://github.com/fishworks/fish-food/blob/main/Food/chartmuseum.lua
+chartmuseum_version: v0.12.0
+chartmuseum_sha: 53402edf5ac9f736cb6da8f270f6bbf356dcbbe5592d8a09ee6f91a2dc30e4f6
+helm_push_version: v0.10.3
diff --git a/roles/oom_prepare/tasks/main.yaml b/roles/oom_prepare/tasks/main.yaml
new file mode 100644
index 0000000..043ec52
--- /dev/null
+++ b/roles/oom_prepare/tasks/main.yaml
@@ -0,0 +1,242 @@
+---
+- name: remove oom directory
+  ansible.builtin.file:
+    path: "{{ oom_path }}"
+    state: absent
+
+- name: set review_path (oom case)
+  ansible.builtin.set_fact:
+    review_path: "{{ oom_path }}"
+  when: project == 'oom'
+
+- name: "clone oom {{ branch }}"
+  ansible.builtin.git:
+    repo: "{{ oom_url }}"
+    dest: "{{ oom_path }}"
+    version: "{{ branch }}"
+
+- name: "configure git" # noqa 303
+  shell: |
+    git config --global user.email "You@example.com";
+    git config --global user.name "Your Name"
+  changed_when: "false"
+
+- name: override helm path for CoreOS
+  ansible.builtin.set_fact:
+    helm_path: /home/core/bin/helm
+  when: ansible_os_family | lower == "coreos"
+
+- name: retrieve review_path and clone when not in oom case
+  block:
+    - name: set review_path (not oom case)
+      ansible.builtin.set_fact:
+        review_path: "{{ oom_path }}/{{ project_dir_mapping[project] }}"
+
+    - name: ensure review directory is not there
+      ansible.builtin.file:
+        path: "{{ review_path }}"
+        state: absent
+
+    - name: "clone {{ project }} {{ branch }}"
+      ansible.builtin.git:
+        repo: "{{ onap_base_url }}/{{ project }}"
+        dest: "{{ review_path }}"
+        version: "{{ branch }}"
+  when: project != 'oom' and 'oom' in project
+
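+# Gerrit publishes each change under refs/changes/<last two digits of the
+# review number>/<review number>/<patchset>, e.g. refs/changes/43/112943/2.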
+- name: generate review end of url
+  ansible.builtin.set_fact:
+    review_end_url: "{{ gerrit_review[-2:] }}/{{ gerrit_review }}/\
+      {{ gerrit_patchset }}"
+  when: gerrit_review and 'oom' in project
+
+- name: "retrieve change branch for project {{ project }}" # noqa 303
+  shell:
+    cmd: >
+      git pull --no-edit {{ onap_base_url }}/{{ project }}
+      refs/changes/{{ review_end_url }}
+    chdir: "{{ review_path }}"
+  when: gerrit_review and 'oom' in project
+
+- name: "update submodules if needed for oom {{ project }}" # noqa 303
+  shell:
+    cmd: >
+      git submodule update
+    chdir: "{{ review_path }}"
+  when: gerrit_review and project == 'oom'
+
+- name: check helm version
+  command: "helm version --template {% raw %}'{{.Version}}'{% endraw %}"
+  register: helm_version
+
+# Return of previous command will be "v3.3.4" for v3 and up and "<no value>"
+# for version 2.
+- name: store helm version
+  ansible.builtin.set_fact:
+    helmv3: "{{ ('<' in helm_version.stdout) | ternary(false, true) }}"
+
+- name: "HELM 3 not installed - stop playbook"
+  ansible.builtin.fail:
+    msg: HELM 3 not installed
+  when: not helmv3
+
+- name: create .local/helm folder
+  ansible.builtin.file:
+    path: "{{ ansible_user_dir }}/.local/helm"
+    state: directory
+    recurse: "yes"
+
+- name: retrieve chartmuseum
+  become: true
+  ansible.builtin.get_url:
+    dest: "{{ chartmuseum_path }}"
+    url: "https://s3.amazonaws.com/chartmuseum/release/\
+      {{ chartmuseum_version }}/bin/linux/amd64/chartmuseum"
+    checksum: "sha256:{{ chartmuseum_sha }}"
+    mode: 0777
+
+- name: create chartmuseum folder
+  ansible.builtin.file:
+    path: "{{ ansible_user_dir }}/.chartstorage"
+    state: directory
+
+- name: create .local/chartmuseum folder
+  ansible.builtin.file:
+    path: "{{ ansible_user_dir }}/.local/chartmuseum"
+    state: directory
+    recurse: "yes"
+
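+# chartmuseum acts as the local helm repository (the 'local' and 'onap' repos
+# added below) that the compiled OOM charts are pushed to and pulled from.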
+- name: start helm server
+  become: "yes"
+  shell: "start-stop-daemon --start --background --oknodo \
+    --chuid {{ ansible_user_uid }} --group {{ ansible_user_gid }} \
+    --exec {{ chartmuseum_path }} -- --port={{ helm_server_port }} \
+    --storage='local' --allow-overwrite --debug \
+    --storage-local-rootdir='{{ ansible_user_dir }}/.chartstorage' \
+    > {{ ansible_user_dir }}/.local/chartmuseum/chartmuseum.log 2>&1"
+  changed_when: "true"
+
+- name: list all helm repositories
+  command: "helm repo list -o json"
+  register: repos
+
+- name: remove all helm repositories
+  community.kubernetes.helm_repository:
+    name: "{{ item }}"
+    state: absent
+  loop: "{{ repos.stdout | from_json | map(attribute='name') | list }}"
+
+- name: add helm local repository
+  community.kubernetes.helm_repository:
+    name: local
+    repo_url: "http://127.0.0.1:{{ helm_server_port }}"
+
+- name: add helm onap repository
+  community.kubernetes.helm_repository:
+    name: onap
+    repo_url: "http://127.0.0.1:{{ helm_server_port }}"
+
+- name: check if Helm cm-push plugin is installed
+  shell: "helm plugin list | grep cm-push | grep 0.10 | wc -l"
+  register: helm_plugin_cm_push
+  changed_when: "false"
+
+- name: Install Helm cm-push plugin
+  command: "helm plugin install --version {{ helm_push_version }} https://github.com/chartmuseum/helm-push.git"
+  changed_when: "true"
+  when: helm_plugin_cm_push.stdout == "0"
+
+- name: Install Helm deploy plugin
+  community.kubernetes.helm_plugin:
+    plugin_path: "{{ oom_path }}/kubernetes/helm/plugins/deploy"
+    namespace: default
+    state: present
+
+- name: Install Helm undeploy plugin
+  community.kubernetes.helm_plugin:
+    plugin_path: "{{ oom_path }}/kubernetes/helm/plugins/undeploy"
+    namespace: default
+    state: present
+
+- name: Add Kafka Strimzi repository
+  community.kubernetes.helm_repository:
+    name: strimzi
+    repo_url: https://strimzi.io/charts/
+
+- name: Install kafka strimzi
+  community.kubernetes.helm:
+    name: strimzi-kafka-operator
+    chart_ref: strimzi/strimzi-kafka-operator
+    release_namespace: strimzi-system
+    create_namespace: true
+    chart_version: "{{ strimzi_version }}"
+    values:
+      watchAnyNamespace: True
+
+- name: compile helm packages
+  command: "make SKIP_LINT=TRUE all"
+  async: 3600
+  poll: 0
+  changed_when: "true"
+  args:
+    chdir: "{{ oom_path }}/kubernetes"
+  register: make_helm
+
+- name: "wait for helm compile to finish"
+  async_status:
+    jid: "{{ make_helm.ansible_job_id }}"
+  register: job_result
+  until: job_result.finished
+  retries: 360
+  delay: 10
+
+- name: "[review case] generate helm make logs filename"
+  set_fact:
+    helm_log: "make-{{ gerrit_review }}-{{ gerrit_patchset }}.log"
+  when: gerrit_review
+
+- name: "[normal case] generate helm make logs filename"
+  set_fact:
+    helm_log: "make-{{ branch }}.log"
+  when: not gerrit_review
+
+- name: save helm package output
+  copy:
+    dest: "{{ ansible_user_dir }}/.local/helm/{{ helm_log }}"
+    content: "{{ job_result.stdout }}"
+
+- name: "[WORKAROUND] readd helm local repository"
+  command: "helm repo add local http://127.0.0.1:{{ helm_server_port }}"
+  when: not helmv3
+
+- name: check if user clouds exists
+  stat:
+    path: "{{ base_dir }}/vars/user_cloud.yml"
+  delegate_to: localhost
+  register: stat
+
+- name: get user clouds
+  block:
+    - name: include user clouds info
+      include_vars:
+        file: "{{ base_dir }}/vars/user_cloud.yml"
+        name: user_cloud
+
+    - name: retrieve OpenStack user name
+      set_fact:
+        openstack_user_name: "{{ user_cloud | list | first }}"
+
+    - name: retrieve OpenStack information
+      set_fact:
+        openstack_tenant_name:
+          "{{ user_cloud[openstack_user_name].auth.project_name }}"
+        os_auth_url:
+          "{{ user_cloud[openstack_user_name].auth.auth_url }}"
+
+    - name: generate openstack info file
+      copy:
+        content: |
+          openstack_user_name: {{ openstack_user_name }}
+          openstack_tenant_name: {{ openstack_tenant_name }}
+        dest: "{{ base_dir }}/vars/openstack_infos.yml"
+      delegate_to: localhost
diff --git a/roles/oom_wait/tasks/main.yaml b/roles/oom_wait/tasks/main.yaml
new file mode 100644
index 0000000..924e526
--- /dev/null
+++ b/roles/oom_wait/tasks/main.yaml
@@ -0,0 +1,40 @@
+---
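+# Poll until no pod in the namespace is in ContainerCreating anymore;
+# 'grep -c ... || true' returns '0' once the count reaches zero, which
+# terminates the retry loop.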
+- name: wait for all containers to be started
+  shell:
+    "set -o pipefail && \
+    kubectl get po -n {{ onap_namespace }} | \
+    grep -c ContainerCreating || true"
+  args:
+    executable: /bin/bash
+  register: kube
+  changed_when:
+    kube.stdout == '0'
+  until:
+    kube.stdout == '0'
+  retries: 1000
+  delay: 10
+
+- name: wait for all containers to be initialized
+  shell:
+    "set -o pipefail && \
+    kubectl get po -n {{ onap_namespace }} | \
+    grep Init | grep -cv Error || true"
+  args:
+    executable: /bin/bash
+  register: kube
+  changed_when:
+    kube.stdout == '0'
+  until:
+    kube.stdout == '0'
+  retries: 1000
+  delay: 10
+
+- name: get result
+  shell:
+    "kubectl get po -n {{ onap_namespace }}"
+  changed_when: "false"
+  register: kube
+
+- name: show result
+  ansible.builtin.debug:
+    msg: "{{ kube.stdout }}"
diff --git a/roles/prepare_ci/defaults/main.yaml b/roles/prepare_ci/defaults/main.yaml
new file mode 100644
index 0000000..34cfc01
--- /dev/null
+++ b/roles/prepare_ci/defaults/main.yaml
@@ -0,0 +1,6 @@
+---
+ci_packages: []
+ci_packages_to_be_removed:
+  - python3-yaml
+ci_python3_packages: []
+proxy_env: {}
diff --git a/roles/prepare_ci/tasks/install_DEBIAN.yaml b/roles/prepare_ci/tasks/install_DEBIAN.yaml
new file mode 100644
index 0000000..9537976
--- /dev/null
+++ b/roles/prepare_ci/tasks/install_DEBIAN.yaml
@@ -0,0 +1,11 @@
+---
+- name: load os specific configuration
+  include_vars: "debian.yaml"
+  when: ansible_os_family | lower == "debian"
+
+- name: "[Debian] install needed packages"
+  include_role:
+    name: apt_install
+  vars:
+    environment: "{{ proxy_env }}"
+    packages: "{{ ci_packages }}"
diff --git a/roles/prepare_ci/tasks/main.yaml b/roles/prepare_ci/tasks/main.yaml
new file mode 100644
index 0000000..a008fd1
--- /dev/null
+++ b/roles/prepare_ci/tasks/main.yaml
@@ -0,0 +1,57 @@
+---
+- name: load os specific configuration
+  include_vars: "{{ ansible_os_family | lower }}.yaml"
+  when: ansible_os_family | lower == "debian"
+
+- name: "[Debian] install needed packages"
+  include_role:
+    name: apt_install
+  vars:
+    environment: "{{ proxy_env }}"
+    packages: "{{ ci_packages }}"
+  when: ansible_os_family | lower == "debian"
+
+- name: "[Non Debian] install needed packages"
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: present
+  loop: "{{ ci_packages }}"
+  when: ansible_os_family | lower != "debian"
+
+# Workaround
+# Conflict between the python3-yaml installed with the package manager and
+# the one installed by pip: the distro package prevents pip's from installing.
+# Observed on daily/weekly on the 26th of June
+# ERROR: Cannot uninstall 'PyYAML'. It is a distutils installed project and
+# thus we cannot accurately determine which files belong to it which would lead
+# to only a partial uninstall.
+# As a workaround, we force the uninstallation of the python3-yaml package
+# before starting the installation
+- name: "[Debian] remove unexpected packages"
+  ansible.builtin.apt:
+    name: "{{ item }}"
+    state: absent
+  loop: "{{ ci_packages_to_be_removed }}"
+  when: ansible_os_family | lower == "debian"
+
+- name: "[Non Debian] remove unexpected packages"
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: absent
+  loop: "{{ ci_packages_to_be_removed }}"
+  when: ansible_os_family | lower != "debian"
+# End of Workaround
+
+- name: "[Python 3] install needed python packages"
+  pip:
+    name: "{{ item }}"
+    state: present
+  loop: "{{ ci_python3_packages }}"
+  when: ansible_python_version is version('3', '>=')
+
+- name: allow oom_path parent directory to be usable by user
+  ansible.builtin.file:
+    path: "{{ oom_path.split('/')[0:-1] | join('/') }}"
+    state: directory
+    mode: 0777
diff --git a/roles/prepare_ci/vars/debian.yaml b/roles/prepare_ci/vars/debian.yaml
new file mode 100644
index 0000000..5b9029c
--- /dev/null
+++ b/roles/prepare_ci/vars/debian.yaml
@@ -0,0 +1,18 @@
+---
+ci_packages:
+  - jq
+  - build-essential
+  - libffi-dev
+  - git
+  - python3-pip
+  - rsync
+ci_python3_packages:
+  - openshift==0.11.2
+  - stevedore==1.32.0
+  - dogpile.cache==0.6.5
+  - openstacksdk==0.43.0
+  - shade==1.33.0
+  - os-client-config==2.0.0
+  - python-openstackclient==5.2.1
+  - python-heatclient==1.18.0
+  - jsonschema