author     Michal Ptacek <m.ptacek@partner.samsung.com>    2019-09-04 15:39:20 +0000
committer  Gerrit Code Review <gerrit@onap.org>            2019-09-04 15:39:20 +0000
commit     1da8b7af533aa48450d42219f0615d0bb510da4a
tree       827fc4fa9da8d685d6d7960978ce99680e8f7d31 /tools/cicdansible/roles
parent     8620b2be770895ffca1385f3a8a57b9422ecf126
parent     2e34522351d40edd0e37b4919630736748949f2a
Merge changes from topic "OOM-2042"
* changes:
Add ansible configuration file
Add cicdansible playbook
Add onap installation role
Add onap instance configuration role
Add ansible role to deploy onap infrastructure on openstack
Add floating ip fact retrieval module
Add inventory for cicdansible playbook
Add heat template to deploy onap infrastructure
Add the .gitignore for cicdansible
Diffstat (limited to 'tools/cicdansible/roles')
16 files changed, 291 insertions, 0 deletions
diff --git a/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml
new file mode 100644
index 00000000..f3c54ca3
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml
@@ -0,0 +1,10 @@
+#Configure access to cicd docker registry.
+- name: "Ensure that docker config directory exists"
+  file:
+    path: /etc/docker
+    mode: 0700
+    state: directory
+- name: "Allow insecure access to cicd docker registry"
+  template:
+    src: daemon.json.j2
+    dest: /etc/docker/daemon.json
diff --git a/tools/cicdansible/roles/configure_instances/tasks/general.yml b/tools/cicdansible/roles/configure_instances/tasks/general.yml
new file mode 100644
index 00000000..6ed9982e
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/general.yml
@@ -0,0 +1,26 @@
+#General instance configuration.
+#Modify /etc/hosts on every instance to add every instance there including itself.
+- name: "Add hosts to /etc/hosts"
+  lineinfile:
+    path: /etc/hosts
+    insertafter: EOF
+    regexp: "^[^ ]+ {{ item }}$"
+    state: present
+    line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
+  loop: "{{ groups['instances'] }}"
+#Copy private ssh key to instances for easy connecting between them.
+- name: "Ensure ssh directory exists"
+  file:
+    path: /root/.ssh
+    owner: root
+    group: root
+    mode: 0700
+    state: directory
+- name: "Install ssh private key"
+  copy:
+    src: "{{ ansible_private_key_file }}"
+    dest: /root/.ssh/id_rsa
+    mode: 0400
+#Add public ssh host keys of all instances to trust them.
+- name: "Add host keys of instances to known_hosts"
+  shell: "ssh-keyscan {{ groups['instances'] | join(' ') }} > /root/.ssh/known_hosts"
diff --git a/tools/cicdansible/roles/configure_instances/tasks/main.yml b/tools/cicdansible/roles/configure_instances/tasks/main.yml
new file mode 100644
index 00000000..fe5b4b7d
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/main.yml
@@ -0,0 +1,5 @@
+#Initial instance configuration.
+- include_tasks: general.yml
+#Configure cicd registry access, but skip installer.
+- include_tasks: cicd_registry.yml
+  when: "inventory_hostname != 'installer'"
diff --git a/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
new file mode 100644
index 00000000..1c3ca9bb
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
@@ -0,0 +1,3 @@
+{
+"insecure-registries": ["{{ cicd_docker_registry }}"]
+}
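The configure_instances role above registers every instance in /etc/hosts, distributes the ssh key, and allows insecure access to the cicd docker registry. The play that applies it is added separately in this topic ("Add cicdansible playbook") and is not part of this diff; a minimal sketch of such a play, for illustration only:

- name: "Configure deployed instances"
  hosts: instances
  become: true
  roles:
    - configure_instances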
+- name: "Download software resources" + fetch: + src: "{{ resources_dir }}/{{ resources_sw_filename }}" + flat: yes + dest: "resources/" diff --git a/tools/cicdansible/roles/install/tasks/install.yml b/tools/cicdansible/roles/install/tasks/install.yml new file mode 100644 index 00000000..35df7976 --- /dev/null +++ b/tools/cicdansible/roles/install/tasks/install.yml @@ -0,0 +1,48 @@ +#Onap installation tasks +#Copy ssh private key used for resource server access +- name: "Copy resource server access key" + copy: + src: "{{ hostvars[groups['resources'][0]].ansible_private_key_file }}" + dest: "{{ ansible_user_dir }}/.ssh/res.pem" + mode: 0600 +#Unarchive resources. +- name: "Ensure {{ installer_deploy_path }} directory exists" + file: + path: "{{ installer_deploy_path }}" + state: directory +- name: "Extract sw resources" + unarchive: + src: "resources/{{ hostvars[groups['resources'][0]].resources_sw_filename }}" + dest: "{{ installer_deploy_path }}" +#Generate ansible inventory and extra vars. +- name: "Generate ansible inventory for installer" + template: + src: inventory.yml.j2 + dest: "{{ installer_deploy_path }}/ansible/inventory/hosts.yml" +- name: "generate application specific config overrides" + copy: + content: "{{ application_config | b64decode }}" + dest: "{{ installer_deploy_path }}/ansible/application/application_overrides.yml" +# This generates a file with locations of resource files in resource host, we +# do it only to allow manually running offline installer without +# typing them by hand. We cannot use +# inventory template because it will be overridden +# by application_configuration.yml. +- name: Generate resource location file + copy: + content: | + resources_dir: {{ resources_dir }} + resources_filename: {{ resources_filename }} + aux_resources_filename: {{ aux_resources_filename }} + app_data_path: /opt/onap/resources + dest: "{{ installer_deploy_path }}/ansible/application/resources.yml" +#Run script. 
+- name: "Execute installation" + shell: + ./run_playbook.sh + -e @application/application_configuration.yml -e @application/application_overrides.yml + -e @application/resources.yml -i inventory/hosts.yml site.yml + args: + chdir: "{{ installer_deploy_path }}/ansible" + async: "{{ install_timeout }}" + when: install_app diff --git a/tools/cicdansible/roles/install/tasks/main.yml b/tools/cicdansible/roles/install/tasks/main.yml new file mode 100644 index 00000000..04ac4c3d --- /dev/null +++ b/tools/cicdansible/roles/install/tasks/main.yml @@ -0,0 +1 @@ +- include_tasks: "{{ mode }}.yml" diff --git a/tools/cicdansible/roles/install/templates/inventory.yml.j2 b/tools/cicdansible/roles/install/templates/inventory.yml.j2 new file mode 100644 index 00000000..36bf3bd3 --- /dev/null +++ b/tools/cicdansible/roles/install/templates/inventory.yml.j2 @@ -0,0 +1,36 @@ +all: + vars: + ansible_ssh_private_key_file: /root/.ssh/id_rsa + ansible_ssh_common_args: "-o StrictHostKeyChecking=no" + children: + resources: + vars: + ansible_ssh_private_key_file: /root/.ssh/res.pem + ansible_user: "{{ hostvars[groups['resources'][0]].ansible_user }}" + ansible_become: "{{ hostvars[groups['resources'][0]].ansible_become }}" + hosts: + resource_host: + ansible_host: {{ resource_host }} + infrastructure: + hosts: + infra_host: + ansible_host: infra + cluster_ip: {{ hostvars['infra'].ansible_default_ipv4.address }} + kubernetes: + children: + kubernetes-node: + hosts: +{% for h in groups['nodes'] %} + {{ h }}: + ansible_host: "{{ hostvars[h].ansible_default_ipv4.address }}" + cluster_ip: "{{ hostvars[h].ansible_default_ipv4.address }}" +{% endfor %} + kubernetes-control-plane: + hosts: + infra_host + kubernetes-etcd: + hosts: + infra_host + nfs-server: + hosts: + node0 diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml new file mode 100644 index 00000000..44de5795 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml @@ -0,0 +1,11 @@ +#Openstack specific configuration running on instances. +#Get volumes. +- name: "get volume info" + set_fact: + volumes: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'volumes') | list).0.output_value[inventory_hostname] | default([]) }}" +- name: "Configure volumes" + include_tasks: configure/volume.yml + vars: + volume_id: "{{ item[0] }}" + mountpoint: "{{ item[1] }}" + loop: "{{ volumes }}" diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml new file mode 100644 index 00000000..8c553850 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml @@ -0,0 +1,47 @@ +#Configure a single openstack volume. +- name: "Set volume path" + set_fact: + volume_path: "/dev/disk/by-id/virtio-{{ volume_id | truncate(20, True, '') }}" +- name: "Set partition path" + set_fact: + partition_path: "{{ volume_path }}-part1" +- name: "Wait for volume" + #We do not do it normally, because we want to trigger udev (workaround for some bugs). 
+ shell: "udevadm trigger && udevadm settle && [[ -b {{ volume_path }} ]]" + register: result + retries: 30 + delay: 10 + until: result.rc == 0 +- name: "Partition volume" + parted: + device: "{{ volume_path }}" + number: 1 + label: msdos + flags: boot + part_type: primary + state: present +- name: "Wait for partition to appear" + stat: + path: "{{ partition_path }}" + follow: true + register: part_stat + delay: 1 + retries: 5 + until: part_stat.stat.isblk is defined and part_stat.stat.isblk +- name: "Create xfs filesystem on volume" + filesystem: + dev: "{{ partition_path }}" + type: xfs +- name: "Ensure that the mountpoint exists" + file: + path: "{{ mountpoint }}" + owner: root + group: root + mode: 0755 + state: directory +- name: "Mount filesystem" + mount: + src: "{{ partition_path }}" + path: "{{ mountpoint }}" + fstype: xfs + state: mounted diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml new file mode 100644 index 00000000..2bfeda77 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml @@ -0,0 +1,36 @@ +#Tasks for stack redeployment. +#Delete the heat stack before deployment. +- name: "delete deployment to force redeploy" + os_stack: + auth: "{{ os_auth }}" + auth_type: token + name: "{{ stack_name }}" + state: absent +#Deploy heat stack with infrastructure. +- name: "Deploy the infrastructure via heat" + os_stack: + auth: "{{ os_auth }}" + auth_type: token + name: "{{ stack_name }}" + template: "heat/installer.yaml" + state: present + environment: + - "heat/installer.env" + parameters: + num_nodes: "{{ num_nodes }}" + public_network_name: "{{ public_network }}" + external_subnet_cidr: "{{ external_subnet_cidr }}" + subnet_cidr: "{{ subnet_cidr }}" + subnet_range_start: "{{ subnet_range_start }}" + subnet_range_end: "{{ subnet_range_end }}" + router_addr: "{{ router_addr }}" + auth_key: "{{ auth_public_key }}" + image_name: "{{ image_name }}" + node_flavor_name: "{{ node_flavor_name }}" + infra_flavor_name: "{{ infra_flavor_name }}" + installer_flavor_name: "{{ installer_flavor_name }}" + node_ip: "{{ floating_ips_by_address[first_node_ip].id }}" + infra_ip: "{{ floating_ips_by_address[infra_ip].id }}" + installer_ip: "{{ floating_ips_by_address[installer_ip].id }}" + wait: true + register: heat_stack diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml new file mode 100644 index 00000000..324f5374 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml @@ -0,0 +1,8 @@ +--- +#This mode expects some variables, and deploys infrastructure on open stack. +#Execute prerequisites. +- include_tasks: deploy/prereq.yml +#Deploy stack. +- include_tasks: deploy/heat.yml +#Register instances in inventory. +- include_tasks: deploy/register_instances.yml diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml new file mode 100644 index 00000000..2fe8717a --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml @@ -0,0 +1,41 @@ +#Prerequisite tasks before stack deployment. +#Authenticate to cloud. 
+- name: "authenticate to cloud" + os_auth: + auth: + auth_url: "{{ os_auth_url }}" + username: "{{ os_username }}" + password: "{{ os_password }}" + domain_name: "{{ os_domain_name }}" + project_name: "{{ os_project_name }}" + project_domain_name: "{{ os_domain_name }}" +#Will use the token from this point on. +- name: "set token" + set_fact: + os_auth: + auth_url: "{{ os_auth_url }}" + token: "{{ auth_token }}" + project_name: "{{ os_project_name }}" + project_domain_name: "{{ os_domain_name }}" +#Retrieve floating ip info. +- name: "get floating ip facts" + os_floating_ips_facts: + auth: "{{ os_auth }}" + auth_type: token + network: "{{ public_network }}" +#Group floating ips by ip address to allow looking them up. +- name: "group floating ips by address" + set_fact: + floating_ips_by_address: "{{ floating_ips_by_address | default({}) | combine({item.floating_ip_address: item}) }}" + loop: "{{ query('items', openstack_floating_ips) }}" +- name: "fail if required floating ips do not exist" + fail: msg="The required floating ips do not exist" + when: "(not (first_node_ip in floating_ips_by_address) + or not (infra_ip in floating_ips_by_address) + or not (installer_ip in floating_ips_by_address))" +#Get a ssh public key to be passed to heat, it requires ssh-keygen with -y option. +- name: "Retrieve public key from ssh private key" + command: "ssh-keygen -y -f {{ hostvars['installer'].ansible_private_key_file }}" + register: public_key_generation +- set_fact: + auth_public_key: "{{ public_key_generation.stdout }}" diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml new file mode 100644 index 00000000..a50ecd22 --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml @@ -0,0 +1,9 @@ +#Register instances as hosts in inventory. +#Installer and infra are statically registered. +#Register node instances dynamically. +- name: "Register node instances" + add_host: + name: "node{{ item[0] }}" + groups: nodes + ansible_host: "{{ item[1] }}" + loop: "{{ query('indexed_items', (heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'node_ips') | list).0.output_value) }}" diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml new file mode 100644 index 00000000..7a00abff --- /dev/null +++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml @@ -0,0 +1 @@ +- include_tasks: "{{ mode }}/main.yml" |