-rw-r--r--  tools/cicdansible/.gitignore                                                     |   2
-rw-r--r--  tools/cicdansible/ansible.cfg                                                    |  18
-rw-r--r--  tools/cicdansible/group_vars/all.yml                                             |  63
-rw-r--r--  tools/cicdansible/group_vars/instances.yml                                       |  11
-rw-r--r--  tools/cicdansible/group_vars/nodes.yml                                           |   5
-rw-r--r--  tools/cicdansible/group_vars/resources.yml                                       |   6
-rw-r--r--  tools/cicdansible/heat/config.yaml                                               |  10
-rw-r--r--  tools/cicdansible/heat/installer.env                                             |   1
-rw-r--r--  tools/cicdansible/heat/installer.yaml                                            | 283
-rw-r--r--  tools/cicdansible/heat/instance.yaml                                             |  58
-rw-r--r--  tools/cicdansible/heat/node.yaml                                                 |  59
-rw-r--r--  tools/cicdansible/hosts.yml                                                      |  28
-rw-r--r--  tools/cicdansible/install.yml                                                    |  36
-rw-r--r--  tools/cicdansible/library/os_floating_ips_facts.py                               |  61
-rw-r--r--  tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml              |  10
-rw-r--r--  tools/cicdansible/roles/configure_instances/tasks/general.yml                    |  26
-rw-r--r--  tools/cicdansible/roles/configure_instances/tasks/main.yml                       |   5
-rw-r--r--  tools/cicdansible/roles/configure_instances/templates/daemon.json.j2             |   3
-rw-r--r--  tools/cicdansible/roles/install/defaults/main.yml                                |   3
-rw-r--r--  tools/cicdansible/roles/install/tasks/download_resources.yml                     |   6
-rw-r--r--  tools/cicdansible/roles/install/tasks/install.yml                                |  48
-rw-r--r--  tools/cicdansible/roles/install/tasks/main.yml                                   |   1
-rw-r--r--  tools/cicdansible/roles/install/templates/inventory.yml.j2                       |  36
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml  |  11
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml |  47
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml     |  36
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml     |   8
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml   |  41
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml | 9
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml            |   1
30 files changed, 932 insertions(+), 0 deletions(-)
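This changeset adds a self-contained ansible driver that deploys the onap offline installer onto openstack. As a rough sketch of the entry point it provides (every -e value below is an illustrative placeholder; the authoritative variable list is group_vars/all.yml):

```sh
# Hypothetical invocation; all values are placeholders specific to your cloud.
ansible-playbook -i hosts.yml install.yml \
  -e os_auth_url=https://keystone.example.com:5000/v3 \
  -e os_username=demo -e os_password=secret -e os_project_name=onap \
  -e public_network=public_net \
  -e first_node_ip=172.16.0.10 -e infra_ip=172.16.0.11 -e installer_ip=172.16.0.12 \
  -e node_flavor_name=m1.xlarge -e infra_flavor_name=m1.xlarge \
  -e installer_flavor_name=m1.small -e image_name=centos-7.6 \
  -e external_subnet_cidr=172.16.0.0/16 \
  -e cicd_docker_registry=registry.example.com:5000 \
  -e resource_host=172.16.0.5 -e resources_dir=/data/onap
```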
diff --git a/tools/cicdansible/.gitignore b/tools/cicdansible/.gitignore
new file mode 100644
index 00000000..bb3e4abb
--- /dev/null
+++ b/tools/cicdansible/.gitignore
@@ -0,0 +1,2 @@
+*.retry
+resources/
diff --git a/tools/cicdansible/ansible.cfg b/tools/cicdansible/ansible.cfg
new file mode 100644
index 00000000..e74dda58
--- /dev/null
+++ b/tools/cicdansible/ansible.cfg
@@ -0,0 +1,18 @@
+#Ansible configuration used when running the playbook.
+[defaults]
+#Stdout callback.
+stdout_callback=debug
+#Default verbosity level, for logging all module outputs.
+verbosity=1
+
+[inventory]
+#Fail when inventory parsing fails.
+any_unparsed_is_failed=true
+
+[connection]
+#Enable ansible pipelining.
+pipelining=true
+
+[ssh_connection]
+#Increase control persist settings.
+ssh_args=-C -o ControlMaster=auto -o ControlPersist=30m
diff --git a/tools/cicdansible/group_vars/all.yml b/tools/cicdansible/group_vars/all.yml
new file mode 100644
index 00000000..581e7c4a
--- /dev/null
+++ b/tools/cicdansible/group_vars/all.yml
@@ -0,0 +1,63 @@
+---
+#General configuration; any value can be overridden on the command line.
+#Authentication/keystone url.
+os_auth_url: ""
+#Openstack username.
+os_username: ""
+#Password.
+os_password: ""
+#Domain name.
+os_domain_name: "default"
+#Project name.
+os_project_name: ""
+#The name or id of public network used to communicate with instances.
+public_network: ""
+#Floating ip address for the first node instance.
+first_node_ip: ""
+#Floating ip of infra instance.
+infra_ip: ""
+#Floating ip of installer.
+installer_ip: ""
+#Openstack flavor name for nodes.
+node_flavor_name: ""
+#Flavor name for infra instance.
+infra_flavor_name: ""
+#Flavor name for installer instance.
+installer_flavor_name: ""
+#Name of the image for instances.
+image_name: ""
+#Cidr of private subnet where instances are connected.
+subnet_cidr: "10.1.0.0/24"
+#Start of dhcp allocation range for subnet.
+subnet_range_start: "10.1.0.4"
+#Subnet allocation range end.
+subnet_range_end: "10.1.0.254"
+#Ip address of router used as a gateway to external network.
+router_addr: "10.1.0.1"
+#Cidr of the external subnet to allow access to; 0.0.0.0/0 means unrestricted internet access.
+# For offline deployment it is recommended to set this to the cidr of your intranet.
+external_subnet_cidr: ""
+#Address of cicd docker registry.
+cicd_docker_registry: ""
+#Number of nodes to deploy.
+num_nodes: "3"
+#Stack name to deploy on heat.
+stack_name: "installer-test"
+#Address of resource server with packages.
+resource_host: ""
+#Directory with all onap packages (on resource host).
+resources_dir: ""
+#Filename of software package.
+resources_sw_filename: "sw_package.tar"
+#Filename of binary resources.
+resources_filename: "resources_package.tar"
+#Filename of auxiliary resources.
+aux_resources_filename: "aux_package.tar"
+#Whether to deploy app.
+#Setting it to false will skip deployment, but instance preconfiguration
+#will still be done and sw resources uploaded to the installer host.
+install_app: true
+# This is a string containing base64-encoded yaml blob passed to offline installer via -e option.
+# You can use it to override any variable in offline installer except those
+# supported directly by cicdansible.
+application_config: ''
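Because the install role below passes application_config through b64decode, the blob must arrive base64-encoded. A minimal sketch, assuming GNU base64 and a hypothetical overrides.yml holding offline-installer variables:

```sh
# Encode the override file and hand it to the playbook as a single variable.
ansible-playbook -i hosts.yml install.yml \
  -e application_config="$(base64 -w0 overrides.yml)"
```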
diff --git a/tools/cicdansible/group_vars/instances.yml b/tools/cicdansible/group_vars/instances.yml
new file mode 100644
index 00000000..0d756a57
--- /dev/null
+++ b/tools/cicdansible/group_vars/instances.yml
@@ -0,0 +1,11 @@
+#Configuration for all instances.
+#User to log in to instances as.
+ansible_user: root
+#Whether to become root using sudo or the like; no by default.
+ansible_become: no
+#Private key to use to access instances.
+ansible_private_key_file: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
+#Arguments to skip host key verification for instances, modify only if you know what you are doing.
+disable_ssh_host_auth: "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
+ansible_ssh_common_args: "{{ disable_ssh_host_auth }}"
+
diff --git a/tools/cicdansible/group_vars/nodes.yml b/tools/cicdansible/group_vars/nodes.yml
new file mode 100644
index 00000000..76a222c2
--- /dev/null
+++ b/tools/cicdansible/group_vars/nodes.yml
@@ -0,0 +1,5 @@
+#Configuration for kubernetes nodes.
+#This redirects ssh connections through the installer instance, to allow connecting to nodes via their internal ips.
+#It should work even on openssh versions lacking -J option support.
+#The value is based heavily on the default from the parent group.
+ansible_ssh_common_args: "{{ disable_ssh_host_auth }} -o ProxyCommand='ssh {{ disable_ssh_host_auth }} -i {{ ansible_private_key_file }} -W %h:%p root@{{ installer_ip }}'"
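With this value, a connection to a node effectively becomes a nested ssh invocation like the one below (key path and addresses are illustrative); it is equivalent to what the -J flag produces on newer openssh:

```sh
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null \
    -o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i ~/.ssh/id_rsa -W %h:%p root@172.16.0.12' \
    -i ~/.ssh/id_rsa root@10.1.0.7
```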
diff --git a/tools/cicdansible/group_vars/resources.yml b/tools/cicdansible/group_vars/resources.yml
new file mode 100644
index 00000000..e7c0f773
--- /dev/null
+++ b/tools/cicdansible/group_vars/resources.yml
@@ -0,0 +1,6 @@
+#Resource host configuration.
+#Define used private key.
+ansible_private_key_file: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
+#User login data.
+ansible_user: root
+ansible_become: no
diff --git a/tools/cicdansible/heat/config.yaml b/tools/cicdansible/heat/config.yaml
new file mode 100644
index 00000000..e1f0309f
--- /dev/null
+++ b/tools/cicdansible/heat/config.yaml
@@ -0,0 +1,10 @@
+#cloud-config
+#Enable root login.
+disable_root: false
+#Output everything to /dev/console...
+output: { all: "/dev/console" }
+#Initialization.
+runcmd:
+ - |
+ set -efxu -o pipefail
+ %{NOTIFY_COMMAND} --data-binary '{"status": "SUCCESS", "reason": "instance started successfully"}'
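instance.yaml substitutes %{NOTIFY_COMMAND} with the wait condition handle's curl_cli attribute, so the rendered runcmd looks roughly like this sketch (endpoint and token are illustrative, and the exact header set heat emits may differ):

```yaml
runcmd:
  - |
    set -efxu -o pipefail
    curl -i -X POST -H 'X-Auth-Token: <token>' -H 'Content-Type: application/json' \
      'https://heat.example.com:8004/v1/<tenant>/stacks/<stack>/resources/instance_wait_handle/signal' \
      --data-binary '{"status": "SUCCESS", "reason": "instance started successfully"}'
```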
diff --git a/tools/cicdansible/heat/installer.env b/tools/cicdansible/heat/installer.env
new file mode 100644
index 00000000..9765ce30
--- /dev/null
+++ b/tools/cicdansible/heat/installer.env
@@ -0,0 +1 @@
+#Environment file
diff --git a/tools/cicdansible/heat/installer.yaml b/tools/cicdansible/heat/installer.yaml
new file mode 100644
index 00000000..8fff3a74
--- /dev/null
+++ b/tools/cicdansible/heat/installer.yaml
@@ -0,0 +1,283 @@
+#This is the top-level heat template for the onap environment, compatible with openstack ocata.
+heat_template_version: 2017-02-24
+description: "Heat template for deploying onap env"
+parameters:
+ auth_key:
+ label: "Auth public key"
+ description: "The public key used to authenticate to instances"
+ type: string
+ node_flavor_name:
+ label: "name of node flavor"
+ description: "The name of the flavor used to create kubernetes nodes"
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ description: "need to specify a valid flavor"
+ infra_flavor_name:
+ label: "name of infra flavor"
+ description: "flavor used to create infra instance"
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ description: "need to specify a valid flavor"
+ installer_flavor_name:
+ label: "name of installer flavor"
+ description: "flavor used to create installer instance"
+ type: string
+ constraints:
+ - custom_constraint: nova.flavor
+ description: "need to specify a valid flavor"
+ image_name:
+ label: "image name"
+ description: "name of the image from which to create all instances, should be rhel 7.6 or centos image"
+ type: string
+ constraints:
+ - custom_constraint: glance.image
+ description: "must specify a valid image name"
+ subnet_cidr:
+ label: "private subnet cidr"
+ description: "Cidr of a private subnet instances will be connected to"
+ type: string
+ constraints:
+ - custom_constraint: net_cidr
+ subnet_range_start:
+ label: "subnet dhcp allocation range start"
+ description: "Start of range of dhcp allocatable ips on private subnet"
+ type: string
+ constraints:
+ - custom_constraint: ip_addr
+ subnet_range_end:
+ label: "end of subnet dhcp allocation range"
+ description: "End of private subnet's dhcp allocation range"
+ type: string
+ constraints:
+ - custom_constraint: ip_addr
+ router_addr:
+ label: "ip address of router"
+ description: "IP address of the router allowing access to other networks incl. company network"
+ type: string
+ constraints:
+ - custom_constraint: ip_addr
+ public_network_name:
+ label: "name of the public network"
+ description: "Name of the public, internet facing network, also allowing access to company internal hosts"
+ type: string
+ constraints:
+ - custom_constraint: neutron.network
+ description: "Must specify a valid network name or id"
+ external_subnet_cidr:
+ label: "external subnet cidr"
+ description: "The CIDR of the external subnet, that should be accessible from instances, even when internet access is cut. Putting 0.0.0.0/0 here means access to internet."
+ type: string
+ constraints:
+ - custom_constraint: net_cidr
+ installer_ip:
+ label: "floating ip of the installer"
+ description: "a pre-allocated floating ip that will be associated with the installer instance"
+ type: string
+ infra_ip:
+ label: "floating ip of the infra"
+ description: "a pre-allocated floating ip that will be associated with the infrastructure instance"
+ type: string
+ node_ip:
+ label: "floating ip of the first node"
+ description: "a pre-allocated floating ip that will be associated with the first kubernetes node and allow accessing onap"
+ type: string
+ num_nodes:
+ label: "num nodes"
+ description: "the number of kubernetes nodes to create, min 1"
+ type: number
+ constraints:
+ - range: { min: 1 }
+ description: "must be a positive number"
+resources:
+ # Security group used to secure access to instances.
+ secgroup:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ rules:
+ # Egress rule allowing access to external_subnet_cidr.
+ - direction: egress
+ ethertype: IPv4
+ remote_ip_prefix: { get_param: external_subnet_cidr }
+      # Ingress rule, also allowing inbound access from the external network.
+ - direction: ingress
+ ethertype: IPv4
+ remote_ip_prefix: { get_param: external_subnet_cidr }
+ # Allow outbound communication with the internal subnet.
+ - direction: egress
+ ethertype: IPv4
+ remote_ip_prefix: { get_param: subnet_cidr }
+ # Allow inbound communication from internal network.
+ - direction: ingress
+ ethertype: IPv4
+ remote_ip_prefix: { get_param: subnet_cidr }
+ # Allow outbound access to 169.254.0.0/16, mainly for metadata. We do not need inbound.
+ - direction: egress
+ ethertype: IPv4
+ remote_ip_prefix: 169.254.0.0/16
+ #A network that our test environment will be connected to.
+ privnet:
+ type: OS::Neutron::Net
+ #Subnet that instances will live in.
+ privsubnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network: { get_resource: privnet }
+ cidr: { get_param: subnet_cidr }
+ allocation_pools:
+ - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
+ gateway_ip: { get_param: router_addr }
+ ip_version: 4
+ #A port connected to the private network, taken by router.
+ routerport:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: privnet }
+ fixed_ips:
+ - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
+ security_groups: [{ get_resource: secgroup }]
+ #This is a router, routing between us and the internet.
+ #It has an external gateway to public network.
+ router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info:
+ network: { get_param: public_network_name }
+ #This is a router interface connecting it to our private subnet's router port.
+ routercon:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router: { get_resource: router }
+ port: { get_resource: routerport }
+
+ #Key used to authenticate to instances as root.
+ key:
+ type: OS::Nova::KeyPair
+ properties:
+ name: { get_param: "OS::stack_name" }
+ public_key: { get_param: auth_key }
+ #Handle to signal about starting up of instances.
+ instance_wait_handle:
+ type: OS::Heat::WaitConditionHandle
+ #Monitor waiting for all instances to start.
+ instance_wait:
+ type: OS::Heat::WaitCondition
+ properties:
+ handle: { get_resource: instance_wait_handle }
+ timeout: 1200
+ count:
+ yaql:
+ data: { num_nodes: { get_param: num_nodes } }
+          #This is the number of nodes + 2 (the infra and installer instances).
+ expression: "$.data.num_nodes + 2"
+ #Resource group to deploy n nodes using node template for each, each node numbered starting from 0.
+ nodes:
+ type: OS::Heat::ResourceGroup
+ properties:
+ count: { get_param: num_nodes }
+ resource_def:
+ type: node.yaml
+ properties:
+ nodenum: "%index%"
+ key_name: { get_resource: key }
+ image_name: { get_param: image_name }
+ network: { get_resource: privnet }
+ subnet: { get_resource: privsubnet }
+ flavor_name: { get_param: node_flavor_name }
+ notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
+ security_group: { get_resource: secgroup }
+ depends_on: [routercon, instance_wait_handle]
+ #Nfs storage volume for first node.
+ nfs_storage:
+ type: OS::Cinder::Volume
+ properties:
+ name: nfs_storage
+ size: 50
+ #Attachment of volume to first node.
+ nfs_storage_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ instance_uuid: { get_attr: [nodes, "resource.0"] }
+ volume_id: { get_resource: nfs_storage }
+ #Floating ip association for node (first only).
+ node_fip_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_param: node_ip }
+ port_id: { get_attr: ["nodes", "resource.0.port_id"] }
+ #Openstack volume used for storing resources.
+ resources_storage:
+ type: "OS::Cinder::Volume"
+ properties:
+ name: "resources_storage"
+ size: 120
+ #Instance representing infrastructure instance, created using subtemplate.
+ infra:
+ type: "instance.yaml"
+ properties:
+ instance_name: infra
+ network: { get_resource: privnet }
+ subnet: { get_resource: privsubnet }
+ key_name: { get_resource: key }
+ flavor_name: { get_param: infra_flavor_name }
+ image_name: { get_param: image_name }
+ notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
+ security_group: { get_resource: secgroup }
+ depends_on: [instance_wait_handle]
+ #Volume attachment for infra node.
+ resources_storage_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: resources_storage }
+ instance_uuid: { get_resource: infra }
+ #Floating ip association for infra.
+ infra_fip_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_param: infra_ip }
+ port_id: { get_attr: ["infra", "port_id"] }
+ #Small installer vm having access to other instances, used to install onap.
+ installer:
+ type: "instance.yaml"
+ properties:
+ instance_name: installer
+ image_name: { get_param: image_name }
+ flavor_name: { get_param: installer_flavor_name }
+ key_name: { get_resource: key }
+ network: { get_resource: privnet }
+ subnet: { get_resource: privsubnet }
+ notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
+ security_group: { get_resource: secgroup }
+ depends_on: instance_wait_handle
+ #Floating ip for installer.
+ installer_fip_assoc:
+ type: OS::Neutron::FloatingIPAssociation
+ properties:
+ floatingip_id: { get_param: installer_ip }
+ port_id: { get_attr: [installer, port_id] }
+#Output values
+outputs:
+ installer_ip:
+ value: { get_attr: [installer, ip] }
+ description: "Internal ip of installer instance"
+ infra_ip:
+ value: { get_attr: [infra, ip] }
+ description: "Internal ip of infra instance"
+ node_ips:
+ value: { get_attr: [nodes, ip] }
+ description: "Serialized json list of node internal ips starting at node0"
+ volumes:
+ description: "map of volumes per each instance"
+ value:
+ yaql:
+ data:
+ resources_volid: { get_resource: resources_storage }
+ nfs_volid: { get_resource: nfs_storage }
+ docker_volids: { get_attr: [nodes, docker_storage_id] }
+      #This is going to create a map where keys are instance names and values are lists of
+      #[volume id, mount point] pairs.
+      #This is done by merging a few generated maps together; the base map is built by
+      #enumerating over docker storage volumes and transforming them into a map like
+      #{"node0" => [["volid", "/var/lib/docker"]], "node1" => ...}
+ expression: 'dict($.data.docker_volids.enumerate().select(["node"+str($[0]), [[$[1], "/var/lib/docker"]]])).mergeWith({"infra" => [[$.data.resources_volid, "/opt/onap"]], "node0" => [[$.data.nfs_volid, "/dockerdata-nfs"]]})'
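For example, with num_nodes set to 2 the expression above yields a map shaped like this (volume ids shortened to illustrative names):

```yaml
node0:
  - [docker-volid-0, /var/lib/docker]
  - [nfs-volid, /dockerdata-nfs]
node1:
  - [docker-volid-1, /var/lib/docker]
infra:
  - [resources-volid, /opt/onap]
```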
diff --git a/tools/cicdansible/heat/instance.yaml b/tools/cicdansible/heat/instance.yaml
new file mode 100644
index 00000000..2734704d
--- /dev/null
+++ b/tools/cicdansible/heat/instance.yaml
@@ -0,0 +1,58 @@
+#Template for instances.
+heat_template_version: 2017-02-24
+description: "template instantiating and configuring a single instance (any)"
+parameters:
+ instance_name:
+ type: string
+ network:
+ type: string
+ subnet:
+ type: string
+ image_name:
+ type: string
+ flavor_name:
+ type: string
+ key_name:
+ type: string
+ notify_command:
+ type: string
+ security_group:
+ type: string
+#Resources.
+resources:
+ #This is the network port to attach instance to.
+ port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_param: network }
+ security_groups: [ { get_param: security_group } ]
+ fixed_ips:
+ - { subnet: { get_param: subnet }}
+  #cloud-init configuration.
+ config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace_strict:
+ template: { get_file: config.yaml }
+ params:
+ "%{NOTIFY_COMMAND}": { get_param: notify_command }
+ #Actual instance to create.
+ instance:
+ type: OS::Nova::Server
+ properties:
+ name: { get_param: instance_name }
+ image: { get_param: image_name }
+ flavor: { get_param: flavor_name }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: port }
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: config }
+outputs:
+ OS::stack_id:
+ value: { get_resource: instance }
+ port_id:
+ value: { get_resource: port }
+ ip:
+ value: { get_attr: ["port", "fixed_ips", 0, "ip_address"] }
diff --git a/tools/cicdansible/heat/node.yaml b/tools/cicdansible/heat/node.yaml
new file mode 100644
index 00000000..b6048d8d
--- /dev/null
+++ b/tools/cicdansible/heat/node.yaml
@@ -0,0 +1,59 @@
+#This yaml template instantiates kubernetes nodes (using instance.yaml subtemplate).
+#It contains node-specific additions and has been split from the main template
+#to make late evaluation tricks (the per-node %index% substitution) possible.
+heat_template_version: 2017-02-24
+description: "This template instantiates a single kubernetes node using the instance.yaml subtemplate"
+parameters:
+ key_name:
+ type: string
+ flavor_name:
+ type: string
+ nodenum:
+ type: number
+ image_name:
+ type: string
+ network:
+ type: string
+ subnet:
+ type: string
+ notify_command:
+ type: string
+ security_group:
+ type: string
+resources:
+ #Volume for storing /var/lib/docker for node.
+ docker_storage:
+ type: OS::Cinder::Volume
+ properties:
+ name: docker_storage
+ size: 120
+ #Call generic instance template.
+ instance:
+ type: instance.yaml
+ properties:
+ instance_name:
+ str_replace_strict:
+ template: "node%index%"
+ params: { "%index%": { get_param: nodenum } }
+ key_name: { get_param: key_name }
+ image_name: { get_param: image_name }
+ network: { get_param: network }
+ subnet: { get_param: subnet }
+ flavor_name: { get_param: flavor_name }
+ notify_command: { get_param: notify_command }
+ security_group: { get_param: security_group }
+ #Attachment of docker volume to node.
+ docker_storage_attachment:
+ type: OS::Cinder::VolumeAttachment
+ properties:
+ volume_id: { get_resource: docker_storage }
+ instance_uuid: { get_resource: instance }
+outputs:
+ OS::stack_id:
+ value: { get_resource: instance }
+ port_id:
+ value: { get_attr: ["instance", "port_id"] }
+ ip:
+ value: { get_attr: ["instance", "ip"] }
+ docker_storage_id:
+ value: { get_resource: docker_storage }
diff --git a/tools/cicdansible/hosts.yml b/tools/cicdansible/hosts.yml
new file mode 100644
index 00000000..e4c416cf
--- /dev/null
+++ b/tools/cicdansible/hosts.yml
@@ -0,0 +1,28 @@
+#Default inventory.
+#This file should not be modified; modify group_vars instead.
+#NOTE
+#All kubernetes nodes including the first node are added to inventory dynamically.
+#Instances group with children.
+instances:
+ hosts:
+
+#Installer instance.
+ installer:
+ #Do not modify.
+ ansible_host: "{{ installer_ip }}"
+
+#Infra instance.
+ infra:
+ #Do not modify.
+ ansible_host: "{{ infra_ip }}"
+
+ children:
+  #Empty group for nodes, populated dynamically; please do not modify.
+ nodes:
+
+#The group for the resource host; only the first entry is considered.
+#This host contains onap installer packages including scripts.
+resources:
+ hosts:
+ resource_host:
+ ansible_host: "{{ resource_host }}"
diff --git a/tools/cicdansible/install.yml b/tools/cicdansible/install.yml
new file mode 100644
index 00000000..13071c31
--- /dev/null
+++ b/tools/cicdansible/install.yml
@@ -0,0 +1,36 @@
+---
+#Installation of onap on openstack, driven by ansible.
+#Default parameters are set in group_vars/*.yml.
+#Inventory is in hosts.yml, and parameters specific to instances are set there.
+#Deploy infrastructure.
+- name: "deploy infrastructure"
+ hosts: localhost
+ gather_facts: false
+ roles:
+ - role: setup_openstack_infrastructure
+ vars:
+ mode: deploy
+#Play that configures all instances.
+- name: "Instance configuration"
+ hosts: instances
+ any_errors_fatal: true
+ roles:
+ - role: setup_openstack_infrastructure
+ vars:
+ mode: configure
+ - role: configure_instances
+#Play that downloads sw resources.
+- name: "Download resources"
+ hosts: resources
+ gather_facts: false
+ roles:
+ - role: install
+ vars:
+ mode: download_resources
+#Perform installation.
+- name: "Perform installation"
+ hosts: installer
+ roles:
+ - role: install
+ vars:
+ mode: install
diff --git a/tools/cicdansible/library/os_floating_ips_facts.py b/tools/cicdansible/library/os_floating_ips_facts.py
new file mode 100644
index 00000000..ad546004
--- /dev/null
+++ b/tools/cicdansible/library/os_floating_ips_facts.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+ANSIBLE_METADATA = {
+    'metadata_version': '1.1',
+    'supported_by': 'community',
+    'status': ['preview']
+}
+
+DOCUMENTATION = '''
+---
+module: "os_floating_ips_facts"
+short_description: "Retrieves facts about floating ips"
+description:
+ - "This module retrieves facts about one or more floating ips allocated to project."
+version_added: "2.7"
+author:
+ - "Michal Zegan"
+requirements:
+ - "python => 2.7"
+ - "openstacksdk"
+options:
+ floating_ip:
+ description:
+ - "The floating ip to retrieve facts for"
+ type: "str"
+ network:
+ description:
+ - "Name or id of the floating ip network to query."
+ required: true
+ type: "str"
+notes:
+ - "Registers facts starting with openstack_floating_ips"
+extends_documentation_fragment: openstack
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
+
+def run_module():
+    args = openstack_module_kwargs()
+    argspec = openstack_full_argument_spec(
+        floating_ip=dict(type=str),
+        network=dict(type=str, required=True))
+    module = AnsibleModule(argument_spec=argspec, **args)
+    sdk, cloud = openstack_cloud_from_module(module)
+    try:
+        #Resolve the floating ip network and filter ips belonging to this project.
+        fip_network = cloud.network.find_network(module.params['network'])
+        filters = dict(
+            project_id=cloud.current_project_id,
+            floating_network_id=fip_network.id)
+        if module.params['floating_ip'] is not None:
+            filters['floating_ip_address'] = module.params['floating_ip']
+        ips = [dict(x) for x in cloud.network.ips(**filters)]
+        module.exit_json(
+            changed=False,
+            ansible_facts=dict(openstack_floating_ips=ips))
+    except sdk.exceptions.OpenStackCloudException as e:
+        module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ run_module()
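A minimal playbook-side sketch of calling this module (it mirrors the real call in the deploy/prereq.yml task file further below):

```yaml
- name: "get floating ip facts"
  os_floating_ips_facts:
    auth: "{{ os_auth }}"
    auth_type: token
    network: "{{ public_network }}"
- name: "show the registered fact"
  debug:
    var: openstack_floating_ips
```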
diff --git a/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml
new file mode 100644
index 00000000..f3c54ca3
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/cicd_registry.yml
@@ -0,0 +1,10 @@
+#Configure access to cicd docker registry.
+- name: "Ensure that docker config directory exists"
+ file:
+ path: /etc/docker
+ mode: 0700
+ state: directory
+- name: "Allow insecure access to cicd docker registry"
+ template:
+ src: daemon.json.j2
+ dest: /etc/docker/daemon.json
diff --git a/tools/cicdansible/roles/configure_instances/tasks/general.yml b/tools/cicdansible/roles/configure_instances/tasks/general.yml
new file mode 100644
index 00000000..6ed9982e
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/general.yml
@@ -0,0 +1,26 @@
+#General instance configuration.
+#Modify /etc/hosts on every instance, adding an entry for every instance including itself.
+- name: "Add hosts to /etc/hosts"
+ lineinfile:
+ path: /etc/hosts
+ insertafter: EOF
+ regexp: "^[^ ]+ {{ item }}$"
+ state: present
+ line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
+ loop: "{{ groups['instances'] }}"
+#Copy private ssh key to instances for easy connecting between them.
+- name: "Ensure ssh directory exists"
+ file:
+ path: /root/.ssh
+ owner: root
+ group: root
+ mode: 0700
+ state: directory
+- name: "Install ssh private key"
+ copy:
+ src: "{{ ansible_private_key_file }}"
+ dest: /root/.ssh/id_rsa
+ mode: 0400
+#Add public ssh host keys of all instances to trust them.
+- name: "Add host keys of instances to known_hosts"
+ shell: "ssh-keyscan {{ groups['instances'] | join(' ') }} > /root/.ssh/known_hosts"
diff --git a/tools/cicdansible/roles/configure_instances/tasks/main.yml b/tools/cicdansible/roles/configure_instances/tasks/main.yml
new file mode 100644
index 00000000..fe5b4b7d
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/tasks/main.yml
@@ -0,0 +1,5 @@
+#Initial instance configuration.
+- include_tasks: general.yml
+#Configure cicd registry access, but skip installer.
+- include_tasks: cicd_registry.yml
+ when: "inventory_hostname != 'installer'"
diff --git a/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
new file mode 100644
index 00000000..1c3ca9bb
--- /dev/null
+++ b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
@@ -0,0 +1,3 @@
+{
+"insecure-registries": ["{{ cicd_docker_registry }}"]
+}
diff --git a/tools/cicdansible/roles/install/defaults/main.yml b/tools/cicdansible/roles/install/defaults/main.yml
new file mode 100644
index 00000000..b21e6323
--- /dev/null
+++ b/tools/cicdansible/roles/install/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+installer_deploy_path: "{{ ansible_user_dir }}/installer"
+install_timeout: 10600
diff --git a/tools/cicdansible/roles/install/tasks/download_resources.yml b/tools/cicdansible/roles/install/tasks/download_resources.yml
new file mode 100644
index 00000000..7f042596
--- /dev/null
+++ b/tools/cicdansible/roles/install/tasks/download_resources.yml
@@ -0,0 +1,6 @@
+#Download resources/scripts to controller.
+- name: "Download software resources"
+ fetch:
+ src: "{{ resources_dir }}/{{ resources_sw_filename }}"
+ flat: yes
+ dest: "resources/"
diff --git a/tools/cicdansible/roles/install/tasks/install.yml b/tools/cicdansible/roles/install/tasks/install.yml
new file mode 100644
index 00000000..35df7976
--- /dev/null
+++ b/tools/cicdansible/roles/install/tasks/install.yml
@@ -0,0 +1,48 @@
+#Onap installation tasks
+#Copy ssh private key used for resource server access
+- name: "Copy resource server access key"
+ copy:
+ src: "{{ hostvars[groups['resources'][0]].ansible_private_key_file }}"
+ dest: "{{ ansible_user_dir }}/.ssh/res.pem"
+ mode: 0600
+#Unarchive resources.
+- name: "Ensure {{ installer_deploy_path }} directory exists"
+ file:
+ path: "{{ installer_deploy_path }}"
+ state: directory
+- name: "Extract sw resources"
+ unarchive:
+ src: "resources/{{ hostvars[groups['resources'][0]].resources_sw_filename }}"
+ dest: "{{ installer_deploy_path }}"
+#Generate ansible inventory and extra vars.
+- name: "Generate ansible inventory for installer"
+ template:
+ src: inventory.yml.j2
+ dest: "{{ installer_deploy_path }}/ansible/inventory/hosts.yml"
+- name: "generate application specific config overrides"
+ copy:
+ content: "{{ application_config | b64decode }}"
+ dest: "{{ installer_deploy_path }}/ansible/application/application_overrides.yml"
+# This generates a file with the locations of resource files on the resource
+# host. We do it only to allow running the offline installer manually
+# without typing them by hand. We cannot use the inventory template
+# because it would be overridden by application_configuration.yml.
+- name: Generate resource location file
+ copy:
+ content: |
+ resources_dir: {{ resources_dir }}
+ resources_filename: {{ resources_filename }}
+ aux_resources_filename: {{ aux_resources_filename }}
+ app_data_path: /opt/onap/resources
+ dest: "{{ installer_deploy_path }}/ansible/application/resources.yml"
+#Run script.
+- name: "Execute installation"
+ shell:
+ ./run_playbook.sh
+ -e @application/application_configuration.yml -e @application/application_overrides.yml
+ -e @application/resources.yml -i inventory/hosts.yml site.yml
+ args:
+ chdir: "{{ installer_deploy_path }}/ansible"
+ async: "{{ install_timeout }}"
+ when: install_app
diff --git a/tools/cicdansible/roles/install/tasks/main.yml b/tools/cicdansible/roles/install/tasks/main.yml
new file mode 100644
index 00000000..04ac4c3d
--- /dev/null
+++ b/tools/cicdansible/roles/install/tasks/main.yml
@@ -0,0 +1 @@
+- include_tasks: "{{ mode }}.yml"
diff --git a/tools/cicdansible/roles/install/templates/inventory.yml.j2 b/tools/cicdansible/roles/install/templates/inventory.yml.j2
new file mode 100644
index 00000000..36bf3bd3
--- /dev/null
+++ b/tools/cicdansible/roles/install/templates/inventory.yml.j2
@@ -0,0 +1,36 @@
+all:
+ vars:
+ ansible_ssh_private_key_file: /root/.ssh/id_rsa
+ ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
+ children:
+ resources:
+ vars:
+ ansible_ssh_private_key_file: /root/.ssh/res.pem
+ ansible_user: "{{ hostvars[groups['resources'][0]].ansible_user }}"
+ ansible_become: "{{ hostvars[groups['resources'][0]].ansible_become }}"
+ hosts:
+ resource_host:
+ ansible_host: {{ resource_host }}
+    infrastructure:
+      hosts:
+        infra_host:
+          ansible_host: infra
+          cluster_ip: {{ hostvars['infra'].ansible_default_ipv4.address }}
+    kubernetes:
+      children:
+        kubernetes-node:
+          hosts:
+{% for h in groups['nodes'] %}
+            {{ h }}:
+              ansible_host: "{{ hostvars[h].ansible_default_ipv4.address }}"
+              cluster_ip: "{{ hostvars[h].ansible_default_ipv4.address }}"
+{% endfor %}
+        kubernetes-control-plane:
+          hosts:
+            infra_host:
+        kubernetes-etcd:
+          hosts:
+            infra_host:
+    nfs-server:
+      hosts:
+        node0:
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml
new file mode 100644
index 00000000..44de5795
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/main.yml
@@ -0,0 +1,11 @@
+#Openstack specific configuration running on instances.
+#Get volumes.
+- name: "get volume info"
+ set_fact:
+ volumes: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'volumes') | list).0.output_value[inventory_hostname] | default([]) }}"
+- name: "Configure volumes"
+ include_tasks: configure/volume.yml
+ vars:
+ volume_id: "{{ item[0] }}"
+ mountpoint: "{{ item[1] }}"
+ loop: "{{ volumes }}"
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml
new file mode 100644
index 00000000..8c553850
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/configure/volume.yml
@@ -0,0 +1,47 @@
+#Configure a single openstack volume.
+- name: "Set volume path"
+ set_fact:
+ volume_path: "/dev/disk/by-id/virtio-{{ volume_id | truncate(20, True, '') }}"
+- name: "Set partition path"
+ set_fact:
+ partition_path: "{{ volume_path }}-part1"
+- name: "Wait for volume"
+  #We do not use a plain wait here because we want to trigger udev first (a workaround for some bugs).
+ shell: "udevadm trigger && udevadm settle && [[ -b {{ volume_path }} ]]"
+ register: result
+ retries: 30
+ delay: 10
+ until: result.rc == 0
+- name: "Partition volume"
+ parted:
+ device: "{{ volume_path }}"
+ number: 1
+ label: msdos
+ flags: boot
+ part_type: primary
+ state: present
+- name: "Wait for partition to appear"
+ stat:
+ path: "{{ partition_path }}"
+ follow: true
+ register: part_stat
+ delay: 1
+ retries: 5
+ until: part_stat.stat.isblk is defined and part_stat.stat.isblk
+- name: "Create xfs filesystem on volume"
+ filesystem:
+ dev: "{{ partition_path }}"
+ type: xfs
+- name: "Ensure that the mountpoint exists"
+ file:
+ path: "{{ mountpoint }}"
+ owner: root
+ group: root
+ mode: 0755
+ state: directory
+- name: "Mount filesystem"
+ mount:
+ src: "{{ partition_path }}"
+ path: "{{ mountpoint }}"
+ fstype: xfs
+ state: mounted
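The truncate(20, True, '') filter in the first task matches the kvm/virtio limit on disk serial numbers: udev exposes only the first 20 characters of the cinder volume id. Worked through for an illustrative volume id:

```
volume id:  f81d4fae-7dec-11d0-a765-00a0c91e6bf6
device:     /dev/disk/by-id/virtio-f81d4fae-7dec-11d0-a
partition:  /dev/disk/by-id/virtio-f81d4fae-7dec-11d0-a-part1
```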
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
new file mode 100644
index 00000000..2bfeda77
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
@@ -0,0 +1,36 @@
+#Tasks for stack redeployment.
+#Delete the heat stack before deployment.
+- name: "delete deployment to force redeploy"
+ os_stack:
+ auth: "{{ os_auth }}"
+ auth_type: token
+ name: "{{ stack_name }}"
+ state: absent
+#Deploy heat stack with infrastructure.
+- name: "Deploy the infrastructure via heat"
+ os_stack:
+ auth: "{{ os_auth }}"
+ auth_type: token
+ name: "{{ stack_name }}"
+ template: "heat/installer.yaml"
+ state: present
+ environment:
+ - "heat/installer.env"
+ parameters:
+ num_nodes: "{{ num_nodes }}"
+ public_network_name: "{{ public_network }}"
+ external_subnet_cidr: "{{ external_subnet_cidr }}"
+ subnet_cidr: "{{ subnet_cidr }}"
+ subnet_range_start: "{{ subnet_range_start }}"
+ subnet_range_end: "{{ subnet_range_end }}"
+ router_addr: "{{ router_addr }}"
+ auth_key: "{{ auth_public_key }}"
+ image_name: "{{ image_name }}"
+ node_flavor_name: "{{ node_flavor_name }}"
+ infra_flavor_name: "{{ infra_flavor_name }}"
+ installer_flavor_name: "{{ installer_flavor_name }}"
+ node_ip: "{{ floating_ips_by_address[first_node_ip].id }}"
+ infra_ip: "{{ floating_ips_by_address[infra_ip].id }}"
+ installer_ip: "{{ floating_ips_by_address[installer_ip].id }}"
+ wait: true
+ register: heat_stack
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml
new file mode 100644
index 00000000..324f5374
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/main.yml
@@ -0,0 +1,8 @@
+---
+#This mode expects some variables, and deploys the infrastructure on openstack.
+#Execute prerequisites.
+- include_tasks: deploy/prereq.yml
+#Deploy stack.
+- include_tasks: deploy/heat.yml
+#Register instances in inventory.
+- include_tasks: deploy/register_instances.yml
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml
new file mode 100644
index 00000000..2fe8717a
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/prereq.yml
@@ -0,0 +1,41 @@
+#Prerequisite tasks before stack deployment.
+#Authenticate to cloud.
+- name: "authenticate to cloud"
+ os_auth:
+ auth:
+ auth_url: "{{ os_auth_url }}"
+ username: "{{ os_username }}"
+ password: "{{ os_password }}"
+ domain_name: "{{ os_domain_name }}"
+ project_name: "{{ os_project_name }}"
+ project_domain_name: "{{ os_domain_name }}"
+#Will use the token from this point on.
+- name: "set token"
+ set_fact:
+ os_auth:
+ auth_url: "{{ os_auth_url }}"
+ token: "{{ auth_token }}"
+ project_name: "{{ os_project_name }}"
+ project_domain_name: "{{ os_domain_name }}"
+#Retrieve floating ip info.
+- name: "get floating ip facts"
+ os_floating_ips_facts:
+ auth: "{{ os_auth }}"
+ auth_type: token
+ network: "{{ public_network }}"
+#Group floating ips by ip address to allow looking them up.
+- name: "group floating ips by address"
+ set_fact:
+ floating_ips_by_address: "{{ floating_ips_by_address | default({}) | combine({item.floating_ip_address: item}) }}"
+ loop: "{{ query('items', openstack_floating_ips) }}"
+- name: "fail if required floating ips do not exist"
+ fail: msg="The required floating ips do not exist"
+ when: "(not (first_node_ip in floating_ips_by_address)
+ or not (infra_ip in floating_ips_by_address)
+ or not (installer_ip in floating_ips_by_address))"
+#Get an ssh public key to pass to heat; this requires ssh-keygen with the -y option.
+- name: "Retrieve public key from ssh private key"
+ command: "ssh-keygen -y -f {{ hostvars['installer'].ansible_private_key_file }}"
+ register: public_key_generation
+- set_fact:
+ auth_public_key: "{{ public_key_generation.stdout }}"
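After the grouping task, floating_ips_by_address maps each address to the full object returned by os_floating_ips_facts, which is what lets heat.yml look up ids by address; roughly (values illustrative):

```yaml
floating_ips_by_address:
  172.16.0.10:
    id: 11111111-2222-3333-4444-555555555555
    floating_ip_address: 172.16.0.10
    floating_network_id: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee
```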
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml
new file mode 100644
index 00000000..a50ecd22
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/register_instances.yml
@@ -0,0 +1,9 @@
+#Register instances as hosts in inventory.
+#Installer and infra are statically registered.
+#Register node instances dynamically.
+- name: "Register node instances"
+ add_host:
+ name: "node{{ item[0] }}"
+ groups: nodes
+ ansible_host: "{{ item[1] }}"
+ loop: "{{ query('indexed_items', (heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'node_ips') | list).0.output_value) }}"
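Given a node_ips stack output of ["10.1.0.7", "10.1.0.8"], the indexed_items query yields the pairs (0, "10.1.0.7") and (1, "10.1.0.8"), so the task registers:

```
node0  ansible_host=10.1.0.7  groups=[nodes]
node1  ansible_host=10.1.0.8  groups=[nodes]
```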
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml
new file mode 100644
index 00000000..7a00abff
--- /dev/null
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/main.yml
@@ -0,0 +1 @@
+- include_tasks: "{{ mode }}/main.yml"