-rw-r--r--  docs/BuildGuide.rst                                                           |   6
-rw-r--r--  docs/vFWCL-notes.rst                                                          |   9
-rw-r--r--  patches/base_vfw.yaml                                                         | 506
-rwxr-xr-x  patches/update_policy.sh                                                      |  26
-rwxr-xr-x  patches/update_robot.sh                                                       |  12
-rw-r--r--  tools/cicdansible/group_vars/all.yml                                          |   4
-rw-r--r--  tools/cicdansible/heat/installer.yaml                                         |  16
-rw-r--r--  tools/cicdansible/heat/instance.yaml                                          |  17
-rw-r--r--  tools/cicdansible/heat/node.yaml                                              |   4
-rw-r--r--  tools/cicdansible/roles/install/tasks/install.yml                             |  11
-rw-r--r--  tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml  |   1
11 files changed, 80 insertions(+), 532 deletions(-)
diff --git a/docs/BuildGuide.rst b/docs/BuildGuide.rst
index 12426b7c..249d4e85 100644
--- a/docs/BuildGuide.rst
+++ b/docs/BuildGuide.rst
@@ -108,14 +108,14 @@ Part 2. Download artifacts for offline installer
.. note:: Skip this step if you already have all the necessary resources and continue with Part 3. Populate local nexus
-There need to be created RPM repository containing packages which need to be installed on all nodes.
+An RPM repository containing the packages to be installed on all nodes needs to be created:
::
# run the docker container with -d parameter for destination directory with RPM packages
./offline-installer/build/create_repo.sh -d $(pwd)
-.. note:: If script fails with permissions, problem could be with SeLinux. Issue is possible to solve by:
+.. note:: If the script fails due to a permissions issue, the problem may be caused by SELinux. It can be fixed by running:
::
# Change security context of directory
@@ -141,8 +141,6 @@ so one might try following command to download most of the required artifacts in
--docker ./build/data_lists/onap_docker_images.list \
-Alternatively, step-by-step procedure is described in Appendix 1.
-
This concludes the SW download part required for creating the ONAP offline platform.
Part 3. Populate local nexus
diff --git a/docs/vFWCL-notes.rst b/docs/vFWCL-notes.rst
index 2d6fd6fb..d8c76252 100644
--- a/docs/vFWCL-notes.rst
+++ b/docs/vFWCL-notes.rst
@@ -148,9 +148,10 @@ for this reason we are patching *base_vfw.yaml* for all vFW VMs with following c
# nasty hack to bypass cloud-init issues
sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf
- iface_correct=`ip a | grep 10.8.8 | awk {'print $7'}`
- route add default gw 10.8.8.1 ${iface_correct}
+ iface_correct=`ip a | grep <network_prefix> | awk {'print $7'}`
+ route add default gw <network_prefix>.1 ${iface_correct}
+In our case the network prefix variable is "10.8.8".
Let's treat it as an example of how these two problems can be fixed. Feel free to adjust the private/public key and skip the cloud-init fix if you don't have that problem.
Our helper script with the above settings fixes both issues (a) and (b) for us.
@@ -158,7 +159,7 @@ Our helping script with above setting is fixing both issues (a) and (b) for us.
::
# copy offline-installer repo into infra node and run following script from patches folder
- ./update_robot.sh
+ ./update_robot.sh <namespace> <network_prefix>
**drools**
@@ -170,7 +171,7 @@ One can fix it by running following script.
::
# copy offline-installer repo into infra node and run following script from patches folder
- ./update_policy.sh
+ ./update_policy.sh <namespace>
.. note:: This script also restarts policy; there is a small chance that drools will be marked as sick during the interval when it is being restarted and redeployed. If that happens, just try again.
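
For reference, with the default "onap" namespace and the network prefix used in this guide, the patched helper scripts would be invoked roughly as follows (both values are examples, adjust them to your deployment)::

    # run from the patches folder on the infra node
    ./update_robot.sh onap 10.8.8
    ./update_policy.sh onap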
diff --git a/patches/base_vfw.yaml b/patches/base_vfw.yaml
deleted file mode 100644
index fb4a0174..00000000
--- a/patches/base_vfw.yaml
+++ /dev/null
@@ -1,506 +0,0 @@
-##########################################################################
-#
-#==================LICENSE_START==========================================
-#
-#
-# Copyright (c) 2017 AT&T Intellectual Property. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#==================LICENSE_END============================================
-#
-# ECOMP is a trademark and service mark of AT&T Intellectual Property.
-#
-##########################################################################
-
-heat_template_version: 2013-05-23
-
-description: Heat template that deploys vFirewall demo app for ONAP
-
-##############
-# #
-# PARAMETERS #
-# #
-##############
-
-parameters:
- vfw_image_name:
- type: string
- label: Image name or ID
- description: Image to be used for compute instance
- vfw_flavor_name:
- type: string
- label: Flavor
- description: Type of instance (flavor) to be used
- vpg_image_name:
- type: string
- label: Image name or ID
- description: Image to be used for compute instance
- vpg_flavor_name:
- type: string
- label: Flavor
- description: Type of instance (flavor) to be used
- vsn_image_name:
- type: string
- label: Image name or ID
- description: Image to be used for compute instance
- vsn_flavor_name:
- type: string
- label: Flavor
- description: Type of instance (flavor) to be used
- public_net_id:
- type: string
- label: Public network name or ID
- description: Public network that enables remote connection to VNF
- unprotected_private_net_id:
- type: string
- label: Unprotected private network name or ID
- description: Private network that connects vPacketGenerator with vFirewall
- protected_private_net_id:
- type: string
- label: Protected private network name or ID
- description: Private network that connects vFirewall with vSink
- onap_private_net_id:
- type: string
- label: ONAP management network name or ID
- description: Private network that connects ONAP components and the VNF
- onap_private_subnet_id:
- type: string
- label: ONAP management sub-network name or ID
- description: Private sub-network that connects ONAP components and the VNF
- unprotected_private_net_cidr:
- type: string
- label: Unprotected private network CIDR
- description: The CIDR of the unprotected private network
- protected_private_net_cidr:
- type: string
- label: Protected private network CIDR
- description: The CIDR of the protected private network
- onap_private_net_cidr:
- type: string
- label: ONAP private network CIDR
- description: The CIDR of the protected private network
- vfw_int_unprotected_private_ip_0:
- type: string
- label: vFirewall private IP address towards the unprotected network
- description: Private IP address that is assigned to the vFirewall to communicate with the vPacketGenerator
- vfw_int_protected_private_ip_0:
- type: string
- label: vFirewall private IP address towards the protected network
- description: Private IP address that is assigned to the vFirewall to communicate with the vSink
- vfw_onap_private_ip_0:
- type: string
- label: vFirewall private IP address towards the ONAP management network
- description: Private IP address that is assigned to the vFirewall to communicate with ONAP components
- vfw_int_protected_private_floating_ip:
- type: string
- label: same value as vpg_int_unprotected_private_ip_0
- description: IP to inform OpenStack to enable vfw protected private port to allow packets coming from the packet generator
- vpg_int_unprotected_private_ip_0:
- type: string
- label: vPacketGenerator private IP address towards the unprotected network
- description: Private IP address that is assigned to the vPacketGenerator to communicate with the vFirewall
- vpg_onap_private_ip_0:
- type: string
- label: vPacketGenerator private IP address towards the ONAP management network
- description: Private IP address that is assigned to the vPacketGenerator to communicate with ONAP components
- vsn_int_protected_private_ip_0:
- type: string
- label: vSink private IP address towards the protected network
- description: Private IP address that is assigned to the vSink to communicate with the vFirewall
- vsn_onap_private_ip_0:
- type: string
- label: vSink private IP address towards the ONAP management network
- description: Private IP address that is assigned to the vSink to communicate with ONAP components
- vfw_name_0:
- type: string
- label: vFirewall name
- description: Name of the vFirewall
- vpg_name_0:
- type: string
- label: vPacketGenerator name
- description: Name of the vPacketGenerator
- vsn_name_0:
- type: string
- label: vSink name
- description: Name of the vSink
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID is provided by ONAP
- vnf_name:
- type: string
- label: VNF NAME
- description: The VNF NAME is provided by ONAP
- vf_module_id:
- type: string
- label: vFirewall module ID
- description: The vFirewall Module ID is provided by ONAP
- dcae_collector_ip:
- type: string
- label: DCAE collector IP address
- description: IP address of the DCAE collector
- dcae_collector_port:
- type: string
- label: DCAE collector port
- description: Port of the DCAE collector
- key_name:
- type: string
- label: Key pair name
- description: Public/Private key pair name
- pub_key:
- type: string
- label: Public key
- description: Public key to be installed on the compute instance
- install_script_version:
- type: string
- label: Installation script version number
- description: Version number of the scripts that install the vFW demo app
- demo_artifacts_version:
- type: string
- label: Artifacts version used in demo vnfs
- description: Artifacts (jar, tar.gz) version used in demo vnfs
- nexus_artifact_repo:
- type: string
- description: Root URL for the Nexus repository for Maven artifacts.
- cloud_env:
- type: string
- label: Cloud environment
- description: Cloud environment (e.g., openstack, rackspace)
- sec_group:
- type: string
- description: ONAP Security Group
-
-#############
-# #
-# RESOURCES #
-# #
-#############
-
-resources:
- random-str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- my_keypair:
- type: OS::Nova::KeyPair
- properties:
- name:
- str_replace:
- template: vnfname_base_rand
- params:
- base: { get_param: key_name }
- rand: { get_resource: random-str }
- vnfname: { get_param: vnf_name }
- public_key: { get_param: pub_key }
- save_private_key: false
-
- # NETWORK_ROLE: unprotected_private
- # NETWORK_TYPE: internal
- int_unprotected_private_network:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: vnfname_netid
- params:
- netid: { get_param: unprotected_private_net_id }
- vnfname: { get_param: vnf_name }
-
- # NETWORK_ROLE: protected_private
- # NETWORK_TYPE: internal
- int_protected_private_network:
- type: OS::Neutron::Net
- properties:
- name:
- str_replace:
- template: vnfname_netid
- params:
- netid: { get_param: protected_private_net_id }
- vnfname: { get_param: vnf_name }
-
- # NETWORK_ROLE: unprotected_private
- # NETWORK_TYPE: internal
- int_unprotected_private_subnet:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: int_unprotected_private_network }
- cidr: { get_param: unprotected_private_net_cidr }
-
- # NETWORK_ROLE: protected_private
- # NETWORK_TYPE: internal
- int_protected_private_subnet:
- type: OS::Neutron::Subnet
- properties:
- network: { get_resource: int_protected_private_network }
- cidr: { get_param: protected_private_net_cidr }
-
- ### Virtual Firewall instantiation ###
-
- # VM_TYPE: vfw
- # NETWORK_ROLE: protected_private
- # NETWORK_TYPE: internal
- vfw_0_int_unprotected_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: int_unprotected_private_network }
- fixed_ips: [{"subnet": { get_resource: int_unprotected_private_subnet }, "ip_address": { get_param: vfw_int_unprotected_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- # VM_TYPE: vfw
- # NETWORK_ROLE: protected_private
- # NETWORK_TYPE: internal
- vfw_0_int_protected_private_port_0:
- type: OS::Neutron::Port
- properties:
- allowed_address_pairs: [{ "ip_address": { get_param: vfw_int_protected_private_floating_ip }}]
- network: { get_resource: int_protected_private_network }
- fixed_ips: [{"subnet": { get_resource: int_protected_private_subnet }, "ip_address": { get_param: vfw_int_protected_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- # VM_TYPE: vfw
- # NETWORK_ROLE: onap_private
- # NETWORK_TYPE: external
- vfw_0_onap_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_param: onap_private_net_id }
- fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vfw_onap_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- # VM_TYPE: vfw
- vfw_server_0:
- type: OS::Nova::Server
- properties:
- image: { get_param: vfw_image_name }
- flavor: { get_param: vfw_flavor_name }
- name: { get_param: vfw_name_0 }
- key_name: { get_resource: my_keypair }
- networks:
- - network: { get_param: public_net_id }
- - port: { get_resource: vfw_0_int_unprotected_private_port_0 }
- - port: { get_resource: vfw_0_int_protected_private_port_0 }
- - port: { get_resource: vfw_0_onap_private_port_0 }
- metadata:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __dcae_collector_ip__ : { get_param: dcae_collector_ip }
- __dcae_collector_port__ : { get_param: dcae_collector_port }
- __demo_artifacts_version__ : { get_param: demo_artifacts_version }
- __install_script_version__ : { get_param: install_script_version }
- __vfw_private_ip_0__ : { get_param: vfw_int_unprotected_private_ip_0 }
- __vfw_private_ip_1__ : { get_param: vfw_int_protected_private_ip_0 }
- __vfw_private_ip_2__ : { get_param: vfw_onap_private_ip_0 }
- __unprotected_private_net_cidr__ : { get_param: unprotected_private_net_cidr }
- __protected_private_net_cidr__ : { get_param: protected_private_net_cidr }
- __onap_private_net_cidr__ : { get_param: onap_private_net_cidr }
- __cloud_env__ : { get_param: cloud_env }
- __nexus_artifact_repo__: { get_param: nexus_artifact_repo }
- template: |
- #!/bin/bash
-
- # nasty hack to bypass cloud-init issues
- sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf
- iface_correct=`ip a | grep 10.8.8 | awk {'print $7'}`
- route add default gw 10.8.8.1 ${iface_correct}
-
- # Create configuration files
- mkdir /opt/config
- echo "__dcae_collector_ip__" > /opt/config/dcae_collector_ip.txt
- echo "__dcae_collector_port__" > /opt/config/dcae_collector_port.txt
- echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
- echo "__install_script_version__" > /opt/config/install_script_version.txt
- echo "__vfw_private_ip_0__" > /opt/config/vfw_private_ip_0.txt
- echo "__vfw_private_ip_1__" > /opt/config/vfw_private_ip_1.txt
- echo "__vfw_private_ip_2__" > /opt/config/vfw_private_ip_2.txt
- echo "__unprotected_private_net_cidr__" > /opt/config/unprotected_private_net_cidr.txt
- echo "__protected_private_net_cidr__" > /opt/config/protected_private_net_cidr.txt
- echo "__onap_private_net_cidr__" > /opt/config/onap_private_net_cidr.txt
- echo "__cloud_env__" > /opt/config/cloud_env.txt
- echo "__nexus_artifact_repo__" > /opt/config/nexus_artifact_repo.txt
-
- # Download and run install script
- apt-get update
- apt-get -y install unzip
- if [[ "__install_script_version__" =~ "SNAPSHOT" ]]; then REPO=snapshots; else REPO=releases; fi
- curl -k -L "__nexus_artifact_repo__/service/local/artifact/maven/redirect?r=${REPO}&g=org.onap.demo.vnf.vfw&a=vfw-scripts&e=zip&v=__install_script_version__" -o /opt/vfw-scripts-__install_script_version__.zip
- unzip -j /opt/vfw-scripts-__install_script_version__.zip -d /opt v_firewall_install.sh
- cd /opt
- chmod +x v_firewall_install.sh
- ./v_firewall_install.sh
-
-
- ### Virtual Packet Generator instantiation ###
-
- vpg_0_int_unprotected_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: int_unprotected_private_network }
- fixed_ips: [{"subnet": { get_resource: int_unprotected_private_subnet }, "ip_address": { get_param: vpg_int_unprotected_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- vpg_0_onap_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_param: onap_private_net_id }
- fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vpg_onap_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- vpg_server_0:
- type: OS::Nova::Server
- properties:
- image: { get_param: vpg_image_name }
- flavor: { get_param: vpg_flavor_name }
- name: { get_param: vpg_name_0 }
- key_name: { get_resource: my_keypair }
- networks:
- - network: { get_param: public_net_id }
- - port: { get_resource: vpg_0_int_unprotected_private_port_0 }
- - port: { get_resource: vpg_0_onap_private_port_0 }
- metadata:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __fw_ipaddr__: { get_param: vfw_int_unprotected_private_ip_0 }
- __protected_net_cidr__: { get_param: protected_private_net_cidr }
- __sink_ipaddr__: { get_param: vsn_int_protected_private_ip_0 }
- __demo_artifacts_version__ : { get_param: demo_artifacts_version }
- __install_script_version__ : { get_param: install_script_version }
- __vpg_private_ip_0__ : { get_param: vpg_int_unprotected_private_ip_0 }
- __vpg_private_ip_1__ : { get_param: vpg_onap_private_ip_0 }
- __unprotected_private_net_cidr__ : { get_param: unprotected_private_net_cidr }
- __onap_private_net_cidr__ : { get_param: onap_private_net_cidr }
- __cloud_env__ : { get_param: cloud_env }
- __nexus_artifact_repo__: { get_param: nexus_artifact_repo }
- template: |
- #!/bin/bash
-
- # nasty hack to bypass cloud-init issues
- sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf
- iface_correct=`ip a | grep 10.8.8 | awk {'print $7'}`
- route add default gw 10.8.8.1 ${iface_correct}
-
- # Create configuration files
- mkdir /opt/config
- echo "__fw_ipaddr__" > /opt/config/fw_ipaddr.txt
- echo "__protected_net_cidr__" > /opt/config/protected_net_cidr.txt
- echo "__sink_ipaddr__" > /opt/config/sink_ipaddr.txt
- echo "__demo_artifacts_version__" > /opt/config/demo_artifacts_version.txt
- echo "__install_script_version__" > /opt/config/install_script_version.txt
- echo "__vpg_private_ip_0__" > /opt/config/vpg_private_ip_0.txt
- echo "__vpg_private_ip_1__" > /opt/config/vpg_private_ip_1.txt
- echo "__unprotected_private_net_cidr__" > /opt/config/unprotected_private_net_cidr.txt
- echo "__onap_private_net_cidr__" > /opt/config/onap_private_net_cidr.txt
- echo "__cloud_env__" > /opt/config/cloud_env.txt
- echo "__nexus_artifact_repo__" > /opt/config/nexus_artifact_repo.txt
-
- # Download and run install script
- apt-get update
- apt-get -y install unzip
- if [[ "__install_script_version__" =~ "SNAPSHOT" ]]; then REPO=snapshots; else REPO=releases; fi
- curl -k -L "__nexus_artifact_repo__/service/local/artifact/maven/redirect?r=${REPO}&g=org.onap.demo.vnf.vfw&a=vfw-scripts&e=zip&v=__install_script_version__" -o /opt/vfw-scripts-__install_script_version__.zip
- unzip -j /opt/vfw-scripts-__install_script_version__.zip -d /opt v_packetgen_install.sh
- cd /opt
- chmod +x v_packetgen_install.sh
- ./v_packetgen_install.sh
-
-
- ### Virtual Sink instantiation ###
-
- vsn_0_int_protected_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_resource: int_protected_private_network }
- fixed_ips: [{"subnet": { get_resource: int_protected_private_subnet }, "ip_address": { get_param: vsn_int_protected_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- vsn_0_onap_private_port_0:
- type: OS::Neutron::Port
- properties:
- network: { get_param: onap_private_net_id }
- fixed_ips: [{"subnet": { get_param: onap_private_subnet_id }, "ip_address": { get_param: vsn_onap_private_ip_0 }}]
- security_groups:
- - { get_param: sec_group }
-
- vsn_server_0:
- type: OS::Nova::Server
- properties:
- image: { get_param: vsn_image_name }
- flavor: { get_param: vsn_flavor_name }
- name: { get_param: vsn_name_0 }
- key_name: { get_resource: my_keypair }
- networks:
- - network: { get_param: public_net_id }
- - port: { get_resource: vsn_0_int_protected_private_port_0 }
- - port: { get_resource: vsn_0_onap_private_port_0 }
- metadata:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __protected_net_gw__: { get_param: vfw_int_protected_private_ip_0 }
- __unprotected_net__: { get_param: unprotected_private_net_cidr }
- __install_script_version__ : { get_param: install_script_version }
- __vsn_private_ip_0__ : { get_param: vsn_int_protected_private_ip_0 }
- __vsn_private_ip_1__ : { get_param: vsn_onap_private_ip_0 }
- __protected_private_net_cidr__ : { get_param: protected_private_net_cidr }
- __onap_private_net_cidr__ : { get_param: onap_private_net_cidr }
- __cloud_env__ : { get_param: cloud_env }
- __nexus_artifact_repo__: { get_param: nexus_artifact_repo }
- template: |
- #!/bin/bash
-
- # nasty hack to bypass cloud-init issues
- sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf
- iface_correct=`ip a | grep 10.8.8 | awk {'print $7'}`
- route add default gw 10.8.8.1 ${iface_correct}
-
- # Create configuration files
- mkdir /opt/config
- echo "__protected_net_gw__" > /opt/config/protected_net_gw.txt
- echo "__unprotected_net__" > /opt/config/unprotected_net.txt
- echo "__install_script_version__" > /opt/config/install_script_version.txt
- echo "__vsn_private_ip_0__" > /opt/config/vsn_private_ip_0.txt
- echo "__vsn_private_ip_1__" > /opt/config/vsn_private_ip_1.txt
- echo "__protected_private_net_cidr__" > /opt/config/protected_private_net_cidr.txt
- echo "__onap_private_net_cidr__" > /opt/config/onap_private_net_cidr.txt
- echo "__cloud_env__" > /opt/config/cloud_env.txt
- echo "__nexus_artifact_repo__" > /opt/config/nexus_artifact_repo.txt
-
- # Download and run install script
- apt-get update
- apt-get -y install unzip
- if [[ "__install_script_version__" =~ "SNAPSHOT" ]]; then REPO=snapshots; else REPO=releases; fi
- curl -k -L "__nexus_artifact_repo__/service/local/artifact/maven/redirect?r=${REPO}&g=org.onap.demo.vnf.vfw&a=vfw-scripts&e=zip&v=__install_script_version__" -o /opt/vfw-scripts-__install_script_version__.zip
- unzip -j /opt/vfw-scripts-__install_script_version__.zip -d /opt v_sink_install.sh
- cd /opt
- chmod +x v_sink_install.sh
- ./v_sink_install.sh
diff --git a/patches/update_policy.sh b/patches/update_policy.sh
index cc0ab17a..15a5f883 100755
--- a/patches/update_policy.sh
+++ b/patches/update_policy.sh
@@ -1,20 +1,20 @@
#!/usr/bin/env bash
set -xe
-
-DROOLS_POD=`kubectl get pods | grep drools | awk {'print $1'}`
+NAMESPACE=$1
+DROOLS_POD=`kubectl -n ${NAMESPACE} get pods | grep drools | awk {'print $1'}`
DST_BASE="/home/policy/.m2/repository/org/onap"
# WA to clean wrong _remote.repositories
# this/original version of the files will prevent maven from finding missing dependencies
# it's not an issue in an online lab, as those files are updated when poms are collected from the internet
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/common/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/controller-usecases/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/database/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/eventmanager/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/guard/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/controlloop/1.5.3/_remote.repositories"
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f $DST_BASE/policy/drools-applications/drools-applications/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/common/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/controller-usecases/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/database/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/eventmanager/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/common/guard/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/controlloop/controlloop/1.5.3/_remote.repositories"
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f $DST_BASE/policy/drools-applications/drools-applications/1.5.3/_remote.repositories"
# this part is for patching POLICY-2191
@@ -22,9 +22,9 @@ patch_pom() {
pom_name=$1
dst_path=$2
- kubectl exec -it ${DROOLS_POD} -n onap -- bash -c "rm -f ${dst_path}/_remote.repositories;mkdir -p ${dst_path}"
- kubectl cp ./POLICY-2191/${pom_name} ${DROOLS_POD}:${dst_path}/${pom_name}
- kubectl cp ./POLICY-2191/${pom_name}.sha1 ${DROOLS_POD}:${dst_path}/${pom_name}.sha1
+ kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f ${dst_path}/_remote.repositories;mkdir -p ${dst_path}"
+ kubectl -n ${NAMESPACE} cp ./POLICY-2191/${pom_name} ${DROOLS_POD}:${dst_path}/${pom_name}
+ kubectl -n ${NAMESPACE} cp ./POLICY-2191/${pom_name}.sha1 ${DROOLS_POD}:${dst_path}/${pom_name}.sha1
}
# patch 48 files in drools
@@ -54,4 +54,4 @@ patch_pom policy-models-pdp-2.1.3.pom ${DST_BASE}/policy/models/policy-models-pd
patch_pom policy-models-tosca-2.1.3.pom ${DST_BASE}/policy/models/policy-models-tosca/2.1.3/
# restart policy
-kubectl exec -it ${DROOLS_POD} -n onap -- bash -c '/opt/app/policy/bin/policy stop;/opt/app/policy/bin/policy start'
+kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c '/opt/app/policy/bin/policy stop;/opt/app/policy/bin/policy start'
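
For one of the patched artifacts, the patch_pom helper above expands to roughly the following three commands (a sketch based on the last call shown in the hunk; variables are left unexpanded)::

    kubectl -n ${NAMESPACE} exec -it ${DROOLS_POD} -- bash -c "rm -f ${DST_BASE}/policy/models/policy-models-tosca/2.1.3/_remote.repositories;mkdir -p ${DST_BASE}/policy/models/policy-models-tosca/2.1.3/"
    kubectl -n ${NAMESPACE} cp ./POLICY-2191/policy-models-tosca-2.1.3.pom ${DROOLS_POD}:${DST_BASE}/policy/models/policy-models-tosca/2.1.3/policy-models-tosca-2.1.3.pom
    kubectl -n ${NAMESPACE} cp ./POLICY-2191/policy-models-tosca-2.1.3.pom.sha1 ${DROOLS_POD}:${DST_BASE}/policy/models/policy-models-tosca/2.1.3/policy-models-tosca-2.1.3.pom.sha1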
diff --git a/patches/update_robot.sh b/patches/update_robot.sh
index 80c96113..8609685d 100755
--- a/patches/update_robot.sh
+++ b/patches/update_robot.sh
@@ -2,11 +2,13 @@
set -xe
-ROBOT_POD=`kubectl get pods | grep robot | awk {'print $1'}`
+NAMESPACE=$1
+NETPREFIX=$2
+ROBOT_POD=`kubectl -n ${NAMESPACE} get pods | grep robot | awk {'print $1'}`
ROBOT_HOME="/var/opt/ONAP"
# distribute example private key onap-dev
-kubectl cp onap-dev.pem ${ROBOT_POD}:${ROBOT_HOME}/onap-dev.pem
+kubectl cp -n ${NAMESPACE} onap-dev.pem ${ROBOT_POD}:${ROBOT_HOME}/onap-dev.pem
# stability improvement for SRPOL lab
# there is an issue that cloudinit is randomly putting default route
@@ -14,4 +16,8 @@ kubectl cp onap-dev.pem ${ROBOT_POD}:${ROBOT_HOME}/onap-dev.pem
# this patch assumes that we are using rc3-offline-network as the public network for vFW VMs
# vFW VMs install SW at runtime, similarly to other ONAP demo usecases
# please note that such network must be reachable from robot pod
-kubectl cp base_vfw.yaml ${ROBOT_POD}:${ROBOT_HOME}/demo/heat/vFW/base_vfw.yaml
+HACK="\n # nasty hack to bypass cloud-init issues\n sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf\n iface_correct=\`ip a | grep ${NETPREFIX} | awk {'print \$7'}\`\n route add default gw ${NETPREFIX}.1 \${iface_correct}"
+
+kubectl cp -n ${NAMESPACE} ${ROBOT_POD}:${ROBOT_HOME}/demo/heat/vFW/base_vfw.yaml base_vfw.yaml
+sed -i -e "/#!\/bin\/bash/a\ ${HACK}" base_vfw.yaml
+kubectl cp -n ${NAMESPACE} base_vfw.yaml ${ROBOT_POD}:${ROBOT_HOME}/demo/heat/vFW/base_vfw.yaml
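
After the in-place sed edit above, every user_data script in the copied base_vfw.yaml should start with roughly the following header (a sketch assuming NETPREFIX is "10.8.8", matching the gateway hack that used to be hard-coded in the deleted patches/base_vfw.yaml)::

    #!/bin/bash

    # nasty hack to bypass cloud-init issues
    sed -i '1i nameserver 8.8.8.8' /etc/resolv.conf
    iface_correct=`ip a | grep 10.8.8 | awk {'print $7'}`
    route add default gw 10.8.8.1 ${iface_correct}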
diff --git a/tools/cicdansible/group_vars/all.yml b/tools/cicdansible/group_vars/all.yml
index f886b628..3165e374 100644
--- a/tools/cicdansible/group_vars/all.yml
+++ b/tools/cicdansible/group_vars/all.yml
@@ -30,7 +30,7 @@ image_name: ""
#True by default, most openstack providers offer ssd volumes probably.
use_volume_for_nfs: true
#Cidr of private subnet where instances are connected.
-subnet_cidr: "10.1.0.0/24"
+subnet_cidr: "10.1.0.0/16"
#Start of dhcp allocation range for subnet.
subnet_range_start: "10.1.0.4"
#Subnet allocation range end.
@@ -64,3 +64,5 @@ install_app: true
# You can use it to override any variable in offline installer except those
# supported directly by cicdansible.
application_config: ''
+# ID of the network for demo usecases
+demo_network_id: ""
diff --git a/tools/cicdansible/heat/installer.yaml b/tools/cicdansible/heat/installer.yaml
index 7b3f10c0..1f65f73f 100644
--- a/tools/cicdansible/heat/installer.yaml
+++ b/tools/cicdansible/heat/installer.yaml
@@ -94,6 +94,11 @@ parameters:
type: boolean
label: "use volume for nfs storage"
description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk"
+ demo_network:
+ label: "demo net id"
+ type: string
+ description: "specifies id of network used for demo usecases"
+ default: ""
conditions:
#Condition for nfs volume usage.
use_volume_for_nfs: { get_param: use_volume_for_nfs }
@@ -201,6 +206,7 @@ resources:
flavor_name: { get_param: node_flavor_name }
notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
security_group: { get_resource: secgroup }
+ demo_network: { get_param: demo_network }
scheduler_hints:
group: { get_resource: anti_affinity_group }
depends_on: [routercon, instance_wait_handle]
@@ -243,6 +249,7 @@ resources:
notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
security_group: { get_resource: secgroup }
scheduler_hints: {}
+ demo_network: { get_param: demo_network }
depends_on: [instance_wait_handle]
#Volume attachment for infra node.
resources_storage_attachment:
@@ -309,6 +316,15 @@ resources:
- []
#Output values
outputs:
+ network_name:
+ value: {get_attr: [privnet, name] }
+ description: "Name of private network"
+ network_id:
+ value: { get_resource: privnet }
+ description: "ID of private network"
+ subnet_id:
+ value: { get_resource: privsubnet }
+ description: "ID of private subnet"
installer_ip:
value: { get_attr: [installer, ip] }
description: "Internal ip of installer instance"
diff --git a/tools/cicdansible/heat/instance.yaml b/tools/cicdansible/heat/instance.yaml
index 5429eb6e..7d9715f7 100644
--- a/tools/cicdansible/heat/instance.yaml
+++ b/tools/cicdansible/heat/instance.yaml
@@ -21,6 +21,16 @@ parameters:
scheduler_hints:
type: json
default: {}
+ demo_network:
+ type: string
+ default: ""
+conditions:
+ #Condition for demo network connection
+ connect_demo_net:
+ not:
+ equals:
+ - get_param: demo_network
+ - ""
#Resources.
resources:
#This is the network port to attach instance to.
@@ -48,8 +58,13 @@ resources:
image: { get_param: image_name }
flavor: { get_param: flavor_name }
key_name: { get_param: key_name }
+ config_drive: true
networks:
- - port: { get_resource: port }
+ if:
+ - "connect_demo_net"
+ - - port: { get_resource: port }
+ - network: { get_param: demo_network }
+ - - port: { get_resource: port }
user_data_format: SOFTWARE_CONFIG
user_data: { get_resource: config }
scheduler_hints: { get_param: scheduler_hints }
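
The condition above simply switches between a one- and a two-element networks list; rendered, the instance ends up with one of the following (illustrative only, angle brackets are placeholders)::

    # demo_network set
    networks:
      - port: <private port id>
      - network: <demo_network>

    # demo_network left at the default ""
    networks:
      - port: <private port id>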
diff --git a/tools/cicdansible/heat/node.yaml b/tools/cicdansible/heat/node.yaml
index cd628eec..12097770 100644
--- a/tools/cicdansible/heat/node.yaml
+++ b/tools/cicdansible/heat/node.yaml
@@ -22,6 +22,9 @@ parameters:
type: string
scheduler_hints:
type: json
+ demo_network:
+ type: string
+ default: ""
resources:
#Volume for storing /var/lib/docker for node.
docker_storage:
@@ -45,6 +48,7 @@ resources:
notify_command: { get_param: notify_command }
security_group: { get_param: security_group }
scheduler_hints: { get_param: scheduler_hints }
+ demo_network: { get_param: demo_network }
#Attachment of docker volume to node.
docker_storage_attachment:
type: OS::Cinder::VolumeAttachment
diff --git a/tools/cicdansible/roles/install/tasks/install.yml b/tools/cicdansible/roles/install/tasks/install.yml
index 141ea7ae..529e2acf 100644
--- a/tools/cicdansible/roles/install/tasks/install.yml
+++ b/tools/cicdansible/roles/install/tasks/install.yml
@@ -23,6 +23,17 @@
copy:
content: "{{ application_config | b64decode }}"
dest: "{{ installer_deploy_path }}/ansible/application/application_overrides.yml"
+ # add onap network configuration to overrides
+- name: "inject onap network information to config overrides"
+ replace:
+ path: "{{ installer_deploy_path }}/ansible/application/application_overrides.yml"
+ regexp: '({{ item.key }}:)\s?.*'
+ replace: '\1 {{ item.value }}'
+ loop: "{{ lines|dict2items }}"
+ vars:
+ lines:
+ openStackPrivateNetId: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'network_id') | list).0.output_value }}"
+ openStackPrivateSubnetId: "{{ (hostvars['localhost'].heat_stack.stack.outputs | selectattr('output_key', 'equalto', 'subnet_id') | list).0.output_value }}"
# This generates a file with locations of resource files in resource host, we
# do it only to allow manually running offline installer without
# typing them by hand. We cannot use
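
The replace task above only rewrites openStackPrivateNetId/openStackPrivateSubnetId keys that already exist in application_overrides.yml, substituting the installer stack outputs; the result would look roughly like this (values are placeholders)::

    openStackPrivateNetId: <network_id output of the installer heat stack>
    openStackPrivateSubnetId: <subnet_id output of the installer heat stack>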
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
index 5f9bc4f6..25e7ac79 100644
--- a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
@@ -33,5 +33,6 @@
infra_ip: "{{ floating_ips_by_address[infra_ip].id }}"
installer_ip: "{{ floating_ips_by_address[installer_ip].id }}"
use_volume_for_nfs: "{{ use_volume_for_nfs }}"
+ demo_network: "{{ demo_network_id }}"
wait: true
register: heat_stack
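
Putting the cicdansible pieces together, enabling the demo network only requires setting the new group variable; it is then passed to Heat as the demo_network parameter and attached to every node and to the installer instance. A hypothetical configuration::

    # tools/cicdansible/group_vars/all.yml
    demo_network_id: "<neutron network uuid reachable from the robot pod>"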