-rw-r--r--  deployment/heat/onap-oom/env/tlab/onap-oom.env | 19
-rw-r--r--  deployment/heat/onap-oom/env/windriver/onap-oom.env | 19
-rw-r--r--  deployment/heat/onap-oom/k8s_vm_init.sh | 21
-rw-r--r--  deployment/heat/onap-oom/k8s_vm_init_serv.sh | 98
-rw-r--r--  deployment/heat/onap-oom/k8s_vm_install.sh (renamed from deployment/heat/onap-oom/k8s_vm_entrypoint.sh) | 30
-rw-r--r--  deployment/heat/onap-oom/onap-oom.yaml | 991
-rw-r--r--  deployment/heat/onap-oom/parts/onap-oom-1.yaml | 8
-rw-r--r--  deployment/heat/onap-oom/parts/onap-oom-2.yaml | 57
-rw-r--r--  deployment/heat/onap-oom/rancher_vm_entrypoint.sh | 11
-rwxr-xr-x  deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh | 3
-rw-r--r--  test/mocks/pnfsimulator/src/main/java/org/onap/pnfsimulator/simulator/client/HttpClientAdapterImpl.java | 2
-rwxr-xr-x  test/vcpe/cleanvGMUX.sh | 62
-rw-r--r--  test/vcpe/healthcheck-k8s.py | 36
-rwxr-xr-x  test/vcpe/preload.py | 9
-rw-r--r--  test/vcpe/preload_templates/template.vcpe_bng_vfmodule.json | 6
-rw-r--r--  test/vcpe/preload_templates/template.vcpe_brgemu_vfmodule.json | 144
-rw-r--r--  test/vcpe/preload_templates/template.vcpe_gmux_vfmodule.json | 2
-rw-r--r--  test/vcpe/preload_templates/template.vcpe_gwgra_vfmodule.json | 125
-rw-r--r--  test/vcpe/preload_templates/template.vcpe_infra_vfmodule.json | 8
-rw-r--r--  test/vcpe/preload_templates/template_sniro_data.json | 2
-rw-r--r--  test/vcpe/preload_templates/template_sniro_request.json | 2
-rwxr-xr-x  test/vcpe/soutils.py | 2
-rwxr-xr-x  test/vcpe/vcpe.py | 8
-rwxr-xr-x  test/vcpe/vcpe_custom_service.py | 8
-rwxr-xr-x  test/vcpe/vcpecommon.py | 46
-rw-r--r--  version-manifest/pom.xml | 2
-rw-r--r--  version-manifest/src/main/resources/docker-manifest-staging.csv | 89
-rw-r--r--  version-manifest/src/main/resources/docker-manifest.csv | 131
-rw-r--r--  version-manifest/src/main/resources/java-manifest.csv | 115
-rw-r--r--  version.properties | 2
30 files changed, 1550 insertions, 508 deletions
diff --git a/deployment/heat/onap-oom/env/tlab/onap-oom.env b/deployment/heat/onap-oom/env/tlab/onap-oom.env
index 49d02c4fb..04fa44d76 100644
--- a/deployment/heat/onap-oom/env/tlab/onap-oom.env
+++ b/deployment/heat/onap-oom/env/tlab/onap-oom.env
@@ -7,8 +7,8 @@ parameters:
rancher_vm_flavor: m1.large
k8s_vm_flavor: m1.xlarge
- etcd_vm_flavor: m1.large
- orch_vm_flavor: m1.large
+ etcd_vm_flavor: m1.medium
+ orch_vm_flavor: m1.medium
public_net_id: fbe8fd92-6636-4e63-ab28-bb6a5b0888a9
@@ -22,8 +22,8 @@ parameters:
pullPolicy: IfNotPresent
robot:
flavor: large
- appcUsername: "admin"
- appcPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
openStackKeyStoneUrl: "https://bdc1tlab01.research.att.com:5000"
openStackPublicNetId: "__oam_network_id__" # NOTE: for TLAB, openStackPublicNetId needs to be oam_network_id instead of public_net_id
openStackTenantId: "${OS_PROJECT_ID}"
@@ -48,10 +48,9 @@ parameters:
openStackKeyStoneUrl: "https://bdc1tlab01.research.att.com:5000/v2.0"
openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
appc:
- replicaCount: 1
+ replicaCount: 3
config:
- enableClustering: false
- enableAAF: false
+ enableClustering: true
openStackType: "OpenStackProvider"
openStackName: "OpenStack"
openStackKeyStoneUrl: "https://bdc1tlab01.research.att.com:5000/v2.0"
@@ -59,9 +58,7 @@ parameters:
openStackDomain: "${OS_USER_DOMAIN_NAME}"
openStackUserName: "${OS_USERNAME}"
openStackEncryptedPassword: "${OS_PASSWORD}"
- odlUser: admin
- odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
sdnc:
- replicaCount: 1
+ replicaCount: 3
config:
- enableClustering: false
+ enableClustering: true
diff --git a/deployment/heat/onap-oom/env/windriver/onap-oom.env b/deployment/heat/onap-oom/env/windriver/onap-oom.env
index c6111843c..15819ffe1 100644
--- a/deployment/heat/onap-oom/env/windriver/onap-oom.env
+++ b/deployment/heat/onap-oom/env/windriver/onap-oom.env
@@ -7,8 +7,8 @@ parameters:
rancher_vm_flavor: m1.large
k8s_vm_flavor: m1.xlarge
- etcd_vm_flavor: m1.large
- orch_vm_flavor: m1.large
+ etcd_vm_flavor: m1.medium
+ orch_vm_flavor: m1.medium
public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4
@@ -22,8 +22,8 @@ parameters:
pullPolicy: IfNotPresent
robot:
flavor: large
- appcUsername: "admin"
- appcPassword: "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
+ appcUsername: "appc@appc.onap.org"
+ appcPassword: "demo123456!"
openStackKeyStoneUrl: "http://10.12.25.2:5000"
openStackPublicNetId: "__public_net_id__"
openStackTenantId: "${OS_PROJECT_ID}"
@@ -49,10 +49,9 @@ parameters:
openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}"
appc:
- replicaCount: 1
+ replicaCount: 3
config:
- enableClustering: false
- enableAAF: false
+ enableClustering: true
openStackType: "OpenStackProvider"
openStackName: "OpenStack"
openStackKeyStoneUrl: "http://10.12.25.2:5000/v2.0"
@@ -60,9 +59,7 @@ parameters:
openStackDomain: "${OS_USER_DOMAIN_NAME}"
openStackUserName: "${OS_USERNAME}"
openStackEncryptedPassword: "${OS_PASSWORD}"
- odlUser: admin
- odlPassword: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
sdnc:
- replicaCount: 1
+ replicaCount: 3
config:
- enableClustering: false
+ enableClustering: true
diff --git a/deployment/heat/onap-oom/k8s_vm_init.sh b/deployment/heat/onap-oom/k8s_vm_init.sh
new file mode 100644
index 000000000..5d162cb87
--- /dev/null
+++ b/deployment/heat/onap-oom/k8s_vm_init.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -x
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+RANCHER_IMAGES=$(docker images | grep rancher | wc -l)
+if [ $RANCHER_IMAGES -eq 0 ]; then
+ while [ ! -e /dockerdata-nfs/rancher_agent_cmd.sh ]; do
+ mount /dockerdata-nfs
+ sleep 10
+ done
+
+ cd ~
+ cp /dockerdata-nfs/rancher_agent_cmd.sh .
+ sed -i "s/docker run/docker run -e CATTLE_HOST_LABELS='__host_label__=true' -e CATTLE_AGENT_IP=__host_private_ip_addr__/g" rancher_agent_cmd.sh
+ source rancher_agent_cmd.sh
+fi
diff --git a/deployment/heat/onap-oom/k8s_vm_init_serv.sh b/deployment/heat/onap-oom/k8s_vm_init_serv.sh
new file mode 100644
index 000000000..153607739
--- /dev/null
+++ b/deployment/heat/onap-oom/k8s_vm_init_serv.sh
@@ -0,0 +1,98 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: k8s_vm_init.sh
+# Required-Start: $remote_fs $syslog
+# Required-Stop: $remote_fs $syslog
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Start daemon at boot time
+# Description: Enable service provided by daemon.
+### END INIT INFO
+
+dir="/opt"
+cmd="./k8s_vm_init.sh"
+user="root"
+
+name=`basename $0`
+pid_file="/var/run/$name.pid"
+stdout_log="/var/log/$name.log"
+stderr_log="/var/log/$name.err"
+
+get_pid() {
+ cat "$pid_file"
+}
+
+is_running() {
+ [ -f "$pid_file" ] && ps `get_pid` > /dev/null 2>&1
+}
+
+case "$1" in
+ start)
+ if is_running; then
+ echo "Already started"
+ else
+ echo "Starting $name"
+ cd "$dir"
+ if [ -z "$user" ]; then
+ sudo $cmd >> "$stdout_log" 2>> "$stderr_log" &
+ else
+ sudo -u "$user" $cmd >> "$stdout_log" 2>> "$stderr_log" &
+ fi
+ echo $! > "$pid_file"
+ if ! is_running; then
+ echo "Unable to start, see $stdout_log and $stderr_log"
+ exit 1
+ fi
+ fi
+ ;;
+ stop)
+ if is_running; then
+ echo -n "Stopping $name.."
+ kill `get_pid`
+ for i in {1..10}
+ do
+ if ! is_running; then
+ break
+ fi
+
+ echo -n "."
+ sleep 1
+ done
+ echo
+
+ if is_running; then
+ echo "Not stopped; may still be shutting down or shutdown may have failed"
+ exit 1
+ else
+ echo "Stopped"
+ if [ -f "$pid_file" ]; then
+ rm "$pid_file"
+ fi
+ fi
+ else
+ echo "Not running"
+ fi
+ ;;
+ restart)
+ $0 stop
+ if is_running; then
+ echo "Unable to stop, will not attempt to start"
+ exit 1
+ fi
+ $0 start
+ ;;
+ status)
+ if is_running; then
+ echo "Running"
+ else
+ echo "Stopped"
+ exit 1
+ fi
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh b/deployment/heat/onap-oom/k8s_vm_install.sh
index 62340c11c..3e80f8ea0 100644
--- a/deployment/heat/onap-oom/k8s_vm_entrypoint.sh
+++ b/deployment/heat/onap-oom/k8s_vm_install.sh
@@ -1,5 +1,4 @@
#!/bin/bash -x
-#
# Copyright 2018 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -8,27 +7,34 @@
#
# http://www.apache.org/licenses/LICENSE-2.0
#
-
export DEBIAN_FRONTEND=noninteractive
-HOST_IP=$(hostname -I)
-echo $HOST_IP `hostname` >> /etc/hosts
+echo "__host_private_ip_addr__ $(hostname)" >> /etc/hosts
printenv
mkdir -p /opt/config
echo "__docker_version__" > /opt/config/docker_version.txt
echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
echo "__rancher_private_ip_addr__" > /opt/config/rancher_private_ip_addr.txt
+echo "__host_private_ip_addr__" > /opt/config/host_private_ip_addr.txt
+echo "__mtu__" > /opt/config/mtu.txt
mkdir -p /etc/docker
if [ ! -z "__docker_proxy__" ]; then
cat > /etc/docker/daemon.json <<EOF
{
+ "mtu": __mtu__,
"insecure-registries" : ["__docker_proxy__"]
}
EOF
+else
+ cat > /etc/docker/daemon.json <<EOF
+{
+ "mtu": __mtu__
+}
+EOF
fi
if [ ! -z "__apt_proxy__" ]; then
- cat > /etc/apt/apt.conf.d/30proxy<<EOF
+ cat > /etc/apt/apt.conf.d/30proxy <<EOF
Acquire::http { Proxy "http://__apt_proxy__"; };
Acquire::https::Proxy "DIRECT";
EOF
@@ -58,15 +64,5 @@ while ! hash docker &> /dev/null; do
done
apt-mark hold docker-ce
-while [ ! -e /dockerdata-nfs/rancher_agent_cmd.sh ]; do
- mount /dockerdata-nfs
- sleep 10
-done
-
-cd ~
-cp /dockerdata-nfs/rancher_agent_cmd.sh .
-sed -i "s/docker run/docker run -e CATTLE_HOST_LABELS='__host_label__=true' -e CATTLE_AGENT_IP=${HOST_IP}/g" rancher_agent_cmd.sh
-source rancher_agent_cmd.sh
-sleep 1m
-
-reboot
+# Enable autorestart when VM reboots
+update-rc.d k8s_vm_init_serv defaults
diff --git a/deployment/heat/onap-oom/onap-oom.yaml b/deployment/heat/onap-oom/onap-oom.yaml
index 689bb6603..e6845cc52 100644
--- a/deployment/heat/onap-oom/onap-oom.yaml
+++ b/deployment/heat/onap-oom/onap-oom.yaml
@@ -96,6 +96,10 @@ parameters:
description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
default: "false"
+ mtu:
+ type: number
+ default: 1500
+
resources:
random-str:
type: OS::Heat::RandomString
@@ -128,6 +132,10 @@ resources:
- protocol: tcp
port_range_min: 1
port_range_max: 65535
+ # Protocols used for vLB/vDNS use case
+ - protocol: 47
+ - protocol: 53
+ - protocol: 132
# ONAP management private network
@@ -213,6 +221,7 @@ resources:
__helm_version__: { get_param: helm_version }
__helm_deploy_delay__: { get_param: helm_deploy_delay }
__use_ramdisk__: { get_param: use_ramdisk }
+ __mtu__: { get_param: mtu }
__public_net_id__: { get_param: public_net_id }
__oam_network_cidr__: { get_param: oam_network_cidr }
__oam_network_id__: { get_resource: oam_network }
@@ -261,6 +270,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_01_private_port }
+ k8s_01_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_01_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_01_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_01_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_01_vm_scripts }
+
k8s_01_vm:
type: OS::Nova::Server
properties:
@@ -271,18 +323,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_01_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_01_vm_config }
k8s_02_private_port:
type: OS::Neutron::Port
@@ -298,6 +340,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_02_private_port }
+ k8s_02_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_02_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_02_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_02_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_02_vm_scripts }
+
k8s_02_vm:
type: OS::Nova::Server
properties:
@@ -308,18 +393,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_02_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_02_vm_config }
k8s_03_private_port:
type: OS::Neutron::Port
@@ -335,6 +410,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_03_private_port }
+ k8s_03_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_03_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_03_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_03_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_03_vm_scripts }
+
k8s_03_vm:
type: OS::Nova::Server
properties:
@@ -345,18 +463,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_03_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_03_vm_config }
k8s_04_private_port:
type: OS::Neutron::Port
@@ -372,6 +480,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_04_private_port }
+ k8s_04_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_04_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_04_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_04_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_04_vm_scripts }
+
k8s_04_vm:
type: OS::Nova::Server
properties:
@@ -382,18 +533,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_04_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_04_vm_config }
k8s_05_private_port:
type: OS::Neutron::Port
@@ -409,6 +550,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_05_private_port }
+ k8s_05_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_05_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_05_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_05_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_05_vm_scripts }
+
k8s_05_vm:
type: OS::Nova::Server
properties:
@@ -419,18 +603,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_05_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_05_vm_config }
k8s_06_private_port:
type: OS::Neutron::Port
@@ -446,6 +620,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_06_private_port }
+ k8s_06_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_06_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_06_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_06_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_06_vm_scripts }
+
k8s_06_vm:
type: OS::Nova::Server
properties:
@@ -456,18 +673,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_06_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_06_vm_config }
k8s_07_private_port:
type: OS::Neutron::Port
@@ -483,6 +690,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_07_private_port }
+ k8s_07_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_07_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_07_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_07_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_07_vm_scripts }
+
k8s_07_vm:
type: OS::Nova::Server
properties:
@@ -493,18 +743,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_07_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_07_vm_config }
k8s_08_private_port:
type: OS::Neutron::Port
@@ -520,6 +760,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_08_private_port }
+ k8s_08_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_08_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_08_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_08_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_08_vm_scripts }
+
k8s_08_vm:
type: OS::Nova::Server
properties:
@@ -530,18 +813,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_08_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_08_vm_config }
k8s_09_private_port:
type: OS::Neutron::Port
@@ -557,6 +830,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_09_private_port }
+ k8s_09_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_09_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_09_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_09_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_09_vm_scripts }
+
k8s_09_vm:
type: OS::Nova::Server
properties:
@@ -567,18 +883,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_09_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_09_vm_config }
k8s_10_private_port:
type: OS::Neutron::Port
@@ -594,6 +900,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_10_private_port }
+ k8s_10_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_10_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_10_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_10_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_10_vm_scripts }
+
k8s_10_vm:
type: OS::Nova::Server
properties:
@@ -604,18 +953,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_10_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_10_vm_config }
k8s_11_private_port:
type: OS::Neutron::Port
@@ -631,6 +970,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_11_private_port }
+ k8s_11_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_11_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_11_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_11_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_11_vm_scripts }
+
k8s_11_vm:
type: OS::Nova::Server
properties:
@@ -641,18 +1023,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_11_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_11_vm_config }
k8s_12_private_port:
type: OS::Neutron::Port
@@ -668,6 +1040,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: k8s_12_private_port }
+ k8s_12_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [k8s_12_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [k8s_12_floating_ip, fixed_ip_address] }
+ __host_label__: 'compute'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ k8s_12_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: k8s_12_vm_scripts }
+
k8s_12_vm:
type: OS::Nova::Server
properties:
@@ -678,18 +1093,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: k8s_12_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'compute'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: k8s_12_vm_config }
etcd_1_private_port:
type: OS::Neutron::Port
@@ -705,6 +1110,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: etcd_1_private_port }
+ etcd_1_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [etcd_1_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [etcd_1_floating_ip, fixed_ip_address] }
+ __host_label__: 'etcd'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ etcd_1_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: etcd_1_vm_scripts }
+
etcd_1_vm:
type: OS::Nova::Server
properties:
@@ -715,18 +1163,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: etcd_1_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'etcd'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: etcd_1_vm_config }
etcd_2_private_port:
type: OS::Neutron::Port
@@ -742,6 +1180,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: etcd_2_private_port }
+ etcd_2_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [etcd_2_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [etcd_2_floating_ip, fixed_ip_address] }
+ __host_label__: 'etcd'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ etcd_2_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: etcd_2_vm_scripts }
+
etcd_2_vm:
type: OS::Nova::Server
properties:
@@ -752,18 +1233,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: etcd_2_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'etcd'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: etcd_2_vm_config }
etcd_3_private_port:
type: OS::Neutron::Port
@@ -779,6 +1250,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: etcd_3_private_port }
+ etcd_3_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [etcd_3_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [etcd_3_floating_ip, fixed_ip_address] }
+ __host_label__: 'etcd'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ etcd_3_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: etcd_3_vm_scripts }
+
etcd_3_vm:
type: OS::Nova::Server
properties:
@@ -789,18 +1303,8 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: etcd_3_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'etcd'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: etcd_3_vm_config }
orch_1_private_port:
type: OS::Neutron::Port
@@ -816,6 +1320,49 @@ resources:
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: orch_1_private_port }
+ orch_1_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [orch_1_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [orch_1_floating_ip, fixed_ip_address] }
+ __host_label__: 'orchestration'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ orch_1_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: orch_1_vm_scripts }
+
orch_1_vm:
type: OS::Nova::Server
properties:
@@ -826,18 +1373,78 @@ resources:
key_name: { get_param: key_name }
networks:
- port: { get_resource: orch_1_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: 'orchestration'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: orch_1_vm_config }
+
+ orch_2_private_port:
+ type: OS::Neutron::Port
+ properties:
+ network: { get_resource: oam_network }
+ fixed_ips: [{"subnet": { get_resource: oam_subnet }}]
+ security_groups:
+ - { get_resource: onap_sg }
+
+ orch_2_floating_ip:
+ type: OS::Neutron::FloatingIP
+ properties:
+ floating_network_id: { get_param: public_net_id }
+ port_id: { get_resource: orch_2_private_port }
+
+ orch_2_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [orch_2_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [orch_2_floating_ip, fixed_ip_address] }
+ __host_label__: 'orchestration'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ orch_2_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: orch_2_vm_scripts }
+
+ orch_2_vm:
+ type: OS::Nova::Server
+ properties:
+ name:
+ list_join: ['-', [ { get_param: 'OS::stack_name' }, 'orch', '2' ] ]
+ image: { get_param: ubuntu_1604_image }
+ flavor: { get_param: orch_vm_flavor }
+ key_name: { get_param: key_name }
+ networks:
+ - port: { get_resource: orch_2_private_port }
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: orch_2_vm_config }
outputs:
rancher_vm_ip:
diff --git a/deployment/heat/onap-oom/parts/onap-oom-1.yaml b/deployment/heat/onap-oom/parts/onap-oom-1.yaml
index a96c9a373..5f117acb8 100644
--- a/deployment/heat/onap-oom/parts/onap-oom-1.yaml
+++ b/deployment/heat/onap-oom/parts/onap-oom-1.yaml
@@ -93,6 +93,10 @@ parameters:
description: Set to "true" if you want to use a RAM disk for /dockerdata-nfs/.
default: "false"
+ mtu:
+ type: number
+ default: 1500
+
resources:
random-str:
type: OS::Heat::RandomString
@@ -125,6 +129,10 @@ resources:
- protocol: tcp
port_range_min: 1
port_range_max: 65535
+ # Protocols used for vLB/vDNS use case
+ - protocol: 47
+ - protocol: 53
+ - protocol: 132
# ONAP management private network
diff --git a/deployment/heat/onap-oom/parts/onap-oom-2.yaml b/deployment/heat/onap-oom/parts/onap-oom-2.yaml
index ff2272d92..721f3cdc4 100644
--- a/deployment/heat/onap-oom/parts/onap-oom-2.yaml
+++ b/deployment/heat/onap-oom/parts/onap-oom-2.yaml
@@ -12,6 +12,49 @@
floating_network_id: { get_param: public_net_id }
port_id: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
+ ${VM_TYPE}_${VM_NUM}_vm_scripts:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ power_state:
+ mode: reboot
+ runcmd:
+ - [ /opt/k8s_vm_install.sh ]
+ write_files:
+ - path: /opt/k8s_vm_install.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __docker_version__: { get_param: docker_version }
+ __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
+ __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __mtu__: { get_param: mtu }
+ template:
+ get_file: k8s_vm_install.sh
+ - path: /opt/k8s_vm_init.sh
+ permissions: '0755'
+ content:
+ str_replace:
+ params:
+ __host_private_ip_addr__: { get_attr: [${VM_TYPE}_${VM_NUM}_floating_ip, fixed_ip_address] }
+ __host_label__: '$HOST_LABEL'
+ template:
+ get_file: k8s_vm_init.sh
+ - path: /etc/init.d/k8s_vm_init_serv
+ permissions: '0755'
+ content:
+ get_file: k8s_vm_init_serv.sh
+
+ ${VM_TYPE}_${VM_NUM}_vm_config:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_scripts }
+
${VM_TYPE}_${VM_NUM}_vm:
type: OS::Nova::Server
properties:
@@ -22,16 +65,6 @@
key_name: { get_param: key_name }
networks:
- port: { get_resource: ${VM_TYPE}_${VM_NUM}_private_port }
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __docker_proxy__: { get_param: docker_proxy }
- __apt_proxy__: { get_param: apt_proxy }
- __docker_version__: { get_param: docker_version }
- __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] }
- __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] }
- __host_label__: '$HOST_LABEL'
- template:
- get_file: k8s_vm_entrypoint.sh
+ user_data_format: SOFTWARE_CONFIG
+ user_data: { get_resource: ${VM_TYPE}_${VM_NUM}_vm_config }
diff --git a/deployment/heat/onap-oom/rancher_vm_entrypoint.sh b/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
index aced5dfc6..bd39963de 100644
--- a/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
+++ b/deployment/heat/onap-oom/rancher_vm_entrypoint.sh
@@ -35,6 +35,7 @@ echo "__rancher_agent_version__" > /opt/config/rancher_agent_version.txt
echo "__kubectl_version__" > /opt/config/kubectl_version.txt
echo "__helm_version__" > /opt/config/helm_version.txt
echo "__helm_deploy_delay__" > /opt/config/helm_deploy_delay.txt
+echo "__mtu__" > /opt/config/mtu.txt
cat <<EOF > /opt/config/integration-override.yaml
__integration_override_yaml__
@@ -50,14 +51,20 @@ sed -i 's/\_\_docker_proxy__/__docker_proxy__/g' /opt/config/integration-overrid
cp /opt/config/integration-override.yaml /root
cat /root/integration-override.yaml
-echo `hostname -I` `hostname` >> /etc/hosts
mkdir -p /etc/docker
if [ ! -z "__docker_proxy__" ]; then
cat > /etc/docker/daemon.json <<EOF
{
+ "mtu": __mtu__,
"insecure-registries" : ["__docker_proxy__"]
}
EOF
+else
+ cat > /etc/docker/daemon.json <<EOF
+{
+ "mtu": __mtu__
+}
+EOF
fi
if [ ! -z "__apt_proxy__" ]; then
cat > /etc/apt/apt.conf.d/30proxy<<EOF
@@ -157,7 +164,7 @@ curl -s -u "${CATTLE_ACCESS_KEY}:${CATTLE_SECRET_KEY}" \
-X PUT \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
--d '{"stacks":[{"type":"catalogTemplate", "answers":{"CONSTRAINT_TYPE":"required"}, "name":"kubernetes", "templateVersionId":"library:infra*k8s:52"}, {"type":"catalogTemplate", "name":"network-services", "templateId":"library:infra*network-services"}, {"type":"catalogTemplate", "name":"ipsec", "templateId":"library:infra*ipsec"}, {"type":"catalogTemplate", "name":"healthcheck", "templateId":"library:infra*healthcheck"}]}' \
+-d '{"stacks":[{"type":"catalogTemplate", "answers":{"CONSTRAINT_TYPE":"required"}, "name":"kubernetes", "templateVersionId":"library:infra*k8s:52"}, {"type":"catalogTemplate", "name":"network-services", "templateId":"library:infra*network-services"}, {"type":"catalogTemplate", "name":"ipsec", "templateId":"library:infra*ipsec", "answers":{"MTU":"__mtu__"}}, {"type":"catalogTemplate", "name":"healthcheck", "templateId":"library:infra*healthcheck"}]}' \
"http://$RANCHER_IP:8080/v2-beta/projecttemplates/$TEMPLATE_ID"
curl -s -u "${CATTLE_ACCESS_KEY}:${CATTLE_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" > project.json
diff --git a/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh b/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh
index 1167496c5..61d5f9f70 100755
--- a/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh
+++ b/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh
@@ -63,6 +63,7 @@ cat <<EOF
__helm_version__: { get_param: helm_version }
__helm_deploy_delay__: { get_param: helm_deploy_delay }
__use_ramdisk__: { get_param: use_ramdisk }
+ __mtu__: { get_param: mtu }
__public_net_id__: { get_param: public_net_id }
__oam_network_cidr__: { get_param: oam_network_cidr }
__oam_network_id__: { get_resource: oam_network }
@@ -103,7 +104,7 @@ for VM_NUM in $(seq 3); do
VM_TYPE=etcd HOST_LABEL=etcd VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
done
-for VM_NUM in $(seq 1); do
+for VM_NUM in $(seq 2); do
VM_TYPE=orch HOST_LABEL=orchestration VM_NUM=$VM_NUM envsubst < $PARTS_DIR/onap-oom-2.yaml
done
diff --git a/test/mocks/pnfsimulator/src/main/java/org/onap/pnfsimulator/simulator/client/HttpClientAdapterImpl.java b/test/mocks/pnfsimulator/src/main/java/org/onap/pnfsimulator/simulator/client/HttpClientAdapterImpl.java
index db1ea1578..f0c9917f5 100644
--- a/test/mocks/pnfsimulator/src/main/java/org/onap/pnfsimulator/simulator/client/HttpClientAdapterImpl.java
+++ b/test/mocks/pnfsimulator/src/main/java/org/onap/pnfsimulator/simulator/client/HttpClientAdapterImpl.java
@@ -33,6 +33,7 @@ import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.util.EntityUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
@@ -65,6 +66,7 @@ public class HttpClientAdapterImpl implements HttpClientAdapter {
try {
HttpPost request = createRequest(content, url);
HttpResponse response = client.execute(request);
+ EntityUtils.consumeQuietly(response.getEntity());
LOGGER.info(INVOKE, "Message sent, ves response code: {}", response.getStatusLine());
} catch (IOException e) {
LOGGER.warn("Error sending message to ves: {}", e.getMessage());
diff --git a/test/vcpe/cleanvGMUX.sh b/test/vcpe/cleanvGMUX.sh
new file mode 100755
index 000000000..3b8dabc8d
--- /dev/null
+++ b/test/vcpe/cleanvGMUX.sh
@@ -0,0 +1,62 @@
+#! /usr/bin/env bash
+#########################################################################################
+# Script to clean up vGMUX and other parts of the vCPE Use Case
+#
+# Edit the IP addresses and ports as appropriate
+#
+#######################################################################################
+
+
+VGMUX_IP=10.12.6.242
+#VBRG_IP=10.12.5.142
+#SDNC_IP=10.12.5.180
+
+#curl -X DELETE -u admin:Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U http://$SDNC_IP:8282/restconf/config/GENERIC-RESOURCE-API:tunnelxconn-allotted-resources
+#curl -X DELETE -u admin:Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U http://$SDNC_IP:8282/restconf/config/GENERIC-RESOURCE-API:brg-allotted-resources
+
+###################################
+# vGMUX
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.3.0.2/v3po:l2
+echo
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.22/v3po:l2
+echo
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.22
+echo
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.3.0.2
+echo
+
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.106
+echo
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.107
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.111
+
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X DELETE http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.5.0.110
+
+# Check by listing interfaces
+echo "********************* vGMUX status ************************"
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X GET http://$VGMUX_IP:8183/restconf/config/ietf-interfaces:interfaces| python -m json.tool
+
+
+exit;
+
+#########################################################################################
+# remove above exit if you want to interact with the other components
+#########################################################################################
+
+
+
+###################################
+# vBRG
+curl -u admin:admin -X DELETE http://$VBRG_IP:8183/restconf/config/ietf-interfaces:interfaces/interface/vxlanTun10.1.0.21
+
+# check
+echo "********************* vBRG status ************************"
+curl -H 'Content-Type: application/json' -H 'Accept: application/json' -u admin:admin -X GET http://$VBRG_IP:8183/restconf/config/ietf-interfaces:interfaces| python -m json.tool
+
+
+
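Note: the cleanup above is plain curl against the vGMUX Honeycomb REST API. For reference, a minimal Python sketch of the same interface teardown (an illustration only, not part of this change; it assumes the requests library, the same port 8183 and admin/admin credentials, and the tunnel interface names used above):

    #!/usr/bin/env python
    # Sketch: delete vxlan tunnel interfaces on the vGMUX via Honeycomb REST,
    # mirroring the curl DELETE calls in cleanvGMUX.sh.
    import requests

    VGMUX_IP = '10.12.6.242'
    IF_URL = 'http://{0}:8183/restconf/config/ietf-interfaces:interfaces'.format(VGMUX_IP)
    HEADERS = {'Content-Type': 'application/json', 'Accept': 'application/json'}
    AUTH = ('admin', 'admin')

    for intf in ['vxlanTun10.3.0.2', 'vxlanTun10.5.0.22']:
        # remove the v3po:l2 sub-node first, then the interface itself
        requests.delete('{0}/interface/{1}/v3po:l2'.format(IF_URL, intf), headers=HEADERS, auth=AUTH)
        requests.delete('{0}/interface/{1}'.format(IF_URL, intf), headers=HEADERS, auth=AUTH)

    # list whatever interfaces remain, as a check
    print(requests.get(IF_URL, headers=HEADERS, auth=AUTH).text)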
diff --git a/test/vcpe/healthcheck-k8s.py b/test/vcpe/healthcheck-k8s.py
new file mode 100644
index 000000000..0aea3fb9e
--- /dev/null
+++ b/test/vcpe/healthcheck-k8s.py
@@ -0,0 +1,36 @@
+#! /usr/bin/python
+
+import logging
+import json
+from vcpecommon import *
+import commands
+import sys
+
+if len(sys.argv) < 2:
+ print('namespace not provided')
+ print('Usage: healthcheck-k8s.py onap')
+ sys.exit()
+
+namespace = sys.argv[1]
+
+logging.basicConfig(level=logging.INFO, format='%(message)s')
+common = VcpeCommon()
+
+print('Checking vGMUX REST API from SDNC')
+cmd = 'curl -u admin:admin -X GET http://10.0.101.21:8183/restconf/config/ietf-interfaces:interfaces'
+ret = commands.getstatusoutput("kubectl -n {0} exec dev-sdnc-sdnc-0 -- bash -c '{1}'".format(namespace,cmd))
+sz = ret[-1].split('\n')[-1]
+print('\n')
+print(sz)
+
+print('Checking vBRG REST API from SDNC')
+cmd = 'curl -u admin:admin -X GET http://10.3.0.2:8183/restconf/config/ietf-interfaces:interfaces'
+ret = commands.getstatusoutput("kubectl -n {0} exec dev-sdnc-sdnc-0 -- bash -c '{1}'".format(namespace,cmd))
+sz = ret[-1].split('\n')[-1]
+print('\n')
+print(sz)
+
+print('Checking SDNC DB for vBRG MAC address')
+mac = common.get_brg_mac_from_sdnc()
+print(mac)
+
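Note: commands.getstatusoutput() returns a (status, output) tuple and the checks above parse only the last line of the output. A slightly more defensive variant could test the exit status first; a sketch, reusing the commands module the script already imports (the helper name check_from_sdnc is hypothetical):

    def check_from_sdnc(namespace, pod, url):
        # run curl inside the SDNC pod and report a non-zero exit status explicitly
        cmd = "kubectl -n {0} exec {1} -- bash -c 'curl -s -u admin:admin -X GET {2}'".format(namespace, pod, url)
        status, output = commands.getstatusoutput(cmd)
        if status != 0:
            print('healthcheck command failed (exit {0}): {1}'.format(status, cmd))
            return None
        return output.split('\n')[-1]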
diff --git a/test/vcpe/preload.py b/test/vcpe/preload.py
index c4efafde6..d72150319 100755
--- a/test/vcpe/preload.py
+++ b/test/vcpe/preload.py
@@ -121,6 +121,15 @@ class Preload:
self.logger.info('Preloading vGW')
return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+ def preload_vgw_gra(self, template_file, brg_mac, commont_dict, name_suffix, vgw_vfmod_name_index):
+ replace_dict = {'${brg_mac}': brg_mac,
+ '${suffix}': name_suffix,
+ '${vgw_vfmod_name_index}': vgw_vfmod_name_index
+ }
+ replace_dict.update(commont_dict)
+ self.logger.info('Preloading vGW-GRA')
+ return self.preload(template_file, replace_dict, self.vcpecommon.sdnc_preload_vnf_url)
+
def preload_vfmodule(self, template_file, service_instance_id, vnf_model, vfmodule_model, common_dict, name_suffix):
"""
:param template_file:
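Note: preload_vgw_gra() only assembles a replace_dict; the substitution of ${brg_mac}, ${suffix} and ${vgw_vfmod_name_index} into the GENERIC-RESOURCE-API template happens inside preload(). A minimal sketch of that kind of placeholder substitution (fill_template is a hypothetical helper, not the method used by this class):

    import json

    def fill_template(template_file, replace_dict):
        # naive ${...} placeholder substitution; assumes every key in
        # replace_dict appears literally in the template text
        with open(template_file) as f:
            text = f.read()
        for key, value in replace_dict.items():
            text = text.replace(key, str(value))
        return json.loads(text)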
diff --git a/test/vcpe/preload_templates/template.vcpe_bng_vfmodule.json b/test/vcpe/preload_templates/template.vcpe_bng_vfmodule.json
index a95d45671..7ef2fb7db 100644
--- a/test/vcpe/preload_templates/template.vcpe_bng_vfmodule.json
+++ b/test/vcpe/preload_templates/template.vcpe_bng_vfmodule.json
@@ -25,7 +25,7 @@
"VNF-API:vnf-parameters": [
{
"vnf-parameter-name": "vcpe_image_name",
- "vnf-parameter-value": "vbng-base-ubuntu-16-04"
+ "vnf-parameter-value": "vbng-casa-base-ubuntu-16-04"
},
{
"vnf-parameter-name": "vcpe_flavor_name",
@@ -84,6 +84,10 @@
"vnf-parameter-value": "10.4.0.0/24"
},
{
+ "vnf-parameter-name": "sdnc_ip_addr",
+ "vnf-parameter-value": "${sdnc_brg_bng_ip}"
+ },
+ {
"vnf-parameter-name": "vbng_private_ip_0",
"vnf-parameter-value": "10.3.0.1"
},
diff --git a/test/vcpe/preload_templates/template.vcpe_brgemu_vfmodule.json b/test/vcpe/preload_templates/template.vcpe_brgemu_vfmodule.json
index 9a9ecc37d..7072f6825 100644
--- a/test/vcpe/preload_templates/template.vcpe_brgemu_vfmodule.json
+++ b/test/vcpe/preload_templates/template.vcpe_brgemu_vfmodule.json
@@ -1,138 +1,138 @@
{
"VNF-API:input": {
"VNF-API:request-information": {
- "VNF-API:notification-url": "https://so.onap.org",
- "VNF-API:order-number": "robot12",
- "VNF-API:order-version": "1.0",
- "VNF-API:request-action": "PreloadVNFRequest",
- "VNF-API:request-id": "robot12",
- "VNF-API:request-sub-action": "SUPP",
+ "VNF-API:notification-url": "https://so.onap.org",
+ "VNF-API:order-number": "robot12",
+ "VNF-API:order-version": "1.0",
+ "VNF-API:request-action": "PreloadVNFRequest",
+ "VNF-API:request-id": "robot12",
+ "VNF-API:request-sub-action": "SUPP",
"VNF-API:source": "VID"
- },
+ },
"VNF-API:sdnc-request-header": {
- "VNF-API:svc-action": "reserve",
- "VNF-API:svc-notification-url": "https://son.onap.org",
+ "VNF-API:svc-action": "reserve",
+ "VNF-API:svc-notification-url": "https://son.onap.org",
"VNF-API:svc-request-id": "robot12"
- },
+ },
"VNF-API:vnf-topology-information": {
- "VNF-API:vnf-assignments": {},
+ "VNF-API:vnf-assignments": {},
"VNF-API:vnf-parameters": [
{
- "vnf-parameter-name": "vcpe_image_name",
- "vnf-parameter-value": "vbrg-base-ubuntu-16-04-new"
- },
+ "vnf-parameter-name": "vcpe_image_name",
+ "vnf-parameter-value": "vbrg-casa-base-ubuntu-16-04"
+ },
{
- "vnf-parameter-name": "vcpe_flavor_name",
+ "vnf-parameter-name": "vcpe_flavor_name",
"vnf-parameter-value": "m1.medium"
- },
+ },
{
- "vnf-parameter-name": "public_net_id",
+ "vnf-parameter-name": "public_net_id",
"vnf-parameter-value": "${public_net_id}"
- },
+ },
{
- "vnf-parameter-name": "vbrgemu_bng_private_net_id",
+ "vnf-parameter-name": "vbrgemu_bng_private_net_id",
"vnf-parameter-value": "${brg_bng_net}"
- },
+ },
{
- "vnf-parameter-name": "vbrgemu_bng_private_subnet_id",
+ "vnf-parameter-name": "vbrgemu_bng_private_subnet_id",
"vnf-parameter-value": "${brg_bng_subnet}"
- },
+ },
{
- "vnf-parameter-name": "onap_private_net_id",
+ "vnf-parameter-name": "onap_private_net_id",
"vnf-parameter-value": "${oam_onap_net}"
- },
+ },
{
- "vnf-parameter-name": "onap_private_subnet_id",
+ "vnf-parameter-name": "onap_private_subnet_id",
"vnf-parameter-value": "${oam_onap_subnet}"
- },
+ },
{
- "vnf-parameter-name": "onap_private_net_cidr",
+ "vnf-parameter-name": "onap_private_net_cidr",
"vnf-parameter-value": "10.0.0.0/16"
- },
+ },
{
- "vnf-parameter-name": "compile_state",
+ "vnf-parameter-name": "compile_state",
"vnf-parameter-value": "done"
- },
+ },
{
- "vnf-parameter-name": "vbrgemu_bng_private_net_cidr",
+ "vnf-parameter-name": "vbrgemu_bng_private_net_cidr",
"vnf-parameter-value": "10.3.0.0/24"
- },
+ },
{
- "vnf-parameter-name": "vbrgemu_private_ip_0",
+ "vnf-parameter-name": "vbrgemu_private_ip_0",
"vnf-parameter-value": "10.3.0.21"
- },
+ },
{
- "vnf-parameter-name": "sdnc_ip",
- "vnf-parameter-value": "10.0.7.1"
- },
+ "vnf-parameter-name": "sdnc_ip",
+ "vnf-parameter-value": "${sdnc_brg_bng_ip}"
+ },
{
- "vnf-parameter-name": "vbrgemu_name_0",
+ "vnf-parameter-name": "vbrgemu_name_0",
"vnf-parameter-value": "zdcpe1cpe01brgemu01_${suffix}"
- },
+ },
{
- "vnf-parameter-name": "vnf_id",
+ "vnf-parameter-name": "vnf_id",
"vnf-parameter-value": "vCPE_Infrastructure_BGREMU_demo_app_${suffix}"
- },
+ },
{
- "vnf-parameter-name": "vf_module_id",
+ "vnf-parameter-name": "vf_module_id",
"vnf-parameter-value": "vCPE_Customer_BRGEMU_${suffix}"
- },
+ },
{
- "vnf-parameter-name": "repo_url_blob",
+ "vnf-parameter-name": "repo_url_blob",
"vnf-parameter-value": "https://nexus.onap.org/content/sites/raw"
- },
+ },
{
- "vnf-parameter-name": "repo_url_artifacts",
+ "vnf-parameter-name": "repo_url_artifacts",
"vnf-parameter-value": "https://nexus.onap.org/content/groups/staging"
- },
+ },
{
- "vnf-parameter-name": "demo_artifacts_version",
+ "vnf-parameter-name": "demo_artifacts_version",
"vnf-parameter-value": "1.1.1"
- },
+ },
{
- "vnf-parameter-name": "install_script_version",
+ "vnf-parameter-name": "install_script_version",
"vnf-parameter-value": "1.1.1"
- },
+ },
{
- "vnf-parameter-name": "key_name",
+ "vnf-parameter-name": "key_name",
"vnf-parameter-value": "vbrgemu_key"
- },
+ },
{
- "vnf-parameter-name": "pub_key",
+ "vnf-parameter-name": "pub_key",
"vnf-parameter-value": "${pub_key}"
- },
+ },
{
- "vnf-parameter-name": "cloud_env",
+ "vnf-parameter-name": "cloud_env",
"vnf-parameter-value": "openstack"
- },
+ },
{
- "vnf-parameter-name": "vpp_source_repo_url",
+ "vnf-parameter-name": "vpp_source_repo_url",
"vnf-parameter-value": "https://gerrit.fd.io/r/vpp"
- },
+ },
{
- "vnf-parameter-name": "vpp_source_repo_branch",
+ "vnf-parameter-name": "vpp_source_repo_branch",
"vnf-parameter-value": "stable/1704"
- },
+ },
{
- "vnf-parameter-name": "hc2vpp_source_repo_url",
+ "vnf-parameter-name": "hc2vpp_source_repo_url",
"vnf-parameter-value": "https://gerrit.fd.io/r/hc2vpp"
- },
+ },
{
- "vnf-parameter-name": "hc2vpp_source_repo_branch",
+ "vnf-parameter-name": "hc2vpp_source_repo_branch",
"vnf-parameter-value": "stable/1704"
- },
+ },
{
- "vnf-parameter-name": "vpp_patch_url",
+ "vnf-parameter-name": "vpp_patch_url",
"vnf-parameter-value": "https://git.onap.org/demo/plain/vnfs/vCPE/vpp-option-82-for-vbrg/src/patches/VPP-Add-Option82-Nat-Filter-For-vBRG.patch"
}
- ],
+ ],
"vnf-topology-identifier": {
- "generic-vnf-name": "${generic_vnf_name}",
- "generic-vnf-type": "${generic_vnf_type}",
- "service-type": "${service_type}",
- "vnf-name": "${vnf_name}",
+ "generic-vnf-name": "${generic_vnf_name}",
+ "generic-vnf-type": "${generic_vnf_type}",
+ "service-type": "${service_type}",
+ "vnf-name": "${vnf_name}",
"vnf-type": "${vnf_type}"
}
}
}
-} \ No newline at end of file
+}
diff --git a/test/vcpe/preload_templates/template.vcpe_gmux_vfmodule.json b/test/vcpe/preload_templates/template.vcpe_gmux_vfmodule.json
index 9dc747393..f3401326d 100644
--- a/test/vcpe/preload_templates/template.vcpe_gmux_vfmodule.json
+++ b/test/vcpe/preload_templates/template.vcpe_gmux_vfmodule.json
@@ -25,7 +25,7 @@
"VNF-API:vnf-parameters": [
{
"vnf-parameter-name": "vcpe_image_name",
- "vnf-parameter-value": "vgmux2-base-ubuntu-16-04"
+ "vnf-parameter-value": "vgmux-casa-base-ubuntu-16-04"
},
{
"vnf-parameter-name": "vcpe_flavor_name",
diff --git a/test/vcpe/preload_templates/template.vcpe_gwgra_vfmodule.json b/test/vcpe/preload_templates/template.vcpe_gwgra_vfmodule.json
new file mode 100644
index 000000000..ced57ca6a
--- /dev/null
+++ b/test/vcpe/preload_templates/template.vcpe_gwgra_vfmodule.json
@@ -0,0 +1,125 @@
+{
+ "GENERIC-RESOURCE-API:input": {
+ "GENERIC-RESOURCE-API:request-information": {
+ "GENERIC-RESOURCE-API:notification-url": "https://so.onap.org",
+ "GENERIC-RESOURCE-API:order-number": "robot12",
+ "GENERIC-RESOURCE-API:order-version": "1.0",
+ "GENERIC-RESOURCE-API:request-action": "PreloadVfModuleRequest",
+ "GENERIC-RESOURCE-API:request-id": "robot12",
+ "GENERIC-RESOURCE-API:source": "VID"
+ },
+ "GENERIC-RESOURCE-API:sdnc-request-header": {
+ "GENERIC-RESOURCE-API:svc-action": "reserve",
+ "GENERIC-RESOURCE-API:svc-notification-url": "https://son.onap.org",
+ "GENERIC-RESOURCE-API:svc-request-id": "robot12"
+ },
+ "preload-vf-module-topology-information": {
+ "vf-module-topology": {
+ "vf-module-parameters": {
+ "param": [
+ {
+ "name": "public_net_id",
+ "value": "${public_net_id}"
+ },
+ {
+ "name": "mux_gw_private_net_id",
+ "value": "${mux_gw_net}"
+ },
+ {
+ "name": "mux_gw_private_subnet_id",
+ "value": "${mux_gw_subnet}"
+ },
+ {
+ "name": "mux_gw_private_net_cidr",
+ "value": "10.5.0.0/24"
+ },
+ {
+ "name": "cpe_public_net_id",
+ "value": "${cpe_public_net}"
+ },
+ {
+ "name": "cpe_public_subnet_id",
+ "value": "${cpe_public_subnet}"
+ },
+ {
+ "name": "cpe_public_net_cidr",
+ "value": "10.2.0.0/24"
+ },
+ {
+ "name": "vgw_private_ip_0",
+ "value": "10.5.0.92"
+ },
+ {
+ "name": "vgw_private_ip_1",
+ "value": "10.0.101.92"
+ },
+ {
+ "name": "vgw_private_ip_2",
+ "value": "10.2.0.7"
+ },
+ {
+ "name": "vgw_name_0",
+ "value": "zdcpe1cpe01gw01_${suffix}"
+ },
+ {
+ "name": "mux_ip_addr",
+ "value": "10.5.0.21"
+ },
+ {
+ "name": "vg_vgmux_tunnel_vni",
+ "value": "92"
+ },
+ {
+ "name": "onap_private_net_id",
+ "value": "${oam_onap_net}"
+ },
+ {
+ "name": "onap_private_subnet_id",
+ "value": "${oam_onap_subnet}"
+ },
+ {
+ "name": "onap_private_net_cidr",
+ "value": "10.0.0.0/16"
+ },
+ {
+ "name": "repo_url_blob",
+ "value": "https://nexus.onap.org/content/sites/raw"
+ },
+ {
+ "name": "repo_url_artifacts",
+ "value": "https://nexus.onap.org/content/groups/staging"
+ },
+ {
+ "name": "demo_artifacts_version",
+ "value": "1.1.1"
+ },
+ {
+ "name": "install_script_version",
+ "value": "1.1.1"
+ },
+ {
+ "name": "key_name",
+ "value": "vgw_key"
+ },
+ {
+ "name": "pub_key",
+ "value": "${pub_key}"
+ },
+ {
+ "name": "cloud_env",
+ "value": "openstack"
+ }
+ ] },
+ "vf-module-assignments": {},
+ "vf-module-topology-identifier": {
+ "vf-module-name": "zRegionOne${vgw_vfmod_name_index}_base_vcpe_vgw_0"
+ }
+ },
+ "vnf-resource-assignments": {},
+ "vnf-topology-identifier-structure": {
+ "nf-type": "vgw",
+ "vnf-id": "VGW2BRG-${brg_mac}"
+ }
+ }
+ }
+}
diff --git a/test/vcpe/preload_templates/template.vcpe_infra_vfmodule.json b/test/vcpe/preload_templates/template.vcpe_infra_vfmodule.json
index 27259546b..d2072e2e6 100644
--- a/test/vcpe/preload_templates/template.vcpe_infra_vfmodule.json
+++ b/test/vcpe/preload_templates/template.vcpe_infra_vfmodule.json
@@ -105,11 +105,11 @@
},
{
"vnf-parameter-name": "mr_ip_addr",
- "vnf-parameter-value": "10.12.5.239"
+ "vnf-parameter-value": "${mr_ip_addr}"
},
{
"vnf-parameter-name": "mr_ip_port",
- "vnf-parameter-value": "30227"
+ "vnf-parameter-value": "${mr_ip_port}"
},
{
"vnf-parameter-name": "vaaa_name_0",
@@ -153,11 +153,11 @@
},
{
"vnf-parameter-name": "demo_artifacts_version",
- "vnf-parameter-value": "1.3.0"
+ "vnf-parameter-value": "1.3.0-SNAPSHOT"
},
{
"vnf-parameter-name": "install_script_version",
- "vnf-parameter-value": "1.2.0-SNAPSHOT"
+ "vnf-parameter-value": "1.3.0-SNAPSHOT"
},
{
"vnf-parameter-name": "key_name",
diff --git a/test/vcpe/preload_templates/template_sniro_data.json b/test/vcpe/preload_templates/template_sniro_data.json
index 92c9cc719..c2c6421e6 100644
--- a/test/vcpe/preload_templates/template_sniro_data.json
+++ b/test/vcpe/preload_templates/template_sniro_data.json
@@ -1,6 +1,6 @@
{
"solutionInfo" : {
- "placement" : [
+ "placementInfo" : [
{
"cloudRegionId" : "RegionOne",
"inventoryType" : "service",
diff --git a/test/vcpe/preload_templates/template_sniro_request.json b/test/vcpe/preload_templates/template_sniro_request.json
index 565e14cd1..6fdbb7072 100644
--- a/test/vcpe/preload_templates/template_sniro_request.json
+++ b/test/vcpe/preload_templates/template_sniro_request.json
@@ -14,7 +14,7 @@
},
"method" : "POST",
"base64Body" : "${base64_sniro_data}",
- "url" : "https://${sniro_ip}:8080/adapters/rest/SDNCNotify/SNIROResponse"
+      "url" : "http://so-bpmn-infra.onap:8081/mso/WorkflowMessage/SNIROResponse"
}
}
}
diff --git a/test/vcpe/soutils.py b/test/vcpe/soutils.py
index 623f12cc3..3749689f2 100755
--- a/test/vcpe/soutils.py
+++ b/test/vcpe/soutils.py
@@ -170,7 +170,7 @@ class SoUtils:
'modelInfo': model,
'subscriberInfo': {'subscriberName': 'Kaneohe',
'globalSubscriberId': self.vcpecommon.global_subscriber_id},
- 'cloudConfiguration': {"lcpCloudRegionId": 'CloudOwner_RegionOne', #self.vcpecommon.os_region_name,
+ 'cloudConfiguration': {"lcpCloudRegionId": 'RegionOne', #self.vcpecommon.os_region_name,
"tenantId": self.vcpecommon.os_tenant_id},
'requestParameters': {
"userParams": [
diff --git a/test/vcpe/vcpe.py b/test/vcpe/vcpe.py
index 26f27fb7a..8bd39960a 100755
--- a/test/vcpe/vcpe.py
+++ b/test/vcpe/vcpe.py
@@ -142,8 +142,9 @@ def deploy_custom_service():
# create new service
csar_file = vcpecommon.find_file('rescust', 'csar', 'csar')
vgw_template_file = vcpecommon.find_file('vgw', 'json', 'preload_templates')
+ vgw_gra_template_file = vcpecommon.find_file('gwgra', 'json', 'preload_templates')
preload_dict = vcpecommon.load_preload_data()
- custom_service.create_custom_service(csar_file, vgw_template_file, preload_dict)
+ custom_service.create_custom_service(csar_file, vgw_template_file, vgw_gra_template_file, preload_dict)
def closed_loop(lossrate=0):
@@ -174,6 +175,8 @@ def init_so_sdnc():
vcpecommon = VcpeCommon()
config_sdnc_so.insert_customer_service_to_so(vcpecommon)
config_sdnc_so.insert_customer_service_to_sdnc(vcpecommon)
+ vgw_vfmod_name_index= 0
+ vcpecommon.save_object(vgw_vfmod_name_index, vcpecommon.vgw_vfmod_name_index_file)
def tmp_sniro():
@@ -186,13 +189,14 @@ def tmp_sniro():
config_sniro(vcpecommon, svc_instance_uuid['gmux'], svc_instance_uuid['brg'])
if __name__ == '__main__':
- logging.basicConfig(level=logging.INFO, format='%(message)s')
+ logging.basicConfig(level=logging.DEBUG, format='%(message)s')
print('----------------------------------------------------------------------------------------------------')
print(' vcpe.py: Brief info about this program')
# print(' vcpe.py sdc: Onboard VNFs, design and distribute vCPE services (under development)')
print(' vcpe.py init: Add customer service data to SDNC and SO DBs.')
print(' vcpe.py infra: Deploy infrastructure, including DHCP, AAA, DNS, Web Server, vBNG, vGMUX, vBRG.')
+ print(' vcpe.py brg: Deploy brg only (for testing after infra succeeds).')
print(' vcpe.py customer: Deploy customer service, including vGW and VxLANs')
print(' vcpe.py loop: Test closed loop control')
print('----------------------------------------------------------------------------------------------------')
diff --git a/test/vcpe/vcpe_custom_service.py b/test/vcpe/vcpe_custom_service.py
index 8c5635473..f02802353 100755
--- a/test/vcpe/vcpe_custom_service.py
+++ b/test/vcpe/vcpe_custom_service.py
@@ -66,18 +66,24 @@ class CustomService:
print(' 6. ping the web server: ping {0}'.format('10.2.0.10'))
print(' 7. wget http://{0}'.format('10.2.0.10'))
- def create_custom_service(self, csar_file, vgw_template_file, preload_dict=None):
+ def create_custom_service(self, csar_file, vgw_template_file, vgw_gra_template_file, preload_dict=None):
name_suffix = datetime.now().strftime('%Y%m%d%H%M')
if self.vcpecommon.oom_mode:
brg_mac = str(raw_input("Enter the BRG MAC address: "))
else:
brg_mac = self.vcpecommon.get_brg_mac_from_sdnc()
+ # get name index
+ self.vgw_vfmod_name_index = self.vcpecommon.load_object(self.vcpecommon.vgw_vfmod_name_index_file)
+ self.vgw_vfmod_name_index = self.vgw_vfmod_name_index + 1
+ self.vcpecommon.save_object(self.vgw_vfmod_name_index, self.vcpecommon.vgw_vfmod_name_index_file)
# preload vGW
if preload_dict:
preloader = preload.Preload(self.vcpecommon)
parameters_to_change = ['vgw_private_ip_0', 'vgw_private_ip_1', 'vgw_private_ip_2','vg_vgmux_tunnel_vni']
self.vcpecommon.increase_ip_address_or_vni_in_template(vgw_template_file, parameters_to_change)
preloader.preload_vgw(vgw_template_file, brg_mac, preload_dict, name_suffix)
+ # preload vGW-GRA
+ preloader.preload_vgw_gra(vgw_gra_template_file, brg_mac, preload_dict, name_suffix, self.vgw_vfmod_name_index)
# create service
so = soutils.SoUtils(self.vcpecommon, 'v5')
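Note: the name-index bookkeeping above relies on load_object()/save_object() helpers on VcpeCommon that persist a single value between runs. A minimal sketch of that round trip (pickle-based persistence is an assumption for illustration, not necessarily what VcpeCommon does):

    import pickle

    def load_object(file_path):
        with open(file_path, 'rb') as f:
            return pickle.load(f)

    def save_object(obj, file_path):
        with open(file_path, 'wb') as f:
            pickle.dump(obj, f)

    # intended flow: read the last assigned index, bump it, write it back
    index = load_object('__var/vgw_vfmod_name_index') + 1
    save_object(index, '__var/vgw_vfmod_name_index')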
diff --git a/test/vcpe/vcpecommon.py b/test/vcpe/vcpecommon.py
index 75f883835..f5e1b3046 100755
--- a/test/vcpe/vcpecommon.py
+++ b/test/vcpe/vcpecommon.py
@@ -19,17 +19,17 @@ class VcpeCommon:
external_net_prefix_len = 16
#############################################################################################
# set the openstack cloud access credentials here
- oom_mode = False
+ oom_mode = True
cloud = {
'--os-auth-url': 'http://10.12.25.2:5000',
'--os-username': 'kxi',
'--os-user-domain-id': 'default',
'--os-project-domain-id': 'default',
- '--os-tenant-id': '41d6d38489bd40b09ea8a6b6b852dcbd' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
+ '--os-tenant-id': 'b8ad3842ab3642f7bf3fbe4e4d3b9f86' if oom_mode else '1e097c6713e74fd7ac8e4295e605ee1e',
'--os-region-name': 'RegionOne',
'--os-password': 'n3JhGMGuDzD8',
- '--os-project-domain-name': 'Integration-SB-00' if oom_mode else 'Integration-SB-07',
+ '--os-project-domain-name': 'Integration-SB-05' if oom_mode else 'Integration-SB-07',
'--os-identity-api-version': '3'
}
@@ -39,12 +39,8 @@ class VcpeCommon:
'public_net': 'external',
'public_net_id': '971040b2-7059-49dc-b220-4fab50cb2ad4'
}
-# for sb07
-# 'oam_onap_lAky',
-# for sb00
- #'oam_onap_net': 'oam_network_0qV7',
- #'oam_onap_subnet': 'oam_network_0qV7',
- # End: configurations that you must change for a new ONAP installation
+ sdnc_controller_pod = 'dev-sdnc-sdnc-0'
+
#############################################################################################
template_variable_symbol = '${'
@@ -71,9 +67,16 @@ class VcpeCommon:
self.logger = logging.getLogger(__name__)
self.logger.info('Initializing configuration')
+ # OOM: the address the vBRG and vBNG NAT to for vBRG configuration - the 10.0.0.x address of the k8s host running SDNC
+ self.sdnc_brg_bng_ip = self.get_pod_node_oam_ip(self.sdnc_controller_pod)[1]
+ # OOM: a k8s host external IP
self.oom_so_sdnc_aai_ip = '10.12.5.18'
+ # OOM: a k8s host external IP; can be the same as oom_so_sdnc_aai_ip
self.oom_dcae_ves_collector = '10.12.5.18'
- self.so_nbi_port = '30223' if self.oom_mode else '8080'
+ # OOM: a k8s host external IP; can be the same as oom_so_sdnc_aai_ip
+ self.mr_ip_addr = '10.12.5.18'
+ self.mr_ip_port = '30227'
+ self.so_nbi_port = '30277' if self.oom_mode else '8080'
self.sdnc_preloading_port = '30202' if self.oom_mode else '8282'
self.aai_query_port = '30233' if self.oom_mode else '8443'
self.sniro_port = '30288' if self.oom_mode else '8080'
@@ -85,6 +88,8 @@ class VcpeCommon:
self.hosts = self.get_vm_ip(self.host_names, self.external_net_addr, self.external_net_prefix_len)
# this is the keyword used to name vgw stack, must not be used in other stacks
self.vgw_name_keyword = 'base_vcpe_vgw'
+ # this is the file that keeps the index of the last assigned vGW vf-module name
+ self.vgw_vfmod_name_index_file = '__var/vgw_vfmod_name_index'
self.svc_instance_uuid_file = '__var/svc_instance_uuid'
self.preload_dict_file = '__var/preload_dict'
self.vgmux_vnf_name_file = '__var/vgmux_vnf_name'
@@ -123,17 +128,19 @@ class VcpeCommon:
':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-network-topology-operation'
self.sdnc_preload_vnf_url = 'http://' + self.hosts['sdnc'] + \
':' + self.sdnc_preloading_port + '/restconf/operations/VNF-API:preload-vnf-topology-operation'
+ self.sdnc_preload_gra_url = 'http://' + self.hosts['sdnc'] + \
+ ':' + self.sdnc_preloading_port + '/restconf/operations/GENERIC-RESOURCE-API:preload-vf-module-topology-operation'
self.sdnc_ar_cleanup_url = 'http://' + self.hosts['sdnc'] + ':' + self.sdnc_preloading_port + \
'/restconf/config/GENERIC-RESOURCE-API:'
#############################################################################################
# SO urls, note: do NOT add a '/' at the end of the url
- self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/ecomp/mso/infra/serviceInstances/v4',
- 'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/ecomp/mso/infra/serviceInstances/v5'}
- self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/ecomp/mso/infra/orchestrationRequests/v5'
+ self.so_req_api_url = {'v4': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances',
+ 'v5': 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/serviceInstantiation/v7/serviceInstances'}
+ self.so_check_progress_api_url = 'http://' + self.hosts['so'] + ':' + self.so_nbi_port + '/onap/so/infra/orchestrationRequests/v6'
self.so_userpass = 'InfraPortalClient', 'password1$'
self.so_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
- self.so_db_name = 'mso_catalog'
+ self.so_db_name = 'catalogdb'
self.so_db_user = 'root'
self.so_db_pass = 'password'
self.so_db_port = '30252' if self.oom_mode else '32769'
@@ -310,6 +317,17 @@ class VcpeCommon:
return str(ip)
return None
+ def get_pod_node_oam_ip(self, pod):
+ """
+ Assumes kubectl is available and pointed at the ONAP cluster.
+ :param pod: pod name as a string, e.g. 'dev-sdnc-sdnc-0'
+ :return: (status, output) tuple from commands.getstatusoutput(); output is the node's OAM IP (10.0.0.0/16)
+ """
+ cmd = "kubectl -n onap describe pod {0} |grep Node:|cut -d'/' -f2".format(pod)
+ ret = commands.getstatusoutput(cmd)
+ self.logger.debug("cmd = %s, ret = %s", cmd, ret)
+ return ret
+
def get_vm_ip(self, keywords, net_addr=None, net_addr_len=None):
"""
:param keywords: list of keywords to search for vm, e.g. ['bng', 'gmux', 'brg']
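Note: with the SO URL changes above, a service instantiation call against the Casablanca SO NBI would look roughly like this (a sketch only; host, port and payload are illustrative, and the real request body is built in soutils.py):

    import requests

    so_host = '10.12.5.18'      # k8s host external IP, illustrative
    so_nbi_port = '30277'
    url = 'http://{0}:{1}/onap/so/infra/serviceInstantiation/v7/serviceInstances'.format(so_host, so_nbi_port)
    headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}

    resp = requests.post(url, headers=headers, auth=('InfraPortalClient', 'password1$'),
                         json={'requestDetails': {}})   # real payload comes from soutils.py
    print(resp.status_code, resp.text)

    # progress is then polled at /onap/so/infra/orchestrationRequests/v6/<requestId>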
diff --git a/version-manifest/pom.xml b/version-manifest/pom.xml
index 48dd9c12a..ab55b5c90 100644
--- a/version-manifest/pom.xml
+++ b/version-manifest/pom.xml
@@ -8,7 +8,7 @@
</parent>
<groupId>org.onap.integration</groupId>
<artifactId>version-manifest</artifactId>
- <version>3.0.0-SNAPSHOT</version>
+ <version>4.0.0-SNAPSHOT</version>
<packaging>maven-plugin</packaging>
<name>ONAP Version Manifest and Maven Plugin</name>
<url>https://www.onap.org</url>
diff --git a/version-manifest/src/main/resources/docker-manifest-staging.csv b/version-manifest/src/main/resources/docker-manifest-staging.csv
index a8febca45..079dc18a2 100644
--- a/version-manifest/src/main/resources/docker-manifest-staging.csv
+++ b/version-manifest/src/main/resources/docker-manifest-staging.csv
@@ -34,6 +34,11 @@ onap/clamp-dashboard-kibana,3.0-STAGING-latest
onap/clamp-dashboard-logstash,3.0-STAGING-latest
onap/cli,2.0.4
onap/data-router,1.3-STAGING-latest
+onap/dcae-be,1.3-STAGING-latest
+onap/dcae-dt,1.2-STAGING-latest
+onap/dcae-fe,1.3-STAGING-latest
+onap/dcae-tools,1.3-STAGING-latest
+onap/dcae-tosca-app,1.3-STAGING-latest
onap/dmaap/buscontroller,1.0.23
onap/dmaap/datarouter-node,latest
onap/dmaap/datarouter-prov,latest
@@ -47,19 +52,19 @@ onap/holmes/rule-management,1.2.3-STAGING-latest
onap/model-loader,1.3-STAGING-latest
onap/msb/msb_apigateway,1.2.2-STAGING-latest
onap/msb/msb_discovery,1.2.2-STAGING-latest
-onap/multicloud/azure,1.2.1-SNAPSHOT
-onap/multicloud/framework,1.2.1-STAGING
-onap/multicloud/openstack-newton,1.2.1-STAGING
-onap/multicloud/openstack-ocata,1.2.1-STAGING
-onap/multicloud/openstack-pike,1.2.1-STAGING
-onap/multicloud/openstack-windriver,1.2.1-STAGING
-onap/multicloud/vio,1.2.1-STAGING
+onap/multicloud/azure,1.2.2-SNAPSHOT
+onap/multicloud/framework,1.2.2-STAGING
+onap/multicloud/openstack-newton,1.2.2-STAGING
+onap/multicloud/openstack-ocata,1.2.2-STAGING
+onap/multicloud/openstack-pike,1.2.2-STAGING
+onap/multicloud/openstack-windriver,1.2.2-STAGING
+onap/multicloud/vio,1.2.2-STAGING
onap/multicloud/vio-vesagent,1.0.0
-onap/music/cassandra_3_11,3.0.23
-onap/music/cassandra_job,3.0.23
+onap/music/cassandra_3_11,3.0.24
+onap/music/cassandra_job,3.0.24
onap/music/cassandra_music,3.0.0
-onap/music/music,3.0.23
-onap/music/prom,1.0.5-latest
+onap/music/music,3.0.24
+onap/music/prom,1.0.5
onap/network-discovery,latest
onap/oom/kube2msb,1.1.0
onap/optf-cmso-dbinit,1.0.1
@@ -85,28 +90,28 @@ onap/org.onap.dcaegen2.platform.policy-handler,4.4.0
onap/org.onap.dcaegen2.platform.servicechange-handler,1.1.5
onap/org.onap.dcaegen2.services.prh.prh-app-server,1.1.1
onap/policy-apex-pdp,2.0-SNAPSHOT-latest
-onap/policy-distribution,2.0.2-SNAPSHOT-latest
+onap/policy-distribution,2.0.3-SNAPSHOT-latest
onap/policy-drools,1.3-SNAPSHOT-latest
onap/policy-pe,1.3-SNAPSHOT-latest
-onap/pomba-aai-context-builder,1.3.2-SNAPSHOT-latest
-onap/pomba-context-aggregator,1.3.4-SNAPSHOT-latest
-onap/pomba-network-discovery-context-builder,1.3.1-SNAPSHOT-latest
-onap/pomba-sdc-context-builder,1.3.2-SNAPSHOT-latest
+onap/pomba-aai-context-builder,1.3.3-SNAPSHOT-latest
+onap/pomba-context-aggregator,1.3.5-SNAPSHOT-latest
+onap/pomba-network-discovery-context-builder,1.3.2-SNAPSHOT-latest
+onap/pomba-sdc-context-builder,1.3.3-SNAPSHOT-latest
onap/portal-app,2.3.1
onap/portal-db,2.3.1
onap/portal-sdk,2.3.1
onap/portal-wms,2.3.1
onap/rproxy,2.1-STAGING-latest
-onap/sdc-backend,1.3-STAGING-latest
-onap/sdc-backend-init,1.3-STAGING-latest
-onap/sdc-cassandra,1.3-STAGING-latest
-onap/sdc-cassandra-init,1.3-STAGING-latest
-onap/sdc-elasticsearch,1.3-STAGING-latest
-onap/sdc-frontend,1.3-STAGING-latest
-onap/sdc-init-elasticsearch,1.3-STAGING-latest
-onap/sdc-kibana,1.3-STAGING-latest
-onap/sdc-onboard-backend,1.3-STAGING-latest
-onap/sdc-onboard-cassandra-init,1.3-STAGING-latest
+onap/sdc-backend,1.4-STAGING-latest
+onap/sdc-backend-init,1.4-STAGING-latest
+onap/sdc-cassandra,1.4-STAGING-latest
+onap/sdc-cassandra-init,1.4-STAGING-latest
+onap/sdc-elasticsearch,1.4-STAGING-latest
+onap/sdc-frontend,1.4-STAGING-latest
+onap/sdc-init-elasticsearch,1.4-STAGING-latest
+onap/sdc-kibana,1.4-STAGING-latest
+onap/sdc-onboard-backend,1.4-STAGING-latest
+onap/sdc-onboard-cassandra-init,1.4-STAGING-latest
onap/sdnc-ansible-server-image,1.4-STAGING-latest
onap/sdnc-dmaap-listener-image,1.4-STAGING-latest
onap/sdnc-image,1.4-STAGING-latest
@@ -127,27 +132,27 @@ onap/sparky-be,1.3-STAGING-latest
onap/spike,1.0-STAGING-latest
onap/testsuite,1.3.1-STAGING-latest
onap/tproxy-config,2.1-STAGING-latest
-onap/usecase-ui,1.2.1-STAGING-latest
-onap/usecase-ui-server,1.2.0-STAGING-latest
+onap/usecase-ui,1.2.2-STAGING-latest
+onap/usecase-ui-server,1.2.1-STAGING-latest
onap/validation,1.3-STAGING-latest
-onap/vfc/catalog,1.2.0-STAGING-latest
-onap/vfc/db,1.2.0-STAGING-latest
-onap/vfc/emsdriver,1.2.0-STAGING-latest
-onap/vfc/gvnfmdriver,1.2.0-STAGING-latest
-onap/vfc/jujudriver,1.2.0-STAGING-latest
-onap/vfc/multivimproxy,1.2.0-STAGING-latest
-onap/vfc/nfvo/svnfm/huawei,1.2.0-STAGING-latest
+onap/vfc/catalog,1.2.1-STAGING-latest
+onap/vfc/db,1.2.1-STAGING-latest
+onap/vfc/emsdriver,1.2.1-STAGING-latest
+onap/vfc/gvnfmdriver,1.2.1-STAGING-latest
+onap/vfc/jujudriver,1.2.1-STAGING-latest
+onap/vfc/multivimproxy,1.2.1-STAGING-latest
+onap/vfc/nfvo/svnfm/huawei,1.2.1-STAGING-latest
onap/vfc/nfvo/svnfm/nokia,1.1.0-STAGING-latest
-onap/vfc/nfvo/svnfm/nokiav2,1.3.0-STAGING-latest
-onap/vfc/nslcm,1.2.0-STAGING-latest
-onap/vfc/resmanagement,1.2.0-STAGING-latest
-onap/vfc/vnflcm,1.2.0-STAGING-latest
-onap/vfc/vnfmgr,1.2.0-STAGING-latest
-onap/vfc/vnfres,1.2.0-STAGING-latest
+onap/vfc/nfvo/svnfm/nokiav2,1.3.1-STAGING-latest
+onap/vfc/nslcm,1.2.1-STAGING-latest
+onap/vfc/resmanagement,1.2.1-STAGING-latest
+onap/vfc/vnflcm,1.2.1-STAGING-latest
+onap/vfc/vnfmgr,1.2.1-STAGING-latest
+onap/vfc/vnfres,1.2.1-STAGING-latest
onap/vfc/wfengine-activiti,1.2.0-STAGING-latest
onap/vfc/wfengine-mgrservice,1.2.0-STAGING-latest
onap/vfc/ztesdncdriver,1.2.0-STAGING-latest
-onap/vfc/ztevnfmdriver,1.2.0
+onap/vfc/ztevnfmdriver,1.2.1-STAGING-latest
onap/vid,3.2-STAGING-latest
onap/vnfsdk/ice,1.1.0
onap/vnfsdk/refrepo,1.2.0
diff --git a/version-manifest/src/main/resources/docker-manifest.csv b/version-manifest/src/main/resources/docker-manifest.csv
index 281c1845c..534105578 100644
--- a/version-manifest/src/main/resources/docker-manifest.csv
+++ b/version-manifest/src/main/resources/docker-manifest.csv
@@ -1,13 +1,13 @@
image,tag
-onap/aaf/aaf_agent,2.1.7
-onap/aaf/aaf_cm,2.1.7
-onap/aaf/aaf_config,2.1.7
-onap/aaf/aaf_fs,2.1.7
-onap/aaf/aaf_gui,2.1.7
-onap/aaf/aaf_hello,2.1.7
-onap/aaf/aaf_locate,2.1.7
-onap/aaf/aaf_oauth,2.1.7
-onap/aaf/aaf_service,2.1.7
+onap/aaf/aaf_agent,2.1.8
+onap/aaf/aaf_cm,2.1.8
+onap/aaf/aaf_config,2.1.8
+onap/aaf/aaf_fs,2.1.8
+onap/aaf/aaf_gui,2.1.8
+onap/aaf/aaf_hello,2.1.8
+onap/aaf/aaf_locate,2.1.8
+onap/aaf/aaf_oauth,2.1.8
+onap/aaf/aaf_service,2.1.8
onap/aaf/abrmd,3.0.0
onap/aaf/distcenter,3.0.0
onap/aaf/sms,3.0.1
@@ -15,25 +15,29 @@ onap/aaf/smsquorumclient,3.0.1
onap/aaf/testcaservice,3.0.0
onap/aai-cacher,1.0.0
onap/aai-graphadmin,1.0.0
-onap/aai-resources,1.3.1
-onap/aai-traversal,1.3.1
+onap/aai-resources,1.3.3
+onap/aai-traversal,1.3.2
onap/aai/esr-gui,1.2.1
onap/aai/esr-server,1.2.1
onap/admportal-sdnc-image,1.4.1
onap/appc-cdt-image,1.4.0
onap/appc-image,1.4.0
-onap/babel,1.3.0
+onap/babel,1.3.2
onap/ccsdk-ansible-server-image,0.3.1
onap/ccsdk-apps-ms-neng,0.3.1
onap/ccsdk-dgbuilder-image,0.3.1
onap/ccsdk-odl-oxygen-image,0.3.1
onap/ccsdk-odlsli-image,0.3.1
onap/champ,1.3.0
-onap/clamp,3.0.2
-onap/clamp-dashboard-kibana,3.0.2
-onap/clamp-dashboard-logstash,3.0.2
+onap/clamp,3.0.3
+onap/clamp-dashboard-kibana,3.0.3
+onap/clamp-dashboard-logstash,3.0.3
onap/cli,2.0.4
onap/data-router,1.3.0
+onap/dcae-be,1.3.0
+onap/dcae-dt,1.2.0
+onap/dcae-fe,1.3.0
+onap/dcae-tosca-app,1.3.0
onap/dmaap/buscontroller,1.0.23
onap/dmaap/datarouter-node,1.0.3
onap/dmaap/datarouter-prov,1.0.3
@@ -43,20 +47,22 @@ onap/externalapi/nbi,3.0.1
onap/gizmo,1.3.0
onap/holmes/engine-management,1.2.2
onap/holmes/rule-management,1.2.3
-onap/model-loader,1.3.0
+onap/model-loader,1.3.1
onap/msb/msb_apigateway,1.2.1
onap/msb/msb_discovery,1.2.1
-onap/multicloud/azure,1.2.0
-onap/multicloud/framework,1.2.0
-onap/multicloud/openstack-newton,1.2.0
-onap/multicloud/openstack-ocata,1.2.0
-onap/multicloud/openstack-pike,1.2.0
-onap/multicloud/openstack-windriver,1.2.0
-onap/multicloud/vio,1.2.0
+onap/multicloud/azure,1.2.1
+onap/multicloud/framework,1.2.1
+onap/multicloud/openstack-newton,1.2.1
+onap/multicloud/openstack-ocata,1.2.1
+onap/multicloud/openstack-pike,1.2.1
+onap/multicloud/openstack-windriver,1.2.1
+onap/multicloud/vio,1.2.1
onap/multicloud/vio-vesagent,1.0.0
-onap/music/cassandra_3_11,3.0.23
-onap/music/cassandra_job,3.0.23
-onap/music/music,3.0.23
+onap/music/cassandra_3_11,3.0.24
+onap/music/cassandra_job,3.0.24
+onap/music/cassandra_music,3.0.0
+onap/music/music,3.0.24
+onap/music/prom,1.0.5
onap/oom/kube2msb,1.1.0
onap/optf-cmso-dbinit,1.0.1
onap/optf-cmso-service,1.0.1
@@ -81,27 +87,27 @@ onap/org.onap.dcaegen2.platform.policy-handler,4.4.0
onap/org.onap.dcaegen2.platform.servicechange-handler,1.1.5
onap/org.onap.dcaegen2.services.prh.prh-app-server,1.1.1
onap/policy-apex-pdp,2.0.3
-onap/policy-distribution,2.0.2
-onap/policy-drools,1.3.3
-onap/policy-pe,1.3.3
-onap/pomba-aai-context-builder,1.3.1
-onap/pomba-context-aggregator,1.3.3
-onap/pomba-network-discovery-context-builder,1.3.0
-onap/pomba-sdc-context-builder,1.3.1
+onap/policy-distribution,2.0.3
+onap/policy-drools,1.3.4
+onap/policy-pe,1.3.4
+onap/pomba-aai-context-builder,1.3.2
+onap/pomba-context-aggregator,1.3.4
+onap/pomba-network-discovery-context-builder,1.3.1
+onap/pomba-sdc-context-builder,1.3.2
onap/portal-app,2.3.1
onap/portal-db,2.3.1
onap/portal-sdk,2.3.1
onap/portal-wms,2.3.1
-onap/sdc-backend,1.3.2
-onap/sdc-backend-init,1.3.2
-onap/sdc-cassandra,1.3.2
-onap/sdc-cassandra-init,1.3.2
-onap/sdc-elasticsearch,1.3.2
-onap/sdc-frontend,1.3.2
-onap/sdc-init-elasticsearch,1.3.2
-onap/sdc-kibana,1.3.2
-onap/sdc-onboard-backend,1.3.2
-onap/sdc-onboard-cassandra-init,1.3.2
+onap/sdc-backend,1.3.3
+onap/sdc-backend-init,1.3.3
+onap/sdc-cassandra,1.3.3
+onap/sdc-cassandra-init,1.3.3
+onap/sdc-elasticsearch,1.3.3
+onap/sdc-frontend,1.3.3
+onap/sdc-init-elasticsearch,1.3.3
+onap/sdc-kibana,1.3.3
+onap/sdc-onboard-backend,1.3.3
+onap/sdc-onboard-cassandra-init,1.3.3
onap/sdnc-ansible-server-image,1.4.1
onap/sdnc-dmaap-listener-image,1.4.1
onap/sdnc-image,1.4.1
@@ -118,27 +124,28 @@ onap/so/sdnc-adapter,1.3.1
onap/so/so-monitoring,1.3.1
onap/so/vfc-adapter,1.3.1
onap/sparky-be,1.3.0
-onap/usecase-ui,1.2.1
-onap/usecase-ui-server,1.2.0
-onap/validation,1.3.0
-onap/vfc/catalog,1.2.0
-onap/vfc/db,1.2.0
-onap/vfc/emsdriver,1.2.0
-onap/vfc/gvnfmdriver,1.2.0
-onap/vfc/jujudriver,1.2.0
-onap/vfc/multivimproxy,1.2.0
-onap/vfc/nfvo/svnfm/huawei,1.2.0
+onap/testsuite,1.3.1
+onap/usecase-ui,1.2.2
+onap/usecase-ui-server,1.2.1
+onap/validation,1.3.1
+onap/vfc/catalog,1.2.1
+onap/vfc/db,1.2.1
+onap/vfc/emsdriver,1.2.1
+onap/vfc/gvnfmdriver,1.2.1
+onap/vfc/jujudriver,1.2.1
+onap/vfc/multivimproxy,1.2.1
+onap/vfc/nfvo/svnfm/huawei,1.2.1
onap/vfc/nfvo/svnfm/nokia,1.1.0
onap/vfc/nfvo/svnfm/nokiav2,1.3.0
-onap/vfc/nslcm,1.2.0
-onap/vfc/resmanagement,1.2.0
-onap/vfc/vnflcm,1.2.0
-onap/vfc/vnfmgr,1.2.0
-onap/vfc/vnfres,1.2.0
+onap/vfc/nslcm,1.2.1
+onap/vfc/resmanagement,1.2.1
+onap/vfc/vnflcm,1.2.1
+onap/vfc/vnfmgr,1.2.1
+onap/vfc/vnfres,1.2.1
onap/vfc/wfengine-activiti,1.2.0
onap/vfc/wfengine-mgrservice,1.2.0
onap/vfc/ztesdncdriver,1.2.0
-onap/vfc/ztevnfmdriver,1.2.0
+onap/vfc/ztevnfmdriver,1.2.1
onap/vid,3.2.0
onap/vnfsdk/ice,1.1.0
onap/vnfsdk/refrepo,1.2.0
@@ -152,6 +159,6 @@ onap/vvp/jenkins,1.0.0
onap/vvp/portal,1.0.0
onap/vvp/postgresql,1.0.0
onap/vvp/test-engine,1.0.0
-onap/workflow-backend,1.3.1
-onap/workflow-frontend,1.3.1
-onap/workflow-init,1.3.1
+onap/workflow-backend,1.3.2
+onap/workflow-frontend,1.3.2
+onap/workflow-init,1.3.2
diff --git a/version-manifest/src/main/resources/java-manifest.csv b/version-manifest/src/main/resources/java-manifest.csv
index deaaaf1f7..5ed838b4d 100644
--- a/version-manifest/src/main/resources/java-manifest.csv
+++ b/version-manifest/src/main/resources/java-manifest.csv
@@ -1,28 +1,28 @@
groupId,artifactId,version
-org.onap.aaf.authz,aaf-auth-batch,2.1.7
-org.onap.aaf.authz,aaf-auth-cass,2.1.7
-org.onap.aaf.authz,aaf-auth-certman,2.1.7
-org.onap.aaf.authz,aaf-auth-client,2.1.7
-org.onap.aaf.authz,aaf-auth-cmd,2.1.7
-org.onap.aaf.authz,aaf-auth-core,2.1.7
-org.onap.aaf.authz,aaf-auth-deforg,2.1.7
-org.onap.aaf.authz,aaf-auth-fs,2.1.7
-org.onap.aaf.authz,aaf-auth-gui,2.1.7
-org.onap.aaf.authz,aaf-auth-hello,2.1.7
-org.onap.aaf.authz,aaf-auth-locate,2.1.7
-org.onap.aaf.authz,aaf-auth-oauth,2.1.7
-org.onap.aaf.authz,aaf-auth-service,2.1.7
-org.onap.aaf.authz,aaf-cadi-aaf,2.1.7
-org.onap.aaf.authz,aaf-cadi-cass,2.1.7
-org.onap.aaf.authz,aaf-cadi-client,2.1.7
-org.onap.aaf.authz,aaf-cadi-core,2.1.7
-org.onap.aaf.authz,aaf-cadi-oauth-enduser,2.1.7
-org.onap.aaf.authz,aaf-cadi-shiro,2.1.2
-org.onap.aaf.authz,aaf-misc-env,2.1.7
-org.onap.aaf.authz,aaf-misc-log4j,2.1.7
-org.onap.aaf.authz,aaf-misc-rosetta,2.1.7
-org.onap.aaf.authz,aaf-misc-xgen,2.1.7
-org.onap.aaf.authz,aaf-shiro-aafrealm-osgi-bundle,2.1.2
+org.onap.aaf.authz,aaf-auth-batch,2.1.8
+org.onap.aaf.authz,aaf-auth-cass,2.1.8
+org.onap.aaf.authz,aaf-auth-certman,2.1.8
+org.onap.aaf.authz,aaf-auth-client,2.1.8
+org.onap.aaf.authz,aaf-auth-cmd,2.1.8
+org.onap.aaf.authz,aaf-auth-core,2.1.8
+org.onap.aaf.authz,aaf-auth-deforg,2.1.8
+org.onap.aaf.authz,aaf-auth-fs,2.1.8
+org.onap.aaf.authz,aaf-auth-gui,2.1.8
+org.onap.aaf.authz,aaf-auth-hello,2.1.8
+org.onap.aaf.authz,aaf-auth-locate,2.1.8
+org.onap.aaf.authz,aaf-auth-oauth,2.1.8
+org.onap.aaf.authz,aaf-auth-service,2.1.8
+org.onap.aaf.authz,aaf-cadi-aaf,2.1.8
+org.onap.aaf.authz,aaf-cadi-cass,2.1.8
+org.onap.aaf.authz,aaf-cadi-client,2.1.8
+org.onap.aaf.authz,aaf-cadi-core,2.1.8
+org.onap.aaf.authz,aaf-cadi-oauth-enduser,2.1.8
+org.onap.aaf.authz,aaf-cadi-shiro,2.1.8
+org.onap.aaf.authz,aaf-misc-env,2.1.8
+org.onap.aaf.authz,aaf-misc-log4j,2.1.8
+org.onap.aaf.authz,aaf-misc-rosetta,2.1.8
+org.onap.aaf.authz,aaf-misc-xgen,2.1.8
+org.onap.aaf.authz,aaf-shiro-aafrealm-osgi-bundle,2.1.8
org.onap.aai,rest-client,1.3.0
org.onap.aai,search-data-service,1.3.1
org.onap.aai.aai-common,aai-annotations,1.3.0
@@ -31,7 +31,7 @@ org.onap.aai.aai-common,aai-common,1.3.0
org.onap.aai.aai-common,aai-core,1.3.0
org.onap.aai.aai-common,aai-schema,1.3.0
org.onap.aai.aai-common,aai-utils,1.3.0
-org.onap.aai.babel,babel,1.3.0
+org.onap.aai.babel,babel,1.3.2
org.onap.aai.cacher,cacher,1.0.0
org.onap.aai.champ,champ,1.3.0
org.onap.aai.data-router,data-router,1.3.0
@@ -53,15 +53,15 @@ org.onap.aai.logging-service,common-logging,1.3.0
org.onap.aai.logging-service,eelf-logging,1.3.0
org.onap.aai.logging-service,logging-api,1.3.0
org.onap.aai.logging-service,logging-service,1.3.0
-org.onap.aai.model-loader,model-loader,1.3.0
-org.onap.aai.resources,aai-resources,1.3.1
-org.onap.aai.resources,resources,1.3.1
+org.onap.aai.model-loader,model-loader,1.3.1
+org.onap.aai.resources,aai-resources,1.3.3
+org.onap.aai.resources,resources,1.3.3
org.onap.aai.router-core,router-core,1.3.0
org.onap.aai.sparky-be,sparky-be,1.3.0
org.onap.aai.sparky-fe,sparky-fe,1.3.0
-org.onap.aai.traversal,aai-traversal,1.3.1
-org.onap.aai.traversal,traversal,1.3.1
-org.onap.aai.validation,validation,1.3.0
+org.onap.aai.traversal,aai-traversal,1.3.2
+org.onap.aai.traversal,traversal,1.3.2
+org.onap.aai.validation,validation,1.3.1
org.onap.appc,appc-aai-client-provider,1.4.0
org.onap.appc,appc-ansible-adapter-bundle,1.4.0
org.onap.appc,appc-artifact-handler-provider,1.4.0
@@ -155,7 +155,7 @@ org.onap.ccsdk.sli.plugins,properties-node-provider,0.3.1
org.onap.ccsdk.sli.plugins,restapi-call-node-provider,0.3.1
org.onap.ccsdk.storage.pgaas,pgaas,1.0.0
org.onap.ccsdk.utils,utils,1.0.0
-org.onap.clamp.clds.clamp,clamp,3.0.2
+org.onap.clamp.clds.clamp,clamp,3.0.3
org.onap.cli,cli-framework,2.0.4
org.onap.cli,cli-main,2.0.4
org.onap.cli,cli-plugins-sample,2.0.4
@@ -216,21 +216,21 @@ org.onap.modeling.toscaparsers,kwalify,1.0.0
org.onap.modeling.toscaparsers.nfvparser,modeling-toscaparsers-nfvparser,1.0.0
org.onap.msb.java-sdk,msb-java-sdk,1.1.1
org.onap.msb.swagger-sdk,swagger-sdk,1.1.0
-org.onap.multicloud.azure,multicloud-azure,1.2.0
-org.onap.multicloud.framework,multivimbroker,1.2.0
-org.onap.multicloud.openstack,newton,1.2.0
-org.onap.multicloud.openstack,ocata,1.2.0
-org.onap.multicloud.openstack,windriver,1.2.0
+org.onap.multicloud.azure,multicloud-azure,1.2.1
+org.onap.multicloud.framework,multivimbroker,1.2.1
+org.onap.multicloud.openstack,newton,1.2.1
+org.onap.multicloud.openstack,ocata,1.2.1
+org.onap.multicloud.openstack,windriver,1.2.1
org.onap.multicloud.openstack.vmware,vesagent,1.0.0
-org.onap.multicloud.openstack.vmware,vio,1.2.0
+org.onap.multicloud.openstack.vmware,vio,1.2.1
org.onap.music,MUSIC,3.0.23
-org.onap.oparent,oparent,1.2.1
+org.onap.oparent,oparent,1.2.2
org.onap.policy.apex-pdp,apex-pdp,2.0.3
org.onap.policy.common,common-modules,1.3.3
-org.onap.policy.distribution,distribution,2.0.2
-org.onap.policy.drools-applications,drools-pdp-apps,1.3.3
-org.onap.policy.drools-pdp,drools-pdp,1.3.3
-org.onap.policy.engine,PolicyEngineSuite,1.3.3
+org.onap.policy.distribution,distribution,2.0.3
+org.onap.policy.drools-applications,drools-pdp-apps,1.3.4
+org.onap.policy.drools-pdp,drools-pdp,1.3.4
+org.onap.policy.engine,PolicyEngineSuite,1.3.4
org.onap.policy.parent,parent,2.0.1
org.onap.portal.sdk,epsdk-analytics,2.4.0
org.onap.portal.sdk,epsdk-app-common,2.4.0
@@ -245,24 +245,23 @@ org.onap.sdc.sdc-tosca,sdc-tosca,1.4.6
org.onap.sdnc.northbound,generic-resource-api.provider,1.4.1
org.onap.sdnc.northbound,vnfapi-provider,1.4.1
org.onap.sdnc.northbound,vnftools-provider,1.4.1
-org.onap.usecase-ui.server,usecase-ui-server,1.2.0
-org.onap.vfc.gvnfm.vnflcm.lcm,vfc-gvnfm-vnflcm-lcm,1.2.0
-org.onap.vfc.gvnfm.vnfmgr.mgr,vfc-gvnfm-vnfmgr-mgr,1.2.0
-org.onap.vfc.gvnfm.vnfres.res,vfc-gvnfm-vnfres-res,1.2.0
-org.onap.vfc.nfvo.catalog,vfc-nfvo-catalog,1.2.0
-org.onap.vfc.nfvo.db,vfc-nfvo-db,1.2.0
-org.onap.vfc.nfvo.driver.ems.ems,emsdriver-standalone,1.2.0
+org.onap.usecase-ui.server,usecase-ui-server,1.2.1
+org.onap.vfc.gvnfm.vnflcm.lcm,vfc-gvnfm-vnflcm-lcm,1.2.1
+org.onap.vfc.gvnfm.vnfmgr.mgr,vfc-gvnfm-vnfmgr-mgr,1.2.1
+org.onap.vfc.gvnfm.vnfres.res,vfc-gvnfm-vnfres-res,1.2.1
+org.onap.vfc.nfvo.catalog,vfc-nfvo-catalog,1.2.1
+org.onap.vfc.nfvo.db,vfc-nfvo-db,1.2.1
+org.onap.vfc.nfvo.driver.ems.ems,emsdriver-standalone,1.2.1
org.onap.vfc.nfvo.driver.sfc.zte.sfc-driver-standalone,vfc-nfvo-driver-sfc-zte-sfc-driver,1.2.0
-org.onap.vfc.nfvo.driver.vnfm.gvnfm.gvnfmadapter,vfc-nfvo-driver-vnfm-gvnfm-gvnfmadapter,1.2.0
+org.onap.vfc.nfvo.driver.vnfm.gvnfm.gvnfmadapter,vfc-nfvo-driver-vnfm-gvnfm-gvnfmadapter,1.2.1
org.onap.vfc.nfvo.driver.vnfm.svnfm,vfcadaptor-deployment,1.1.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm.huawei.vnfmadapter,hw-vnfmadapter-deployment,1.2.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm.nokiav2,driverwar,1.3.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm.zte.vmanager,vfc-nfvo-driver-vnfm-svnfm-zte-vmanager,1.2.0
-org.onap.vfc.nfvo.lcm,vfc-nfvo-lcm,1.2.0
-org.onap.vfc.nfvo.multivimproxy,vfc-nfvo-multivimproxy-deployment,1.2.0
-org.onap.vfc.nfvo.resmanagement,vfc-nfvo-resmanagement-deployment,1.2.0
+org.onap.vfc.nfvo.driver.vnfm.svnfm.huawei.vnfmadapter,hw-vnfmadapter-deployment,1.2.1
+org.onap.vfc.nfvo.driver.vnfm.svnfm.nokiav2,driverwar,1.2.1
+org.onap.vfc.nfvo.driver.vnfm.svnfm.zte.vmanager,vfc-nfvo-driver-vnfm-svnfm-zte-vmanager,1.2.1
+org.onap.vfc.nfvo.lcm,vfc-nfvo-lcm,1.2.1
+org.onap.vfc.nfvo.multivimproxy,vfc-nfvo-multivimproxy-deployment,1.2.1
+org.onap.vfc.nfvo.resmanagement,vfc-nfvo-resmanagement-deployment,1.2.1
org.onap.vfc.nfvo.wfengine,activiti-extension,1.2.0
-org.onap.vfc.nfvo.wfengine,workflow-engine-mgr-service,1.2.0
org.onap.vnfsdk.refrepo,vnf-sdk-marketplace,1.1.2
org.onap.vnfsdk.refrepo,vnf-sdk-marketplace-core-parent,1.2.0
org.onap.vnfsdk.refrepo,vnf-sdk-marketplace-deployment,1.2.0
diff --git a/version.properties b/version.properties
index ddc6dd849..50547ff5c 100644
--- a/version.properties
+++ b/version.properties
@@ -3,7 +3,7 @@
# Note that these variables cannot be structured (e.g. : version.release or version.snapshot etc... )
# because they are used in Jenkins, whose plug-in doesn't support
-major_version=3
+major_version=4
minor_version=0
patch_version=0