aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitattributes3
-rw-r--r--bootstrap/jenkins/vagrant/Vagrantfile1
-rwxr-xr-xbootstrap/jenkins/vagrant/bootstrap.sh8
-rwxr-xr-xbootstrap/jenkins/vagrant/jenkins-init-1.sh2
-rwxr-xr-xbootstrap/jenkins/vagrant/jenkins-init-2.sh2
-rwxr-xr-xbootstrap/jenkins/vagrant/jjb-init.sh7
-rw-r--r--bootstrap/jenkins/vagrant/jjb/java-jobs.yaml6
-rw-r--r--bootstrap/vagrant-onap/README.md11
-rw-r--r--bootstrap/vagrant-onap/Vagrantfile169
-rw-r--r--bootstrap/vagrant-onap/doc/source/install/index.rst16
-rw-r--r--bootstrap/vagrant-onap/doc/source/install/known_issues.rst13
-rwxr-xr-xbootstrap/vagrant-onap/lib/_composed_functions11
-rwxr-xr-xbootstrap/vagrant-onap/lib/_onap_functions6
-rwxr-xr-xbootstrap/vagrant-onap/lib/aai35
-rwxr-xr-xbootstrap/vagrant-onap/lib/appc23
-rwxr-xr-xbootstrap/vagrant-onap/lib/ccsdk32
-rwxr-xr-xbootstrap/vagrant-onap/lib/commons100
-rwxr-xr-xbootstrap/vagrant-onap/lib/config/env-vars79
-rwxr-xr-xbootstrap/vagrant-onap/lib/dcae59
-rw-r--r--bootstrap/vagrant-onap/lib/files/all-in-one585
-rw-r--r--bootstrap/vagrant-onap/lib/files/globals.yml2
-rw-r--r--bootstrap/vagrant-onap/lib/files/kolla-build.conf5
-rw-r--r--bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py40
-rw-r--r--bootstrap/vagrant-onap/lib/files/passwords.yml216
-rwxr-xr-xbootstrap/vagrant-onap/lib/functions223
-rwxr-xr-xbootstrap/vagrant-onap/lib/mr11
-rwxr-xr-xbootstrap/vagrant-onap/lib/msb50
-rwxr-xr-xbootstrap/vagrant-onap/lib/mso32
-rwxr-xr-xbootstrap/vagrant-onap/lib/multicloud47
-rwxr-xr-xbootstrap/vagrant-onap/lib/oom220
-rwxr-xr-xbootstrap/vagrant-onap/lib/openstack71
-rwxr-xr-xbootstrap/vagrant-onap/lib/policy25
-rwxr-xr-xbootstrap/vagrant-onap/lib/portal17
-rwxr-xr-xbootstrap/vagrant-onap/lib/robot38
-rwxr-xr-xbootstrap/vagrant-onap/lib/sdc45
-rwxr-xr-xbootstrap/vagrant-onap/lib/sdnc19
-rwxr-xr-xbootstrap/vagrant-onap/lib/vfc28
-rwxr-xr-xbootstrap/vagrant-onap/lib/vid27
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/lib/vnfsdk23
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/lib/vvp23
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/_test_base8
-rwxr-xr-xbootstrap/vagrant-onap/tests/asserts14
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_aai62
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_appc82
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_ccsdk83
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_dcae116
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_functions25
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_mr6
-rwxr-xr-xbootstrap/vagrant-onap/tests/test_msb61
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_mso50
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_multicloud35
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_policy80
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_portal14
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_robot26
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_sdc39
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_sdnc38
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_vfc38
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_vid20
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_vnfsdk34
-rwxr-xr-x[-rw-r--r--]bootstrap/vagrant-onap/tests/test_vvp34
-rw-r--r--bootstrap/vagrant-onap/tools/Run.ps15
-rwxr-xr-xbootstrap/vagrant-onap/tools/run.sh4
-rwxr-xr-xbootstrap/vagrant-onap/tools/setup_libvirt.sh21
-rwxr-xr-xbootstrap/vagrant-onap/vagrant_utils/postinstall.sh15
-rwxr-xr-xbootstrap/vagrant-onap/vagrant_utils/unit_testing.sh4
-rw-r--r--docs/heat.rst236
-rw-r--r--docs/index.rst11
-rw-r--r--test/csit/plans/aai/resources/docker-compose.yml28
-rw-r--r--test/csit/plans/aai/resources/setup.sh17
-rw-r--r--test/csit/plans/aai/traversal/docker-compose.yml28
-rw-r--r--test/csit/plans/aai/traversal/setup.sh19
-rwxr-xr-xtest/csit/plans/appc/healthcheck/bundle_query.sh3
-rwxr-xr-xtest/csit/plans/appc/healthcheck/setup.sh21
-rw-r--r--test/csit/plans/appc/healthcheck/testplan.txt1
-rw-r--r--test/csit/plans/cli/sanity-check/setup.sh2
-rw-r--r--test/csit/plans/sdnc/healthcheck/setup.sh18
-rw-r--r--test/csit/plans/so/sanity-check/setup.sh6
-rw-r--r--test/csit/plans/so/sanity-check/teardown.sh1
-rw-r--r--test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh12
-rw-r--r--test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/teardown.sh2
-rw-r--r--test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh44
-rw-r--r--test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh4
-rw-r--r--test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt6
-rw-r--r--test/csit/plans/vvp/sanity/setup.sh36
-rw-r--r--test/csit/plans/vvp/sanity/teardown.sh20
-rw-r--r--test/csit/plans/vvp/sanity/testplan.txt3
-rwxr-xr-xtest/csit/run-csit.sh4
-rw-r--r--test/csit/scripts/nfvo-wfengine/demo.bpmn20.xml11
-rw-r--r--test/csit/scripts/so/chef-config/aai.crt27
-rw-r--r--test/csit/scripts/so/chef-config/mso-docker.json220
-rw-r--r--test/csit/scripts/so/mariadb/conf.d/mariadb1.cnf193
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh25
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh27
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql49
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql128
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql77
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql59
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql1195
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql25
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql462
-rw-r--r--test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql84
-rw-r--r--test/csit/scripts/vvp/clone_and_setup_vvp_data.sh110
-rw-r--r--test/csit/scripts/vvp/docker_health.sh33
-rw-r--r--test/csit/scripts/vvp/kill_containers_and_remove_dataFolders.sh31
-rw-r--r--test/csit/scripts/vvp/start_vvp_containers.sh93
-rw-r--r--test/csit/scripts/vvp/start_vvp_sanity.sh26
-rw-r--r--test/csit/tests/clamp/APIs/01__TCA.robot11
-rw-r--r--test/csit/tests/dcaegen2/testcases/assets/json_events/dcae_healthcheck.json8
-rw-r--r--test/csit/tests/dcaegen2/testcases/assets/json_events/ves_vfirewall_measurement.json70
-rw-r--r--test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json124
-rw-r--r--test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event.json68
-rw-r--r--test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event_bad.json66
-rw-r--r--test/csit/tests/holmes/testcase/EngineMgt/Engine-Mgt.robot2
-rw-r--r--test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot2
-rw-r--r--test/csit/tests/policy/suite1/configpolicy_vCPE_R1.template7
-rw-r--r--test/csit/tests/policy/suite1/configpolicy_vDNS_R1.template7
-rw-r--r--test/csit/tests/policy/suite1/configpolicy_vFW_R1.template7
-rw-r--r--test/csit/tests/portal/testsuites/news_widget.zipbin0 -> 5125 bytes
-rw-r--r--test/csit/tests/portal/testsuites/test1.robot4
-rw-r--r--test/csit/tests/portal/testsuites/widget_news.zipbin1503954 -> 4593 bytes
-rw-r--r--test/csit/tests/so/sanity-check/data/createE2eservice.json126
-rw-r--r--test/csit/tests/so/sanity-check/data/deleteE2eservice.json30
-rw-r--r--test/csit/tests/so/sanity-check/sanity_test_so.robot2
-rw-r--r--test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json24
-rw-r--r--test/csit/tests/vfc/nfvo-driver-svnfm/ztevmanager.robot6
-rw-r--r--test/csit/tests/vfc/nfvo-wfengine/workflow.robot128
-rw-r--r--test/csit/tests/vid/login/test1.robot2
-rw-r--r--test/csit/tests/vnfsdk-pkgtools/tosca-metadata/csar/test_entry.yaml46
-rw-r--r--test/csit/tests/vvp/sanity/__init__.robot2
-rw-r--r--test/csit/tests/vvp/sanity/test1.robot19
-rwxr-xr-xtest/ete/labs/gwu/apt-proxy.sh18
-rw-r--r--test/ete/labs/gwu/onap-oom.env26
-rw-r--r--test/ete/labs/gwu/onap-openrc9
-rw-r--r--test/ete/labs/gwu/onap-openstack-template.env142
-rwxr-xr-xtest/ete/labs/huawei/apt-proxy.sh16
-rw-r--r--test/ete/labs/huawei/k8s_vm_entrypoint.sh196
-rw-r--r--test/ete/labs/huawei/onap-ci-amsterdam-heat-openrc9
-rw-r--r--test/ete/labs/huawei/onap-oom.env21
-rw-r--r--test/ete/labs/huawei/onap-oom.yaml130
-rw-r--r--test/ete/labs/huawei/onap-openstack-template.env142
-rw-r--r--test/ete/labs/huawei/rancher_vm_entrypoint.sh18
-rw-r--r--test/ete/labs/windriver/Integration-Jenkins-openrc.sh7
-rw-r--r--test/ete/labs/windriver/Integration-SB-04-openrc.sh49
-rwxr-xr-xtest/ete/labs/windriver/apt-proxy.sh16
-rw-r--r--test/ete/labs/windriver/onap-openstack-template.env145
-rwxr-xr-xtest/ete/scripts/deploy-onap.sh87
-rwxr-xr-xtest/ete/scripts/dns-zones/delete-dns-zones.sh27
-rwxr-xr-xtest/ete/scripts/get-floating-ip.sh7
-rwxr-xr-xtest/ete/scripts/install_openstack_cli.sh4
-rwxr-xr-xtest/ete/scripts/post-jenkins-results.sh6
-rwxr-xr-xtest/ete/scripts/pull-docker-images.sh22
-rwxr-xr-xtest/ete/scripts/remote/run-robot.sh16
-rwxr-xr-xtest/ete/scripts/run-healthcheck.sh12
-rwxr-xr-xtest/ete/scripts/teardown-onap.sh54
-rw-r--r--test/mocks/sniroemulator/pom.xml2
-rw-r--r--version-manifest/pom.xml38
-rw-r--r--version-manifest/src/main/java/org/onap/integration/versionmanifest/VersionCheckMojo.java6
-rw-r--r--version-manifest/src/main/resources/docker-manifest.csv115
-rw-r--r--version-manifest/src/main/resources/java-manifest.csv234
-rw-r--r--version.properties6
160 files changed, 7403 insertions, 1579 deletions
diff --git a/.gitattributes b/.gitattributes
index bb0dee2a8..4048784af 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,6 @@
*.java text eol=lf
*.xml text eol=lf
*.sh text eol=lf
+*.csv text eol=lf
+*.yaml text eol=lf
+*.json text eol=lf
diff --git a/bootstrap/jenkins/vagrant/Vagrantfile b/bootstrap/jenkins/vagrant/Vagrantfile
index 02b3ec18c..7dcb48eff 100644
--- a/bootstrap/jenkins/vagrant/Vagrantfile
+++ b/bootstrap/jenkins/vagrant/Vagrantfile
@@ -49,6 +49,7 @@ Vagrant.configure(2) do |config|
#
# # Customize the amount of memory on the VM:
vb.memory = "8192"
+ vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
end
#
# View the documentation for the provider you are using for more
diff --git a/bootstrap/jenkins/vagrant/bootstrap.sh b/bootstrap/jenkins/vagrant/bootstrap.sh
index fd0caf5c6..25c6a45f6 100755
--- a/bootstrap/jenkins/vagrant/bootstrap.sh
+++ b/bootstrap/jenkins/vagrant/bootstrap.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
#
# Copyright 2017 Huawei Technologies Co., Ltd.
#
@@ -39,16 +39,16 @@ apt-get update
apt-get -y install git
git config --global user.email "jenkins@localhost"
git config --global user.name "jenkins"
-apt-get -y install curl openjdk-8-jdk maven unzip
+apt-get -y install curl openjdk-8-jdk-headless maven unzip python-pip
# install Jenkins
wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | sudo apt-key add -
sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
apt-get update
-apt-get -y install jenkins jenkins-job-builder python-pip
+apt-get -y install jenkins
+# install docker
apt-get -y install docker.io
-sudo usermod -aG docker ubuntu
sudo usermod -aG docker jenkins
su -l jenkins -c "/vagrant/jenkins-init-1.sh"
diff --git a/bootstrap/jenkins/vagrant/jenkins-init-1.sh b/bootstrap/jenkins/vagrant/jenkins-init-1.sh
index 1f0a80859..3e83fa118 100755
--- a/bootstrap/jenkins/vagrant/jenkins-init-1.sh
+++ b/bootstrap/jenkins/vagrant/jenkins-init-1.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
#
# Copyright 2017 Huawei Technologies Co., Ltd.
#
diff --git a/bootstrap/jenkins/vagrant/jenkins-init-2.sh b/bootstrap/jenkins/vagrant/jenkins-init-2.sh
index a6cddbb10..f255a10ad 100755
--- a/bootstrap/jenkins/vagrant/jenkins-init-2.sh
+++ b/bootstrap/jenkins/vagrant/jenkins-init-2.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
#
# Copyright 2017 Huawei Technologies Co., Ltd.
#
diff --git a/bootstrap/jenkins/vagrant/jjb-init.sh b/bootstrap/jenkins/vagrant/jjb-init.sh
index e9adeb63b..10b96b4fa 100755
--- a/bootstrap/jenkins/vagrant/jjb-init.sh
+++ b/bootstrap/jenkins/vagrant/jjb-init.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
#
# Copyright 2017 Huawei Technologies Co., Ltd.
#
@@ -17,9 +17,10 @@ git commit -m 'Installed plugins, restarted Jenkins' > /dev/null
mkdir -p ~/.config/jenkins_jobs
cp /vagrant/jenkins_jobs.ini ~/.config/jenkins_jobs
-pip install --user jenkins-job-builder
+pip -v install --user jenkins-job-builder
+pip list
-jenkins-job-builder update -r /vagrant/jjb
+jenkins-jobs update -r /vagrant/jjb
cat > .gitignore <<EOF
jobs/*/builds
diff --git a/bootstrap/jenkins/vagrant/jjb/java-jobs.yaml b/bootstrap/jenkins/vagrant/jjb/java-jobs.yaml
index cb1f94afb..73d320e4a 100644
--- a/bootstrap/jenkins/vagrant/jjb/java-jobs.yaml
+++ b/bootstrap/jenkins/vagrant/jjb/java-jobs.yaml
@@ -304,9 +304,6 @@
repo: 'ncomp/utils'
pom: 'pom.xml'
- 'oom_registrator':
- repo: 'oom'
- pom: 'registrator/pom.xml'
- - 'oom_registrator':
repo: 'oom/registrator'
pom: 'pom.xml'
- 'oparent':
@@ -388,9 +385,6 @@
repo: 'so/libs'
pom: 'pom.xml'
- 'testsuite_heatbridge':
- repo: 'testsuite'
- pom: 'heatbridge/pom.xml'
- - 'testsuite_heatbridge':
repo: 'testsuite/heatbridge'
pom: 'pom.xml'
- 'ui_dmaapbc':
diff --git a/bootstrap/vagrant-onap/README.md b/bootstrap/vagrant-onap/README.md
index b0e1ee26b..8f143adc1 100644
--- a/bootstrap/vagrant-onap/README.md
+++ b/bootstrap/vagrant-onap/README.md
@@ -43,22 +43,27 @@ current options include:
|:----------:|-------------------------------------|
| aai | Active and Available Inventory |
| appc | Application Controller |
+| ccsdk | Common Controller SDK |
| dcae | Data Collection Analytics & Events |
| mr | Message Router |
| mso | Master Service Orchestrator |
+| msb | Microservices Bus Project |
+| multicloud | Multi Cloud |
+| oom | ONAP Operations Manager |
| policy | Policy |
| portal | Portal |
| robot | Robot |
| sdc | Service Design & Creation |
| sdnc | Software Defined Network Controller |
-| vid | Virtual Infrastructure Development |
| vfc | Virtual Function Controller |
-| multicloud | Multi Cloud |
-| ccsdk | Common Controller SDK |
+| vid | Virtual Infrastructure Development |
| vnfsdk | VNF SDK |
| vvp | VNF Validation Program |
+|:----------:|-------------------------------------|
| all_in_one | All ONAP services in a VM |
| testing | Unit Test VM |
+|:----------:|-------------------------------------|
+| openstack | OpenStack Deployment |
#### Generating documentation
diff --git a/bootstrap/vagrant-onap/Vagrantfile b/bootstrap/vagrant-onap/Vagrantfile
index 2490b1c75..31a9970a5 100644
--- a/bootstrap/vagrant-onap/Vagrantfile
+++ b/bootstrap/vagrant-onap/Vagrantfile
@@ -13,7 +13,7 @@ configuration = {
'nexus_password' => 'docker',
'dmaap_topic' => 'AUTO',
'artifacts_version' => '1.0.0',
- 'docker_version' => '1.0-STAGING-latest',
+ 'docker_version' => 'latest',
# Parameters for DCAE instantiation
'dcae_zone' => 'iad4',
'dcae_state' => 'vi',
@@ -23,6 +23,7 @@ configuration = {
'openstack_password' => '',
'odl_version' => '0.5.3-Boron-SR3',
# Parameters for enabling features
+ 'debug' => 'True',
'build_image' => 'True',
'clone_repo' => 'True',
'compile_repo' => 'False',
@@ -33,7 +34,7 @@ configuration = {
box = {
:virtualbox => 'ubuntu/trusty64',
- :libvirt => 'sputnik13/trusty64',
+ :libvirt => 'elastic/ubuntu-14.04-x86_64',
:openstack => nil
}
@@ -50,14 +51,14 @@ nodes = [
},
{
:name => "all-in-one",
- :ips => ['10.252.0.3', "192.168.50.3"],
+ :ips => ['10.252.1.3', "192.168.51.3"],
:macs => [],
:cpus => 2,
:cpu => "50",
:ram => 12 * 1024,
:groups => ["all-in-one"],
:flavor => 'm1.xlarge',
- :args => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk'],
+ :args => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk', 'multicloud', 'vnfsdk', 'vpp', 'msb'],
},
{
:name => "appc",
@@ -71,7 +72,7 @@ nodes = [
},
{
:name => "ccsdk",
- :ips => ['10.252.0.14', "192.168.50.17"],
+ :ips => ['10.252.0.19', "192.168.50.19"],
:macs => [],
:cpus => 2,
:cpu => "50",
@@ -112,7 +113,7 @@ nodes = [
},
{
:name => "mso",
- :ips => ['10.252.0.7', "192.168.50.7"],
+ :ips => ['10.252.0.20', "192.168.50.20"],
:macs => [],
:cpus => 2,
:cpu => "50",
@@ -121,6 +122,16 @@ nodes = [
:args => ["mso"],
},
{
+ :name => "msb",
+ :ips => ['10.252.0.7', "192.168.50.7"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ["msb"],
+ },
+ {
:name => "multicloud",
:ips => ['10.252.0.16', "192.168.50.16"],
:macs => [],
@@ -129,6 +140,24 @@ nodes = [
:ram => 4 * 1024,
:groups => ["individual"],
:args => ["multicloud"],
+ :fwds => [
+ { :guest => 9003, :host => 9003, :guest_ip => '192.168.50.16' },
+ ]
+ },
+ {
+ :name => "oom",
+ :ips => ['10.252.0.21', "192.168.50.21"],
+ :macs => [],
+ :cpus => 16,
+ :cpu => "50",
+ :ram => 64 * 1024,
+ :groups => ["individual"],
+ :args => ["oom"],
+ :hd => "60G",
+ :fwds => [
+ { :guest => 8880, :host => 8880, :guest_ip => '192.168.50.21' },
+ { :guest => 8989, :host => 8989, :guest_ip => '192.168.50.21' },
+ ]
},
{
:name => "policy",
@@ -169,6 +198,7 @@ nodes = [
:ram => 8 * 1024,
:groups => ["individual"],
:args => ["sdc"],
+ :hd => "20G",
},
{
:name => "sdnc",
@@ -182,7 +212,7 @@ nodes = [
},
{
:name => "testing",
- :ips => ['10.252.0.3', "192.168.50.3"],
+ :ips => ['10.252.2.3', "192.168.52.3"],
:macs => [],
:cpus => 2,
:cpu => "50",
@@ -212,14 +242,14 @@ nodes = [
:args => ['vid'],
},
{
- :name => "vnfsdk",
- :ips => ['10.252.0.16', "192.168.50.16"],
- :macs => [],
- :cpus => 2,
- :cpu => "50",
- :ram => 4 * 1024,
- :groups => ["individual"],
- :args => ['vnfsdk'],
+ :name => "vnfsdk",
+ :ips => ['10.252.0.18', "192.168.50.18"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 4 * 1024,
+ :groups => ["individual"],
+ :args => ['vnfsdk'],
},
{
:name => "vvp",
@@ -230,6 +260,20 @@ nodes = [
:ram => 4 * 1024,
:groups => ["individual"],
:args => ['vvp'],
+ },
+ {
+ :name => "openstack",
+ :ips => ['10.252.3.3', "192.168.53.3"],
+ :macs => [],
+ :cpus => 2,
+ :cpu => "50",
+ :ram => 8 * 1024,
+ :groups => ["individual"],
+ :args => ['openstack'],
+ :fwds => [
+ { :guest => 80, :host => 8888, :guest_ip => '192.168.53.4' },
+ { :guest => 6080, :host => 6080, :guest_ip => '192.168.53.4' },
+ ]
}
]
@@ -301,15 +345,14 @@ end
Vagrant.configure("2") do |config|
- # PROXY definitions
- if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil and ENV['no_proxy'] != nil
+ # PROXY definitions
+ if ENV['http_proxy'] != nil and ENV['https_proxy'] != nil
if not Vagrant.has_plugin?('vagrant-proxyconf')
system 'vagrant plugin install vagrant-proxyconf'
raise 'vagrant-proxyconf was installed but it requires to execute again'
end
config.proxy.http = ENV['http_proxy']
config.proxy.https = ENV['https_proxy']
- config.proxy.no_proxy = ENV['no_proxy']
configuration['socks_proxy'] = ENV['socks_proxy']
end
@@ -318,11 +361,13 @@ Vagrant.configure("2") do |config|
config.vbguest.auto_update = false
end
+ sync_type = "virtualbox"
if provider == :libvirt
if not Vagrant.has_plugin?('vagrant-libvirt')
system 'vagrant plugin install vagrant-libvirt'
raise 'vagrant-libvirt was installed but it requires to execute again'
end
+ sync_type = "nfs"
end
if provider == :openstack
@@ -336,6 +381,15 @@ Vagrant.configure("2") do |config|
nodes.each do |node|
config.vm.define node[:name] do |nodeconfig|
+ # NO_PROXY definitions
+ if ENV['no_proxy'] != nil
+ if not Vagrant.has_plugin?('vagrant-proxyconf')
+ system 'vagrant plugin install vagrant-proxyconf'
+ raise 'vagrant-proxyconf was installed but it requires to execute again'
+ end
+ config.proxy.no_proxy = node[:ips].join(",") + "," + ENV['no_proxy']
+ end
+
# Common Settings:
nodeconfig.vm.provider "virtualbox" do |vbox|
@@ -346,11 +400,55 @@ Vagrant.configure("2") do |config|
vbox.customize ['modifyvm', :id, "--cpuexecutioncap", node[:cpu]]
vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]]
vbox.customize ["modifyvm", :id, "--memory", node[:ram]]
+
+ # Set Network
+ nodeconfig.vm.network :private_network,
+ :adapter => 2,
+ :name => net_interface,
+ :ip => node[:ips][0]
+
+ nodeconfig.vm.network :private_network,
+ :adapter => 3,
+ :ip => node[:ips][1],
+ :type => :static
+
+ # Set Storage
+ if node.has_key? :hd
+ volume_file = node[:name] + '-vol1-data.vdi'
+ unless File.exist?(volume_file)
+ vbox.customize ['createhd', '--filename', volume_file, '--size', node[:hd]]
+ end
+ vbox.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', volume_file]
+ end
end
nodeconfig.vm.provider "libvirt" do |lbox|
lbox.memory = node[:ram]
lbox.nested = true
+ lbox.cpu_mode = 'host-passthrough'
+ lbox.cpus = node[:cpus]
+
+ # Set Network
+ nodeconfig.vm.network :private_network,
+ :ip => node[:ips][0]
+
+ nodeconfig.vm.network :private_network,
+ :ip => node[:ips][1],
+ :type => :static
+
+ # Set Storage
+ if node.has_key? :hd
+ lbox.storage :file, bus: 'sata', device: 'sda', size: node[:hd]
+ end
+ end
+ if node.has_key? :fwds
+ node[:fwds].each do |fwd|
+ nodeconfig.vm.network :forwarded_port,
+ :guest => fwd[:guest],
+ :guest_ip => fwd[:guest_ip],
+ :host => fwd[:host],
+ :host_ip => "0.0.0.0"
+ end
end
nodeconfig.vm.provider :openstack do |obox|
@@ -372,6 +470,12 @@ Vagrant.configure("2") do |config|
end
# Set Box type
+ if "openstack" == node[:name]
+ box = {
+ :virtualbox => 'ubuntu/xenial64',
+ :libvirt => 'elastic/ubuntu-16.04-x86_64'
+ }
+ end
nodeconfig.vm.box = box[provider]
# Set Node name
@@ -379,39 +483,14 @@ Vagrant.configure("2") do |config|
# Set Sync Folder
nodeconfig.vm.synced_folder ".", "/vagrant", disabled: true
- nodeconfig.vm.synced_folder './opt', '/opt/', create: true
- nodeconfig.vm.synced_folder './lib', '/var/onap/', create: true
+ nodeconfig.vm.synced_folder './opt', '/opt/', create: true, type: sync_type
+ nodeconfig.vm.synced_folder './lib', '/var/onap/', create: true, type: sync_type
if !is_windows
nodeconfig.vm.synced_folder '~/.m2', '/root/.m2/', create: true
end
- # Set Network
- nodeconfig.vm.network :private_network,
- :adapter => 2,
- :name => net_interface,
- :ip => node[:ips][0]
-
- nodeconfig.vm.network :private_network,
- :adapter => 3,
- :ip => node[:ips][1],
- :type => :static
-
# Specific settings:
- # Set Storage (For SDC or All-in-one)
- if node[:name].include?("all-in-one") || node[:name].include?("sdc")
- nodeconfig.vm.provider "virtualbox" do |v|
- unless File.exist?(sdc_volume)
- v.customize ['createhd', '--filename', sdc_volume, '--size', 20 * 1024]
- end
- v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume]
- end
-
- nodeconfig.vm.provider "libvirt" do |v|
- v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G'
- end
- end
-
if node[:name].include? "testing"
nodeconfig.vm.synced_folder './tests', '/var/onap_tests/', create: true
test_suite = ENV.fetch('TEST_SUITE', '*')
diff --git a/bootstrap/vagrant-onap/doc/source/install/index.rst b/bootstrap/vagrant-onap/doc/source/install/index.rst
index 6691b4f0d..62f57e66f 100644
--- a/bootstrap/vagrant-onap/doc/source/install/index.rst
+++ b/bootstrap/vagrant-onap/doc/source/install/index.rst
@@ -15,14 +15,17 @@ Ubuntu 14.04 ("Trusty")
.. code-block:: console
- $ wget -q https://releases.hashicorp.com/vagrant/1.9.7/vagrant_1.9.7_x86_64.deb
- $ sudo dpkg -i vagrant_1.9.7_x86_64.deb
+ $ wget -q https://releases.hashicorp.com/vagrant/2.0.1/vagrant_2.0.1_x86_64.deb
+ $ sudo dpkg -i vagrant_2.0.1_x86_64.deb
+
$ echo "deb http://download.virtualbox.org/virtualbox/debian trusty contrib" >> /etc/apt/sources.list
$ wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
$ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
$ sudo apt-get update -y
$ sudo apt-get install -y virtualbox-5.1 dkms
+ $ sudo apt install -y nfs-kernel-server
+
.. end
CentOS
@@ -30,12 +33,15 @@ CentOS
.. code-block:: console
- $ wget -q https://releases.hashicorp.com/vagrant/1.9.7/vagrant_1.9.7_x86_64.rpm
- $ sudo yum install vagrant_1.9.7_x86_64.rpm
+ $ wget -q https://releases.hashicorp.com/vagrant/2.0.1/vagrant_2.0.1_x86_64.rpm
+ $ sudo yum install vagrant_2.0.1_x86_64.rpm
+
$ wget -q http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo -P /etc/yum.repos.d
$ sudo yum --enablerepo=epel install dkms
$ wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | rpm --import -
- $ sudo yum install VirtualBox-5.1
+ $ sudo yum install -y VirtualBox-5.1
+
+ $ sudo yum install -y nfs-utils nfs-utils-lib
.. end
diff --git a/bootstrap/vagrant-onap/doc/source/install/known_issues.rst b/bootstrap/vagrant-onap/doc/source/install/known_issues.rst
index 84d50940b..8db55e72c 100644
--- a/bootstrap/vagrant-onap/doc/source/install/known_issues.rst
+++ b/bootstrap/vagrant-onap/doc/source/install/known_issues.rst
@@ -48,3 +48,16 @@ Remove vagrant-vbguest plugin
C:\> vagrant plugin uninstall vagrant-vbguest
.. end
+
+
+Network configuration in Windows
+--------------------------------
+
+Some Virtual Machines present a problem in their network configuration so to
+make sure the install will work as it should install the virtualbox from the
+cmd window with the following command:
+
+.. code-block:: console
+
+ c:\downloads\VirtualBox-5.1.20-114628-Win.exe -msiparams NETWORKTYPE=NDIS5
+.. end
diff --git a/bootstrap/vagrant-onap/lib/_composed_functions b/bootstrap/vagrant-onap/lib/_composed_functions
index f46499766..9f2d0a1d8 100755
--- a/bootstrap/vagrant-onap/lib/_composed_functions
+++ b/bootstrap/vagrant-onap/lib/_composed_functions
@@ -1,16 +1,5 @@
#!/bin/bash
-# compile_src() - Function that compiles the java source code thru maven
-function compile_src {
- local src_folder=$1
- pushd $src_folder
- if [ -f pom.xml ]; then
- install_maven
- mvn clean install -U -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none
- fi
- popd
-}
-
# build_docker_image() - Build Docker container image from source code
function build_docker_image {
local src_folder=$1
diff --git a/bootstrap/vagrant-onap/lib/_onap_functions b/bootstrap/vagrant-onap/lib/_onap_functions
index cedd6f0a3..960b298ef 100755
--- a/bootstrap/vagrant-onap/lib/_onap_functions
+++ b/bootstrap/vagrant-onap/lib/_onap_functions
@@ -15,7 +15,7 @@ function create_configuration_files {
# docker_openecomp_login() - Login to OpenECOMP Docker Hub
function docker_openecomp_login {
install_docker
- docker login -u $nexus_username -p $nexus_password $nexus_docker_repo
+ docker login -u ${nexus_username:-docker} -p ${nexus_password:-docker} ${nexus_docker_repo:-nexus3.onap.org:10001}
}
# pull_openecomp_image() - Pull Docker container image from a Docker Registry Hub
@@ -23,7 +23,7 @@ function pull_openecomp_image {
local image=$1
local tag=$2
docker_openecomp_login
- pull_docker_image $nexus_docker_repo/openecomp/${image}:${docker_version-latest} $tag
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/openecomp/${image}:${docker_version-latest} $tag
docker logout
}
@@ -32,7 +32,7 @@ function pull_onap_image {
local image=$1
local tag=$2
docker_openecomp_login
- pull_docker_image $nexus_docker_repo/onap/${image}:${docker_version-latest} $tag
+ pull_docker_image ${nexus_docker_repo:-nexus3.onap.org:10001}/onap/${image}:${docker_version-latest} $tag
docker logout
}
diff --git a/bootstrap/vagrant-onap/lib/aai b/bootstrap/vagrant-onap/lib/aai
index d267e020d..7a68fbbf4 100755
--- a/bootstrap/vagrant-onap/lib/aai
+++ b/bootstrap/vagrant-onap/lib/aai
@@ -1,16 +1,8 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
hbase_version=1.2.3
-aai_src_folder=$git_src_folder/aai
-aai_repos=("aai/aai-common" "aai/aai-config" "aai/aai-data" "aai/aai-service" \
-"aai/babel" "aai/champ" "aai/data-router" "aai/esr-gui" "aai/esr-server" \
-"aai/gizmo" "aai/logging-service" "aai/model-loader" "aai/resources" \
-"aai/rest-client" "aai/router-core" "aai/search-data-service" \
-"aai/test-config" "aai/traversal" "aai/sparky-fe" "aai/sparky-be")
# install_hadoop() - Function that installs Hadoop
function install_hadoop {
@@ -53,18 +45,12 @@ function install_haproxy {
service haproxy restart
}
-# clone_all_aai_repos() - Function that clones AAI source repo.
-function clone_all_aai_repos {
- for repo in ${aai_repos[@]}; do
- clone_repo $repo $aai_src_folder${repo#*aai}
- done
-}
# compile_aai_repos() - Function that compiles AAI source repo.
function compile_aai_repos {
- local repos=("aai/aai-common" "aai/resources" "aai/logging-service" "aai/traversal")
+ local repos="aai/aai-common aai/resources aai/logging-service aai/traversal"
if [[ "$compile_repo" == "True" ]]; then
- repos=("${aai_repos[@]}")
+ repos="${repos[aai]}"
fi
for repo in ${repos[@]}; do
@@ -103,17 +89,12 @@ function start_aai_microservices {
done
}
-# _pull_hbase_image() - Pull HBase container image from a Docker Registry Hub
-function _pull_hbase_image {
- docker_openecomp_login
- docker pull $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
-}
-
# install_hbase() - Install HBase Service
function install_hbase {
docker rm -f hbase
- _pull_hbase_image
- docker run -d --net=host --name="hbase" $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
+ docker_openecomp_login
+ docker pull $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
+ run_docker_image -d --net=host --name="hbase" $nexus_docker_repo/aaidocker/aai-hbase-${hbase_version}
}
# install_ajsc() - Install ASJC Java service container
@@ -131,7 +112,7 @@ EOL
pull_openecomp_image ajsc-aai
fi
- docker run --env-file /etc/ajsc-aai.conf --name=aai-service --net=host -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt -it -d $nexus_docker_repo/openecomp/ajsc-aai:$docker_version
+ run_docker_image --env-file /etc/ajsc-aai.conf --name=aai-service --net=host -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt -it -d $nexus_docker_repo/openecomp/ajsc-aai:$docker_version
}
# install_model_loader() Install Model Loader
@@ -160,7 +141,7 @@ EOL
pull_openecomp_image model-loader
ARGS+="--name=model-loader-service -it -d --env-file /etc/model-loader.conf $nexus_docker_repo/openecomp/model-loader:$docker_version"
fi
- docker run ${ARGS}
+ run_docker_image ${ARGS}
}
# _wait_for_sdc() - Function that determines if SDC is up and running
@@ -184,7 +165,7 @@ function init_aai {
install_haproxy
if [[ "$clone_repo" == "True" ]]; then
- clone_all_aai_repos
+ clone_repos "aai"
if [[ "$compile_repo" == "True" ]]; then
compile_aai_repos
fi
diff --git a/bootstrap/vagrant-onap/lib/appc b/bootstrap/vagrant-onap/lib/appc
index 4d93f54e5..ad01ca53e 100755
--- a/bootstrap/vagrant-onap/lib/appc
+++ b/bootstrap/vagrant-onap/lib/appc
@@ -1,27 +1,8 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/sdnc
source /var/onap/functions
-appc_src_folder=$git_src_folder/appc
-appc_repos=("appc" "appc/deployment")
-
-# clone_all_appc_repos() - Function that clones APPC source repo.
-function clone_all_appc_repos {
- for repo in ${appc_repos[@]}; do
- clone_repo $repo $appc_src_folder${repo#*appc}
- done
-}
-
-# compile_all_appc_repos() - Function that compiles APPC source repo.
-function compile_all_appc_repos {
- for repo in ${appc_repos[@]}; do
- compile_src $appc_src_folder${repo#*appc}
- done
-}
-
# _build_appc_images() - Function that creates APPC images from source code.
function _build_appc_images {
get_sdnc_images
@@ -49,9 +30,9 @@ function install_appc {
# init_appc() - Function that initializes APPC services
function init_appc {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_appc_repos
+ clone_repos "appc"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_appc_repos
+ compile_repos "appc"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/ccsdk b/bootstrap/vagrant-onap/lib/ccsdk
index 03987ea7e..112839c69 100755
--- a/bootstrap/vagrant-onap/lib/ccsdk
+++ b/bootstrap/vagrant-onap/lib/ccsdk
@@ -1,35 +1,10 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-ccsdk_src_folder=$git_src_folder/ccsdk
-ccsdk_repos=("ccsdk" "ccsdk/dashboard" "ccsdk/distribution" "ccsdk/parent"
-"ccsdk/platform/blueprints" "ccsdk/platform/nbapi" "ccsdk/platform/plugins"
-"ccsdk/sli" "ccsdk/sli/adaptors" "ccsdk/sli/core" "ccsdk/sli/northbound"
-"ccsdk/sli/plugins" "ccsdk/storage" "ccsdk/storage/esaas" "ccsdk/storage/pgaas"
-"ccsdk/utils")
-
-# clone_ccsdk_repos() - Function that clones the CCSDK repositories
-function clone_ccsdk_repos {
- for repo in ${ccsdk_repos[@]}; do
- clone_repo $repo $ccsdk_src_folder${repo#ccsdk}
- done
-}
-
-# compile_ccsdk_repos() -
-function compile_ccsdk_repos {
- install_package unzip
- for repo in ${ccsdk_repos[@]}; do
- compile_src $ccsdk_src_folder${repo#ccsdk}
- done
-}
-
# _build_ccsdk_images() - Build CCSDK Docker images from source code
function _build_ccsdk_images {
install_package unzip
- clone_repo ccsdk/distribution $ccsdk_src_folder/distribution
compile_src $ccsdk_src_folder/distribution
for image in ubuntu opendaylight odlsli dgbuilder-docker; do
build_docker_image $ccsdk_src_folder/distribution/$image docker
@@ -50,9 +25,12 @@ function get_ccsdk_images {
# init_ccsdk() - Function that initializes CCSDK services
function init_ccsdk {
if [[ "$clone_repo" == "True" ]]; then
- clone_ccsdk_repos
+ clone_repos "ccsdk"
if [[ "$compile_repo" == "True" ]]; then
- compile_ccsdk_repos
+ compile_repos "ccsdk"
fi
fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_ccsdk_images
+ fi
}
diff --git a/bootstrap/vagrant-onap/lib/commons b/bootstrap/vagrant-onap/lib/commons
index 26e2cc26a..5d0c69108 100755
--- a/bootstrap/vagrant-onap/lib/commons
+++ b/bootstrap/vagrant-onap/lib/commons
@@ -1,26 +1,27 @@
#!/bin/bash
-set -o xtrace
-
# update_repos() - Function that updates linux repositories
function update_repos {
+ echo "Updating repositories list..."
if [ -f /var/onap/files/sources.list ]; then
cp /var/onap/files/sources.list /etc/apt/sources.list
fi
- if [ -f /var/onap/files/proxyrc ]; then
- source /var/onap/files/proxyrc
- cp /var/onap/files/proxyrc /etc/profile.d/proxy.sh
-
- if [ -f /etc/apt/apt.conf ]; then
- echo "Acquire::http::Proxy \"${http_proxy}\";" >> /etc/apt/apt.conf
- echo "Acquire::https::Proxy \"${https_proxy}\";" >> /etc/apt/apt.conf
- fi
- if [ -d /etc/apt/apt.conf.d ] & [ ! -f /etc/apt/apt.conf.d/70proxy.conf ]; then
- echo "Acquire::http::Proxy \"${http_proxy}\";" >> /etc/apt/apt.conf.d/70proxy.conf
- echo "Acquire::https::Proxy \"${https_proxy}\";" >> /etc/apt/apt.conf.d/70proxy.conf
- fi
- fi
- apt-get update -qq -y
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper -n ref
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get update > /dev/null
+ else
+ apt-get update
+ fi
+ ;;
+ rhel|centos|fedora)
+ yum updateinfo
+ ;;
+ esac
}
# is_package_installed() - Function to tell if a package is installed
@@ -28,35 +29,88 @@ function is_package_installed {
if [[ -z "$@" ]]; then
return 1
fi
- dpkg -s "$@" > /dev/null 2> /dev/null
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ dpkg -s "$@" > /dev/null
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_packages() - Install a list of packages
function install_packages {
local package=$@
- update_repos
- apt-get install -y -qq $package
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get install -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_package() - Install specific package if doesn't exist
function install_package {
local package=$1
+
if ! is_package_installed $package; then
- update_repos
- apt-get install -y -qq $package
+ echo "Installing $package..."
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ zypper install -y $package
+ ;;
+ ubuntu|debian)
+ if [[ "$debug" == "False" ]]; then
+ apt-get install -y -qq -o=Dpkg::Use-Pty=0 $package
+ else
+ apt-get install -y $package
+ fi
+ ;;
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ ${PKG_MANAGER} -y install $package
+ ;;
+ esac
fi
}
# uninstall_packages() - Uninstall a list of packages
function uninstall_packages {
local packages=$@
- apt-get purge -y -qq $packages
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $packages
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# uninstall_package() - Uninstall specific package if exists
function uninstall_package {
local package=$1
if is_package_installed $package; then
- apt-get purge -y -qq $package
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get purge -y -qq $package
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
fi
}
diff --git a/bootstrap/vagrant-onap/lib/config/env-vars b/bootstrap/vagrant-onap/lib/config/env-vars
new file mode 100755
index 000000000..a55557ae7
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/config/env-vars
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+# Source code destination folder
+git_src_folder=/opt
+
+declare -A src_folders
+src_folders=(
+["aai"]="$git_src_folder/aai"
+["appc"]="$git_src_folder/appc"
+["ccsdk"]="$git_src_folder/ccsdk"
+["dcae"]="$git_src_folder/dcae"
+["mr"]="$git_src_folder/dcae/message-router"
+["msb"]="$git_src_folder/msb"
+["mso"]="$git_src_folder/mso"
+["multicloud"]="$git_src_folder/multicloud"
+["oom"]="$git_src_folder/oom"
+["policy"]="$git_src_folder/policy"
+["portal"]="$git_src_folder/portal"
+["robot"]="$git_src_folder/testsuite"
+["sdc"]="$git_src_folder/sdc"
+["sdnc"]="$git_src_folder/openecomp/sdnc"
+["vfc"]="$git_src_folder/vfc"
+["vid"]="$git_src_folder/vid"
+["vnfsdk"]="$git_src_folder/vnfsdk"
+["vvp"]="$git_src_folder/vvp"
+)
+
+# Repositories list
+declare -A repos
+repos=(
+["aai"]="aai/aai-common aai/aai-config aai/aai-data aai/aai-service \
+aai/babel aai/champ aai/data-router aai/esr-gui aai/esr-server aai/gizmo \
+aai/logging-service aai/model-loader aai/resources aai/rest-client \
+aai/router-core aai/search-data-service aai/test-config aai/traversal \
+aai/sparky-fe aai/sparky-be"
+["appc"]="appc appc/deployment"
+["ccsdk"]="ccsdk ccsdk/dashboard ccsdk/distribution ccsdk/parent \
+ccsdk/platform/blueprints ccsdk/platform/nbapi \
+ccsdk/platform/plugins ccsdk/sli ccsdk/sli/adaptors ccsdk/sli/core \
+ccsdk/sli/northbound ccsdk/sli/plugins ccsdk/storage \
+ccsdk/storage/esaas ccsdk/storage/pgaas ccsdk/utils"
+["dcae"]="dcae dcae/apod dcae/apod/analytics dcae/apod/buildtools \
+dcae/apod/cdap dcae/collectors dcae/collectors/ves dcae/controller \
+dcae/controller/analytics dcae/dcae-inventory dcae/demo \
+dcae/demo/startup dcae/demo/startup/aaf dcae/demo/startup/controller \
+dcae/demo/startup/message-router dcae/dmaapbc dcae/operation \
+dcae/operation/utils dcae/orch-dispatcher dcae/pgaas dcae/utils \
+dcae/utils/buildtools"
+["msb"]="msb/apigateway msb/discovery msb/java-sdk msb/swagger-sdk"
+["mso"]="mso mso/chef-repo mso/docker-config mso/libs mso/mso-config"
+["multicloud"]="multicloud multicloud/framework multicloud/openstack \
+multicloud/openstack/vmware multicloud/openstack/windriver \
+multicloud/azure"
+["oom"]="oom oom/registrator"
+["policy"]="policy/api policy/common policy/docker \
+policy/drools-applications policy/drools-pdp policy/engine \
+policy/gui policy/pap policy/pdp"
+["portal"]="portal portal/sdk ecompsdkos ui/dmaapbc"
+["robot"]="testsuite testsuite/heatbridge testsuite/properties \
+testsuite/python-testing-utils"
+["sdc"]="sdc sdc/jtosca sdc/sdc-distribution-client \
+sdc/sdc-docker-base sdc/sdc-titan-cassandra sdc/sdc-tosca \
+sdc/sdc-vnfdesign sdc/sdc-workflow-designer sdc/sdc_common"
+["sdnc"]="sdnc/adaptors sdnc/architecture sdnc/core sdnc/features \
+sdnc/northbound sdnc/oam sdnc/parent sdnc/plugins"
+["vfc"]="vfc/gvnfm vfc/gvnfm/vnflcm vfc/gvnfm/vnfmgr \
+vfc/gvnfm/vnfres vfc/nfvo vfc/nfvo/catalog vfc/nfvo/driver \
+vfc/nfvo/driver/ems vfc/nfvo/driver/sfc vfc/nfvo/driver/vnfm \
+vfc/nfvo/driver/vnfm/gvnfm vfc/nfvo/driver/vnfm/svnfm vfc/nfvo/lcm \
+vfc/nfvo/resmanagement vfc/nfvo/wfengine"
+["vid"]="vid vid/asdcclient"
+["vnfsdk"]="vnfsdk/compliance vnfsdk/functest vnfsdk/lctest \
+vnfsdk/model vnfsdk/pkgtools vnfsdk/refrepo vnfsdk/validation"
+["vvp"]="vvp/ansible-ice-bootstrap vvp/cms vvp/devkit \
+vvp/documentation vvp/engagementmgr vvp/gitlab vvp/image-scanner \
+vvp/jenkins vvp/portal vvp/postgresql vvp/test-engine \
+vvp/validation-scripts"
+)
+
diff --git a/bootstrap/vagrant-onap/lib/dcae b/bootstrap/vagrant-onap/lib/dcae
index 78ca8de06..d6fdd89b2 100755
--- a/bootstrap/vagrant-onap/lib/dcae
+++ b/bootstrap/vagrant-onap/lib/dcae
@@ -1,17 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-dcae_src_folder=$git_src_folder/dcae
-dcae_repos=("dcae" "dcae/apod" "dcae/apod/analytics" "dcae/apod/buildtools"
-"dcae/apod/cdap" "dcae/collectors" "dcae/collectors/ves" "dcae/controller"
-"dcae/controller/analytics" "dcae/dcae-inventory" "dcae/demo"
-"dcae/demo/startup" "dcae/demo/startup/aaf" "dcae/demo/startup/controller"
-"dcae/demo/startup/message-router" "dcae/dmaapbc" "dcae/operation"
-"dcae/operation/utils" "dcae/orch-dispatcher" "dcae/pgaas" "dcae/utils" "dcae/utils/buildtools")
-
# _create_config_file() - Creates a configuration yaml file for the controller
function _create_config_file {
cat > $dcae_src_folder/controller/config.yaml << EOL
@@ -41,50 +31,13 @@ GIT-MR-REPO: http://gerrit.onap.org/r/dcae/demo/startup/message-router.git
EOL
}
-# _build_docker_image() - Function that clones, compiles and build a Docker image
-function _build_docker_image {
- local src_folder=$1
- local name=$2
- install_docker
-
- pushd $src_folder
- # NOTE: Workaround for dmmapbc images
- sed -i '/LocalKey/d' Dockerfile
- local docker_build="docker build -t $name -f ./Dockerfile ."
- if [ $http_proxy ]; then
- docker_build+=" --build-arg http_proxy=$http_proxy"
- fi
- if [ $https_proxy ]; then
- docker_build+=" --build-arg https_proxy=$https_proxy"
- fi
- eval $docker_build
- popd
-}
-
-# clone_all_dcae_repos() - Function that clones DCAE source repo.
-function clone_all_dcae_repos {
- for repo in ${dcae_repos[@]}; do
- clone_repo $repo $dcae_src_folder${repo#*dcae}
- done
-}
-
-# compile_all_dcae_repos() - Function that compiles DCAE source repo.
-function compile_all_dcae_repos {
- for repo in ${dcae_repos[@]}; do
- compile_src $dcae_src_folder${repo#*dcae}
- done
-}
-
# _build_dcae_images() Function that builds DCAE docker images from source code.
function _build_dcae_images {
if [[ "$compile_repo" != "True" ]]; then
- compile_src $dcae_src_folder/dmaapbc
- compile_src $dcae_src_folder/orch-dispatcher
- compile_src $dcae_src_folder/demo
- compile_src $dcae_src_folder/dcae-inventory
+ compile_repos "dcae"
fi
- _build_docker_image $dcae_src_folder/dmaapbc openecomp/dcae-dmaapbc
- _build_docker_image $dcae_src_folder/orch-dispatcher dcae/orch-dispatcher
+ build_docker_image $dcae_src_folder/dmaapbc openecomp/dcae-dmaapbc
+ build_docker_image $dcae_src_folder/orch-dispatcher dcae/orch-dispatcher
pushd $dcae_src_folder/demo
bash dcae-demo-controller/src/main/docker-build/build.sh
@@ -118,15 +71,15 @@ function install_dcae {
make up
fi
popd
- # docker run -p 8080:8080 -d -v <some local directory>/config.yml:/opt/config.yml --name dcae-inventory <docker registry>/dcae-inventory:<version>
+ # run_docker_image -p 8080:8080 -d -v <some local directory>/config.yml:/opt/config.yml --name dcae-inventory <docker registry>/dcae-inventory:<version>
}
# init_dcae() - Function that initializes DCAE Controller services
function init_dcae {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_dcae_repos
+ clone_repos "dcae"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_dcae_repos
+ compile_repos "dcae"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/files/all-in-one b/bootstrap/vagrant-onap/lib/files/all-in-one
new file mode 100644
index 000000000..efdb2bfce
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/all-in-one
@@ -0,0 +1,585 @@
+# These initial groups are the only groups required to be modified. The
+# additional groups are for more control of the environment.
+[control]
+localhost ansible_connection=local
+
+[network]
+localhost ansible_connection=local
+
+[compute]
+localhost ansible_connection=local
+
+[storage]
+localhost ansible_connection=local
+
+[monitoring]
+localhost ansible_connection=local
+
+[deployment]
+localhost ansible_connection=local
+
+# You can explicitly specify which hosts run each project by updating the
+# groups in the sections below. Common services are grouped together.
+[chrony-server:children]
+haproxy
+
+[chrony:children]
+network
+compute
+storage
+monitoring
+
+[collectd:children]
+compute
+
+[baremetal:children]
+control
+
+[grafana:children]
+monitoring
+
+[etcd:children]
+control
+compute
+
+[karbor:children]
+control
+
+[kibana:children]
+control
+
+[telegraf:children]
+compute
+control
+monitoring
+network
+storage
+
+[elasticsearch:children]
+control
+
+[haproxy:children]
+network
+
+[hyperv]
+#hyperv_host
+
+[hyperv:vars]
+#ansible_user=user
+#ansible_password=password
+#ansible_port=5986
+#ansible_connection=winrm
+#ansible_winrm_server_cert_validation=ignore
+
+[mariadb:children]
+control
+
+[rabbitmq:children]
+control
+
+[outward-rabbitmq:children]
+control
+
+[qdrouterd:children]
+control
+
+[mongodb:children]
+control
+
+[keystone:children]
+control
+
+[glance:children]
+control
+
+[nova:children]
+control
+
+[neutron:children]
+network
+
+[openvswitch:children]
+network
+compute
+manila-share
+
+[opendaylight:children]
+network
+
+[cinder:children]
+control
+
+[cloudkitty:children]
+control
+
+[freezer:children]
+control
+
+[memcached:children]
+control
+
+[horizon:children]
+control
+
+[swift:children]
+control
+
+[barbican:children]
+control
+
+[heat:children]
+control
+
+[murano:children]
+control
+
+[ceph:children]
+control
+
+[ironic:children]
+control
+
+[influxdb:children]
+monitoring
+
+[magnum:children]
+control
+
+[sahara:children]
+control
+
+[solum:children]
+control
+
+[mistral:children]
+control
+
+[manila:children]
+control
+
+[panko:children]
+control
+
+[gnocchi:children]
+control
+
+[ceilometer:children]
+control
+
+[aodh:children]
+control
+
+[congress:children]
+control
+
+[tacker:children]
+control
+
+# Tempest
+[tempest:children]
+control
+
+[senlin:children]
+control
+
+[vmtp:children]
+control
+
+[trove:children]
+control
+
+[watcher:children]
+control
+
+[rally:children]
+control
+
+[searchlight:children]
+control
+
+[octavia:children]
+control
+
+[designate:children]
+control
+
+[placement:children]
+control
+
+[bifrost:children]
+deployment
+
+[zun:children]
+control
+
+[skydive:children]
+monitoring
+
+[redis:children]
+control
+
+# Additional control implemented here. These groups allow you to control which
+# services run on which hosts at a per-service level.
+#
+# Word of caution: Some services are required to run on the same host to
+# function appropriately. For example, neutron-metadata-agent must run on the
+# same host as the l3-agent and (depending on configuration) the dhcp-agent.
+
+# Glance
+[glance-api:children]
+glance
+
+[glance-registry:children]
+glance
+
+# Nova
+[nova-api:children]
+nova
+
+[nova-conductor:children]
+nova
+
+[nova-consoleauth:children]
+nova
+
+[nova-novncproxy:children]
+nova
+
+[nova-scheduler:children]
+nova
+
+[nova-spicehtml5proxy:children]
+nova
+
+[nova-compute-ironic:children]
+nova
+
+[nova-serialproxy:children]
+nova
+
+# Neutron
+[neutron-server:children]
+control
+
+[neutron-dhcp-agent:children]
+neutron
+
+[neutron-l3-agent:children]
+neutron
+
+[neutron-lbaas-agent:children]
+neutron
+
+[neutron-metadata-agent:children]
+neutron
+
+[neutron-vpnaas-agent:children]
+neutron
+
+[neutron-bgp-dragent:children]
+neutron
+
+# Ceph
+[ceph-mon:children]
+ceph
+
+[ceph-rgw:children]
+ceph
+
+[ceph-osd:children]
+storage
+
+# Cinder
+[cinder-api:children]
+cinder
+
+[cinder-backup:children]
+storage
+
+[cinder-scheduler:children]
+cinder
+
+[cinder-volume:children]
+storage
+
+# Cloudkitty
+[cloudkitty-api:children]
+cloudkitty
+
+[cloudkitty-processor:children]
+cloudkitty
+
+# Freezer
+[freezer-api:children]
+freezer
+
+# iSCSI
+[iscsid:children]
+compute
+storage
+ironic-conductor
+
+[tgtd:children]
+storage
+
+# Karbor
+[karbor-api:children]
+karbor
+
+[karbor-protection:children]
+karbor
+
+[karbor-operationengine:children]
+karbor
+
+# Manila
+[manila-api:children]
+manila
+
+[manila-scheduler:children]
+manila
+
+[manila-share:children]
+network
+
+[manila-data:children]
+manila
+
+# Swift
+[swift-proxy-server:children]
+swift
+
+[swift-account-server:children]
+storage
+
+[swift-container-server:children]
+storage
+
+[swift-object-server:children]
+storage
+
+# Barbican
+[barbican-api:children]
+barbican
+
+[barbican-keystone-listener:children]
+barbican
+
+[barbican-worker:children]
+barbican
+
+# Trove
+[trove-api:children]
+trove
+
+[trove-conductor:children]
+trove
+
+[trove-taskmanager:children]
+trove
+
+# Heat
+[heat-api:children]
+heat
+
+[heat-api-cfn:children]
+heat
+
+[heat-engine:children]
+heat
+
+# Murano
+[murano-api:children]
+murano
+
+[murano-engine:children]
+murano
+
+# Ironic
+[ironic-api:children]
+ironic
+
+[ironic-conductor:children]
+ironic
+
+[ironic-inspector:children]
+ironic
+
+[ironic-pxe:children]
+ironic
+
+# Magnum
+[magnum-api:children]
+magnum
+
+[magnum-conductor:children]
+magnum
+
+# Solum
+[solum-api:children]
+solum
+
+[solum-worker:children]
+solum
+
+[solum-deployer:children]
+solum
+
+[solum-conductor:children]
+solum
+
+# Mistral
+[mistral-api:children]
+mistral
+
+[mistral-executor:children]
+mistral
+
+[mistral-engine:children]
+mistral
+
+# Aodh
+[aodh-api:children]
+aodh
+
+[aodh-evaluator:children]
+aodh
+
+[aodh-listener:children]
+aodh
+
+[aodh-notifier:children]
+aodh
+
+# Panko
+[panko-api:children]
+panko
+
+# Gnocchi
+[gnocchi-api:children]
+gnocchi
+
+[gnocchi-statsd:children]
+gnocchi
+
+[gnocchi-metricd:children]
+gnocchi
+
+# Sahara
+[sahara-api:children]
+sahara
+
+[sahara-engine:children]
+sahara
+
+# Ceilometer
+[ceilometer-api:children]
+ceilometer
+
+[ceilometer-central:children]
+ceilometer
+
+[ceilometer-notification:children]
+ceilometer
+
+[ceilometer-collector:children]
+ceilometer
+
+[ceilometer-compute:children]
+compute
+
+# Congress
+[congress-api:children]
+congress
+
+[congress-datasource:children]
+congress
+
+[congress-policy-engine:children]
+congress
+
+# Multipathd
+[multipathd:children]
+compute
+
+# Watcher
+[watcher-api:children]
+watcher
+
+[watcher-engine:children]
+watcher
+
+[watcher-applier:children]
+watcher
+
+# Senlin
+[senlin-api:children]
+senlin
+
+[senlin-engine:children]
+senlin
+
+# Searchlight
+[searchlight-api:children]
+searchlight
+
+[searchlight-listener:children]
+searchlight
+
+# Octavia
+[octavia-api:children]
+octavia
+
+[octavia-health-manager:children]
+octavia
+
+[octavia-housekeeping:children]
+octavia
+
+[octavia-worker:children]
+octavia
+
+# Designate
+[designate-api:children]
+designate
+
+[designate-central:children]
+designate
+
+[designate-mdns:children]
+network
+
+[designate-worker:children]
+designate
+
+[designate-sink:children]
+designate
+
+[designate-backend-bind9:children]
+designate
+
+# Placement
+[placement-api:children]
+placement
+
+# Zun
+[zun-api:children]
+zun
+
+[zun-compute:children]
+compute
+
+# Skydive
+[skydive-analyzer:children]
+skydive
+
+[skydive-agent:children]
+compute
+network
+
+# Tacker
+[tacker-server:children]
+tacker
+
+[tacker-conductor:children]
+tacker
diff --git a/bootstrap/vagrant-onap/lib/files/globals.yml b/bootstrap/vagrant-onap/lib/files/globals.yml
new file mode 100644
index 000000000..d10cc3d83
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/globals.yml
@@ -0,0 +1,2 @@
+---
+openstack_release: "master"
diff --git a/bootstrap/vagrant-onap/lib/files/kolla-build.conf b/bootstrap/vagrant-onap/lib/files/kolla-build.conf
new file mode 100644
index 000000000..8dd14e6c6
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/kolla-build.conf
@@ -0,0 +1,5 @@
+[DEFAULT]
+base = ubuntu
+profile = main
+
+[profiles]
diff --git a/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py b/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py
new file mode 100644
index 000000000..6b5a6e9f6
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/kubectl_config_generator.py
@@ -0,0 +1,40 @@
+import requests
+import os
+import base64
+
+RANCHER_URL = str(os.environ['RANCHER_URL'])
+RANCHER_ENVIRONMENT_ID = str(os.environ['RANCHER_ENVIRONMENT'])
+data = requests.post(RANCHER_URL + '/v1/projects/' + RANCHER_ENVIRONMENT_ID + '/apikeys',
+ {"accountId": RANCHER_ENVIRONMENT_ID,
+ "description": "ONAP on Kubernetes",
+ "name": "ONAP on Kubernetes",
+ "publicValue": "string",
+ "secretValue": "password"})
+json_dct = data.json()
+access_key = json_dct['publicValue']
+secret_key = json_dct['secretValue']
+auth_header = 'Basic ' + base64.b64encode(access_key + ':' + secret_key)
+token = "\"" + str(base64.b64encode(auth_header)) + "\""
+dct = \
+"""
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "{}/r/projects/{}/kubernetes:6443"
+ name: "onap_on_kubernetes"
+contexts:
+- context:
+ cluster: "onap_on_kubernetes"
+ user: "onap_on_kubernetes"
+ name: "onap_on_kubernetes"
+current-context: "onap_on_kubernetes"
+users:
+- name: "onap_on_kubernetes"
+ user:
+ token: {}
+""".format(RANCHER_URL, RANCHER_ENVIRONMENT_ID, token)
+with open("config", "w") as file:
+ file.write(dct)
diff --git a/bootstrap/vagrant-onap/lib/files/passwords.yml b/bootstrap/vagrant-onap/lib/files/passwords.yml
new file mode 100644
index 000000000..f376e31f0
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/files/passwords.yml
@@ -0,0 +1,216 @@
+---
+###################
+# Ceph options
+####################
+# These options must be UUID4 values in string format
+# XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX
+ceph_cluster_fsid:
+ceph_rgw_keystone_password:
+# For backward compatibility, rbd_secret_uuid is only used for nova,
+# cinder_rbd_secret_uuid is used for cinder
+rbd_secret_uuid:
+cinder_rbd_secret_uuid:
+
+###################
+# Database options
+####################
+database_password:
+
+####################
+# Docker options
+####################
+# This should only be set if you require a password for your Docker registry
+docker_registry_password:
+
+######################
+# OpenDaylight options
+######################
+opendaylight_password:
+
+####################
+# OpenStack options
+####################
+aodh_database_password:
+aodh_keystone_password:
+
+barbican_database_password:
+barbican_keystone_password:
+barbican_p11_password:
+barbican_crypto_key:
+
+keystone_admin_password:
+keystone_database_password:
+
+grafana_database_password:
+grafana_admin_password:
+
+glance_database_password:
+glance_keystone_password:
+
+gnocchi_database_password:
+gnocchi_keystone_password:
+
+karbor_database_password:
+karbor_keystone_password:
+karbor_openstack_infra_id:
+
+kuryr_keystone_password:
+
+nova_database_password:
+nova_api_database_password:
+nova_keystone_password:
+
+placement_keystone_password:
+
+neutron_database_password:
+neutron_keystone_password:
+metadata_secret:
+
+cinder_database_password:
+cinder_keystone_password:
+
+cloudkitty_database_password:
+cloudkitty_keystone_password:
+
+panko_database_password:
+panko_keystone_password:
+
+freezer_database_password:
+freezer_keystone_password:
+
+sahara_database_password:
+sahara_keystone_password:
+
+designate_database_password:
+designate_pool_manager_database_password:
+designate_keystone_password:
+# This option must be UUID4 value in string format
+designate_pool_id:
+# This option must be HMAC-MD5 value in string format
+designate_rndc_key:
+
+swift_keystone_password:
+swift_hash_path_suffix:
+swift_hash_path_prefix:
+
+heat_database_password:
+heat_keystone_password:
+heat_domain_admin_password:
+
+murano_database_password:
+murano_keystone_password:
+murano_agent_rabbitmq_password:
+
+ironic_database_password:
+ironic_keystone_password:
+
+ironic_inspector_database_password:
+ironic_inspector_keystone_password:
+
+magnum_database_password:
+magnum_keystone_password:
+
+mistral_database_password:
+mistral_keystone_password:
+
+trove_database_password:
+trove_keystone_password:
+
+ceilometer_database_password:
+ceilometer_keystone_password:
+
+watcher_database_password:
+watcher_keystone_password:
+
+congress_database_password:
+congress_keystone_password:
+
+rally_database_password:
+
+senlin_database_password:
+senlin_keystone_password:
+
+solum_database_password:
+solum_keystone_password:
+
+horizon_secret_key:
+horizon_database_password:
+
+telemetry_secret_key:
+
+manila_database_password:
+manila_keystone_password:
+
+octavia_database_password:
+octavia_keystone_password:
+octavia_ca_password:
+
+searchlight_keystone_password:
+
+tacker_database_password:
+tacker_keystone_password:
+
+zun_database_password:
+zun_keystone_password:
+
+memcache_secret_key:
+
+#HMAC secret key
+osprofiler_secret:
+
+nova_ssh_key:
+ private_key:
+ public_key:
+
+kolla_ssh_key:
+ private_key:
+ public_key:
+
+keystone_ssh_key:
+ private_key:
+ public_key:
+
+bifrost_ssh_key:
+ private_key:
+ public_key:
+
+####################
+# Gnocchi options
+####################
+gnocchi_project_id:
+gnocchi_resource_id:
+gnocchi_user_id:
+
+####################
+# Qdrouterd options
+####################
+qdrouterd_password:
+
+####################
+# RabbitMQ options
+####################
+rabbitmq_password:
+rabbitmq_cluster_cookie:
+outward_rabbitmq_password:
+outward_rabbitmq_cluster_cookie:
+
+####################
+# HAProxy options
+####################
+haproxy_password:
+keepalived_password:
+
+####################
+# Kibana options
+####################
+kibana_password:
+
+####################
+# etcd options
+####################
+etcd_cluster_token:
+
+####################
+# redis options
+####################
+redis_master_password:
diff --git a/bootstrap/vagrant-onap/lib/functions b/bootstrap/vagrant-onap/lib/functions
index 02111fa2c..c2c6d76a5 100755
--- a/bootstrap/vagrant-onap/lib/functions
+++ b/bootstrap/vagrant-onap/lib/functions
@@ -1,16 +1,15 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/commons
+source /var/onap/config/env-vars
source /var/onap/_composed_functions
source /var/onap/_onap_functions
export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' |sort -n | head -1)
-export IP_ADDRESS=$(ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
+export NIC=$(ip route get 8.8.8.8 | awk '{ print $5; exit }')
+export IP_ADDRESS=$(ifconfig $NIC | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
mvn_conf_file=/root/.m2/settings.xml
-git_src_folder=/opt
# configure_dns() - DNS/GW IP address configuration
function configure_dns {
@@ -18,6 +17,14 @@ function configure_dns {
resolvconf -u
}
+# get_next_ip() - Function that provides the next ip
+function get_next_ip {
+ local ip=${1:-$IP_ADDRESS}
+ ip_hex=$(printf '%.2X%.2X%.2X%.2X\n' `echo $ip | sed -e 's/\./ /g'`)
+ next_ip_hex=$(printf %.8X `echo $(( 0x$ip_hex + 1 ))`)
+ echo $(printf '%d.%d.%d.%d\n' `echo $next_ip_hex | sed -r 's/(..)/0x\1 /g'`)
+}
+
# _git_timed() - git can sometimes get itself infinitely stuck with transient network
# errors or other issues with the remote end. This wraps git in a
# timeout/retry loop and is intended to watch over non-local git
@@ -44,11 +51,15 @@ function _git_timed {
# clone_repo() - Clone Git repository into specific folder
function clone_repo {
- local repo_url=https://git.onap.org/
+ local repo_url=${3:-"https://git.onap.org/"}
local repo=$1
local dest_folder=${2:-$git_src_folder/$repo}
if [ ! -d $dest_folder ]; then
- _git_timed clone ${repo_url}${repo} $dest_folder
+ if [[ "$debug" == "False" ]]; then
+ _git_timed clone --quiet ${repo_url}${repo} $dest_folder
+ else
+ _git_timed clone ${repo_url}${repo} $dest_folder
+ fi
else
pushd $dest_folder
_git_timed pull
@@ -56,9 +67,14 @@ function clone_repo {
fi
}
-# install_dev_tools() - Install basic dependencies
-function install_dev_tools {
- install_packages apt-transport-https ca-certificates curl
+# clone_repos() - Function that clones source repositories for a given project
+function clone_repos {
+ local project=$1
+ local repo_name=${2:-$project}
+
+ for repo in ${repos[$project]}; do
+ clone_repo $repo ${src_folders[$project]}${repo#*$repo_name}
+ done
}
# _install_bind() - Install bind utils
@@ -71,8 +87,18 @@ function install_java {
if is_package_installed openjdk-8-jdk; then
return
fi
- install_package software-properties-common
- add-apt-repository -y ppa:openjdk-r/ppa
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:openjdk-r/ppa
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
# Remove Java 7
uninstall_packages default-jre openjdk-7-jdk openjdk-7-jre openjdk-7-jre-headless
@@ -88,8 +114,18 @@ function install_maven {
return
fi
install_java
- install_package software-properties-common
- add-apt-repository -y ppa:andrei-pozolotin/maven3
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_package software-properties-common
+ add-apt-repository -y ppa:andrei-pozolotin/maven3
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
install_package maven3
# Remove Java 7
@@ -103,8 +139,9 @@ function _configure_docker_settings {
local docker_conf_backup=/tmp/docker.backup
local docker_conf=/etc/default/docker
local chameleonsocks_filename=chameleonsocks.sh
+ local max_concurrent_downloads=${1:-3}
- cp $docker_conf $docker_conf_backup
+ cp ${docker_conf} ${docker_conf_backup}
if [ $http_proxy ]; then
echo "export http_proxy=$http_proxy" >> $docker_conf
fi
@@ -119,13 +156,25 @@ function _configure_docker_settings {
port=$(echo $socks_proxy | sed -e "s/^.*://")
PROXY=$socks PORT=$port ./$chameleonsocks_filename --install
rm $chameleonsocks_filename
- cp $docker_conf_backup $docker_conf
+ cp ${docker_conf_backup} ${docker_conf}
fi
fi
- rm $docker_conf_backup
-
- echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock\"" >> $docker_conf
- usermod -a -G docker vagrant
+ rm ${docker_conf_backup}
+
+ echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" >> $docker_conf
+ usermod -aG docker $USER
+
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ service docker restart
+ sleep 10
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
}
# install_nodejs() - Download and install NodeJS
@@ -148,34 +197,52 @@ function install_python {
# _install_pip() - Install Python Package Manager
function _install_pip {
install_python
- if [ ! -f /usr/local/bin/pip ]; then
+ if ! which pip; then
curl -sL https://bootstrap.pypa.io/get-pip.py | python
fi
}
-# install_python_package() - Install a python module
+# install_python_package() - Install python modules
function install_python_package {
- local python_package=$1
+ local python_packages=$@
+
+ _install_pip
+ pip install $python_packages
+}
+
+# install_python_requirements() - Install a list of python modules defined in requirement.txt file
+function install_python_requirements {
+ local python_project_path=$1
_install_pip
- pip install $python_package
+ pushd $python_project_path
+ pip install -r requirements.txt
+ popd
}
# install_docker() - Download and install docker-engine
function install_docker {
- if is_package_installed docker-ce; then
+ if $(docker version &>/dev/null); then
return
fi
- install_package software-properties-common
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) \
- stable"
+ source /etc/os-release || source /usr/lib/os-release
+ case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ install_packages software-properties-common linux-image-extra-$(uname -r) linux-image-extra-virtual apt-transport-https ca-certificates curl
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) stable"
+ ;;
+ rhel|centos|fedora)
+ ;;
+ esac
+ update_repos
+
install_package docker-ce
_configure_docker_settings
- service docker restart
- sleep 10
}
# pull_docker_image() - Pull Docker container image from the Public Docker Registry Hub
@@ -189,6 +256,12 @@ function pull_docker_image {
fi
}
+# run_docker_image() - Starts a Docker instance
+function run_docker_image {
+ install_docker
+ docker run $@
+}
+
# install_docker_compose() - Download and install docker-engine
function install_docker_compose {
local docker_compose_version=${1:-1.12.0}
@@ -204,8 +277,8 @@ function _install_ODL {
if [ ! -d /opt/opendaylight/current ]; then
mkdir -p /opt/opendaylight/
wget "https://nexus.opendaylight.org/content/repositories/public/org/opendaylight/integration/distribution-karaf/"$odl_version"/distribution-karaf-"$odl_version".tar.gz" -P /opt/
- tar xvf "/opt/distribution-karaf-"$odl_version".tar.gz" -C /opt/
- mv "/opt/distribution-karaf-"$odl_version /opt/opendaylight/current
+ tar xvf "/opt/distribution-karaf-"$odl_version".tar.gz" -C /tmp/
+ mv "/tmp/distribution-karaf-"$odl_version /opt/opendaylight/current
rm -rf "/opt/distribution-karaf-"$odl_version".tar.gz"
fi
}
@@ -225,43 +298,87 @@ function start_ODL {
function compile_src {
local src_folder=$1
pushd $src_folder
+ local mvn_build='mvn clean install -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none'
+ if [[ "$debug" == "False" ]]; then
+ mvn_build+=" -q"
+ fi
if [ -f pom.xml ]; then
install_maven
- mvn clean install -DskipTests=true -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -Dadditionalparam=-Xdoclint:none
+ echo "Compiling $src_folder folder..."
+ eval $mvn_build
fi
popd
}
+# compile_repos() - Function that compiles source repositories for a given project
+function compile_repos {
+ local project=$1
+
+ for repo in ${repos[$project]}; do
+ compile_src ${src_folders[$project]}${repo#*$project}
+ done
+}
+
# build_docker_image() - Build Docker container image from source code
function build_docker_image {
local src_folder=$1
local profile=$2
- install_maven
install_docker
pushd $src_folder
- # Cleanup external repo
- sed -i 's|${docker.push.registry}/||g' pom.xml
- local mvn_docker="mvn clean package docker:build"
- if [ $profile ]; then
- mvn_docker+=" -P $profile"
- fi
- if [ $http_proxy ]; then
- if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ if [ -f pom.xml ]; then
+ install_maven
+ # Cleanup external repo
+ sed -i 's|${docker.push.registry}/||g' pom.xml
+ local docker_build="mvn clean package docker:build"
+ if [ $profile ]; then
+ docker_build+=" -P $profile"
fi
+ if [ $http_proxy ]; then
+ if ! grep -ql "docker.buildArg.http_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.http_proxy=$http_proxy"
+ fi
if ! grep -ql "docker.buildArg.HTTP_PROXY" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
+ docker_build+=" -Ddocker.buildArg.HTTP_PROXY=$http_proxy"
fi
- fi
- if [ $https_proxy ]; then
- if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.https_proxy=$https_proxy"
fi
- if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
- mvn_docker+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ if [ $https_proxy ]; then
+ if ! grep -ql "docker.buildArg.https_proxy" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.https_proxy=$https_proxy"
+ fi
+ if ! grep -ql "docker.buildArg.HTTPS_PROXY" pom.xml ; then
+ docker_build+=" -Ddocker.buildArg.HTTPS_PROXY=$https_proxy"
+ fi
+ fi
+ elif [ -f Dockerfile ]; then
+ # NOTE: Workaround for dmmapbc images
+ sed -i '/LocalKey/d' Dockerfile
+ sed -i "s/nexus3.onap.org\:10003\///g" Dockerfile
+ local docker_build="docker build -t $profile -f ./Dockerfile ."
+ if [ $http_proxy ]; then
+ docker_build+=" --build-arg http_proxy=$http_proxy"
+ docker_build+=" --build-arg HTTP_PROXY=$http_proxy"
+ fi
+ if [ $https_proxy ]; then
+ docker_build+=" --build-arg https_proxy=$https_proxy"
+ docker_build+=" --build-arg HTTPS_PROXY=$https_proxy"
fi
fi
- eval $mvn_docker
+ echo $docker_build
+ eval $docker_build
popd
}
+
+# mount_external_partition() - Create partition and mount the external volume
+function mount_external_partition {
+ local dev_name="/dev/$1"
+ local mount_dir=$2
+
+ sfdisk $dev_name << EOF
+;
+EOF
+ mkfs -t ext4 ${dev_name}1
+ mkdir -p $mount_dir
+ mount ${dev_name}1 $mount_dir
+ echo "${dev_name}1 $mount_dir ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
+}
diff --git a/bootstrap/vagrant-onap/lib/mr b/bootstrap/vagrant-onap/lib/mr
index f221817fa..499f53f6a 100755
--- a/bootstrap/vagrant-onap/lib/mr
+++ b/bootstrap/vagrant-onap/lib/mr
@@ -1,16 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-mr_src_folder=$git_src_folder/dcae/message-router
-
-# clone_mr_repos() - Function that clones the Message Router repositories
-function clone_mr_repos {
- clone_repo dcae/demo/startup/message-router $mr_src_folder
-}
-
# get_mr_images() - Function that retrieves the Message Router Docker images
function get_mr_images {
pull_docker_image attos/dmaap
@@ -29,7 +20,7 @@ function install_message_router {
# init_mr() - Function that initialize Message Router services
function init_mr {
if [[ "$clone_repo" == "True" ]]; then
- clone_mr_repos
+ clone_repo dcae/demo/startup/message-router $mr_src_folder
fi
if [[ "$skip_get_images" == "False" ]]; then
get_mr_images
diff --git a/bootstrap/vagrant-onap/lib/msb b/bootstrap/vagrant-onap/lib/msb
new file mode 100755
index 000000000..a14e8ea6c
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/msb
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+# _build_msb_images() - Function that creates Microservices Docker images from source code
+function _build_msb_images {
+ if [[ "$compile_repo" != "True" ]]; then
+ compile_repos "msb"
+ fi
+
+ build_docker_image $msb_src_folder/apigateway/distributions/msb-apigateway/src/main/basedocker onap/msb/msb_base
+ build_docker_image $msb_src_folder/apigateway/distributions/msb-apigateway/src/main/docker onap/msb/msb_apigateway
+ build_docker_image $msb_src_folder/discovery/distributions/msb-discovery/src/main/docker onap/msb/msb_discovery
+}
+
+# get_msb_images() - Function that retrieves the Microservices Bus images
+function get_msb_images {
+ pull_docker_image "consul:0.9.3"
+ if [[ "$build_image" == "True" ]]; then
+ _build_msb_images
+ else
+ unset docker_version
+ pull_onap_image msb/msb_base
+ pull_onap_image msb/msb_apigateway
+ pull_onap_image msb/msb_discovery
+ fi
+}
+
+# install_msb() - Downloads and configure Microservices Bus source code
+function install_msb {
+ run_docker_image -d --net=host --name msb_consul consul:0.9.3
+ run_docker_image -d --net=host --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery
+ run_docker_image -d --net=host -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway
+}
+
+# init_msb() - Function that initialize Message Router services
+function init_msb {
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "msb"
+ if [[ "$compile_repo" == "True" ]]; then
+ compile_repos "msb"
+ fi
+ fi
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_msb_images
+ if [[ "$skip_install" == "False" ]]; then
+ install_msb
+ fi
+ fi
+}
diff --git a/bootstrap/vagrant-onap/lib/mso b/bootstrap/vagrant-onap/lib/mso
index 5439d991b..44d4a7b04 100755
--- a/bootstrap/vagrant-onap/lib/mso
+++ b/bootstrap/vagrant-onap/lib/mso
@@ -1,33 +1,13 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-src_folder=$git_src_folder/mso
-mso_repos=("mso" "mso/chef-repo" "mso/docker-config" "mso/libs"
-"mso/mso-config")
-
-# clone_all_mso_repos() - Function that clones MSO source repo.
-function clone_all_mso_repos {
- for repo in ${mso_repos[@]}; do
- clone_repo $repo $src_folder${repo#*mso}
- done
-}
-
-# compile_all_mso_repos() - Function that compiles MSO source repo.
-function compile_all_mso_repos {
- for repo in ${mso_repos[@]}; do
- compile_src $src_folder${repo#*mso}
- done
-}
-
# get_mso_images() - Function that retrieves or create MSO Docker images
function get_mso_images {
if [[ "$build_image" == "True" ]]; then
export GIT_NO_PROJECT=/opt/
- compile_src $src_folder
- build_docker_image $src_folder/packages/docker docker
+ compile_src $mso_src_folder
+ build_docker_image $mso_src_folder/packages/docker docker
fi
}
@@ -82,10 +62,10 @@ EOF
export MSO_CONFIG_UPDATES
export MSO_DOCKER_IMAGE_VERSION=$docker_version
- is_package_installed docker-ce || install_docker
+ install_docker
install_docker_compose
# Deploy the environment
- pushd $src_folder/docker-config
+ pushd $mso_src_folder/docker-config
chmod +x deploy.sh
if [[ "$build_image" == "True" ]]; then
bash deploy.sh
@@ -99,9 +79,9 @@ EOF
# init_mso() - Function that initialize MSO services
function init_mso {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_mso_repos
+ clone_repos "mso"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_mso_repos
+ compile_repos "mso"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/multicloud b/bootstrap/vagrant-onap/lib/multicloud
index b4a185aaf..d2b09f2aa 100755
--- a/bootstrap/vagrant-onap/lib/multicloud
+++ b/bootstrap/vagrant-onap/lib/multicloud
@@ -1,44 +1,47 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-multicloud_src_folder=$git_src_folder/multicloud
-multicloud_repos=("multicloud" "multicloud/framework" "multicloud/openstack" \
-"multicloud/openstack/vmware" "multicloud/openstack/windriver" \
-"multicloud/azure")
-
-# clone_multicloud_repos() - Function that clones the Multi Cloud repositories
-function clone_multicloud_repos {
- for repo in ${multicloud_repos[@]}; do
- clone_repo $repo $multicloud_src_folder${repo#*multicloud}
- done
-}
+openstack_release="newton"
-# compile_multicloud_repos() -
-function compile_multicloud_repos {
- for repo in ${multicloud_repos[@]}; do
- compile_src $multicloud_src_folder${repo#*multicloud}
- done
+# _build_multicloud_images() - Function that builds docker images from source code
+function _build_multicloud_images {
+ install_docker
+ pushd ${src_folders[multicloud]}/openstack/$openstack_release
+ install_python_requirements .
+ python setup.py develop
+ #bash build_image.sh
+ popd
}
# get_multicloud_images() -
function get_multicloud_images {
- echo "pass"
+ if [[ "$build_image" == "True" ]]; then
+ _build_multicloud_images
+ else
+ pull_onap_image multicloud/openstack-$openstack_release
+ fi
}
# install_multicloud() -
function install_multicloud {
- echo "pass"
+ #pushd ${src_folders[multicloud]}/openstack/$openstack_release
+ #/opt/docker/docker-compose up -d
+ #popd
+ if [[ "$build_image" == "True" ]]; then
+ multicloud-api --port 9003 --host 0.0.0.0 &
+ else
+ docker_id=`docker images | grep onap/multicloud/openstack-$openstack_release | grep latest | awk '{print $3; exit}'`
+ docker run -d -p 0.0.0.0:9003:9003 $docker_id
+ fi
}
# init_multicloud() - Function that initialize Multi Cloud services
function init_multicloud {
if [[ "$clone_repo" == "True" ]]; then
- clone_multicloud_repos
+ clone_repos "multicloud"
if [[ "$compile_repo" == "True" ]]; then
- compile_multicloud_repos
+ compile_repos "multicloud"
fi
fi
if [[ "$skip_get_images" == "False" ]]; then
diff --git a/bootstrap/vagrant-onap/lib/oom b/bootstrap/vagrant-onap/lib/oom
new file mode 100755
index 000000000..ef7e5ede5
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/oom
@@ -0,0 +1,220 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+RANCHER_PORT=8880
+oom_delay=30
+export RANCHER_URL=http://localhost:$RANCHER_PORT
+export RANCHER_ACCESS_KEY='access_key'
+export RANCHER_SECRET_KEY='secret_key'
+
+# _install_docker() - Function that installs Docker version for Rancher
+function _install_docker {
+ if ! $(docker version &>/dev/null); then
+ curl https://releases.rancher.com/install-docker/1.12.sh | sh
+ _configure_docker_settings 15
+ fi
+}
+
+# _pull_rancher_images() - Function that retrieves Rancher images required for k8s
+function _pull_rancher_images {
+ for image in "net:v0.13.5" "k8s:v1.8.5-rancher3" \
+"lb-service-rancher:v0.7.17" "network-manager:v0.7.18" "metadata:v0.9.5" \
+"kubectld:v0.8.5" "kubernetes-agent:v0.6.6" "dns:v0.15.3" \
+"kubernetes-auth:v0.0.8" "healthcheck:v0.3.3" "etcd:v2.3.7-13" \
+"etc-host-updater:v0.0.3" "net:holder"; do
+ pull_docker_image rancher/$image &
+ done
+}
+
+# _pull_k8s_images() - Function that retrieves Google k8s images
+function _pull_k8s_images {
+ for image in "kubernetes-dashboard-amd64:v1.7.1" \
+"k8s-dns-sidecar-amd64:1.14.5" "k8s-dns-kube-dns-amd64:1.14.5" \
+"k8s-dns-dnsmasq-nanny-amd64:1.14.5" "heapster-influxdb-amd64:v1.3.3" \
+"heapster-grafana-amd64:v4.4.3" "heapster-amd64:v1.4.0" "pause-amd64:3.0"; do
+ pull_docker_image gcr.io/google_containers/$image &
+ done
+}
+
+# _install_rancher() - Function that installs Rancher CLI and container
+function _install_rancher {
+ local rancher_version=v0.6.5
+ local rancher_server_version=v1.6.10
+ local rancher_server=rancher/server:$rancher_server_version
+
+ if [ ! -d /opt/rancher/current ]; then
+ mkdir -p /opt/rancher/current
+ wget https://github.com/rancher/cli/releases/download/$rancher_version/rancher-linux-amd64-$rancher_version.tar.gz
+ tar -xzf rancher-linux-amd64-$rancher_version.tar.gz -C /tmp
+ mv /tmp/rancher-$rancher_version/rancher /opt/rancher/current/
+ fi
+
+ _install_docker
+ pull_docker_image $rancher_server
+ run_docker_image -d --restart=unless-stopped -p $RANCHER_PORT:8080 $rancher_server
+ while true; do
+ if curl --fail -X GET $RANCHER_URL; then
+ break
+ fi
+ echo "waiting for racher"
+ sleep $oom_delay
+ done
+}
+
+# _install_kubernetes() - Function that deploys kubernetes via RancherOS host registration
+function _install_kubernetes {
+ local rancher_agent_version=v1.2.7
+ local rancher_agent=rancher/agent:$rancher_agent_version
+
+ _install_rancher
+
+ _pull_rancher_images
+ _pull_k8s_images
+ pull_docker_image $rancher_agent
+ _wait_docker_pull
+
+ pushd /opt/rancher/current/
+ export RANCHER_ENVIRONMENT=`./rancher env create -t kubernetes onap_on_kubernetes`
+ popd
+
+ install_python_package rancher-agent-registration
+ export no_proxy=$no_proxy,$IP_ADDRESS
+ rancher-agent-registration --host-ip $IP_ADDRESS --url http://$IP_ADDRESS:$RANCHER_PORT --environment $RANCHER_ENVIRONMENT --key $RANCHER_ACCESS_KEY --secret $RANCHER_SECRET_KEY
+}
+
+# _install_kubectl() - Function that installs kubectl as client for kubernetes
+function _install_kubectl {
+ if ! $(kubectl version &>/dev/null); then
+ rm -rf ~/.kube
+ curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+ chmod +x ./kubectl
+ mv ./kubectl /usr/local/bin/kubectl
+ mkdir ~/.kube
+ pushd ~/.kube
+ python /var/onap/files/kubectl_config_generator.py
+ popd
+ fi
+}
+
+# _install_helm() - Function that install Kubernetes Package Manager
+function _install_helm {
+ local helm_version=v2.3.0
+
+ if ! $(helm version &>/dev/null); then
+ wget http://storage.googleapis.com/kubernetes-helm/helm-${helm_version}-linux-amd64.tar.gz
+ tar -zxvf helm-${helm_version}-linux-amd64.tar.gz -C /tmp
+ mv /tmp/linux-amd64/helm /usr/local/bin/helm
+ helm init
+ fi
+}
+
+# _pull_images_from_yaml() - Function that parses a yaml file and pull their images
+function _pull_images_from_yaml_file {
+ local values_file=$1
+ local prefix=$2
+ local s='[[:space:]]*'
+ local w='[a-zA-Z0-9_]*'
+ fs=`echo @|tr @ '\034'`
+
+ for line in $(sed -ne "s|^\($s\):|\1|" \
+-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
+-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $values_file |
+awk -F$fs '{
+indent = length($1)/2;
+vname[indent] = $2;
+for (i in vname) {
+ if (i > indent) {
+ delete vname[i]}
+ }
+ if (length($3) > 0) {
+ vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])(".")}
+ printf("%s%s%s=%s\n", "'$prefix'",vn, $2, $3);
+ }
+}' | grep image); do
+ echo $line
+ if echo $line | grep -q Version ; then
+ pull_docker_image "$image_name:$(echo $line | awk -F "=" '{print $2}')" &
+ else
+ image_name=`echo ${line#*=}`
+ if [[ ${image_name#*${nexus_docker_repo:-nexus3.onap.org:10001}} == *:* ]]; then
+ pull_docker_image $image_name &
+ else
+ pull_docker_image $image_name:latest
+ fi
+ fi
+ done
+}
+
+# _wait_docker_pull() - Function that waits for all docker pull processes
+function _wait_docker_pull {
+ local counter=150
+
+ while [ $(ps -ef | grep "docker pull" | wc -l) -gt 1 ]; do
+ sleep $oom_delay
+ counter=$((counter - 1))
+ if [ "$counter" -eq 0 ]; then
+ break
+ fi
+ done
+}
+
+# get_oom_images() - Function that retrieves ONAP images from official hub
+function get_oom_images {
+ if [[ "$build_image" == "True" ]]; then
+ # TODO(electrocucaracha): Create a function for calling the build docker function of every ONAP project
+ echo "Not Implemented"
+ else
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ docker_openecomp_login
+ for values_file in `find ${src_folders[oom]}/kubernetes -name values.yaml -type f`; do
+ _pull_images_from_yaml_file $values_file
+ done
+ docker logout
+ _wait_docker_pull
+ fi
+}
+
+# _install_oom() - Function that clones OOM and deploys ONAP
+function install_oom {
+ if [[ "$clone_repo" != "True" ]]; then
+ clone_repos "oom"
+ fi
+ pushd ${src_folders[oom]}/kubernetes/oneclick
+ source setenv.bash
+
+ pushd ${src_folders[oom]}/kubernetes/config
+ cp onap-parameters-sample.yaml onap-parameters.yaml
+ ./createConfig.sh -n onap
+ popd
+
+ for app in consul msb mso message-router sdnc vid robot portal policy appc aai sdc dcaegen2 log cli multicloud clamp vnfsdk uui aaf vfc kube2msb; do
+ ./createAll.bash -n onap -a $app
+ done
+ popd
+}
+
+# init_oom() - Function that deploys ONAP using OOM
+function init_oom {
+ mount_external_partition sda /var/lib/docker/
+ _install_kubernetes
+ _install_kubectl
+ _install_helm
+ if [[ "$clone_repo" == "True" ]]; then
+ clone_repos "oom"
+ fi
+
+ if [[ "$skip_get_images" == "False" ]]; then
+ get_oom_images
+ if [[ "$skip_install" == "False" ]]; then
+ until kubectl cluster-info; do
+ echo "waiting for kubernetes host"
+ sleep $oom_delay
+ done
+ install_oom
+ fi
+ fi
+}
diff --git a/bootstrap/vagrant-onap/lib/openstack b/bootstrap/vagrant-onap/lib/openstack
new file mode 100755
index 000000000..205d7ae80
--- /dev/null
+++ b/bootstrap/vagrant-onap/lib/openstack
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+source /var/onap/functions
+
+kolla_config=/etc/kolla
+kolla_build=$kolla_config/kolla-build.conf
+kolla_passwords=$kolla_config/passwords.yml
+kolla_globals=$kolla_config/globals.yml
+kolla_inventory=/var/onap/files/all-in-one
+
+# install_dependencies() - Function that installs Kolla-Ansible requirements
+function install_dependencies {
+ install_docker
+
+ mkdir -p /etc/systemd/system/docker.service.d
+ tee /etc/systemd/system/docker.service.d/kolla.conf <<-'EOF'
+[Service]
+MountFlags=shared
+EOF
+ systemctl daemon-reload
+ systemctl restart docker
+
+ install_python_package ansible docker kolla-ansible python-openstackclient
+}
+
+# configure_deploy() - Function that modifies configuration files
+function configure_deploy {
+ local network_id=$1
+ local enable_opendaylight=${2-False}
+ local openstack_services="main = ceilometer,cinder,glance,heat,horizon,isci,keystone,neutron,nova-,swift"
+ nic=$(ip route get $network_id | awk '{ print $4; exit }')
+ ip_address=$(ip route get $network_id | awk '{ print $6; exit }')
+ internal_vip_address=$(get_next_ip $ip_address)
+
+ mkdir -p $kolla_config
+ cp /var/onap/files/globals.yml $kolla_globals
+ cp /var/onap/files/passwords.yml $kolla_passwords
+ cp /var/onap/files/kolla-build.conf $kolla_build
+ kolla-genpwd
+ echo "network_interface: \"$nic\"" >> $kolla_globals
+ echo "kolla_internal_vip_address: \"$internal_vip_address\"" >> $kolla_globals
+ echo "api_interface: \"{{ network_interface }}\"" >> $kolla_globals
+ if [[ $enable_opendaylight == True ]]; then
+ echo "enable_opendaylight: \"yes\"" >> $kolla_globals
+ openstack_services+=",opendaylight"
+ fi
+ echo $openstack_services >> $kolla_build
+
+ echo "$ip_address $(hostname)" >> /etc/hosts
+}
+
+# get_openstack_images() - Function that retrieves or builds docker images
+function get_openstack_images {
+ if [[ "$build_image" == "True" ]]; then
+ install_python_package kolla
+ kolla-build --config-file $kolla_build
+ else
+ kolla-ansible pull -i $kolla_inventory
+ fi
+}
+
+# deploy_openstack() - Function that provisions an OpenStack deployment
+function deploy_openstack {
+ install_dependencies
+ configure_deploy ${1:-"192.168.53.0"} "True"
+
+ get_openstack_images
+ kolla-ansible deploy -i $kolla_inventory
+ kolla-ansible post-deploy
+ echo "source /etc/kolla/admin-openrc.sh" >> ${HOME}/.bashrc
+}
diff --git a/bootstrap/vagrant-onap/lib/policy b/bootstrap/vagrant-onap/lib/policy
index be4cb2c18..ad982ad95 100755
--- a/bootstrap/vagrant-onap/lib/policy
+++ b/bootstrap/vagrant-onap/lib/policy
@@ -1,28 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-policy_src_folder=$git_src_folder/policy
-policy_repos=("policy/api" "policy/common" "policy/docker" \
-"policy/drools-applications" "policy/drools-pdp" "policy/engine" \
-"policy/gui" "policy/pap" "policy/pdp")
-
-# clone_all_policy_repos() - Function that clones Policy source repo.
-function clone_all_policy_repos {
- for repo in ${policy_repos[@]}; do
- clone_repo $repo $policy_src_folder${repo#*policy}
- done
-}
-
-# compile_all_policy_repos() - Function that compiles Policy source repo.
-function compile_all_policy_repos {
- for repo in ${policy_repos[@]}; do
- compile_src $policy_src_folder${repo#*policy}
- done
-}
-
# _build_policy_images() - Function that build Policy docker images from source code
function _build_policy_images {
compile_src $policy_src_folder/docker
@@ -61,9 +40,9 @@ function install_policy {
# init_policy() - Function that initialize Policy services
function init_policy {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_policy_repos
+ clone_repos "policy"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_policy_repos
+ compile_repos "policy"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/portal b/bootstrap/vagrant-onap/lib/portal
index b5e768b2c..fe5469822 100755
--- a/bootstrap/vagrant-onap/lib/portal
+++ b/bootstrap/vagrant-onap/lib/portal
@@ -1,33 +1,28 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-portal_src_folder=$git_src_folder/portal
-portal_repos=("portal" "portal/sdk" "ecompsdkos" "ui/dmaapbc")
-
# clone_all_portal_repos() - Function that clones Portal source repo.
function clone_all_portal_repos {
- for repo in ${portal_repos[@]}; do
+ for repo in ${repos[portal]}; do
if [[ "$repo" == "ui/dmaapbc" ]];then
prefix="ui"
else
prefix="portal"
fi
- clone_repo $repo $portal_src_folder/${repo#*$prefix}
+ clone_repo $repo ${src_folders[portal]}/${repo#*$prefix}
done
}
# compile_all_portal_repos() - Function that compiles Portal source repo.
function compile_all_portal_repos {
- for repo in ${portal_repos[@]}; do
+ for repo in ${repos[portal]}; do
if [[ "$repo" == "ui/dmaapbc" ]];then
prefix="ui"
else
prefix="portal"
fi
- compile_src $portal_src_folder/${repo#*$prefix}
+ compile_src ${src_folders[portal]}/${repo#*$prefix}
done
}
@@ -35,7 +30,7 @@ function compile_all_portal_repos {
function _build_portal_images {
install_maven
- pushd $portal_src_folder/deliveries
+ pushd ${src_folders[portal]}/deliveries
chmod +x *.sh
export MVN=$(which mvn)
export GLOBAL_SETTINGS_FILE=/usr/share/maven3/conf/settings.xml
@@ -66,7 +61,7 @@ function install_portal {
docker rm -f ecompdb_portal
docker rm -f 1610-1
- pushd $portal_src_folder/deliveries
+ pushd ${src_folders[portal]}/deliveries
mkdir -p /PROJECT/OpenSource/UbuntuEP/logs
install_package unzip
unzip -o etc.zip -d /PROJECT/OpenSource/UbuntuEP/
diff --git a/bootstrap/vagrant-onap/lib/robot b/bootstrap/vagrant-onap/lib/robot
index ebcca6e6b..90fbcf8c0 100755
--- a/bootstrap/vagrant-onap/lib/robot
+++ b/bootstrap/vagrant-onap/lib/robot
@@ -1,39 +1,15 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-robot_src_folder=$git_src_folder/testsuite
-robot_repos=("heatbridge" "properties" "python-testing-utils")
-
-# clone_robot_repos() - Function that clones Robot source repo.
-function clone_robot_repos {
- clone_repo testsuite $robot_src_folder
-
- for dirc in ${robot_repos[@]}; do
- clone_repo testsuite/$dirc $robot_src_folder/$dirc
- done
-}
-
-# compile_robot_repos() - Function that compile Robot source repo.
-function compile_robot_repos {
- install_python_package tox
- compile_src $robot_src_folder
-
- for dirc in ${robot_repos[@]}; do
- compile_src $robot_src_folder/$dirc
- done
-}
-
# _setup_ete_folder() - Create and copy ete folder structure
function _setup_ete_folder {
mkdir -p /opt/eteshare/config
- cp $src_folder/integration_* /opt/eteshare/config
- cp $src_folder/vm_config2robot.sh /opt/eteshare/config
- cp $src_folder/ete.sh /opt
- cp $src_folderdemo.sh /opt
+ cp $robot_src_folder/integration_* /opt/eteshare/config
+ cp $robot_src_folder/vm_config2robot.sh /opt/eteshare/config
+ cp $robot_src_folder/ete.sh /opt
+ cp $robot_src_folder/demo.sh /opt
chmod +x /opt/ete.sh
chmod +x /opt/demo.sh
@@ -47,16 +23,16 @@ function get_robot_images {
# install_robot() - Run Robot services
function install_robot {
docker rm -f openecompete_container
- docker run -d --name openecompete_container -v /opt/eteshare:/share -p 88:88 $nexus_docker_repo/openecomp/testsuite:$docker_version
+ run_docker_image -d --name openecompete_container -v /opt/eteshare:/share -p 88:88 $nexus_docker_repo/openecomp/testsuite:$docker_version
}
# init_robot() - Function that initialize Robot services
function init_robot {
if [[ "$clone_repo" == "True" ]]; then
- clone_robot_repos
+ clone_repos "robot" "testsuite"
_setup_ete_folder
if [[ "$compile_repo" == "True" ]]; then
- compile_robot_repos
+ compile_repos "robot"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/sdc b/bootstrap/vagrant-onap/lib/sdc
index a21e2a418..fee583d79 100755
--- a/bootstrap/vagrant-onap/lib/sdc
+++ b/bootstrap/vagrant-onap/lib/sdc
@@ -1,32 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-sdc_src_folder=$git_src_folder/sdc
-sdc_repos=("sdc" "sdc/jtosca" "sdc/sdc-distribution-client"
-"sdc/sdc-docker-base" "sdc/sdc-titan-cassandra" "sdc/sdc-tosca"
-"sdc/sdc-vnfdesign" "sdc/sdc-workflow-designer" "sdc/sdc_common")
-
-# _mount_external_partition() - Create partition and mount the external volume
-function _mount_external_partition {
- cat <<EOL > /tmp/sdc_ext_volume_partitions.txt
-# partition table of /dev/sdb
-unit: sectors
-
-/dev/sdb1 : start= 2048, size=209713152, Id=83
-/dev/sdb2 : start= 0, size= 0, Id= 0
-/dev/sdb3 : start= 0, size= 0, Id= 0
-/dev/sdb4 : start= 0, size= 0, Id= 0
-EOL
- sfdisk --force /dev/sdb < /tmp/sdc_ext_volume_partitions.txt
- mkfs -t ext4 /dev/sdb1
- mkdir -p /data
- mount /dev/sdb1 /data
- echo "/dev/sdb1 /data ext4 errors=remount-ro,noatime,barrier=0 0 1" >> /etc/fstab
-}
-
# _init_data_folders() - Function that initialize the data folders
function _init_data_folders {
mkdir -p /data/environments
@@ -47,20 +22,6 @@ alias health='/data/scripts/docker_health.sh'
EOL
}
-# clone_all_sdc_repos() - Function that clones SDC source repo.
-function clone_all_sdc_repos {
- for repo in ${sdc_repos[@]}; do
- clone_repo $repo $sdc_src_folder${repo#*sdc}
- done
-}
-
-# compile_all_sdc_repos() - Function that compiles SDC source repo.
-function compile_all_sdc_repos {
- for repo in ${sdc_repos[@]}; do
- compile_src $sdc_src_folder${repo#*sdc}
- done
-}
-
# get_sdc_images() - Function that retrieves the SDC docker images
function get_sdc_images {
if [[ "$build_image" == "True" ]]; then
@@ -103,11 +64,11 @@ function install_sdc {
# init_sdc() - Function that initialize SDC services
function init_sdc {
- _mount_external_partition
+ mount_external_partition sdb /data/
if [[ "$clone_repo" == "True" ]]; then
- clone_all_sdc_repos
+ clone_repos "sdc"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_sdc_repos
+ compile_repos "sdc"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/sdnc b/bootstrap/vagrant-onap/lib/sdnc
index 5faca942e..a8cf756b4 100755
--- a/bootstrap/vagrant-onap/lib/sdnc
+++ b/bootstrap/vagrant-onap/lib/sdnc
@@ -1,24 +1,11 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
source /var/onap/ccsdk
-sdnc_src_folder=$git_src_folder/openecomp/sdnc
-sdnc_repos=("sdnc/adaptors" "sdnc/architecture" "sdnc/core" "sdnc/features" \
-"sdnc/northbound" "sdnc/oam" "sdnc/parent" "sdnc/plugins")
-
-# clone_all_sdnc_repos() - Function that clones SDNC source repo.
-function clone_all_sdnc_repos {
- for repo in ${sdnc_repos[@]}; do
- clone_repo $repo $sdnc_src_folder${repo#*sdnc}
- done
-}
-
# compile_all_sdnc_repos() - Function that compiles SDNC source repo.
function compile_all_sdnc_repos {
- for repo in ${sdnc_repos[@]}; do
+ for repo in ${repos[sdnc]}; do
if [[ "$repo" == "sdnc/core" ]]; then
compile_src $sdnc_src_folder/core/rootpom
fi
@@ -32,7 +19,6 @@ function _build_sdnc_images {
get_ccsdk_images
install_package unzip
- clone_repo sdnc/oam $folder
# The OAM code depends on all the SDNC repos which should be downloaded and compiled first
if [[ "$compile_repo" != "True" ]]; then
compile_src $folder
@@ -56,7 +42,6 @@ function get_sdnc_images {
# install_sdnc() - Download and install SDNC services from source code
function install_sdnc {
- clone_repo sdnc/oam $sdnc_src_folder/oam
pushd $sdnc_src_folder/oam/installation/src/main/yaml
install_docker_compose
/opt/docker/docker-compose up -d
@@ -66,7 +51,7 @@ function install_sdnc {
# init_sdnc() - Function that initialize SDNC services
function init_sdnc {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_sdnc_repos
+ clone_repos "sdnc"
if [[ "$compile_repo" == "True" ]]; then
compile_all_sdnc_repos
fi
diff --git a/bootstrap/vagrant-onap/lib/vfc b/bootstrap/vagrant-onap/lib/vfc
index fc26af282..2b5407ca3 100755
--- a/bootstrap/vagrant-onap/lib/vfc
+++ b/bootstrap/vagrant-onap/lib/vfc
@@ -1,23 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vfc_src_folder=$git_src_folder/vfc
-vfc_repos=("vfc/gvnfm" "vfc/gvnfm/vnflcm" "vfc/gvnfm/vnfmgr" \
-"vfc/gvnfm/vnfres" "vfc/nfvo" "vfc/nfvo/catalog" "vfc/nfvo/driver" \
-"vfc/nfvo/driver/ems" "vfc/nfvo/driver/sfc" "vfc/nfvo/driver/vnfm" \
-"vfc/nfvo/driver/vnfm/gvnfm" "vfc/nfvo/driver/vnfm/svnfm" "vfc/nfvo/lcm" \
-"vfc/nfvo/resmanagement" "vfc/nfvo/wfengine")
-
-# clone_all_vfc_repos() - Function that clones VF-C source repo.
-function clone_all_vfc_repos {
- for repo in ${vfc_repos[@]}; do
- clone_repo $repo $vfc_src_folder${repo#*vfc}
- done
-}
-
# compile_all_vfc_repos() - Function that compiles VF-C source repo.
function compile_all_vfc_repos {
install_python_package tox
@@ -85,11 +69,11 @@ function install_vfc {
vnfres_image=`docker images | grep vnfres | grep latest| awk '{print $1 ":" $2}'`
gvnfmdriver_image=`docker images | grep gvnfmdriver | grep latest| awk '{print $1 ":" $2}'`
- docker run -d --name vfc-nslcm -p 8403:8403 -e MSB_ADDR=127.0.0.1 $nslcm_image
- docker run -d --name vfc-vnflcm -p 8801:8801 -e MSB_ADDR=127.0.0.1 $vnflcm_image
- docker run -d --name vfc-vnfmgr -p 8803:8803 -e MSB_ADDR=127.0.0.1 $vnfmgr_image
- docker run -d --name vfc-vnfres -p 8802:8802 -e MSB_ADDR=127.0.0.1 $vnfres_image
- docker run -d --name vfc-gvnfmdriver -p 8484:8484 -e MSB_ADDR=127.0.0.1 $gvnfmdriver_image
+ run_docker_image -d --name vfc-nslcm -p 8403:8403 -e MSB_ADDR=127.0.0.1 $nslcm_image
+ run_docker_image -d --name vfc-vnflcm -p 8801:8801 -e MSB_ADDR=127.0.0.1 $vnflcm_image
+ run_docker_image -d --name vfc-vnfmgr -p 8803:8803 -e MSB_ADDR=127.0.0.1 $vnfmgr_image
+ run_docker_image -d --name vfc-vnfres -p 8802:8802 -e MSB_ADDR=127.0.0.1 $vnfres_image
+ run_docker_image -d --name vfc-gvnfmdriver -p 8484:8484 -e MSB_ADDR=127.0.0.1 $gvnfmdriver_image
# TODO(sshank): Run other VFC component docker images.
}
@@ -99,7 +83,7 @@ function init_vfc {
install_package libmysqlclient-dev
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vfc_repos
+ clone_repos "vfc"
if [[ "$compile_repo" == "True" ]]; then
compile_all_vfc_repos
fi
diff --git a/bootstrap/vagrant-onap/lib/vid b/bootstrap/vagrant-onap/lib/vid
index f99fd6042..f5ca9d293 100755
--- a/bootstrap/vagrant-onap/lib/vid
+++ b/bootstrap/vagrant-onap/lib/vid
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vid_src_folder=$git_src_folder/vid
-vid_repos=("vid" "vid/asdcclient")
-
-# clone_all_vid_repos() - Function that clones VID source code.
-function clone_all_vid_repos {
- for repo in ${vid_repos[@]}; do
- clone_repo $repo $vid_src_folder${repo#*vid}
- done
-}
-
-# compile_all_vid_repos() - Function that compiles VID source repo.
-function compile_all_vid_repos {
- for repo in ${vid_repos[@]}; do
- compile_src $vid_src_folder${repo#*vid}
- done
-}
-
# _build_vid_images() - Function that builds VID docker images
function _build_vid_images {
if [[ "$compile_repo" != "True" ]]; then
@@ -46,16 +27,16 @@ function install_vid {
docker rm -f vid-mariadb
docker rm -f vid-server
- docker run --name vid-mariadb -e MYSQL_DATABASE=vid_openecomp -e MYSQL_USER=vidadmin -e MYSQL_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -e MYSQL_ROOT_PASSWORD=LF+tp_1WqgSY -v /opt/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf -v /opt/vid/lf_config/vid-pre-init.sql:/docker-entrypoint-initdb.d/vid-pre-init.sql -v /var/lib/mysql -d mariadb:10
- docker run -e VID_MYSQL_DBNAME=vid_openecomp -e VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U --name vid-server -p 8080:8080 --link vid-mariadb:vid-mariadb-docker-instance -d $vid_image
+ run_docker_image --name vid-mariadb -e MYSQL_DATABASE=vid_openecomp -e MYSQL_USER=vidadmin -e MYSQL_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -e MYSQL_ROOT_PASSWORD=LF+tp_1WqgSY -v /opt/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf -v /opt/vid/lf_config/vid-pre-init.sql:/docker-entrypoint-initdb.d/vid-pre-init.sql -v /var/lib/mysql -d mariadb:10
+ run_docker_image -e VID_MYSQL_DBNAME=vid_openecomp -e VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U --name vid-server -p 8080:8080 --link vid-mariadb:vid-mariadb-docker-instance -d $vid_image
}
# init_vid() - Function that initializes VID services
function init_vid {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vid_repos
+ clone_repos "vid"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vid_repos
+ compile_repos "vid"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/vnfsdk b/bootstrap/vagrant-onap/lib/vnfsdk
index c23ed8581..58e3a9aa2 100644..100755
--- a/bootstrap/vagrant-onap/lib/vnfsdk
+++ b/bootstrap/vagrant-onap/lib/vnfsdk
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vnfsdk_src_folder=$git_src_folder/vnfsdk
-vnfsdk_repos=("vnfsdk/compliance" "vnfsdk/functest" "vnfsdk/lctest" "vnfsdk/model" "vnfsdk/pkgtools" "vnfsdk/refrepo" "vnfsdk/validation")
-
-# clone_all_vnfsdk_repos() - Function that clones vnfsdk source repo.
-function clone_all_vnfsdk_repos {
- for repo in ${vnfsdk_repos[@]}; do
- clone_repo $repo $vnfsdk_src_folder${repo#*vnfsdk}
- done
-}
-
-# compile_all_vnfsdk_repos - Function that builds vnfsdk source repo
-function compile_all_vnfsdk_repos {
- for repo in ${vnfsdk_repos[@]}; do
- compile_src $vnfsdk_src_folder${repo#*vnfsdk}
- done
-}
-
# _build_vnfsdk_images() - Builds VNFSDK images from source code
function _build_vnfsdk_images {
install_package unzip
@@ -51,9 +32,9 @@ function install_vnfsdk {
# init_vnfsdk() - Init VNFSDK services
function init_vnfsdk {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vnfsdk_repos
+ clone_repos "vnfsdk"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vnfsdk_repos
+ compile_repos "vnfsdk"
fi
fi
diff --git a/bootstrap/vagrant-onap/lib/vvp b/bootstrap/vagrant-onap/lib/vvp
index 588f32b87..f24431ee6 100644..100755
--- a/bootstrap/vagrant-onap/lib/vvp
+++ b/bootstrap/vagrant-onap/lib/vvp
@@ -1,26 +1,7 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/functions
-vvp_src_folder=$git_src_folder/vvp
-vvp_repos=("vvp/ansible-ice-bootstrap" "vvp/cms" "vvp/devkit" "vvp/documentation" "vvp/engagementmgr" "vvp/gitlab" "vvp/image-scanner" "vvp/jenkins" "vvp/portal" "vvp/postgresql" "vvp/test-engine" "vvp/validation-scripts")
-
-# clone_all_vvp_repos() - Function that clones vvp source repo.
-function clone_all_vvp_repos {
- for repo in ${vvp_repos[@]}; do
- clone_repo $repo $vvp_src_folder${repo#*vvp}
- done
-}
-
-# compile_all_vvp_repos - Function that builds vvp source repo
-function compile_all_vvp_repos {
- for repo in ${vvp_repos[@]}; do
- compile_src $vvp_src_folder${repo#*vvp}
- done
-}
-
# _build_vvp_images() - Builds VVP images from source code
function _build_vvp_images {
echo "pass"
@@ -44,9 +25,9 @@ function install_vvp {
# init_vvp() - Init VVP services
function init_vvp {
if [[ "$clone_repo" == "True" ]]; then
- clone_all_vvp_repos
+ clone_repos "vvp"
if [[ "$compile_repo" == "True" ]]; then
- compile_all_vvp_repos
+ compile_repos "vvp"
fi
fi
diff --git a/bootstrap/vagrant-onap/tests/_test_base b/bootstrap/vagrant-onap/tests/_test_base
index 7d0415a44..b30632d26 100644..100755
--- a/bootstrap/vagrant-onap/tests/_test_base
+++ b/bootstrap/vagrant-onap/tests/_test_base
@@ -1,11 +1,17 @@
#!/bin/bash
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
source /var/onap_tests/asserts
+source /var/onap/commons
# main() - Starting point for Unit Tests
function main {
local covered_functions=("$@")
+ update_repos
len=${#covered_functions[@]}
echo "1..$len"
for i in ${!covered_functions[@]}; do
@@ -15,7 +21,7 @@ function main {
echo "ok $((i+1)) - test_${covered_functions[$i]}"
# Teardown process
- if ! is_package_installed docker-ce; then
+ if is_package_installed docker-ce; then
docker images -q | xargs docker rmi -f
fi
dpkg --get-selections > installed-software_new
diff --git a/bootstrap/vagrant-onap/tests/asserts b/bootstrap/vagrant-onap/tests/asserts
index 02c269b4c..8eda3ce25 100755
--- a/bootstrap/vagrant-onap/tests/asserts
+++ b/bootstrap/vagrant-onap/tests/asserts
@@ -1,9 +1,19 @@
#!/bin/bash
-set -o xtrace
-
source /var/onap/commons
+# asserts_http_status_code() - Function that determines if an HTTP status code is retrieved from URL
+function asserts_http_status_code {
+ local url=$1
+ local expected_code=${2:-"200"}
+
+ code=$(curl -I $url | head -n 1 | cut -d$' ' -f2)
+ local error_msg=${3:-"The URL $url responded with $code status code"}
+ if [[ "$code" != "$expected_code" ]]; then
+ raise_error $error_msg
+ fi
+}
+
# asserts_process() - Function that verifies if a specific process is running
function asserts_process {
local process=$1
diff --git a/bootstrap/vagrant-onap/tests/test_aai b/bootstrap/vagrant-onap/tests/test_aai
index 10777d678..65917e092 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_aai
+++ b/bootstrap/vagrant-onap/tests/test_aai
@@ -28,50 +28,50 @@ function test_install_haproxy {
# test_clone_all_aai_repos() - Verify that all the AAI Repos are cloned
function test_clone_all_aai_repos {
- clone_all_aai_repos
-
- asserts_file_exist $aai_src_folder/aai-common/pom.xml
- asserts_file_exist $aai_src_folder/aai-config/cookbooks/aai-resources/runlist-aai-resources.json
- asserts_file_exist $aai_src_folder/aai-data/environments/solo.json
- asserts_file_exist $aai_src_folder/aai-service/pom.xml
- asserts_file_exist $aai_src_folder/babel/README.md
- asserts_file_exist $aai_src_folder/champ/pom.xml
- asserts_file_exist $aai_src_folder/data-router/pom.xml
- asserts_file_exist $aai_src_folder/esr-gui/pom.xml
- asserts_file_exist $aai_src_folder/esr-server/pom.xml
- asserts_file_exist $aai_src_folder/gizmo/pom.xml
- asserts_file_exist $aai_src_folder/logging-service/pom.xml
- asserts_file_exist $aai_src_folder/model-loader/pom.xml
- asserts_file_exist $aai_src_folder/resources/pom.xml
- asserts_file_exist $aai_src_folder/rest-client/pom.xml
- asserts_file_exist $aai_src_folder/router-core/pom.xml
- asserts_file_exist $aai_src_folder/search-data-service/pom.xml
- asserts_file_exist $aai_src_folder/sparky-be/pom.xml
- asserts_file_exist $aai_src_folder/sparky-fe/pom.xml
- asserts_file_exist $aai_src_folder/test-config/docker-compose-app.yml
- asserts_file_exist $aai_src_folder/traversal/pom.xml
+ clone_repos "aai"
+
+ asserts_file_exist ${src_folders[aai]}/aai-common/pom.xml
+ asserts_file_exist ${src_folders[aai]}/aai-config/cookbooks/aai-resources/runlist-aai-resources.json
+ asserts_file_exist ${src_folders[aai]}/aai-data/environments/solo.json
+ asserts_file_exist ${src_folders[aai]}/aai-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/babel/README.md
+ asserts_file_exist ${src_folders[aai]}/champ/pom.xml
+ asserts_file_exist ${src_folders[aai]}/data-router/pom.xml
+ asserts_file_exist ${src_folders[aai]}/esr-gui/pom.xml
+ asserts_file_exist ${src_folders[aai]}/esr-server/pom.xml
+ asserts_file_exist ${src_folders[aai]}/gizmo/pom.xml
+ asserts_file_exist ${src_folders[aai]}/logging-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/model-loader/pom.xml
+ asserts_file_exist ${src_folders[aai]}/resources/pom.xml
+ asserts_file_exist ${src_folders[aai]}/rest-client/pom.xml
+ asserts_file_exist ${src_folders[aai]}/router-core/pom.xml
+ asserts_file_exist ${src_folders[aai]}/search-data-service/pom.xml
+ asserts_file_exist ${src_folders[aai]}/sparky-be/pom.xml
+ asserts_file_exist ${src_folders[aai]}/sparky-fe/pom.xml
+ asserts_file_exist ${src_folders[aai]}/test-config/docker-compose-app.yml
+ asserts_file_exist ${src_folders[aai]}/traversal/pom.xml
}
# test_compile_aai_repos() - Verify that all the AAI Repositories compile properly
function test_compile_aai_repos {
- clone_all_aai_repos
+ clone_repos "aai"
compile_aai_repos
for common in annotations auth core schema utils; do
- asserts_file_exist $aai_src_folder/aai-common/aai-$common/target/aai-$common-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[aai]}/aai-common/aai-$common/target/aai-$common-1.1.0-SNAPSHOT.jar
done
for service in common-logging eelf-logging logging-api; do
- asserts_file_exist $aai_src_folder/logging-service/$service/target/$service-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[aai]}/logging-service/$service/target/$service-1.1.0-SNAPSHOT.jar
done
- asserts_file_exist $aai_src_folder/resources/aai-resources/target/aai-resources.jar
- asserts_file_exist $aai_src_folder/traversal/aai-traversal/target/traversal.jar
+ asserts_file_exist ${src_folders[aai]}/resources/aai-resources/target/aai-resources.jar
+ asserts_file_exist ${src_folders[aai]}/traversal/aai-traversal/target/traversal.jar
}
# test_setup_titan() - Verify that Titan Cassandra DB is up and running
function test_setup_titan {
- clone_all_aai_repos
+ clone_repos "aai"
install_hadoop
setup_titan
@@ -80,7 +80,7 @@ function test_setup_titan {
# test_start_aai_microservices() - Verify that AAI Resources and Traversal images works
function test_start_aai_microservices {
- clone_all_aai_repos
+ clone_repos "aai"
start_aai_microservices
# TODO(electrocucaracha): Investigate how to run AAI microservices in background
@@ -95,7 +95,7 @@ function test_install_hbase {
# test_install_ajsc_aai() - Verify that AJSC AAI service is up and running properly
function test_install_ajsc_aai {
- clone_all_aai_repos
+ clone_repos "aai"
install_ajsc_aai
asserts_image_running openecomp/ajsc-aai
@@ -103,7 +103,7 @@ function test_install_ajsc_aai {
# test_install_model_loader() - Verify that Model AAI service is up and running properly
function test_install_model_loader {
- clone_all_aai_repos
+ clone_repos "aai"
install_model_loader
asserts_image openecomp/model-loader
diff --git a/bootstrap/vagrant-onap/tests/test_appc b/bootstrap/vagrant-onap/tests/test_appc
index 2290f16a2..f567d7f1b 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_appc
+++ b/bootstrap/vagrant-onap/tests/test_appc
@@ -9,69 +9,71 @@ covered_functions=(
# test_clone_all_appc_repos() - Verify that the APPC source code is cloned
function test_clone_all_appc_repos {
- clone_all_appc_repos
+ clone_repos "appc"
- asserts_file_exist $appc_src_folder/pom.xml
- asserts_file_exist $appc_src_folder/deployment/pom.xml
+ asserts_file_exist ${src_folders[appc]}/pom.xml
+ asserts_file_exist ${src_folders[appc]}/deployment/pom.xml
}
# test_compile_all_appc_repos() - Verify that the APPC source code is compiled properly
function test_compile_all_appc_repos {
- clone_all_appc_repos
- compile_all_appc_repos
+ clone_repos "appc"
+ compile_repos "appc"
for adapter in appc-ansible-adapter appc-chef-adapter appc-dmaap-adapter appc-iaas-adapter appc-netconf-adapter appc-rest-adapter appc-rest-healthcheck-adapter; do
- asserts_file_exist $appc_src_folder/appc-adapters/$adapter/$adapter-bundle/target/$adapter-bundle-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/$adapter/$adapter-features/target/$adapter-features-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-bundle/target/$adapter-bundle-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-features/target/$adapter-features-*-SNAPSHOT.jar
if [[ "$adapter" == "appc-netconf-adapter" ]]; then
- asserts_file_exist $appc_src_folder/appc-adapters/$adapter/appc-netconf-installer/target/$adapter-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/appc-netconf-installer/target/$adapter-*-SNAPSHOT.zip
else
- asserts_file_exist $appc_src_folder/appc-adapters/$adapter/$adapter-installer/target/$adapter-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/$adapter/$adapter-installer/target/$adapter-*-SNAPSHOT.zip
fi
done
- asserts_file_exist $appc_src_folder/appc-adapters/appc-dmaap-adapter/appc-message-adapter-api/target/appc-message-adapter-api-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/appc-dmaap-adapter/appc-message-adapter-factory/target/appc-message-adapter-factory-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-api/target/appc-ssh-adapter-api-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-features/target/appc-ssh-adapter-features-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-sshd/target/appc-ssh-adapter-sshd-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-tests/target/appc-ssh-adapter-tests-1.1.0-SNAPSHOT.jar
-
- for component in appc-event-listener appc-oam appc-provider; do
- asserts_file_exist $appc_src_folder/$component/$component-bundle/target/$component-bundle-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/$component/$component-features/target/$component-features-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/$component/$component-installer/target/$component-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-dmaap-adapter/appc-message-adapter-api/target/appc-message-adapter-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-dmaap-adapter/appc-message-adapter-factory/target/appc-message-adapter-factory-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-api/target/appc-ssh-adapter-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-features/target/appc-ssh-adapter-features-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-sshd/target/appc-ssh-adapter-sshd-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-adapters/appc-ssh-adapter/appc-ssh-adapter-tests/target/appc-ssh-adapter-tests-*-SNAPSHOT.jar
+
+ #for component in appc-event-listener appc-oam appc-provider; do
+ for component in appc-event-listener appc-provider; do
+ asserts_file_exist ${src_folders[appc]}/$component/$component-bundle/target/$component-bundle-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/$component/$component-features/target/$component-features-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/$component/$component-installer/target/$component-*-SNAPSHOT.zip
done
- for component in appc-oam appc-provider; do
- asserts_file_exist $appc_src_folder/$component/$component-model/target/$component-model-1.1.0-SNAPSHOT.jar
+ #for component in appc-oam appc-provider; do
+ for component in appc-provider; do
+ asserts_file_exist ${src_folders[appc]}/$component/$component-model/target/$component-model-*-SNAPSHOT.jar
done
- asserts_file_exist $appc_src_folder/appc-common/target/appc-common-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-common/target/appc-common-*-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dg/appc-dg-shared/appc-dg-dependency-model/target/appc-dg-dependency-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dg/appc-dg-shared/appc-dg-domain-model-lib/target/appc-dg-domain-model-lib-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dg/appc-dg-shared/appc-dg-dependency-model/target/appc-dg-dependency-model-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dg/appc-dg-shared/appc-dg-domain-model-lib/target/appc-dg-domain-model-lib-*-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-command-executor/appc-command-executor-api/target/appc-command-executor-api-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-dispatcher-common/appc-data-access-lib/target/appc-data-access-lib-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-dispatcher-common/domain-model-lib/target/domain-model-lib-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-dispatcher-common/execution-queue-management-lib/target/execution-queue-management-lib-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-license-manager/appc-license-manager-api/target/appc-license-manager-api-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-dispatcher/appc-request-handler/appc-request-handler-api/target/appc-request-handler-api-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-command-executor/appc-command-executor-api/target/appc-command-executor-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/appc-data-access-lib/target/appc-data-access-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/domain-model-lib/target/domain-model-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-dispatcher-common/execution-queue-management-lib/target/execution-queue-management-lib-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-license-manager/appc-license-manager-api/target/appc-license-manager-api-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-dispatcher/appc-request-handler/appc-request-handler-api/target/appc-request-handler-api-*-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-lifecycle-management/appc-lifecycle-management-api/target/appc-lifecycle-management-api-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-lifecycle-management/appc-lifecycle-management-core/target/appc-lifecycle-management-core-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-lifecycle-management/appc-lifecycle-management-features/target/appc-lifecycle-management-features-1.1.0-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-lifecycle-management/appc-lifecycle-management-installer/target/appc-lifecycle-management-1.1.0-SNAPSHOT.zip
- asserts_file_exist $appc_src_folder/appc-lifecycle-management/state-machine-lib/target/state-machine-lib-1.1.0-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-api/target/appc-lifecycle-management-api-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-core/target/appc-lifecycle-management-core-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-features/target/appc-lifecycle-management-features-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/appc-lifecycle-management-installer/target/appc-lifecycle-management-*-SNAPSHOT.zip
+ #asserts_file_exist ${src_folders[appc]}/appc-lifecycle-management/state-machine-lib/target/state-machine-lib-*-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/appc-metric/appc-metric-bundle/target/appc-metric-bundle-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[appc]}/appc-metric/appc-metric-bundle/target/appc-metric-bundle-*-SNAPSHOT.jar
- asserts_file_exist $appc_src_folder/deployment/platform-logic/installer/target/platform-logic-installer-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[appc]}/deployment/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
}
# test_get_appc_images() - Verify that APPC Docker images can be retrieved
function test_get_appc_images {
- clone_all_appc_repo
+ clone_repos "appc"
get_appc_images
asserts_image openecomp/appc-image
@@ -79,7 +81,7 @@ function test_get_appc_images {
# test_install_appc() - Verify that the APPC Docker images are up and running
function test_install_appc {
- clone_all_appc_repos
+ clone_repos "appc"
get_appc_images
install_appc
diff --git a/bootstrap/vagrant-onap/tests/test_ccsdk b/bootstrap/vagrant-onap/tests/test_ccsdk
index 81bdd4877..28de27065 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_ccsdk
+++ b/bootstrap/vagrant-onap/tests/test_ccsdk
@@ -9,72 +9,73 @@ covered_functions=(
# test_clone_ccsdk_repos() - Verify that CCSDK repositories are retrieved properly
function test_clone_ccsdk_repos {
- clone_ccsdk_repos
+ clone_repos "ccsdk"
- asserts_file_exist $ccsdk_src_folder/dashboard/pom.xml
- asserts_file_exist $ccsdk_src_folder/distribution/pom.xml
- asserts_file_exist $ccsdk_src_folder/parent/pom.xml
- asserts_file_exist $ccsdk_src_folder/platform/blueprints/pom.xml
- asserts_file_exist $ccsdk_src_folder/platform/nbapi/pom.xml
- asserts_file_exist $ccsdk_src_folder/platform/plugins/pom.xml
- asserts_file_exist $ccsdk_src_folder/sli/adaptors/pom.xml
- asserts_file_exist $ccsdk_src_folder/sli/core/pom.xml
- asserts_file_exist $ccsdk_src_folder/sli/northbound/pom.xml
- asserts_file_exist $ccsdk_src_folder/sli/plugins/pom.xml
- asserts_file_exist $ccsdk_src_folder/storage/esaas/pom.xml
- asserts_file_exist $ccsdk_src_folder/storage/pgaas/pom.xml
- asserts_file_exist $ccsdk_src_folder/utils/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/parent/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/blueprints/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/nbapi/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/platform/plugins/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/storage/esaas/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/storage/pgaas/pom.xml
+ asserts_file_exist ${src_folders[ccsdk]}/utils/pom.xml
}
# test_compile_ccsdk_repos() - Verify the compilation of CCSDK repositories
function test_compile_ccsdk_repos {
- clone_ccsdk_repos
- compile_ccsdk_repos
+ clone_repos "ccsdk"
+ compile_repos "ccsdk"
- asserts_file_exist $ccsdk_src_folder/dashboard/ccsdk-app-common/target/ccsdk-app-common-1.1.0-SNAPSHOT.jar
- asserts_file_exist $ccsdk_src_folder/dashboard/ccsdk-app-os/target/ccsdk-app-os-1.1.0-SNAPSHOT.war
- asserts_file_exist $ccsdk_src_folder/dashboard/ccsdk-app-overlay/target/ccsdk-app-overlay-1.1.0-SNAPSHOT.war
- asserts_file_exist $ccsdk_src_folder/distribution/dgbuilder/target/dgbuilder.0.1.0-SNAPSHOT.zip
- asserts_file_exist $ccsdk_src_folder/distribution/platform-logic/installer/target/platform-logic-installer-0.1.0-SNAPSHOT.zip
- asserts_file_exist $ccsdk_src_folder/platform/nbapi/target/commonnbapi-0.0.3.war
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-common/target/ccsdk-app-common-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-os/target/ccsdk-app-os-1.1.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[ccsdk]}/dashboard/ccsdk-app-overlay/target/ccsdk-app-overlay-1.1.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/dgbuilder/target/dgbuilder.0.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[ccsdk]}/distribution/platform-logic/installer/target/platform-logic-installer-0.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[ccsdk]}/platform/nbapi/target/commonnbapi-*.war
- for adaptor in aai-service mdsal-resource resource-assignment sql-resource; do
- for component in features provider; do
- asserts_file_exist $ccsdk_src_folder/sli/adaptors/$adaptor/$component/target/$adaptor-$component-0.1.0-SNAPSHOT.jar
- done
- asserts_file_exist $ccsdk_src_folder/sli/adaptors/$adaptor/installer/target/sdnc-$adaptor-0.1.0-SNAPSHOT-installer.zip
- done
+ #for adaptor in aai-service mdsal-resource resource-assignment sql-resource; do
+ #for component in features provider; do
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/$adaptor/$component/target/$adaptor-$component-0.1.0-SNAPSHOT.jar
+ #done
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/adaptors/$adaptor/installer/target/sdnc-$adaptor-0.1.0-SNAPSHOT-installer.zip
+ #done
- for core in dblib filters sli sliapi sliPluginUtils; do
+ #for core in dblib filters sli sliapi sliPluginUtils; do
+ for core in dblib; do
for component in features provider; do
- asserts_file_exist $ccsdk_src_folder/sli/core/$core/$component/target/$core-$component-0.1.2-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/$core/$component/target/$core-$component-*-SNAPSHOT.jar
done
- asserts_file_exist $ccsdk_src_folder/sli/core/$core/installer/target/sdnc-$core-0.1.2-SNAPSHOT-installer.zip
+ asserts_file_exist ${src_folders[ccsdk]}/sli/core/$core/installer/target/sdnc-$core-*-SNAPSHOT-installer.zip
done
- asserts_file_exist $ccsdk_src_folder/sli/core/sli/common/target/sli-common-0.1.2-SNAPSHOT.jar
- asserts_file_exist $ccsdk_src_folder/sli/core/sli/recording/target/sli-recording-0.1.2-SNAPSHOT.jar
- asserts_file_exist $ccsdk_src_folder/sli/core/sliapi/model/target/sliapi-model-0.1.2-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sli/common/target/sli-common-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sli/recording/target/sli-recording-*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[ccsdk]}/sli/core/sliapi/model/target/sliapi-model-*-SNAPSHOT.jar
for northbound in asdcApi dataChange; do
for component in features model provider; do
- asserts_file_exist $ccsdk_src_folder/sli/northbound/$northbound/$component/target/$northbound-$component-0.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/$northbound/$component/target/$northbound-$component-*-SNAPSHOT.jar
done
- asserts_file_exist $ccsdk_src_folder/sli/northbound/$northbound/installer/target/sdnc-$northbound-0.1.0-SNAPSHOT-installer.zip
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/$northbound/installer/target/sdnc-$northbound-*-SNAPSHOT-installer.zip
done
- asserts_file_exist $ccsdk_src_folder/sli/northbound/dmaap-listener/target/dmaap-listener-0.1.0-SNAPSHOT.jar
- asserts_file_exist $ccsdk_src_folder/sli/northbound/ueb-listener/target/ueb-listener-0.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/dmaap-listener/target/dmaap-listener-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/northbound/ueb-listener/target/ueb-listener-*-SNAPSHOT.jar
for plugin in properties-node restapi-call-node; do
for component in features provider; do
- asserts_file_exist $ccsdk_src_folder/sli/plugins/$plugin/$component/target/$plugin-$component-0.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/$plugin/$component/target/$plugin-$component-*-SNAPSHOT.jar
done
- asserts_file_exist $ccsdk_src_folder/sli/plugins/$plugin/installer/target/sdnc-$plugin-0.1.0-SNAPSHOT-installer.zip
+ asserts_file_exist ${src_folders[ccsdk]}/sli/plugins/$plugin/installer/target/sdnc-$plugin-*-SNAPSHOT-installer.zip
done
}
# test_get_ccsdk_images() - Verify that CCSDK images are retrieved or built properly
function test_get_ccsdk_images {
- clone_ccsdk_repos
+ clone_repos "ccsdk"
get_ccsdk_images
for image in dgbuilder odlsli odl ubuntu; do
diff --git a/bootstrap/vagrant-onap/tests/test_dcae b/bootstrap/vagrant-onap/tests/test_dcae
index c6887cd78..3c5400fa1 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_dcae
+++ b/bootstrap/vagrant-onap/tests/test_dcae
@@ -9,82 +9,82 @@ covered_functions=(
# test_clone_all_dcae_repos() - Verifies that can retrieve DCAE repositories properly
function test_clone_all_dcae_repos {
- clone_all_dcae_repos
-
- asserts_file_exist $dcae_src_folder/apod/README.md
- asserts_file_exist $dcae_src_folder/apod/analytics/pom.xml
- asserts_file_exist $dcae_src_folder/apod/buildtools/LICENSE.txt
- asserts_file_exist $dcae_src_folder/apod/cdap/LICENSE.txt
- asserts_file_exist $dcae_src_folder/collectors/README.md
- asserts_file_exist $dcae_src_folder/collectors/ves/pom.xml
- asserts_file_exist $dcae_src_folder/controller/pom.xml
- asserts_file_exist $dcae_src_folder/controller/analytics/pom.xml
- asserts_file_exist $dcae_src_folder/dcae-inventory/pom.xml
- asserts_file_exist $dcae_src_folder/demo/pom.xml
- asserts_file_exist $dcae_src_folder/demo/startup/README.md
- asserts_file_exist $dcae_src_folder/demo/startup/aaf/LICENSE.txt
- asserts_file_exist $dcae_src_folder/demo/startup/controller/LICENSE.txt
- asserts_file_exist $dcae_src_folder/demo/startup/message-router/LICENSE.txt
- asserts_file_exist $dcae_src_folder/dmaapbc/pom.xml
- asserts_file_exist $dcae_src_folder/operation/README.md
- asserts_file_exist $dcae_src_folder/operation/utils/pom.xml
- asserts_file_exist $dcae_src_folder/orch-dispatcher/LICENSE.txt
- asserts_file_exist $dcae_src_folder/pgaas/LICENSE.txt
- asserts_file_exist $dcae_src_folder/utils/README.md
- asserts_file_exist $dcae_src_folder/utils/buildtools/LICENSE.txt
+ clone_repos "dcae"
+
+ asserts_file_exist ${src_folders[dcae]}/apod/README.md
+ asserts_file_exist ${src_folders[dcae]}/apod/analytics/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/apod/buildtools/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/apod/cdap/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/collectors/README.md
+ asserts_file_exist ${src_folders[dcae]}/collectors/ves/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/controller/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/controller/analytics/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/dcae-inventory/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/demo/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/README.md
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/aaf/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/controller/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/demo/startup/message-router/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/dmaapbc/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/operation/README.md
+ asserts_file_exist ${src_folders[dcae]}/operation/utils/pom.xml
+ asserts_file_exist ${src_folders[dcae]}/orch-dispatcher/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/pgaas/LICENSE.txt
+ asserts_file_exist ${src_folders[dcae]}/utils/README.md
+ asserts_file_exist ${src_folders[dcae]}/utils/buildtools/LICENSE.txt
}
# test_compile_all_dcae_repos() - Verify that the DCAE source code is compiled properly
function test_compile_all_dcae_repos {
- clone_all_dcae_repos
- compile_all_dcae_repos
+ clone_repos "dcae"
+ compile_repos "dcae"
- asserts_file_exist $dcae_src_folder/collectors/ves/target/VESCollector-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/collectors/ves/target/VESCollector-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/analytics/dcae-analytics-cdap-common-model/target/dcae-analytics-cdap-common-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/analytics/dcae-analytics-cdap-common-model/target/dcae-analytics-cdap-common-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-core/dcae-controller-core-model/target/dcae-controller-core-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-core/dcae-controller-core-utils/target/dcae-controller-core-utils-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-core/dcae-controller-operation-utils/target/dcae-controller-operation-utils-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-core/dcae-controller-platform-model/target/dcae-controller-platform-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-core/dcae-controller-platform-server/target/dcae-controller-platform-server-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-core-model/target/dcae-controller-core-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-core-utils/target/dcae-controller-core-utils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-operation-utils/target/dcae-controller-operation-utils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-platform-model/target/dcae-controller-platform-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-core/dcae-controller-platform-server/target/dcae-controller-platform-server-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-manager/target/dcae-controller-service-dmaap-drsub-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-model/target/dcae-controller-service-dmaap-drsub-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-manager/target/dcae-controller-service-standardeventcollector-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-model/target/dcae-controller-service-standardeventcollector-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service/dcae-controller-service-storage-postgres-model/target/dcae-controller-service-storage-postgres-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-manager/target/dcae-controller-service-dmaap-drsub-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-dmaap-drsub/dcae-controller-service-dmaap-drsub-model/target/dcae-controller-service-dmaap-drsub-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-manager/target/dcae-controller-service-standardeventcollector-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-standardeventcollector/dcae-controller-service-standardeventcollector-model/target/dcae-controller-service-standardeventcollector-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service/dcae-controller-service-storage-postgres-model/target/dcae-controller-service-storage-postgres-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-adaptor/target/dcae-controller-service-cdap-adaptor-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-manager/target/dcae-controller-service-cdap-cluster-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-model/target/dcae-controller-service-cdap-cluster-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-model/target/dcae-controller-service-cdap-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-adaptor/target/dcae-controller-service-cdap-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-manager/target/dcae-controller-service-cdap-cluster-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-cluster/dcae-controller-service-cdap-cluster-model/target/dcae-controller-service-cdap-cluster-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-cdap/dcae-controller-service-cdap-model/target/dcae-controller-service-cdap-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-manager/target/dcae-controller-service-common-docker-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-model/target/dcae-controller-service-common-docker-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-docker-adaptor/target/dcae-controller-service-docker-adaptor-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-manager/target/dcae-controller-service-docker-host-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-model/target/dcae-controller-service-docker-host-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-docker/dcae-controller-service-docker-model/target/dcae-controller-service-docker-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-manager/target/dcae-controller-service-common-docker-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-common-docker/dcae-controller-service-common-docker-model/target/dcae-controller-service-common-docker-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-adaptor/target/dcae-controller-service-docker-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-manager/target/dcae-controller-service-docker-host-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-host/dcae-controller-service-docker-host-model/target/dcae-controller-service-docker-host-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-docker/dcae-controller-service-docker-model/target/dcae-controller-service-docker-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-manager/target/dcae-controller-service-common-vm-manager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-model/target/dcae-controller-service-common-vm-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-vm/dcae-controller-service-vm-adaptor/target/dcae-controller-service-vm-adaptor-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/controller/dcae-controller-service-vm/dcae-controller-service-vm-model/target/dcae-controller-service-vm-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-manager/target/dcae-controller-service-common-vm-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-common-vm/dcae-controller-service-common-vm-model/target/dcae-controller-service-common-vm-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-vm-adaptor/target/dcae-controller-service-vm-adaptor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/controller/dcae-controller-service-vm/dcae-controller-service-vm-model/target/dcae-controller-service-vm-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $dcae_src_folder/dcae-inventory/target/dcae-inventory-1.1.0.jar
+ asserts_file_exist ${src_folders[dcae]}/dcae-inventory/target/dcae-inventory-1.1.0.jar
- asserts_file_exist $dcae_src_folder/demo/dcae-demo-controller/target/dcae-demo-controller-1.1.0-SNAPSHOT-runtime.zip
+ asserts_file_exist ${src_folders[dcae]}/demo/dcae-demo-controller/target/dcae-demo-controller-1.1.0-SNAPSHOT-runtime.zip
- asserts_file_exist $dcae_src_folder/dmaapbc/target/dcae_dmaapbc.jar
+ asserts_file_exist ${src_folders[dcae]}/dmaapbc/target/dcae_dmaapbc.jar
- asserts_file_exist $dcae_src_folder/operation/utils/operation-utils/target/operation-utils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[dcae]}/operation/utils/operation-utils/target/operation-utils-1.1.0-SNAPSHOT.jar
}
# test_get_dcae_images() - Function that verifies DCAE images are retrieved properly
function test_get_dcae_images {
- clone_all_dcae_repos
- compile_all_dcae_repos
+ clone_repos "dcae"
+ compile_repos "dcae"
get_dcae_images
asserts_image openecomp/dcae-dmaapbc
@@ -95,7 +95,7 @@ function test_get_dcae_images {
# test_install_dcae() - Function that verifies that DCAE services are up and running
function test_install_dcae {
- clone_all_dcae_repos
+ clone_repos "dcae"
compile_all_dcae_repos
get_dcae_images
install_dcae
diff --git a/bootstrap/vagrant-onap/tests/test_functions b/bootstrap/vagrant-onap/tests/test_functions
index 3ab5828cb..9411b46f4 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_functions
+++ b/bootstrap/vagrant-onap/tests/test_functions
@@ -4,7 +4,7 @@ source /var/onap_tests/_test_base
source /var/onap/functions
covered_functions=(
-"create_configuration_files" "clone_repo" "install_dev_tools"
+"create_configuration_files" "clone_repo"
"configure_bind" "install_java" "install_maven" "install_nodejs" "install_python"
"install_docker" "pull_docker_image" "install_docker_compose" "configure_service"
"start_ODL" "compile_src" "build_docker_image" "docker_openecomp_login"
@@ -55,15 +55,6 @@ function test_clone_repo {
asserts_file_exist $git_src_folder/demo/LICENSE.TXT
}
-# test_install_dev_tools() - Verify the correct installation of developer tools
-function test_install_dev_tools {
- install_dev_tools
-
- asserts_installed_package apt-transport-https
- asserts_installed_package ca-certificates
- asserts_installed_package curl
-}
-
# test_configure_bind() - Verify the correct installation and configuration of bind
function test_configure_bind {
configure_bind
@@ -167,18 +158,12 @@ function test_build_docker_image {
# test_coverity_repos() - Verify that all the repos are covered by scripts
function test_coverity_repos {
- # Get repositories list from source code files
- repos_list=()
- for module in $( stat -c "%F %n" /var/onap/* | grep -v "_\|functions\|commons\|files" | cut -d' ' -f 3-); do
- source $module
- module_repo_list=$(eval echo \${$(basename $module)_repos[@]})
- repos_list=(${repos_list[@]} ${module_repo_list[@]})
- done
-
pushd /var/onap_tests/
cp projects.txt remaining_projects.txt
- for covered_repo in "${repos_list[@]}"; do
- sed -i '/^'${covered_repo//\//\\/}'$/d' remaining_projects.txt
+ for project in "${repos[@]}"; do
+ for covered_repo in $project; do
+ sed -i '/^'${covered_repo//\//\\/}'$/d' remaining_projects.txt
+ done
done
threshold=75
diff --git a/bootstrap/vagrant-onap/tests/test_mr b/bootstrap/vagrant-onap/tests/test_mr
index a863b99ba..ad1bcd2f5 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_mr
+++ b/bootstrap/vagrant-onap/tests/test_mr
@@ -9,9 +9,9 @@ covered_functions=(
# test_clone_mr_repos() - Verify that Message Router repositories are cloned properly
function test_clone_mr_repos {
- clone_mr_repos
+ clone_repo dcae/demo/startup/message-router ${src_folders[mr]}
- asserts_file_exist $mr_src_folder/deploy.sh
+ asserts_file_exist ${src_folders[mr]}/deploy.sh
}
# test_get_mr_images() - Verify that Message Router Docker images are retrieved
@@ -24,7 +24,7 @@ function test_get_mr_images {
# test_install_message_router() - Verify the built and start of Message Router services
function test_install_message_router {
- clone_mr_repos
+ clone_repo dcae/demo/startup/message-router ${src_folders[mr]}
get_mr_images
install_message_router
diff --git a/bootstrap/vagrant-onap/tests/test_msb b/bootstrap/vagrant-onap/tests/test_msb
new file mode 100755
index 000000000..0848d333b
--- /dev/null
+++ b/bootstrap/vagrant-onap/tests/test_msb
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+source /var/onap_tests/_test_base
+source /var/onap/msb
+
+covered_functions=(
+"clone_all_msb_repos" "compile_all_msb_repos" "get_msb_images" "install_msb"
+)
+
+# test_clone_all_msb_repos() - Verify the source code retrieve of Microservice Bus project
+function test_clone_all_msb_repos {
+ clone_repos "msb"
+
+ asserts_file_exist ${src_folders[msb]}/apigateway/pom.xml
+ asserts_file_exist ${src_folders[msb]}/discovery/pom.xml
+ asserts_file_exist ${src_folders[msb]}/java-sdk/pom.xml
+ asserts_file_exist ${src_folders[msb]}/swagger-sdk/pom.xml
+}
+
+# test_compile_all_msb_repos() - Verify the correct compilation of MSB project
+function test_compile_all_msb_repos {
+ clone_repos "msb"
+ compile_repos "msb"
+
+ asserts_file_exist ${src_folders[msb]}/apigateway/apiroute/apiroute-service/target/original-apiroute-service-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/apigateway/apiroute/apiroute-standalone/target/apiroute-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[msb]}/discovery/discovery-ui/target/discovery-ui-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/discovery/sdclient/discovery-service/target/original-discovery-service-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/java-sdk/target/msb-java-sdk-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[msb]}/swagger-sdk/target/swagger-sdk-1.1.0-SNAPSHOT.jar
+}
+
+# test_get_msb_images() - Verify the creation of MSB Docker images
+function test_get_msb_images {
+ clone_repos "msb"
+ get_msb_images
+
+ for image in base apigateway discovery; do
+ asserts_image $nexus_docker_repo/onap/msb/msb_$image
+ done
+}
+
+# test_install_msb - Verify the execution of MSB Docker images
+function test_install_msb {
+ clone_repos "msb"
+ get_msb_images
+ install_msb
+
+ for image in apigateway discovery; do
+ asserts_image_running $nexus_docker_repo/onap/msb/msb_$image
+ done
+
+ asserts_http_status_code "http://127.0.0.1:10081/api/microservices/v1/services"
+ asserts_http_status_code "http://127.0.0.1/api/aai/v8/cloud-infrastructure/cloud-regions"
+}
+
+if [ "$1" != '*' ]; then
+ unset covered_functions
+ covered_functions=$1
+fi
+main "${covered_functions[@]}"
diff --git a/bootstrap/vagrant-onap/tests/test_mso b/bootstrap/vagrant-onap/tests/test_mso
index deea7bbf8..6a6bef772 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_mso
+++ b/bootstrap/vagrant-onap/tests/test_mso
@@ -9,40 +9,40 @@ covered_functions=(
# test_clone_all_mso_repos() - Verify the source code retrieve of MSO project
function test_clone_all_mso_repos {
- clone_all_mso_repos
+ clone_repos "mso"
- asserts_file_exist $src_folder/pom.xml
- asserts_file_exist $src_folder/chef-repo/LICENSE.txt
- asserts_file_exist $src_folder/docker-config/LICENSE.txt
- asserts_file_exist $src_folder/libs/pom.xml
- asserts_file_exist $src_folder/mso-config/LICENSE.txt
+ asserts_file_exist ${src_folders[mso]}/pom.xml
+ asserts_file_exist ${src_folders[mso]}/chef-repo/LICENSE.txt
+ asserts_file_exist ${src_folders[mso]}/docker-config/LICENSE.txt
+ asserts_file_exist ${src_folders[mso]}/libs/pom.xml
+ asserts_file_exist ${src_folders[mso]}/mso-config/LICENSE.txt
}
# test_compile_all_mso_repos() - Verify the correct compilation of MSO projects
function test_compile_all_mso_repos {
- clone_all_mso_repos
- compile_all_mso_repos
+ clone_repos "mso"
+ compile_repos "mso"
- asserts_file_exist $src_folder/libs/ceilometer-client/target/ceilometer-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/ceilometer-model/target/ceilometer-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/glance-client/target/glance-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/glance-model/target/glance-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/heat-client/target/heat-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/heat-model/target/heat-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/keystone-client/target/keystone-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/keystone-model/target/keystone-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/nova-client/target/nova-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/nova-model/target/nova-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/openstack-client/target/openstack-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/quantum-client/target/quantum-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/quantum-model/target/quantum-model-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/swift-client/target/swift-client-1.1.0-SNAPSHOT.jar
- asserts_file_exist $src_folder/libs/swift-model/target/swift-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/ceilometer-client/target/ceilometer-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/ceilometer-model/target/ceilometer-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/glance-client/target/glance-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/glance-model/target/glance-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/heat-client/target/heat-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/heat-model/target/heat-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/keystone-client/target/keystone-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/keystone-model/target/keystone-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/nova-client/target/nova-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/nova-model/target/nova-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/openstack-client/target/openstack-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/quantum-client/target/quantum-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/quantum-model/target/quantum-model-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/swift-client/target/swift-client-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[mso]}/libs/swift-model/target/swift-model-1.1.0-SNAPSHOT.jar
}
# test_get_mso_images() - Verify the creation of MSO Docker images
function test_get_mso_images {
- clone_all_mso_repos
+ clone_repos "mso"
install_mso
for image in mso mso-arquillian wildfly ubuntu-update jacoco; do
@@ -52,7 +52,7 @@ function test_get_mso_images {
# test_install_mso - Verify the execution of MSO Docker images
function test_install_mso {
- clone_all_mso_repos
+ clone_repos "mso"
install_mso
install_mso_docker_config
diff --git a/bootstrap/vagrant-onap/tests/test_multicloud b/bootstrap/vagrant-onap/tests/test_multicloud
index b0b674894..1b5b85de6 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_multicloud
+++ b/bootstrap/vagrant-onap/tests/test_multicloud
@@ -9,37 +9,42 @@ covered_functions=(
# test_clone_multicloud_repos() - Verify that Multi Cloud repositories are cloned properly
function test_clone_multicloud_repos {
- clone_multicloud_repos
-
- #asserts_file_exist $multicloud_src_folder/
- asserts_file_exist $multicloud_src_folder/framework/pom.xml
- asserts_file_exist $multicloud_src_folder/openstack/pom.xml
- asserts_file_exist $multicloud_src_folder/openstack/vmware/pom.xml
- asserts_file_exist $multicloud_src_folder/openstack/windriver/pom.xml
- #asserts_file_exist $multicloud_src_folder/azure/
+ clone_repos "multicloud"
+
+ #asserts_file_exist ${src_folders[multicloud]}/
+ asserts_file_exist ${src_folders[multicloud]}/framework/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/vmware/pom.xml
+ asserts_file_exist ${src_folders[multicloud]}/openstack/windriver/pom.xml
+ #asserts_file_exist ${src_folders[multicloud]}/azure/
}
# test_compile_multicloud_repos() -
function test_compile_multicloud_repos {
- clone_multicloud_repos
- compile_multicloud_repos
+ clone_repos "multicloud"
+ compile_repos "multicloud"
- asserts_file_exist $multicloud_src_folder/openstack/newton/target/multicloud-openstack-newton-1.0.0-SNAPSHOT.zip
- asserts_file_exist $multicloud_src_folder/openstack/ocata/target/multicloud-openstack-ocata-1.0.0-SNAPSHOT.zip
- asserts_file_exist $multicloud_src_folder/openstack/windriver/target/multicloud-openstack-windriver-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[multicloud]}/openstack/newton/target/multicloud-openstack-newton-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[multicloud]}/openstack/ocata/target/multicloud-openstack-ocata-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[multicloud]}/openstack/windriver/target/multicloud-openstack-windriver-1.0.0-SNAPSHOT.zip
}
# test_get_multicloud_images() -
function test_get_multicloud_images {
- clone_multicloud_repos
+ clone_repos "multicloud"
get_multicloud_images
+
+ asserts_image onap/multicloud/openstack-$openstack_release
}
# test_install_multicloud() - Verify the built and start of Multi Cloud services
function test_install_multicloud {
- clone_multicloud_repos
+ clone_repos "multicloud"
get_multicloud_images
install_multicloud
+
+ # NOTE(electrocucaracha): Depends on https://gerrit.onap.org/r/#/c/23631/
+ asserts_http_status_code http://127.0.0.1:9003/api/multicloud-$openstack_release/v0/swagger.json
}
if [ "$1" != '*' ]; then
diff --git a/bootstrap/vagrant-onap/tests/test_policy b/bootstrap/vagrant-onap/tests/test_policy
index 08231fc7a..b666cf9db 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_policy
+++ b/bootstrap/vagrant-onap/tests/test_policy
@@ -12,69 +12,69 @@ covered_functions=(
# test_clone_all_policy_repos() - Verify cloning of Policy source code
function test_clone_all_policy_repos {
- clone_all_policy_repos
+ clone_repos "policy"
- asserts_file_exist $policy_src_folder/api/pom.xml
- asserts_file_exist $policy_src_folder/common/pom.xml
- asserts_file_exist $policy_src_folder/docker/pom.xml
- asserts_file_exist $policy_src_folder/drools-applications/pom.xml
- asserts_file_exist $policy_src_folder/drools-pdp/pom.xml
- asserts_file_exist $policy_src_folder/engine/pom.xml
- asserts_file_exist $policy_src_folder/gui/pom.xml
- asserts_file_exist $policy_src_folder/pap/pom.xml
- asserts_file_exist $policy_src_folder/pdp/pom.xml
+ asserts_file_exist ${src_folders[policy]}/api/pom.xml
+ asserts_file_exist ${src_folders[policy]}/common/pom.xml
+ asserts_file_exist ${src_folders[policy]}/docker/pom.xml
+ asserts_file_exist ${src_folders[policy]}/drools-applications/pom.xml
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/pom.xml
+ asserts_file_exist ${src_folders[policy]}/engine/pom.xml
+ asserts_file_exist ${src_folders[policy]}/gui/pom.xml
+ asserts_file_exist ${src_folders[policy]}/pap/pom.xml
+ asserts_file_exist ${src_folders[policy]}/pdp/pom.xml
}
# test_compile_all_policy_repos() - Verify compiling of Policy source code
function test_compile_all_policy_repos {
- clone_all_policy_repos
- compile_all_policy_repos
+ clone_repos "policy"
+ compile_repos "policy"
- asserts_file_exist $policy_src_folder/common/common-logging/target/ONAP-Logging-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/common/integrity-audit/target/integrity-audit-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/common/integrity-monitor/target/integrity-monitor-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/common/site-manager/target/site-manager-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/common-logging/target/ONAP-Logging-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/integrity-audit/target/integrity-audit-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/integrity-monitor/target/integrity-monitor-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/common/site-manager/target/site-manager-*-SNAPSHOT.jar
for actor in appc appclcm so test vfc; do
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/actors/actor.$actor/target/actor.$actor-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/actors/actor.$actor/target/actor.$actor-*-SNAPSHOT.jar
done
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/actors/actorServiceProvider/target/actorServiceProvider-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/eventmanager/target/eventmanager-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/guard/target/guard-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/actors/actorServiceProvider/target/actorServiceProvider-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/eventmanager/target/eventmanager-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/guard/target/guard-*-SNAPSHOT.jar
for module in aai appc appclcm events rest sdc so trafficgenerator vfc; do
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/model-impl/$module/target/$module-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/model-impl/$module/target/$module-*-SNAPSHOT.jar
done
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/common/policy-yaml/target/policy-yaml-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/common/policy-yaml/target/policy-yaml-*-SNAPSHOT.jar
for package in apps artifacts; do
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/packages/$package/target/$package-1.1.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/packages/$package/target/$package-*-SNAPSHOT.zip
done
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/packages/basex/target/basex-1.1.0-SNAPSHOT.tar.gz
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/templates/template.demo/target/template.demo-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/drools-applications/controlloop/templates/template.demo/target/template.demo-1.1.0-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/packages/basex/target/basex-*-SNAPSHOT.tar.gz
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/templates/template.demo/target/template.demo-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-applications/controlloop/templates/template.demo/target/template.demo-*-SNAPSHOT.jar
for feature in eelf healthcheck session-persistence; do
- asserts_file_exist $policy_src_folder/drools-pdp/feature-$feature/target/feature-$feature-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/feature-$feature/target/feature-$feature-*-SNAPSHOT.jar
done
- asserts_file_exist $policy_src_folder/drools-pdp/packages/base/target/base-1.1.0-SNAPSHOT.tar.gz
- asserts_file_exist $policy_src_folder/drools-pdp/packages/install/target/install-drools-1.1.0-SNAPSHOT.zip
+ #asserts_file_exist ${src_folders[policy]}/drools-pdp/packages/base/target/base-*-SNAPSHOT.tar.gz
+ #asserts_file_exist ${src_folders[policy]}/drools-pdp/packages/install/target/install-drools-*-SNAPSHOT.zip
for policy in core endpoints management utils; do
- asserts_file_exist $policy_src_folder/drools-pdp/policy-$policy/target/policy-$policy-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/drools-pdp/policy-$policy/target/policy-$policy-*-SNAPSHOT.jar
done
for engine in BRMSGateway LogParser ONAP-PDP ONAP-XACML ONAP-REST; do
- asserts_file_exist $policy_src_folder/engine/$engine/target/$engine-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/$engine/target/$engine-*-SNAPSHOT.jar
done
for engine in ONAP-PAP-REST ONAP-PDP-REST ONAP-SDK-APP; do
- asserts_file_exist $policy_src_folder/engine/$engine/target/$engine-1.1.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[policy]}/engine/$engine/target/$engine-*-SNAPSHOT.war
done
- asserts_file_exist $policy_src_folder/engine/packages/base/target/base-1.1.0-SNAPSHOT.tar.gz
- asserts_file_exist $policy_src_folder/engine/packages/install/target/install-1.1.0-SNAPSHOT.zip
- asserts_file_exist $policy_src_folder/engine/POLICY-SDK-APP/target/POLICY-SDK-APP-1.1.0-SNAPSHOT.war
- asserts_file_exist $policy_src_folder/engine/PolicyEngineAPI/target/PolicyEngineAPI-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/engine/PolicyEngineClient/target/PolicyEngineClient-1.1.0-SNAPSHOT.jar
- asserts_file_exist $policy_src_folder/engine/PolicyEngineUtils/target/PolicyEngineUtils-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/packages/base/target/base-*-SNAPSHOT.tar.gz
+ asserts_file_exist ${src_folders[policy]}/engine/packages/install/target/install-*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[policy]}/engine/POLICY-SDK-APP/target/POLICY-SDK-APP-*-SNAPSHOT.war
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineAPI/target/PolicyEngineAPI-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineClient/target/PolicyEngineClient-*-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[policy]}/engine/PolicyEngineUtils/target/PolicyEngineUtils-*-SNAPSHOT.jar
}
# test_get_policy_images() - Verify that Policy Docker images are retrieved properly
function test_get_policy_images {
- clone_all_policy_repos
+ clone_repos "policy"
get_policy_images
for image in os nexus db base drools pe; do
@@ -84,7 +84,7 @@ function test_get_policy_images {
# test_install_policy() - Verify that Policy services are started properly
function test_install_policy {
- clone_all_policy_repos
+ clone_repos "policy"
get_policy_images
install_policy
diff --git a/bootstrap/vagrant-onap/tests/test_portal b/bootstrap/vagrant-onap/tests/test_portal
index ebfd21de8..d20f173ce 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_portal
+++ b/bootstrap/vagrant-onap/tests/test_portal
@@ -14,10 +14,10 @@ covered_functions=(
function test_clone_all_portal_repos {
clone_all_portal_repos
- asserts_file_exist $portal_src_folder/pom.xml
- asserts_file_exist $portal_src_folder/ecompsdkos/LICENSE.txt
- asserts_file_exist $portal_src_folder/dmaapbc/pom.xml
- asserts_file_exist $portal_src_folder/sdk/LICENSE.txt
+ asserts_file_exist ${src_folders[portal]}/pom.xml
+ asserts_file_exist ${src_folders[portal]}/ecompsdkos/LICENSE.txt
+ asserts_file_exist ${src_folders[portal]}/dmaapbc/pom.xml
+ asserts_file_exist ${src_folders[portal]}/sdk/LICENSE.txt
}
# test_compile_all_portal_repos() - Verify compiling of Portal source code
@@ -25,9 +25,9 @@ function test_compile_all_portal_repos {
clone_all_portal_repos
compile_all_portal_repos
- asserts_file_exist $portal_src_folder/ecomp-portal-BE-common/target/ecompportal-be-common.war
- asserts_file_exist $portal_src_folder/ecomp-portal-BE-common-test/target/ecomp-portal-BE-common-test.jar
- asserts_file_exist $portal_src_folder/ecomp-portal-BE-os/target/ecompportal-be-os.war
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-common/target/ecompportal-be-common.war
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-common-test/target/ecomp-portal-BE-common-test.jar
+ asserts_file_exist ${src_folders[portal]}/ecomp-portal-BE-os/target/ecompportal-be-os.war
}
# test_get_portal_images() - Verify that Portal Docker images are retrieved or built properly
diff --git a/bootstrap/vagrant-onap/tests/test_robot b/bootstrap/vagrant-onap/tests/test_robot
index 702401e2a..b96a08848 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_robot
+++ b/bootstrap/vagrant-onap/tests/test_robot
@@ -9,36 +9,36 @@ covered_functions=(
# test_clone_robot_repos() - Verify that Robot repositories are cloned properly
function test_clone_robot_repos {
- clone_robot_repos
+ clone_repos "robot" "testsuite"
- asserts_file_exist $robot_src_folder/LICENSE.TXT
- asserts_file_exist $robot_src_folder/heatbridge/pom.xml
- asserts_file_exist $robot_src_folder/properties/LICENSE.TXT
- asserts_file_exist $robot_src_folder/python-testing-utils/LICENSE.TXT
+ asserts_file_exist ${src_folders[robot]}/LICENSE.TXT
+ asserts_file_exist ${src_folders[robot]}/heatbridge/pom.xml
+ asserts_file_exist ${src_folders[robot]}/properties/LICENSE.TXT
+ asserts_file_exist ${src_folders[robot]}/python-testing-utils/LICENSE.TXT
}
# test_compile_robot_repos() - Verify that Robot source code can be compiled properly
function test_compile_robot_repos {
- clone_robot_repos
- compile_robot_repos
+ clone_repos "robot" "testsuite"
+ compile_repos "robot"
- asserts_file_exist $robot_src_folder/heatbridge/target/maven-python/dist/heatbridge-0.3.0.dev0-py2-none-any.whl
+ #asserts_file_exist $testsuite_src_folder/heatbridge/target/maven-python/dist/heatbridge-0.3.0.dev0-py2-none-any.whl
}
# test_get_robot_images() - Verify that Robot Docker images are retrieved
-function test_get_mr_images {
+function test_get_robot_images {
get_robot_images
- asserts_image testsuite
+ asserts_image $nexus_docker_repo/openecomp/testsuite
}
# test_install_robot() - Verify the built and start of Robot services
-function test_install_message_router {
- clone_robot_repos
+function test_install_robot {
+ clone_repos "robot" "testsuite"
get_robot_images
install_robot
- asserts_image_running testsuite
+ asserts_image_running $nexus_docker_repo/openecomp/testsuite
}
if [ "$1" != '*' ]; then
diff --git a/bootstrap/vagrant-onap/tests/test_sdc b/bootstrap/vagrant-onap/tests/test_sdc
index 9b6f5a5a5..67657803e 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_sdc
+++ b/bootstrap/vagrant-onap/tests/test_sdc
@@ -9,40 +9,41 @@ covered_functions=(
# test_clone_all_sdc_repos() - Verifies the retrieval of SDC source code repos
function test_clone_all_sdc_repos {
- clone_all_sdc_repos
+ clone_repos "sdc"
- asserts_file_exist $sdc_src_folder/pom.xml
- asserts_file_exist $sdc_src_folder/sdc-os-chef/pom.xml
- asserts_file_exist $sdc_src_folder/jtosca/pom.xml
- asserts_file_exist $sdc_src_folder/sdc-distribution-client/pom.xml
- asserts_file_exist $sdc_src_folder/sdc-titan-cassandra/pom.xml
- asserts_file_exist $sdc_src_folder/sdc-tosca/pom.xml
- asserts_file_exist $sdc_src_folder/sdc_common/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-os-chef/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/jtosca/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-titan-cassandra/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc-tosca/pom.xml
+ asserts_file_exist ${src_folders[sdc]}/sdc_common/pom.xml
}
# test_compile_all_sdc_repos() - Verifies the correct compilation of SDC repositories
function test_compile_all_sdc_repos {
- clone_all_sdc_repos
- compile_all_sdc_repos
+ clone_repos "sdc"
+ compile_repos "sdc"
- asserts_file_exist $sdc_src_folder/jtosca/target/jtosca-1.1.10-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-ci/target/sdc-distribution-ci-1.1.*-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-client/target/sdc-distribution-client-1.1.*-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-titan-cassandra/target/jamm-0.3.0.jar
- asserts_file_exist $sdc_src_folder/sdc-tosca/target/sdc-tosca-1.1.*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/jtosca/target/jtosca-1.1.10-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/sdc-distribution-ci/target/sdc-distribution-ci-1.1.*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-distribution-client/sdc-distribution-client/target/sdc-distribution-client-1.1.*-SNAPSHOT.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-titan-cassandra/target/jamm-0.3.0.jar
+ #asserts_file_exist ${src_folders[sdc]}/sdc-tosca/target/sdc-tosca-1.1.*-SNAPSHOT.jar
- for dirc in logging sdc-artifact-generator; do
+ #for dirc in logging sdc-artifact-generator; do
+ for dirc in logging; do
name="openecomp-$dirc"
for module in api core; do
fullname="$name-$module"
- asserts_file_exist $sdc_src_folder/sdc_common/$name-lib/$fullname/target/$fullname-1.1.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[sdc]}/sdc_common/$name-lib/$fullname/target/$fullname-1.1.0-SNAPSHOT.jar
done
done
}
# test_get_sdc_images() - Verifies the correct retrieval of SDC Docker images
function test_get_sdc_images {
- clone_all_sdc_repos
+ clone_repos "sdc"
get_sdc_images
for image in sanity elasticsearch cassandra kibana frontend backend; do
@@ -52,7 +53,7 @@ function test_get_sdc_images {
# test_install_sdc() - Verifies that SDC services are up and running
function test_install_sdc {
- clone_all_sdc_repos
+ clone_repos "sdc"
get_sdc_images
install_sdc
diff --git a/bootstrap/vagrant-onap/tests/test_sdnc b/bootstrap/vagrant-onap/tests/test_sdnc
index 7b54749dd..c4ddc56cd 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_sdnc
+++ b/bootstrap/vagrant-onap/tests/test_sdnc
@@ -9,39 +9,39 @@ covered_functions=(
# test_clone_all_sdnc_repos() - Verify the source code retrieve of SDNC project
function test_clone_all_sdnc_repos {
- clone_all_sdnc_repos
+ clone_repos "sdnc"
- asserts_file_exist $sdnc_src_folder/adaptors/pom.xml
- asserts_file_exist $sdnc_src_folder/architecture/docs/index.rst
- asserts_file_exist $sdnc_src_folder/core/pom.xml
- asserts_file_exist $sdnc_src_folder/features/docs/index.rst
- asserts_file_exist $sdnc_src_folder/northbound/pom.xml
- asserts_file_exist $sdnc_src_folder/oam/pom.xml
- asserts_file_exist $sdnc_src_folder/parent/docs/index.rst
- asserts_file_exist $sdnc_src_folder/plugins/pom.xml
+ asserts_file_exist ${src_folders[sdnc]}/adaptors/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/architecture/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/core/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/features/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/northbound/pom.xml
+ asserts_file_exist ${src_folders[sdnc]}/oam/pom.xml
+ #asserts_file_exist ${src_folders[sdnc]}/parent/docs/index.rst
+ asserts_file_exist ${src_folders[sdnc]}/plugins/pom.xml
}
# test_compile_all_sdnc_repos() - Verify the correct compilation of SDNC projects
function test_compile_all_sdnc_repos {
- clone_all_sdnc_repos
+ clone_repos "sdnc"
compile_all_sdnc_repos
for component in generic-resource-api vnfapi vnftools; do
if [[ "$component" == "vnfapi" ]]; then
- asserts_file_exist $sdnc_src_folder/northbound/vnfapi/model/target/vnfapi-model-1.2.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[sdnc]}/northbound/vnfapi/model/target/vnfapi-model-1.2.0-SNAPSHOT.jar
fi
- asserts_file_exist $sdnc_src_folder/northbound/$component/installer/target/sdnc-$component-1.2.0-SNAPSHOT-installer.zip
- asserts_file_exist $sdnc_src_folder/northbound/$component/features/target/$component-features-1.2.0-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/northbound/$component/provider/target/$component-provider-1.2.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/installer/target/sdnc-$component-1.2.0-SNAPSHOT-installer.zip
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/features/target/$component-features-1.2.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[sdnc]}/northbound/$component/provider/target/$component-provider-1.2.0-SNAPSHOT.jar
done
- asserts_file_exist $sdnc_src_folder/oam/admportal/target/admportal.*-SNAPSHOT.zip
- asserts_file_exist $sdnc_src_folder/oam/dgbuilder/target/dgbuilder.*-SNAPSHOT.zip
- asserts_file_exist $sdnc_src_folder/oam/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[sdnc]}/oam/admportal/target/admportal.*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[sdnc]}/oam/dgbuilder/target/dgbuilder.*-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[sdnc]}/oam/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
}
# test_get_sdnc_images() - Verify that the SDNC images are created or retrieved
function test_get_sdnc_images {
- clone_all_sdnc_repos
+ clone_repos "sdnc"
get_sdnc_images
asserts_image onap/sdnc-image
@@ -52,7 +52,7 @@ function test_get_sdnc_images {
# test_install_sdnc() - Verify that the SDNC Docker containers are up and running
function test_install_sdnc {
- clone_all_sdnc_repos
+ clone_repos "sdnc"
get_sdnc_images
install_sdnc
diff --git a/bootstrap/vagrant-onap/tests/test_vfc b/bootstrap/vagrant-onap/tests/test_vfc
index 53d9c0d5e..3fadeafad 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_vfc
+++ b/bootstrap/vagrant-onap/tests/test_vfc
@@ -9,28 +9,28 @@ covered_functions=(
# test_clone_all_vfc_repos() - Verify cloning and pulling source code from repositories
function test_clone_all_vfc_repos {
- clone_all_vfc_repos
+ clone_repos "vfc"
- #asserts_file_exist $vfc_src_folder/pom.xml
- #asserts_file_exist $vfc_src_folder/gvnfm
- asserts_file_exist $vfc_src_folder/gvnfm/vnflcm/pom.xml
- asserts_file_exist $vfc_src_folder/gvnfm/vnfmgr/pom.xml
- asserts_file_exist $vfc_src_folder/gvnfm/vnfres/pom.xml
- #asserts_file_exist $vfc_src_folder/nfvo
- asserts_file_exist $vfc_src_folder/nfvo/catalog/run.sh
- #asserts_file_exist $vfc_src_folder/nfvo/driver
- asserts_file_exist $vfc_src_folder/nfvo/driver/ems/pom.xml
- asserts_file_exist $vfc_src_folder/nfvo/driver/sfc/pom.xml
- #asserts_file_exist $vfc_src_folder/nfvo/driver/vnfm
- asserts_file_exist $vfc_src_folder/nfvo/driver/vnfm/gvnfm/pom.xml
- asserts_file_exist $vfc_src_folder/nfvo/driver/vnfm/svnfm/pom.xml
- asserts_file_exist $vfc_src_folder/nfvo/lcm/pom.xml
- asserts_file_exist $vfc_src_folder/nfvo/wfengine/wso2/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/gvnfm
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnflcm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnfmgr/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/gvnfm/vnfres/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/nfvo
+ asserts_file_exist ${src_folders[vfc]}/nfvo/catalog/run.sh
+ #asserts_file_exist ${src_folders[vfc]}/nfvo/driver
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/ems/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/sfc/pom.xml
+ #asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm/gvnfm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/driver/vnfm/svnfm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/lcm/pom.xml
+ asserts_file_exist ${src_folders[vfc]}/nfvo/wfengine/wso2/pom.xml
}
# test_compile_all_vfc_repos() - Verify that all the VFC modules are compiled properly
function test_compile_all_vfc_repos {
- clone_all_vfc_repos
+ clone_repos "vfc"
compile_all_vfc_repos
# TODO(electrocucaracha): Add asserts_file_exist
@@ -38,7 +38,7 @@ function test_compile_all_vfc_repos {
# test_get_vfc_images() - Verify all VFC images are built correctly.
function test_get_vfc_images {
- clone_all_vfc_repos
+ clone_repos "vfc"
get_vfc_images
asserts_image onap/gvnfmdriver
@@ -50,7 +50,7 @@ function test_get_vfc_images {
# test_install_vfc() - Verify that the VFC are up and running
function test_install_vfc {
- clone_all_vfc_repos
+ clone_repos "vfc"
get_vfc_images
install_vfc
diff --git a/bootstrap/vagrant-onap/tests/test_vid b/bootstrap/vagrant-onap/tests/test_vid
index 731d005a3..79a415242 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_vid
+++ b/bootstrap/vagrant-onap/tests/test_vid
@@ -9,25 +9,25 @@ covered_functions=(
# test_clone_all_vid_repos() - Verifies that VID repositories are retrieved properly
function test_clone_all_vid_repos {
- clone_all_vid_repos
+ clone_repos "vid"
- asserts_file_exist $vid_src_folder/pom.xml
- asserts_file_exist $vid_src_folder/asdcclient/pom.xml
+ asserts_file_exist ${src_folders[vid]}/pom.xml
+ asserts_file_exist ${src_folders[vid]}/asdcclient/pom.xml
}
# test_compile_all_vid_repos() - Verifies that VID source code is compiled properly
function test_compile_all_vid_repos {
- clone_all_vid_repos
- compile_all_vid_repos
+ clone_repos "vid"
+ compile_repos "vid"
- asserts_file_exist $vid_src_folder/asdcclient/target/asdcclient-1.0.2-SNAPSHOT.jar
- asserts_file_exist $vid_src_folder/epsdk-app-onap/target/vid.war
- asserts_file_exist $vid_src_folder/vid-app-common/target/vid-common.war
+ asserts_file_exist ${src_folders[vid]}/asdcclient/target/asdcclient-1.0.2-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[vid]}/epsdk-app-onap/target/vid.war
+ asserts_file_exist ${src_folders[vid]}/vid-app-common/target/vid-common.war
}
# test_get_vid_images() - Verifies that VID Docker images are built properly
function test_get_vid_images {
- clone_all_vid_repos
+ clone_repos "vid"
get_vid_images
asserts_image openecomp/vid
@@ -36,7 +36,7 @@ function test_get_vid_images {
# test_install_vid() - Verifies taht VID services are up and running
function test_install_vid {
- clone_all_vid_repos
+ clone_repos "vid"
get_vid_images
install_vid
diff --git a/bootstrap/vagrant-onap/tests/test_vnfsdk b/bootstrap/vagrant-onap/tests/test_vnfsdk
index 250a9fcc5..cd29a9733 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_vnfsdk
+++ b/bootstrap/vagrant-onap/tests/test_vnfsdk
@@ -9,31 +9,31 @@ covered_functions=(
# test_clone_all_vnfsdk_repos() - Verify the cloning of VNFSDK source repo.
function test_clone_all_vnfsdk_repos {
- clone_all_vnfsdk_repos
-
- asserts_file_exist $vnfsdk_src_folder/compliance/veslibrary/README
- asserts_file_exist $vnfsdk_src_folder/functest/pom.xml
- asserts_file_exist $vnfsdk_src_folder/lctest/pom.xml
- asserts_file_exist $vnfsdk_src_folder/model/docs/index.rst
- asserts_file_exist $vnfsdk_src_folder/pkgtools/pom.xml
- asserts_file_exist $vnfsdk_src_folder/refrepo/pom.xml
- asserts_file_exist $vnfsdk_src_folder/validation/pom.xml
+ clone_repos "vnfsdk"
+
+ asserts_file_exist ${src_folders[vnfsdk]}/compliance/veslibrary/README
+ asserts_file_exist ${src_folders[vnfsdk]}/functest/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/lctest/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/model/docs/index.rst
+ asserts_file_exist ${src_folders[vnfsdk]}/pkgtools/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/refrepo/pom.xml
+ asserts_file_exist ${src_folders[vnfsdk]}/validation/pom.xml
}
# test_compile_all_vnfsdk_repos () - Verify if VNFSDK source repo compiles correctly.
function test_compile_all_vnfsdk_repos {
- clone_all_vnfsdk_repos
- compile_all_vnfsdk_repos
+ clone_repos "vnfsdk"
+ compile_repos "vnfsdk"
- asserts_file_exist $vnfsdk_src_folder/lctest/lifecycle-test/target/lifecycle-test-service-1.0.0-SNAPSHOT.war
- asserts_file_exist $vnfsdk_src_folder/pkgtools/target/vnf-sdk-pkgtools-1.0.0-SNAPSHOT.zip
- asserts_file_exist $vnfsdk_src_folder/refrepo/vnfmarket-be/vnf-sdk-marketplace/target/ROOT.war
- asserts_file_exist $vnfsdk_src_folder/validation/csarvalidation/target/validation-csar-1.0.0-SNAPSHOT.jar
+ asserts_file_exist ${src_folders[vnfsdk]}/lctest/lifecycle-test/target/lifecycle-test-service-1.0.0-SNAPSHOT.war
+ asserts_file_exist ${src_folders[vnfsdk]}/pkgtools/target/vnf-sdk-pkgtools-1.0.0-SNAPSHOT.zip
+ asserts_file_exist ${src_folders[vnfsdk]}/refrepo/vnfmarket-be/vnf-sdk-marketplace/target/ROOT.war
+ asserts_file_exist ${src_folders[vnfsdk]}/validation/csarvalidation/target/validation-csar-1.0.0-SNAPSHOT.jar
}
# test_get_vnfsdk_images() - Verify that the VNFSDK images are created or retrieved
function test_get_vnfsdk_images {
- clone_all_vnfsdk_repos
+ clone_repos "vnfsdk"
get_vnfsdk_images
asserts_image refrepo:1.0-STAGING-latest
@@ -42,7 +42,7 @@ function test_get_vnfsdk_images {
# test_install_vnfsdk() - Verify that VNFSDK docker images are running.
function test_install_vnfsdk {
- clone_all_vnfsdk_repos
+ clone_repos "vnfsdk"
get_vnfsdk_images
install_vnfsdk
diff --git a/bootstrap/vagrant-onap/tests/test_vvp b/bootstrap/vagrant-onap/tests/test_vvp
index f77fdf4d3..8e9594dcd 100644..100755
--- a/bootstrap/vagrant-onap/tests/test_vvp
+++ b/bootstrap/vagrant-onap/tests/test_vvp
@@ -9,31 +9,31 @@ covered_functions=(
# test_clone_all_vvp_repos() - Verify the cloning of VNFSDK source repo.
function test_clone_all_vvp_repos {
- clone_all_vvp_repos
-
- asserts_file_exist $vvp_src_folder/cms/pom.xml
- asserts_file_exist $vvp_src_folder/devkit/LICENSE.TXT
- asserts_file_exist $vvp_src_folder/engagementmgr/pom.xml
- asserts_file_exist $vvp_src_folder/gitlab/pom.xml
- asserts_file_exist $vvp_src_folder/image-scanner/pom.xml
- asserts_file_exist $vvp_src_folder/jenkins/pom.xml
- asserts_file_exist $vvp_src_folder/portal/pom.xml
- asserts_file_exist $vvp_src_folder/postgresql/pom.xml
- asserts_file_exist $vvp_src_folder/test-engine/pom.xml
- asserts_file_exist $vvp_src_folder/validation-scripts/LICENSE.txt
+ clone_repos "vvp"
+
+ asserts_file_exist ${src_folders[vvp]}/cms/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/devkit/LICENSE.TXT
+ asserts_file_exist ${src_folders[vvp]}/engagementmgr/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/gitlab/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/image-scanner/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/jenkins/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/portal/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/postgresql/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/test-engine/pom.xml
+ asserts_file_exist ${src_folders[vvp]}/validation-scripts/LICENSE.txt
}
# test_compile_all_vvp_repos () - Verify if VNFSDK source repo compiles correctly.
function test_compile_all_vvp_repos {
- clone_all_vvp_repos
- compile_all_vvp_repos
+ clone_repos "vvp"
+ compile_repos "vvp"
- # asserts_file_exist $vvp_src_folder/
+ # asserts_file_exist ${src_folders[vvp]}/
}
# test_get_vvp_images() - Verify that the VNFSDK images are created or retrieved
function test_get_vvp_images {
- clone_all_vvp_repos
+ clone_repos "vvp"
get_vvp_images
#asserts_image refrepo:latest
@@ -41,7 +41,7 @@ function test_get_vvp_images {
# test_install_vvp() - Verify that VNFSDK docker images are running.
function test_install_vvp {
- clone_all_vvp_repos
+ clone_repos "vvp"
get_vvp_images
install_vvp
diff --git a/bootstrap/vagrant-onap/tools/Run.ps1 b/bootstrap/vagrant-onap/tools/Run.ps1
index a74dcb72d..de57a1564 100644
--- a/bootstrap/vagrant-onap/tools/Run.ps1
+++ b/bootstrap/vagrant-onap/tools/Run.ps1
@@ -31,7 +31,8 @@ https://wiki.onap.org/display/DW/ONAP+on+Vagrant
#>
Param(
- [ValidateSet("all_in_one","dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp", "testing")]
+ [ValidateSet("all_in_one","dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp", "openstack", "msb", "oom", "testing")]
+
[Parameter(Mandatory=$True,Position=0)]
[ValidateNotNullOrEmpty()]
[String]
@@ -85,7 +86,7 @@ $env:SKIP_INSTALL=$skip_install
switch ($Command)
{
"all_in_one" { $env:DEPLOY_MODE="all-in-one" }
- { @("dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp") -contains $_ } { $env:DEPLOY_MODE="individual" }
+ { @("dns", "mr", "sdc", "aai", "mso", "robot", "vid", "sdnc", "portal", "dcae", "policy", "appc", "vfc", "multicloud", "ccsdk", "vnfsdk", "vvp", "openstack", "msb", "oom") -contains $_ } { $env:DEPLOY_MODE="individual" }
"testing"
{
$env:DEPLOY_MODE="testing"
diff --git a/bootstrap/vagrant-onap/tools/run.sh b/bootstrap/vagrant-onap/tools/run.sh
index eaa49431e..27e0aa31e 100755
--- a/bootstrap/vagrant-onap/tools/run.sh
+++ b/bootstrap/vagrant-onap/tools/run.sh
@@ -16,7 +16,7 @@ Optional arguments:
Test case to use in testing mode.
Commands:
all_in_one Deploy in all-in-one mode.
- dns|mr|sdc|aai|mso|robot|vid|sdnc|portal|dcae|policy|appc|vfc|vnfsdk|multicloud|ccsdk|vvp Deploy chosen service.
+ dns|mr|sdc|aai|mso|robot|vid|sdnc|portal|dcae|policy|appc|vfc|vnfsdk|multicloud|ccsdk|vvp|openstack|msb|oom Deploy chosen service.
testing Deploy in testing mode.
EOF
}
@@ -65,7 +65,7 @@ case $COMMAND in
"all_in_one" )
export DEPLOY_MODE='all-in-one'
;;
- "dns" | "mr" | "sdc" | "aai" | "mso" | "robot" | "vid" | "sdnc" | "portal" | "dcae" | "policy" | "appc" | "vfc" | "vnfsdk"| "multicloud" | "ccsdk" | "vvp" )
+ "dns" | "mr" | "sdc" | "aai" | "mso" | "robot" | "vid" | "sdnc" | "portal" | "dcae" | "policy" | "appc" | "vfc" | "vnfsdk"| "multicloud" | "ccsdk" | "vvp" | "openstack" | "msb" | "oom" )
export DEPLOY_MODE='individual'
;;
"testing" )
diff --git a/bootstrap/vagrant-onap/tools/setup_libvirt.sh b/bootstrap/vagrant-onap/tools/setup_libvirt.sh
new file mode 100755
index 000000000..54003d691
--- /dev/null
+++ b/bootstrap/vagrant-onap/tools/setup_libvirt.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+export VAGRANT_DEFAULT_PROVIDER=libvirt
+
+source /etc/os-release || source /usr/lib/os-release
+case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ # vagrant-libvirt dependencies
+ sudo apt-get install -y qemu libvirt-bin ebtables dnsmasq libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev
+
+ # NFS
+ sudo apt-get install -y nfs-kernel-server
+ ;;
+ rhel|centos|fedora)
+ PKG_MANAGER=$(which dnf || which yum)
+ sudo $PKG_MANAGER install -y qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm
+ ;;
+esac
+vagrant plugin install vagrant-libvirt
diff --git a/bootstrap/vagrant-onap/vagrant_utils/postinstall.sh b/bootstrap/vagrant-onap/vagrant_utils/postinstall.sh
index 89a69dd9a..3b5017a66 100755
--- a/bootstrap/vagrant-onap/vagrant_utils/postinstall.sh
+++ b/bootstrap/vagrant-onap/vagrant_utils/postinstall.sh
@@ -1,15 +1,26 @@
#!/bin/bash
-set -o xtrace
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
+if [[ "$1" == "openstack" ]]; then
+ source /var/onap/openstack
+ deploy_openstack
+ exit
+fi
source /var/onap/functions
+update_repos
create_configuration_files
-install_dev_tools
configure_bind
for serv in $@; do
source /var/onap/${serv}
configure_service ${serv}_serv.sh
init_${serv}
+ echo "source /var/onap/${serv}" >> ~/.bashrc
done
+
+echo "source /var/onap/functions" >> ~/.bashrc
diff --git a/bootstrap/vagrant-onap/vagrant_utils/unit_testing.sh b/bootstrap/vagrant-onap/vagrant_utils/unit_testing.sh
index a378ad0cd..3a97ad9cf 100755
--- a/bootstrap/vagrant-onap/vagrant_utils/unit_testing.sh
+++ b/bootstrap/vagrant-onap/vagrant_utils/unit_testing.sh
@@ -1,5 +1,9 @@
#!/bin/bash
+if [[ "$debug" == "True" ]]; then
+ set -o xtrace
+fi
+
set -o errexit
TEST_SUITE=${1:-*}
diff --git a/docs/heat.rst b/docs/heat.rst
new file mode 100644
index 000000000..78796df95
--- /dev/null
+++ b/docs/heat.rst
@@ -0,0 +1,236 @@
+ONAP HEAT Template
+------------------
+
+Source files
+~~~~~~~~~~~~
+
+- Template file: https://git.onap.org/demo/plain/heat/ONAP/onap_openstack.yaml
+- Environment file: https://git.onap.org/demo/plain/heat/ONAP/onap_openstack.env
+
+Description
+~~~~~~~~~~~
+
+The ONAP HEAT template spins up the entire ONAP platform. The template,
+onap_openstack.yaml, comes with an environment file,
+onap_openstack.env, in which all the default values are defined.
+
+.. note::
+ onap_openstack.yaml AND onap_openstack.env ARE THE HEAT TEMPLATE
+ AND ENVIRONMENT FILE CURRENTLY SUPPORTED.
+ onap_openstack_float.yaml/env AND onap_openstack_nofloat.yaml/env
+ AREN'T UPDATED AND THEIR USAGE IS NOT SUGGESTED.
+
+The HEAT template is composed of two sections: (i) parameters, and (ii)
+resources.
+The parameter section contains the declaration and
+description of the parameters that will be used to spin up ONAP, such as
+public network identifier, URLs of code and artifacts repositories, etc.
+The default values of these parameters can be found in the environment
+file.
+
+The resource section contains the definition of:
+
+- ONAP Private Management Network, which ONAP components use to communicate with each other and with VNFs
+- ONAP Virtual Machines (VMs)
+- Public/private key pair used to access ONAP VMs
+- Virtual interfaces towards the ONAP Private Management Network
+- Disk volumes.
+
+Each VM specification includes Operating System image name, VM size
+(i.e. flavor), VM name, etc. Each VM has two virtual network interfaces:
+one towards the public network and one towards the ONAP Private
+Management network, as described above. Furthermore, each VM runs a
+post-instantiation script that downloads and installs software
+dependencies (e.g. Java JDK, gcc, make, Python, ...) and ONAP software
+packages and docker containers from remote repositories.
+
+When the HEAT template is executed, the Openstack HEAT engine creates
+the resources defined in the HEAT template, based on the parameters
+values defined in the environment file.
+
+Environment file
+~~~~~~~~~~~~~~~~
+
+Before running HEAT, it is necessary to customize the environment file.
+Indeed, some parameters, namely public_net_id, pub_key,
+openstack_tenant_id, openstack_username, and openstack_api_key,
+need to be set depending on the user's environment:
+
+**Global parameters**
+
+::
+
+ public_net_id: PUT YOUR NETWORK ID/NAME HERE
+ pub_key: PUT YOUR PUBLIC KEY HERE
+ openstack_tenant_id: PUT YOUR OPENSTACK PROJECT ID HERE
+ openstack_username: PUT YOUR OPENSTACK USERNAME HERE
+ openstack_api_key: PUT YOUR OPENSTACK PASSWORD HERE
+ horizon_url: PUT THE HORIZON URL HERE
+ keystone_url: PUT THE KEYSTONE URL HERE (do not include version number)
+
+openstack_region parameter is set to RegionOne (OpenStack default). If
+your OpenStack is using another Region, please modify this parameter.
+
+public_net_id is the unique identifier (UUID) or name of the public
+network of the cloud provider. To get the public_net_id, use the
+following OpenStack CLI command (ext is the name of the external
+network, change it with the name of the external network of your
+installation)
+
+::
+
+ openstack network list | grep ext | awk '{print $2}'
+
+pub_key is string value of the public key that will be installed in
+each ONAP VM. To create a public/private key pair in Linux, please
+execute the following instruction:
+
+::
+
+ user@ubuntu:~$ ssh-keygen -t rsa
+
+The following operations to create the public/private key pair occur:
+
+::
+
+ Generating public/private rsa key pair.
+ Enter file in which to save the key (/home/user/.ssh/id_rsa):
+ Created directory '/home/user/.ssh'.
+ Enter passphrase (empty for no passphrase):
+ Enter same passphrase again:
+ Your identification has been saved in /home/user/.ssh/id_rsa.
+ Your public key has been saved in /home/user/.ssh/id_rsa.pub.
+
+openstack_username, openstack_tenant_id, and
+openstack_api_key (the user's password) are the credentials used to
+access the OpenStack-based cloud.
+
+**Images and flavors parameters**
+
+::
+
+ ubuntu_1404_image: PUT THE UBUNTU 14.04 IMAGE NAME HERE
+ ubuntu_1604_image: PUT THE UBUNTU 16.04 IMAGE NAME HERE
+ flavor_small: PUT THE SMALL FLAVOR NAME HERE
+ flavor_medium: PUT THE MEDIUM FLAVOR NAME HERE
+ flavor_large: PUT THE LARGE FLAVOR NAME HERE
+ flavor_xlarge: PUT THE XLARGE FLAVOR NAME HERE
+ flavor_xxlarge: PUT THE XXLARGE FLAVOR NAME HERE
+
+To get the images in your OpenStack environment, use the following
+OpenStack CLI command:
+
+::
+
+ openstack image list | grep 'ubuntu'
+
+To get the flavor names used in your OpenStack environment, use the
+following OpenStack CLI command:
+
+::
+
+ openstack flavor list
+
+**DNS parameters**
+
+::
+
+ dns_list: PUT THE ADDRESS OF THE EXTERNAL DNS HERE (e.g. a comma-separated list of IP addresses in your /etc/resolv.conf in UNIX-based Operating Systems). THIS LIST MUST INCLUDE THE DNS SERVER THAT OFFERS DNS AS A SERVICE (see DCAE section below for more details)
+ external_dns: PUT THE FIRST ADDRESS OF THE EXTERNAL DNS LIST HERE
+ oam_network_cidr: 10.0.0.0/16
+
+You can use the Google Public DNS addresses 8.8.8.8 and 8.8.4.4 or your internal DNS servers
+
+**DCAE Parameters**
+
+DCAE spins up ONAP's data collection and analytics system in two phases.
+The first is the launching of a bootstrap VM that is specified in the
+ONAP Heat template. This VM requires a number of deployment specific
+configuration parameters being provided so that it can subsequently
+bring up the DCAE system. There are two groups of parameters.
+
+The first group relates to the launching of DCAE VMs, including parameters such as
+the keystone URL and additional VM image IDs/names. DCAE VMs are
+connected to the same internal network as the rest of ONAP VMs, but
+dynamically spun up by the DCAE core platform. Hence these parameters
+need to be provided to DCAE. Note that although DCAE VMs will be
+launched in the same tenant as the rest of ONAP, because DCAE may use
+MultiCloud node as the agent for interfacing with the underlying cloud,
+it needs a separate keystone URL (which points to MultiCloud node
+instead of the underlying cloud).
+
+The second group of configuration parameters relate to DNS As A Service support (DNSaaS).
+DCAE requires DNSaaS for registering its VMs into organization-wide DNS service. For
+OpenStack, DNSaaS is provided by Designate. Designate support can be
+provided via an integrated service endpoint listed under the service
+catalog of the OpenStack installation; or proxied by the ONAP MultiCloud
+service. For the latter case, a number of parameters are needed to
+configure MultiCloud to use the correct Designate service. These
+parameters are described below:
+
+::
+
+ dcae_keystone_url: PUT THE KEYSTONE URL OF THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED (Note: put the MultiCloud proxy URL if the DNSaaS is proxied by MultiCloud)
+ dcae_centos_7_image: PUT THE CENTOS7 IMAGE ID/NAME AVAILABLE AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
+ dcae_security_group: PUT THE SECURITY GROUP ID/NAME TO BE USED AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
+ dcae_key_name: PUT THE ACCESS KEY-PAIR NAME REGISTER AT THE OPENSTACK INSTANCE WHERE DCAE IS DEPLOYED
+ dcae_public_key: PUT THE PUBLIC KEY OF A KEY-PAIR USED FOR DCAE BOOTSTRAP NODE TO COMMUNICATE WITH DCAE VMS
+ dcae_private_key: PUT THE PRIVATE KEY OF A KEY-PAIR USED FOR DCAE BOOTSTRAP NODE TO COMMUNICATE WITH DCAE VMS
+
+ dnsaas_config_enabled: true or false FOR WHETHER DNSAAS IS PROXIED
+ dnsaas_region: PUT THE REGION OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
+ dnsaas_tenant_id: PUT THE TENANT ID/NAME OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
+ dnsaas_keystone_url: PUT THE KEYSTONE URL OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
+ dnsaas_username: PUT THE USERNAME OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
+ dnsaas_password: PUT THE PASSWORD OF THE OPENSTACK INSTANCE WHERE DNSAAS IS PROVIDED
+
+Instantiation
+~~~~~~~~~~~~~
+
+The ONAP platform can be instantiated via Horizon (OpenStack dashboard)
+or Command Line.
+
+**Instantiation via Horizon:**
+
+- Login to Horizon URL with your personal credentials
+- Click "Stacks" from the "Orchestration" menu
+- Click "Launch Stack"
+- Paste or manually upload the HEAT template file (onap_openstack.yaml) in the "Template Source" form
+- Paste or manually upload the HEAT environment file (onap_openstack.env) in the "Environment Source" form
+- Click "Next" - Specify a name in the "Stack Name" form
+- Provide the password in the "Password" form
+- Click "Launch"
+
+**Instantiation via Command Line:**
+
+- Install the HEAT client on your machine, e.g. in Ubuntu (ref. http://docs.openstack.org/user-guide/common/cli-install-openstack-command-line-clients.html):
+
+::
+
+ apt-get install python-dev python-pip
+ pip install python-heatclient # Install heat client
+ pip install python-openstackclient # Install the Openstack client to support multiple services
+
+- Create a file (named e.g. ~/openstack/openrc) that sets all the
+  environment variables required to access your OpenStack environment:
+
+::
+
+ export OS_AUTH_URL=INSERT THE AUTH URL HERE
+ export OS_USERNAME=INSERT YOUR USERNAME HERE
+ export OS_TENANT_ID=INSERT YOUR TENANT ID HERE
+ export OS_REGION_NAME=INSERT THE REGION HERE
+ export OS_PASSWORD=INSERT YOUR PASSWORD HERE
+
+- Run the script from command line:
+
+::
+
+ source ~/openstack/openrc
+
+- In order to install the ONAP platform, type:
+
+::
+
+ heat stack-create STACK_NAME -f PATH_TO_HEAT_TEMPLATE(YAML FILE) -e PATH_TO_ENV_FILE # Old HEAT client, OR
+ openstack stack create -t PATH_TO_HEAT_TEMPLATE(YAML FILE) -e PATH_TO_ENV_FILE STACK_NAME # New Openstack client
+
diff --git a/docs/index.rst b/docs/index.rst
index 833e1aa96..f018fc6ce 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,8 +1,11 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-TODO Add files to toctree and delete this header
-------------------------------------------------
-.. toctree::
- :maxdepth: 1
+INTEGRATION
+===========
+The Integration project provides the following artifacts:
+- Heat template to deploy ONAP instance
+- Test suites to check the various ONAP components based on Robot Framework
+- Artifacts for the use-case deployments
+.. include:: heat.rst
diff --git a/test/csit/plans/aai/resources/docker-compose.yml b/test/csit/plans/aai/resources/docker-compose.yml
index 09d2a8127..80865bb90 100644
--- a/test/csit/plans/aai/resources/docker-compose.yml
+++ b/test/csit/plans/aai/resources/docker-compose.yml
@@ -1,14 +1,13 @@
version: '2'
services:
- aai-resources.api.simpledemo.openecomp.org:
- image: ${DOCKER_REGISTRY}/openecomp/aai-resources
- hostname: aai-resources.api.simpledemo.openecomp.org
+ aai-resources.api.simpledemo.onap.org:
+ image: ${DOCKER_REGISTRY}/onap/aai-resources
+ hostname: aai-resources.api.simpledemo.onap.org
environment:
- AAI_CHEF_ENV=simpledemo
- AAI_CHEF_LOC=/var/chef/aai-data/environments
- CHEF_BRANCH=master
- CHEF_GIT_URL=http://gerrit.onap.org/r/aai
- - AAI_CORE_VERSION=1.1.0-SNAPSHOT
- LOCAL_USER_ID=${USER_ID}
ports:
- 8447:8447
@@ -17,15 +16,14 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai-traversal.api.simpledemo.openecomp.org:
- image: ${DOCKER_REGISTRY}/openecomp/aai-traversal
- hostname: aai-traversal.api.simpledemo.openecomp.org
+ aai-traversal.api.simpledemo.onap.org:
+ image: ${DOCKER_REGISTRY}/onap/aai-traversal
+ hostname: aai-traversal.api.simpledemo.onap.org
environment:
- AAI_CHEF_ENV=simpledemo
- AAI_CHEF_LOC=/var/chef/aai-data/environments
- CHEF_BRANCH=master
- CHEF_GIT_URL=http://gerrit.onap.org/r/aai
- - AAI_CORE_VERSION=1.1.0-SNAPSHOT
- LOCAL_USER_ID=${USER_ID}
- DISABLE_UPDATE_QUERY=true
ports:
@@ -35,14 +33,14 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai.api.simpledemo.openecomp.org:
- image: ${AAI_HAPROXY_IMAGE}
- hostname: aai.api.simpledemo.openecomp.org
+ aai.api.simpledemo.onap.org:
+ image: ${AAI_HAPROXY_IMAGE}:${HAPROXY_VERSION}
+ hostname: aai.api.simpledemo.onap.org
ports:
- 8443:8443
links:
- - aai-resources.api.simpledemo.openecomp.org
- - aai-traversal.api.simpledemo.openecomp.org
+ - aai-resources.api.simpledemo.onap.org
+ - aai-traversal.api.simpledemo.onap.org
volumes:
- /dev/log:/dev/log
logging:
@@ -50,9 +48,9 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai.hbase.simpledemo.openecomp.org:
+ aai.hbase.simpledemo.onap.org:
image: ${HBASE_IMAGE}:${HBASE_VERSION}
- hostname: aai.hbase.simpledemo.openecomp.org
+ hostname: aai.hbase.simpledemo.onap.org
ports:
- 2181:2181
- 8080:8080
diff --git a/test/csit/plans/aai/resources/setup.sh b/test/csit/plans/aai/resources/setup.sh
index 19beb6365..a4c7648c0 100644
--- a/test/csit/plans/aai/resources/setup.sh
+++ b/test/csit/plans/aai/resources/setup.sh
@@ -24,7 +24,7 @@ NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt)
NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt)
DOCKER_IMAGE_VERSION=$(cat /opt/config/docker_version.txt)
DOCKER_REGISTRY=${NEXUS_DOCKER_REPO}
-DOCKER_IMAGE_VERSION=1.1-STAGING-latest
+DOCKER_IMAGE_VERSION=1.2-STAGING-latest
function wait_for_container() {
@@ -54,23 +54,24 @@ DOCKER_COMPOSE_CMD="docker-compose";
export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1);
export DOCKER_REGISTRY="nexus3.onap.org:10001";
export AAI_HAPROXY_IMAGE="${AAI_HAPROXY_IMAGE:-aaionap/haproxy}";
+export HAPROXY_VERSION="${HAPROXY_VERSION:-1.2.0}";
export HBASE_IMAGE="${HBASE_IMAGE:-aaionap/hbase}";
export HBASE_VERSION="${HBASE_VERSION:-1.2.0}";
docker pull ${HBASE_IMAGE}:${HBASE_VERSION};
docker pull ${HBASE_IMAGE}:${HBASE_VERSION};
-docker pull ${DOCKER_REGISTRY}/openecomp/aai-resources:${DOCKER_IMAGE_VERSION};
-docker tag ${DOCKER_REGISTRY}/openecomp/aai-resources:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/openecomp/aai-resources:latest;
+docker pull ${DOCKER_REGISTRY}/onap/aai-resources:${DOCKER_IMAGE_VERSION};
+docker tag ${DOCKER_REGISTRY}/onap/aai-resources:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/onap/aai-resources:latest;
-docker pull ${DOCKER_REGISTRY}/openecomp/aai-traversal:${DOCKER_IMAGE_VERSION};
-docker tag ${DOCKER_REGISTRY}/openecomp/aai-traversal:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/openecomp/aai-traversal:latest;
+docker pull ${DOCKER_REGISTRY}/onap/aai-traversal:${DOCKER_IMAGE_VERSION};
+docker tag ${DOCKER_REGISTRY}/onap/aai-traversal:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/onap/aai-traversal:latest;
${DOCKER_COMPOSE_CMD} stop
${DOCKER_COMPOSE_CMD} rm -f -v
# Start the hbase where the data will be stored
-HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.openecomp.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8085';
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8080';
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:9095';
@@ -99,12 +100,12 @@ else
export USER_ID=$(id -u aaiadmin);
fi;
-RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.openecomp.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container ${RESOURCES_CONTAINER_NAME} '0.0.0.0:8447';
docker logs ${RESOURCES_CONTAINER_NAME};
-${DOCKER_COMPOSE_CMD} up -d aai-traversal.api.simpledemo.openecomp.org aai.api.simpledemo.openecomp.org
+${DOCKER_COMPOSE_CMD} up -d aai-traversal.api.simpledemo.onap.org aai.api.simpledemo.onap.org
echo "A&AI Microservices, resources and traversal, are up and running along with HAProxy";
# Set the host ip for robot from the haproxy
ROBOT_VARIABLES="-v HOST_IP:`ip addr show docker0 | head -3 | tail -1 | cut -d' ' -f6 | cut -d'/' -f1`"
diff --git a/test/csit/plans/aai/traversal/docker-compose.yml b/test/csit/plans/aai/traversal/docker-compose.yml
index 01dd4b481..8b07c336f 100644
--- a/test/csit/plans/aai/traversal/docker-compose.yml
+++ b/test/csit/plans/aai/traversal/docker-compose.yml
@@ -1,14 +1,13 @@
version: '2'
services:
- aai-resources.api.simpledemo.openecomp.org:
- image: ${DOCKER_REGISTRY}/openecomp/aai-resources
- hostname: aai-resources.api.simpledemo.openecomp.org
+ aai-resources.api.simpledemo.onap.org:
+ image: ${DOCKER_REGISTRY}/onap/aai-resources
+ hostname: aai-resources.api.simpledemo.onap.org
environment:
- AAI_CHEF_ENV=simpledemo
- AAI_CHEF_LOC=/var/chef/aai-data/environments
- CHEF_BRANCH=master
- CHEF_GIT_URL=http://gerrit.onap.org/r/aai
- - AAI_CORE_VERSION=1.1.0-SNAPSHOT
- LOCAL_USER_ID=${USER_ID}
ports:
- 8447:8447
@@ -17,15 +16,14 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai-traversal.api.simpledemo.openecomp.org:
- image: ${DOCKER_REGISTRY}/openecomp/aai-traversal
- hostname: aai-traversal.api.simpledemo.openecomp.org
+ aai-traversal.api.simpledemo.onap.org:
+ image: ${DOCKER_REGISTRY}/onap/aai-traversal
+ hostname: aai-traversal.api.simpledemo.onap.org
environment:
- AAI_CHEF_ENV=simpledemo
- AAI_CHEF_LOC=/var/chef/aai-data/environments
- CHEF_BRANCH=master
- CHEF_GIT_URL=http://gerrit.onap.org/r/aai
- - AAI_CORE_VERSION=1.1.0-SNAPSHOT
- DISABLE_UPDATE_QUERY=true
ports:
- 8446:8446
@@ -34,14 +32,14 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai.api.simpledemo.openecomp.org:
- image: ${AAI_HAPROXY_IMAGE}
- hostname: aai.api.simpledemo.openecomp.org
+ aai.api.simpledemo.onap.org:
+ image: ${AAI_HAPROXY_IMAGE}:${HAPROXY_VERSION}
+ hostname: aai.api.simpledemo.onap.org
ports:
- 8443:8443
links:
- - aai-resources.api.simpledemo.openecomp.org
- - aai-traversal.api.simpledemo.openecomp.org
+ - aai-resources.api.simpledemo.onap.org
+ - aai-traversal.api.simpledemo.onap.org
volumes:
- /dev/log:/dev/log
logging:
@@ -49,9 +47,9 @@ services:
options:
max-size: "30m"
max-file: "5"
- aai.hbase.simpledemo.openecomp.org:
+ aai.hbase.simpledemo.onap.org:
image: ${HBASE_IMAGE}:${HBASE_VERSION}
- hostname: aai.hbase.simpledemo.openecomp.org
+ hostname: aai.hbase.simpledemo.onap.org
ports:
- 2181:2181
- 8080:8080
diff --git a/test/csit/plans/aai/traversal/setup.sh b/test/csit/plans/aai/traversal/setup.sh
index 010b0352e..d3635d4ca 100644
--- a/test/csit/plans/aai/traversal/setup.sh
+++ b/test/csit/plans/aai/traversal/setup.sh
@@ -24,7 +24,7 @@ NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt)
NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt)
DOCKER_IMAGE_VERSION=$(cat /opt/config/docker_version.txt)
DOCKER_REGISTRY=${NEXUS_DOCKER_REPO}
-DOCKER_IMAGE_VERSION=1.1-STAGING-latest
+DOCKER_IMAGE_VERSION=1.2-STAGING-latest
function wait_for_container() {
@@ -54,22 +54,23 @@ DOCKER_COMPOSE_CMD="docker-compose";
export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1);
export DOCKER_REGISTRY="nexus3.onap.org:10001";
export AAI_HAPROXY_IMAGE="${AAI_HAPROXY_IMAGE:-aaionap/haproxy}";
+export HAPROXY_VERSION="${HAPROXY_VERSION:-1.2.0}";
export HBASE_IMAGE="${HBASE_IMAGE:-aaionap/hbase}";
export HBASE_VERSION="${HBASE_VERSION:-1.2.0}";
docker pull ${HBASE_IMAGE}:${HBASE_VERSION};
-docker pull ${DOCKER_REGISTRY}/openecomp/aai-resources:${DOCKER_IMAGE_VERSION};
-docker tag ${DOCKER_REGISTRY}/openecomp/aai-resources:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/openecomp/aai-resources:latest;
+docker pull ${DOCKER_REGISTRY}/onap/aai-resources:${DOCKER_IMAGE_VERSION};
+docker tag ${DOCKER_REGISTRY}/onap/aai-resources:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/onap/aai-resources:latest;
-docker pull ${DOCKER_REGISTRY}/openecomp/aai-traversal:${DOCKER_IMAGE_VERSION};
-docker tag ${DOCKER_REGISTRY}/openecomp/aai-traversal:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/openecomp/aai-traversal:latest;
+docker pull ${DOCKER_REGISTRY}/onap/aai-traversal:${DOCKER_IMAGE_VERSION};
+docker tag ${DOCKER_REGISTRY}/onap/aai-traversal:${DOCKER_IMAGE_VERSION} ${DOCKER_REGISTRY}/onap/aai-traversal:latest;
${DOCKER_COMPOSE_CMD} stop
${DOCKER_COMPOSE_CMD} rm -f -v
# Start the hbase where the data will be stored
-HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.openecomp.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8085';
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8080';
wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:9095';
@@ -98,13 +99,13 @@ else
export USER_ID=$(id -u aaiadmin);
fi;
-RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.openecomp.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container ${RESOURCES_CONTAINER_NAME} '0.0.0.0:8447';
-TRAVERSAL_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-traversal.api.simpledemo.openecomp.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+TRAVERSAL_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-traversal.api.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container ${TRAVERSAL_CONTAINER_NAME} '0.0.0.0:8446';
-${DOCKER_COMPOSE_CMD} up -d aai.api.simpledemo.openecomp.org
+${DOCKER_COMPOSE_CMD} up -d aai.api.simpledemo.onap.org
echo "A&AI Microservices, resources and traversal, are up and running along with HAProxy";
# Set the host ip for robot from the haproxy
ROBOT_VARIABLES="-v HOST_IP:`ip addr show docker0 | head -3 | tail -1 | cut -d' ' -f6 | cut -d'/' -f1`"
diff --git a/test/csit/plans/appc/healthcheck/bundle_query.sh b/test/csit/plans/appc/healthcheck/bundle_query.sh
index 45a92ea31..5b1a99da4 100755
--- a/test/csit/plans/appc/healthcheck/bundle_query.sh
+++ b/test/csit/plans/appc/healthcheck/bundle_query.sh
@@ -19,7 +19,8 @@ echo $SCRIPTS
num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1)
-num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l)
+#num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l)
+num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Nothing | wc -l)
failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure)
echo "There are $num_failed_bundles failed bundles out of $num_bundles installed bundles."
diff --git a/test/csit/plans/appc/healthcheck/setup.sh b/test/csit/plans/appc/healthcheck/setup.sh
index 8798e561a..406743303 100755
--- a/test/csit/plans/appc/healthcheck/setup.sh
+++ b/test/csit/plans/appc/healthcheck/setup.sh
@@ -19,13 +19,24 @@
# Place the scripts in run order:
SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${WORKSPACE}/test/csit/scripts/appc/script1.sh
+amsterdam="$(echo ${WORKSPACE} | grep amsterdam | wc -l)"
+
+if [ "$amsterdam" != "1" ]; then
+ export APPC_DOCKER_IMAGE_VERSION=1.3.0-SNAPSHOT-latest
+ export CCSDK_DOCKER_IMAGE_VERSION=0.1-STAGING-latest
+ export BRANCH=master
+ export SOLUTION_NAME=onap
+else
+ export APPC_DOCKER_IMAGE_VERSION=v1.2.0
+ export CCSDK_DOCKER_IMAGE_VERSION=v0.1.0
+ export BRANCH=amsterdam
+ export SOLUTION_NAME=openecomp
+fi
export NEXUS_USERNAME=docker
export NEXUS_PASSWD=docker
export NEXUS_DOCKER_REPO=nexus3.onap.org:10001
export DMAAP_TOPIC=AUTO
-export APPC_DOCKER_IMAGE_VERSION=1.1-STAGING-latest
-export CCSDK_DOCKER_IMAGE_VERSION=0.1-STAGING-latest
export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1)
@@ -37,14 +48,14 @@ fi
# Clone APPC repo to get docker-compose for APPC
mkdir -p $WORKSPACE/archives/appc
cd $WORKSPACE/archives
-git clone -b master --single-branch http://gerrit.onap.org/r/appc/deployment.git appc
+git clone -b $BRANCH --single-branch http://gerrit.onap.org/r/appc/deployment.git appc
cd $WORKSPACE/archives/appc
git pull
cd $WORKSPACE/archives/appc/docker-compose
sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="$DMAAP_TOPIC"/g" docker-compose.yml
docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
-docker pull $NEXUS_DOCKER_REPO/openecomp/appc-image:$APPC_DOCKER_IMAGE_VERSION
-docker tag $NEXUS_DOCKER_REPO/openecomp/appc-image:$APPC_DOCKER_IMAGE_VERSION openecomp/appc-image:latest
+docker pull $NEXUS_DOCKER_REPO/${SOLUTION_NAME}/appc-image:$APPC_DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/${SOLUTION_NAME}/appc-image:$APPC_DOCKER_IMAGE_VERSION ${SOLUTION_NAME}/appc-image:latest
docker pull $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:$CCSDK_DOCKER_IMAGE_VERSION
docker tag $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:$CCSDK_DOCKER_IMAGE_VERSION onap/ccsdk-dgbuilder-image:latest
# start APPC containers with docker compose and configuration from docker-compose.yml
diff --git a/test/csit/plans/appc/healthcheck/testplan.txt b/test/csit/plans/appc/healthcheck/testplan.txt
index fbf2319a7..2a8c1ea84 100644
--- a/test/csit/plans/appc/healthcheck/testplan.txt
+++ b/test/csit/plans/appc/healthcheck/testplan.txt
@@ -1,5 +1,4 @@
# Test suites are relative paths under [integration.git]/test/csit/tests/.
# Place the suites in run order.
appc/healthcheck
-#appc/testsuite
diff --git a/test/csit/plans/cli/sanity-check/setup.sh b/test/csit/plans/cli/sanity-check/setup.sh
index 17fb18c45..7945ff565 100644
--- a/test/csit/plans/cli/sanity-check/setup.sh
+++ b/test/csit/plans/cli/sanity-check/setup.sh
@@ -41,7 +41,7 @@ done
sleep 60
# Start cli
-docker run -d --name cli -e CLI_MODE=daemon nexus3.onap.org:10001/onap/cli:1.1-STAGING-latest
+docker run -d --name cli -e CLI_MODE=daemon nexus3.onap.org:10001/onap/cli:v1.1.0
# Wait for cli initialization
echo Wait for CLI initialization
diff --git a/test/csit/plans/sdnc/healthcheck/setup.sh b/test/csit/plans/sdnc/healthcheck/setup.sh
index 5e51b0e6a..3ffed5d26 100644
--- a/test/csit/plans/sdnc/healthcheck/setup.sh
+++ b/test/csit/plans/sdnc/healthcheck/setup.sh
@@ -37,7 +37,7 @@ fi
# Clone SDNC repo to get docker-compose for SDNC
mkdir -p $WORKSPACE/archives/sdnc
cd $WORKSPACE/archives
-git clone -b master --single-branch http://gerrit.onap.org/r/sdnc/oam.git sdnc
+git clone -b master --single-branch --depth=1 http://gerrit.onap.org/r/sdnc/oam.git sdnc
cd $WORKSPACE/archives/sdnc
git pull
unset http_proxy https_proxy
@@ -55,13 +55,21 @@ docker tag $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:$CCSDK_DOCKER_IMAGE_VER
docker pull $NEXUS_DOCKER_REPO/onap/admportal-sdnc-image:$DOCKER_IMAGE_VERSION
docker tag $NEXUS_DOCKER_REPO/onap/admportal-sdnc-image:$DOCKER_IMAGE_VERSION onap/admportal-sdnc-image:latest
+docker pull $NEXUS_DOCKER_REPO/onap/sdnc-ueb-listener-image:$DOCKER_IMAGE_VERSION
+docker tag $NEXUS_DOCKER_REPO/onap/sdnc-ueb-listener-image:$DOCKER_IMAGE_VERSION onap/sdnc-ueb-listener-image:latest
+
+docker pull $NEXUS_DOCKER_REPO/onap/sdnc-dmaap-listener-image:$DOCKER_IMAGE_VERSION
+
+docker tag $NEXUS_DOCKER_REPO/onap/sdnc-dmaap-listener-image:$DOCKER_IMAGE_VERSION onap/sdnc-dmaap-listener-image:latest
+
+
# start SDNC containers with docker compose and configuration from docker-compose.yml
curl -L https://github.com/docker/compose/releases/download/1.9.0/docker-compose-`uname -s`-`uname -m` > docker-compose
chmod +x docker-compose
./docker-compose up -d
-# WAIT 5 minutes maximum and test every 5 seconds if SDNC is up using HealthCheck API
-TIME_OUT=500
+# WAIT 10 minutes maximum and test every 5 seconds if SDNC is up using HealthCheck API
+TIME_OUT=1000
INTERVAL=30
TIME=0
while [ "$TIME" -lt "$TIME_OUT" ]; do
@@ -120,8 +128,8 @@ if [ "$num_failed_bundles" -ge 1 ]; then
echo " $failed_bundles"
fi
-# Sleep additional 120 to give application time to finish
-sleep 120
+# Sleep additional 5 minutes (300 secs) to give application time to finish
+sleep 300
# Pass any variables required by Robot test suites in ROBOT_VARIABLES
ROBOT_VARIABLES="-v SCRIPTS:${SCRIPTS}"
diff --git a/test/csit/plans/so/sanity-check/setup.sh b/test/csit/plans/so/sanity-check/setup.sh
index 77a33bf65..cd0cbdc33 100644
--- a/test/csit/plans/so/sanity-check/setup.sh
+++ b/test/csit/plans/so/sanity-check/setup.sh
@@ -17,8 +17,12 @@
# Place the scripts in run order:
# Start all process required for executing test case
+#start mariadb
+docker run -d --name mariadb -h db.mso.testlab.openecomp.org -e MYSQL_ROOT_PASSWORD=password -p 3306:3306 -v ${WORKSPACE}/test/csit/scripts/mariadb/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d -v ${WORKSPACE}/test/csit/scripts/mariadb/conf.d:/etc/mysql/conf.d nexus3.onap.org:10001/mariadb
+
#start so
-docker run -d -i -t --name=so -p 8080:8080 nexus3.onap.org:10001/openecomp/mso
+docker run -d --name so -h mso.mso.testlab.openecomp.org -e MYSQL_ROOT_PASSWORD=password --link=mariadb:db.mso.testlab.openecomp.org -p 8080:8080 -v ${WORKSPACE}/test/csit/scripts/so/chef-config:/shared nexus3.onap.org:10001/openecomp/mso:1.1-STAGING-latest
+
SO_IP=`get-instance-ip.sh so`
# Wait for initialization
diff --git a/test/csit/plans/so/sanity-check/teardown.sh b/test/csit/plans/so/sanity-check/teardown.sh
index a924a074f..1696c745c 100644
--- a/test/csit/plans/so/sanity-check/teardown.sh
+++ b/test/csit/plans/so/sanity-check/teardown.sh
@@ -15,4 +15,5 @@
# limitations under the License.
#
+kill-instance.sh mariadb
kill-instance.sh so
diff --git a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
index f2eadb010..047c067e8 100644
--- a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
+++ b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
@@ -42,20 +42,20 @@ done
echo sleep 60
sleep 60
-# start vfc-ztevmanagerdriver
-docker run -d --name vfc-ztevmanagerdriver -e MSB_ADDR=${MSB_IAG_IP}:80 nexus3.onap.org:10001/onap/vfc/ztevmanagerdriver
-ZTEVMANAGERDRIVER_IP=`get-instance-ip.sh vfc-ztevmanagerdriver`
+# start vfc-ztevnfmdriver
+docker run -d --name vfc-ztevnfmdriver -p 8410:8410 -e MSB_ADDR=${MSB_IAG_IP}:80 nexus3.onap.org:10001/onap/vfc/ztevnfmdriver
+ZTEVNFMDRIVER_IP=`get-instance-ip.sh vfc-ztevnfmdriver`
# Wait for initialization
for i in {1..10}; do
- curl -sS ${ZTEVMANAGERDRIVER_IP}:8410 && break
+ curl -sS ${ZTEVNFMDRIVER_IP}:8410 && break
echo sleep $i
sleep $i
done
# Start svnfm-huawei
-docker run -d --name vfc-svnfm-huawei -e MSB_ADDR=${MSB_IAG_IP}:80 nexus3.onap.org:10001/onap/vfc/nfvo/svnfm/huawei
+docker run -d --name vfc-svnfm-huawei -p 8482:8482 -p 8443:8443 -e MSB_ADDR=${MSB_IAG_IP}:80 nexus3.onap.org:10001/onap/vfc/nfvo/svnfm/huawei
SERVICE_IP=`get-instance-ip.sh vfc-svnfm-huawei`
for i in {1..20}; do
curl -sS ${SERVICE_IP}:8482 && break
@@ -64,4 +64,4 @@ for i in {1..20}; do
done
# Pass any variables required by Robot test suites in ROBOT_VARIABLES
-ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVMANAGERDRIVER_IP:${ZTEVMANAGERDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP} -v SCRIPTS:${SCRIPTS}"
+ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVNFMDRIVER_IP:${ZTEVNFMDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP} -v SCRIPTS:${SCRIPTS}"
diff --git a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/teardown.sh b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/teardown.sh
index e440068fb..a7909581e 100644
--- a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/teardown.sh
+++ b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/teardown.sh
@@ -19,6 +19,6 @@
kill-instance.sh msb_internal_apigateway
kill-instance.sh msb_discovery
kill-instance.sh msb_consul
-kill-instance.sh vfc-ztevmanagerdriver
+kill-instance.sh vfc-ztevnfmdriver
kill-instance.sh vfc-svnfm-huawei
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
index d00a66c52..5a578230b 100644
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
@@ -48,30 +48,40 @@ DOCKER_REPOSITORY="nexus3.onap.org:10001"
IMAGE="wfengine-activiti"
IMAGE_ACTIVITI_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"
+# get current host IP address
+SERVICE_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
+
# start wfengine-activiti
-##docker run -d --name ${IMAGE} -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME}
-##WFENGINEACTIVITIR_IP=`get-instance-ip.sh ${IMAGE}`
+docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8804 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME}
+WFENGINE_ACTIVITI_IP=`get-instance-ip.sh vfc_wfengine_activiti`
# Wait for initialization
-##for i in {1..10}; do
-## curl -sS ${WFENGINEACTIVITIR_IP}:8080 && break
-## echo sleep $i
-## sleep $i
-##done
+for i in {1..10}; do
+ curl -sS ${WFENGINE_ACTIVITI_IP}:8080 && break
+ echo sleep $i
+ sleep $i
+done
+for i in {1..10}; do
+ curl -sS ${SERVICE_IP}:8804 && break
+ echo sleep $i
+ sleep $i
+done
+docker logs vfc_wfengine_activiti
-##IMAGE="wfengine-mgrservice"
-##IMAGE_MGRSERVICE_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"
+IMAGE="wfengine-mgrservice"
+IMAGE_MGRSERVICE_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"
# Start wfengine-mgrservice
-#docker run -d --name ${IMAGE} -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME}
+docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8805 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME}
##docker run -d --name ${IMAGE} -e OPENPALETTE_MSB_IP=${WFENGINEACTIVITIR_IP} -e OPENPALETTE_MSB_PORT=8080 ${IMAGE_MGRSERVICE_NAME}
-##WFENGINEMGRSERVICE_IP=`get-instance-ip.sh ${IMAGE}`
-##for i in {1..10}; do
-## curl -sS ${WFENGINEMGRSERVICE_IP}:10550 && break
-## echo sleep $i
-## sleep $i
-##done
+WFENGINE_MGRSERVICE_IP=`get-instance-ip.sh vfc_wfengine_mgrservice`
+for i in {1..10}; do
+ curl -sS ${WFENGINE_MGRSERVICE_IP}:10550 && break
+ echo sleep $i
+ sleep $i
+done
+docker logs vfc_wfengine_mgrservice
# Pass any variables required by Robot test suites in ROBOT_VARIABLES
-#ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v MSB_IP:${MSB_IAG_IP} -v MSB_PORT:80 -v MSB_DISCOVERY_IP:${MSB_DISCOVERY_IP} -v WFENGINEACTIVITIR_IP:${WFENGINEACTIVITIR_IP} -v WFENGINEACTIVITIR_PORT:8080 -v WFENGINEMGRSERVICE_IP:${WFENGINEMGRSERVICE_IP} -v WFENGINEMGRSERVICE_PORT:10550 -v SCRIPTS:${SCRIPTS}"
+ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v MSB_IP:${MSB_IAG_IP} -v MSB_PORT:80 -v MSB_DISCOVERY_IP:${MSB_DISCOVERY_IP} -v ACTIVITI_IP:${WFENGINE_ACTIVITI_IP} -v ACTIVITI_PORT:8080 -v MGRSERVICE_IP:${WFENGINE_MGRSERVICE_IP} -v MGRSERVICE_PORT:10550 -v SCRIPTS:${SCRIPTS}"
##ROBOT_VARIABLES="-v MSB_IAG_IP:${WFENGINEACTIVITIR_IP} -v MSB_IP:${WFENGINEMGRSERVICE_IP} -v MSB_PORT:10550 -v MSB_DISCOVERY_IP:${WFENGINEACTIVITIR_IP} -v MSB_DISCOVERY_PORT:8080 -v WFENGINEACTIVITIR_IP:${WFENGINEACTIVITIR_IP} -v WFENGINEACTIVITIR_PORT:8080 -v WFENGINEMGRSERVICE_IP:${WFENGINEMGRSERVICE_IP} -v WFENGINEMGRSERVICE_PORT:10550 -v SCRIPTS:${SCRIPTS}" \ No newline at end of file
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
index caa506ecf..384bc3935 100644
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
@@ -19,5 +19,5 @@
kill-instance.sh msb_internal_apigateway
kill-instance.sh msb_discovery
kill-instance.sh msb_consul
-##kill-instance.sh wfengine-mgrservice
-##kill-instance.sh wfengine-activiti
+kill-instance.sh vfc_wfengine_mgrservice
+kill-instance.sh vfc_wfengine_activiti
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
index ff9f4d5d6..5f6910bdd 100644
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
@@ -1,4 +1,4 @@
-# Test suites are relative paths under [integration.git]/test/csit/tests/.
-# Place the suites in run order.
-
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+
vfc/nfvo-wfengine/workflow.robot \ No newline at end of file
diff --git a/test/csit/plans/vvp/sanity/setup.sh b/test/csit/plans/vvp/sanity/setup.sh
new file mode 100644
index 000000000..cab074c06
--- /dev/null
+++ b/test/csit/plans/vvp/sanity/setup.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+# Place the scripts in run order:
+
+
+source ${WORKSPACE}/test/csit/scripts/vvp/clone_and_setup_vvp_data.sh
+
+source ${WORKSPACE}/test/csit/scripts/vvp/start_vvp_containers.sh
+
+source ${WORKSPACE}/test/csit/scripts/vvp/docker_health.sh
+
+source ${WORKSPACE}/test/csit/scripts/vvp/start_vvp_sanity.sh
+
+
+VVP_IP=`${WORKSPACE}/test/csit/scripts/get-instance-ip.sh vvp-engagementmgr`
+echo VVP_IP=${VVP_IP}
+
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v VVP_IP:${VVP_IP}"
diff --git a/test/csit/plans/vvp/sanity/teardown.sh b/test/csit/plans/vvp/sanity/teardown.sh
new file mode 100644
index 000000000..b0f33a646
--- /dev/null
+++ b/test/csit/plans/vvp/sanity/teardown.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+source ${WORKSPACE}/test/csit/scripts/vvp/kill_containers_and_remove_dataFolders.sh
diff --git a/test/csit/plans/vvp/sanity/testplan.txt b/test/csit/plans/vvp/sanity/testplan.txt
new file mode 100644
index 000000000..0acb50833
--- /dev/null
+++ b/test/csit/plans/vvp/sanity/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+vvp/sanity
diff --git a/test/csit/run-csit.sh b/test/csit/run-csit.sh
index 3070239b7..4b2a57181 100755
--- a/test/csit/run-csit.sh
+++ b/test/csit/run-csit.sh
@@ -94,7 +94,7 @@ if ! type pybot > /dev/null; then
fi
# install required Robot libraries
-pip install --upgrade robotframework-extendedselenium2library
+pip install robotframework-selenium2library==1.8.0 robotframework-extendedselenium2library==0.9.1
# install eteutils
mkdir -p ${ROBOT_VENV}/src/onap
@@ -102,6 +102,8 @@ rm -rf ${ROBOT_VENV}/src/onap/testsuite
git clone https://gerrit.onap.org/r/testsuite/python-testing-utils.git ${ROBOT_VENV}/src/onap/testsuite/python-testing-utils
pip install --upgrade ${ROBOT_VENV}/src/onap/testsuite/python-testing-utils
+pip freeze
+
# install chrome driver
if [ ! -x ${ROBOT_VENV}/bin/chromedriver ]; then
pushd ${ROBOT_VENV}/bin
diff --git a/test/csit/scripts/nfvo-wfengine/demo.bpmn20.xml b/test/csit/scripts/nfvo-wfengine/demo.bpmn20.xml
new file mode 100644
index 000000000..4f0c83a6e
--- /dev/null
+++ b/test/csit/scripts/nfvo-wfengine/demo.bpmn20.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<definitions xmlns="http://www.omg.org/spec/BPMN/20100524/MODEL" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:activiti="http://activiti.org/bpmn" xmlns:bpmndi="http://www.omg.org/spec/BPMN/20100524/DI" xmlns:omgdc="http://www.omg.org/spec/DD/20100524/DC" xmlns:omgdi="http://www.omg.org/spec/DD/20100524/DI" typeLanguage="http://www.w3.org/2001/XMLSchema" expressionLanguage="http://www.w3.org/1999/XPath" targetNamespace="http://www.activiti.org/processdef">
+ <process id="demo" name="demofirst" isExecutable="true">
+ <startEvent id="sid-6B31A927-8A0F-4A2F-BC58-A2E3D90327D2"/>
+ <userTask id="sid-FDC595C2-41CE-4DE8-86D4-562414F0A0F3"/>
+ <sequenceFlow id="sid-B7ABA5B4-FB87-486C-B7D7-CC0FE5A4CE31" sourceRef="sid-6B31A927-8A0F-4A2F-BC58-A2E3D90327D2" targetRef="sid-FDC595C2-41CE-4DE8-86D4-562414F0A0F3"/>
+ <endEvent id="sid-EEACC228-A53D-41E8-96A1-8F2E4BF38596"/>
+ <sequenceFlow id="sid-07534141-12BB-49F0-9288-137A4E0BC8D8" sourceRef="sid-FDC595C2-41CE-4DE8-86D4-562414F0A0F3" targetRef="sid-EEACC228-A53D-41E8-96A1-8F2E4BF38596"/>
+ </process>
+
+</definitions> \ No newline at end of file
diff --git a/test/csit/scripts/so/chef-config/aai.crt b/test/csit/scripts/so/chef-config/aai.crt
new file mode 100644
index 000000000..4ffa426c1
--- /dev/null
+++ b/test/csit/scripts/so/chef-config/aai.crt
@@ -0,0 +1,27 @@
+-----BEGIN CERTIFICATE-----
+MIIEiTCCA3GgAwIBAgIJAIPKfDLcn3MpMA0GCSqGSIb3DQEBCwUAMIGtMQswCQYD
+VQQGEwJVUzELMAkGA1UECAwCTkoxEzARBgNVBAcMCkJlZG1pbnN0ZXIxEjAQBgNV
+BAoMCU9wZW5FQ09NUDETMBEGA1UECwwKc2ltcGxlZGVtbzEqMCgGA1UEAwwhT3Bl
+bkVDT01QIHNpbXBsZWRlbW8gU2VydmVyIENBIFgxMScwJQYJKoZIhvcNAQkBFhhz
+aW1wbGVkZW1vQG9wZW5lY29tcC5vcmcwHhcNMTYxMTMwMTUzODM5WhcNMTcxMTMw
+MTUzODM5WjCBuTELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5KMRMwEQYDVQQHDApC
+ZWRtaW5zdGVyMRIwEAYDVQQKDAlPcGVuRUNPTVAxEzARBgNVBAsMClNpbXBsZURl
+bW8xKTAnBgNVBAMMIGFhaS5hcGkuc2ltcGxlZGVtby5vcGVuZWNvbXAub3JnMTQw
+MgYJKoZIhvcNAQkBFiVhYWktaG9zdEBhcGkuc2ltcGxlZGVtby5vcGVuZWNvbXAu
+b3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwQrQl8A0rT0Jjlos
+Mr/7LEhT5UOif4GGPOk+3NCIxT3lOqAbUf+d9ZXyT2jWFRiKWua03vQ+Dxc8c2h2
+RRuH8LwEiOiWqPjWRxNqsARzZMI3ryHFCFBZh0FcpjH9kEeKVlLDYuV68k+ZucKd
+NiqUNn61lD7kbmEGwvzKwf91FrJ09+CBMx1OnWKm3gCNKDqAEFMZCOdn2MgesJYB
+/03lzPBS1jDfBXImXRcTBzpgA+wdCLn0cIQ1eLWUwS5tUqUJNh36nHdVyJ0P2Yjd
+JLuxhFcmBKOz1ShyyO+BBtKBO8EGbU6qKflOiwOw0Fsn8LjKcrHQ58NPui5y04BU
+Rypf3QIDAQABo4GdMIGaMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgO4MB0G
+A1UdDgQWBBQyMUOsE2J+CKzK0qd8KFBD2gaWyjBbBgNVHSAEVDBSMFAGBFUdIAAw
+SDBGBggrBgEFBQcCAjA6GjhLZWVwIGF3YXkgZnJvbSBjaGlsZHJlbi4gIFRoaXMg
+Y2VydGlmaWNhdGUgaXMgbm90IGEgdG95LjANBgkqhkiG9w0BAQsFAAOCAQEAnkoy
+2tWJOyyyIQwtVojUxv1GWQPnw3WCUcKpuX4CJhHXLxNErW1fBg7bmo08BNmBPPpq
+WrJsy5lbBgUo9kgpViux5Stfy1rRIRsRLfl/icgCvJmUAxkmRCZL7yUvwG4K7s+8
+DwT+nW/XuWNP6Hd/qHccexB6COJ8KwvTdVoxAkCdX8qw4MCb/f7Kb1yle/vwBM5Q
+UUONCJ4bEns1vnb9DGlNDUJNwCfwORAaVJpVS38Mv4UnSTmb2KMePtCWcx/dNsYR
+2XrSGqLDnTvHwOpyhbfFTmackysGoSuDytORXy8YbwEiF13BwEK8i3rgNN0Z2ojf
+cpmE2xxmaa+A2uuN6g==
+-----END CERTIFICATE----- \ No newline at end of file
diff --git a/test/csit/scripts/so/chef-config/mso-docker.json b/test/csit/scripts/so/chef-config/mso-docker.json
new file mode 100644
index 000000000..13b0d22fc
--- /dev/null
+++ b/test/csit/scripts/so/chef-config/mso-docker.json
@@ -0,0 +1,220 @@
+{
+ "name": "mso-docker",
+ "description": "MSO Docker Images",
+ "chef_type": "environment",
+ "json_class": "Chef::Environment",
+
+ "default_attributes":
+ {
+ "mso_config_path": "/etc/mso/config.d/",
+
+ "mso-topology-config":
+ {
+ "msb-server-ip": "172.30.3.34",
+ "msb-server-port": "80"
+ },
+
+ "mso-api-handler-infra-config":
+ {
+ "bpelURL": "http://mso:8080",
+ "bpelAuth": "786864AA53D0DCD881AED1154230C0C3058D58B9339D2EFB6193A0F0D82530E1",
+ "camundaURL": "http://mso:8080",
+ "camundaAuth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1"
+ },
+
+ "asdc-connections":
+ {
+ "asdc-controller1":
+ {
+ "user": "mso",
+ "consumerGroup": "sdc-OpenSource-Env1",
+ "consumerId": "sdc-COpenSource-Env11",
+ "environmentName": "SDC-OpenSource-Env1",
+ "asdcAddress": "c2.vm1.sdc.simpledemo.openecomp.org:8443",
+ "password": "613AF3483E695524F9857643B697FA51C7A9A0951094F53791485BF3458F9EADA37DBACCCEBD0CB242B85B4062745247",
+ "pollingInterval": 60,
+ "pollingTimeout": 60,
+ "relevantArtifactTypes": "HEAT,HEAT_ENV,HEAT_VOL",
+ "activateServerTLSAuth": "false",
+ "keyStorePassword": "",
+ "keyStorePath": ""
+ },
+
+ "asdc-controller2":
+ {
+ "user": "user",
+ "consumerGroup": "mso",
+ "consumerId": "mso",
+ "environmentName": "PROD",
+ "asdcAddress": "asdc_hostname2:8443",
+ "password": "f3895035812addbf115bfaf7d2dc850e",
+ "pollingInterval": 60,
+ "pollingTimeout": 60,
+ "relevantArtifactTypes": "HEAT,HEAT_ENV,HEAT_VOL",
+ "activateServerTLSAuth": "false",
+ "keyStorePassword": "",
+ "keyStorePath": ""
+ }
+ },
+
+ "mso-sdnc-adapter-config":
+ {
+ "sdncurls":
+ [
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/L3SDN-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/config/L3SDN-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/Firewall-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/config",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/VNF-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/NBNC-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/NORTHBOUND-API:service-topology-operation",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/GENERIC-RESOURCE-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/restconf/operations/VNFTOPOLOGYAIC-API:",
+ "http://c1.vm1.sdnc.simpledemo.openecomp.org:8282/"
+ ],
+
+ "bpelurl": "http://c1.vm1.mso.simpledemo.openecomp.org:8080/mso/SDNCAdapterCallbackService",
+ "restbpelurl": "http://c1.vm1.mso.simpledemo.openecomp.org:8080/mso/WorkflowMessage",
+ "myurl": "http://c1.vm1.mso.simpledemo.openecomp.org:8080/adapters/rest/SDNCNotify",
+ "sdncauth": "263f7d5f944d4d0c76db74b4148bec67d0bc796a874bc0d2a2a12aae89a866aa69133f700f391f784719a37f6a68d29bf5a2fbae1dab0402db7788c800c5ba73",
+ "bpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1",
+ "sdncconnecttime": "5000"
+ },
+
+ "mso-po-adapter-config":
+ {
+ "identity_services":
+ [
+ {
+ "dcp_clli": "RAX_KEYSTONE",
+ "identity_url": "https://identity.api.rackspacecloud.com/v2.0",
+ "mso_id": "RACKSPACE_ACCOUNT_ID",
+ "mso_pass": "RACKSPACE_ACCOUNT_APIKEY",
+ "admin_tenant": "service",
+ "member_role": "admin",
+ "tenant_metadata": "true",
+ "identity_server_type": "KEYSTONE",
+ "identity_authentication_type": "RACKSPACE_APIKEY"
+ }
+ ],
+
+ "cloud_sites":
+ [
+ {
+ "id": "Dallas",
+ "aic_version": "2.5",
+ "lcp_clli": "DFW",
+ "region_id": "DFW",
+ "identity_service_id": "RAX_KEYSTONE"
+ },
+
+ {
+ "id": "Northern Virginia",
+ "aic_version": "2.5",
+ "lcp_clli": "IAD",
+ "region_id": "IAD",
+ "identity_service_id": "RAX_KEYSTONE"
+ },
+
+ {
+ "id": "Chicago",
+ "aic_version": "2.5",
+ "lcp_clli": "ORD",
+ "region_id": "ORD",
+ "identity_service_id": "RAX_KEYSTONE"
+ }
+ ],
+
+ "vnfbpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1",
+ "checkrequiredparameters": "true",
+ "nwbpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1"
+ },
+
+ "mso-workflow-message-adapter-config":
+ {
+ "wmbpelurl": "http://mso:8080/mso/WorkflowMessage",
+ "wmbpelauth": "5119D1AF37F671FC01FFAD2151D93EFB2BBB503E879FD07104D024EDDF118FD1"
+ },
+
+ "mso-appc-adapter-config":
+ {
+ "appc_url": "http://localhost:18080",
+ "appc_stub": "/AppC-Simulator/action/execute",
+ "appc_auth": "786864AA53D0DCD881AED1154230C0C3058D58B9339D2EFB6193A0F0D82530E1",
+ "appc_timeout": "30",
+ "ueb_cluster_address": "http://localhost:18080",
+ "ueb_consumer_group": "testgroup",
+ "ueb_consumer_id": "testuser",
+ "ueb_topic": "queuetopic",
+ "ueb_polling_interval": "30",
+ "ueb_user": "user",
+ "ueb_password": "1ec0d74615d4e4639f991c0590c83b88",
+ "bpel_url": "http://localhost:18080",
+ "bpel_stub": "/AppC-Simulator/bpmn/appCResponse",
+ "bpel_auth": "786864AA53D0DCD881AED1154230C0C3058D58B9339D2EFB6193A0F0D82530E1",
+ "bpel_timeout": "30"
+ },
+
+ "mso-bpmn-config":
+ {
+ "urnFileSystemLoadingEnabled": "true"
+ },
+
+ "mso-bpmn-urn-config":
+ {
+ "debug":"false",
+ "invariantIdL3ToHigherLayerDeleteBonding": "50359538-066f-4a8d-807f-f2bc8eaa79dc",
+ "versionIdL3ToHigherLayerDeleteBonding": "52dbec20-47aa-42e4-936c-331d8e350d44",
+ "infraCustomerId": "21014aa2-526b-11e6-beb8-9e71128cae77",
+ "sniroAuth": "test:testpwd",
+ "sniroEndpoint": "http://sniro.api.simpledemo.openecomp.org:8080/sniro/api/v2/placement",
+ "sniroTimeout": "PT30M",
+ "serviceAgnosticSniroHost": "http://sniro.api.simpledemo.openecomp.org:8080",
+ "serviceAgnosticSniroEndpoint": "/sniro/api/v2/placement",
+ "aaiEndpoint": "https://aai.api.simpledemo.openecomp.org:8443",
+ "aaiAuth": "2630606608347B7124C244AB0FE34F6F",
+ "adaptersNamespace": "http://org.openecomp.mso",
+ "adaptersCompletemsoprocessEndpoint": "http://mso:8080/CompleteMsoProcess",
+ "adaptersDbEndpoint": "http://mso:8080/dbadapters/MsoRequestsDbAdapter",
+ "adaptersOpenecompDbEndpoint": "http://mso:8080/dbadapters/RequestsDbAdapter",
+ "catalogDbEndpoint": "http://mso:8080/ecomp/mso/catalog",
+ "adaptersSdncEndpoint": "http://mso:8080/adapters/SDNCAdapter",
+ "adaptersSdncRestEndpoint": "http://mso:8080/adapters/rest/v1/sdnc",
+ "adaptersTenantEndpoint": "http://mso:8080/tenants/TenantAdapter",
+ "adaptersDbAuth": "6B0E6863FB8EE010AB6F191B3C0489437601E81DC7C86305CB92DB98AFC53D74",
+ "adaptersWorkflowMessageEndpoint": "http://mso:8080/workflows/messages",
+ "workflowMessageEndpoint": "http://mso:8080/mso/WorkflowMessage",
+ "workflowSdncAdapterCallback": "http://mso:8080/mso/SDNCAdapterCallbackService",
+ "workflowSdncReplicationDelay": "PT5S",
+ "workflowAaiDistributionDelay": "PT30S",
+ "msoKey": "07a7159d3bf51a0e53be7a8f89699be7",
+ "adaptersPoAuth": "6B0E6863FB8EE010AB6F191B3C0489437601E81DC7C86305CB92DB98AFC53D74",
+ "sdncTimeout": "PT5M",
+ "rollback": "true",
+ "adaptersNetworkEndpoint": "http://mso:8080/networks/NetworkAdapter",
+ "adaptersNetworkRestEndpoint": "http://mso:8080/networks/rest/v1/networks",
+ "adaptersVnfAsyncEndpoint": "http://mso:8080/vnfs/VnfAdapterAsync",
+ "workflowVnfAdapterDeleteCallback": "http://mso:8080/mso/vnfAdapterNotify",
+ "workflowVnfAdapterCreateCallback": "http://mso:8080/mso/vnfAdapterNotify",
+ "adaptersVnfRestEndpoint": "http://mso:8080/vnfs/rest/v1/vnfs",
+ "workflowVnfAdapterRestCallback": "http://mso:8080/mso/vnfAdapterRestNotify",
+ "poTimeout": "PT5M",
+ "sdncFirewallYangModel": "http://com/att/svc/mis/firewall-lite-gui",
+ "sdncFirewallYangModelVersion": "2015-05-15",
+ "sdncTimeoutFirewall": "20",
+ "callbackRetryAttempts": "30",
+ "callbackRetrySleepTime": "1000",
+ "workflowL3ToHigherLayerAddBondingModelName": "WAN Bonding",
+ "workflowL3ToHigherLayerAddBondingModelVersion": "2.0"
+ }
+ },
+
+ "override_attributes":
+ {
+ },
+
+ "cookbook_versions":
+ {
+ "mso-config": "~> 1.0.0"
+ }
+}
diff --git a/test/csit/scripts/so/mariadb/conf.d/mariadb1.cnf b/test/csit/scripts/so/mariadb/conf.d/mariadb1.cnf
new file mode 100644
index 000000000..39ed02248
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/conf.d/mariadb1.cnf
@@ -0,0 +1,193 @@
+# Example MySQL config file for medium systems.
+#
+# This is for a system with 8G of memory where MySQL plays
+# an important part, or for systems with up to 128M of memory where MySQL
+# is used together with other programs (such as a web server)
+#
+# In this file, you can use all long options that a program supports.
+# If you want to know which options a program supports, run the program
+# with the "--help" option.
+
+# The following options will be passed to all MySQL clients
+##[client]
+##user = root
+##port = 3306
+##socket = //opt/app/mysql/mysql.sock
+
+# Here follows entries for some specific programs
+
+# The MySQL server
+[mysqld]
+##performance_schema
+
+slow_query_log =ON
+long_query_time =2
+slow_query_log_file =//var/lib/mysql/slow_query.log
+
+skip-external-locking
+explicit_defaults_for_timestamp = true
+skip-symbolic-links
+local-infile = 0
+key_buffer_size = 16M
+max_allowed_packet = 4M
+table_open_cache = 100
+sort_buffer_size = 512K
+net_buffer_length = 8K
+read_buffer_size = 256K
+read_rnd_buffer_size = 512K
+myisam_sort_buffer_size = 8M
+max_connections = 500
+lower_case_table_names = 1
+thread_stack = 256K
+thread_cache_size = 25
+query_cache_size = 8M
+query_cache_type = 0
+query_prealloc_size = 512K
+query_cache_limit = 1M
+
+# Password validation
+##plugin-load-add=simple_password_check.so
+##simple_password_check_other_characters=0
+
+# Audit Log settings
+plugin-load-add=server_audit.so
+server_audit=FORCE_PLUS_PERMANENT
+server_audit_file_path=//var/lib/mysql/audit.log
+server_audit_file_rotate_size=50M
+server_audit_events=CONNECT,QUERY,TABLE
+server_audit_logging=on
+
+# Don't listen on a TCP/IP port at all. This can be a security enhancement,
+# if all processes that need to connect to mysqld run on the same host.
+# All interaction with mysqld must be made via Unix sockets or named pipes.
+# Note that using this option without enabling named pipes on Windows
+# (via the "enable-named-pipe" option) will render mysqld useless!
+#
+#skip-networking
+
+# Replication Master Server (default)
+# binary logging is required for replication
+##log-bin=//var/lib/mysql/mysql-bin
+
+# binary logging format - mixed recommended
+binlog_format=row
+
+# required unique id between 1 and 2^32 - 1
+# defaults to 1 if master-host is not set
+# but will not function as a master if omitted
+
+# Replication Slave (comment out master section to use this)
+#
+# To configure this host as a replication slave, you can choose between
+# two methods:
+#
+# 1) Use the CHANGE MASTER TO command (fully described in our manual) -
+# the syntax is:
+#
+# CHANGE MASTER TO MASTER_HOST=<host>, MASTER_PORT=<port>,
+# MASTER_USER=<user>, MASTER_PASSWORD=<password> ;
+#
+# where you replace <host>, <user>, <password> by quoted strings and
+# <port> by the master's port number (3306 by default).
+#
+# Example:
+#
+# CHANGE MASTER TO MASTER_HOST='125.564.12.1', MASTER_PORT=3306,
+# MASTER_USER='joe', MASTER_PASSWORD='secret';
+#
+# OR
+#
+# 2) Set the variables below. However, in case you choose this method, then
+# start replication for the first time (even unsuccessfully, for example
+# if you mistyped the password in master-password and the slave fails to
+# connect), the slave will create a master.info file, and any later
+# change in this file to the variables' values below will be ignored and
+# overridden by the content of the master.info file, unless you shutdown
+# the slave server, delete master.info and restart the slave server.
+# For that reason, you may want to leave the lines below untouched
+# (commented) and instead use CHANGE MASTER TO (see above)
+#
+# required unique id between 2 and 2^32 - 1
+# (and different from the master)
+# defaults to 2 if master-host is set
+# but will not function as a slave if omitted
+#server-id = 2
+#
+# The replication master for this slave - required
+#master-host = <hostname>
+#
+# The username the slave will use for authentication when connecting
+# to the master - required
+#master-user = <username>
+#
+# The password the slave will authenticate with when connecting to
+# the master - required
+#master-password = <password>
+#
+# The port the master is listening on.
+# optional - defaults to 3306
+#master-port = <port>
+#
+# binary logging - not required for slaves, but recommended
+#log-bin=mysql-bin
+
+# Uncomment the following if you are using InnoDB tables
+##innodb_data_home_dir = //opt/app/mysql/data
+##innodb_data_file_path = ibdata1:20M:autoextend:max:32G
+##innodb_log_group_home_dir = //opt/app/mysql/iblogs
+# You can set .._buffer_pool_size up to 50 - 80 %
+# of RAM but beware of setting memory usage too high
+#innodb_buffer_pool_size = 6380M
+#innodb_additional_mem_pool_size = 2M
+# Set .._log_file_size to 25 % of buffer pool size
+innodb_log_file_size = 150M
+innodb_log_files_in_group = 3
+innodb_log_buffer_size = 8M
+#innodb_flush_log_at_trx_commit = 1
+innodb_lock_wait_timeout = 50
+innodb_autoextend_increment = 100
+expire_logs_days = 8
+open_files_limit = 2000
+transaction-isolation=READ-COMMITTED
+####### Galera parameters #######
+## Galera Provider configuration
+wsrep_provider=/usr/lib/galera/libgalera_smm.so
+wsrep_provider_options="gcache.size=2G; gcache.page_size=1G"
+## Galera Cluster configuration
+wsrep_cluster_name="MSO-automated-tests-cluster"
+wsrep_cluster_address="gcomm://"
+#wsrep_cluster_address="gcomm://mariadb1,mariadb2,mariadb3"
+##wsrep_cluster_address="gcomm://192.169.3.184,192.169.3.185,192.169.3.186"
+## Galera Synchronization configuration
+wsrep_sst_method=rsync
+#wsrep_sst_method=xtrabackup-v2
+#wsrep_sst_auth="sstuser:Mon#2o!6"
+## Galera Node configuration
+wsrep_node_name="mariadb1"
+##wsrep_node_address="192.169.3.184"
+wsrep_on=ON
+## Status notification
+#wsrep_notify_cmd=/opt/app/mysql/bin/wsrep_notify
+#######
+
+
+[mysqldump]
+quick
+max_allowed_packet = 16M
+
+[mysql]
+no-auto-rehash
+# Remove the next comment character if you are not familiar with SQL
+#safe-updates
+
+[myisamchk]
+key_buffer_size = 20971520
+
+##[mysqlhotcopy]
+##interactive-timeout
+##[mysqld_safe]
+##malloc-lib=//opt/app/mysql/local/lib/libjemalloc.so.1
+##log-error=//opt/app/mysql/log/mysqld.log
+
+general_log_file = /var/log/mysql/mysql.log
+general_log = 1
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh
new file mode 100644
index 000000000..326f27356
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/01-load-default-sql-files.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+#
+# ECOMP and OpenECOMP are trademarks
+# and service marks of AT&T Intellectual Property.
+#
+#
+cd /docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw
+mysql -uroot -p$MYSQL_ROOT_PASSWORD -f < create_mso_db-demo-vfw.sql \ No newline at end of file
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh
new file mode 100644
index 000000000..a16380108
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/02-load-additional-changes.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+#
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ===================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+#
+# ECOMP and OpenECOMP are trademarks
+# and service marks of AT&T Intellectual Property.
+#
+#
+
+# TODO: update this script to work with the new DB schema
+
+# mysql -uroot -p$MYSQL_ROOT_PASSWORD -e "UPDATE heat_environment SET ENVIRONMENT='parameters:\n vfw_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vfw_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n unprotected_private_net_id: zdfw1fwl01_unprotected\n protected_private_net_id: zdfw1fwl01_protected\n ecomp_private_net_id: oam_ecomp\n unprotected_private_net_cidr: 192.168.10.0/24\n protected_private_net_cidr: 192.168.20.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vfw_private_ip_0: 192.168.10.100\n vfw_private_ip_1: 192.168.20.100\n vfw_private_ip_2: 192.168.9.100\n vpg_private_ip_0: 192.168.10.200\n vpg_private_ip_1: 192.168.9.200\n vsn_private_ip_0: 192.168.20.250\n vsn_private_ip_1: 192.168.9.250\n vfw_name_0: zdfw1fwl01fwl01\n vpg_name_0: zdfw1fwl01pgn01\n vsn_name_0: zdfw1fwl01snk01\n vnf_id: vFirewall_demo_app\n vf_module_id: vFirewall\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vfw_key\n pub_key: INSERT YOUR PUBLIC KEY HERE' where id=5;" mso_catalog
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql
new file mode 100644
index 000000000..146ad0160
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/automated-tests/create_mso_db-tests.sql
@@ -0,0 +1,49 @@
+SOURCE ../default/create_mso_db-default.sql
+
+USE `mso_requests`;
+DROP USER 'mso';
+CREATE USER 'mso';
+GRANT ALL on mso_requests.* to 'mso' identified by 'mso123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+USE `mso_catalog`;
+DROP USER 'catalog';
+CREATE USER 'catalog';
+GRANT ALL on mso_catalog.* to 'catalog' identified by 'catalog123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+LOCK TABLES `NETWORK_RESOURCE` WRITE;
+/*!40000 ALTER TABLE `NETWORK_RESOURCE` DISABLE KEYS */;
+/*!40000 ALTER TABLE `NETWORK_RESOURCE` ENABLE KEYS */;
+insert into NETWORK_RESOURCE (id, NETWORK_TYPE, VERSION_STR, ORCHESTRATION_MODE ,DESCRIPTION, TEMPLATE_ID, NEUTRON_NETWORK_TYPE, AIC_VERSION_MIN) values
+(1, "vlan",'1',"NEUTRON","Cool network",1,"BASIC","0");
+UNLOCK TABLES;
+
+LOCK TABLES `NETWORK_RECIPE` WRITE;
+/*!40000 ALTER TABLE `NETWORK_RECIPE` DISABLE KEYS */;
+INSERT INTO `NETWORK_RECIPE`(`NETWORK_TYPE`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
+('vlan','CREATE','1',NULL,'/active-bpel/services/REST/CreateNetwork',NULL,180,NULL),
+('vlan','DELETE','1',NULL,'/active-bpel/services/REST/DeleteNetwork',NULL,180,NULL);
+/*!40000 ALTER TABLE `NETWORK_RECIPE` ENABLE KEYS */;
+UNLOCK TABLES;
+
+LOCK TABLES `VNF_RECIPE` WRITE;
+INSERT INTO `VNF_RECIPE`(`ID`, `VNF_TYPE`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`) VALUES
+(100,'VPE','CREATE','1','','/active-bpel/services/REST/CreateGenericVNF','',180,'SDN-ETHERNET-INTERNET'),
+(101,'VPE','DELETE','1','','/active-bpel/services/REST/DeleteGenericVNF','',180,'SDN-ETHERNET-INTERNET');
+UNLOCK TABLES;
+
+LOCK TABLES `VF_MODULE` WRITE;
+INSERT INTO `VF_MODULE`(`ID`, `TYPE`, `ASDC_SERVICE_MODEL_VERSION`, `MODEL_NAME`, `MODEL_VERSION`, `IS_BASE`, `VNF_RESOURCE_ID`) VALUES
+(100,'dns-servicetest/DNSResource-1::VF_DNS::module-1','1.0','VF_DNS::module-1','1.0','1','7'),
+(101,'dns-servicetest/DNSResource-1::Mog111..mog_psm..module-1','1.0','Mog111..mog_psm..module-1','1.0','1','7');
+UNLOCK TABLES;
+
+LOCK TABLES `VNF_RESOURCE` WRITE;
+INSERT INTO `VNF_RESOURCE`(`ID`, `VNF_TYPE`, `ASDC_SERVICE_MODEL_VERSION`, `ORCHESTRATION_MODE`, `MODEL_VERSION`) VALUES
+(100,'dns-servicetest/DNSResource-1','1.0','VF_DNS::module-1','1.0');
+UNLOCK TABLES;
+
+DELETE FROM HEAT_TEMPLATE_PARAMS;
+DELETE FROM HEAT_TEMPLATE;
+DELETE FROM HEAT_ENVIRONMENT;
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql
new file mode 100644
index 000000000..1fea840d7
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/default/create_mso_db-default.sql
@@ -0,0 +1,128 @@
+SOURCE ../../camunda/mariadb_engine_7.7.3-ee.sql
+
+--
+-- Create an admin user automatically for the cockpit
+--
+SOURCE ../../camunda/mysql_create_camunda_admin.sql
+
+--
+-- Current Database: `mso_requests`
+--
+
+DROP DATABASE IF EXISTS `mso_requests`;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mso_requests` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mso_requests`;
+
+SOURCE ../../main-schemas/MySQL-Requests-schema.sql
+
+--
+-- Current Database: `mso_catalog`
+--
+
+DROP DATABASE IF EXISTS `mso_catalog`;
+
+CREATE DATABASE /*!32312 IF NOT EXISTS*/ `mso_catalog` /*!40100 DEFAULT CHARACTER SET latin1 */;
+
+USE `mso_catalog`;
+
+SOURCE ../../main-schemas/MySQL-Catalog-schema.sql
+
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','Contrail30-l2nodhcp','1',NULL,'heat_template_version: 2015-04-30\n\ndescription:\n HOT template that creates a Contrail Virtual Network with L2NODHCP\n\nparameters:\n network_name:\n type: string\n description: Name of direct network (e.g. core, dmz)\n default: ECOMPNetwork\n shared:\n type: boolean\n description: Shared amongst tenants\n default: False\n external:\n type: boolean\n description: router_external for the VirtualNetwork\n default: False\n route_targets:\n type: comma_delimited_list\n description: Network route-targets (RT)\n default: \"\"\n subnet_list:\n type: json\n description: Network subnets\n default: []\n policy_refs:\n type: comma_delimited_list\n description: Policies referenced by Network\n default: \"\"\n policy_refsdata:\n type: json\n description: Policies referenced by Network\n default: []\n route_table_refs:\n type: comma_delimited_list\n description: Route Tables referenced by Network\n default: \"\"\n virtual_network_properties_allow_transit:\n type: boolean\n description: allow_transit for the VirtualNetwork\n default: True\n virtual_network_properties_forwarding_mode:\n type: string\n description: forwarding_mode for the VirtualNetwork\n default: l2\n virtual_network_properties_rpf:\n type: string\n description: rpf for the VirtualNetwork\n default: disable\n flood_unknown_unicast:\n type: boolean\n description: flood_unknown_unicast for the VirtualNetwork\n default: True\n\noutputs:\n network_id:\n description: Openstack network identifier\n value: { get_resource: network }\n network_fqdn:\n description: Openstack network identifier\n value: {list_join: [\':\', { get_attr: [network, fq_name] } ] }\n\nresources:\n networkIpam:\n type: OS::ContrailV2::NetworkIpam\n properties:\n name: { get_param: network_name }\n\n network:\n type: 
OS::ContrailV2::VirtualNetwork\n properties:\n name: { get_param: network_name }\n is_shared: {get_param: shared}\n router_external: { get_param: external }\n route_target_list:\n {\n route_target_list_route_target: { get_param: route_targets }\n }\n network_ipam_refs: [{ get_resource: networkIpam }]\n network_ipam_refs_data:\n [\n {\n network_ipam_refs_data_ipam_subnets: { get_param: subnet_list }\n }\n ]\n network_policy_refs: { get_param: policy_refs }\n network_policy_refs_data: { get_param: policy_refsdata }\n route_table_refs: { get_param: route_table_refs }\n flood_unknown_unicast: { get_param: flood_unknown_unicast } \n virtual_network_properties:\n {\n virtual_network_properties_allow_transit: { get_param: virtual_network_properties_allow_transit },\n virtual_network_properties_forwarding_mode: { get_param: virtual_network_properties_forwarding_mode },\n virtual_network_properties_rpf: { get_param: virtual_network_properties_rpf },\n }\n',10,'MANUAL RECORD','2017-10-05 18:52:03');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','Contrail30-gndirect','1',NULL,'heat_template_version: 2015-04-30\n\ndescription:\n HOT template that creates a Contrail Virtual Network for GNDIRECT\n\nparameters:\n network_name:\n type: string\n description: Name of direct network (e.g. core, dmz)\n default: ECOMPNetwork\n shared:\n type: boolean\n description: Shared amongst tenants\n default: False\n external:\n type: boolean\n description: router_external for the VirtualNetwork\n default: False\n route_targets:\n type: comma_delimited_list\n description: Network route-targets (RT)\n default: \"\"\n subnet_list:\n type: json\n description: Network subnets\n default: []\n policy_refs:\n type: comma_delimited_list\n description: Policies referenced by Network\n default: \"\"\n policy_refsdata:\n type: json\n description: Policies referenced by Network\n default: []\n route_table_refs:\n type: comma_delimited_list\n description: Route Tables referenced by Network\n default: \"\"\n virtual_network_properties_rpf:\n type: string\n description: rpf for the VirtualNetwork\n default: disable\n\noutputs:\n network_id:\n description: Openstack network identifier\n value: { get_resource: network }\n network_fqdn:\n description: Openstack network identifier\n value: {list_join: [\':\', { get_attr: [network, fq_name] } ] }\n\nresources:\n networkIpam:\n type: OS::ContrailV2::NetworkIpam\n properties:\n name: { get_param: network_name }\n\n network:\n type: OS::ContrailV2::VirtualNetwork\n properties:\n name: { get_param: network_name }\n is_shared: {get_param: shared}\n router_external: { get_param: external }\n route_target_list:\n {\n route_target_list_route_target: { get_param: route_targets }\n }\n network_ipam_refs: [{ get_resource: networkIpam }]\n network_ipam_refs_data:\n [\n {\n network_ipam_refs_data_ipam_subnets: { 
get_param: subnet_list }\n }\n ]\n network_policy_refs: { get_param: policy_refs }\n network_policy_refs_data: { get_param: policy_refsdata }\n route_table_refs: { get_param: route_table_refs }\n virtual_network_properties:\n {\n virtual_network_properties_rpf: { get_param: virtual_network_properties_rpf }\n }\n',10,'MANUAL RECORD','2017-10-05 18:52:03');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`,`NAME`,`VERSION`,`BODY`,`TIMEOUT_MINUTES`,`DESCRIPTION`,`CREATION_TIMESTAMP`,`ARTIFACT_CHECKSUM`) VALUES ('efee1d84-b8ec-11e7-abc4-cec278b6b50a','Generic NeutronNet','1','heat_template_version: 2013-05-23\n\ndescription:\n HOT template that creates a Generic Neutron Network\n\nparameters:\n network_name:\n type: string\n description: Name of direct network (e.g. core, dmz)\n default: ECOMPNetwork\n network_subnet_name:\n type: string\n description: Name of subnet network (e.g. core, dmz)\n default: ECOMPNetwork\n network_subnet_cidr:\n type: string\n description: CIDR of subnet network (e.g. core, dmz)\n default: 10.0.0.0/16\n\noutputs:\n network_id:\n description: Openstack network identifier\n value: { get_resource: network }\n network_fqdn:\n description: Openstack network identifier\n value: {list_join: [\':\', { get_attr: [network, fq_name] } ] }\n\nresources:\n network:\n type: OS::Neutron::Net\n properties:\n name: {get_param: network_name }\n\n subnet:\n type: OS::Neutron::Subnet\n properties:\n name: { get_param: network_subnet_name }\n network_id: { get_resource: network }\n cidr: { get_param: network_subnet_cidr }\n enable_dhcp: false\n',10,'Generic Neutron Template','2017-10-26 14:44:00', 'MANUAL RECORD');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','external','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','flood_unknown_unicast','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','network_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','policy_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','policy_refsdata','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','route_table_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','route_targets','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','shared','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_allow_transit','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_forwarding_mode','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c198-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_rpf','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','external','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','network_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','policy_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','policy_refsdata','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','route_table_refs','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','route_targets','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','shared','\0','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('4885c7a1-a9fe-11e7-8b4b-0242ac120002','virtual_network_properties_rpf','\0','string',NULL);
+
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (1,'CONTRAIL_BASIC','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (2,'CONTRAIL_BASIC','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (3,'CONTRAIL_BASIC','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (4,'CONTRAIL_SHARED','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (5,'CONTRAIL_SHARED','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (6,'CONTRAIL_SHARED','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (7,'CONTRAIL_EXTERNAL','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (8,'CONTRAIL_EXTERNAL','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (9,'CONTRAIL_EXTERNAL','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (10,'CONTRAIL30_BASIC','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (11,'CONTRAIL30_BASIC','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (12,'CONTRAIL30_BASIC','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (13,'CONTRAIL30_MPSCE','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (14,'CONTRAIL30_MPSCE','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (15,'CONTRAIL30_MPSCE','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (16,'VID_DEFAULT','createInstance','VID_DEFAULT recipe to create network if no custom BPMN flow is found','/mso/async/services/CreateNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (17,'VID_DEFAULT','updateInstance','VID_DEFAULT recipe to update network if no custom BPMN flow is found','/mso/async/services/UpdateNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (18,'VID_DEFAULT','deleteInstance','VID_DEFAULT recipe to delete network if no custom BPMN flow is found','/mso/async/services/DeleteNetworkInstance',NULL,180,NULL,'2017-10-05 18:52:03','1.0');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (19,'CONTRAIL30_L2NODHCP','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (20,'CONTRAIL30_L2NODHCP','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (21,'CONTRAIL30_L2NODHCP','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (22,'CONTRAIL30_GNDIRECT','CREATE',NULL,'/mso/async/services/CreateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (23,'CONTRAIL30_GNDIRECT','UPDATE',NULL,'/mso/async/services/UpdateNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+INSERT INTO `network_recipe` (`id`, `MODEL_NAME`, `ACTION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `NETWORK_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TYPE`, `CREATION_TIMESTAMP`, `VERSION_STR`) VALUES (24,'CONTRAIL30_GNDIRECT','DELETE',NULL,'/mso/async/services/DeleteNetworkV2',NULL,180,NULL,'2017-10-05 18:52:03','1');
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('48cc36cc-a9fe-11e7-8b4b-0242ac120002','VID_DEFAULT','48cd56c8-a9fe-11e7-8b4b-0242ac120002','1.0','Default service for VID to use for infra APIH orchestration1707MIGRATED1707MIGRATED','2017-10-05 18:52:03',NULL);
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('48cc3acd-a9fe-11e7-8b4b-0242ac120002','*','48ce2256-a9fe-11e7-8b4b-0242ac120002','1.0','Default service to use for infra APIH orchestration1707MIGRATED1707MIGRATED','2017-10-05 18:52:03',NULL);
+
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (1,'createInstance','1','VID_DEFAULT recipe to create service-instance if no custom BPMN flow is found','/mso/async/services/CreateGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc36cc-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (2,'deleteInstance','1','VID_DEFAULT recipe to delete service-instance if no custom BPMN flow is found','/mso/async/services/DeleteGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc36cc-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (3,'createInstance','1','DEFAULT recipe to create service-instance if no custom BPMN flow is found','/mso/async/services/CreateGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc3acd-a9fe-11e7-8b4b-0242ac120002');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (4,'deleteInstance','1','DEFAULT recipe to delete service-instance if no custom BPMN flow is found','/mso/async/services/DeleteGenericALaCarteServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','48cc3acd-a9fe-11e7-8b4b-0242ac120002');
+
+--
+-- Custom Recipe for the VoLTE service added temporarily
+--
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('dfcd7471-16c7-444e-8268-d4c50d90593a','UUI_DEFAULT','dfcd7471-16c7-444e-8268-d4c50d90593a','1.0','Default service for UUI to use for infra APIH orchestration1707MIGRATED1707MIGRATED','2017-10-23 18:52:03',NULL);
+
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (11,'createInstance','1','Custom recipe to create E2E service-instance if no custom BPMN flow is found','/mso/async/services/CreateCustomE2EServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','dfcd7471-16c7-444e-8268-d4c50d90593a');
+INSERT INTO `service_recipe` (`id`, `ACTION`, `VERSION_STR`, `DESCRIPTION`, `ORCHESTRATION_URI`, `SERVICE_PARAM_XSD`, `RECIPE_TIMEOUT`, `SERVICE_TIMEOUT_INTERIM`, `CREATION_TIMESTAMP`, `SERVICE_MODEL_UUID`) VALUES (12,'deleteInstance','1','Custom recipe to delete E2E service-instance if no custom BPMN flow is found','/mso/async/services/DeleteCustomE2EServiceInstance',NULL,180,NULL,'2017-10-05 18:52:03','dfcd7471-16c7-444e-8268-d4c50d90593a');
+
+INSERT INTO `temp_network_heat_template_lookup` (`NETWORK_RESOURCE_MODEL_NAME`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`) VALUES ('CONTRAIL30_GNDIRECT','4885c7a1-a9fe-11e7-8b4b-0242ac120002','3.0',NULL);
+INSERT INTO `temp_network_heat_template_lookup` (`NETWORK_RESOURCE_MODEL_NAME`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`) VALUES ('CONTRAIL30_L2NODHCP','4885c198-a9fe-11e7-8b4b-0242ac120002','3.0',NULL);
+INSERT INTO `temp_network_heat_template_lookup` (`NETWORK_RESOURCE_MODEL_NAME`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`) VALUES ('Generic NeutronNet','efee1d84-b8ec-11e7-abc4-cec278b6b50a','2.0',NULL); -- AIC_VERSION_MAX is open-ended: use SQL NULL, not the string 'NULL' (matches the two rows above)
+
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (1,'*','VOLUME_GROUP',NULL,'CREATE',NULL,'1','Recipe Match All for','/mso/async/services/createCinderVolumeV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (2,'*','VOLUME_GROUP',NULL,'DELETE',NULL,'1','Recipe Match All for','/mso/async/services/deleteCinderVolumeV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (3,'*','VOLUME_GROUP',NULL,'UPDATE',NULL,'1','Recipe Match All for','/mso/async/services/updateCinderVolumeV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (4,NULL,'VOLUME_GROUP',NULL,'CREATE_VF_MODULE_VOL',NULL,'1','Recipe Match All for','/mso/async/services/CreateVfModuleVolume',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (5,NULL,'VOLUME_GROUP',NULL,'DELETE_VF_MODULE_VOL',NULL,'1','Recipe Match All for','/mso/async/services/DeleteVfModuleVolume',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (6,NULL,'VOLUME_GROUP',NULL,'UPDATE_VF_MODULE_VOL',NULL,'1','Recipe Match All for','/mso/async/services/UpdateVfModuleVolume',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (7,NULL,'volumeGroup','VID_DEFAULT','createInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/CreateVfModuleVolumeInfraV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (8,NULL,'volumeGroup','VID_DEFAULT','deleteInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/DeleteVfModuleVolumeInfraV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (9,NULL,'volumeGroup','VID_DEFAULT','updateInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/UpdateVfModuleVolumeInfraV1',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (10,NULL,'vfModule','VID_DEFAULT','createInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/CreateVfModuleInfra',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (11,NULL,'vfModule','VID_DEFAULT','deleteInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/DeleteVfModuleInfra',null,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_components_recipe` (`id`, `VNF_TYPE`, `VNF_COMPONENT_TYPE`, `VF_MODULE_MODEL_UUID`, `ACTION`, `SERVICE_TYPE`, `VERSION`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_COMPONENT_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (12,NULL,'vfModule','VID_DEFAULT','updateInstance',NULL,'1','VID_DEFAULT recipe t','/mso/async/services/UpdateVfModuleInfra',null,180,'2017-10-05 18:52:03');
+
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (1,NULL,'CREATE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/CreateGenericVNFV1',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (2,NULL,'DELETE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/async/services//deleteGenericVNFV1',NULL,180,'2017-10-05 18:52:03'); -- NOTE(review): double slash in ORCHESTRATION_URI preserved from source data; sibling row id=1 uses /mso/workflow/... -- confirm intentional
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (3,NULL,'UPDATE',NULL,'1','*','Recipe Match All for VNFs if no custom flow exists','/mso/workflow/services/updateGenericVNFV1',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (4,'*','CREATE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/CreateVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (5,'*','DELETE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/DeleteVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (6,'*','UPDATE_VF_MODULE',NULL,'1',NULL,'Recipe Match All for VNFs if no custom flow exists','/mso/async/services/UpdateVfModule',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (7,NULL,'createInstance',NULL,'1','VID_DEFAULT','VID_DEFAULT recipe to create VNF if no custom BPMN flow is found','/mso/async/services/CreateVnfInfra',NULL,180,'2017-10-05 18:52:03');
+INSERT INTO `vnf_recipe` (`id`, `VF_MODULE_ID`, `ACTION`, `SERVICE_TYPE`, `VERSION_STR`, `VNF_TYPE`, `DESCRIPTION`, `ORCHESTRATION_URI`, `VNF_PARAM_XSD`, `RECIPE_TIMEOUT`, `CREATION_TIMESTAMP`) VALUES (8,NULL,'deleteInstance',NULL,'1','VID_DEFAULT','VID_DEFAULT recipe to delete VNF if no custom BPMN flow is found','/mso/async/services/DeleteVnfInfra',NULL,180,'2017-10-05 18:52:03');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql
new file mode 100644
index 000000000..b5063defd
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-dns/create_mso_db-demo-dns.sql
@@ -0,0 +1,77 @@
+SOURCE ../default/create_mso_db-default.sql
+
+USE `mso_requests`;
+DROP USER 'mso';
+CREATE USER 'mso';
+GRANT ALL on mso_requests.* to 'mso' identified by 'mso123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+USE `mso_catalog`;
+DROP USER 'catalog';
+CREATE USER 'catalog';
+GRANT ALL on mso_catalog.* to 'catalog' identified by 'catalog123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID1','base_vlb.env','1.0','BASE VLB ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_net_cidr: 192.168.10.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.211\n vdns_private_ip_1: 192.168.9.211\n vlb_name_0: zdfw1lb01lb01\n vdns_name_0: zdfw1lb01dns01\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID2','dnsscaling.env','1.0','DNS Scaling ENV file','parameters:\n vlb_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vlb_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n vlb_private_net_id: zdfw1lb01_private\n ecomp_private_net_id: oam_ecomp\n vlb_private_ip_0: 192.168.10.111\n vlb_private_ip_1: 192.168.9.111\n vdns_private_ip_0: 192.168.10.222\n vdns_private_ip_1: 192.168.9.222\n vdns_name_0: zdfw1lb01dns02\n vnf_id: vLoadBalancer_demo_app\n vf_module_id: vLoadBalancer\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vlb_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID1','base_vlb.yaml','1.0','Base VLB Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vLoadBalancer/vDNS demo app for OpenECOMP\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_net_cidr:\n type: string\n label: vLoadBalancer private network CIDR\n description: The CIDR of the vLoadBalancer private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the 
ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vlb_name_0:\n type: string\n label: vLoadBalancer name\n description: Name of the vLoadBalancer\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n vlb_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: vlb_private_net_id }\n\n vlb_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n name: { get_param: vlb_private_net_id }\n network_id: { get_resource: vlb_private_network }\n cidr: { get_param: vlb_private_net_cidr }\n\n vlb_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vlb_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vlb_private_0_port }\n - port: { get_resource: vlb_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: 
webserver_ip }\n __dcae_collector_ip__: { get_param: dcae_collector_ip }\n __local_private_ipaddr__: { get_param: vlb_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_lb_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vlb.sh\n chmod +x v_lb_init.sh\n chmod +x vlb.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n LOCAL_PUBLIC_IPADDR=$(ifconfig eth0 | grep \"inet addr\" | tr -s \' \' | cut -d\' \' -f3 | cut -d\':\' -f2)\n echo $LOCAL_PUBLIC_IPADDR > config/local_public_ipaddr.txt\n mv vlb.sh /etc/init.d\n update-rc.d vlb.sh defaults\n ./v_lb_init.sh\n\n vlb_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{\"subnet\": { get_resource: vlb_private_subnet }, \"ip_address\": { get_param: vlb_private_ip_0 }}]\n\n vlb_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vlb_private_ip_1 }}]\n\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: 
vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n ./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: vlb_private_network }\n fixed_ips: [{\"subnet\": { get_resource: vlb_private_subnet }, \"ip_address\": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_1 }}]\n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID2','dnsscaling.yaml','1.0','DNS Scaling Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy a vDNS for OpenECOMP (scaling-up scenario)\n\nparameters:\n vlb_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vlb_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n vlb_private_net_id:\n type: string\n label: vLoadBalancer private network name or ID\n description: Private network that connects vLoadBalancer with vDNSs\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n vlb_private_ip_0:\n type: string\n label: vLoadBalancer private IP address towards the private network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with the vDNSs\n vlb_private_ip_1:\n type: string\n label: vLoadBalancer private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vLoadBalancer to communicate with ECOMP components\n vdns_private_ip_0:\n type: string\n label: vDNS private IP address towards the private network\n description: Private IP address that is assigned to the vDNS to communicate with the vLoadBalancer\n vdns_private_ip_1:\n type: string\n label: vDNS private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vDNS to communicate with ECOMP components\n vdns_name_0:\n type: string\n label: vDNS name\n description: Name of the vDNS\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is 
provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vLoadBalancer Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be installed on the compute instance\n\nresources:\n vdns_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vlb_image_name }\n flavor: { get_param: vlb_flavor_name }\n name: { get_param: vdns_name_0 }\n key_name: { get_param: key_name }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vdns_private_0_port }\n - port: { get_resource: vdns_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __lb_oam_int__ : { get_param: vlb_private_ip_1 }\n __lb_private_ipaddr__: { get_param: vlb_private_ip_0 }\n __local_private_ipaddr__: { get_param: vdns_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n LB_OAM_INT=__lb_oam_int__\n LB_PRIVATE_IPADDR=__lb_private_ipaddr__\n LOCAL_PRIVATE_IPADDR=__local_private_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_dns_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vdns.sh\n chmod +x v_dns_init.sh\n chmod +x vdns.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $LB_OAM_INT > config/lb_oam_int.txt\n echo $LB_PRIVATE_IPADDR > config/lb_private_ipaddr.txt\n echo $LOCAL_PRIVATE_IPADDR > config/local_private_ipaddr.txt\n echo \"no\" > config/install.txt\n mv vdns.sh /etc/init.d\n update-rc.d vdns.sh defaults\n 
./v_dns_init.sh\n\n vdns_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: vlb_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: vlb_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_0 }}]\n\n vdns_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vdns_private_ip_1 }}]\n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','ecomp_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdns_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdns_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vdsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vlb_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID1','webserver_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdns_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdns_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vdsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vlb_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID2','webserver_ip','','string',NULL);
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('1e34774e-715e-4fd6-bd09-7b654622f35i','dns-service','585822c8-4027-4f84-ba50-e9248606f111','1.0','dns service for unit test','2016-11-14 13:04:07',NULL);
+
+INSERT INTO `service_to_resource_customizations` (`SERVICE_MODEL_UUID`, `RESOURCE_MODEL_CUSTOMIZATION_UUID`, `MODEL_TYPE`, `CREATION_TIMESTAMP`) VALUES ('1e34774e-715e-4fd6-bd09-7b654622f35i','302aa6be-a9fe-11e7-8b4b-0242ac120002','vnf','2017-10-05 18:51:28');
+
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group','585822c7-4027-4f84-ba50-e9248606f132','1.0','VF_RI1_DNS::module-1',NULL,1,'Artifact-UUID1',NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131');
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group','585822c7-4027-4f84-ba50-e9248606f133','1.0','VF_RI1_DNS::module-2',NULL,0,'Artifact-UUID2',NULL,'2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131');
+
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('30316d81-a9fe-11e7-8b4b-0242ac120002',NULL,1,0,NULL,NULL,'EnvArtifact-UUID1',NULL,'2017-10-05 18:51:25','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-1::module-1.group');
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('303170ae-a9fe-11e7-8b4b-0242ac120002',NULL,0,0,NULL,NULL,'EnvArtifact-UUID2',NULL,'2017-10-05 18:51:25','1e34774e-715e-4fd5-bd08-7b654622f33e.VF_RI1_DNS::module-2::module-1.group');
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','30316d81-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:51:26');
+
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','303170ae-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:51:26');
+
+INSERT INTO `vnf_resource` (`ORCHESTRATION_MODE`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `MODEL_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `TOSCA_NODE_TYPE`, `HEAT_TEMPLATE_ARTIFACT_UUID`) VALUES ('HEAT','dns service for unit test1707MIGRATED','2016-11-14 13:04:07','585822c7-4027-4f84-ba50-e9248606f131',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f112','1.0','DNSResource',NULL,NULL);
+
+INSERT INTO `vnf_resource_customization` (`MODEL_CUSTOMIZATION_UUID`, `MODEL_INSTANCE_NAME`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_MAX_COUNT`, `NF_TYPE`, `NF_ROLE`, `NF_FUNCTION`, `NF_NAMING_CODE`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('302aa6be-a9fe-11e7-8b4b-0242ac120002','DNSResource-1',NULL,NULL,NULL,NULL,NULL,NULL,NULL,'2017-10-05 18:51:25','585822c7-4027-4f84-ba50-e9248606f131');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql
new file mode 100644
index 000000000..15001050b
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/bulkload-files/demo-vfw/create_mso_db-demo-vfw.sql
@@ -0,0 +1,59 @@
+SOURCE ../default/create_mso_db-default.sql
+
+USE `mso_requests`;
+DROP USER 'mso';
+CREATE USER 'mso';
+GRANT ALL on mso_requests.* to 'mso' identified by 'mso123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+USE `mso_catalog`;
+DROP USER 'catalog';
+CREATE USER 'catalog';
+GRANT ALL on mso_catalog.* to 'catalog' identified by 'catalog123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+
+INSERT INTO `heat_environment` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('EnvArtifact-UUID3','base_vfw.env','1.0','base_vfw ENV file','parameters:\n vfw_image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)\n vfw_flavor_name: 4 GB General Purpose v1\n public_net_id: 00000000-0000-0000-0000-000000000000\n unprotected_private_net_id: zdfw1fwl01_unprotected\n protected_private_net_id: zdfw1fwl01_protected\n ecomp_private_net_id: oam_ecomp\n unprotected_private_net_cidr: 192.168.10.0/24\n protected_private_net_cidr: 192.168.20.0/24\n ecomp_private_net_cidr: 192.168.9.0/24\n vfw_private_ip_0: 192.168.10.100\n vfw_private_ip_1: 192.168.20.100\n vfw_private_ip_2: 192.168.9.100\n vpg_private_ip_0: 192.168.10.200\n vpg_private_ip_1: 192.168.9.200\n vsn_private_ip_0: 192.168.20.250\n vsn_private_ip_1: 192.168.9.250\n vfw_name_0: zdfw1fwl01fwl01\n vpg_name_0: zdfw1fwl01pgn01\n vsn_name_0: zdfw1fwl01snk01\n vnf_id: vFirewall_demo_app\n vf_module_id: vFirewall\n webserver_ip: 162.242.237.182\n dcae_collector_ip: 192.168.9.1\n key_name: vfw_key\n pub_key: INSERT YOUR PUBLIC KEY HERE','MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template` (`ARTIFACT_UUID`, `NAME`, `VERSION`, `DESCRIPTION`, `BODY`, `TIMEOUT_MINUTES`, `ARTIFACT_CHECKSUM`, `CREATION_TIMESTAMP`) VALUES ('Artifact-UUID3','base_vfw.yaml','1.0','Base VFW Heat','heat_template_version: 2013-05-23\n\ndescription: Heat template to deploy vFirewall demo app for OpenECOMP\n\nparameters:\n vfw_image_name:\n type: string\n label: Image name or ID\n description: Image to be used for compute instance\n vfw_flavor_name:\n type: string\n label: Flavor\n description: Type of instance (flavor) to be used\n public_net_id:\n type: string\n label: Public network name or ID\n description: Public network that enables remote connection to VNF\n unprotected_private_net_id:\n type: string\n label: Unprotected private network name or ID\n description: Private network that connects vPacketGenerator with vFirewall\n protected_private_net_id:\n type: string\n label: Protected private network name or ID\n description: Private network that connects vFirewall with vSink\n ecomp_private_net_id:\n type: string\n label: ECOMP management network name or ID\n description: Private network that connects ECOMP component and the VNF\n unprotected_private_net_cidr:\n type: string\n label: Unprotected private network CIDR\n description: The CIDR of the unprotected private network\n protected_private_net_cidr:\n type: string\n label: Protected private network CIDR\n description: The CIDR of the protected private network\n ecomp_private_net_cidr:\n type: string\n label: ECOMP private network CIDR\n description: The CIDR of the protected private network\n vfw_private_ip_0:\n type: string\n label: vFirewall private IP address towards the unprotected network\n description: Private IP address that is assigned to the vFirewall to communicate with the vPacketGenerator\n vfw_private_ip_1:\n type: string\n label: vFirewall private IP address towards the protected network\n description: Private IP address that is assigned to the vFirewall to communicate with 
the vSink\n vfw_private_ip_2:\n type: string\n label: vFirewall private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vFirewall to communicate with ECOMP components\n vpg_private_ip_0:\n type: string\n label: vPacketGenerator private IP address towards the unprotected network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with the vFirewall\n vpg_private_ip_1:\n type: string\n label: vPacketGenerator private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vPacketGenerator to communicate with ECOMP components\n vsn_private_ip_0:\n type: string\n label: vSink private IP address towards the protected network\n description: Private IP address that is assigned to the vSink to communicate with the vFirewall\n vsn_private_ip_1:\n type: string\n label: vSink private IP address towards the ECOMP management network\n description: Private IP address that is assigned to the vSink to communicate with ECOMP components\n vfw_name_0:\n type: string\n label: vFirewall name\n description: Name of the vFirewall\n vpg_name_0:\n type: string\n label: vPacketGenerator name\n description: Name of the vPacketGenerator\n vsn_name_0:\n type: string\n label: vSink name\n description: Name of the vSink\n vnf_id:\n type: string\n label: VNF ID\n description: The VNF ID is provided by ECOMP\n vf_module_id:\n type: string\n label: vFirewall module ID\n description: The vFirewall Module ID is provided by ECOMP\n webserver_ip:\n type: string\n label: Webserver IP address\n description: IP address of the webserver that hosts the source code and binaries\n dcae_collector_ip:\n type: string\n label: DCAE collector IP address\n description: IP address of the DCAE collector\n key_name:\n type: string\n label: Key pair name\n description: Public/Private key pair name\n pub_key:\n type: string\n label: Public key\n description: Public key to be 
installed on the compute instance\n\nresources:\n my_keypair:\n type: OS::Nova::KeyPair\n properties:\n name: { get_param: key_name }\n public_key: { get_param: pub_key }\n save_private_key: false\n\n unprotected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: unprotected_private_net_id }\n\n protected_private_network:\n type: OS::Neutron::Net\n properties:\n name: { get_param: protected_private_net_id }\n\n unprotected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: unprotected_private_network }\n cidr: { get_param: unprotected_private_net_cidr }\n\n protected_private_subnet:\n type: OS::Neutron::Subnet\n properties:\n network_id: { get_resource: protected_private_network }\n cidr: { get_param: protected_private_net_cidr }\n\n vfw_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vfw_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vfw_private_0_port }\n - port: { get_resource: vfw_private_1_port }\n - port: { get_resource: vfw_private_2_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __dcae_collector_ip__ : { get_param: dcae_collector_ip }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n DCAE_COLLECTOR_IP=__dcae_collector_ip__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_firewall_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vfirewall.sh\n chmod +x v_firewall_init.sh\n chmod +x vfirewall.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $DCAE_COLLECTOR_IP > config/dcae_collector_ip.txt\n echo \"no\" > config/install.txt\n mv vfirewall.sh /etc/init.d\n sudo update-rc.d vfirewall.sh defaults\n ./v_firewall_init.sh\n\n 
vfw_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: unprotected_private_subnet }, \"ip_address\": { get_param: vfw_private_ip_0 }}]\n\n vfw_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: protected_private_subnet }, \"ip_address\": { get_param: vfw_private_ip_1 }}]\n\n vfw_private_2_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vfw_private_ip_2 }}]\n\n vpg_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vpg_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vpg_private_0_port }\n - port: { get_resource: vpg_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __fw_ipaddr__: { get_param: vfw_private_ip_0 }\n __protected_net_cidr__: { get_param: protected_private_net_cidr }\n __sink_ipaddr__: { get_param: vsn_private_ip_0 }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n FW_IPADDR=__fw_ipaddr__\n PROTECTED_NET_CIDR=__protected_net_cidr__\n SINK_IPADDR=__sink_ipaddr__\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_packetgen_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vpacketgen.sh\n chmod +x v_packetgen_init.sh\n chmod +x vpacketgen.sh\n echo $WEBSERVER_IP > config/webserver_ip.txt\n echo $FW_IPADDR > config/fw_ipaddr.txt\n echo $PROTECTED_NET_CIDR > config/protected_net_cidr.txt\n echo $SINK_IPADDR > config/sink_ipaddr.txt\n echo \"no\" > 
config/install.txt\n mv vpacketgen.sh /etc/init.d\n sudo update-rc.d vpacketgen.sh defaults\n ./v_packetgen_init.sh\n\n vpg_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: unprotected_private_network }\n fixed_ips: [{\"subnet\": { get_resource: unprotected_private_subnet }, \"ip_address\": { get_param: vpg_private_ip_0 }}]\n\n vpg_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vpg_private_ip_1 }}]\n\n vsn_0:\n type: OS::Nova::Server\n properties:\n image: { get_param: vfw_image_name }\n flavor: { get_param: vfw_flavor_name }\n name: { get_param: vsn_name_0 }\n key_name: { get_resource: my_keypair }\n networks:\n - network: { get_param: public_net_id }\n - port: { get_resource: vsn_private_0_port }\n - port: { get_resource: vsn_private_1_port }\n metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }}\n user_data_format: RAW\n user_data:\n str_replace:\n params:\n __webserver__: { get_param: webserver_ip }\n __protected_net_gw__: { get_param: vfw_private_ip_1 }\n __unprotected_net__: { get_param: unprotected_private_net_cidr }\n template: |\n #!/bin/bash\n\n WEBSERVER_IP=__webserver__\n PROTECTED_NET_GW=__protected_net_gw__\n UNPROTECTED_NET=__unprotected_net__\n UNPROTECTED_NET=$(echo $UNPROTECTED_NET | cut -d\'/\' -f1)\n\n mkdir /opt/config\n cd /opt\n wget http://$WEBSERVER_IP/demo_repo/v_sink_init.sh\n wget http://$WEBSERVER_IP/demo_repo/vsink.sh\n chmod +x v_sink_init.sh\n chmod +x vsink.sh\n echo $PROTECTED_NET_GW > config/protected_net_gw.txt\n echo $UNPROTECTED_NET > config/unprotected_net.txt\n echo \"no\" > config/install.txt\n mv vsink.sh /etc/init.d\n sudo update-rc.d vsink.sh defaults\n ./v_sink_init.sh\n\n vsn_private_0_port:\n type: OS::Neutron::Port\n properties:\n network: { get_resource: protected_private_network }\n fixed_ips: 
[{\"subnet\": { get_resource: protected_private_subnet }, \"ip_address\": { get_param: vsn_private_ip_0 }}]\n\n vsn_private_1_port:\n type: OS::Neutron::Port\n properties:\n network: { get_param: ecomp_private_net_id }\n fixed_ips: [{\"subnet\": { get_param: ecomp_private_net_id }, \"ip_address\": { get_param: vsn_private_ip_1 }}]\n \n',300,'MANUAL RECORD','2016-11-14 13:04:07');
+
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','dcae_collector_ip','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','ecomp_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','ecomp_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','key_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','protected_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','protected_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','public_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','pub_key','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','unprotected_private_net_cidr','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','unprotected_private_net_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_flavor_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_image_name','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vfw_private_ip_2','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vf_module_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vnf_id','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vpg_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_name_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_private_ip_0','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','vsn_private_ip_1','','string',NULL);
+INSERT INTO `heat_template_params` (`HEAT_TEMPLATE_ARTIFACT_UUID`, `PARAM_NAME`, `IS_REQUIRED`, `PARAM_TYPE`, `PARAM_ALIAS`) VALUES ('Artifact-UUID3','webserver_ip','','string',NULL);
+
+INSERT INTO `service` (`MODEL_UUID`, `MODEL_NAME`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `TOSCA_CSAR_ARTIFACT_UUID`) VALUES ('2e34774e-715e-4fd5-bd09-7b654622f35i','vfw-service','585822c7-4027-4f84-ba50-e9248606f112','1.0','VFW service','2016-11-14 13:04:07',NULL);
+
+INSERT INTO `vf_module` (`MODEL_UUID`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `DESCRIPTION`, `IS_BASE`, `HEAT_TEMPLATE_ARTIFACT_UUID`, `VOL_HEAT_TEMPLATE_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('1e34774e-715e-4fd5-bd08-7b654622f33f.VF_RI1_VFW::module-1::module-1.group','585822c7-4027-4f84-ba50-e9248606f134','1.0','VF_RI1_VFW::module-1',NULL,1,'Artifact-UUID3',NULL,'2016-11-14 13:04:07','685822c7-4027-4f84-ba50-e9248606f132');
+
+INSERT INTO `vf_module_customization` (`MODEL_CUSTOMIZATION_UUID`, `LABEL`, `INITIAL_COUNT`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_COUNT`, `HEAT_ENVIRONMENT_ARTIFACT_UUID`, `VOL_ENVIRONMENT_ARTIFACT_UUID`, `CREATION_TIMESTAMP`, `VF_MODULE_MODEL_UUID`) VALUES ('5aa23938-a9fe-11e7-8b4b-0242ac120002',NULL,1,0,NULL,NULL,'EnvArtifact-UUID3',NULL,'2017-10-05 18:52:03','1e34774e-715e-4fd5-bd08-7b654622f33f.VF_RI1_VFW::module-1::module-1.group');
+
+INSERT INTO `vnf_res_custom_to_vf_module_custom` (`VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID`, `VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID`, `CREATION_TIMESTAMP`) VALUES ('5a9bd247-a9fe-11e7-8b4b-0242ac120002','5aa23938-a9fe-11e7-8b4b-0242ac120002','2017-10-05 18:52:03');
+
+INSERT INTO `vnf_resource` (`ORCHESTRATION_MODE`, `DESCRIPTION`, `CREATION_TIMESTAMP`, `MODEL_UUID`, `AIC_VERSION_MIN`, `AIC_VERSION_MAX`, `MODEL_INVARIANT_UUID`, `MODEL_VERSION`, `MODEL_NAME`, `TOSCA_NODE_TYPE`, `HEAT_TEMPLATE_ARTIFACT_UUID`) VALUES ('HEAT','VFW service1707MIGRATED','2016-11-14 13:04:07','685822c7-4027-4f84-ba50-e9248606f132',NULL,NULL,'585822c7-4027-4f84-ba50-e9248606f113','1.0','VFWResource',NULL,NULL);
+
+INSERT INTO `vnf_resource_customization` (`MODEL_CUSTOMIZATION_UUID`, `MODEL_INSTANCE_NAME`, `MIN_INSTANCES`, `MAX_INSTANCES`, `AVAILABILITY_ZONE_MAX_COUNT`, `NF_TYPE`, `NF_ROLE`, `NF_FUNCTION`, `NF_NAMING_CODE`, `CREATION_TIMESTAMP`, `VNF_RESOURCE_MODEL_UUID`) VALUES ('5a9bd247-a9fe-11e7-8b4b-0242ac120002','VFWResource-1',NULL,NULL,NULL,NULL,NULL,NULL,NULL,'2017-10-05 18:52:03','685822c7-4027-4f84-ba50-e9248606f132');
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql
new file mode 100644
index 000000000..b9b8dd62c
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mariadb_engine_7.7.3-ee.sql
@@ -0,0 +1,1195 @@
+DROP DATABASE IF EXISTS `camundabpmn`;
+
+CREATE DATABASE `camundabpmn`;
+
+USE `camundabpmn`;
+
+# DROP USER IF EXISTS 'camunda';
+delete from mysql.user where User='camunda';
+CREATE USER 'camunda';
+GRANT ALL on camundabpmn.* to 'camunda' identified by 'camunda123' with GRANT OPTION;
+FLUSH PRIVILEGES;
+
+USE `camundabpmn`;
+
+create table ACT_GE_PROPERTY (
+ NAME_ varchar(64),
+ VALUE_ varchar(300),
+ REV_ integer,
+ primary key (NAME_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+insert into ACT_GE_PROPERTY
+values ('schema.version', 'fox', 1);
+
+insert into ACT_GE_PROPERTY
+values ('schema.history', 'create(fox)', 1);
+
+insert into ACT_GE_PROPERTY
+values ('next.dbid', '1', 1);
+
+insert into ACT_GE_PROPERTY
+values ('deployment.lock', '0', 1);
+
+insert into ACT_GE_PROPERTY
+values ('history.cleanup.job.lock', '0', 1);
+
+create table ACT_GE_BYTEARRAY (
+ ID_ varchar(64),
+ REV_ integer,
+ NAME_ varchar(255),
+ DEPLOYMENT_ID_ varchar(64),
+ BYTES_ LONGBLOB,
+ GENERATED_ TINYINT,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RE_DEPLOYMENT (
+ ID_ varchar(64),
+ NAME_ varchar(255),
+ DEPLOY_TIME_ timestamp(3),
+ SOURCE_ varchar(255),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_EXECUTION (
+ ID_ varchar(64),
+ REV_ integer,
+ PROC_INST_ID_ varchar(64),
+ BUSINESS_KEY_ varchar(255),
+ PARENT_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ SUPER_EXEC_ varchar(64),
+ SUPER_CASE_EXEC_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ ACT_ID_ varchar(255),
+ ACT_INST_ID_ varchar(64),
+ IS_ACTIVE_ TINYINT,
+ IS_CONCURRENT_ TINYINT,
+ IS_SCOPE_ TINYINT,
+ IS_EVENT_SCOPE_ TINYINT,
+ SUSPENSION_STATE_ integer,
+ CACHED_ENT_STATE_ integer,
+ SEQUENCE_COUNTER_ bigint,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_JOB (
+ ID_ varchar(64) NOT NULL,
+ REV_ integer,
+ TYPE_ varchar(255) NOT NULL,
+ LOCK_EXP_TIME_ timestamp(3) NULL,
+ LOCK_OWNER_ varchar(255),
+ EXCLUSIVE_ boolean,
+ EXECUTION_ID_ varchar(64),
+ PROCESS_INSTANCE_ID_ varchar(64),
+ PROCESS_DEF_ID_ varchar(64),
+ PROCESS_DEF_KEY_ varchar(255),
+ RETRIES_ integer,
+ EXCEPTION_STACK_ID_ varchar(64),
+ EXCEPTION_MSG_ varchar(4000),
+ DUEDATE_ timestamp(3) NULL,
+ REPEAT_ varchar(255),
+ HANDLER_TYPE_ varchar(255),
+ HANDLER_CFG_ varchar(4000),
+ DEPLOYMENT_ID_ varchar(64),
+ SUSPENSION_STATE_ integer NOT NULL DEFAULT 1,
+ JOB_DEF_ID_ varchar(64),
+ PRIORITY_ bigint NOT NULL DEFAULT 0,
+ SEQUENCE_COUNTER_ bigint,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_JOBDEF (
+ ID_ varchar(64) NOT NULL,
+ REV_ integer,
+ PROC_DEF_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ ACT_ID_ varchar(255),
+ JOB_TYPE_ varchar(255) NOT NULL,
+ JOB_CONFIGURATION_ varchar(255),
+ SUSPENSION_STATE_ integer,
+ JOB_PRIORITY_ bigint,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RE_PROCDEF (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ CATEGORY_ varchar(255),
+ NAME_ varchar(255),
+ KEY_ varchar(255) not null,
+ VERSION_ integer not null,
+ DEPLOYMENT_ID_ varchar(64),
+ RESOURCE_NAME_ varchar(4000),
+ DGRM_RESOURCE_NAME_ varchar(4000),
+ HAS_START_FORM_KEY_ TINYINT,
+ SUSPENSION_STATE_ integer,
+ TENANT_ID_ varchar(64),
+ VERSION_TAG_ varchar(64),
+ HISTORY_TTL_ integer,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_TASK (
+ ID_ varchar(64),
+ REV_ integer,
+ EXECUTION_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ CASE_DEF_ID_ varchar(64),
+ NAME_ varchar(255),
+ PARENT_TASK_ID_ varchar(64),
+ DESCRIPTION_ varchar(4000),
+ TASK_DEF_KEY_ varchar(255),
+ OWNER_ varchar(255),
+ ASSIGNEE_ varchar(255),
+ DELEGATION_ varchar(64),
+ PRIORITY_ integer,
+ CREATE_TIME_ timestamp(3),
+ DUE_DATE_ datetime(3),
+ FOLLOW_UP_DATE_ datetime(3),
+ SUSPENSION_STATE_ integer,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_IDENTITYLINK (
+ ID_ varchar(64),
+ REV_ integer,
+ GROUP_ID_ varchar(255),
+ TYPE_ varchar(255),
+ USER_ID_ varchar(255),
+ TASK_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_VARIABLE (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ TYPE_ varchar(255) not null,
+ NAME_ varchar(255) not null,
+ EXECUTION_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ TASK_ID_ varchar(64),
+ BYTEARRAY_ID_ varchar(64),
+ DOUBLE_ double,
+ LONG_ bigint,
+ TEXT_ LONGBLOB NULL,
+ TEXT2_ LONGBLOB NULL,
+ VAR_SCOPE_ varchar(64) not null,
+ SEQUENCE_COUNTER_ bigint,
+ IS_CONCURRENT_LOCAL_ TINYINT,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_EVENT_SUBSCR (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ EVENT_TYPE_ varchar(255) not null,
+ EVENT_NAME_ varchar(255),
+ EXECUTION_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ ACTIVITY_ID_ varchar(255),
+ CONFIGURATION_ varchar(255),
+ CREATED_ timestamp(3) not null,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_INCIDENT (
+ ID_ varchar(64) not null,
+ REV_ integer not null,
+ INCIDENT_TIMESTAMP_ timestamp(3) not null,
+ INCIDENT_MSG_ varchar(4000),
+ INCIDENT_TYPE_ varchar(255) not null,
+ EXECUTION_ID_ varchar(64),
+ ACTIVITY_ID_ varchar(255),
+ PROC_INST_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ CAUSE_INCIDENT_ID_ varchar(64),
+ ROOT_CAUSE_INCIDENT_ID_ varchar(64),
+ CONFIGURATION_ varchar(255),
+ TENANT_ID_ varchar(64),
+ JOB_DEF_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_AUTHORIZATION (
+ ID_ varchar(64) not null,
+ REV_ integer not null,
+ TYPE_ integer not null,
+ GROUP_ID_ varchar(255),
+ USER_ID_ varchar(255),
+ RESOURCE_TYPE_ integer not null,
+ RESOURCE_ID_ varchar(64),
+ PERMS_ integer,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_FILTER (
+ ID_ varchar(64) not null,
+ REV_ integer not null,
+ RESOURCE_TYPE_ varchar(255) not null,
+ NAME_ varchar(255) not null,
+ OWNER_ varchar(255),
+ QUERY_ LONGTEXT not null,
+ PROPERTIES_ LONGTEXT,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_METER_LOG (
+ ID_ varchar(64) not null,
+ NAME_ varchar(64) not null,
+ REPORTER_ varchar(255),
+ VALUE_ bigint,
+ TIMESTAMP_ timestamp(3),
+ MILLISECONDS_ bigint DEFAULT 0,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_EXT_TASK (
+ ID_ varchar(64) not null,
+ REV_ integer not null,
+ WORKER_ID_ varchar(255),
+ TOPIC_NAME_ varchar(255),
+ RETRIES_ integer,
+ ERROR_MSG_ varchar(4000),
+ ERROR_DETAILS_ID_ varchar(64),
+ LOCK_EXP_TIME_ timestamp(3) NULL,
+ SUSPENSION_STATE_ integer,
+ EXECUTION_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ ACT_ID_ varchar(255),
+ ACT_INST_ID_ varchar(64),
+ TENANT_ID_ varchar(64),
+ PRIORITY_ bigint NOT NULL DEFAULT 0,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_RU_BATCH (
+ ID_ varchar(64) not null,
+ REV_ integer not null,
+ TYPE_ varchar(255),
+ TOTAL_JOBS_ integer,
+ JOBS_CREATED_ integer,
+ JOBS_PER_SEED_ integer,
+ INVOCATIONS_PER_JOB_ integer,
+ SEED_JOB_DEF_ID_ varchar(64),
+ BATCH_JOB_DEF_ID_ varchar(64),
+ MONITOR_JOB_DEF_ID_ varchar(64),
+ SUSPENSION_STATE_ integer,
+ CONFIGURATION_ varchar(255),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create index ACT_IDX_EXEC_BUSKEY on ACT_RU_EXECUTION(BUSINESS_KEY_);
+create index ACT_IDX_EXEC_TENANT_ID on ACT_RU_EXECUTION(TENANT_ID_);
+create index ACT_IDX_TASK_CREATE on ACT_RU_TASK(CREATE_TIME_);
+create index ACT_IDX_TASK_ASSIGNEE on ACT_RU_TASK(ASSIGNEE_);
+create index ACT_IDX_TASK_TENANT_ID on ACT_RU_TASK(TENANT_ID_);
+create index ACT_IDX_IDENT_LNK_USER on ACT_RU_IDENTITYLINK(USER_ID_);
+create index ACT_IDX_IDENT_LNK_GROUP on ACT_RU_IDENTITYLINK(GROUP_ID_);
+create index ACT_IDX_EVENT_SUBSCR_CONFIG_ on ACT_RU_EVENT_SUBSCR(CONFIGURATION_);
+create index ACT_IDX_EVENT_SUBSCR_TENANT_ID on ACT_RU_EVENT_SUBSCR(TENANT_ID_);
+create index ACT_IDX_VARIABLE_TASK_ID on ACT_RU_VARIABLE(TASK_ID_);
+create index ACT_IDX_VARIABLE_TENANT_ID on ACT_RU_VARIABLE(TENANT_ID_);
+create index ACT_IDX_ATHRZ_PROCEDEF on ACT_RU_IDENTITYLINK(PROC_DEF_ID_);
+create index ACT_IDX_INC_CONFIGURATION on ACT_RU_INCIDENT(CONFIGURATION_);
+create index ACT_IDX_INC_TENANT_ID on ACT_RU_INCIDENT(TENANT_ID_);
+-- CAM-5914
+create index ACT_IDX_JOB_EXECUTION_ID on ACT_RU_JOB(EXECUTION_ID_);
+-- this index needs to be limited in mariadb see CAM-6938
+create index ACT_IDX_JOB_HANDLER on ACT_RU_JOB(HANDLER_TYPE_(100),HANDLER_CFG_(155));
+create index ACT_IDX_JOB_PROCINST on ACT_RU_JOB(PROCESS_INSTANCE_ID_);
+create index ACT_IDX_JOB_TENANT_ID on ACT_RU_JOB(TENANT_ID_);
+create index ACT_IDX_JOBDEF_TENANT_ID on ACT_RU_JOBDEF(TENANT_ID_);
+
+-- new metric milliseconds column
+CREATE INDEX ACT_IDX_METER_LOG_MS ON ACT_RU_METER_LOG(MILLISECONDS_);
+CREATE INDEX ACT_IDX_METER_LOG_NAME_MS ON ACT_RU_METER_LOG(NAME_, MILLISECONDS_);
+CREATE INDEX ACT_IDX_METER_LOG_REPORT ON ACT_RU_METER_LOG(NAME_, REPORTER_, MILLISECONDS_);
+
+-- old metric timestamp column
+CREATE INDEX ACT_IDX_METER_LOG_TIME ON ACT_RU_METER_LOG(TIMESTAMP_);
+CREATE INDEX ACT_IDX_METER_LOG ON ACT_RU_METER_LOG(NAME_, TIMESTAMP_);
+
+create index ACT_IDX_EXT_TASK_TOPIC on ACT_RU_EXT_TASK(TOPIC_NAME_);
+create index ACT_IDX_EXT_TASK_TENANT_ID on ACT_RU_EXT_TASK(TENANT_ID_);
+create index ACT_IDX_EXT_TASK_PRIORITY ON ACT_RU_EXT_TASK(PRIORITY_);
+create index ACT_IDX_EXT_TASK_ERR_DETAILS ON ACT_RU_EXT_TASK(ERROR_DETAILS_ID_);
+create index ACT_IDX_AUTH_GROUP_ID ON ACT_RU_AUTHORIZATION(GROUP_ID_);
+create index ACT_IDX_JOB_JOB_DEF_ID on ACT_RU_JOB(JOB_DEF_ID_);
+
+alter table ACT_GE_BYTEARRAY
+ add constraint ACT_FK_BYTEARR_DEPL
+ foreign key (DEPLOYMENT_ID_)
+ references ACT_RE_DEPLOYMENT (ID_);
+
+alter table ACT_RU_EXECUTION
+ add constraint ACT_FK_EXE_PROCINST
+ foreign key (PROC_INST_ID_)
+ references ACT_RU_EXECUTION (ID_) on delete cascade on update cascade;
+
+alter table ACT_RU_EXECUTION
+ add constraint ACT_FK_EXE_PARENT
+ foreign key (PARENT_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_EXECUTION
+ add constraint ACT_FK_EXE_SUPER
+ foreign key (SUPER_EXEC_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_EXECUTION
+ add constraint ACT_FK_EXE_PROCDEF
+ foreign key (PROC_DEF_ID_)
+ references ACT_RE_PROCDEF (ID_);
+
+alter table ACT_RU_IDENTITYLINK
+ add constraint ACT_FK_TSKASS_TASK
+ foreign key (TASK_ID_)
+ references ACT_RU_TASK (ID_);
+
+alter table ACT_RU_IDENTITYLINK
+ add constraint ACT_FK_ATHRZ_PROCEDEF
+ foreign key (PROC_DEF_ID_)
+ references ACT_RE_PROCDEF(ID_);
+
+alter table ACT_RU_TASK
+ add constraint ACT_FK_TASK_EXE
+ foreign key (EXECUTION_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_TASK
+ add constraint ACT_FK_TASK_PROCINST
+ foreign key (PROC_INST_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_TASK
+ add constraint ACT_FK_TASK_PROCDEF
+ foreign key (PROC_DEF_ID_)
+ references ACT_RE_PROCDEF (ID_);
+
+alter table ACT_RU_VARIABLE
+ add constraint ACT_FK_VAR_EXE
+ foreign key (EXECUTION_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_VARIABLE
+ add constraint ACT_FK_VAR_PROCINST
+ foreign key (PROC_INST_ID_)
+ references ACT_RU_EXECUTION(ID_);
+
+alter table ACT_RU_VARIABLE
+ add constraint ACT_FK_VAR_BYTEARRAY
+ foreign key (BYTEARRAY_ID_)
+ references ACT_GE_BYTEARRAY (ID_);
+
+alter table ACT_RU_JOB
+ add constraint ACT_FK_JOB_EXCEPTION
+ foreign key (EXCEPTION_STACK_ID_)
+ references ACT_GE_BYTEARRAY (ID_);
+
+alter table ACT_RU_EVENT_SUBSCR
+ add constraint ACT_FK_EVENT_EXEC
+ foreign key (EXECUTION_ID_)
+ references ACT_RU_EXECUTION(ID_);
+
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_EXE
+ foreign key (EXECUTION_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_PROCINST
+ foreign key (PROC_INST_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_PROCDEF
+ foreign key (PROC_DEF_ID_)
+ references ACT_RE_PROCDEF (ID_);
+
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_CAUSE
+ foreign key (CAUSE_INCIDENT_ID_)
+ references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade;
+
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_RCAUSE
+ foreign key (ROOT_CAUSE_INCIDENT_ID_)
+ references ACT_RU_INCIDENT (ID_) on delete cascade on update cascade;
+
+alter table ACT_RU_EXT_TASK
+ add constraint ACT_FK_EXT_TASK_ERROR_DETAILS
+ foreign key (ERROR_DETAILS_ID_)
+ references ACT_GE_BYTEARRAY (ID_);
+
+create index ACT_IDX_INC_JOB_DEF on ACT_RU_INCIDENT(JOB_DEF_ID_);
+alter table ACT_RU_INCIDENT
+ add constraint ACT_FK_INC_JOB_DEF
+ foreign key (JOB_DEF_ID_)
+ references ACT_RU_JOBDEF (ID_);
+
+alter table ACT_RU_AUTHORIZATION
+ add constraint ACT_UNIQ_AUTH_USER
+ unique (USER_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_);
+
+alter table ACT_RU_AUTHORIZATION
+ add constraint ACT_UNIQ_AUTH_GROUP
+ unique (GROUP_ID_,TYPE_,RESOURCE_TYPE_,RESOURCE_ID_);
+
+alter table ACT_RU_VARIABLE
+ add constraint ACT_UNIQ_VARIABLE
+ unique (VAR_SCOPE_, NAME_);
+
+alter table ACT_RU_EXT_TASK
+ add constraint ACT_FK_EXT_TASK_EXE
+ foreign key (EXECUTION_ID_)
+ references ACT_RU_EXECUTION (ID_);
+
+create index ACT_IDX_BATCH_SEED_JOB_DEF ON ACT_RU_BATCH(SEED_JOB_DEF_ID_);
+alter table ACT_RU_BATCH
+ add constraint ACT_FK_BATCH_SEED_JOB_DEF
+ foreign key (SEED_JOB_DEF_ID_)
+ references ACT_RU_JOBDEF (ID_);
+
+create index ACT_IDX_BATCH_MONITOR_JOB_DEF ON ACT_RU_BATCH(MONITOR_JOB_DEF_ID_);
+alter table ACT_RU_BATCH
+ add constraint ACT_FK_BATCH_MONITOR_JOB_DEF
+ foreign key (MONITOR_JOB_DEF_ID_)
+ references ACT_RU_JOBDEF (ID_);
+
+create index ACT_IDX_BATCH_JOB_DEF ON ACT_RU_BATCH(BATCH_JOB_DEF_ID_);
+alter table ACT_RU_BATCH
+ add constraint ACT_FK_BATCH_JOB_DEF
+ foreign key (BATCH_JOB_DEF_ID_)
+ references ACT_RU_JOBDEF (ID_);
+
+-- indexes for deadlock problems - https://app.camunda.com/jira/browse/CAM-2567 --
+create index ACT_IDX_INC_CAUSEINCID on ACT_RU_INCIDENT(CAUSE_INCIDENT_ID_);
+create index ACT_IDX_INC_EXID on ACT_RU_INCIDENT(EXECUTION_ID_);
+create index ACT_IDX_INC_PROCDEFID on ACT_RU_INCIDENT(PROC_DEF_ID_);
+create index ACT_IDX_INC_PROCINSTID on ACT_RU_INCIDENT(PROC_INST_ID_);
+create index ACT_IDX_INC_ROOTCAUSEINCID on ACT_RU_INCIDENT(ROOT_CAUSE_INCIDENT_ID_);
+-- index for deadlock problem - https://app.camunda.com/jira/browse/CAM-4440 --
+create index ACT_IDX_AUTH_RESOURCE_ID on ACT_RU_AUTHORIZATION(RESOURCE_ID_);
+-- index to prevent deadlock on fk constraint - https://app.camunda.com/jira/browse/CAM-5440 --
+create index ACT_IDX_EXT_TASK_EXEC on ACT_RU_EXT_TASK(EXECUTION_ID_);
+
+-- indexes to improve deployment
+create index ACT_IDX_BYTEARRAY_NAME on ACT_GE_BYTEARRAY(NAME_);
+create index ACT_IDX_DEPLOYMENT_NAME on ACT_RE_DEPLOYMENT(NAME_);
+create index ACT_IDX_DEPLOYMENT_TENANT_ID on ACT_RE_DEPLOYMENT(TENANT_ID_);
+create index ACT_IDX_JOBDEF_PROC_DEF_ID ON ACT_RU_JOBDEF(PROC_DEF_ID_);
+create index ACT_IDX_JOB_HANDLER_TYPE ON ACT_RU_JOB(HANDLER_TYPE_);
+create index ACT_IDX_EVENT_SUBSCR_EVT_NAME ON ACT_RU_EVENT_SUBSCR(EVENT_NAME_);
+create index ACT_IDX_PROCDEF_DEPLOYMENT_ID ON ACT_RE_PROCDEF(DEPLOYMENT_ID_);
+create index ACT_IDX_PROCDEF_TENANT_ID ON ACT_RE_PROCDEF(TENANT_ID_);
+create index ACT_IDX_PROCDEF_VER_TAG ON ACT_RE_PROCDEF(VERSION_TAG_);
+-- create case definition table --
+create table ACT_RE_CASE_DEF (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ CATEGORY_ varchar(255),
+ NAME_ varchar(255),
+ KEY_ varchar(255) not null,
+ VERSION_ integer not null,
+ DEPLOYMENT_ID_ varchar(64),
+ RESOURCE_NAME_ varchar(4000),
+ DGRM_RESOURCE_NAME_ varchar(4000),
+ TENANT_ID_ varchar(64),
+ HISTORY_TTL_ integer,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create case execution table --
+create table ACT_RU_CASE_EXECUTION (
+ ID_ varchar(64) NOT NULL,
+ REV_ integer,
+ CASE_INST_ID_ varchar(64),
+ SUPER_CASE_EXEC_ varchar(64),
+ SUPER_EXEC_ varchar(64),
+ BUSINESS_KEY_ varchar(255),
+ PARENT_ID_ varchar(64),
+ CASE_DEF_ID_ varchar(64),
+ ACT_ID_ varchar(255),
+ PREV_STATE_ integer,
+ CURRENT_STATE_ integer,
+ REQUIRED_ boolean,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create case sentry part table --
+
+create table ACT_RU_CASE_SENTRY_PART (
+ ID_ varchar(64) NOT NULL,
+ REV_ integer,
+ CASE_INST_ID_ varchar(64),
+ CASE_EXEC_ID_ varchar(64),
+ SENTRY_ID_ varchar(255),
+ TYPE_ varchar(255),
+ SOURCE_CASE_EXEC_ID_ varchar(64),
+ STANDARD_EVENT_ varchar(255),
+ SOURCE_ varchar(255),
+ VARIABLE_EVENT_ varchar(255),
+ VARIABLE_NAME_ varchar(255),
+ SATISFIED_ boolean,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create index on business key --
+create index ACT_IDX_CASE_EXEC_BUSKEY on ACT_RU_CASE_EXECUTION(BUSINESS_KEY_);
+
+-- create foreign key constraints on ACT_RU_CASE_EXECUTION --
+alter table ACT_RU_CASE_EXECUTION
+ add constraint ACT_FK_CASE_EXE_CASE_INST
+ foreign key (CASE_INST_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_) on delete cascade on update cascade;
+
+alter table ACT_RU_CASE_EXECUTION
+ add constraint ACT_FK_CASE_EXE_PARENT
+ foreign key (PARENT_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+alter table ACT_RU_CASE_EXECUTION
+ add constraint ACT_FK_CASE_EXE_CASE_DEF
+ foreign key (CASE_DEF_ID_)
+ references ACT_RE_CASE_DEF(ID_);
+
+-- create foreign key constraints on ACT_RU_VARIABLE --
+alter table ACT_RU_VARIABLE
+ add constraint ACT_FK_VAR_CASE_EXE
+ foreign key (CASE_EXECUTION_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+alter table ACT_RU_VARIABLE
+ add constraint ACT_FK_VAR_CASE_INST
+ foreign key (CASE_INST_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+-- create foreign key constraints on ACT_RU_TASK --
+alter table ACT_RU_TASK
+ add constraint ACT_FK_TASK_CASE_EXE
+ foreign key (CASE_EXECUTION_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+alter table ACT_RU_TASK
+ add constraint ACT_FK_TASK_CASE_DEF
+ foreign key (CASE_DEF_ID_)
+ references ACT_RE_CASE_DEF(ID_);
+
+-- create foreign key constraints on ACT_RU_CASE_SENTRY_PART --
+alter table ACT_RU_CASE_SENTRY_PART
+ add constraint ACT_FK_CASE_SENTRY_CASE_INST
+ foreign key (CASE_INST_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+alter table ACT_RU_CASE_SENTRY_PART
+ add constraint ACT_FK_CASE_SENTRY_CASE_EXEC
+ foreign key (CASE_EXEC_ID_)
+ references ACT_RU_CASE_EXECUTION(ID_);
+
+create index ACT_IDX_CASE_DEF_TENANT_ID on ACT_RE_CASE_DEF(TENANT_ID_);
+create index ACT_IDX_CASE_EXEC_TENANT_ID on ACT_RU_CASE_EXECUTION(TENANT_ID_);
+-- create decision definition table --
+create table ACT_RE_DECISION_DEF (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ CATEGORY_ varchar(255),
+ NAME_ varchar(255),
+ KEY_ varchar(255) not null,
+ VERSION_ integer not null,
+ DEPLOYMENT_ID_ varchar(64),
+ RESOURCE_NAME_ varchar(4000),
+ DGRM_RESOURCE_NAME_ varchar(4000),
+ DEC_REQ_ID_ varchar(64),
+ DEC_REQ_KEY_ varchar(255),
+ TENANT_ID_ varchar(64),
+ HISTORY_TTL_ integer,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create decision requirements definition table --
+create table ACT_RE_DECISION_REQ_DEF (
+ ID_ varchar(64) NOT NULL,
+ REV_ integer,
+ CATEGORY_ varchar(255),
+ NAME_ varchar(255),
+ KEY_ varchar(255) NOT NULL,
+ VERSION_ integer NOT NULL,
+ DEPLOYMENT_ID_ varchar(64),
+ RESOURCE_NAME_ varchar(4000),
+ DGRM_RESOURCE_NAME_ varchar(4000),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+alter table ACT_RE_DECISION_DEF
+ add constraint ACT_FK_DEC_REQ
+ foreign key (DEC_REQ_ID_)
+ references ACT_RE_DECISION_REQ_DEF(ID_);
+
+create index ACT_IDX_DEC_DEF_TENANT_ID on ACT_RE_DECISION_DEF(TENANT_ID_);
+create index ACT_IDX_DEC_DEF_REQ_ID on ACT_RE_DECISION_DEF(DEC_REQ_ID_);
+create index ACT_IDX_DEC_REQ_DEF_TENANT_ID on ACT_RE_DECISION_REQ_DEF(TENANT_ID_);
+create table ACT_HI_PROCINST (
+ ID_ varchar(64) not null,
+ PROC_INST_ID_ varchar(64) not null,
+ BUSINESS_KEY_ varchar(255),
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64) not null,
+ START_TIME_ datetime(3) not null,
+ END_TIME_ datetime(3),
+ DURATION_ bigint,
+ START_USER_ID_ varchar(255),
+ START_ACT_ID_ varchar(255),
+ END_ACT_ID_ varchar(255),
+ SUPER_PROCESS_INSTANCE_ID_ varchar(64),
+ SUPER_CASE_INSTANCE_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ DELETE_REASON_ varchar(4000),
+ TENANT_ID_ varchar(64),
+ STATE_ varchar(255),
+ primary key (ID_),
+ unique (PROC_INST_ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_ACTINST (
+ ID_ varchar(64) not null,
+ PARENT_ACT_INST_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64) not null,
+ PROC_INST_ID_ varchar(64) not null,
+ EXECUTION_ID_ varchar(64) not null,
+ ACT_ID_ varchar(255) not null,
+ TASK_ID_ varchar(64),
+ CALL_PROC_INST_ID_ varchar(64),
+ CALL_CASE_INST_ID_ varchar(64),
+ ACT_NAME_ varchar(255),
+ ACT_TYPE_ varchar(255) not null,
+ ASSIGNEE_ varchar(64),
+ START_TIME_ datetime(3) not null,
+ END_TIME_ datetime(3),
+ DURATION_ bigint,
+ ACT_INST_STATE_ integer,
+ SEQUENCE_COUNTER_ bigint,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_TASKINST (
+ ID_ varchar(64) not null,
+ TASK_DEF_KEY_ varchar(255),
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ CASE_DEF_KEY_ varchar(255),
+ CASE_DEF_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ ACT_INST_ID_ varchar(64),
+ NAME_ varchar(255),
+ PARENT_TASK_ID_ varchar(64),
+ DESCRIPTION_ varchar(4000),
+ OWNER_ varchar(255),
+ ASSIGNEE_ varchar(255),
+ START_TIME_ datetime(3) not null,
+ END_TIME_ datetime(3),
+ DURATION_ bigint,
+ DELETE_REASON_ varchar(4000),
+ PRIORITY_ integer,
+ DUE_DATE_ datetime(3),
+ FOLLOW_UP_DATE_ datetime(3),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_VARINST (
+ ID_ varchar(64) not null,
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ ACT_INST_ID_ varchar(64),
+ CASE_DEF_KEY_ varchar(255),
+ CASE_DEF_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ TASK_ID_ varchar(64),
+ NAME_ varchar(255) not null,
+ VAR_TYPE_ varchar(100),
+ REV_ integer,
+ BYTEARRAY_ID_ varchar(64),
+ DOUBLE_ double,
+ LONG_ bigint,
+ TEXT_ LONGBLOB NULL,
+ TEXT2_ LONGBLOB NULL,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_DETAIL (
+ ID_ varchar(64) not null,
+ TYPE_ varchar(255) not null,
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ CASE_DEF_KEY_ varchar(255),
+ CASE_DEF_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ TASK_ID_ varchar(64),
+ ACT_INST_ID_ varchar(64),
+ VAR_INST_ID_ varchar(64),
+ NAME_ varchar(255) not null,
+ VAR_TYPE_ varchar(255),
+ REV_ integer,
+ TIME_ datetime(3) not null,
+ BYTEARRAY_ID_ varchar(64),
+ DOUBLE_ double,
+ LONG_ bigint,
+ TEXT_ LONGBLOB NULL,
+ TEXT2_ LONGBLOB NULL,
+ SEQUENCE_COUNTER_ bigint,
+ TENANT_ID_ varchar(64),
+ OPERATION_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_IDENTITYLINK (
+ ID_ varchar(64) not null,
+ TIMESTAMP_ timestamp(3) not null,
+ TYPE_ varchar(255),
+ USER_ID_ varchar(255),
+ GROUP_ID_ varchar(255),
+ TASK_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ OPERATION_TYPE_ varchar(64),
+ ASSIGNER_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_COMMENT (
+ ID_ varchar(64) not null,
+ TYPE_ varchar(255),
+ TIME_ datetime(3) not null,
+ USER_ID_ varchar(255),
+ TASK_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ ACTION_ varchar(255),
+ MESSAGE_ varchar(4000),
+ FULL_MSG_ LONGBLOB,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_ATTACHMENT (
+ ID_ varchar(64) not null,
+ REV_ integer,
+ USER_ID_ varchar(255),
+ NAME_ varchar(255),
+ DESCRIPTION_ varchar(4000),
+ TYPE_ varchar(255),
+ TASK_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ URL_ varchar(4000),
+ CONTENT_ID_ varchar(64),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_OP_LOG (
+ ID_ varchar(64) not null,
+ DEPLOYMENT_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ PROC_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ CASE_DEF_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ CASE_EXECUTION_ID_ varchar(64),
+ TASK_ID_ varchar(64),
+ JOB_ID_ varchar(64),
+ JOB_DEF_ID_ varchar(64),
+ BATCH_ID_ varchar(64),
+ USER_ID_ varchar(255),
+ TIMESTAMP_ timestamp(3) not null,
+ OPERATION_TYPE_ varchar(64),
+ OPERATION_ID_ varchar(64),
+ ENTITY_TYPE_ varchar(30),
+ PROPERTY_ varchar(64),
+ ORG_VALUE_ varchar(4000),
+ NEW_VALUE_ varchar(4000),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_INCIDENT (
+ ID_ varchar(64) not null,
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ CREATE_TIME_ timestamp(3) not null,
+ END_TIME_ timestamp(3) null,
+ INCIDENT_MSG_ varchar(4000),
+ INCIDENT_TYPE_ varchar(255) not null,
+ ACTIVITY_ID_ varchar(255),
+ CAUSE_INCIDENT_ID_ varchar(64),
+ ROOT_CAUSE_INCIDENT_ID_ varchar(64),
+ CONFIGURATION_ varchar(255),
+ INCIDENT_STATE_ integer,
+ TENANT_ID_ varchar(64),
+ JOB_DEF_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_JOB_LOG (
+ ID_ varchar(64) not null,
+ TIMESTAMP_ timestamp(3) not null,
+ JOB_ID_ varchar(64) not null,
+ JOB_DUEDATE_ timestamp(3) NULL,
+ JOB_RETRIES_ integer,
+ JOB_PRIORITY_ bigint NOT NULL DEFAULT 0,
+ JOB_EXCEPTION_MSG_ varchar(4000),
+ JOB_EXCEPTION_STACK_ID_ varchar(64),
+ JOB_STATE_ integer,
+ JOB_DEF_ID_ varchar(64),
+ JOB_DEF_TYPE_ varchar(255),
+ JOB_DEF_CONFIGURATION_ varchar(255),
+ ACT_ID_ varchar(255),
+ EXECUTION_ID_ varchar(64),
+ PROCESS_INSTANCE_ID_ varchar(64),
+ PROCESS_DEF_ID_ varchar(64),
+ PROCESS_DEF_KEY_ varchar(255),
+ DEPLOYMENT_ID_ varchar(64),
+ SEQUENCE_COUNTER_ bigint,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_BATCH (
+ ID_ varchar(64) not null,
+ TYPE_ varchar(255),
+ TOTAL_JOBS_ integer,
+ JOBS_PER_SEED_ integer,
+ INVOCATIONS_PER_JOB_ integer,
+ SEED_JOB_DEF_ID_ varchar(64),
+ MONITOR_JOB_DEF_ID_ varchar(64),
+ BATCH_JOB_DEF_ID_ varchar(64),
+ TENANT_ID_ varchar(64),
+ START_TIME_ datetime(3) not null,
+ END_TIME_ datetime(3),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_EXT_TASK_LOG (
+ ID_ varchar(64) not null,
+ TIMESTAMP_ timestamp(3) not null,
+ EXT_TASK_ID_ varchar(64) not null,
+ RETRIES_ integer,
+ TOPIC_NAME_ varchar(255),
+ WORKER_ID_ varchar(255),
+ PRIORITY_ bigint NOT NULL DEFAULT 0,
+ ERROR_MSG_ varchar(4000),
+ ERROR_DETAILS_ID_ varchar(64),
+ ACT_ID_ varchar(255),
+ ACT_INST_ID_ varchar(64),
+ EXECUTION_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ PROC_DEF_ID_ varchar(64),
+ PROC_DEF_KEY_ varchar(255),
+ TENANT_ID_ varchar(64),
+ STATE_ integer,
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create index ACT_IDX_HI_PRO_INST_END on ACT_HI_PROCINST(END_TIME_);
+create index ACT_IDX_HI_PRO_I_BUSKEY on ACT_HI_PROCINST(BUSINESS_KEY_);
+create index ACT_IDX_HI_PRO_INST_TENANT_ID on ACT_HI_PROCINST(TENANT_ID_);
+create index ACT_IDX_HI_PRO_INST_PROC_DEF_KEY on ACT_HI_PROCINST(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_ACT_INST_START on ACT_HI_ACTINST(START_TIME_);
+create index ACT_IDX_HI_ACT_INST_END on ACT_HI_ACTINST(END_TIME_);
+create index ACT_IDX_HI_ACT_INST_PROCINST on ACT_HI_ACTINST(PROC_INST_ID_, ACT_ID_);
+create index ACT_IDX_HI_ACT_INST_COMP on ACT_HI_ACTINST(EXECUTION_ID_, ACT_ID_, END_TIME_, ID_);
+create index ACT_IDX_HI_ACT_INST_STATS on ACT_HI_ACTINST(PROC_DEF_ID_, ACT_ID_, END_TIME_, ACT_INST_STATE_);
+create index ACT_IDX_HI_ACT_INST_TENANT_ID on ACT_HI_ACTINST(TENANT_ID_);
+create index ACT_IDX_HI_ACT_INST_PROC_DEF_KEY on ACT_HI_ACTINST(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_TASK_INST_TENANT_ID on ACT_HI_TASKINST(TENANT_ID_);
+create index ACT_IDX_HI_TASK_INST_PROC_DEF_KEY on ACT_HI_TASKINST(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_DETAIL_PROC_INST on ACT_HI_DETAIL(PROC_INST_ID_);
+create index ACT_IDX_HI_DETAIL_ACT_INST on ACT_HI_DETAIL(ACT_INST_ID_);
+create index ACT_IDX_HI_DETAIL_CASE_INST on ACT_HI_DETAIL(CASE_INST_ID_);
+create index ACT_IDX_HI_DETAIL_CASE_EXEC on ACT_HI_DETAIL(CASE_EXECUTION_ID_);
+create index ACT_IDX_HI_DETAIL_TIME on ACT_HI_DETAIL(TIME_);
+create index ACT_IDX_HI_DETAIL_NAME on ACT_HI_DETAIL(NAME_);
+create index ACT_IDX_HI_DETAIL_TASK_ID on ACT_HI_DETAIL(TASK_ID_);
+create index ACT_IDX_HI_DETAIL_TENANT_ID on ACT_HI_DETAIL(TENANT_ID_);
+create index ACT_IDX_HI_DETAIL_PROC_DEF_KEY on ACT_HI_DETAIL(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_IDENT_LNK_USER on ACT_HI_IDENTITYLINK(USER_ID_);
+create index ACT_IDX_HI_IDENT_LNK_GROUP on ACT_HI_IDENTITYLINK(GROUP_ID_);
+create index ACT_IDX_HI_IDENT_LNK_TENANT_ID on ACT_HI_IDENTITYLINK(TENANT_ID_);
+create index ACT_IDX_HI_IDENT_LNK_PROC_DEF_KEY on ACT_HI_IDENTITYLINK(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_PROCVAR_PROC_INST on ACT_HI_VARINST(PROC_INST_ID_);
+create index ACT_IDX_HI_PROCVAR_NAME_TYPE on ACT_HI_VARINST(NAME_, VAR_TYPE_);
+create index ACT_IDX_HI_CASEVAR_CASE_INST on ACT_HI_VARINST(CASE_INST_ID_);
+create index ACT_IDX_HI_VAR_INST_TENANT_ID on ACT_HI_VARINST(TENANT_ID_);
+create index ACT_IDX_HI_VAR_INST_PROC_DEF_KEY on ACT_HI_VARINST(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_INCIDENT_TENANT_ID on ACT_HI_INCIDENT(TENANT_ID_);
+create index ACT_IDX_HI_INCIDENT_PROC_DEF_KEY on ACT_HI_INCIDENT(PROC_DEF_KEY_);
+
+create index ACT_IDX_HI_JOB_LOG_PROCINST on ACT_HI_JOB_LOG(PROCESS_INSTANCE_ID_);
+create index ACT_IDX_HI_JOB_LOG_PROCDEF on ACT_HI_JOB_LOG(PROCESS_DEF_ID_);
+create index ACT_IDX_HI_JOB_LOG_TENANT_ID on ACT_HI_JOB_LOG(TENANT_ID_);
+create index ACT_IDX_HI_JOB_LOG_JOB_DEF_ID on ACT_HI_JOB_LOG(JOB_DEF_ID_);
+create index ACT_IDX_HI_JOB_LOG_PROC_DEF_KEY on ACT_HI_JOB_LOG(PROCESS_DEF_KEY_);
+
+create index ACT_HI_EXT_TASK_LOG_PROCINST on ACT_HI_EXT_TASK_LOG(PROC_INST_ID_);
+create index ACT_HI_EXT_TASK_LOG_PROCDEF on ACT_HI_EXT_TASK_LOG(PROC_DEF_ID_);
+create index ACT_HI_EXT_TASK_LOG_PROC_DEF_KEY on ACT_HI_EXT_TASK_LOG(PROC_DEF_KEY_);
+create index ACT_HI_EXT_TASK_LOG_TENANT_ID on ACT_HI_EXT_TASK_LOG(TENANT_ID_);
+
+create index ACT_IDX_HI_OP_LOG_PROCINST on ACT_HI_OP_LOG(PROC_INST_ID_);
+create index ACT_IDX_HI_OP_LOG_PROCDEF on ACT_HI_OP_LOG(PROC_DEF_ID_);
+create table ACT_HI_CASEINST (
+ ID_ varchar(64) not null,
+ CASE_INST_ID_ varchar(64) not null,
+ BUSINESS_KEY_ varchar(255),
+ CASE_DEF_ID_ varchar(64) not null,
+ CREATE_TIME_ datetime(3) not null,
+ CLOSE_TIME_ datetime(3),
+ DURATION_ bigint,
+ STATE_ integer,
+ CREATE_USER_ID_ varchar(255),
+ SUPER_CASE_INSTANCE_ID_ varchar(64),
+ SUPER_PROCESS_INSTANCE_ID_ varchar(64),
+ TENANT_ID_ varchar(64),
+ primary key (ID_),
+ unique (CASE_INST_ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_HI_CASEACTINST (
+ ID_ varchar(64) not null,
+ PARENT_ACT_INST_ID_ varchar(64),
+ CASE_DEF_ID_ varchar(64) not null,
+ CASE_INST_ID_ varchar(64) not null,
+ CASE_ACT_ID_ varchar(255) not null,
+ TASK_ID_ varchar(64),
+ CALL_PROC_INST_ID_ varchar(64),
+ CALL_CASE_INST_ID_ varchar(64),
+ CASE_ACT_NAME_ varchar(255),
+ CASE_ACT_TYPE_ varchar(255),
+ CREATE_TIME_ datetime(3) not null,
+ END_TIME_ datetime(3),
+ DURATION_ bigint,
+ STATE_ integer,
+ REQUIRED_ boolean,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create index ACT_IDX_HI_CAS_I_CLOSE on ACT_HI_CASEINST(CLOSE_TIME_);
+create index ACT_IDX_HI_CAS_I_BUSKEY on ACT_HI_CASEINST(BUSINESS_KEY_);
+create index ACT_IDX_HI_CAS_I_TENANT_ID on ACT_HI_CASEINST(TENANT_ID_);
+create index ACT_IDX_HI_CAS_A_I_CREATE on ACT_HI_CASEACTINST(CREATE_TIME_);
+create index ACT_IDX_HI_CAS_A_I_END on ACT_HI_CASEACTINST(END_TIME_);
+create index ACT_IDX_HI_CAS_A_I_COMP on ACT_HI_CASEACTINST(CASE_ACT_ID_, END_TIME_, ID_);
+create index ACT_IDX_HI_CAS_A_I_CASEINST on ACT_HI_CASEACTINST(CASE_INST_ID_, CASE_ACT_ID_);
+create index ACT_IDX_HI_CAS_A_I_TENANT_ID on ACT_HI_CASEACTINST(TENANT_ID_);
+-- create history decision instance table --
+create table ACT_HI_DECINST (
+ ID_ varchar(64) NOT NULL,
+ DEC_DEF_ID_ varchar(64) NOT NULL,
+ DEC_DEF_KEY_ varchar(255) NOT NULL,
+ DEC_DEF_NAME_ varchar(255),
+ PROC_DEF_KEY_ varchar(255),
+ PROC_DEF_ID_ varchar(64),
+ PROC_INST_ID_ varchar(64),
+ CASE_DEF_KEY_ varchar(255),
+ CASE_DEF_ID_ varchar(64),
+ CASE_INST_ID_ varchar(64),
+ ACT_INST_ID_ varchar(64),
+ ACT_ID_ varchar(255),
+ EVAL_TIME_ datetime(3) not null,
+ COLLECT_VALUE_ double,
+ USER_ID_ varchar(255),
+ ROOT_DEC_INST_ID_ varchar(64),
+ DEC_REQ_ID_ varchar(64),
+ DEC_REQ_KEY_ varchar(255),
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create history decision input table --
+create table ACT_HI_DEC_IN (
+ ID_ varchar(64) NOT NULL,
+ DEC_INST_ID_ varchar(64) NOT NULL,
+ CLAUSE_ID_ varchar(64),
+ CLAUSE_NAME_ varchar(255),
+ VAR_TYPE_ varchar(100),
+ BYTEARRAY_ID_ varchar(64),
+ DOUBLE_ double,
+ LONG_ bigint,
+ TEXT_ LONGBLOB NULL,
+ TEXT2_ LONGBLOB NULL,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+-- create history decision output table --
+create table ACT_HI_DEC_OUT (
+ ID_ varchar(64) NOT NULL,
+ DEC_INST_ID_ varchar(64) NOT NULL,
+ CLAUSE_ID_ varchar(64),
+ CLAUSE_NAME_ varchar(255),
+ RULE_ID_ varchar(64),
+ RULE_ORDER_ integer,
+ VAR_NAME_ varchar(255),
+ VAR_TYPE_ varchar(100),
+ BYTEARRAY_ID_ varchar(64),
+ DOUBLE_ double,
+ LONG_ bigint,
+ TEXT_ LONGBLOB NULL,
+ TEXT2_ LONGBLOB NULL,
+ TENANT_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+
+create index ACT_IDX_HI_DEC_INST_ID on ACT_HI_DECINST(DEC_DEF_ID_);
+create index ACT_IDX_HI_DEC_INST_KEY on ACT_HI_DECINST(DEC_DEF_KEY_);
+create index ACT_IDX_HI_DEC_INST_PI on ACT_HI_DECINST(PROC_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_CI on ACT_HI_DECINST(CASE_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_ACT on ACT_HI_DECINST(ACT_ID_);
+create index ACT_IDX_HI_DEC_INST_ACT_INST on ACT_HI_DECINST(ACT_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_TIME on ACT_HI_DECINST(EVAL_TIME_);
+create index ACT_IDX_HI_DEC_INST_TENANT_ID on ACT_HI_DECINST(TENANT_ID_);
+create index ACT_IDX_HI_DEC_INST_ROOT_ID on ACT_HI_DECINST(ROOT_DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_INST_REQ_ID on ACT_HI_DECINST(DEC_REQ_ID_);
+create index ACT_IDX_HI_DEC_INST_REQ_KEY on ACT_HI_DECINST(DEC_REQ_KEY_);
+
+
+create index ACT_IDX_HI_DEC_IN_INST on ACT_HI_DEC_IN(DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_IN_CLAUSE on ACT_HI_DEC_IN(DEC_INST_ID_, CLAUSE_ID_);
+
+create index ACT_IDX_HI_DEC_OUT_INST on ACT_HI_DEC_OUT(DEC_INST_ID_);
+create index ACT_IDX_HI_DEC_OUT_RULE on ACT_HI_DEC_OUT(RULE_ORDER_, CLAUSE_ID_);
+
+
+-- mariadb identity:
+
+create table ACT_ID_GROUP (
+ ID_ varchar(64),
+ REV_ integer,
+ NAME_ varchar(255),
+ TYPE_ varchar(255),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_MEMBERSHIP (
+ USER_ID_ varchar(64),
+ GROUP_ID_ varchar(64),
+ primary key (USER_ID_, GROUP_ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_USER (
+ ID_ varchar(64),
+ REV_ integer,
+ FIRST_ varchar(255),
+ LAST_ varchar(255),
+ EMAIL_ varchar(255),
+ PWD_ varchar(255),
+ SALT_ varchar(255),
+ PICTURE_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_INFO (
+ ID_ varchar(64),
+ REV_ integer,
+ USER_ID_ varchar(64),
+ TYPE_ varchar(64),
+ KEY_ varchar(255),
+ VALUE_ varchar(255),
+ PASSWORD_ LONGBLOB,
+ PARENT_ID_ varchar(255),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_TENANT (
+ ID_ varchar(64),
+ REV_ integer,
+ NAME_ varchar(255),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+create table ACT_ID_TENANT_MEMBER (
+ ID_ varchar(64) not null,
+ TENANT_ID_ varchar(64) not null,
+ USER_ID_ varchar(64),
+ GROUP_ID_ varchar(64),
+ primary key (ID_)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE utf8_bin;
+
+alter table ACT_ID_MEMBERSHIP
+ add constraint ACT_FK_MEMB_GROUP
+ foreign key (GROUP_ID_)
+ references ACT_ID_GROUP (ID_);
+
+alter table ACT_ID_MEMBERSHIP
+ add constraint ACT_FK_MEMB_USER
+ foreign key (USER_ID_)
+ references ACT_ID_USER (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+ add constraint ACT_UNIQ_TENANT_MEMB_USER
+ unique (TENANT_ID_, USER_ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+ add constraint ACT_UNIQ_TENANT_MEMB_GROUP
+ unique (TENANT_ID_, GROUP_ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+ add constraint ACT_FK_TENANT_MEMB
+ foreign key (TENANT_ID_)
+ references ACT_ID_TENANT (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+ add constraint ACT_FK_TENANT_MEMB_USER
+ foreign key (USER_ID_)
+ references ACT_ID_USER (ID_);
+
+alter table ACT_ID_TENANT_MEMBER
+ add constraint ACT_FK_TENANT_MEMB_GROUP
+ foreign key (GROUP_ID_)
+ references ACT_ID_GROUP (ID_);
+
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql
new file mode 100644
index 000000000..3658c6c23
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/camunda/mysql_create_camunda_admin.sql
@@ -0,0 +1,25 @@
+USE camundabpmn;
+
+INSERT INTO `act_id_group` (`ID_`, `REV_`, `NAME_`, `TYPE_`) VALUES ('camunda-admin',1,'camunda BPM Administrators','SYSTEM');
+
+INSERT INTO `act_id_user` (`ID_`, `REV_`, `FIRST_`, `LAST_`, `EMAIL_`, `PWD_`, `SALT_`, `PICTURE_ID_`) VALUES ('admin',1,'admin','user','camundaadmin@onap.org','{SHA-512}n5jUw7fvXM9sZBcrIkLiAOCqiPHutaqEkbg6IQVQdylVP1im8SczBJf4f2xL7cvWwIAZjkcSSQzgFTsdaJSEiA==','ftTn4gNgMcq07wdSD0lEJQ==',NULL);
+
+INSERT INTO `act_id_membership` (`USER_ID_`, `GROUP_ID_`) VALUES ('admin','camunda-admin');
+
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b0e028-a3c6-11e7-b0ec-0242ac120003',1,1,NULL,'admin',1,'admin',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b525e9-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,2,'camunda-admin',2);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49b8814a-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,0,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49baa42b-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,1,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49bd8a5c-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,2,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49bfd44d-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,3,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c1f72e-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,4,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c41a0f-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,5,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49c77570-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,6,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49ca5ba1-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,7,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49cca592-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,8,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49ceef83-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,9,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d11264-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,10,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d38365-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,11,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d5a646-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,12,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49d83e57-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,13,'*',2147483647);
+INSERT INTO `act_ru_authorization` (`ID_`, `REV_`, `TYPE_`, `GROUP_ID_`, `USER_ID_`, `RESOURCE_TYPE_`, `RESOURCE_ID_`, `PERMS_`) VALUES ('49da3a28-a3c6-11e7-b0ec-0242ac120003',1,1,'camunda-admin',NULL,14,'*',2147483647);
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql
new file mode 100644
index 000000000..ca002fbe6
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Catalog-schema.sql
@@ -0,0 +1,462 @@
+
+ alter table HEAT_TEMPLATE_PARAMS
+ drop
+ foreign key FK_p3ol1xcvp831glqohrlu6o07o;
+
+ alter table MODEL_RECIPE
+ drop
+ foreign key FK_c23r0puyqug6n44jg39dutm1c;
+
+ alter table SERVICE
+ drop
+ foreign key FK_l3qy594u2xr1tfpmma3uigsna;
+
+ alter table SERVICE_RECIPE
+ drop
+ foreign key FK_i3r1b8j6e7dg9hkp49evnnm5y;
+
+ alter table SERVICE_TO_RESOURCE_CUSTOMIZATIONS
+ drop
+ foreign key FK_kiddaay6cfe0aob1f1jaio1bb;
+
+ alter table VF_MODULE
+ drop
+ foreign key FK_12jptc9it7gs3pru08skobxxc;
+
+ alter table VNF_RESOURCE_CUSTOMIZATION
+ drop
+ foreign key FK_iff1ayhb1hrp5jhea3vvikuni;
+
+ drop table if exists ALLOTTED_RESOURCE;
+
+ drop table if exists ALLOTTED_RESOURCE_CUSTOMIZATION;
+
+ drop table if exists HEAT_ENVIRONMENT;
+
+ drop table if exists HEAT_FILES;
+
+ drop table if exists HEAT_NESTED_TEMPLATE;
+
+ drop table if exists HEAT_TEMPLATE;
+
+ drop table if exists HEAT_TEMPLATE_PARAMS;
+
+ drop table if exists MODEL;
+
+ drop table if exists MODEL_RECIPE;
+
+ drop table if exists NETWORK_RECIPE;
+
+ drop table if exists NETWORK_RESOURCE;
+
+ drop table if exists NETWORK_RESOURCE_CUSTOMIZATION;
+
+ drop table if exists SERVICE;
+
+ drop table if exists SERVICE_RECIPE;
+
+ drop table if exists SERVICE_TO_ALLOTTED_RESOURCES;
+
+ drop table if exists SERVICE_TO_NETWORKS;
+
+ drop table if exists SERVICE_TO_RESOURCE_CUSTOMIZATIONS;
+
+ drop table if exists TEMP_NETWORK_HEAT_TEMPLATE_LOOKUP;
+
+ drop table if exists TOSCA_CSAR;
+
+ drop table if exists VF_MODULE;
+
+ drop table if exists VF_MODULE_CUSTOMIZATION;
+
+ drop table if exists VF_MODULE_TO_HEAT_FILES;
+
+ drop table if exists VNF_COMPONENTS;
+
+ drop table if exists VNF_COMPONENTS_RECIPE;
+
+ drop table if exists VNF_RECIPE;
+
+ drop table if exists VNF_RESOURCE;
+
+ drop table if exists VNF_RESOURCE_CUSTOMIZATION;
+
+ drop table if exists VNF_RES_CUSTOM_TO_VF_MODULE_CUSTOM;
+
+ create table ALLOTTED_RESOURCE (
+ MODEL_UUID varchar(255) not null,
+ MODEL_INVARIANT_UUID varchar(255),
+ MODEL_VERSION varchar(255),
+ MODEL_NAME varchar(255),
+ TOSCA_NODE_TYPE varchar(255),
+ SUBCATEGORY varchar(255),
+ DESCRIPTION varchar(255),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_UUID)
+ );
+
+ create table ALLOTTED_RESOURCE_CUSTOMIZATION (
+ MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ MODEL_INSTANCE_NAME varchar(255),
+ AR_MODEL_UUID varchar(255),
+ PROVIDING_SERVICE_MODEL_INVARIANT_UUID varchar(255),
+ TARGET_NETWORK_ROLE varchar(255),
+ NF_FUNCTION varchar(255),
+ NF_TYPE varchar(255),
+ NF_ROLE varchar(255),
+ NF_NAMING_CODE varchar(255),
+ MIN_INSTANCES integer,
+ MAX_INSTANCES integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table HEAT_ENVIRONMENT (
+ ARTIFACT_UUID varchar(200) not null,
+ NAME varchar(100) not null,
+ VERSION varchar(20) not null,
+ DESCRIPTION varchar(1200),
+ BODY longtext not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ ARTIFACT_CHECKSUM varchar(200) default 'MANUAL RECORD',
+ primary key (ARTIFACT_UUID)
+ );
+
+ create table HEAT_FILES (
+ ARTIFACT_UUID varchar(255) not null,
+ DESCRIPTION varchar(255),
+ NAME varchar(255),
+ VERSION varchar(255),
+ BODY longtext,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ ARTIFACT_CHECKSUM varchar(255),
+ primary key (ARTIFACT_UUID)
+ );
+
+ create table HEAT_NESTED_TEMPLATE (
+ PARENT_HEAT_TEMPLATE_UUID varchar(200) not null,
+ CHILD_HEAT_TEMPLATE_UUID varchar(200) not null,
+ PROVIDER_RESOURCE_FILE varchar(100),
+ primary key (PARENT_HEAT_TEMPLATE_UUID, CHILD_HEAT_TEMPLATE_UUID)
+ );
+
+ create table HEAT_TEMPLATE (
+ ARTIFACT_UUID varchar(200) not null,
+ NAME varchar(200) not null,
+ VERSION varchar(20) not null,
+ BODY longtext not null,
+ TIMEOUT_MINUTES integer,
+ DESCRIPTION varchar(1200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ ARTIFACT_CHECKSUM varchar(200) default 'MANUAL RECORD' not null,
+ primary key (ARTIFACT_UUID)
+ );
+
+ create table HEAT_TEMPLATE_PARAMS (
+ HEAT_TEMPLATE_ARTIFACT_UUID varchar(255) not null,
+ PARAM_NAME varchar(255) not null,
+ IS_REQUIRED bit not null,
+ PARAM_TYPE varchar(20),
+ PARAM_ALIAS varchar(45),
+ primary key (HEAT_TEMPLATE_ARTIFACT_UUID, PARAM_NAME)
+ );
+
+ create table MODEL (
+ id integer not null auto_increment,
+ MODEL_TYPE varchar(20) not null,
+ MODEL_VERSION_ID varchar(40) not null,
+ MODEL_INVARIANT_ID varchar(40),
+ MODEL_NAME varchar(40) not null,
+ MODEL_VERSION varchar(20),
+ MODEL_CUSTOMIZATION_ID varchar(40),
+ MODEL_CUSTOMIZATION_NAME varchar(40),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (id)
+ );
+
+ create table MODEL_RECIPE (
+ id integer not null auto_increment,
+ MODEL_ID integer not null,
+ ACTION varchar(20) not null,
+ SCHEMA_VERSION varchar(20),
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_URI varchar(256) not null,
+ MODEL_PARAM_XSD varchar(2048),
+ RECIPE_TIMEOUT integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (id)
+ );
+
+ create table NETWORK_RECIPE (
+ id integer not null auto_increment,
+ MODEL_NAME varchar(20) not null,
+ ACTION varchar(20) not null,
+ VERSION_STR varchar(20) not null,
+ SERVICE_TYPE varchar(45),
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_URI varchar(256) not null,
+ NETWORK_PARAM_XSD varchar(2048),
+ RECIPE_TIMEOUT integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (id)
+ );
+
+ create table NETWORK_RESOURCE (
+ MODEL_UUID varchar(200) not null,
+ MODEL_NAME varchar(200) not null,
+ MODEL_INVARIANT_UUID varchar(200),
+ MODEL_VERSION varchar(20),
+ TOSCA_NODE_TYPE varchar(200),
+ NEUTRON_NETWORK_TYPE varchar(20),
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_MODE varchar(20),
+ HEAT_TEMPLATE_ARTIFACT_UUID varchar(200) not null,
+ AIC_VERSION_MIN varchar(20) default 2.5 not null,
+ AIC_VERSION_MAX varchar(20) default 2.5,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_UUID)
+ );
+
+ create table NETWORK_RESOURCE_CUSTOMIZATION (
+ MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ NETWORK_RESOURCE_MODEL_UUID varchar(200) not null,
+ MODEL_INSTANCE_NAME varchar(255),
+ NETWORK_TECHNOLOGY varchar(255),
+ NETWORK_TYPE varchar(255),
+ NETWORK_SCOPE varchar(255),
+ NETWORK_ROLE varchar(255),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table SERVICE (
+ MODEL_UUID varchar(200) not null,
+ MODEL_NAME varchar(200) not null,
+ MODEL_VERSION varchar(20) not null,
+ DESCRIPTION varchar(1200),
+ TOSCA_CSAR_ARTIFACT_UUID varchar(200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ MODEL_INVARIANT_UUID varchar(200) default 'MANUAL_RECORD' not null,
+ SERVICE_TYPE varchar(20),
+ SERVICE_ROLE varchar(20),
+ primary key (MODEL_UUID)
+ );
+
+ create table SERVICE_RECIPE (
+ id integer not null auto_increment,
+ SERVICE_MODEL_UUID varchar(200) not null,
+ ACTION varchar(40) not null,
+ ORCHESTRATION_URI varchar(256) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ VERSION_STR varchar(20),
+ DESCRIPTION varchar(1200),
+ SERVICE_PARAM_XSD varchar(2048),
+ RECIPE_TIMEOUT integer,
+ SERVICE_TIMEOUT_INTERIM integer,
+ primary key (id)
+ );
+
+ create table SERVICE_TO_ALLOTTED_RESOURCES (
+ SERVICE_MODEL_UUID varchar(200) not null,
+ AR_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (SERVICE_MODEL_UUID, AR_MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table SERVICE_TO_NETWORKS (
+ SERVICE_MODEL_UUID varchar(200) not null,
+ NETWORK_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (SERVICE_MODEL_UUID, NETWORK_MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table SERVICE_TO_RESOURCE_CUSTOMIZATIONS (
+ MODEL_TYPE varchar(20) not null,
+ RESOURCE_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ SERVICE_MODEL_UUID varchar(200) not null,
+ primary key (MODEL_TYPE, RESOURCE_MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table TEMP_NETWORK_HEAT_TEMPLATE_LOOKUP (
+ NETWORK_RESOURCE_MODEL_NAME varchar(200) not null,
+ HEAT_TEMPLATE_ARTIFACT_UUID varchar(200) not null,
+ AIC_VERSION_MIN varchar(20) not null,
+ AIC_VERSION_MAX varchar(20),
+ primary key (NETWORK_RESOURCE_MODEL_NAME)
+ );
+
+ create table TOSCA_CSAR (
+ ARTIFACT_UUID varchar(200) not null,
+ NAME varchar(200) not null,
+ VERSION varchar(20) not null,
+ ARTIFACT_CHECKSUM varchar(200) not null,
+ URL varchar(200) not null,
+ DESCRIPTION varchar(1200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (ARTIFACT_UUID)
+ );
+
+ create table VF_MODULE (
+ MODEL_UUID varchar(200) not null,
+ VNF_RESOURCE_MODEL_UUID varchar(200),
+ MODEL_INVARIANT_UUID varchar(200),
+ MODEL_VERSION varchar(20) not null,
+ MODEL_NAME varchar(200) not null,
+ DESCRIPTION varchar(1200),
+ IS_BASE integer not null,
+ HEAT_TEMPLATE_ARTIFACT_UUID varchar(200) not null,
+ VOL_HEAT_TEMPLATE_ARTIFACT_UUID varchar(200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_UUID)
+ );
+
+ create table VF_MODULE_CUSTOMIZATION (
+ MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ VF_MODULE_MODEL_UUID varchar(200) not null,
+ VOL_ENVIRONMENT_ARTIFACT_UUID varchar(200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ HEAT_ENVIRONMENT_ARTIFACT_UUID varchar(200),
+ MIN_INSTANCES integer,
+ MAX_INSTANCES integer,
+ INITIAL_COUNT integer,
+ AVAILABILITY_ZONE_COUNT integer,
+ LABEL varchar(200),
+ primary key (MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table VF_MODULE_TO_HEAT_FILES (
+ VF_MODULE_MODEL_UUID varchar(200) not null,
+ HEAT_FILES_ARTIFACT_UUID varchar(200) not null,
+ primary key (VF_MODULE_MODEL_UUID, HEAT_FILES_ARTIFACT_UUID)
+ );
+
+ create table VNF_COMPONENTS (
+ VNF_ID integer not null,
+ COMPONENT_TYPE varchar(20) not null,
+ HEAT_TEMPLATE_ID integer,
+ HEAT_ENVIRONMENT_ID integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (VNF_ID, COMPONENT_TYPE)
+ );
+
+ create table VNF_COMPONENTS_RECIPE (
+ id integer not null auto_increment,
+ VNF_TYPE varchar(200),
+ VF_MODULE_MODEL_UUID varchar(100),
+ VNF_COMPONENT_TYPE varchar(45) not null,
+ ACTION varchar(20) not null,
+ SERVICE_TYPE varchar(45),
+ VERSION varchar(20),
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_URI varchar(256) not null,
+ VNF_COMPONENT_PARAM_XSD varchar(2048),
+ RECIPE_TIMEOUT integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (id)
+ );
+
+ create table VNF_RECIPE (
+ id integer not null auto_increment,
+ VF_MODULE_ID varchar(100),
+ ACTION varchar(20) not null,
+ VERSION_STR varchar(20) not null,
+ VNF_TYPE varchar(200),
+ SERVICE_TYPE varchar(45) default null,
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_URI varchar(256) not null,
+ VNF_PARAM_XSD varchar(2048),
+ RECIPE_TIMEOUT integer,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (id)
+ );
+
+ create table VNF_RESOURCE (
+ MODEL_UUID varchar(200) not null,
+ MODEL_INVARIANT_UUID varchar(200),
+ MODEL_VERSION varchar(20) not null,
+ MODEL_NAME varchar(200),
+ TOSCA_NODE_TYPE varchar(200),
+ DESCRIPTION varchar(1200),
+ ORCHESTRATION_MODE varchar(20) not null,
+ AIC_VERSION_MIN varchar(20),
+ AIC_VERSION_MAX varchar(20),
+ HEAT_TEMPLATE_ARTIFACT_UUID varchar(200),
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_UUID)
+ );
+
+ create table VNF_RESOURCE_CUSTOMIZATION (
+ MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ MODEL_INSTANCE_NAME varchar(200) not null,
+ MIN_INSTANCES integer,
+ MAX_INSTANCES integer,
+ AVAILABILITY_ZONE_MAX_COUNT integer,
+ NF_FUNCTION varchar(200),
+ NF_TYPE varchar(200),
+ NF_ROLE varchar(200),
+ NF_NAMING_CODE varchar(200),
+ VNF_RESOURCE_MODEL_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (MODEL_CUSTOMIZATION_UUID)
+ );
+
+ create table VNF_RES_CUSTOM_TO_VF_MODULE_CUSTOM (
+ VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID varchar(200) not null,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (VNF_RESOURCE_CUST_MODEL_CUSTOMIZATION_UUID, VF_MODULE_CUST_MODEL_CUSTOMIZATION_UUID)
+ );
+
+ alter table MODEL
+ add constraint UK_rra00f1rk6eyy7g00k9raxh2v unique (MODEL_TYPE, MODEL_VERSION_ID);
+
+ alter table MODEL_RECIPE
+ add constraint UK_b4g8j9wtqrkxfycyi3ursk7gb unique (MODEL_ID, ACTION);
+
+ alter table NETWORK_RECIPE
+ add constraint UK_pbsa8i44m8p10f9529jdgfuk9 unique (MODEL_NAME, ACTION, VERSION_STR);
+
+ alter table SERVICE_RECIPE
+ add constraint UK_2lr377dpqnvl5aqlp5dtj2fcp unique (SERVICE_MODEL_UUID, ACTION);
+
+ alter table VNF_COMPONENTS_RECIPE
+ add constraint UK_g3je95aaxxiuest25f0qoy2u8 unique (VNF_TYPE, VF_MODULE_MODEL_UUID, VNF_COMPONENT_TYPE, ACTION, SERVICE_TYPE, VERSION);
+
+ alter table VNF_RECIPE
+ add constraint UK_f3tvqau498vrifq3cr8qnigkr unique (VF_MODULE_ID, ACTION, VERSION_STR);
+
+ alter table HEAT_TEMPLATE_PARAMS
+ add constraint FK_p3ol1xcvp831glqohrlu6o07o
+ foreign key (HEAT_TEMPLATE_ARTIFACT_UUID)
+ references HEAT_TEMPLATE (ARTIFACT_UUID);
+
+ alter table MODEL_RECIPE
+ add constraint FK_c23r0puyqug6n44jg39dutm1c
+ foreign key (MODEL_ID)
+ references MODEL (id);
+
+ alter table SERVICE
+ add constraint FK_l3qy594u2xr1tfpmma3uigsna
+ foreign key (TOSCA_CSAR_ARTIFACT_UUID)
+ references TOSCA_CSAR (ARTIFACT_UUID);
+
+ alter table SERVICE_RECIPE
+ add constraint FK_i3r1b8j6e7dg9hkp49evnnm5y
+ foreign key (SERVICE_MODEL_UUID)
+ references SERVICE (MODEL_UUID);
+
+ alter table SERVICE_TO_RESOURCE_CUSTOMIZATIONS
+ add constraint FK_kiddaay6cfe0aob1f1jaio1bb
+ foreign key (SERVICE_MODEL_UUID)
+ references SERVICE (MODEL_UUID);
+
+ alter table VF_MODULE
+ add constraint FK_12jptc9it7gs3pru08skobxxc
+ foreign key (VNF_RESOURCE_MODEL_UUID)
+ references VNF_RESOURCE (MODEL_UUID);
+
+ alter table VNF_RESOURCE_CUSTOMIZATION
+ add constraint FK_iff1ayhb1hrp5jhea3vvikuni
+ foreign key (VNF_RESOURCE_MODEL_UUID)
+ references VNF_RESOURCE (MODEL_UUID);
diff --git a/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql
new file mode 100644
index 000000000..f64548e23
--- /dev/null
+++ b/test/csit/scripts/so/mariadb/docker-entrypoint-initdb.d/db-sql-scripts/main-schemas/MySQL-Requests-schema.sql
@@ -0,0 +1,84 @@
+
+ drop table if exists INFRA_ACTIVE_REQUESTS;
+
+ drop table if exists SITE_STATUS;
+
+ create table INFRA_ACTIVE_REQUESTS (
+ REQUEST_ID varchar(45) not null,
+ CLIENT_REQUEST_ID varchar(45),
+ ACTION varchar(45) not null,
+ REQUEST_STATUS varchar(20),
+ STATUS_MESSAGE varchar(2000),
+ PROGRESS bigint,
+ START_TIME datetime,
+ END_TIME datetime,
+ SOURCE varchar(45),
+ VNF_ID varchar(45),
+ VNF_NAME varchar(80),
+ VNF_TYPE varchar(200),
+ SERVICE_TYPE varchar(45),
+ AIC_NODE_CLLI varchar(11),
+ TENANT_ID varchar(45),
+ PROV_STATUS varchar(20),
+ VNF_PARAMS longtext,
+ VNF_OUTPUTS longtext,
+ REQUEST_BODY longtext,
+ RESPONSE_BODY longtext,
+ LAST_MODIFIED_BY varchar(50),
+ MODIFY_TIME datetime,
+ REQUEST_TYPE varchar(20),
+ VOLUME_GROUP_ID varchar(45),
+ VOLUME_GROUP_NAME varchar(45),
+ VF_MODULE_ID varchar(45),
+ VF_MODULE_NAME varchar(200),
+ VF_MODULE_MODEL_NAME varchar(200),
+ AAI_SERVICE_ID varchar(50),
+ AIC_CLOUD_REGION varchar(11),
+ CALLBACK_URL varchar(200),
+ CORRELATOR varchar(80),
+ SERVICE_INSTANCE_ID varchar(45),
+ SERVICE_INSTANCE_NAME varchar(80),
+ REQUEST_SCOPE varchar(20),
+ REQUEST_ACTION varchar(45) not null,
+ NETWORK_ID varchar(45),
+ NETWORK_NAME varchar(80),
+ NETWORK_TYPE varchar(80),
+ REQUESTOR_ID varchar(80),
+ primary key (REQUEST_ID)
+ );
+
+ create table SITE_STATUS (
+ SITE_NAME varchar(255) not null,
+ STATUS bit,
+ CREATION_TIMESTAMP datetime default CURRENT_TIMESTAMP,
+ primary key (SITE_NAME)
+ );
+ create table OPERATION_STATUS (
+ SERVICE_ID varchar(255) not null,
+ OPERATION_ID varchar(255) not null,
+ SERVICE_NAME varchar(255),
+ OPERATION_TYPE varchar(255),
+ USER_ID varchar(255),
+ RESULT varchar(255),
+ OPERATION_CONTENT varchar(255),
+ PROGRESS varchar(255),
+ REASON varchar(255),
+ OPERATE_AT datetime,
+ FINISHED_AT datetime,
+ primary key (SERVICE_ID,OPERATION_ID)
+ );
+ create table RESOURCE_OPERATION_STATUS (
+ SERVICE_ID varchar(255) not null,
+ OPERATION_ID varchar(255) not null,
+ RESOURCE_TEMPLATE_UUID varchar(255) not null,
+ OPER_TYPE varchar(255),
+ RESOURCE_INSTANCE_ID varchar(255),
+ JOB_ID varchar(255),
+ STATUS varchar(255),
+ PROGRESS varchar(255),
+ ERROR_CODE varchar(255) ,
+ STATUS_DESCRIPOTION varchar(255) ,
+ primary key (SERVICE_ID,OPERATION_ID,RESOURCE_TEMPLATE_UUID)
+ );
+ alter table INFRA_ACTIVE_REQUESTS
+ add constraint UK_bhu6w8p7wvur4pin0gjw2d5ak unique (CLIENT_REQUEST_ID);
diff --git a/test/csit/scripts/vvp/clone_and_setup_vvp_data.sh b/test/csit/scripts/vvp/clone_and_setup_vvp_data.sh
new file mode 100644
index 000000000..a39ed083f
--- /dev/null
+++ b/test/csit/scripts/vvp/clone_and_setup_vvp_data.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP CLAMP
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/vvp/clone_and_setup_vvp_data.sh"
+
+# Clone vvp enviroment template
+mkdir -p ${WORKSPACE}/data/environments/
+mkdir -p ${WORKSPACE}/data/clone/
+mkdir -p /opt/configmaps/settings/
+
+cd ${WORKSPACE}/data/clone
+git clone --depth 1 http://gerrit.onap.org/r/vvp/engagementmgr -b master
+
+chmod -R 775 ${WORKSPACE}/data/
+
+# copy settings file from tox environment infrastructure:
+cp -f ${WORKSPACE}/data/clone/engagementmgr/django/vvp/settings/tox_settings.py /opt/configmaps/settings/__init__.py
+
+# uwsgi.ini file creation
+echo "[uwsgi]
+http = :80
+plugin = python
+chdir = /srv
+module = vvp.wsgi:application
+master = True
+pidfile = /tmp/project-master.pid
+vacuum = True
+max-requests = 5000
+enable-threads = True
+stats = 0.0.0.0:9000
+stats-http = True" > /opt/configmaps/settings/uwsgi.ini
+
+# storage.py file creation
+echo "from storages.backends.s3boto import S3BotoStorage
+from django.conf import settings
+class S3StaticStorage(S3BotoStorage):
+ custom_domain = '%s/%s' % (settings.AWS_S3_HOST, settings.STATIC_BUCKET)
+ bucket_name = settings.STATIC_BUCKET
+class S3MediaStorage(S3BotoStorage):
+ custom_domain = '%s/%s' % (settings.AWS_S3_HOST, settings.MEDIA_BUCKET)
+ bucket_name = settings.MEDIA_BUCKET" > /opt/configmaps/settings/storage.py
+
+# envbool.py file creation
+echo "import os
+def envbool(key, default=False, unknown=True):
+ return {'true': True, '1': True, 'false': False, '0': False,
+ '': default,}.get(os.getenv(key, '').lower(), unknown)" > /opt/configmaps/settings/envbool.py
+
+# vvp_env.list file creation
+echo "# set enviroment variables
+OAUTHLIB_INSECURE_TRANSPORT=1
+HOST_IP=${IP}
+ENVNAME=${ENVIRONMENT}
+http_proxy=${http_proxy}
+https_proxy=${https_proxy}
+no_proxy=${no_proxy}
+DJANGO_SETTINGS_MODULE=vvp.settings
+# export PYTHONPATH={pwd}
+SECRET_KEY=6mo22&FAKEFALEFALEFKEuq0u*4ksk^aq8lte&)yul
+ENVIRONMENT=development
+SERVICE_PROVIDER=ExampleProvider
+PROGRAM_NAME=VVP
+PROGRAM_NAME_URL_PREFIX=vvp
+SERVICE_PROVIDER_DOMAIN=example-domain.com
+EMAIL_HOST=localhost
+EMAIL_HOST_PASSWORD=
+EMAIL_HOST_USER=
+EMAIL_PORT=25
+PGDATABASE=icedb
+PGUSER=iceuser
+PGPASSWORD=Aa123456
+PGHOST=localhost
+PGPORT=5433
+SECRET_WEBHOOK_TOKEN=AiwiFAKEFAKEFAKEmahch2zahshaGi
+SECRET_GITLAB_AUTH_TOKEN=ieNgFAKEFAKE4zohvee9a
+SECRET_JENKINS_PASSWORD=xaiyiFAKEFAKEqueuBu
+SECRET_CMS_APP_CLIENT_ID=MHmJo0ccDhFAKEFAKEFAKEPAC6H6HAMzhCCM16
+SECRET_CMS_APP_CLIENT_SECRET=nI8QFAKEEEpnw5nTs
+SLACK_API_TOKEN=
+S3_HOST=localhost
+S3_PORT=443
+AWS_ACCESS_KEY_ID=FD2FAKEFAKEFAKEVD1MWRN
+AWS_SECRET_ACCESS_KEY=TKoiwxzFAKEFAKEFAKEFAKEFAKEQ27nP2lCiutEsD
+STATIC_ROOT=/app/htdocs" > ${WORKSPACE}/data/environments/vvp_env.list
+
+ifconfig
+
+IP_ADDRESS=`ip route get 8.8.8.8 | awk '/src/{ print $7 }'`
+export HOST_IP=$IP_ADDRESS
diff --git a/test/csit/scripts/vvp/docker_health.sh b/test/csit/scripts/vvp/docker_health.sh
new file mode 100644
index 000000000..520b2dc3a
--- /dev/null
+++ b/test/csit/scripts/vvp/docker_health.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP CLAMP
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "VVP-Engagement-Manager health-Check:"
+echo ""
+echo ""
+res=`curl -s -X GET -H "Accept: application/json" -H "Content-Type: application/json" "http://localhost:9090/vvp/v1/engmgr/vendors" | wc -w`
+if [ ${res} == 0 ]; then
+ echo "Error [${res}] while performing vvp engagement manager vendor existance check"
+ exit 1
+fi
+echo "check vvp engagement manager vendor existance: OK [${res}]"
diff --git a/test/csit/scripts/vvp/kill_containers_and_remove_dataFolders.sh b/test/csit/scripts/vvp/kill_containers_and_remove_dataFolders.sh
new file mode 100644
index 000000000..a6c108ebc
--- /dev/null
+++ b/test/csit/scripts/vvp/kill_containers_and_remove_dataFolders.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/vvp/kill_and_remove_dataFolder.sh"
+
+
+CONTAINER_NAME="vvp-engagementmgr"
+
+#kill and remove all vvp dockers
+docker stop $CONTAINER_NAME
+docker rm -f $CONTAINER_NAME
+
+
+#delete data folder
+rm -rf ${WORKSPACE}/data/*
diff --git a/test/csit/scripts/vvp/start_vvp_containers.sh b/test/csit/scripts/vvp/start_vvp_containers.sh
new file mode 100644
index 000000000..5f905b6ad
--- /dev/null
+++ b/test/csit/scripts/vvp/start_vvp_containers.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP CLAMP
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/vvp/start_vvp_containers.sh"
+
+export IP=$HOST_IP
+export PREFIX='nexus3.onap.org:10001/openecomp/vvp'
+export RELEASE='latest'
+
+#start Engagement Manager pod:
+docker run \
+--detach \
+--entrypoint="" \
+--name vvp-engagementmgr \
+--env-file ${WORKSPACE}/data/environments/vvp_env.list \
+--log-driver=json-file \
+--log-opt max-size=100m \
+--log-opt max-file=10 \
+--ulimit memlock=-1:-1 \
+--memory 4g \
+--memory-swap=4g \
+--ulimit nofile=4096:100000 \
+--volume /etc/localtime:/etc/localtime:ro \
+--volume /opt/configmaps/settings:/opt/configmaps/settings/ \
+--publish 9090:80 ${PREFIX}/engagementmgr:${RELEASE}
+
+docker cp /opt/configmaps/settings/uwsgi.ini vvp-engagementmgr:/srv/vvp/settings/
+
+echo "please wait while Engagement Manager is starting..."
+echo ""
+c=60 # seconds to wait
+REWRITE="\e[25D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+ c=$((c-1))
+ sleep 1
+ echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+#run migration again:
+docker exec -d vvp-engagementmgr sh -c "python3 /srv/manage.py migrate"
+
+#run initial populate db again:
+docker exec -d vvp-engagementmgr sh -c "python3 /srv/manage.py initial_populate_db"
+
+
+echo "Will copy the generated DB sqlite3 file into the application directory in 30 seconds..."
+sleep 30
+#copy the generated DB sqlite3 file into the application directory:
+docker exec -d vvp-engagementmgr sh -c "cp emdb.db /srv/emdb.db -f"
+
+TIME_OUT=600
+INTERVAL=5
+TIME=0
+while [ "$TIME" -lt "$TIME_OUT" ]; do
+ response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://localhost:9090/vvp/v1/engmgr/vendors); echo $response
+
+ if [ "$response" == "200" ]; then
+ echo VVP-Engagement-Manager well started in $TIME seconds
+ break;
+ fi
+
+ echo Sleep: $INTERVAL seconds before testing if VVP-Engagement-Manager is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds
+ sleep $INTERVAL
+ TIME=$(($TIME+$INTERVAL))
+done
+
+if [ "$TIME" -ge "$TIME_OUT" ]; then
+ echo TIME OUT: Docker containers not started in $TIME_OUT seconds... Could cause problems for tests...
+else
+ echo "Done starting vvp containers!"
+fi
diff --git a/test/csit/scripts/vvp/start_vvp_sanity.sh b/test/csit/scripts/vvp/start_vvp_sanity.sh
new file mode 100644
index 000000000..1de1aaa34
--- /dev/null
+++ b/test/csit/scripts/vvp/start_vvp_sanity.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP CLAMP
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+
+# will run CI for sanity checks
diff --git a/test/csit/tests/clamp/APIs/01__TCA.robot b/test/csit/tests/clamp/APIs/01__TCA.robot
index c0b268bf1..c96d9bcab 100644
--- a/test/csit/tests/clamp/APIs/01__TCA.robot
+++ b/test/csit/tests/clamp/APIs/01__TCA.robot
@@ -44,19 +44,10 @@ Put Requests to add Close Loop TCA Model1
${resp}= Put Request clamp /restservices/clds/v1/clds/model/TCAModel1 data=${data} headers=${headers}
Should Be Equal As Strings ${resp.status_code} 200
-Put Requests to add Close Loop TCA Model2
- ${auth}= Create List admin 5f4dcc3b5aa765d61d8327deb882cf99
- Create Session clamp http://localhost:8080 auth=${auth}
- ${data}= Get Binary File ${CURDIR}${/}data${/}createTCAModel2.json
- &{headers}= Create Dictionary Content-Type=application/json
- ${resp}= Put Request clamp /restservices/clds/v1/clds/model/TCAModel2 data=${data} headers=${headers}
- Should Be Equal As Strings ${resp.status_code} 200
-
-Get Requests verify TCA Model1 and Model2 found
+Get Requests verify TCA Model1 found
${auth}= Create List admin 5f4dcc3b5aa765d61d8327deb882cf99
Create Session clamp http://localhost:8080 auth=${auth}
${resp}= Get Request clamp /restservices/clds/v1/clds/model-names
Should Be Equal As Strings ${resp.status_code} 200
Should Contain Match ${resp} *TCAModel1*
- Should Contain Match ${resp} *TCAModel2*
Should Not Contain Match ${resp} *TCAModel99*
diff --git a/test/csit/tests/dcaegen2/testcases/assets/json_events/dcae_healthcheck.json b/test/csit/tests/dcaegen2/testcases/assets/json_events/dcae_healthcheck.json
index 1c8f2e7ce..d612b0821 100644
--- a/test/csit/tests/dcaegen2/testcases/assets/json_events/dcae_healthcheck.json
+++ b/test/csit/tests/dcaegen2/testcases/assets/json_events/dcae_healthcheck.json
@@ -1,5 +1,5 @@
-{
- "path": "/reports/dcae/service-instances",
- "start": "-24hour",
- "end": "now"
+{
+ "path": "/reports/dcae/service-instances",
+ "start": "-24hour",
+ "end": "now"
} \ No newline at end of file
diff --git a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_vfirewall_measurement.json b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_vfirewall_measurement.json
index 5dbedd3c5..5820fc8b3 100644
--- a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_vfirewall_measurement.json
+++ b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_vfirewall_measurement.json
@@ -1,36 +1,36 @@
-{
- "event": {
- "commonEventHeader": {
- "reportingEntityName": "VM name will be provided by ECOMP",
- "startEpochMicrosec": 1506008587564787,
- "lastEpochMicrosec": 1506008587564787,
- "eventName": "Measurement_VFirewall_VNicStat",
- "eventId": "0b2b5790-3673-480a-a4bd-5a00b88e5af6",
- "sourceName": "Dummy VM name - No Metadata available",
- "sequence": 18123,
- "priority": "Normal",
- "functionalRole": "vFirewall",
- "domain": "measurementsForVfScaling",
- "reportingEntityId": "VM UUID will be provided by ECOMP",
- "sourceId": "Dummy VM UUID - No Metadata available",
- "version": 1.1
- },
- "measurementsForVfScalingFields": {
- "measurementInterval": 10,
- "measurementsForVfScalingVersion": 1.1,
- "vNicUsageArray": [{
- "multicastPacketsIn": 0,
- "bytesIn": 3896,
- "unicastPacketsIn": 0,
- "multicastPacketsOut": 0,
- "broadcastPacketsOut": 0,
- "packetsOut": 28,
- "bytesOut": 12178,
- "broadcastPacketsIn": 0,
- "packetsIn": 58,
- "unicastPacketsOut": 0,
- "vNicIdentifier": "eth0"
- }]
- }
- }
+{
+ "event": {
+ "commonEventHeader": {
+ "reportingEntityName": "VM name will be provided by ECOMP",
+ "startEpochMicrosec": 1506008587564787,
+ "lastEpochMicrosec": 1506008587564787,
+ "eventName": "Measurement_VFirewall_VNicStat",
+ "eventId": "0b2b5790-3673-480a-a4bd-5a00b88e5af6",
+ "sourceName": "Dummy VM name - No Metadata available",
+ "sequence": 18123,
+ "priority": "Normal",
+ "functionalRole": "vFirewall",
+ "domain": "measurementsForVfScaling",
+ "reportingEntityId": "VM UUID will be provided by ECOMP",
+ "sourceId": "Dummy VM UUID - No Metadata available",
+ "version": 1.1
+ },
+ "measurementsForVfScalingFields": {
+ "measurementInterval": 10,
+ "measurementsForVfScalingVersion": 1.1,
+ "vNicUsageArray": [{
+ "multicastPacketsIn": 0,
+ "bytesIn": 3896,
+ "unicastPacketsIn": 0,
+ "multicastPacketsOut": 0,
+ "broadcastPacketsOut": 0,
+ "packetsOut": 28,
+ "bytesOut": 12178,
+ "broadcastPacketsIn": 0,
+ "packetsIn": 58,
+ "unicastPacketsOut": 0,
+ "vNicIdentifier": "eth0"
+ }]
+ }
+ }
} \ No newline at end of file
diff --git a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json
index 9a711507a..2d931a96a 100644
--- a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json
+++ b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json
@@ -1,62 +1,62 @@
-{
- "eventList": [
- {
- "commonEventHeader": {
- "version": 3.0,
- "domain": "fault",
- "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
- "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546016",
- "sequence": 0,
- "priority": "High",
- "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
- "reportingEntityName": "EricssonOamVf",
- "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
- "sourceName": "scfx0001vm002cap001",
- "nfNamingCode": "scfx",
- "nfcNamingCode": "ssc",
- "startEpochMicrosec": 1413378172000000,
- "lastEpochMicrosec": 1413378172000000
- },
- "faultFields": {
- "faultFieldsVersion": 2.0,
- "alarmCondition": "PilotNumberPoolExhaustion",
- "eventSourceType": "other",
- "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
- "eventSeverity": "CRITICAL",
- "vfStatus": "Active",
- "alarmAdditionalInformation": [
- {
- "name": "PilotNumberPoolSize",
- "value": "1000"
- }
- ]
- }
- },
- {
- "commonEventHeader": {
- "version": 3.0,
- "domain": "fault",
- "eventName": "Fault_MobileCallRecording_RecordingServerUnreachable",
- "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546025",
- "sequence": 0,
- "priority": "High",
- "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
- "reportingEntityName": "EricssonOamVf",
- "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
- "sourceName": "scfx0001vm002cap001",
- "nfNamingCode": "scfx",
- "nfcNamingCode": "ssc",
- "startEpochMicrosec": 1413378172000010,
- "lastEpochMicrosec": 1413378172000010
- },
- "faultFields": {
- "faultFieldsVersion": 2.0,
- "alarmCondition": "RecordingServerUnreachable",
- "eventSourceType": "other",
- "specificProblem": "Recording server unreachable",
- "eventSeverity": "CRITICAL",
- "vfStatus": "Active"
- }
- }
- ]
-}
+{
+ "eventList": [
+ {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546016",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ },
+ {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_RecordingServerUnreachable",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546025",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000010,
+ "lastEpochMicrosec": 1413378172000010
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "RecordingServerUnreachable",
+ "eventSourceType": "other",
+ "specificProblem": "Recording server unreachable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active"
+ }
+ }
+ ]
+}
diff --git a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event.json b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event.json
index a45c51dd9..2ccb3f649 100644
--- a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event.json
+++ b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event.json
@@ -1,34 +1,34 @@
-{
- "event": {
- "commonEventHeader": {
- "version": 3.0,
- "domain": "fault",
- "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
- "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
- "sequence": 0,
- "priority": "High",
- "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
- "reportingEntityName": "EricssonOamVf",
- "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
- "sourceName": "scfx0001vm002cap001",
- "nfNamingCode": "scfx",
- "nfcNamingCode": "ssc",
- "startEpochMicrosec": 1413378172000000,
- "lastEpochMicrosec": 1413378172000000
- },
- "faultFields": {
- "faultFieldsVersion": 2.0,
- "alarmCondition": "PilotNumberPoolExhaustion",
- "eventSourceType": "other",
- "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
- "eventSeverity": "CRITICAL",
- "vfStatus": "Active",
- "alarmAdditionalInformation": [
- {
- "name": "PilotNumberPoolSize",
- "value": "1000"
- }
- ]
- }
- }
-}
+{
+ "event": {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ }
+}
diff --git a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event_bad.json b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event_bad.json
index fd831cee2..b36c27df0 100644
--- a/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event_bad.json
+++ b/test/csit/tests/dcaegen2/testcases/assets/json_events/ves_volte_single_fault_event_bad.json
@@ -1,34 +1,34 @@
-{
- "event": {
- "commonEventHeader": {
- "version": 3.0
- "domain": "fault",
- "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
- "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
- "sequence": 0,
- "priority": "High",
- "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
- "reportingEntityName": "EricssonOamVf",
- "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
- "sourceName": "scfx0001vm002cap001",
- "nfNamingCode": "scfx",
- "nfcNamingCode": "ssc",
- "startEpochMicrosec": 1413378172000000,
- "lastEpochMicrosec": 1413378172000000
- },
- "faultFields": {
- "faultFieldsVersion": 2.0,
- "alarmCondition": "PilotNumberPoolExhaustion",
- "eventSourceType": "other",
- "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
- "eventSeverity": "CRITICAL",
- "vfStatus": "Active",
- "alarmAdditionalInformation": [
- {
- "name": "PilotNumberPoolSize",
- "value": "1000"
- }
- ]
- }
- }
+{
+ "event": {
+ "commonEventHeader": {
+ "version": 3.0
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ }
} \ No newline at end of file
diff --git a/test/csit/tests/holmes/testcase/EngineMgt/Engine-Mgt.robot b/test/csit/tests/holmes/testcase/EngineMgt/Engine-Mgt.robot
index b84dff285..0c06cbb97 100644
--- a/test/csit/tests/holmes/testcase/EngineMgt/Engine-Mgt.robot
+++ b/test/csit/tests/holmes/testcase/EngineMgt/Engine-Mgt.robot
@@ -28,7 +28,7 @@ deploy_invalid_rule
deploy_valid_rule
[Documentation] Add a rule with valid contents to the engine.
- ${dic4} create dictionary content=package rule03080002 engineid="" loopcontrolname=test
+ ${dic4} create dictionary content=package rule03080002;\n\nimport java.util.Locale; engineid="" loopcontrolname=test
${Jsonparam} encode ${dic4}
${response} deployEngineRule ${Jsonparam}
diff --git a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
index bccb0f72d..ac25cbfad 100644
--- a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
+++ b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
@@ -8,7 +8,7 @@ Resource Rule-Keywords.robot
*** Test Cases ***
add_valid_rule
[Documentation] Add a valid rule.
- ${dict2} create dictionary rulename=youbowu0314 description=create a new rule! content=package rule03140002 enabled=1 loopcontrolname=closedControlLoop
+ ${dict2} create dictionary rulename=youbowu0314 description=create a new rule! content=package rule03140002;\n\nimport java.util.Locale; enabled=1 loopcontrolname=closedControlLoop
${jsonParams} encode ${dict2}
${response} createRule ${jsonParams}
${respJson} to json ${response.content}
diff --git a/test/csit/tests/policy/suite1/configpolicy_vCPE_R1.template b/test/csit/tests/policy/suite1/configpolicy_vCPE_R1.template
index 6dcf3c600..77653c3f6 100644
--- a/test/csit/tests/policy/suite1/configpolicy_vCPE_R1.template
+++ b/test/csit/tests/policy/suite1/configpolicy_vCPE_R1.template
@@ -1,6 +1,7 @@
{
- "configBody": "{ \"service\": \"policy_tosca_tca\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevCPE\", \"description\": \"MicroService vCPE Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.0.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"policyVersion\": \"v0.0.1\", \"threshholds\": [{ \"severity\": \"MAJOR\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": \"0\", \"closedLoopEventStatus\": \"ABATED\", \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"direction\": \"EQUAL\" }, { \"severity\": \"CRITICAL\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": \"0\", \"closedLoopEventStatus\": \"ONSET\", \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"direction\": \"GREATER\" }], \"policyName\": \"DCAE.Config_tca-hi-lo\", \"controlLoopSchemaType\": \"VNF\", \"policyScope\": \"DCAE\", \"eventName\": \"Measurement_vGMUX\" } }",
+ "configBody": "{ \"service\": \"tca_policy\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevCPE\", \"description\": \"MicroService vCPE Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.1.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"policyVersion\": \"v0.0.1\", \"threshholds\": [{ \"severity\": \"MAJOR\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": \"0\", \"closedLoopEventStatus\": \"ABATED\", \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"direction\": \"EQUAL\" }, { \"severity\": \"CRITICAL\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.additionalMeasurements[*].arrayOfFields[0].value\", \"thresholdValue\": \"0\", \"closedLoopEventStatus\": \"ONSET\", \"closedLoopControlName\": \"ControlLoop-vCPE-48f0c2c3-a172-4192-9ae3-052274181b6e\", \"version\": \"1.0.2\", \"direction\": \"GREATER\" }], \"policyName\": \"DCAE.Config_tca-hi-lo\", \"controlLoopSchemaType\": \"VNF\", \"policyScope\": \"DCAE\", \"eventName\": \"Measurement_vGMUX\" } }",
"policyConfigType": "MicroService",
"policyName": "${policy_name}",
- "onapName": "DCAE"
-} \ No newline at end of file
+ "onapName": "DCAE",
+ "priority": "5"
+}
diff --git a/test/csit/tests/policy/suite1/configpolicy_vDNS_R1.template b/test/csit/tests/policy/suite1/configpolicy_vDNS_R1.template
index 0a9ecc6ff..185a7c72c 100644
--- a/test/csit/tests/policy/suite1/configpolicy_vDNS_R1.template
+++ b/test/csit/tests/policy/suite1/configpolicy_vDNS_R1.template
@@ -1,6 +1,7 @@
{
- "configBody": "{ \"service\": \"policy_tosca_tca\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevDNS\", \"description\": \"MicroService vDNS Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.0.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"policyVersion\": \"v0.0.1\", \"threshholds\": [{ \"severity\": \"CRITICAL\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\", \"thresholdValue\": \"300\", \"closedLoopEventStatus\": \"ONSET\", \"closedLoopControlName\": \"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"version\": \"1.0.2\", \"direction\": \"GREATER_OR_EQUAL\" }], \"policyName\": \"DCAE.Config_tca-hi-lo\", \"controlLoopSchemaType\": \"VM\", \"policyScope\": \"DCAE\", \"eventName\": \"vLoadBalancer\" } }",
+ "configBody": "{ \"service\": \"tca_policy\", \"location\": \"SampleServiceLocation\", \"uuid\": \"test\", \"policyName\": \"MicroServicevDNS\", \"description\": \"MicroService vDNS Policy\", \"configName\": \"SampleConfigName\", \"templateVersion\": \"OpenSource.version.1\", \"version\": \"1.1.0\", \"priority\": \"1\", \"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"riskType\": \"SampleRiskType\", \"riskLevel\": \"1\", \"guard\": \"False\", \"content\": { \"policyVersion\": \"v0.0.1\", \"threshholds\": [{ \"severity\": \"CRITICAL\", \"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\", \"thresholdValue\": \"300\", \"closedLoopEventStatus\": \"ONSET\", \"closedLoopControlName\": \"ControlLoop-vDNS-6f37f56d-a87d-4b85-b6a9-cc953cf779b3\", \"version\": \"1.0.2\", \"direction\": \"GREATER_OR_EQUAL\" }], \"policyName\": \"DCAE.Config_tca-hi-lo\", \"controlLoopSchemaType\": \"VM\", \"policyScope\": \"DCAE\", \"eventName\": \"vLoadBalancer\" } }",
"policyConfigType": "MicroService",
"policyName": "${policy_name}",
- "onapName": "DCAE"
-} \ No newline at end of file
+ "onapName": "DCAE",
+ "priority": "5"
+}
diff --git a/test/csit/tests/policy/suite1/configpolicy_vFW_R1.template b/test/csit/tests/policy/suite1/configpolicy_vFW_R1.template
index 4c3617d5e..067ed07ff 100644
--- a/test/csit/tests/policy/suite1/configpolicy_vFW_R1.template
+++ b/test/csit/tests/policy/suite1/configpolicy_vFW_R1.template
@@ -1,6 +1,7 @@
{
- "configBody": "{\"service\": \"policy_tosca_tca\",\"location\": \"SampleServiceLocation\",\"uuid\": \"test\",\"policyName\": \"MicroServicevFirewall\",\"description\": \"MicroService vFirewall Policy\",\"configName\": \"SampleConfigName\",\"templateVersion\": \"OpenSource.version.1\",\"version\": \"1.0.0\",\"priority\": \"1\",\"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"riskType\": \"SampleRiskType\",\"riskLevel\": \"1\",\"guard\": \"False\",\"content\": {\"policyVersion\": \"v0.0.1\",\"threshholds\": [{\"severity\": \"MAJOR\",\"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\": \"300\",\"closedLoopEventStatus\": \"ONSET\",\"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\": \"1.0.2\",\"direction\": \"LESS_OR_EQUAL\"}, {\"severity\": \"CRITICAL\",\"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\": \"700\",\"closedLoopEventStatus\": \"ONSET\",\"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\": \"1.0.2\",\"direction\": \"GREATER_OR_EQUAL\"}],\"policyName\": \"DCAE.Config_tca-hi-lo\",\"controlLoopSchemaType\": \"VNF\",\"policyScope\": \"DCAE\",\"eventName\": \"vFirewallBroadcastPackets\"}}",
+ "configBody": "{\"service\": \"tca_policy\",\"location\": \"SampleServiceLocation\",\"uuid\": \"test\",\"policyName\": \"MicroServicevFirewall\",\"description\": \"MicroService vFirewall Policy\",\"configName\": \"SampleConfigName\",\"templateVersion\": \"OpenSource.version.1\",\"version\": \"1.1.0\",\"priority\": \"1\",\"policyScope\": \"resource=SampleResource,service=SampleService,type=SampleType,closedLoopControlName=ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"riskType\": \"SampleRiskType\",\"riskLevel\": \"1\",\"guard\": \"False\",\"content\": {\"policyVersion\": \"v0.0.1\",\"threshholds\": [{\"severity\": \"MAJOR\",\"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\": \"300\",\"closedLoopEventStatus\": \"ONSET\",\"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\": \"1.0.2\",\"direction\": \"LESS_OR_EQUAL\"}, {\"severity\": \"CRITICAL\",\"fieldPath\": \"$$.event.measurementsForVfScalingFields.vNicUsageArray[*].receivedTotalPacketsDelta\",\"thresholdValue\": \"700\",\"closedLoopEventStatus\": \"ONSET\",\"closedLoopControlName\": \"ControlLoop-vFirewall-d0a1dfc6-94f5-4fd4-a5b5-4630b438850a\",\"version\": \"1.0.2\",\"direction\": \"GREATER_OR_EQUAL\"}],\"policyName\": \"DCAE.Config_tca-hi-lo\",\"controlLoopSchemaType\": \"VNF\",\"policyScope\": \"DCAE\",\"eventName\": \"vFirewallBroadcastPackets\"}}",
"policyConfigType": "MicroService",
"policyName": "${policy_name}",
- "onapName": "DCAE"
-} \ No newline at end of file
+ "onapName": "DCAE",
+ "priority": "5"
+}
diff --git a/test/csit/tests/portal/testsuites/news_widget.zip b/test/csit/tests/portal/testsuites/news_widget.zip
new file mode 100644
index 000000000..17d4ce92d
--- /dev/null
+++ b/test/csit/tests/portal/testsuites/news_widget.zip
Binary files differ
diff --git a/test/csit/tests/portal/testsuites/test1.robot b/test/csit/tests/portal/testsuites/test1.robot
index 70fdcf0d6..bbbe5172f 100644
--- a/test/csit/tests/portal/testsuites/test1.robot
+++ b/test/csit/tests/portal/testsuites/test1.robot
@@ -610,7 +610,7 @@ Portal admin Microservice Onboarding
Portal Admin Create Widget for All users
[Documentation] Naviage to Create Widget menu tab
- ${WidgetAttachment}= Catenate ${PORTAL_ASSETS_DIRECTORY}//widget_news.zip
+ ${WidgetAttachment}= Catenate ${PORTAL_ASSETS_DIRECTORY}//news_widget.zip
Click Link xpath=//a[@title='Widget Onboarding']
Click Button xpath=//button[@ng-click='toggleSidebar()']
Click Button xpath=//button[@id='widget-onboarding-button-add']
@@ -650,7 +650,7 @@ Portal Admin Delete Widget for All users
Portal Admin Create Widget for Application Roles
[Documentation] Naviage to Create Widget menu tab
- ${WidgetAttachment}= Catenate ${PORTAL_ASSETS_DIRECTORY}//widget_news.zip
+ ${WidgetAttachment}= Catenate ${PORTAL_ASSETS_DIRECTORY}//news_widget.zip
Click Link xpath=//a[@title='Widget Onboarding']
Click Button xpath=//button[@ng-click='toggleSidebar()']
Click Button xpath=//button[@id='widget-onboarding-button-add']
diff --git a/test/csit/tests/portal/testsuites/widget_news.zip b/test/csit/tests/portal/testsuites/widget_news.zip
index 532cf6105..5c3217be3 100644
--- a/test/csit/tests/portal/testsuites/widget_news.zip
+++ b/test/csit/tests/portal/testsuites/widget_news.zip
Binary files differ
diff --git a/test/csit/tests/so/sanity-check/data/createE2eservice.json b/test/csit/tests/so/sanity-check/data/createE2eservice.json
index b24cc9c28..b8bac167e 100644
--- a/test/csit/tests/so/sanity-check/data/createE2eservice.json
+++ b/test/csit/tests/so/sanity-check/data/createE2eservice.json
@@ -1,30 +1,100 @@
{
- "service": {
- "name": "service",
- "description": "so_test1",
- "serviceDefId": "182834434345",
- "templateId": "5994888392",
- "parameters": {
- "domainHost": "127.0.0.1",
- "nodeTemplateName": "model:v3",
- "nodeType": "service",
- "globalSubscriberId": "49923893499",
- "subscriberName": "NEED THIS UUI - AAI",
- "requestParameters": {
- "subscriptionServiceType": "MOG",
- "userParams": [
- {
- "name": "someUserParam",
- "value": "someValue"
- },
- {
- "name": "segments",
- "value": "[\n{\n\"domainHost\":\"localhost\",\n\"nodeTemplateName\":\"IMS_NS\",\n\"nodeType\":\"tosca.nodes.nfv.NS.IMS\",\n\"segments\":[\n\n],\n\"nsParameters\":{\n\"locationConstraints\":[\n{\n\"vnfProfileId\":\"zte-CSCF-1.0\",\n\"locationConstraints\":{\n\"vimId\":\"4050083f-465f-4838-af1e-47a545222ad0\"\n}\n}\n],\n\"additionalParamForNs\":{\n\"externalDataNetworkName\":\"Flow_out_net\",\n\"m6000_mng_ip\":\"181.18.20.2\",\n\"externalCompanyFtpDataNetworkName\":\"Flow_out_net\",\n\"externalPluginManageNetworkName\":\"plugin_net_2014\",\n\"externalManageNetworkName\":\"mng_net_2017\",\n\"sfc_data_network\":\"sfc_data_net_2016\",\n\"NatIpRange\":\"210.1.1.10-210.1.1.20\",\n\"location\":\"4050083f-465f-4838-af1e-47a545222ad0\",\n\"sdncontroller\":\"9b9f02c0-298b-458a-bc9c-be3692e4f35e\"\n}\n}\n},\n{\n\"domainHost\":\"localhost\",\n\"nodeTemplateName\":\"EPC_NS\",\n\"nodeType\":\"tosca.nodes.nfv.NS.IMS\",\n\"segments\":[\n\n],\n\"nsParameters\":{\n\"locationConstraints\":[\n{\n\"vnfProfileId\":\"zte-CSCF-1.0\",\n\"locationConstraints\":{\n\"vimId\":\"4050083f-465f-4838-af1e-47a545222ad0\"\n}\n}\n],\n\"additionalParamForNs\":{\n\"externalDataNetworkName\":\"Flow_out_net\",\n\"m6000_mng_ip\":\"181.18.20.2\",\n\"externalCompanyFtpDataNetworkName\":\"Flow_out_net\",\n\"externalPluginManageNetworkName\":\"plugin_net_2014\",\n\"externalManageNetworkName\":\"mng_net_2017\",\n\"sfc_data_network\":\"sfc_data_net_2016\",\n\"NatIpRange\":\"210.1.1.10-210.1.1.20\",\n\"location\":\"4050083f-465f-4838-af1e-47a545222ad0\",\n\"sdncontroller\":\"9b9f02c0-298b-458a-bc9c-be3692e4f35e\"\n}\n}\n}\n]"
- },
- {
- "name": "nsParameters",
- "value": "{\n \"locationConstraints\": {},\n \"additionalParamForNs\": {\n \"E2EServcie.param1\": \"value1\",\n \"E2EServcie.param2\": \"value2\"\n }\n }"
- }
- ]
- }}}
+ "service":{
+"name":"so_test5",
+"description":"so_test2",
+ "serviceDefId":"60c3e96e-0970-4871-b6e0-3b6de7561519",
+"templateId":"592f9437-a9c0-4303-b9f6-c445bb7e9814",
+ "parameters":{
+ "globalSubscriberId":"123457",
+ "subscriberName":"Customer1",
+ "serviceType":"voLTE",
+"templateName":"voLTE Service:1.0",
+ "resources":[
+ {
+"resourceName":"vIMS",
+ "resourceDefId":"60c3e96e-0970-4871-b6e0-3b6de7561516",
+ "resourceId":"60c3e96e-0970-4871-b6e0-3b6de7561512",
+"nsParameters":{
+ "locationConstraints":[
+ {
+"vnfProfileId":"zte-vBAS-1.0",
+ "locationConstraints":{
+ "vimId":"4050083f-465f-4838-af1e-47a545222ad0"
+ }
+},
+ {
+ "vnfProfileId":"zte-vMME-1.0",
+ "locationConstraints":{
+ "vimId":"4050083f-465f-4838-af1e-47a545222ad0"
+ }
+ }
+ ],
+ "additionalParamForNs":{
+
+ }
+}
+ },
+ {
+ "resourceName":"vEPC",
+ "resourceDefId":"61c3e96e-0970-4871-b6e0-3b6de7561516",
+ "resourceId":"62c3e96e-0970-4871-b6e0-3b6de7561512",
+ "nsParameters":{
+"locationConstraints":[
+ {
+"vnfProfileId":"zte-CSCF-1.0",
+"locationConstraints":{
+ "vimId":"4050083f-465f-4838-af1e-47a545222ad1"
+}
+ }
+ ],
+"additionalParamForNs":{
+
+ }
+ }
+ },
+ {
+ "resourceName":"underlayvpn",
+"resourceDefId":"60c3e96e-0970-4871-b6e0-3b6de7561513",
+ "resourceId":"60c3e96e-0970-4871-b6e0-3b6de7561514",
+"nsParameters":{
+"locationConstraints":[
+
+ ],
+"additionalParamForNs":{
+ "externalDataNetworkName":"Flow_out_net",
+"m6000_mng_ip":"181.18.20.2",
+ "externalCompanyFtpDataNetworkName":"Flow_out_net",
+ "externalPluginManageNetworkName":"plugin_net_2014",
+ "externalManageNetworkName":"mng_net_2017",
+ "sfc_data_network":"sfc_data_net_2016",
+"NatIpRange":"210.1.1.10-210.1.1.20",
+"location":"4050083f-465f-4838-af1e-47a545222ad0",
+ "sdncontroller":"9b9f02c0-298b-458a-bc9c-be3692e4f35e"
+ }
+ }
+ },
+ {
+ "resourceName":"overlayvpn",
+ "resourceDefId":"60c3e96e-0970-4871-b6e0-3b6de7561517",
+ "resourceId":"60c3e96e-0970-4871-b6e0-3b6de7561518",
+"nsParameters":{
+ "locationConstraints":[
+
+ ],
+ "additionalParamForNs":{
+"externalDataNetworkName":"Flow_out_net",
+ "m6000_mng_ip":"181.18.20.2",
+ "externalCompanyFtpDataNetworkName":"Flow_out_net",
+ "externalPluginManageNetworkName":"plugin_net_2014",
+ "externalManageNetworkName":"mng_net_2017",
+ "sfc_data_network":"sfc_data_net_2016",
+"NatIpRange":"210.1.1.10-210.1.1.20",
+"location":"4050083f-465f-4838-af1e-47a545222ad0",
+ "sdncontroller":"9b9f02c0-298b-458a-bc9c-be3692e4f35e"
+}
+ }
+}
+ ]
+}
+}
} \ No newline at end of file
diff --git a/test/csit/tests/so/sanity-check/data/deleteE2eservice.json b/test/csit/tests/so/sanity-check/data/deleteE2eservice.json
index f5a87b1d3..d423dc34f 100644
--- a/test/csit/tests/so/sanity-check/data/deleteE2eservice.json
+++ b/test/csit/tests/so/sanity-check/data/deleteE2eservice.json
@@ -1,30 +1,4 @@
{
- "service": {
- "name": "instanceName",
- "description": "so_test1",
- "serviceDefId": "modelInvariantId value from SDC?",
- "templateId": "modelVersionId value from SDC??",
- "parameters": {
- "domainHost": "localhost",
- "nodeTemplateName": "modelName+:+modelVersion",
- "nodeType": "modelType?? == service",
- "globalSubscriberId": "NEED THIS UUI - AAI",
- "subscriberName": "NEED THIS UUI - AAI",
- "requestParameters": {
- "subscriptionServiceType": "MOG",
- "userParams": [
- {
- "name": "someUserParam",
- "value": "someValue"
- },
- {
- "name": "segments",
- "value": "[\n{\n\"domainHost\":\"localhost\",\n\"nodeTemplateName\":\"IMS_NS\",\n\"nodeType\":\"tosca.nodes.nfv.NS.IMS\",\n\"segments\":[\n\n],\n\"nsParameters\":{\n\"locationConstraints\":[\n{\n\"vnfProfileId\":\"zte-CSCF-1.0\",\n\"locationConstraints\":{\n\"vimId\":\"4050083f-465f-4838-af1e-47a545222ad0\"\n}\n}\n],\n\"additionalParamForNs\":{\n\"externalDataNetworkName\":\"Flow_out_net\",\n\"m6000_mng_ip\":\"181.18.20.2\",\n\"externalCompanyFtpDataNetworkName\":\"Flow_out_net\",\n\"externalPluginManageNetworkName\":\"plugin_net_2014\",\n\"externalManageNetworkName\":\"mng_net_2017\",\n\"sfc_data_network\":\"sfc_data_net_2016\",\n\"NatIpRange\":\"210.1.1.10-210.1.1.20\",\n\"location\":\"4050083f-465f-4838-af1e-47a545222ad0\",\n\"sdncontroller\":\"9b9f02c0-298b-458a-bc9c-be3692e4f35e\"\n}\n}\n},\n{\n\"domainHost\":\"localhost\",\n\"nodeTemplateName\":\"EPC_NS\",\n\"nodeType\":\"tosca.nodes.nfv.NS.IMS\",\n\"segments\":[\n\n],\n\"nsParameters\":{\n\"locationConstraints\":[\n{\n\"vnfProfileId\":\"zte-CSCF-1.0\",\n\"locationConstraints\":{\n\"vimId\":\"4050083f-465f-4838-af1e-47a545222ad0\"\n}\n}\n],\n\"additionalParamForNs\":{\n\"externalDataNetworkName\":\"Flow_out_net\",\n\"m6000_mng_ip\":\"181.18.20.2\",\n\"externalCompanyFtpDataNetworkName\":\"Flow_out_net\",\n\"externalPluginManageNetworkName\":\"plugin_net_2014\",\n\"externalManageNetworkName\":\"mng_net_2017\",\n\"sfc_data_network\":\"sfc_data_net_2016\",\n\"NatIpRange\":\"210.1.1.10-210.1.1.20\",\n\"location\":\"4050083f-465f-4838-af1e-47a545222ad0\",\n\"sdncontroller\":\"9b9f02c0-298b-458a-bc9c-be3692e4f35e\"\n}\n}\n}\n]"
- },
- {
- "name": "nsParameters",
- "value": "{\n \"locationConstraints\": {},\n \"additionalParamForNs\": {\n \"E2EServcie.param1\": \"value1\",\n \"E2EServcie.param2\": \"value2\"\n }\n }"
- }
- ]
- }}}
+ "globalSubscriberId":"388499302",
+ "serviceType" : "VoLTE"
} \ No newline at end of file
diff --git a/test/csit/tests/so/sanity-check/sanity_test_so.robot b/test/csit/tests/so/sanity-check/sanity_test_so.robot
index 065414569..7d6ddf31c 100644
--- a/test/csit/tests/so/sanity-check/sanity_test_so.robot
+++ b/test/csit/tests/so/sanity-check/sanity_test_so.robot
@@ -207,4 +207,4 @@ Delete E2EService with invalid input data
${data}= Get Binary File ${CURDIR}${/}data${/}deleteE2eserviceInvalid.json
&{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json
${resp}= Delete Request refrepo /ecomp/mso/infra/e2eServiceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000 data=${data} headers=${headers}
- Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result
+ Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result \ No newline at end of file
diff --git a/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json b/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json
index e9a6c3e92..d21664b09 100644
--- a/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json
+++ b/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json
@@ -1,13 +1,13 @@
-{
- "auth": {
- "identity": {
- "methods": ["password"],
- "password": {
- "user": {
- "name": "admin",
- "password": "User@12345"
- }
- }
- }
- }
+{
+ "auth": {
+ "identity": {
+ "methods": ["password"],
+ "password": {
+ "user": {
+ "name": "admin",
+ "password": "User@12345"
+ }
+ }
+ }
+ }
} \ No newline at end of file
diff --git a/test/csit/tests/vfc/nfvo-driver-svnfm/ztevmanager.robot b/test/csit/tests/vfc/nfvo-driver-svnfm/ztevmanager.robot
index 7d7d305b1..44a4f9d90 100644
--- a/test/csit/tests/vfc/nfvo-driver-svnfm/ztevmanager.robot
+++ b/test/csit/tests/vfc/nfvo-driver-svnfm/ztevmanager.robot
@@ -6,13 +6,13 @@ Library json
*** Variables ***
@{return_ok_list}= 200 201 202
-${queryswagger_url} /api/ztevmanagerdriver/v1/swagger.json
+${queryswagger_url} /api/ztevnfmdriver/v1/swagger.json
*** Test Cases ***
VnfresSwaggerTest
- [Documentation] query ztevmanagerdriver swagger info rest test
+ [Documentation] query ztevnfmdriver swagger info rest test
${headers} Create Dictionary Content-Type=application/json Accept=application/json
- Create Session web_session http://${ZTEVMANAGERDRIVER_IP}:8410 headers=${headers}
+ Create Session web_session http://${ZTEVNFMDRIVER_IP}:8410 headers=${headers}
${resp}= Get Request web_session ${queryswagger_url}
${responese_code}= Convert To String ${resp.status_code}
List Should Contain Value ${return_ok_list} ${responese_code}
diff --git a/test/csit/tests/vfc/nfvo-wfengine/workflow.robot b/test/csit/tests/vfc/nfvo-wfengine/workflow.robot
index 82cdaed7d..07bfe6979 100644
--- a/test/csit/tests/vfc/nfvo-wfengine/workflow.robot
+++ b/test/csit/tests/vfc/nfvo-wfengine/workflow.robot
@@ -1,15 +1,113 @@
-*** Settings ***
-Library Collections
-Library requests
-
-*** Test Cases ***
-Deploy BPMN File Test
- [Documentation] Check if the test bpmn file can be deployed in activiti engine
- Should Be Equal 200 200
-UnDeploy BPMN File Test
- [Documentation] Check if the test bpmn file can be undeployed in activiti engine
- Should Be Equal 404 404
-
-Exectue BPMN File Test
- [Documentation] Check if the test bpmn file can be exectued in activiti engine
- Should Be Equal 200 200 \ No newline at end of file
+*** Settings ***
+Resource ../../common.robot
+Library Collections
+Library json
+Library OperatingSystem
+Library RequestsLibrary
+Library HttpLibrary.HTTP
+
+*** Variables ***
+${MSB_IP} 127.0.0.1
+${MSB_PORT} 10550
+${ACTIVITI_IP} 127.0.0.1
+${ACTIVITI_PORT} 8804
+${MGRSERVICE_IP} 127.0.0.1
+${MGRSERVICE_PORT} 8805
+${processId} demo
+${deployid} 0
+${bmpfilepath} ${SCRIPTS}/nfvo-wfengine/demo.bpmn20.xml
+
+*** Test Cases ***
+Deploy BPMN File Test On Activiti
+ [Documentation] Check if the test bpmn file can be deployed in activiti engine
+ ${auth}= Create List kermit kermit
+ ${headers}= Create Dictionary Accept=application/json
+ Create Session web_session http://${ACTIVITI_IP}:${ACTIVITI_PORT} headers=${headers} auth=${auth}
+ ${files}= evaluate {"file":open('${bmpfilepath}','rb')}
+ ${resp}= Post Request web_session /activiti-rest/service/repository/deployments files=${files}
+ Should Be Equal ${resp.status_code} ${201}
+ Log ${resp.json()}
+ ${deployedId}= Set Variable ${resp.json()["id"]}
+ Set Global Variable ${deployedId}
+
+Exectue BPMN File Testt On Activiti
+ [Documentation] Check if the test bpmn file can be exectued in activiti engine
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json Authorization=Basic a2VybWl0Omtlcm1pdA==
+ Create Session web_session http://${ACTIVITI_IP}:${ACTIVITI_PORT} headers=${headers}
+ ${body} Create Dictionary processDefinitionKey=${processId}
+ ${body} dumps ${body}
+ ${resp}= Post Request web_session /activiti-rest/service/runtime/process-instances ${body}
+ Should Be Equal ${resp.status_code} ${201}
+
+UnDeploy BPMN File Testt On Activiti
+ [Documentation] Check if the test bpmn file can be undeployed in activiti engine
+ log ${deployedId}
+ ${auth}= Create List kermit kermit
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json
+ Create Session web_session http://${ACTIVITI_IP}:${ACTIVITI_PORT} headers=${headers} auth=${auth}
+ ${resp}= Delete Request web_session /activiti-rest/service/repository/deployments/${deployedId}?cascade=true
+ Should Be Equal ${resp.status_code} ${204}
+
+Deploy BPMN File Test On MgrService
+ [Documentation] Check if the test bpmn file can be deployed in Management Service
+ ${auth}= Create List kermit kermit
+ ${headers}= Create Dictionary Accept=application/json
+ Create Session web_session http://${MGRSERVICE_IP}:${MGRSERVICE_PORT} headers=${headers} auth=${auth}
+ ${files}= evaluate {"file":open('${bmpfilepath}','rb')}
+ ${resp}= Post Request web_session api/workflow/v1/package files=${files}
+ Should Be Equal ${resp.status_code} ${200}
+ Log ${resp.json()}
+ ${deployedId}= Set Variable ${resp.json()["deployedId"]}
+ Set Global Variable ${deployedId}
+
+Exectue BPMN File Testt On MgrService
+ [Documentation] Check if the test bpmn file can be exectued in Management Service
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json Authorization=Basic a2VybWl0Omtlcm1pdA==
+ Create Session web_session http://${MGRSERVICE_IP}:${MGRSERVICE_PORT} headers=${headers}
+ ${body} Create Dictionary processDefinitionKey=${processId}
+ ${body} dumps ${body}
+ ${resp}= Post Request web_session api/workflow/v1/process/instance ${body}
+ Should Be Equal ${resp.status_code} ${200}
+ Log ${resp.json()}
+ Should Be Equal ${resp.json()["processDefinitionKey"]} ${processId}
+
+UnDeploy BPMN File Testt On MgrService
+ [Documentation] Check if the test bpmn file can be undeployed in Management Service
+ log ${deployedId}
+ ${auth}= Create List kermit kermit
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json
+ Create Session web_session http://${MGRSERVICE_IP}:${MGRSERVICE_PORT} headers=${headers} auth=${auth}
+ ${resp}= Delete Request web_session /api/workflow/v1/package/${deployedId}
+ Should Be Equal ${resp.status_code} ${200}
+
+Deploy BPMN File Test On MSB
+ [Documentation] Check if the test bpmn file can be deployed in activiti engine
+ ${auth}= Create List kermit kermit
+ ${headers}= Create Dictionary Accept=application/json
+ Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth}
+ ${files}= evaluate {"file":open('${bmpfilepath}','rb')}
+ ${resp}= Post Request web_session api/workflow/v1/package files=${files}
+ Should Be Equal ${resp.status_code} ${200}
+ Log ${resp.json()}
+ ${deployedId}= Set Variable ${resp.json()["deployedId"]}
+ Set Global Variable ${deployedId}
+
+Exectue BPMN File Testt On MSB
+ [Documentation] Check if the test bpmn file can be exectued in MSB
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json Authorization=Basic a2VybWl0Omtlcm1pdA==
+ Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers}
+ ${body} Create Dictionary processDefinitionKey=${processId}
+ ${body} dumps ${body}
+ ${resp}= Post Request web_session api/workflow/v1/process/instance ${body}
+ Should Be Equal ${resp.status_code} ${200}
+ Log ${resp.json()}
+ Should Be Equal ${resp.json()["processDefinitionKey"]} ${processId}
+
+UnDeploy BPMN File Testt On MSB
+ [Documentation] Check if the test bpmn file can be undeployed in MSB
+ log ${deployedId}
+ ${auth}= Create List kermit kermit
+ ${headers} Create Dictionary Content-Type=application/json Accept=application/json
+ Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth}
+ ${resp}= Delete Request web_session /api/workflow/v1/package/${deployedId}
+ Should Be Equal ${resp.status_code} ${200}
diff --git a/test/csit/tests/vid/login/test1.robot b/test/csit/tests/vid/login/test1.robot
index 12b23b725..acb6aae8d 100644
--- a/test/csit/tests/vid/login/test1.robot
+++ b/test/csit/tests/vid/login/test1.robot
@@ -1,6 +1,6 @@
*** Settings ***
Documentation Logins to VID
-Library ExtendedSelenium2Library
+Library Selenium2Library
Library Collections
Library String
Library RequestsLibrary
diff --git a/test/csit/tests/vnfsdk-pkgtools/tosca-metadata/csar/test_entry.yaml b/test/csit/tests/vnfsdk-pkgtools/tosca-metadata/csar/test_entry.yaml
index d1aed3c18..5c2f8909f 100644
--- a/test/csit/tests/vnfsdk-pkgtools/tosca-metadata/csar/test_entry.yaml
+++ b/test/csit/tests/vnfsdk-pkgtools/tosca-metadata/csar/test_entry.yaml
@@ -1,23 +1,23 @@
-#
-# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-tosca_definitions_version: tosca_simple_yaml_1_0
-
-topology_template:
- node_templates:
- test_node:
- type: tosca.nodes.Compute
-
+#
+# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+topology_template:
+ node_templates:
+ test_node:
+ type: tosca.nodes.Compute
+
diff --git a/test/csit/tests/vvp/sanity/__init__.robot b/test/csit/tests/vvp/sanity/__init__.robot
new file mode 100644
index 000000000..6bc0362e8
--- /dev/null
+++ b/test/csit/tests/vvp/sanity/__init__.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation VVP - HealthCheck
diff --git a/test/csit/tests/vvp/sanity/test1.robot b/test/csit/tests/vvp/sanity/test1.robot
new file mode 100644
index 000000000..27612fdb8
--- /dev/null
+++ b/test/csit/tests/vvp/sanity/test1.robot
@@ -0,0 +1,19 @@
+*** Settings ***
+Library Collections
+Library OperatingSystem
+Library RequestsLibrary
+Library json
+
+# http://localhost:9090/vvp/v1/engmgr/vendors
+# vvp-engagementmgr
+
+*** Test Cases ***
+Get Requests health check ok
+ [Tags] get
+ CreateSession vvp-engagementmgr http://localhost:9090
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json
+ ${resp}= Get Request vvp-engagementmgr /vvp/v1/engmgr/vendors headers=&{headers}
+ Should Be Equal As Strings ${resp.status_code} 200
+ @{ITEMS}= Copy List ${resp.json()}
+ : FOR ${ELEMENT} IN @{ITEMS}
+ \ Log ${ELEMENT['uuid']} ${ELEMENT['name']}
diff --git a/test/ete/labs/gwu/apt-proxy.sh b/test/ete/labs/gwu/apt-proxy.sh
new file mode 100755
index 000000000..d69415bd8
--- /dev/null
+++ b/test/ete/labs/gwu/apt-proxy.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+sed -i '/#!\/bin\/bash/a\
+ # sleep up to 3 minutes to avoid disk contention\
+ sleep $((RANDOM / 200))\
+ mkdir -p /etc/docker\
+ cat > /etc/docker/daemon.json <<EOF\
+ {\
+ "insecure-registries" : ["192.168.1.51:5000"]\
+ }\
+ EOF\
+ cat > /etc/apt/apt.conf.d/30proxy<<EOF\
+ Acquire::http { Proxy "http://192.168.1.51:3142"; };\
+ Acquire::https::Proxy "DIRECT";\
+ EOF\
+ apt-get -y update' $1
+
+# don't use insecure docker proxy in dcae
+perl -i -0pe 's/(?<=dcae_c_vm:)(.*?)\{ get_param: nexus_docker_repo \}/$1"nexus3.onap.org:10001"/s' $1
diff --git a/test/ete/labs/gwu/onap-oom.env b/test/ete/labs/gwu/onap-oom.env
new file mode 100644
index 000000000..6315267ef
--- /dev/null
+++ b/test/ete/labs/gwu/onap-oom.env
@@ -0,0 +1,26 @@
+parameters:
+
+ ##############################################
+ # #
+ # Parameters used across all ONAP components #
+ # #
+ ##############################################
+
+ public_net_id: 024582bd-ef9b-48b9-9e70-e6732559d9df
+ public_net_name: provider
+ docker_proxy: 192.168.1.51:5000
+ apt_proxy: 192.168.1.51:3142
+ ubuntu_1404_image: trusty
+ ubuntu_1604_image: xenial
+ centos_7_image: centos7
+ openstack_tenant_id: 4c93f99551604bf7af25a8f80c7f34cb
+ openstack_tenant_name: onap
+ openstack_username: demo
+ openstack_api_key: demo
+ keystone_url: http://192.168.1.11:5000
+ rancher_vm_flavor: m1.small
+ k8s_vm_flavor: m2.xxlarge
+ dns_list: ["192.168.1.11", "192.168.1.3"]
+ dns_forwarder: 192.168.1.11
+ oam_network_cidr: 172.16.0.0/16
+ dcae_ip_addr: 172.16.0.4
diff --git a/test/ete/labs/gwu/onap-openrc b/test/ete/labs/gwu/onap-openrc
new file mode 100644
index 000000000..1a452fa0a
--- /dev/null
+++ b/test/ete/labs/gwu/onap-openrc
@@ -0,0 +1,9 @@
+export OS_PROJECT_ID=4c93f99551604bf7af25a8f80c7f34cb
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_NAME=onap
+export OS_USERNAME=demo
+export OS_PASSWORD=demo
+export OS_AUTH_URL=http://controller:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/test/ete/labs/gwu/onap-openstack-template.env b/test/ete/labs/gwu/onap-openstack-template.env
new file mode 100644
index 000000000..97adca265
--- /dev/null
+++ b/test/ete/labs/gwu/onap-openstack-template.env
@@ -0,0 +1,142 @@
+parameters:
+
+ ##############################################
+ # #
+ # Parameters used across all ONAP components #
+ # #
+ ##############################################
+
+ public_net_id: 024582bd-ef9b-48b9-9e70-e6732559d9df
+
+ public_net_name: provider
+
+ ubuntu_1404_image: trusty
+
+ ubuntu_1604_image: xenial
+
+ flavor_small: m1.small
+
+ flavor_medium: m1.medium
+
+ flavor_large: m1.large
+
+ flavor_xlarge: m1.xlarge
+
+ flavor_xxlarge: m1.xlarge # purposely use a smaller VM here to reduce RAM usage
+
+ vm_base_name: onap
+
+ key_name: onap_key
+
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+
+ nexus_repo: https://nexus.onap.org/content/sites/raw
+
+ nexus_docker_repo: 192.168.1.51:5000
+
+ nexus_username: anonymous
+
+ nexus_password: anonymous
+
+ dmaap_topic: AUTO
+
+ artifacts_version: 1.1.1
+
+ openstack_tenant_id: ${OS_PROJECT_ID}
+
+ openstack_tenant_name: ${OS_PROJECT_NAME}
+
+ openstack_username: ${OS_USERNAME}
+
+ openstack_api_key: ${OS_PASSWORD}
+
+ openstack_auth_method: password
+
+ openstack_region: RegionOne
+
+ horizon_url: http://192.168.1.11/horizon
+
+ keystone_url: http://192.168.1.11:5000
+
+ cloud_env: RegionOne
+
+
+ ######################
+ # #
+ # Network parameters #
+ # #
+ ######################
+
+ dns_list: ["192.168.1.11", "192.168.1.3"]
+ external_dns: 192.168.1.3
+ dns_forwarder: 192.168.1.11
+ oam_network_cidr: 172.16.0.0/16
+
+ ### Private IP addresses ###
+
+ aai1_ip_addr: 172.16.1.1
+ aai2_ip_addr: 172.16.1.2
+ appc_ip_addr: 172.16.2.1
+ dcae_ip_addr: 172.16.4.1
+ dns_ip_addr: 172.16.100.1
+ so_ip_addr: 172.16.5.1
+ mr_ip_addr: 172.16.11.1
+ policy_ip_addr: 172.16.6.1
+ portal_ip_addr: 172.16.9.1
+ robot_ip_addr: 172.16.10.1
+ sdc_ip_addr: 172.16.3.1
+ sdnc_ip_addr: 172.16.7.1
+ vid_ip_addr: 172.16.8.1
+ clamp_ip_addr: 172.16.12.1
+ openo_ip_addr: 172.16.14.1
+
+ ###########################
+ # #
+ # Parameters used by DCAE #
+ # #
+ ###########################
+
+ dnsaas_config_enabled: false
+ dnsaas_region: RegionOne
+ dnsaas_keystone_url: http://192.168.1.11:5000/v3
+ dnsaas_tenant_name: ${OS_PROJECT_NAME}
+ dnsaas_username: ${OS_USERNAME}
+ dnsaas_password: ${OS_PASSWORD}
+ dcae_keystone_url: http://192.168.1.11:5000/v2.0
+ dcae_centos_7_image: centos7
+ dcae_domain: dcaeg2.onap.org
+ dcae_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+ dcae_private_key: '-----BEGIN RSA PRIVATE KEY-----\n
+MIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\n
+NGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\n
+NhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\n
+yzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\n
++ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\n
+fiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\n
+qFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\n
+lMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\n
+KqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\n
+F2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\n
+OjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\n
+4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\n
+6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\n
+be9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\n
+UbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\n
+gMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\n
+Y63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\n
+9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\n
+aWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\n
+xGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\n
+fMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\n
+22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\n
+YOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\n
+itqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\n
+y7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\n
+-----END RSA PRIVATE KEY-----'
+
+ ################################
+ # #
+ # Docker versions and branches #
+ # Rest of the file will be autogenerated from demo repo
+ # #
+ ################################
diff --git a/test/ete/labs/huawei/apt-proxy.sh b/test/ete/labs/huawei/apt-proxy.sh
new file mode 100755
index 000000000..37018ca6b
--- /dev/null
+++ b/test/ete/labs/huawei/apt-proxy.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+sed -i '/#!\/bin\/bash/a\
+ mkdir -p /etc/docker\
+ cat > /etc/docker/daemon.json <<EOF\
+ {\
+ "insecure-registries" : ["10.145.122.118:5000"]\
+ }\
+ EOF\
+ cat > /etc/apt/apt.conf.d/30proxy<<EOF\
+ Acquire::http { Proxy "http://10.145.122.118:3142"; };\
+ Acquire::https::Proxy "DIRECT";\
+ EOF\
+ apt-get -y update' $1
+
+# don't use insecure docker proxy in dcae
+perl -i -0pe 's/(?<=dcae_c_vm:)(.*?)\{ get_param: nexus_docker_repo \}/$1"nexus3.onap.org:10001"/s' $1
diff --git a/test/ete/labs/huawei/k8s_vm_entrypoint.sh b/test/ete/labs/huawei/k8s_vm_entrypoint.sh
new file mode 100644
index 000000000..cae0f392b
--- /dev/null
+++ b/test/ete/labs/huawei/k8s_vm_entrypoint.sh
@@ -0,0 +1,196 @@
+#!/bin/bash -x
+printenv
+
+mkdir -p /opt/config
+echo "__rancher_ip_addr__" > /opt/config/rancher_ip_addr.txt
+echo `hostname -I` `hostname` >> /etc/hosts
+mkdir -p /etc/docker
+cat > /etc/docker/daemon.json <<EOF
+{
+ "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+cat > /etc/apt/apt.conf.d/30proxy<<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+apt-get -y update
+apt-get -y install jq
+
+cd ~
+
+# install docker 1.12
+curl -s https://releases.rancher.com/install-docker/1.12.sh | sh
+usermod -aG docker ubuntu
+
+# install kubernetes 1.8.6
+curl -s -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
+chmod +x ./kubectl
+sudo mv ./kubectl /usr/local/bin/kubectl
+mkdir ~/.kube
+
+# install helm 2.3
+wget -q http://storage.googleapis.com/kubernetes-helm/helm-v2.3.0-linux-amd64.tar.gz
+tar -zxvf helm-v2.3.0-linux-amd64.tar.gz
+sudo mv linux-amd64/helm /usr/local/bin/helm
+
+# Fix virtual memory allocation for onap-log:elasticsearch:
+echo "vm.max_map_count=262144" >> /etc/sysctl.conf
+sysctl -p
+
+# install rancher agent
+echo export RANCHER_IP=__rancher_ip_addr__ > api-keys-rc
+source api-keys-rc
+
+sleep 50
+until curl -s -o projects.json -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projects; do
+ sleep 10
+done
+OLD_PID=$(jq -r '.data[0].id' projects.json)
+
+curl -s -H "Accept: application/json" -H "Content-Type: application/json" -d '{"accountId":"1a1"}' http://$RANCHER_IP:8080/v2-beta/apikeys | tee apikeys.json
+echo export RANCHER_ACCESS_KEY=`jq -r '.publicValue' apikeys.json` >> api-keys-rc
+echo export RANCHER_SECRET_KEY=`jq -r '.secretValue' apikeys.json` >> api-keys-rc
+source api-keys-rc
+
+curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X DELETE -H 'Content-Type: application/json' "http://$RANCHER_IP:8080/v2-beta/projects/$OLD_PID"
+
+until [ ! -z "$TEMPLATE_ID" ] && [ "$TEMPLATE_ID" != "null" ]; do
+ sleep 5
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v2-beta/projectTemplates?name=Kubernetes | tee projectTemplatesKubernetes.json
+ TEMPLATE_ID=$(jq -r '.data[0].id' projectTemplatesKubernetes.json)
+done
+
+curl -s -u "${RANCHER_ACCESS_KEY}:${RANCHER_SECRET_KEY}" -X POST -H 'Content-Type: application/json' -d '{ "name":"oom", "projectTemplateId":"'$TEMPLATE_ID'" }' "http://$RANCHER_IP:8080/v2-beta/projects" | tee project.json
+PID=`jq -r '.id' project.json`
+echo export RANCHER_URL=http://$RANCHER_IP:8080/v1/projects/$PID >> api-keys-rc
+source api-keys-rc
+
+until [ $(jq -r '.state' project.json) == "active" ]; do
+ sleep 5
+ curl -s -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID | tee project.json
+done
+
+TID=$(curl -s -X POST -H "Accept: application/json" -H "Content-Type: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationTokens | jq -r '.id')
+touch token.json
+while [ $(jq -r .command token.json | wc -c) -lt 10 ]; do
+ sleep 5
+ curl -s -X GET -H "Accept: application/json" http://$RANCHER_IP:8080/v1/projects/$PID/registrationToken/$TID | tee token.json
+done
+CMD=$(jq -r .command token.json)
+eval $CMD
+
+# download rancher CLI
+wget -q https://github.com/rancher/cli/releases/download/v0.6.7/rancher-linux-amd64-v0.6.7.tar.xz
+unxz rancher-linux-amd64-v0.6.7.tar.xz
+tar xvf rancher-linux-amd64-v0.6.7.tar
+
+# Clone OOM:
+cd ~
+git clone -b amsterdam http://gerrit.onap.org/r/oom
+
+# Update values.yaml to point to docker-proxy instead of nexus3:
+cd ~/oom/kubernetes
+perl -p -i -e 's/nexus3.onap.org:10001/__docker_proxy__/g' `find ./ -name values.yaml` oneclick/setenv.bash
+
+KUBETOKEN=$(echo -n 'Basic '$(echo -n "$RANCHER_ACCESS_KEY:$RANCHER_SECRET_KEY" | base64 -w 0) | base64 -w 0)
+
+# create .kube/config
+cat > ~/.kube/config <<EOF
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://$RANCHER_IP:8080/r/projects/$PID/kubernetes:6443"
+ name: "oom"
+contexts:
+- context:
+ cluster: "oom"
+ user: "oom"
+ name: "oom"
+current-context: "oom"
+users:
+- name: "oom"
+ user:
+ token: "$KUBETOKEN"
+EOF
+
+export KUBECONFIG=/root/.kube/config
+kubectl config view
+
+# Update ~/oom/kubernetes/kube2msb/values.yaml kubeMasterAuthToken to use the token from ~/.kube/config
+sed -i "s/kubeMasterAuthToken:.*/kubeMasterAuthToken: $KUBETOKEN/" ~/oom/kubernetes/kube2msb/values.yaml
+
+# Put your onap_key ssh private key in ~/.ssh/onap_key
+
+# Create or edit ~/oom/kubernetes/config/onap-parameters.yaml
+cp ~/oom/kubernetes/config/onap-parameters-sample.yaml ~/oom/kubernetes/config/onap-parameters.yaml
+cat >> ~/oom/kubernetes/config/onap-parameters.yaml <<EOF
+
+####################################
+# Overridden by k8s_vm_entrypoint.sh
+####################################
+
+OPENSTACK_UBUNTU_14_IMAGE: "__ubuntu_1404_image__"
+OPENSTACK_UBUNTU_16_IMAGE: "__ubuntu_1604_image__"
+OPENSTACK_CENTOS_7_IMAGE: "__centos_7_image__"
+OPENSTACK_PUBLIC_NET_ID: "__public_net_id__"
+OPENSTACK_PUBLIC_NET_NAME: "__public_net_name__"
+OPENSTACK_OAM_NETWORK_CIDR: "__oam_network_cidr__"
+OPENSTACK_TENANT_NAME: "__openstack_tenant_name__"
+OPENSTACK_TENANT_ID: "__openstack_tenant_id__"
+OPENSTACK_USERNAME: "__openstack_username__"
+OPENSTACK_API_KEY: "__openstack_api_key__"
+OPENSTACK_KEYSTONE_URL: "__keystone_url__"
+DCAE_IP_ADDR: "__dcae_ip_addr__"
+DCAE_KEYSTONE_URL: "__keystone_url__/v2.0"
+DNS_LIST: "__dns_list__"
+DNS_FORWARDER: "__dns_forwarder__"
+EXTERNAL_DNS: "8.8.8.8"
+DNSAAS_REGION: "RegionOne"
+DNSAAS_KEYSTONE_URL: "__keystone_url__/v2.0"
+DNSAAS_TENANT_NAME: "__openstack_tenant_name__"
+DNSAAS_USERNAME: "__openstack_username__"
+DNSAAS_PASSWORD: "__openstack_api_key__"
+
+EOF
+cat ~/oom/kubernetes/config/onap-parameters.yaml
+
+
+# wait for kubernetes to initialze
+sleep 100
+until [ $(kubectl get pods --namespace kube-system | tail -n +2 | grep -c Running) -ge 6 ]; do
+ sleep 10
+done
+
+# Source the environment file:
+cd ~/oom/kubernetes/oneclick/
+source setenv.bash
+
+# run the config pod creation
+cd ~/oom/kubernetes/config
+./createConfig.sh -n onap
+
+# Wait until the config container completes.
+sleep 200
+until [ $(kubectl get pods --namespace onap -a | tail -n +2 | grep -c Completed) -eq 1 ]; do
+ sleep 10
+done
+
+# version control the config to see what's happening
+cd /dockerdata-nfs/
+git init
+git config user.email "root@k8s"
+git config user.name "root"
+git add -A
+git commit -m "initial commit"
+
+# Run ONAP:
+cd ~/oom/kubernetes/oneclick/
+./createAll.bash -n onap
+
+# Check ONAP status:
+sleep 3
+kubectl get pods --all-namespaces
diff --git a/test/ete/labs/huawei/onap-ci-amsterdam-heat-openrc b/test/ete/labs/huawei/onap-ci-amsterdam-heat-openrc
new file mode 100644
index 000000000..5acb86759
--- /dev/null
+++ b/test/ete/labs/huawei/onap-ci-amsterdam-heat-openrc
@@ -0,0 +1,9 @@
+export OS_PROJECT_DOMAIN_NAME=Default
+export OS_USER_DOMAIN_NAME=Default
+export OS_PROJECT_ID=3d228d2fcbb7447bbba3cde703431bc1
+export OS_PROJECT_NAME=onap-ci-amsterdam-heat
+export OS_USERNAME=demo
+export OS_PASSWORD=demo
+export OS_AUTH_URL=http://controller.neo.futurewei.com:5000/v3
+export OS_IDENTITY_API_VERSION=3
+export OS_IMAGE_API_VERSION=2
diff --git a/test/ete/labs/huawei/onap-oom.env b/test/ete/labs/huawei/onap-oom.env
new file mode 100644
index 000000000..423480fef
--- /dev/null
+++ b/test/ete/labs/huawei/onap-oom.env
@@ -0,0 +1,21 @@
+parameters:
+
+ ##############################################
+ # #
+ # Parameters used across all ONAP components #
+ # #
+ ##############################################
+
+ public_net_id: 3a6247f1-fac6-4167-a49f-33cc8415ccf4
+ docker_proxy: 10.145.122.118:5000
+ apt_proxy: 10.145.122.118:3142
+ dns_list: ["10.145.122.117", "10.145.122.118"]
+ oam_network_cidr: 172.16.0.0/16
+ ubuntu_1404_image: trusty
+ openstack_tenant_id: 3d228d2fcbb7447bbba3cde703431bc1
+ openstack_tenant_name: onap-ci-amsterdam-heat
+ openstack_username: demo
+ openstack_api_key: demo
+ keystone_url: http://10.145.122.117:5000
+ rancher_vm_flavor: m1.small
+ k8s_vm_flavor: m1.xxlarge
diff --git a/test/ete/labs/huawei/onap-oom.yaml b/test/ete/labs/huawei/onap-oom.yaml
new file mode 100644
index 000000000..5db534471
--- /dev/null
+++ b/test/ete/labs/huawei/onap-oom.yaml
@@ -0,0 +1,130 @@
+heat_template_version: 2015-10-15
+description: ONAP on Kubernetes using OOM
+
+parameters:
+ docker_proxy:
+ type: string
+
+ apt_proxy:
+ type: string
+
+ public_net_id:
+ type: string
+ description: The ID of the Public network for floating IP address allocation
+
+ public_net_name:
+ type: string
+ description: The name of the Public network referred by public_net_id
+
+ dns_list:
+ type: comma_delimited_list
+ description: List of External DNS for OAM ONAP network
+
+ oam_network_cidr:
+ type: string
+ description: CIDR of the OAM ONAP network
+
+ keystone_url:
+ type: string
+ description: URL of OpenStack Keystone
+
+ openstack_tenant_id:
+ type: string
+ description: OpenStack tenant ID
+
+ openstack_tenant_name:
+ type: string
+ description: OpenStack tenant name (matching with the openstack_tenant_id)
+
+ openstack_username:
+ type: string
+ description: OpenStack username
+
+ openstack_api_key:
+ type: string
+ description: OpenStack password or API Key
+
+ ubuntu_1404_image:
+ type: string
+ description: Name of the Ubuntu 14.04 image
+
+ ubuntu_1604_image:
+ type: string
+ description: Name of the Ubuntu 16.04 image
+
+ centos_7_image:
+ type: string
+ description: the id/name of the CentOS 7 VM imange
+
+ rancher_vm_flavor:
+ type: string
+ description: Name of the Ubuntu 14.04 image
+
+ k8s_vm_flavor:
+ type: string
+ description: Name of the Ubuntu 14.04 image
+
+ dcae_ip_addr:
+ type: string
+
+ dns_forwarder:
+ type: string
+ description: the forwarder address for setting up ONAP's private DNS server
+
+
+resources:
+ random-str:
+ type: OS::Heat::RandomString
+ properties:
+ length: 4
+
+ rancher_vm:
+ type: OS::Nova::Server
+ properties:
+ name: rancher
+ image: xenial
+ flavor: { get_param: rancher_vm_flavor }
+ key_name: onap_key
+ networks:
+ - network: { get_param: public_net_id }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ template:
+ get_file: rancher_vm_entrypoint.sh
+
+ k8s_vm:
+ type: OS::Nova::Server
+ properties:
+ name: k8s
+ image: xenial
+ flavor: { get_param: k8s_vm_flavor }
+ key_name: onap_key
+ networks:
+ - network: { get_param: public_net_id }
+ user_data_format: RAW
+ user_data:
+ str_replace:
+ params:
+ __docker_proxy__: { get_param: docker_proxy }
+ __apt_proxy__: { get_param: apt_proxy }
+ __rancher_ip_addr__: { get_attr: [rancher_vm, first_address] }
+ __openstack_tenant_id__: { get_param: openstack_tenant_id }
+ __openstack_tenant_name__: { get_param: openstack_tenant_name }
+ __openstack_username__: { get_param: openstack_username }
+ __openstack_api_key__: { get_param : openstack_api_key }
+ __public_net_id__: { get_param: public_net_id }
+ __public_net_name__: { get_param: public_net_name }
+ __oam_network_cidr__: { get_param: oam_network_cidr }
+ __ubuntu_1404_image__: { get_param: ubuntu_1404_image }
+ __ubuntu_1604_image__: { get_param: ubuntu_1604_image }
+ __centos_7_image__: { get_param: centos_7_image }
+ __keystone_url__: { get_param: keystone_url }
+ __dcae_ip_addr__: { get_param: dcae_ip_addr }
+ __dns_list__: { get_param: [dns_list, 0] }
+ __dns_forwarder__: { get_param: dns_forwarder }
+ template:
+ get_file: k8s_vm_entrypoint.sh
diff --git a/test/ete/labs/huawei/onap-openstack-template.env b/test/ete/labs/huawei/onap-openstack-template.env
new file mode 100644
index 000000000..bffbd2df2
--- /dev/null
+++ b/test/ete/labs/huawei/onap-openstack-template.env
@@ -0,0 +1,142 @@
+parameters:
+
+ ##############################################
+ # #
+ # Parameters used across all ONAP components #
+ # #
+ ##############################################
+
+ public_net_id: 3a6247f1-fac6-4167-a49f-33cc8415ccf4
+
+ public_net_name: provider
+
+ ubuntu_1404_image: trusty
+
+ ubuntu_1604_image: xenial
+
+ flavor_small: m1.small
+
+ flavor_medium: m1.medium
+
+ flavor_large: m1.large
+
+ flavor_xlarge: m1.xlarge
+
+ flavor_xxlarge: m1.2xlarge
+
+ vm_base_name: onap
+
+ key_name: onap_key
+
+ pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+
+ nexus_repo: https://nexus.onap.org/content/sites/raw
+
+ nexus_docker_repo: 10.145.122.118:5000
+
+ nexus_username: anonymous
+
+ nexus_password: anonymous
+
+ dmaap_topic: AUTO
+
+ artifacts_version: 1.1.1
+
+ openstack_tenant_id: ${OS_PROJECT_ID}
+
+ openstack_tenant_name: ${OS_PROJECT_NAME}
+
+ openstack_username: ${OS_USERNAME}
+
+ openstack_api_key: ${OS_PASSWORD}
+
+ openstack_auth_method: password
+
+ openstack_region: RegionOne
+
+ horizon_url: http://10.145.122.117/horizon
+
+ keystone_url: http://10.145.122.117:5000
+
+ cloud_env: RegionOne
+
+
+ ######################
+ # #
+ # Network parameters #
+ # #
+ ######################
+
+ dns_list: ["10.145.122.117", "10.145.122.118"]
+ external_dns: 10.145.122.118
+ dns_forwarder: 10.145.122.117
+ oam_network_cidr: 172.16.0.0/16
+
+ ### Private IP addresses ###
+
+ aai1_ip_addr: 172.16.1.1
+ aai2_ip_addr: 172.16.1.2
+ appc_ip_addr: 172.16.2.1
+ dcae_ip_addr: 172.16.4.1
+ dns_ip_addr: 172.16.100.1
+ so_ip_addr: 172.16.5.1
+ mr_ip_addr: 172.16.11.1
+ policy_ip_addr: 172.16.6.1
+ portal_ip_addr: 172.16.9.1
+ robot_ip_addr: 172.16.10.1
+ sdc_ip_addr: 172.16.3.1
+ sdnc_ip_addr: 172.16.7.1
+ vid_ip_addr: 172.16.8.1
+ clamp_ip_addr: 172.16.12.1
+ openo_ip_addr: 172.16.14.1
+
+ ###########################
+ # #
+ # Parameters used by DCAE #
+ # #
+ ###########################
+
+ dnsaas_config_enabled: false
+ dnsaas_region: RegionOne
+ dnsaas_keystone_url: http://10.145.122.117:5000/v3
+ dnsaas_tenant_name: ${OS_PROJECT_NAME}
+ dnsaas_username: ${OS_USERNAME}
+ dnsaas_password: ${OS_PASSWORD}
+ dcae_keystone_url: http://10.145.122.117:5000/v2.0
+ dcae_centos_7_image: centos7
+ dcae_domain: dcaeg2.onap.org
+ dcae_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+ dcae_private_key: '-----BEGIN RSA PRIVATE KEY-----\n
+MIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\n
+NGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\n
+NhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\n
+yzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\n
++ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\n
+fiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\n
+qFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\n
+lMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\n
+KqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\n
+F2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\n
+OjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\n
+4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\n
+6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\n
+be9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\n
+UbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\n
+gMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\n
+Y63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\n
+9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\n
+aWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\n
+xGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\n
+fMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\n
+22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\n
+YOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\n
+itqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\n
+y7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\n
+-----END RSA PRIVATE KEY-----'
+
+ ################################
+ # #
+ # Docker versions and branches #
+ # Rest of the file will be autogenerated from demo repo
+ # #
+ ################################
diff --git a/test/ete/labs/huawei/rancher_vm_entrypoint.sh b/test/ete/labs/huawei/rancher_vm_entrypoint.sh
new file mode 100644
index 000000000..da1955446
--- /dev/null
+++ b/test/ete/labs/huawei/rancher_vm_entrypoint.sh
@@ -0,0 +1,18 @@
+#!/bin/bash -x
+printenv
+
+echo `hostname -I` `hostname` >> /etc/hosts
+mkdir -p /etc/docker
+cat > /etc/docker/daemon.json <<EOF
+{
+ "insecure-registries" : ["__docker_proxy__"]
+}
+EOF
+cat > /etc/apt/apt.conf.d/30proxy<<EOF
+Acquire::http { Proxy "http://__apt_proxy__"; };
+Acquire::https::Proxy "DIRECT";
+EOF
+apt-get -y update
+apt-get -y install docker.io
+usermod -aG docker ubuntu
+docker run --restart unless-stopped -d -p 8080:8080 rancher/server:v1.6.10
diff --git a/test/ete/labs/windriver/Integration-Jenkins-openrc.sh b/test/ete/labs/windriver/Integration-Jenkins-openrc.sh
index ef2aa9a25..ca2e2c2cf 100644
--- a/test/ete/labs/windriver/Integration-Jenkins-openrc.sh
+++ b/test/ete/labs/windriver/Integration-Jenkins-openrc.sh
@@ -15,7 +15,7 @@ export OS_AUTH_URL=http://10.12.25.2:5000/v3
# With the addition of Keystone we have standardized on the term **project**
# as the entity that owns the resources.
-export OS_PROJECT_ID=09d8566ea45e43aa974cf447ed591d77
+export OS_PROJECT_ID="09d8566ea45e43aa974cf447ed591d77"
export OS_PROJECT_NAME="Integration-Jenkins"
export OS_USER_DOMAIN_NAME="Default"
if [ -z "$OS_USER_DOMAIN_NAME" ]; then unset OS_USER_DOMAIN_NAME; fi
@@ -26,7 +26,10 @@ unset OS_TENANT_NAME
# In addition to the owning entity (tenant), OpenStack stores the entity
# performing the action as the **user**.
-export OS_USERNAME="gary_wu"
+if [ -z ${OS_USERNAME_INPUT+x} ]; then
+ read -sp "Please enter your OpenStack Username for project $OS_PROJECT_NAME: " OS_USERNAME_INPUT
+fi
+export OS_USERNAME=$OS_USERNAME_INPUT
# With Keystone you pass the keystone password.
if [ -z ${OS_PASSWORD_INPUT+x} ]; then
diff --git a/test/ete/labs/windriver/Integration-SB-04-openrc.sh b/test/ete/labs/windriver/Integration-SB-04-openrc.sh
new file mode 100644
index 000000000..e7cee93b9
--- /dev/null
+++ b/test/ete/labs/windriver/Integration-SB-04-openrc.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+# To use an OpenStack cloud you need to authenticate against the Identity
+# service named keystone, which returns a **Token** and **Service Catalog**.
+# The catalog contains the endpoints for all services the user/tenant has
+# access to - such as Compute, Image Service, Identity, Object Storage, Block
+# Storage, and Networking (code-named nova, glance, keystone, swift,
+# cinder, and neutron).
+#
+# *NOTE*: Using the 3 *Identity API* does not necessarily mean any other
+# OpenStack API is version 3. For example, your cloud provider may implement
+# Image API v1.1, Block Storage API v2, and Compute API v2.0. OS_AUTH_URL is
+# only for the Identity API served through keystone.
+export OS_AUTH_URL=http://10.12.25.2:5000/v3
+
+# With the addition of Keystone we have standardized on the term **project**
+# as the entity that owns the resources.
+export OS_PROJECT_ID="d570c718cbc545029f40e50b75eb13df"
+export OS_PROJECT_NAME="Integration-SB-04"
+export OS_USER_DOMAIN_NAME="Default"
+if [ -z "$OS_USER_DOMAIN_NAME" ]; then unset OS_USER_DOMAIN_NAME; fi
+
+# unset v2.0 items in case set
+unset OS_TENANT_ID
+unset OS_TENANT_NAME
+
+# In addition to the owning entity (tenant), OpenStack stores the entity
+# performing the action as the **user**.
+# In addition to the owning entity (tenant), OpenStack stores the entity
+# performing the action as the **user**.
+if [ -z ${OS_USERNAME_INPUT+x} ]; then
+ read -sp "Please enter your OpenStack Username for project $OS_PROJECT_NAME: " OS_USERNAME_INPUT
+fi
+export OS_USERNAME=$OS_USERNAME_INPUT
+
+# With Keystone you pass the keystone password.
+if [ -z ${OS_PASSWORD_INPUT+x} ]; then
+ read -sp "Please enter your OpenStack Password for project $OS_PROJECT_NAME as user $OS_USERNAME: " OS_PASSWORD_INPUT
+fi
+export OS_PASSWORD=$OS_PASSWORD_INPUT
+
+# If your configuration has multiple regions, we set that information here.
+# OS_REGION_NAME is optional and only valid in certain environments.
+export OS_REGION_NAME="RegionOne"
+# Don't leave a blank variable, unset it if it was empty
+if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi
+
+export OS_INTERFACE=public
+export OS_IDENTITY_API_VERSION=3
diff --git a/test/ete/labs/windriver/apt-proxy.sh b/test/ete/labs/windriver/apt-proxy.sh
new file mode 100755
index 000000000..f8bb32ff0
--- /dev/null
+++ b/test/ete/labs/windriver/apt-proxy.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+sed -i '/#!\/bin\/bash/a\
+ mkdir -p /etc/docker\
+ cat > /etc/docker/daemon.json <<EOF\
+ {\
+ "insecure-registries" : ["10.12.5.80:5000"]\
+ }\
+ EOF\
+ cat > /etc/apt/apt.conf.d/30proxy<<EOF\
+ Acquire::http { Proxy "http://10.12.5.80:3142"; };\
+ Acquire::https::Proxy "DIRECT";\
+ EOF\
+ apt-get -y update' $1
+
+# don't use insecure docker proxy in dcae
+perl -i -0pe 's/(?<=dcae_c_vm:)(.*?)\{ get_param: nexus_docker_repo \}/$1"nexus3.onap.org:10001"/s' $1
diff --git a/test/ete/labs/windriver/onap-openstack-template.env b/test/ete/labs/windriver/onap-openstack-template.env
index b1b31ee88..1992a42ed 100644
--- a/test/ete/labs/windriver/onap-openstack-template.env
+++ b/test/ete/labs/windriver/onap-openstack-template.env
@@ -32,15 +32,15 @@ parameters:
nexus_repo: https://nexus.onap.org/content/sites/raw
- nexus_docker_repo: nexus3.onap.org:10001
+ nexus_docker_repo: 10.12.5.80:5000
- nexus_username: docker
+ nexus_username: anonymous
- nexus_password: docker
+ nexus_password: anonymous
dmaap_topic: AUTO
- artifacts_version: 1.1.0-SNAPSHOT
+ artifacts_version: 1.2.0-SNAPSHOT
openstack_tenant_id: ${OS_PROJECT_ID}
@@ -69,6 +69,7 @@ parameters:
dns_list: ["10.12.25.5", "8.8.8.8"]
external_dns: 8.8.8.8
+ dns_forwarder: 10.12.25.5
oam_network_cidr: 10.0.0.0/16
### Private IP addresses ###
@@ -77,11 +78,6 @@ parameters:
aai2_ip_addr: 10.0.1.2
appc_ip_addr: 10.0.2.1
dcae_ip_addr: 10.0.4.1
- dcae_coll_ip_addr: 10.0.4.102
- dcae_db_ip_addr: 10.0.4.101
- dcae_hdp1_ip_addr: 10.0.4.103
- dcae_hdp2_ip_addr: 10.0.4.104
- dcae_hdp3_ip_addr: 10.0.4.105
dns_ip_addr: 10.0.100.1
so_ip_addr: 10.0.5.1
mr_ip_addr: 10.0.11.1
@@ -94,32 +90,12 @@ parameters:
clamp_ip_addr: 10.0.12.1
openo_ip_addr: 10.0.14.1
-# dcae_coll_float_ip: PUT DCAE COLLECTOR FLOATING IP HERE
-# dcae_db_float_ip: PUT DCAE DATABASE FLOATING IP HERE
-# dcae_hdp1_float_ip: PUT DCAE HADOOP VM1 FLOATING IP HERE
-# dcae_hdp2_float_ip: PUT DCAE HADOOP VM2 FLOATING IP HERE
-# dcae_hdp3_float_ip: PUT DCAE HADOOP VM3 FLOATING IP HERE
-
###########################
# #
# Parameters used by DCAE #
# #
###########################
-# dcae_base_environment: 1-NIC-FLOATING-IPS
-
-# dcae_zone: ZONE
-
-# dcae_state: STATE
-
-# nexus_repo_root: https://nexus.onap.org
-
-# nexus_url_snapshot: https://nexus.onap.org/content/repositories/snapshots
-
-# gitlab_branch: master
-
-# dcae_code_version: 1.1.0
-
dnsaas_config_enabled: true
dnsaas_region: RegionOne
dnsaas_keystone_url: http://10.12.25.5:5000/v3
@@ -128,92 +104,39 @@ parameters:
dnsaas_password: ${OS_PASSWORD}
dcae_keystone_url: "http://10.0.14.1/api/multicloud-titanium_cloud/v0/pod25_RegionOne/identity/v2.0"
dcae_centos_7_image: CentOS-7
- dcae_security_group: default
- dcae_key_name: 'id_lji_onap'
- dcae_public_key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDloKgBHx/yKRV77lr828rqa+zK+iTZpqmH3WSUU3vdNhSbEMNKkGVXR4+Gq1hNd8UNF+jMg87kOFSlQbE5jvsQMWuu1unxLKbH3AeXJd21gR1Gx4KXjkWsfl4URWMZ9WNvH0hMvqEV5SdFIDQmx07C/NOfy3R5N1pCgNsh9RT+EpDFh7jmimxrAqdxP0HnGFC2oM3rYMnzfh2/+Obkag6O3RZOkYx/WLQKbOKTi8K1C4UM5pwFzLT/vC+d9DF8pE7P9dlrbHTF9c3IGsP5oCa8CQ/WE4lVj/L9/iFNs0WsxdYaOnf11GJmPs663hltvWbQiqsFpdjX6tk/zMb3Xipz ubuntu@lusheng-sm-b781d54e-48ac-42fa-a780-3289b56e6598'
- dcae_private_key: '-----BEGIN RSA PRIVATE KEY-----\n
-MIIEowIBAAKCAQEA5aCoAR8f8ikVe+5a/NvK6mvsyvok2aaph91klFN73TYUmxDD\n
-SpBlV0ePhqtYTXfFDRfozIPO5DhUpUGxOY77EDFrrtbp8Symx9wHlyXdtYEdRseC\n
-l45FrH5eFEVjGfVjbx9ITL6hFeUnRSA0JsdOwvzTn8t0eTdaQoDbIfUU/hKQxYe4\n
-5opsawKncT9B5xhQtqDN62DJ834dv/jm5GoOjt0WTpGMf1i0Cmzik4vCtQuFDOac\n
-Bcy0/7wvnfQxfKROz/XZa2x0xfXNyBrD+aAmvAkP1hOJVY/y/f4hTbNFrMXWGjp3\n
-9dRiZj7Out4Zbb1m0IqrBaXY1+rZP8zG914qcwIDAQABAoIBADusR1ybQQsGQC4H\n
-oB+L4qyOxWduH/trwyyqjQO6ujqfXjV54rZS1fMbhqHhjz36fPvNFdNoAXDvbpfP\n
-fYXOsVHg9fHmqb7h8qmHdXWDAp2zYu9M05QdBeAwqrQr3/gT+/YZtAk1lNkCxvTA\n
-zKyzKLWlTD1/etIxX0RfBhNKBF2N6X8faO2QFxXKFomFsSRUJgbAb3sJFWpGBQAE\n
-FcwpR3wB188y/qXXD5XY52zzKzFTsJW4Y48j/3tSirT68QzsqUm4CZl1/98oOjE+\n
-GQC3GCNZDHqh5n23KzTy0SuxhqVT30Ot9S497h1nEbgxZPjK5chHQjuIpGZIyEme\n
-TQOn5BkCgYEA+UVwel1PSV55fd3nRb9rwXEqeNg3X0Zlvx8qvPwfzdTSK5XCMTPe\n
-C6V3BS8IHvpXsuzmLkrCBUy+O3rOkbhlXhGvNAndBE9y3NY2K38hfiTM38irPwzB\n
-2Ksrc/Nu5uQaLk/5t08N3W6cZCml3aX4PVkJRCcH/K032ohGx2u8tR8CgYEA69N4\n
-sTIy56076TtgXM6A26W/HGY7X1olM82ZABnwCiSzXJWQc9QeepCiZCqeiyzxZAs2\n
-sBYb0+uKMkEYRpZUSCUtFnwoKyK9bFSo9lo8YyOUopi2e1KYfKhC2tR6HhNp+WrY\n
-YGMlwMmPdbd1NqbeTQdLnJwqJjKWhFM5mVzPLC0CgYAoFNxTMLHREFnkvUu00WTY\n
-RAQaTloI/d7abn6GyNusUy5AR+Jj2v03TjHnKXra0FJNuP4CL48nHwHvun2AvO+/\n
-woQzj+p0CPplPCSVtemCyRQQX8n5Z2m8FznzeQ86HS+AhueWbCXEl0aabH/5NfjG\n
-lIyC4uvL22aBwyvuYQqE7QKBgE6zKREBbqmQT3EbZqIyLCChJLEmkOPWYpnyIAA/\n
-p7LK4qygIS/2dyFyCS+iZXyOyBQaBesnxauobFsvDBnqa0AUYAKj9ofGtS5k5moo\n
-XQS8yAqnKibnvonDYWRECmjlE7Wv6XvpOp0m5uBjFBPkBkXwjFQ8bXiH42FPoOZE\n
-acMVAoGBAJk4+VGPw1Z7NZbJiDRtdCOY1SZBqWXf1Mth6H8JFUGPLyTfFq1cGpN8\n
-+Odna/7rl4jCiiWrCJyob4F4DF0AE1t3lEa1XgWwDIdagnldH9e5z/psR3I/p1wv\n
-m3bZLBvlAVvCajosd/qeX0FkPGZlgk1dGZ8/7SyK7NITqAfgMMRS\n
+ dcae_domain: dcaeg2.onap.org
+ dcae_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh
+ dcae_private_key: '-----BEGIN RSA PRIVATE KEY-----\n
+MIIEpQIBAAKCAQEAylw4KKN/ljqnFBvP+blG5PNfsnM4MAuGPMsE5rkKmzcZWNaE\n
+NGMXTFKlJ4YrUl7OUv8kbgFTmB8BoNpgrNtKACDaz/psQSOeOADCG/YrT4wrYKrR\n
+NhFqOjJpxRmxweEsd14qBOxeFT7Ie42qbCMMzo260HvjLmtUxkOXeJ3xDkGmoJVy\n
+yzxX7nO1m4WyWyukO6x6mX0XDsADF4A6AapcqinoisJ7pnXaNkcjU/JY2Jrwem7s\n
++ypzIp86O6gdLpLVU9ORR/UYNAk1h+Z6K5Rual4D9mrpC9IJNaYfIgLe7mC39ZLa\n
+fiySNoGhei9P6pYvRJlQki69bid/EPAgX5YZIQIDAQABAoIBAQClDekkhI9ZqseC\n
+qFjPuKaxsizZMg+faJb6WSHLSxzyk1OSWY6F6FklgLeC8HW/fuLNYZyGOYDEsG20\n
+lMqL02Wdiy7OutS3oOS5iyzIf9a90HfFJi706el6RIpvINETcaXCS0T8tQrcS1Rd\n
+KqTaBRC6HXJGAPbBcvw3pwQSdskatU6a/Kt2a3x6DsqqinQcgEB/SbrDaJCUX9sb\n
+F2HVUwdq7aZK1Lk0ozr1FID9mrhjwWuQ6XC+vjG0FqtyXeMpR5iaQ73hex3FXQ8z\n
+OjkFbMwuHWSh1DSx70r5yFrrBqwQKnMsBqx4QDRf3fIENUnWviaL+n+gwcXA07af\n
+4kaNUFUtAoGBAPuNNRAGhZnyZ9zguns9PM56nmeMUikV5dPN2DTbQb79cpfV+7pC\n
+6PeSH/dTKFLz62d6qAM2EsNXQvewf8fipBVBRPsRqKOv+uepd01dHNy62I5B+zRm\n
+be9Kbe+EN60qdzvyPM+2hV6CnvGv1dirimS9pu6RrxD2Rmz1ectnJE+rAoGBAM3w\n
+UbSEemyZ6EKjck2RfdipzY0MNBnIZ2cUqHh8mmPXjdTLzpXb9vmPbHb01Qwo8MP+\n
+gMnTbTBOzyNAaHdIrCO9FHW6C85j3ot5Yzcr+EcBVcua+7KHU0Sgn44JNH8DisJ7\n
+Y63UP/1Xb4d1/QvHfxYy3WOvvRdVZ7pPo8JNX95jAoGAIe5CIg8/JizUZa7KeKUh\n
+9pgDleQPkQsrHQ6/AyIwFBsLwf9THSS5V+uV9D57SfUs46Bf2U8J6N90YQSlt8iS\n
+aWuManFPVgT+yxDIzt6obf2mCEpOIBtQ6N4ZRh2HhQwdWTCrkzkDdGQaHG+jYL6C\n
+xGPwiG2ON7OAfGIAM7eN5lECgYEAhoRLWlaOgRGnHKAWsYQvZ67CjTdDcPPuVu6v\n
+fMQnNMA/7JeTwV+E205L0wfpgZ/cZKmBBlQMJlnUA3q2wfO+PTnse1mjDJU/cGtB\n
+22/lJLxChlQdxGeQhGtGzUhF+hEeOhrO6WSSx7CtMRZoy6Dr6lwfMFZCdVNcBd6v\n
+YOOZk3ECgYEAseUKGb6E80XTVVNziyuiVbQCsI0ZJuRfqMZ2IIDQJU9u6AnGAway\n
+itqHbkGsmDT+4HUz01+1JKnnw42RdSrHdU/LaOonD+RIGqe2x800QXzqASKLdCXr\n
+y7RoiFqJtkdFQykzJemA+xOXvHLgKi/MXFsU90PCD0VJKLj8vwpX78Y=\n
-----END RSA PRIVATE KEY-----'
################################
# #
# Docker versions and branches #
+ # Rest of the file will be autogenerated from demo repo
# #
################################
-
- aai_branch: master
- appc_branch: master
- so_branch: master
- mr_branch: master
- dcae_branch: master
- policy_branch: master
- portal_branch: release-1.3.0
- robot_branch: master
- sdc_branch: master
- sdnc_branch: master
- vid_branch: master
- clamp_branch: master
- vnfsdk_branch: master
-
- aai_docker: 1.1-STAGING-latest
- appc_docker: 1.1-STAGING-latest
- so_docker: 1.1-STAGING-latest
- mr_docker: 1.1-STAGING-latest
- dcae_docker: 1.1-latest
- policy_docker: 1.1-STAGING-latest
- portal_docker: 1.3-STAGING-latest
- robot_docker: 1.1-STAGING-latest
- sdc_docker: 1.1-STAGING-latest
- sdnc_docker: 1.2-STAGING-latest
- vid_docker: 1.1-STAGING-latest
- clamp_docker: 1.1-STAGING-latest
- msb_docker: latest
- mvim_docker: latest
- vfc_docker: latest
- uui_docker: latest
- esr_docker: latest
- dgbuilder_docker: 0.1-STAGING-latest
- cli_docker: 1.1-STAGING-latest
-
- #####################
- # #
- # ONAP repositories #
- # #
- #####################
- aai_repo: http://gerrit.onap.org/r/aai/test-config
- appc_repo: http://gerrit.onap.org/r/appc/deployment.git
- dcae_repo: http://gerrit.onap.org/r/dcae/demo/startup/controller.git
- mr_repo: http://gerrit.onap.org/r/dcae/demo/startup/message-router.git
- so_repo: http://gerrit.onap.org/r/so/docker-config.git
- policy_repo: http://gerrit.onap.org/r/policy/docker.git
- portal_repo: http://gerrit.onap.org/r/portal.git
- robot_repo: http://gerrit.onap.org/r/testsuite/properties.git
- sdc_repo: http://gerrit.onap.org/r/sdc.git
- sdnc_repo: http://gerrit.onap.org/r/sdnc/oam.git
- vid_repo: http://gerrit.onap.org/r/vid.git
- clamp_repo: http://gerrit.onap.org/r/clamp.git
- vnfsdk_repo: http://gerrit.onap.org/r/vnfsdk/refrepo.git
diff --git a/test/ete/scripts/deploy-onap.sh b/test/ete/scripts/deploy-onap.sh
index 69296ab17..68ca34830 100755
--- a/test/ete/scripts/deploy-onap.sh
+++ b/test/ete/scripts/deploy-onap.sh
@@ -1,26 +1,91 @@
-#!/bin/bash
+#!/bin/bash -x
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: $0 <lab-name>"
+ exit 1
+fi
if [ -z "$WORKSPACE" ]; then
export WORKSPACE=`git rev-parse --show-toplevel`
fi
+LAB_DIR=${WORKSPACE}/test/ete/labs/$1
+
+if [ ! -d "$LAB_DIR" ]; then
+ echo "Directory $LAB_DIR not found"
+ exit 2
+fi
+
source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
-# Delete all existing stacks
-STACKS=$(openstack stack list -c "Stack Name" -f value)
-if [ ! -z "${STACKS}" ]; then
- echo "Deleting Stacks ${STACKS}"
- openstack stack delete -y $STACKS
-else
- echo "No existing stacks to delete."
+
+SENTINEL='Docker versions and branches'
+
+mkdir -p ${LAB_DIR}/target
+YAML_FILE=${LAB_DIR}/target/onap_openstack.yaml
+ENV_FILE=${LAB_DIR}/target/onap_openstack.env
+YAML_SRC=${ONAP_WORKDIR}/demo/heat/ONAP/onap_openstack.yaml
+ENV_SRC=${ONAP_WORKDIR}/demo/heat/ONAP/onap_openstack.env
+
+# copy heat template to WORKSPACE
+cp ${YAML_SRC} ${YAML_FILE}
+
+# generate final env file
+pushd ${ONAP_WORKDIR}/demo
+envsubst < ${LAB_DIR}/onap-openstack-template.env | sed -n "1,/${SENTINEL}/p" > ${ENV_FILE}
+echo " # Rest of the file was AUTO-GENERATED from" | tee -a ${ENV_FILE}
+echo " #" $(git config --get remote.origin.url) heat/ONAP/onap_openstack.env $(git rev-parse HEAD) | tee -a ${ENV_FILE}
+popd
+sed "1,/${SENTINEL}/d" ${ENV_SRC} >> ${ENV_FILE}
+cat ${ENV_FILE}
+
+sdiff -w 180 ${ENV_SRC} ${ENV_FILE}
+
+# generate final heat template
+# add apt proxy to heat template if applicable
+if [ -x $LAB_DIR/apt-proxy.sh ]; then
+ $LAB_DIR/apt-proxy.sh ${YAML_FILE}
+ sdiff -w 180 ${YAML_SRC} ${YAML_FILE}
fi
+
+#exit 0
+
+#diff ${LAB_DIR}/onap-openstack-template.env ${LAB_DIR}/onap-openstack.env
+
+
+# tear down old deployment
+$WORKSPACE/test/ete/scripts/teardown-onap.sh
+
+# create new stack
STACK="ete-$(uuidgen | cut -c-8)"
echo "New Stack Name: ${STACK}"
+openstack stack create -t ${YAML_FILE} -e ${ENV_FILE} $STACK
+
+while [ "CREATE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $STACK)" ]; do
+ sleep 20
+done
+STATUS=$(openstack stack show -c stack_status -f value $STACK)
+echo $STATUS
+if [ "CREATE_COMPLETE" != "$STATUS" ]; then
+ exit 1
+fi
+
+
+# wait until Robot VM initializes
+ROBOT_IP=$($WORKSPACE/test/ete/scripts/get-floating-ip.sh onap-robot)
+echo "ROBOT_IP=${ROBOT_IP}"
+
+if [ "" == "${ROBOT_IP}" ]; then
+ exit 1
+fi
-cp ${ONAP_WORKDIR}/demo/heat/ONAP/onap_openstack.env ${WORKSPACE}/test/ete/labs/windriver/onap-openstack-demo.env
-envsubst < ${WORKSPACE}/test/ete/labs/windriver/onap-openstack-template.env > ${WORKSPACE}/test/ete/labs/windriver/onap-openstack.env
+ssh-keygen -R ${ROBOT_IP}
-openstack stack create -t ${ONAP_WORKDIR}/demo/heat/ONAP/onap_openstack.yaml -e ${WORKSPACE}/test/ete/labs/windriver/onap-openstack.env $STACK
+SSH_KEY=~/.ssh/onap_key
+until ssh -o StrictHostKeychecking=no -i ${SSH_KEY} ubuntu@${ROBOT_IP} "sudo docker ps" | grep openecompete_container
+do
+ sleep 2m
+done
diff --git a/test/ete/scripts/dns-zones/delete-dns-zones.sh b/test/ete/scripts/dns-zones/delete-dns-zones.sh
new file mode 100755
index 000000000..54cafecd8
--- /dev/null
+++ b/test/ete/scripts/dns-zones/delete-dns-zones.sh
@@ -0,0 +1,27 @@
+#!/bin/bash -x
+
+if [ "$#" -ne 1 ]; then
+ echo "Usage: $0 <project-name>"
+ exit 1
+fi
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+
+MULTICLOUD_IP=$($WORKSPACE/test/ete/scripts/get-floating-ip.sh onap-multi-service)
+
+export MULTICLOUD_PLUGIN_ENDPOINT=http://$MULTICLOUD_IP:9005/api/multicloud-titanium_cloud/v0/pod25_RegionOne
+
+export TOKEN=$(curl -v -s -H "Content-Type: application/json" -X POST -d '{"auth": {"identity": {"methods": ["password"],"password": {"user": {"name": "'$OS_USERNAME'","password": "'$OS_PASSWORD'"}}},"scope": {"project":{"domain":{"name":"Default"},"name": "'$1'" } }}}' $MULTICLOUD_PLUGIN_ENDPOINT/identity/v3/auth/tokens 2>&1 | grep X-Subject-Token | sed "s/^.*: //")
+
+
+ZONES=$(curl -v -s -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" -X GET $MULTICLOUD_PLUGIN_ENDPOINT/dns-delegate/v2/zones | jq '.["zones"][] | .name' | tr -d '"' )
+
+echo $ZONES
+
+for ZONENAME in $ZONES; do
+ echo $ZONENAME;
+ export ZONEID=$(curl -v -s -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" -X GET $MULTICLOUD_PLUGIN_ENDPOINT/dns-delegate/v2/zones?name=$ZONENAME |sed 's/^.*"id":"\([a-zA-Z0-9-]*\)",.*$/\1/')
+ curl -v -s -H "Content-Type: application/json" -H "X-Auth-Token: $TOKEN" -X DELETE $MULTICLOUD_PLUGIN_ENDPOINT/dns-delegate/v2/zones/$ZONEID
+done
diff --git a/test/ete/scripts/get-floating-ip.sh b/test/ete/scripts/get-floating-ip.sh
index 55854e73f..196f6d244 100755
--- a/test/ete/scripts/get-floating-ip.sh
+++ b/test/ete/scripts/get-floating-ip.sh
@@ -1,6 +1,3 @@
-#!/bin/sh
+#!/bin/bash
# Get floating IP assigned to a server name
-
-PORT_ID=$(openstack server show -f json $1 | python -c 'import sys, json; print json.load(sys.stdin)["wrs-if:nics"][0]["nic1"]["port_id"]')
-FLOATING_IP=$(openstack floating ip list -f json --port $PORT_ID | python -c 'import sys, json; print json.load(sys.stdin)[0]["Floating IP Address"]')
-echo $FLOATING_IP
+openstack server show -c addresses -f json $1 | jq -r '.addresses' | tr -d ' ' | cut -d ',' -f 2
diff --git a/test/ete/scripts/install_openstack_cli.sh b/test/ete/scripts/install_openstack_cli.sh
index ec5ad1f76..8f1529606 100755
--- a/test/ete/scripts/install_openstack_cli.sh
+++ b/test/ete/scripts/install_openstack_cli.sh
@@ -26,7 +26,7 @@ else
source ${ONAP_VENV}/bin/activate
pip install --upgrade pip
- pip install --upgrade python-openstackclient python-heatclient
+ pip install openstacksdk==0.9.19 python-openstackclient python-heatclient python-designateclient
echo "ONAP_VENV=${ONAP_VENV}" >> $WORKSPACE/env.properties
fi
@@ -38,7 +38,7 @@ if [ -z "$ONAP_WORKDIR" ]; then
fi
echo "ONAP_WORKDIR=${ONAP_WORKDIR}"
if [ ! -d ${ONAP_WORKDIR}/demo ]; then
- git clone http://gerrit.onap.org/r/demo ${ONAP_WORKDIR}/demo
+ git clone https://gerrit.onap.org/r/demo ${ONAP_WORKDIR}/demo
else
pushd ${ONAP_WORKDIR}/demo
git pull
diff --git a/test/ete/scripts/post-jenkins-results.sh b/test/ete/scripts/post-jenkins-results.sh
new file mode 100755
index 000000000..8e01e6cde
--- /dev/null
+++ b/test/ete/scripts/post-jenkins-results.sh
@@ -0,0 +1,6 @@
+#!/bin/bash -x
+
+CRUMB=$(curl -s -u "lf:lf" 'http://12.234.32.117/jenkins/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,":",//crumb)')
+
+curl -v -u "lf:eea50a6d845752e1d2fa459a3c0ca25e" -H "$CRUMB" -d '<run><log encoding="hexBinary">4142430A</log><result>0</result><duration>17</duration></run>' http://12.234.32.117/jenkins/job/external-job/postBuildResult
+
diff --git a/test/ete/scripts/pull-docker-images.sh b/test/ete/scripts/pull-docker-images.sh
new file mode 100755
index 000000000..e14ee8349
--- /dev/null
+++ b/test/ete/scripts/pull-docker-images.sh
@@ -0,0 +1,22 @@
+#!/bin/bash -x
+
+# this script will pull all the docker images listed in the manifest
+# specify a parameter to override the default proxy of nexus3.onap.org:100001
+
+if [ "$#" -ne 1 ]; then
+ PROXY=nexus3.onap.org:10001
+else
+ PROXY=$1
+fi
+
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+
+MANIFEST=${WORKSPACE}/version-manifest/src/main/resources/docker-manifest.csv
+IMAGES=$(tail -n +2 $MANIFEST | tr ',' ':')
+
+for image in $IMAGES; do
+ docker pull ${PROXY}/${image}
+done
diff --git a/test/ete/scripts/remote/run-robot.sh b/test/ete/scripts/remote/run-robot.sh
index ff7223a17..162acbe24 100755
--- a/test/ete/scripts/remote/run-robot.sh
+++ b/test/ete/scripts/remote/run-robot.sh
@@ -1,18 +1,20 @@
-#!/bin/bash
+#!/bin/bash -x
cd /opt
-if [ ! -x /opt/demo.sh ]; then
- echo "Robot VM not initialized"
+docker ps | grep -q openecompete_container
+if [ ! $? -eq 0 ]; then
+ echo "Robot not initialized"
exit 2
fi
if [ ! -d eteshare/logs/demo ]; then
- echo "09d8566ea45e43aa974cf447ed591d77" > /opt/config/openstack_tenant_id.txt
- echo "gary_wu" > /opt/config/openstack_username.txt
- echo $OS_PASSWORD_INPUT > /opt/config/openstack_password.txt
+ echo $OS_PROJECT_ID > /opt/config/openstack_tenant_id.txt
+ echo $OS_USERNAME > /opt/config/openstack_username.txt
+ echo $OS_PASSWORD > /opt/config/openstack_password.txt
/bin/bash /opt/eteshare/config/vm_config2robot.sh
- echo "test" | /opt/demo.sh init_robot
+ # set robot VM http server password
+ echo "admin" | /opt/demo.sh init_robot
fi
/opt/ete.sh health
diff --git a/test/ete/scripts/run-healthcheck.sh b/test/ete/scripts/run-healthcheck.sh
index 1555dad85..2f0f014d3 100755
--- a/test/ete/scripts/run-healthcheck.sh
+++ b/test/ete/scripts/run-healthcheck.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -x
SSH_KEY=~/.ssh/onap_key
@@ -13,10 +13,14 @@ cd $WORKSPACE/test/ete/scripts
ROBOT_IP=$(./get-floating-ip.sh onap-robot)
echo "ROBOT_IP=${ROBOT_IP}"
-# allow direct login as root
-ssh -o StrictHostKeychecking=no -i ${SSH_KEY} ubuntu@${ROBOT_IP} 'sudo cp /home/ubuntu/.ssh/authorized_keys /root/.ssh/'
+if [ "" == "${ROBOT_IP}" ]; then
+ exit 1
+fi
+
+ssh-keygen -R ${ROBOT_IP}
+
+ssh -o StrictHostKeychecking=no -i ${SSH_KEY} root@${ROBOT_IP} "OS_PROJECT_ID=$OS_PROJECT_ID OS_USERNAME=$OS_USERNAME OS_PASSWORD=$OS_PASSWORD bash -s" < ./remote/run-robot.sh
-ssh -o StrictHostKeychecking=no -i ${SSH_KEY} root@${ROBOT_IP} "OS_PASSWORD_INPUT=$OS_PASSWORD_INPUT bash -s" < ./remote/run-robot.sh
LOG_DIR=$(ssh -o StrictHostKeychecking=no -i ${SSH_KEY} root@${ROBOT_IP} "ls -1t /opt/eteshare/logs | head -1")
echo "Browse Robot results at http://${ROBOT_IP}:88/logs/${LOG_DIR}/"
rsync -e "ssh -i ${SSH_KEY}" -avPz root@${ROBOT_IP}:/opt/eteshare/logs/${LOG_DIR}/ $WORKSPACE/archives/
diff --git a/test/ete/scripts/teardown-onap.sh b/test/ete/scripts/teardown-onap.sh
new file mode 100755
index 000000000..eecef876b
--- /dev/null
+++ b/test/ete/scripts/teardown-onap.sh
@@ -0,0 +1,54 @@
+#!/bin/bash -x
+
+if [ -z "$WORKSPACE" ]; then
+ export WORKSPACE=`git rev-parse --show-toplevel`
+fi
+
+source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh
+
+# delete all Designate DNS records; do this first since we rely on multi-vim for this
+$WORKSPACE/test/ete/scripts/dns-zones/delete-dns-zones.sh $OS_PROJECT_NAME
+sleep 1
+
+# delete all instances
+openstack server delete $(openstack server list -c ID -f value)
+sleep 1
+
+# miscellaneous cleanup
+openstack floating ip delete $(openstack floating ip list -c ID -f value)
+sleep 1
+
+ROUTERS=$(openstack router list -c ID -f value)
+echo $ROUTERS
+for ROUTER in $ROUTERS; do
+ echo $ZONENAME;
+ PORTS=$(openstack router show $ROUTER -c "interfaces_info" -f "value" | jq -r '.[].port_id')
+ for PORT in $PORTS; do
+ openstack router remove port $ROUTER $PORT
+ done
+ openstack router delete $ROUTER
+done
+
+openstack port delete $(openstack port list -f value -c ID)
+openstack volume delete $(openstack volume list -f value -c ID)
+
+# delete all except "default" security group
+SECURITY_GROUPS=$(openstack security group list -c Name -f value | grep -v default)
+openstack security group delete $SECURITY_GROUPS
+sleep 1
+
+
+# Delete all existing stacks
+STACKS=$(openstack stack list -c "Stack Name" -f value)
+
+if [ ! -z "${STACKS}" ]; then
+ echo "Deleting Stacks ${STACKS}"
+ openstack stack delete -y $STACKS
+ for STACK in ${STACKS}; do
+ until [ "DELETE_IN_PROGRESS" != "$(openstack stack show -c stack_status -f value $STACK)" ]; do
+ sleep 2
+ done
+ done
+else
+ echo "No existing stacks to delete."
+fi
diff --git a/test/mocks/sniroemulator/pom.xml b/test/mocks/sniroemulator/pom.xml
index b2902fe9a..11b556d34 100644
--- a/test/mocks/sniroemulator/pom.xml
+++ b/test/mocks/sniroemulator/pom.xml
@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.onap.integration</groupId>
<artifactId>sniroemulator</artifactId>
- <version>1.0-SNAPSHOT</version>
+ <version>1.1.0-SNAPSHOT</version>
<name>sniroemulator</name>
<description>onap sniro emulator project based on Spring Boot</description>
diff --git a/version-manifest/pom.xml b/version-manifest/pom.xml
index 0826aeb87..20a7210fb 100644
--- a/version-manifest/pom.xml
+++ b/version-manifest/pom.xml
@@ -8,7 +8,7 @@
</parent>
<groupId>org.onap.integration</groupId>
<artifactId>version-manifest</artifactId>
- <version>0.1.0-SNAPSHOT</version>
+ <version>1.0.0-SNAPSHOT</version>
<packaging>maven-plugin</packaging>
<name>ONAP Version Manifest and Maven Plugin</name>
<url>https://www.onap.org</url>
@@ -87,6 +87,42 @@
<generateGitPropertiesFile>true</generateGitPropertiesFile>
</configuration>
</plugin>
+ <plugin>
+ <groupId>org.codehaus.mojo</groupId>
+ <artifactId>exec-maven-plugin</artifactId>
+ <version>1.6.0</version>
+ <configuration>
+ <executable>sort</executable>
+ </configuration>
+ <executions>
+ <execution>
+ <id>check-docker-manifest-sorted</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>-c</argument>
+ <argument>${project.basedir}/src/main/resources/docker-manifest.csv</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ <execution>
+ <id>check-java-manifest-sorted</id>
+ <phase>validate</phase>
+ <goals>
+ <goal>exec</goal>
+ </goals>
+ <configuration>
+ <arguments>
+ <argument>-c</argument>
+ <argument>${project.basedir}/src/main/resources/java-manifest.csv</argument>
+ </arguments>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/version-manifest/src/main/java/org/onap/integration/versionmanifest/VersionCheckMojo.java b/version-manifest/src/main/java/org/onap/integration/versionmanifest/VersionCheckMojo.java
index 75da50ff0..dfac6cd17 100644
--- a/version-manifest/src/main/java/org/onap/integration/versionmanifest/VersionCheckMojo.java
+++ b/version-manifest/src/main/java/org/onap/integration/versionmanifest/VersionCheckMojo.java
@@ -130,9 +130,9 @@ public class VersionCheckMojo extends AbstractMojo {
// used for formatting
int[] columnWidths = new int[10];
- columnWidths[0] = actualVersions.keySet().stream().mapToInt(String::length).max().orElse(1);
- columnWidths[1] = actualVersions.values().stream().mapToInt(String::length).max().orElse(1);
- columnWidths[2] = expectedVersions.values().stream().mapToInt(String::length).max().orElse(1);
+ columnWidths[0] = actualVersions.keySet().stream().mapToInt(s -> ("" + s).length()).max().orElse(1);
+ columnWidths[1] = actualVersions.values().stream().mapToInt(s -> ("" + s).length()).max().orElse(1);
+ columnWidths[2] = expectedVersions.values().stream().mapToInt(s -> ("" + s).length()).max().orElse(1);
String format = " %-" + columnWidths[0] + "s" + " %" + columnWidths[1] + "s -> %" + columnWidths[2] + "s";
if (mismatches.isEmpty()) {
diff --git a/version-manifest/src/main/resources/docker-manifest.csv b/version-manifest/src/main/resources/docker-manifest.csv
index 75c60c72b..4e538ad6a 100644
--- a/version-manifest/src/main/resources/docker-manifest.csv
+++ b/version-manifest/src/main/resources/docker-manifest.csv
@@ -1,43 +1,74 @@
image,tag
-onap/cli,1.1-STAGING-latest
-onap/portal-apps,1.3-STAGING-latest
-onap/portal-db,1.3-STAGING-latest
-onap/portal-wms,1.3-STAGING-latest
-onap/policy/policy-db,1.1-STAGING-latest
-onap/policy/policy-pe,1.1-STAGING-latest
-onap/policy/policy-drools,1.1-STAGING-latest
-onap/policy/policy-nexus,1.1-STAGING-latest
-onap/ccsdk-odl-image,0.1-STAGING-latest
-onap/ccsdk-odlsli-image,0.1-STAGING-latest
-onap/ccsdk-dgbuilder-image,0.1-STAGING-latest
-onap/sdnc-image,1.2-STAGING-latest
-onap/admportal-sdnc-image,1.2-STAGING-latest
-onap/holmes/dsa,1.0.0-STAGING-latest
-onap/holmes/engine-management,1.0.0-STAGING-latest
-onap/holmes/rule-management,1.0.0-STAGING-latest
-onap/msb/msb_discovery,1.0.0-STAGING-latest
-onap/msb/msb_apigateway,1.0.0-STAGING-latest
-onap/oom/kube2msb,1.0.0-STAGING-latest
-onap/vnfsdk/refrepo,1.0.0-STAGING-latest
-openecomp/mso,1.1-STAGING-latest
-onap/vfc/emsdriver,1.0.0-STAGING-latest
-onap/vfc/catalog,1.0.0-STAGING-latest
-onap/vfc/gvnfmdriver,1.0.0-STAGING-latest
-onap/vfc/jujudriver,1.0.0-STAGING-latest
-onap/vfc/nslcm,1.0.0-STAGING-latest
-onap/vfc/resmanagement,1.0.0-STAGING-latest
-onap/vfc/vnfmgr,1.0.0-STAGING-latest
-onap/vfc/vnflcm,1.0.0-STAGING-latest
-onap/vfc/vnfres,1.0.0-STAGING-latest
-onap/vfc/ztesdncdriver,1.0.0-STAGING-latest
-onap/vfc/wfengine-mgrservice,1.0.0-STAGING-latest
-onap/vfc/wfengine-activiti,1.0.0-STAGING-latest
-onap/vfc/ztevmanagerdriver,1.0.0-STAGING-latest
-onap/vfc/nfvo/svnfm/huawei,1.0.0-STAGING-latest
-onap/vfc/nfvo/svnfm/nokia,1.0.0-STAGING-latest
-openecomp/sdc-backend,1.1-STAGING-latest
-openecomp/sdc-frontend,1.1-STAGING-latest
-openecomp/sdc-elasticsearch,1.1-STAGING-latest
-openecomp/sdc-kibana,1.1-STAGING-latest
-openecomp/sdc-sanity,1.1-STAGING-latest
-
+onap/aai/esr-gui,v1.0.0
+onap/aai/esr-server,v1.0.0
+onap/admportal-sdnc-image,v1.2.1
+onap/ccsdk-dgbuilder-image,v0.1.0
+onap/ccsdk-odl-image,v0.1.0
+onap/ccsdk-odlsli-image,v0.1.0
+onap/clamp,v1.1.0
+onap/cli,v1.1.0
+onap/data-router,v1.1.1
+onap/dmaap/dmaap-mr,1.0.1
+onap/holmes/engine-management,v1.0.0
+onap/holmes/rule-management,v1.0.0
+onap/modeling/javatoscachecker,v1.0.0
+onap/model-loader,v1.1.1
+onap/msb/msb_apigateway,1.0.0
+onap/msb/msb_discovery,1.0.0
+onap/multicloud/framework,v1.0.0
+onap/multicloud/openstack-newton,v1.0.0
+onap/multicloud/openstack-ocata,v1.0.0
+onap/multicloud/openstack-windriver,v1.0.0
+onap/multicloud/vio,v1.0.0
+onap/multicloud/vio-vesagent,v1.0.0
+onap/oom/kube2msb,1.0.0
+onap/org.onap.dcaegen2.collectors.ves.vescollector,v1.1.0
+onap/org.onap.dcaegen2.deployments.bootstrap,v1.1.1
+onap/org.onap.dcaegen2.platform.cdapbroker,v4.0.0
+onap/org.onap.dcaegen2.platform.configbinding,v1.2.0
+onap/org.onap.dcaegen2.platform.deployment-handler,v1.1.0
+onap/org.onap.dcaegen2.platform.inventory-api,v1.2.0
+onap/org.onap.dcaegen2.platform.policy-handler,v1.1.0
+onap/org.onap.dcaegen2.platform.servicechange-handler,v1.0.0
+onap/policy/policy-db,v1.1.1
+onap/policy/policy-drools,v1.1.1
+onap/policy/policy-nexus,v1.1.1
+onap/policy/policy-pe,v1.1.1
+onap/portal-apps,v1.3.0
+onap/portal-db,v1.3.0
+onap/portal-wms,v1.3.0
+onap/sdc/sdc-workflow-designer,1.0.0-STAGING-latest
+onap/sdnc-dmaap-listener-image,v1.2.1
+onap/sdnc-image,v1.2.1
+onap/sdnc-ueb-listener-image,v1.2.1
+onap/search-data-service,v1.1.1
+onap/sparky-be,v1.1.1
+onap/usecase-ui/usecase-ui-server,v1.0.1
+onap/usecase-ui,v1.0.1
+onap/vfc/catalog,v1.0.2
+onap/vfc/emsdriver,v1.0.1
+onap/vfc/gvnfmdriver,v1.0.1
+onap/vfc/jujudriver,v1.0.0
+onap/vfc/nfvo/svnfm/huawei,v1.0.2
+onap/vfc/nfvo/svnfm/nokia,v1.0.2
+onap/vfc/nslcm,v1.0.2
+onap/vfc/resmanagement,v1.0.0
+onap/vfc/vnflcm,v1.0.1
+onap/vfc/vnfmgr,v1.0.1
+onap/vfc/vnfres,v1.0.1
+onap/vfc/wfengine-activiti,v1.0.0
+onap/vfc/wfengine-mgrservice,v1.0.0
+onap/vfc/ztesdncdriver,v1.0.0
+onap/vfc/ztevnfmdriver,v1.0.2
+onap/vnfsdk/refrepo,1.0.0
+onap/vnfsdk/refrepo-postgres,1.0.0
+openecomp/aai-resources,v1.1.1
+openecomp/aai-traversal,v1.1.1
+openecomp/appc-image,v1.2.0
+openecomp/mso,v1.1.1
+openecomp/sdc-backend,v1.1.0
+openecomp/sdc-elasticsearch,v1.1.0
+openecomp/sdc-frontend,v1.1.0
+openecomp/sdc-kibana,v1.1.0
+openecomp/sdc-sanity,v1.1.0
+openecomp/vid,v1.1.1
diff --git a/version-manifest/src/main/resources/java-manifest.csv b/version-manifest/src/main/resources/java-manifest.csv
index e22129f56..de7c5cbeb 100644
--- a/version-manifest/src/main/resources/java-manifest.csv
+++ b/version-manifest/src/main/resources/java-manifest.csv
@@ -1,24 +1,46 @@
groupId,artifactId,version
-org.onap.oparent,oparent,0.1.1
-org.onap.portal.sdk,epsdk-analytics,1.3.0
-org.onap.portal.sdk,epsdk-app-common,1.3.0
-org.onap.portal.sdk,epsdk-app-overlay,1.3.0
-org.onap.portal.sdk,epsdk-core,1.3.0
-org.onap.portal.sdk,epsdk-fw,1.3.0
-org.onap.portal.sdk,epsdk-workflow,1.3.0
+org.onap.aai.aai-common,aai-annotations,1.1.1
+org.onap.aai.aai-common,aai-auth,1.1.1
+org.onap.aai.aai-common,aai-common,1.1.1
+org.onap.aai.aai-common,aai-core,1.1.1
+org.onap.aai.aai-common,aai-schema,1.1.1
+org.onap.aai.aai-common,aai-utils,1.1.1
+org.onap.aai.data-router,data-router,1.1.1
+org.onap.aai.esr-gui,aai-esr-gui,1.0.0
+org.onap.aai.esr-gui,common,1.0.0
+org.onap.aai.esr-gui,distribution,1.0.0
+org.onap.aai.esr-gui,integration,1.0.0
+org.onap.aai.esr-gui,portal,1.0.0
+org.onap.aai.esr-server,aai-esr-server,1.0.0
+org.onap.aai.esr-server,esr-manager,1.0.0
+org.onap.aai.esr-server,standalone,1.0.0
+org.onap.aai.logging-service,common-logging,1.1.1
+org.onap.aai.logging-service,eelf-logging,1.1.1
+org.onap.aai.logging-service,logging-api,1.1.1
+org.onap.aai.logging-service,logging-service,1.1.1
+org.onap.aai.model-loader,model-loader,1.1.1
+org.onap.aai.resources,aai-resources,1.1.1
+org.onap.aai.resources,resources,1.1.1
+org.onap.aai,rest-client,1.1.1
+org.onap.aai.router-core,router-core,1.1.1
+org.onap.aai,search-data-service,1.1.1
+org.onap.aai.sparky-be,sparky-be,1.1.1
+org.onap.aai.sparky-fe,sparky-fe,1.1.1
+org.onap.aai.traversal,aai-traversal,1.1.1
+org.onap.aai.traversal,traversal,1.1.1
org.onap.ccsdk.parent,odlparent-carbon-sr1,0.0.2
org.onap.ccsdk.sli.adaptors,aai-service-provider,0.1.0
org.onap.ccsdk.sli.adaptors,mdsal-resource-provider,0.1.0
org.onap.ccsdk.sli.adaptors,resource-assignment-provider,0.1.0
org.onap.ccsdk.sli.adaptors,sql-resource-provider,0.1.0
-org.onap.ccsdk.sli.core,dblib-provider,0.1.0
-org.onap.ccsdk.sli.core,filters-provider,0.1.0
-org.onap.ccsdk.sli.core,sli-common,0.1.0
-org.onap.ccsdk.sli.core,sli-provider,0.1.0
-org.onap.ccsdk.sli.core,sli-recording,0.1.0
-org.onap.ccsdk.sli.core,sliapi-provider,0.1.0
-org.onap.ccsdk.sli.core,sliPluginUtils-provider,0.1.0
-org.onap.ccsdk.sli.core,utils-provider,0.1.0
+org.onap.ccsdk.sli.core,dblib-provider,0.1.2
+org.onap.ccsdk.sli.core,filters-provider,0.1.2
+org.onap.ccsdk.sli.core,sliapi-provider,0.1.2
+org.onap.ccsdk.sli.core,sli-common,0.1.2
+org.onap.ccsdk.sli.core,sliPluginUtils-provider,0.1.2
+org.onap.ccsdk.sli.core,sli-provider,0.1.2
+org.onap.ccsdk.sli.core,sli-recording,0.1.2
+org.onap.ccsdk.sli.core,utils-provider,1.0.0
org.onap.ccsdk.sli.northbound,asdcApi-provider,0.1.0
org.onap.ccsdk.sli.northbound,dataChange-provider,0.1.0
org.onap.ccsdk.sli.northbound,dmaap-listener,0.1.0
@@ -28,32 +50,180 @@ org.onap.ccsdk.sli.plugins,properties-node-provider,0.1.0
org.onap.ccsdk.sli.plugins,restapi-call-node-provider,0.1.0
org.onap.ccsdk.storage.pgaas,pgaas,1.0.0
org.onap.ccsdk.utils,utils,1.0.0
-org.onap.sdnc.northbound,generic-resource-api.provider,1.2.0
-org.onap.sdnc.northbound,vnfapi-provider,1.2.0
-org.onap.sdnc.northbound,vnftools-provider,1.2.0
+org.onap.clamp.clds.clamp,clamp,1.1.0
+org.onap.cli,cli-framework,1.1.0
+org.onap.cli,cli-main,1.1.0
+org.onap.cli,cli-plugins-aai,1.1.0
+org.onap.cli,cli-plugins-msb,1.1.0
+org.onap.cli,cli-plugins-sample,1.1.0
+org.onap.cli,cli-plugins-sdc,1.1.0
+org.onap.cli,cli-plugins-so,1.1.0
+org.onap.cli,cli-sample-mock-generator,1.1.0
+org.onap.cli,cli-sample-yaml-generator,1.1.0
+org.onap.cli,cli-validation,1.1.0
+org.onap.cli,cli-zip,1.1.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-aai,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-common,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-plugins,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-cdap-tca,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-common,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-dmaap,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-it,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-model,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-tca,2.0.0
+org.onap.dcaegen2.analytics.tca,dcae-analytics-test,2.0.0
+org.onap.dcaegen2.collectors.ves,VESCollector,1.1.4
+org.onap.dcaegen2,dcaegen2,1.1.0
+org.onap.dcaegen2.platform,inventory-api,1.0.0
+org.onap.dcaegen2.platform,servicechange-handler,1.0.0
+org.onap.dmaap.messagerouter.dmaapclient,dmaapClient,1.0.0
+org.onap.dmaap.messagerouter.messageservice,dmaapMR1,1.0.1
+org.onap.dmaap.messagerouter.mirroragent,dmaapMMAgent,1.0.0
+org.onap.dmaap.messagerouter.msgrtr,msgrtr,1.0.0
org.onap.holmes.common,holmes-actions,1.0.0
org.onap.holmes.dsa,dmaap-dsa,1.0.0
org.onap.holmes.engine-management,holmes-engine-d,1.0.0
org.onap.holmes.rule-management,holmes-rulemgt,1.0.0
+org.onap.modeling.toscaparsers,Checker,1.0.0
+org.onap.modeling.toscaparsers,kwalify,1.0.0
+org.onap.modeling.toscaparsers.nfvparser,modeling-toscaparsers-nfvparser,1.0.0
+org.onap.modeling.toscaparsers,Service,1.0.0
org.onap.msb.java-sdk,msb-java-sdk,1.0.0
org.onap.msb.swagger-sdk,swagger-sdk,1.0.0
-org.onap.vfc.nfvo.lcm,vfc-nfvo-lcm,1.0.0
-org.onap.vfc.nfvo.catalog,vfc-nfvo-catalog,1.0.0
-org.onap.vfc.nfvo.driver.ems.ems,emsdriver-standalone,1.0.0
-org.onap.vfc.nfvo.driver.vnfm.gvnfm.gvnfmadapter,vfc-nfvo-driver-vnfm-gvnfm-gvnfmadapter,1.0.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm.zte.vmanager,vfc-nfvo-driver-vnfm-svnfm-zte-vmanager,1.0.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm.huawei.vnfmadapter,hw-vnfmadapter-deployment,1.0.0
-org.onap.vfc.nfvo.driver.vnfm.svnfm,vfcadaptor-deployment,1.0.0
+org.onap.multicloud.framework,multivimbroker,1.0.0
+org.onap.multicloud.openstack,newton,1.0.0
+org.onap.multicloud.openstack,ocata,1.0.0
+org.onap.multicloud.openstack.vmware,vesagent,1.0.0
+org.onap.multicloud.openstack.vmware,vio,1.0.0
+org.onap.multicloud.openstack,windriver,1.0.0
+org.onap.oparent,oparent,0.1.1
+org.onap.policy.common,common-modules,1.1.1
+org.onap.policy.drools-applications,drools-pdp-apps,1.1.1
+org.onap.policy.drools-pdp,drools-pdp,1.1.1
+org.onap.policy.engine,PolicyEngineSuite,1.1.1
+org.onap.portal.sdk,epsdk-analytics,1.3.2
+org.onap.portal.sdk,epsdk-app-common,1.3.2
+org.onap.portal.sdk,epsdk-app-overlay,1.3.2
+org.onap.portal.sdk,epsdk-core,1.3.2
+org.onap.portal.sdk,epsdk-fw,1.3.2
+org.onap.portal.sdk,epsdk-workflow,1.3.2
+org.onap.sdc.sdc-workflow-designer,sdc-workflow-designer,1.0.0
+org.onap.sdnc.northbound,generic-resource-api.provider,1.2.2
+org.onap.sdnc.northbound,vnfapi-provider,1.2.2
+org.onap.sdnc.northbound,vnftools-provider,1.2.2
+org.onap.usecase-ui.server,usecase-ui-server,1.0.1
+org.onap.usecase-ui,usecaseui-common,1.0.1
+org.onap.vfc.gvnfm.vnflcm.lcm,vfc-gvnfm-vnflcm-lcm,1.0.1
+org.onap.vfc.gvnfm.vnfmgr.mgr,vfc-gvnfm-vnfmgr-mgr,1.0.1
+org.onap.vfc.gvnfm.vnfres.res,vfc-gvnfm-vnfres-res,1.0.1
+org.onap.vfc.nfvo.catalog,vfc-nfvo-catalog,1.0.2
+org.onap.vfc.nfvo.driver.ems.ems,emsdriver-standalone,1.0.1
org.onap.vfc.nfvo.driver.sfc.zte.sfc-driver-standalone,vfc-nfvo-driver-sfc-zte-sfc-driver,1.0.0
-org.onap.vfc.gvnfm.vnflcm.lcm,vfc-gvnfm-vnflcm-lcm,1.0.0
-org.onap.vfc.gvnfm.vnfmgr.mgr,vfc-gvnfm-vnfmgr-mgr,1.0.0
-org.onap.vfc.gvnfm.vnfres.res,vfc-gvnfm-vnfres-res,1.0.0
+org.onap.vfc.nfvo.driver.vnfm.gvnfm.gvnfmadapter,vfc-nfvo-driver-vnfm-gvnfm-gvnfmadapter,1.0.1
+org.onap.vfc.nfvo.driver.vnfm.svnfm.huawei.vnfmadapter,hw-vnfmadapter-deployment,1.0.0
+org.onap.vfc.nfvo.driver.vnfm.svnfm,vfcadaptor-deployment,1.0.1
+org.onap.vfc.nfvo.driver.vnfm.svnfm.zte.vmanager,vfc-nfvo-driver-vnfm-svnfm-zte-vmanager,1.0.2
+org.onap.vfc.nfvo.lcm,vfc-nfvo-lcm,1.0.2
org.onap.vfc.nfvo.resmanagement,vfc-nfvo-resmanagement-deployment,1.0.0
org.onap.vfc.nfvo.wfengine,activiti-extension,1.0.0
org.onap.vfc.nfvo.wfengine,workflow-engine-mgr-service,1.0.0
-org.openecomp.sdc.sdc-distribution-client,sdc-main-distribution-client,1.1.32
+org.onap.vnfsdk.refrepo,vnfmarket,1.0.0
+org.openecomp.appc,appc-aai-client-provider,1.2.0
+org.openecomp.appc,appc-ansible-adapter-bundle,1.2.0
+org.openecomp.appc,appc-artifact-handler-provider,1.2.0
+org.openecomp.appc,appc-chef-adapter-bundle,1.2.0
+org.openecomp.appc,appc-command-executor-api,1.2.0
+org.openecomp.appc,appc-command-executor-core,1.2.0
+org.openecomp.appc,appc-common,1.2.0
+org.openecomp.appc,appc-config-adaptor-provider,1.2.0
+org.openecomp.appc,appc-config-audit-provider,1.2.0
+org.openecomp.appc,appc-config-data-services-provider,1.2.0
+org.openecomp.appc,appc-config-encryption-tool-provider,1.2.0
+org.openecomp.appc,appc-config-flow-controller-provider,1.2.0
+org.openecomp.appc,appc-config-generator-provider,1.2.0
+org.openecomp.appc,appc-config-params-provider,1.2.0
+org.openecomp.appc,appc-data-access-lib,1.2.0
+org.openecomp.appc,appc-design-services-provider,1.2.0
+org.openecomp.appc,appc-dg-aai,1.2.0
+org.openecomp.appc,appc-dg-common,1.2.0
+org.openecomp.appc,appc-dg-license-manager,1.2.0
+org.openecomp.appc,appc-dg-mdsal-store,1.2.0
+org.openecomp.appc,appc-dg-netconf,1.2.0
+org.openecomp.appc,appc-dg-provider,1.2.0
+org.openecomp.appc,appc-dg-ssh,1.2.0
+org.openecomp.appc,appc-dg-util-bundle,1.2.0
+org.openecomp.appc,appc-dmaap-adapter-bundle,1.2.0
+org.openecomp.appc,appc-event-listener-bundle,1.2.0
+org.openecomp.appc,appc-iaas-adapter-bundle,1.2.0
+org.openecomp.appc,appc-license-manager-api,1.2.0
+org.openecomp.appc,appc-license-manager-core,1.2.0
+org.openecomp.appc,appc-lifecycle-management-api,1.2.0
+org.openecomp.appc,appc-lifecycle-management-core,1.2.0
+org.openecomp.appc,appc-message-adapter-api,1.2.0
+org.openecomp.appc,appc-message-adapter-factory,1.2.0
+org.openecomp.appc,appc-metric-bundle,1.2.0
+org.openecomp.appc,appc-netconf-adapter-bundle,1.2.0
+org.openecomp.appc,appc-network-inventory-client-provider,1.2.0
+org.openecomp.appc,appc-oam-bundle,1.2.0
+org.openecomp.appc,appc-provider-bundle,1.2.0
+org.openecomp.appc,appc-ranking-framework-lib,1.2.0
+org.openecomp.appc,appc-request-handler-api,1.2.0
+org.openecomp.appc,appc-request-handler-core,1.2.0
+org.openecomp.appc,appc-rest-adapter-bundle,1.2.0
+org.openecomp.appc,appc-rest-healthcheck-adapter-bundle,1.2.0
+org.openecomp.appc,appc-sdc-listener-bundle,1.2.0
+org.openecomp.appc,appc-sequence-generator-bundle,1.2.0
+org.openecomp.appc,appc-ssh-adapter-api,1.2.0
+org.openecomp.appc,appc-ssh-adapter-sshd,1.2.0
+org.openecomp.appc,appc-ssh-adapter-tests,1.2.0
+org.openecomp.appc,appc-workflow-management-api,1.2.0
+org.openecomp.appc,appc-workflow-management-core,1.2.0
+org.openecomp.appc,appc-yang-generator,1.2.0
+org.openecomp.appc.client,client-kit,1.2.0
+org.openecomp.appc.client,client-lib,1.2.0
+org.openecomp.appc.client,client-simulator,1.2.0
+org.openecomp.appc.client,code-generator,1.2.0
+org.openecomp.appc,execution-queue-management-lib,1.2.0
+org.openecomp.appc,lock-manager-api,1.2.0
+org.openecomp.appc,lock-manager-impl,1.2.0
+org.openecomp.appc.plugins,dg-loader-provider,1.2.0
+org.openecomp.appc,state-machine-lib,1.2.0
+org.openecomp.appc,transaction-recorder,1.2.0
org.openecomp.sdc.jtosca,jtosca,1.1.1
-org.openecomp.sdc.sdc-tosca,sdc-tosca,1.1.32
+org.openecomp.sdc.sdc-distribution-client,sdc-main-distribution-client,1.1.32
org.openecomp.sdc.sdc-titan-cassandra,sdc-titan-cassandra,1.0.0
-
-
+org.openecomp.sdc.sdc-tosca,sdc-tosca,1.1.32
+org.openecomp.so.adapters,mso-adapters-rest-interface,1.1.1
+org.openecomp.so.adapters,mso-adapter-utils,1.1.1
+org.openecomp.so.adapters,mso-network-adapter-async-client,1.1.1
+org.openecomp.so.adapters,mso-vnf-adapter-async-client,1.1.1
+org.openecomp.so,common,1.1.1
+org.openecomp.so.libs.openstack-java-sdk,ceilometer-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,ceilometer-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk.client-connectors,http-connector,1.1.0
+org.openecomp.so.libs.openstack-java-sdk.client-connectors,jersey2-connector,1.1.0
+org.openecomp.so.libs.openstack-java-sdk.client-connectors,jersey-connector,1.1.0
+org.openecomp.so.libs.openstack-java-sdk.client-connectors,resteasy-connector,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,glance-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,glance-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,heat-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,heat-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,keystone-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,keystone-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,nova-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,nova-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,openstack-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,quantum-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,quantum-model,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,swift-client,1.1.0
+org.openecomp.so.libs.openstack-java-sdk,swift-model,1.1.0
+org.openecomp.so,mso-api-handler-common,1.1.1
+org.openecomp.so,mso-catalog-db,1.1.1
+org.openecomp.so,MSOCommonBPMN,1.1.1
+org.openecomp.so,MSOCoreBPMN,1.1.1
+org.openecomp.so,MSOInfrastructureBPMN,1.1.1
+org.openecomp.so,mso-requests-db,1.1.1
+org.openecomp.so,MSORESTClient,1.1.1
+org.openecomp.so,status-control,1.1.1
+org.openecomp.vid,asdcclient,1.0.2
diff --git a/version.properties b/version.properties
index f841ab77d..49e2720ae 100644
--- a/version.properties
+++ b/version.properties
@@ -3,12 +3,12 @@
# Note that these variables cannot be structured (e.g. : version.release or version.snapshot etc... )
# because they are used in Jenkins, whose plug-in doesn't support
-major_version=0
-minor_version=1
+major_version=1
+minor_version=0
patch_version=0
base_version=${major_version}.${minor_version}.${patch_version}
# Release must be completed with GIT information # in Jenkins
release_version=${base_version}
-snapshot_version=${base_version}-SNAPSHOT \ No newline at end of file
+snapshot_version=${base_version}-SNAPSHOT