aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--bootstrap/vagrant-onap/Vagrantfile66
-rw-r--r--bootstrap/vagrant-onap/lib/sdc10
-rwxr-xr-xbootstrap/vagrant-onap/lib/sdnc2
-rwxr-xr-xbootstrap/vagrant-onap/lib/vfc4
-rw-r--r--bootstrap/vagrant-onap/tests/test_sdc19
-rw-r--r--bootstrap/vagrant-onap/tests/test_sdnc51
m---------integration10
-rwxr-xr-xtest/csit/plans/dmaap/mrpubsub/setup.sh95
-rwxr-xr-xtest/csit/plans/dmaap/mrpubsub/teardown.sh22
-rw-r--r--test/csit/plans/dmaap/mrpubsub/testplan.txt2
-rw-r--r--test/csit/plans/multicloud-ocata/functionality1/setup.sh2
-rw-r--r--test/csit/plans/sdc/healthCheck/setup.sh33
-rw-r--r--test/csit/plans/sdc/healthCheck/teardown.sh22
-rw-r--r--test/csit/plans/sdc/healthCheck/testplan.txt3
-rw-r--r--test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh2
-rwxr-xr-xtest/csit/run-csit.sh2
-rw-r--r--test/csit/scripts/sdc/clone_and_setup_sdc_data.sh52
-rw-r--r--test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh31
-rw-r--r--test/csit/scripts/sdc/start_sdc_containers.sh108
-rwxr-xr-xtest/csit/tests/dmaap/mrpubsub/mrpubsub.robot73
-rw-r--r--test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot4
-rw-r--r--test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot4
-rw-r--r--test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot4
-rw-r--r--test/csit/tests/sdc/healthCheck/__init__.robot2
-rw-r--r--test/csit/tests/sdc/healthCheck/test1.robot16
-rw-r--r--test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot13
26 files changed, 552 insertions, 100 deletions
diff --git a/bootstrap/vagrant-onap/Vagrantfile b/bootstrap/vagrant-onap/Vagrantfile
index adc73ca0d..800ed992e 100644
--- a/bootstrap/vagrant-onap/Vagrantfile
+++ b/bootstrap/vagrant-onap/Vagrantfile
@@ -42,7 +42,7 @@ box = {
nodes = [
- {
+ {
:name => "aai",
:ips => ['10.252.0.6', "192.168.50.6"],
:macs => [],
@@ -52,7 +52,7 @@ nodes = [
:groups => ["individual"],
:args => ["aai"]
},
- {
+ {
:name => "all-in-one",
:ips => ['10.252.0.3', "192.168.50.3"],
:macs => [],
@@ -63,7 +63,7 @@ nodes = [
:flavor => 'm1.xlarge',
:args => ['mr', 'sdc', 'aai', 'mso', 'robot', 'vid', 'sdnc', 'portal', 'dcae', 'policy', 'appc', 'vfc', 'ccsdk'],
},
- {
+ {
:name => "appc",
:ips => ['10.252.0.14', "192.168.50.14"],
:macs => [],
@@ -73,7 +73,7 @@ nodes = [
:groups => ["individual"],
:args => ["appc"],
},
- {
+ {
:name => "ccsdk",
:ips => ['10.252.0.14', "192.168.50.17"],
:macs => [],
@@ -83,7 +83,7 @@ nodes = [
:groups => ["individual"],
:args => ["ccsdk"],
},
- {
+ {
:name => "dcae",
:ips => ['10.252.0.12', "192.168.50.12"],
:macs => [],
@@ -93,7 +93,7 @@ nodes = [
:groups => ["individual"],
:args => ["dcae"],
},
- {
+ {
:name => "dns",
:ips => ['10.252.0.3', "192.168.50.3"],
:macs => [],
@@ -104,7 +104,7 @@ nodes = [
:flavor => 'm1.small',
:args => [" "]
},
- {
+ {
:name => "message-router",
:ips => ['10.252.0.4', "192.168.50.4"],
:macs => [],
@@ -114,7 +114,7 @@ nodes = [
:groups => ["individual"],
:args => ["mr"],
},
- {
+ {
:name => "mso",
:ips => ['10.252.0.7', "192.168.50.7"],
:macs => [],
@@ -124,7 +124,7 @@ nodes = [
:groups => ["individual"],
:args => ["mso"],
},
- {
+ {
:name => "multicloud",
:ips => ['10.252.0.16', "192.168.50.16"],
:macs => [],
@@ -134,7 +134,7 @@ nodes = [
:groups => ["individual"],
:args => ["multicloud"],
},
- {
+ {
:name => "policy",
:ips => ['10.252.0.13', "192.168.50.13"],
:macs => [],
@@ -144,7 +144,7 @@ nodes = [
:groups => ["individual"],
:args => ["policy"],
},
- {
+ {
:name => "portal",
:ips => ['10.252.0.11', "192.168.50.11"],
:macs => [],
@@ -154,7 +154,7 @@ nodes = [
:groups => ["individual"],
:args => ["portal"],
},
- {
+ {
:name => "robot",
:ips => ['10.252.0.8', "192.168.50.8"],
:macs => [],
@@ -164,7 +164,7 @@ nodes = [
:groups => ["individual"],
:args => ["robot"],
},
- {
+ {
:name => "sdc",
:ips => ['10.252.0.5', "192.168.50.5"],
:macs => [],
@@ -174,7 +174,7 @@ nodes = [
:groups => ["individual"],
:args => ["sdc"],
},
- {
+ {
:name => "sdnc",
:ips => ['10.252.0.10', "192.168.50.10"],
:macs => [],
@@ -182,9 +182,9 @@ nodes = [
:cpu => "50",
:ram => 4 * 1024,
:groups => ["individual"],
- :args => ["sdnc"],
+ :args => ["sdnc"],
},
- {
+ {
:name => "testing",
:ips => ['10.252.0.3', "192.168.50.3"],
:macs => [],
@@ -195,7 +195,7 @@ nodes = [
:flavor => 'm1.small',
:args => [""],
},
- {
+ {
:name => "vfc",
:ips => ['10.252.0.15', "192.168.50.15"],
:macs => [],
@@ -205,8 +205,7 @@ nodes = [
:groups => ["individual"],
:args => ['vfc'],
},
-
- {
+ {
:name => "vid",
:ips => ['10.252.0.9', "192.168.50.9"],
:macs => [],
@@ -215,13 +214,12 @@ nodes = [
:ram => 4 * 1024,
:groups => ["individual"],
:args => ['vid'],
- },
+ },
]
-run_path = 'vagrant_utils/postinstall.sh'
-
-sdc_volume='vol1-sdc-data.vdi'
+run_path = 'vagrant_utils/postinstall.sh'
+sdc_volume = 'vol1-sdc-data.vdi'
Vagrant.require_version ">= 1.8.6"
@@ -229,7 +227,6 @@ Vagrant.require_version ">= 1.8.6"
provider = (ENV['VAGRANT_DEFAULT_PROVIDER'] || :virtualbox).to_sym
puts "[INFO] Provider: #{provider} "
-
vd_conf = ENV.fetch('VD_CONF', 'etc/settings.yaml')
if File.exist?(vd_conf)
require 'yaml'
@@ -277,7 +274,6 @@ case deploy_mode
nodes.select! do |node|
if node[:groups][0].include?("individual")
true if node[:name]
-
end
end
@@ -325,22 +321,22 @@ Vagrant.configure("2") do |config|
config.vm.define node[:name] do |nodeconfig|
# Common Settings:
-
+
nodeconfig.vm.provider "virtualbox" do |vbox|
vbox.customize ['modifyvm', :id, '--nictype1', 'virtio']
vbox.customize ['modifyvm', :id, '--audio', 'none']
vbox.customize ['modifyvm', :id, '--vram', '1']
vbox.customize ['modifyvm', :id, "--cpuhotplug", "off"]
vbox.customize ['modifyvm', :id, "--cpuexecutioncap", node[:cpu]]
- vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]]
+ vbox.customize ['modifyvm', :id, "--cpus", node[:cpus]]
vbox.customize ["modifyvm", :id, "--memory", node[:ram]]
end
-
+
nodeconfig.vm.provider "libvirt" do |lbox|
lbox.memory = node[:ram]
lbox.nested = true
end
-
+
nodeconfig.vm.provider :openstack do |obox|
obox.openstack_auth_url = ENV.fetch('OS_AUTH_URL', '')
obox.tenant_name = ENV.fetch('OS_TENANT_NAME', '')
@@ -357,15 +353,14 @@ Vagrant.configure("2") do |config|
obox.networks = ENV.fetch('OS_NETWORK', '')
obox.flavor = node[:flavor]
obox.server_name = node[:name]
-
end
# Set Box type
nodeconfig.vm.box = box[provider]
-
+
# Set Node name
nodeconfig.vm.hostname = node[:name]
-
+
# Set Sync Folder
nodeconfig.vm.synced_folder ".", "/vagrant", disabled: true
nodeconfig.vm.synced_folder './opt', '/opt/', create: true
@@ -373,11 +368,11 @@ Vagrant.configure("2") do |config|
if !is_windows
nodeconfig.vm.synced_folder '~/.m2', '/root/.m2/', create: true
end
- # Set Network
+ # Set Network
nodeconfig.vm.network :private_network, ip: node[:ips][1]
# Specific settings:
-
+
#Set Storage (For SDC or All-in-one)
if node[:name].include?("all-in-one") || node[:name].include?("sdc")
nodeconfig.vm.provider "virtualbox" do |v|
@@ -386,7 +381,7 @@ Vagrant.configure("2") do |config|
end
v.customize ['storageattach', :id, '--storagectl', 'SATAController', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', sdc_volume]
end
-
+
nodeconfig.vm.provider "libvirt" do |v|
v.storage :file, path: sdc_volume, bus: 'sata', device: 'vdb', size: '2G'
end
@@ -416,4 +411,3 @@ Vagrant.configure("2") do |config|
end #nodeconfig
end #node
end #config
-
diff --git a/bootstrap/vagrant-onap/lib/sdc b/bootstrap/vagrant-onap/lib/sdc
index 275f64485..9ec04b21b 100644
--- a/bootstrap/vagrant-onap/lib/sdc
+++ b/bootstrap/vagrant-onap/lib/sdc
@@ -5,7 +5,7 @@ set -o xtrace
source /var/onap/functions
sdc_src_folder=$git_src_folder/sdc
-sdc_repos=("sdc-os-chef" "jtosca" "sdc-distribution-client" "sdc-titan-cassandra" "sdc-tosca" "sdc_common")
+sdc_repos=("jtosca" "sdc-distribution-client" "sdc-docker-base" "sdc-titan-cassandra" "sdc-tosca" "sdc-vnfdesign" "sdc-workflow-designer" "sdc_common")
# _mount_external_partition() - Create partition and mount the external volume
function _mount_external_partition {
@@ -69,7 +69,7 @@ function get_sdc_images {
if [[ "$compile_repo" != "True" ]]; then
compile_src $sdc_src_folder
fi
- build_docker_image $sdc_src_folder/sdc-os-chef docker
+ build_docker_image $sdc_src_folder/sdc-docker-base
else
for image in backend frontend elasticsearch kibana cassandra sanity; do
pull_openecomp_image openecomp/sdc-$image
@@ -81,8 +81,6 @@ function get_sdc_images {
function install_sdc {
local ENV_NAME=$dmaap_topic
local MR_IP_ADDR='10.0.11.1'
- local RELEASE=$docker_version
- NEXUS_DOCKER_PORT=$(echo $nexus_docker_repo | cut -d':' -f2)
_init_data_folders
cp $sdc_src_folder/sdc-os-chef/environments/Template.json /data/environments
@@ -99,7 +97,9 @@ function install_sdc {
install_docker
if [[ "$pull_docker_image" == "True" ]]; then
docker_openecomp_login
- bash /data/scripts/docker_run.sh -e $ENV_NAME -r $RELEASE -p $NEXUS_DOCKER_PORT
+ bash /data/scripts/docker_run.sh -e $ENV_NAME -r $docker_version -p $(echo $nexus_docker_repo | cut -d':' -f2)
+ else
+ bash /data/scripts/docker_run.sh -e $ENV_NAME -l
fi
}
diff --git a/bootstrap/vagrant-onap/lib/sdnc b/bootstrap/vagrant-onap/lib/sdnc
index 770f6be9f..c6d013b79 100755
--- a/bootstrap/vagrant-onap/lib/sdnc
+++ b/bootstrap/vagrant-onap/lib/sdnc
@@ -6,7 +6,7 @@ source /var/onap/functions
source /var/onap/ccsdk
sdnc_src_folder=$git_src_folder/openecomp/sdnc
-sdnc_repos=("core" "adaptors" "northbound" "plugins" "oam")
+sdnc_repos=("adaptors" "architecture" "core" "features" "northbound" "oam" "parent" "plugins")
# clone_all_sdnc_repos() - Function that clones SDNC source repo.
function clone_all_sdnc_repos {
diff --git a/bootstrap/vagrant-onap/lib/vfc b/bootstrap/vagrant-onap/lib/vfc
index 2d1418258..fb32708e0 100755
--- a/bootstrap/vagrant-onap/lib/vfc
+++ b/bootstrap/vagrant-onap/lib/vfc
@@ -19,8 +19,8 @@ function clone_all_vfc_repos {
function compile_all_vfc_repos {
install_python_package tox
- tox_repos=("gvnfm/vnflcm/lcm" "gvnfm/vnfmgr/mgr" "gvnfm/vnfres/res" "nfvo/lcm"
- "nfvo/driver/vnfm/gvnfm/gvnfmadapter" "nfvo/driver/vnfm/svnfm/zte/vmanager")
+ tox_repos=("gvnfm/vnflcm/lcm" "gvnfm/vnfmgr/mgr" "gvnfm/vnfres/res" "nfvo/lcm" \
+ "nfvo/driver/vnfm/gvnfm/gvnfmadapter" "nfvo/driver/vnfm/svnfm/zte/vmanager")
for dirc in ${tox_repos[@]}; do
pushd $src_folder/$dirc
tox -e py27
diff --git a/bootstrap/vagrant-onap/tests/test_sdc b/bootstrap/vagrant-onap/tests/test_sdc
index bb0cd7010..9b6f5a5a5 100644
--- a/bootstrap/vagrant-onap/tests/test_sdc
+++ b/bootstrap/vagrant-onap/tests/test_sdc
@@ -1,7 +1,7 @@
#!/bin/bash
source /var/onap_tests/_test_base
-source /var/onap/asdc
+source /var/onap/sdc
covered_functions=(
"clone_all_sdc_repos" "compile_all_sdc_repos" "get_sdc_images" "install_sdc"
@@ -26,11 +26,10 @@ function test_compile_all_sdc_repos {
compile_all_sdc_repos
asserts_file_exist $sdc_src_folder/jtosca/target/jtosca-1.1.10-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-ci/target/sdc-distribution-ci-1.1.32-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-client/target/sdc-distribution-client-1.1.32-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-os-chef/target/sdc-os-chef-1.1.0-SNAPSHOT.jar
- asserts_file_exist $sdc_src_folder/sdc-titan-cassandra/target/sdc-titan-cassandra-1.0.0.jar
- asserts_file_exist $sdc_src_folder/sdc-tosca/target/sdc-tosca-1.1.50-SNAPSHOT.jar
+ asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-ci/target/sdc-distribution-ci-1.1.*-SNAPSHOT.jar
+ asserts_file_exist $sdc_src_folder/sdc-distribution-client/sdc-distribution-client/target/sdc-distribution-client-1.1.*-SNAPSHOT.jar
+ asserts_file_exist $sdc_src_folder/sdc-titan-cassandra/target/jamm-0.3.0.jar
+ asserts_file_exist $sdc_src_folder/sdc-tosca/target/sdc-tosca-1.1.*-SNAPSHOT.jar
for dirc in logging sdc-artifact-generator; do
name="openecomp-$dirc"
@@ -46,8 +45,8 @@ function test_get_sdc_images {
clone_all_sdc_repos
get_sdc_images
- for image in backend frontend elasticsearch kibana cassandra sanity; do
- asserts_image openecomp/sdc-$image
+ for image in sanity elasticsearch cassandra kibana frontend backend; do
+ asserts_image openecomp/base_sdc-$image
done
}
@@ -57,8 +56,8 @@ function test_install_sdc {
get_sdc_images
install_sdc
- for image in backend frontend elasticsearch kibana cassandra sanity; do
- asserts_image_running openecomp/sdc-$image
+ for image in elasticsearch cassandra kibana frontend backend; do
+ asserts_image_running openecomp/base_sdc-$image
done
}
diff --git a/bootstrap/vagrant-onap/tests/test_sdnc b/bootstrap/vagrant-onap/tests/test_sdnc
index bc742fad2..ddc1d9c6b 100644
--- a/bootstrap/vagrant-onap/tests/test_sdnc
+++ b/bootstrap/vagrant-onap/tests/test_sdnc
@@ -11,11 +11,14 @@ covered_functions=(
function test_clone_all_sdnc_repos {
clone_all_sdnc_repos
- asserts_file_exist $sdnc_src_folder/core/pom.xml
asserts_file_exist $sdnc_src_folder/adaptors/pom.xml
+ # asserts_file_exist $sdnc_src_folder/architecture/pom.xml
+ asserts_file_exist $sdnc_src_folder/core/pom.xml
+ # asserts_file_exist $sdnc_src_folder/features/pom.xml
asserts_file_exist $sdnc_src_folder/northbound/pom.xml
- asserts_file_exist $sdnc_src_folder/plugins/pom.xml
asserts_file_exist $sdnc_src_folder/oam/pom.xml
+ # asserts_file_exist $sdnc_src_folder/parent/pom.xml
+ asserts_file_exist $sdnc_src_folder/plugins/pom.xml
}
# test_compile_all_sdnc_repos() - Verify the correct compilation of SDNC projects
@@ -23,36 +26,24 @@ function test_compile_all_sdnc_repos {
clone_all_sdnc_repos
compile_all_sdnc_repos
- for adaptor in aai-service mdsal-resource resource-assignment sql-resource; do
- asserts_file_exist $sdnc_src_folder/adaptors/$adaptor/features/target/$adaptor-features-1.1.2-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/adaptors/$adaptor/installer/target/sdnc-$adaptor-1.1.2-SNAPSHOT-installer.zip
- asserts_file_exist $sdnc_src_folder/adaptors/$adaptor/provider/target/$adaptor-provider-1.1.2-SNAPSHOT.jar
- done
-
- asserts_file_exist $sdnc_src_folder/core/dblib/provider/target/dblib-provider-1.1.2-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/core/rootpom/target/rootpom-1.1.0-site_en.xml
- asserts_file_exist $sdnc_src_folder/core/target/sdnc-core-1.1.2-SNAPSHOT-site.xml
-
- for component in asdcApi dataChange vnfapi vnftools; do
- asserts_file_exist $sdnc_src_folder/northbound/$component/features/target/$component-features-1.1.2-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/northbound/$component/installer/target/sdnc-$component-1.1.2-SNAPSHOT-installer.zip
- if [[ "$component" != "vnftools" ]]; then
- asserts_file_exist $sdnc_src_folder/northbound/$component/model/target/$component-model-1.1.2-SNAPSHOT.jar
+ for component in generic-resource-api vnfapi vnftools; do
+ if [[ "$component" != "generic-resource-api" ]]; then
+ if [[ "$component" == "vnfapi" ]]; then
+ asserts_file_exist $sdnc_src_folder/northbound/vnfapi/model/target/vnfapi-model-1.2.0-SNAPSHOT.jar
+ fi
+ asserts_file_exist $sdnc_src_folder/northbound/$component/installer/target/sdnc-$component-1.2.0-SNAPSHOT-installer.zip
+ asserts_file_exist $sdnc_src_folder/northbound/$component/features/target/$component-features-1.2.0-SNAPSHOT.jar
+ asserts_file_exist $sdnc_src_folder/northbound/$component/provider/target/$component-provider-1.2.0-SNAPSHOT.jar
+ else
+ asserts_file_exist $sdnc_src_folder/northbound/generic-resource-api/features/target/generic-resource-api.features-1.2.0-SNAPSHOT.jar
+ asserts_file_exist $sdnc_src_folder/northbound/generic-resource-api/installer/target/sdnc-generic-resources-api-1.2.0-SNAPSHOT-installer.zip
+ asserts_file_exist $sdnc_src_folder/northbound/generic-resource-api/model/target/generic-resource-api.model-1.2.0-SNAPSHOT.jar
+ asserts_file_exist $sdnc_src_folder/northbound/generic-resource-api/provider/target/generic-resource-api.provider-1.2.0-SNAPSHOT.jar
fi
- asserts_file_exist $sdnc_src_folder/northbound/$component/provider/target/$component-provider-1.1.2-SNAPSHOT.jar
- done
- asserts_file_exist $sdnc_src_folder/northbound/dmaap-listener/target/dmaap-listener-1.1.2-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/northbound/ueb-listener/target/ueb-listener-1.1.2-SNAPSHOT.jar
-
- asserts_file_exist $sdnc_src_folder/oam/admportal/target/admportal.1.1.2-SNAPSHOT.zip
- asserts_file_exist $sdnc_src_folder/oam/dgbuilder/target/dgbuilder.1.1.2-SNAPSHOT.zip
- asserts_file_exist $sdnc_src_folder/oam/platform-logic/installer/target/platform-logic-installer-1.1.2-SNAPSHOT.zip
-
- for plugin in properties-node restapi-call-node; do
- asserts_file_exist $sdnc_src_folder/plugins/$plugin/features/target/$plugin-features-1.1.2-SNAPSHOT.jar
- asserts_file_exist $sdnc_src_folder/plugins/$plugin/installer/target/sdnc-$plugin-1.1.2-SNAPSHOT-installer.zip
- asserts_file_exist $sdnc_src_folder/plugins/$plugin/provider/target/$plugin-provider-1.1.2-SNAPSHOT.jar
done
+ asserts_file_exist $sdnc_src_folder/oam/admportal/target/admportal.*-SNAPSHOT.zip
+ asserts_file_exist $sdnc_src_folder/oam/dgbuilder/target/dgbuilder.*-SNAPSHOT.zip
+ asserts_file_exist $sdnc_src_folder/oam/platform-logic/installer/target/platform-logic-installer-*-SNAPSHOT.zip
}
# test_get_sdnc_images() - Verify that the SDNC images are created or retrieved
diff --git a/integration b/integration
new file mode 160000
+Subproject e14cfb55cf1d0a7b8bded66e5b834ee8922ecec
diff --git a/test/csit/plans/dmaap/mrpubsub/setup.sh b/test/csit/plans/dmaap/mrpubsub/setup.sh
new file mode 100755
index 000000000..3e8950f2b
--- /dev/null
+++ b/test/csit/plans/dmaap/mrpubsub/setup.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP DMAAP MR
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+# Place the scripts in run order:
+source ${SCRIPTS}/common_functions.sh
+
+# Clone DMaaP Message Router repo
+mkdir -p $WORKSPACE/archives/dmaapmr
+cd $WORKSPACE/archives/dmaapmr
+#unset http_proxy https_proxy
+git clone --depth 1 http://gerrit.onap.org/r/dmaap/messagerouter/messageservice -b master
+git pull
+cd $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose
+cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/MsgRtrApi.properties /var/tmp/
+
+
+# start DMaaP MR containers with docker compose and configuration from docker-compose.yml
+docker-compose up -d
+
+# Wait for initialization of Docker contaienr for DMaaP MR, Kafka and Zookeeper
+for i in {1..50}; do
+ if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ]
+ then
+ echo "DMaaP Service Running"
+ break
+ else
+ echo sleep $i
+ sleep $i
+ fi
+done
+
+
+DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1)
+KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1)
+ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1)
+
+echo DMAAP_MR_IP=${DMAAP_MR_IP}
+echo KAFKA_IP=${KAFKA_IP}
+echo ZOOKEEPER_IP=${ZOOKEEPER_IP}
+
+# Initial docker-compose up and down is for populating kafka and zookeeper IPs in /var/tmp/MsgRtrApi.properites
+docker-compose down
+
+# Update kafkfa and zookeeper properties in MsgRtrApi.propeties which will be copied to DMaaP Container
+sed -i -e 's/<zookeeper_host>/'$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties
+sed -i -e 's/<kafka_host>:<kafka_port>/'$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties
+
+docker-compose build
+docker-compose up -d
+
+# Wait for initialization of Docker containers
+for i in {1..50}; do
+ if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ]
+ then
+ echo "DMaaP Service Running"
+ break
+ else
+ echo sleep $i
+ sleep $i
+ fi
+done
+
+# Wait for initialization of docker services
+for i in {1..50}; do
+ curl -sS -m 1 ${DMAAP_MR_IP}:3904/events/TestTopic && break
+ echo sleep $i
+ sleep $i
+done
+
+#Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v DMAAP_MR_IP:${DMAAP_MR_IP}"
diff --git a/test/csit/plans/dmaap/mrpubsub/teardown.sh b/test/csit/plans/dmaap/mrpubsub/teardown.sh
new file mode 100755
index 000000000..1b4303240
--- /dev/null
+++ b/test/csit/plans/dmaap/mrpubsub/teardown.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+kill-instance.sh dockercompose_dmaap_1
+kill-instance.sh dockercompose_kafka_1
+kill-instance.sh dockercompose_zookeeper_1
diff --git a/test/csit/plans/dmaap/mrpubsub/testplan.txt b/test/csit/plans/dmaap/mrpubsub/testplan.txt
new file mode 100644
index 000000000..6a98eb790
--- /dev/null
+++ b/test/csit/plans/dmaap/mrpubsub/testplan.txt
@@ -0,0 +1,2 @@
+# Place the suites in run order.
+dmaap/mrpubsub
diff --git a/test/csit/plans/multicloud-ocata/functionality1/setup.sh b/test/csit/plans/multicloud-ocata/functionality1/setup.sh
index 5630849cf..75411781e 100644
--- a/test/csit/plans/multicloud-ocata/functionality1/setup.sh
+++ b/test/csit/plans/multicloud-ocata/functionality1/setup.sh
@@ -20,7 +20,7 @@ source ${SCRIPTS}/common_functions.sh
# start multicloud-ocata
docker run -d --name multicloud-ocata nexus3.onap.org:10001/onap/multicloud/openstack-ocata
SERVICE_IP=`get-instance-ip.sh multicloud-ocata`
-SERVICE_PORT=9004
+SERVICE_PORT=9006
for i in {1..50}; do
curl -sS ${SERVICE_IP}:${SERVICE_PORT} && break
diff --git a/test/csit/plans/sdc/healthCheck/setup.sh b/test/csit/plans/sdc/healthCheck/setup.sh
new file mode 100644
index 000000000..f247be656
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/setup.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+# Place the scripts in run order:
+
+
+source ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
+
+source ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh
+
+
+BE_IP=`get-instance-ip.sh sdc-BE`
+echo BE_IP=${BE_IP}
+
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v BE_IP:${BE_IP}"
+
diff --git a/test/csit/plans/sdc/healthCheck/teardown.sh b/test/csit/plans/sdc/healthCheck/teardown.sh
new file mode 100644
index 000000000..a5f69819e
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/teardown.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+source ${WORKSPACE}/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
+
+# $WORKSPACE/archives/clamp-clone deleted with archives folder when tests starts so we keep it at the end for debugging
diff --git a/test/csit/plans/sdc/healthCheck/testplan.txt b/test/csit/plans/sdc/healthCheck/testplan.txt
new file mode 100644
index 000000000..2b2db1ede
--- /dev/null
+++ b/test/csit/plans/sdc/healthCheck/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+sdc/healthCheck
diff --git a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
index 382cb7a8d..64fa5a4d0 100644
--- a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
+++ b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh
@@ -64,4 +64,4 @@ for i in {1..10}; do
done
# Pass any variables required by Robot test suites in ROBOT_VARIABLES
-ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVMANAGERDRIVER_IP:${ZTEVMANAGERDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP}"
+ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVMANAGERDRIVER_IP:${ZTEVMANAGERDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP} -v SCRIPTS:${SCRIPTS}"
diff --git a/test/csit/run-csit.sh b/test/csit/run-csit.sh
index f499d8ed1..3070239b7 100755
--- a/test/csit/run-csit.sh
+++ b/test/csit/run-csit.sh
@@ -105,7 +105,7 @@ pip install --upgrade ${ROBOT_VENV}/src/onap/testsuite/python-testing-utils
# install chrome driver
if [ ! -x ${ROBOT_VENV}/bin/chromedriver ]; then
pushd ${ROBOT_VENV}/bin
- wget -N http://chromedriver.storage.googleapis.com/2.32/chromedriver_linux64.zip
+ wget -N http://chromedriver.storage.googleapis.com/2.27/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
chmod +x chromedriver
popd
diff --git a/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
new file mode 100644
index 000000000..da421e4cf
--- /dev/null
+++ b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP CLAMP
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh"
+
+# Clone sdc enviroment template
+mkdir -p ${WORKSPACE}/data/environments/
+mkdir -p ${WORKSPACE}/data/clone/
+
+cd ${WORKSPACE}/data/clone
+git clone --depth 1 http://gerrit.onap.org/r/sdc -b master
+
+
+# set enviroment variables
+
+ENV_NAME=CSIT
+MR_IP_ADDR=10.0.0.1
+
+if [ -e /opt/config/public_ip.txt ]
+ then
+ IP_ADDRESS=$(cat /opt/config/public_ip.txt)
+ else
+ IP_ADDRESS=$(ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2)
+ fi
+
+ cat ${WORKSPACE}/data/clone/sdc/sdc-os-chef/environments/Template.json | sed "s/yyy/"$IP_ADDRESS"/g" > ${WORKSPACE}/data/environments/$ENV_NAME.json
+ sed -i "s/xxx/"$ENV_NAME"/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+ sed -i "s/\"ueb_url_list\":.*/\"ueb_url_list\": \""$MR_IP_ADDR","$MR_IP_ADDR"\",/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+ sed -i "s/\"fqdn\":.*/\"fqdn\": [\""$MR_IP_ADDR"\", \""$MR_IP_ADDR"\"]/g" ${WORKSPACE}/data/environments/$ENV_NAME.json
+
+
diff --git a/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
new file mode 100644
index 000000000..e03284248
--- /dev/null
+++ b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/kill_and_remove_dataFolder.sh"
+
+#kill and remove all sdc dockers
+docker stop $(docker ps -a -q --filter="name=sdc")
+docker rm $(docker ps -a -q --filter="name=sdc")
+
+
+#delete data folder
+
+rm -rf ${WORKSPACE}/data/*
+
+
diff --git a/test/csit/scripts/sdc/start_sdc_containers.sh b/test/csit/scripts/sdc/start_sdc_containers.sh
new file mode 100644
index 000000000..31105acb0
--- /dev/null
+++ b/test/csit/scripts/sdc/start_sdc_containers.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP SDC
+# ================================================================================
+# Copyright (C) 2017 AT&T Intellectual Property. All rights
+# reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END============================================
+# ===================================================================
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+
+echo "This is ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh"
+
+
+RELEASE=latest
+LOCAL=false
+SKIPTESTS=false
+DEP_ENV=CSIT
+#[ -f /opt/config/nexus_username.txt ] && NEXUS_USERNAME=$(cat /opt/config/nexus_username.txt) || NEXUS_USERNAME=release
+#[ -f /opt/config/nexus_password.txt ] && NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt) || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW
+#[ -f /opt/config/nexus_docker_repo.txt ] && NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt) || NEXUS_DOCKER_REPO=ecomp-nexus:${PORT}
+#[ -f /opt/config/nexus_username.txt ] && docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
+export IP=`ifconfig eth0 | awk -F: '/inet addr/ {gsub(/ .*/,"",$2); print $2}'`
+#export PREFIX=${NEXUS_DOCKER_REPO}'/openecomp'
+export PREFIX='nexus3.onap.org:10001/openecomp'
+
+#start Elastic-Search
+docker run --detach --name sdc-es --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --memory 1g --memory-swap=1g --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro -e ES_HEAP_SIZE=1024M --volume ${WORKSPACE}/data/ES:/usr/share/elasticsearch/data --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9200:9200 --publish 9300:9300 ${PREFIX}/sdc-elasticsearch:${RELEASE}
+
+#start cassandra
+docker run --detach --name sdc-cs --env RELEASE="${RELEASE}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9042:9042 --publish 9160:9160 ${PREFIX}/sdc-cassandra:${RELEASE}
+
+echo "please wait while CS is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[25D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+ c=$((c-1))
+ sleep 1
+ echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+
+#start kibana
+docker run --detach --name sdc-kbn --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5601:5601 ${PREFIX}/sdc-kibana:${RELEASE}
+
+#start sdc-backend
+docker run --detach --name sdc-BE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 4g --memory-swap=4g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/logs/BE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${PREFIX}/sdc-backend:${RELEASE}
+
+echo "please wait while BE is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[45D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+ c=$((c-1))
+ sleep 1
+ echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+#start Front-End
+docker run --detach --name sdc-FE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/logs/FE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9443:9443 --publish 8181:8181 ${PREFIX}/sdc-frontend:${RELEASE}
+
+echo "please wait while FE is starting..."
+echo ""
+c=120 # seconds to wait
+REWRITE="\e[45D\e[1A\e[K"
+while [ $c -gt 0 ]; do
+ c=$((c-1))
+ sleep 1
+ echo -e "${REWRITE}$c"
+done
+echo -e ""
+
+
+
+
+#TIME=0
+#while [ "$TIME" -lt "$TIME_OUT" ]; do
+# response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://localhost:8080/restservices/clds/v1/clds/healthcheck); echo $response
+
+# if [ "$response" == "200" ]; then
+# echo Clamp and its database well started in $TIME seconds
+# break;
+# fi
+
+# echo Sleep: $INTERVAL seconds before testing if Clamp is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds
+# sleep $INTERVAL
+# TIME=$(($TIME+$INTERVAL))
+#done
+
+#if [ "$TIME" -ge "$TIME_OUT" ]; then
+# echo TIME OUT: Docker containers not started in $TIME_OUT seconds... Could cause problems for tests...
+#fi
diff --git a/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot b/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot
new file mode 100755
index 000000000..c711bd754
--- /dev/null
+++ b/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot
@@ -0,0 +1,73 @@
+*** Settings ***
+Library OperatingSystem
+Library RequestsLibrary
+Library requests
+Library Collections
+Library String
+
+*** Variables ***
+${TARGETURL_PUBLISH} http://${DMAAP_MR_IP}:3904/events/TestTopic1
+${TARGETURL_TOPICS} http://${DMAAP_MR_IP}:3904/topics
+${TARGETURL_SUBSCR} http://${DMAAP_MR_IP}:3904/events/TestTopic1/CG1/C1?timeout=1000
+${TEST_DATA} {"topicName": "TestTopic1"}
+${TOPIC_DATA} {"topicName":"FirstTopic","topicDescription":"This is a TestTopic","partitionCount":"1","replicationCount":"3","transactionEnabled":"true"}
+
+*** Test Cases ***
+Run Topic Creation and Publish
+ [Documentation] Topic Creation
+ [Timeout] 1 minute
+ ${resp}= PostCall ${TARGETURL_PUBLISH} ${TEST_DATA}
+ log ${TARGETURL_PUBLISH}
+ log ${resp.text}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${count}= Evaluate $resp.json().get('count')
+ log 'JSON Response Code:'${resp}
+
+Run Subscribing a message status
+    [Documentation]    Subscribe message status
+ [Timeout] 1 minute
+ ${resp}= GetCall ${TARGETURL_SUBSCR}
+ log ${TARGETURL_SUBSCR}
+ Should Be Equal As Strings ${resp.status_code} 200
+ log 'JSON Response Code :'${resp}
+
+Run check topics are existing
+ [Documentation] Get the count of the Topics
+ [Timeout] 1 minute
+ ${resp}= GetCall ${TARGETURL_TOPICS}
+ log ${TARGETURL_TOPICS}
+ Should Be Equal As Strings ${resp.status_code} 200
+ log 'JSON Response Code :'${resp}
+ ${topics}= Evaluate $resp.json().get('topics')
+ log ${topics}
+ ${ListLength}= Get Length ${topics}
+ log ${ListLength}
+ List Should Contain Value ${topics} TestTopic1
+
+Run Publish and Subscribe a message
+ [Documentation] Publish and Subscribe the message
+ [Timeout] 1 minute
+ ${resp}= PostCall ${TARGETURL_PUBLISH} ${TEST_DATA}
+ log ${TARGETURL_PUBLISH}
+ log ${resp.text}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${sub_resp}= GetCall ${TARGETURL_SUBSCR}
+ log ${TARGETURL_SUBSCR}
+ Should Be Equal As Strings ${sub_resp.status_code} 200
+ log 'JSON Response Code :'${sub_resp}
+ ${ListLength}= Get Length ${sub_resp.json()}
+ log ${ListLength}
+ List Should Contain Value ${sub_resp.json()} {"topicName":"TestTopic1"} case_insensitive=yes
+
+*** Keywords ***
+PostCall
+ [Arguments] ${url} ${data}
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json
+ ${resp}= Evaluate requests.post('${url}',data='${data}', headers=${headers},verify=False) requests
+ [Return] ${resp}
+
+GetCall
+ [Arguments] ${url}
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json
+ ${resp}= Evaluate requests.get('${url}', headers=${headers}, verify=False) requests
+ [Return] ${resp}
diff --git a/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot b/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot
index 5b8417404..45bec5ef5 100644
--- a/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot
+++ b/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot
@@ -30,8 +30,8 @@ httpPost
[Return] ${postResponse}
httpDelete
- [Arguments] ${restHost} ${restUrl} ${data}
+ [Arguments] ${restHost} ${restUrl}
${headers} create dictionary Content-Type=application/json Accept=application/json
create session microservices ${restHost} ${headers}
- ${deleteResponse} delete request microservices ${restUrl} ${data}
+ ${deleteResponse} delete request microservices ${restUrl}
[Return] ${deleteResponse}
diff --git a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot
index 03f840904..182737f54 100644
--- a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot
+++ b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot
@@ -72,8 +72,8 @@ modifyRule
[Return] ${response}
deleteRule
- [Arguments] ${jsonParam} ${codeFlag}=1
- ${response} httpDelete ${ruleMgtHost} ${ruleMgtUrl} ${jsonParam}
+ [Arguments] ${ruleId} ${codeFlag}=1
+ ${response} httpDelete ${ruleMgtHost} ${ruleMgtUrl}/${ruleId}
log ${response.content}
run keyword if ${codeFlag}==1 Should be equal as strings ${response.status_code} 200
run keyword if ${codeFlag}!=1 Should be equal as strings ${response.status_code} 499
diff --git a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
index 03ee70849..ad2a540fd 100644
--- a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
+++ b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot
@@ -120,8 +120,8 @@ modify_rule_with_description
delete_existing_rule
[Documentation] Delete an existing rule.
should not be empty ${RULEID}
- deleteRule {"ruleid":"${RULEID}"}
+ deleteRule ${RULEID}
delete_non_existing_rule
[Documentation] Delete a non-existing rule.
- deleteRule {"ruleid":"${RULEID}"} -1
+ deleteRule ${RULEID} -1
diff --git a/test/csit/tests/sdc/healthCheck/__init__.robot b/test/csit/tests/sdc/healthCheck/__init__.robot
new file mode 100644
index 000000000..8ee10d5f6
--- /dev/null
+++ b/test/csit/tests/sdc/healthCheck/__init__.robot
@@ -0,0 +1,2 @@
+*** Settings ***
+Documentation Sdc - HealthCheck
diff --git a/test/csit/tests/sdc/healthCheck/test1.robot b/test/csit/tests/sdc/healthCheck/test1.robot
new file mode 100644
index 000000000..6d4dc242d
--- /dev/null
+++ b/test/csit/tests/sdc/healthCheck/test1.robot
@@ -0,0 +1,16 @@
+*** Settings ***
+Library Collections
+Library OperatingSystem
+Library RequestsLibrary
+Library json
+
+*** Test Cases ***
+Get Requests health check ok
+ [Tags] get
+ CreateSession sdc-be http://localhost:8080
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json
+ ${resp}= Get Request sdc-be /sdc2/rest/healthCheck headers=&{headers}
+ Should Be Equal As Strings ${resp.status_code} 200
+ @{ITEMS}= Copy List ${resp.json()['componentsInfo']}
+ : FOR ${ELEMENT} IN @{ITEMS}
+ \ Log ${ELEMENT['healthCheckComponent']} ${ELEMENT['healthCheckStatus']}
diff --git a/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot b/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot
index b1aea584a..e0679fbd8 100644
--- a/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot
+++ b/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot
@@ -1,4 +1,5 @@
*** settings ***
+Resource ../../common.robot
Library Collections
Library RequestsLibrary
Library simplejson
@@ -9,7 +10,7 @@ Library HttpLibrary.HTTP
*** Variables ***
@{return_ok_list}= 200 201 202 204
${queryswagger_url} /api/hwvnfm/v1/swagger.json
-${createauthtoken_url} /rest/vnfmmed/v2/auth/tokens
+${createauthtoken_url} /rest/plat/smapp/v1/oauth/token
#json files
${hwvnfm_createtoken_json} ${SCRIPTS}/../tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json
@@ -18,7 +19,7 @@ ${hwvnfm_createtoken_json} ${SCRIPTS}/../tests/vfc/nfvo-driver-svnfm/jsoninpu
SwaggerFuncTest
[Documentation] query swagger info rest test
${headers} Create Dictionary Content-Type=application/json Accept=application/json
- Create Session web_session http://${MSB_IP}:80 headers=${headers}
+ Create Session web_session http://${SERVICE_IP}:8482 headers=${headers}
${resp}= Get Request web_session ${queryswagger_url}
${responese_code}= Convert To String ${resp.status_code}
List Should Contain Value ${return_ok_list} ${responese_code}
@@ -31,10 +32,8 @@ AuthTokenFuncTest
${json_value}= json_from_file ${hwvnfm_createtoken_json}
${json_string}= string_from_json ${json_value}
${headers} Create Dictionary Content-Type=application/json Accept=application/json
- Create Session web_session http://${MSB_IP}:80 headers=${headers}
+ Create Session web_session http://${SERVICE_IP}:8482 headers=${headers}
Set Request Body ${json_string}
- ${resp}= Post Request web_session ${createauthtoken_url} ${json_string}
+ ${resp}= Put Request web_session ${createauthtoken_url} ${json_string}
${responese_code}= Convert To String ${resp.status_code}
- List Should Contain Value ${return_ok_list} ${responese_code}
- ${response_json} json.loads ${resp.content}
- Dictionary Should Contain Key ${response_json} token \ No newline at end of file
+ List Should Contain Value ${return_ok_list} ${responese_code} \ No newline at end of file