Diffstat (limited to 'test/csit/plans')
-rw-r--r--  test/csit/plans/aaf/aafapi/setup.sh  22
-rwxr-xr-x  test/csit/plans/aaf/sms-test-plan/setup.sh  71
-rw-r--r--  test/csit/plans/aaf/sms-test-plan/teardown.sh  25
-rw-r--r--  test/csit/plans/aaf/sms-test-plan/testplan.txt  3
-rw-r--r--  test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-cached.properties (renamed from test/csit/plans/aai/resources/aai-resources/appconfig/titan-cached.properties)  6
-rw-r--r--  test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-realtime.properties (renamed from test/csit/plans/aai/traversal/aai-resources/appconfig/titan-realtime.properties)  6
-rw-r--r--  test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-cached.properties (renamed from test/csit/plans/aai/traversal/aai-resources/appconfig/titan-cached.properties)  6
-rw-r--r--  test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-realtime.properties (renamed from test/csit/plans/aai/resources/aai-resources/appconfig/titan-realtime.properties)  6
-rw-r--r--  test/csit/plans/aai/resources/docker-compose.yml  27
-rw-r--r--  test/csit/plans/aai/resources/setup.sh  8
-rw-r--r--  test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-cached.properties (renamed from test/csit/plans/aai/resources/aai-traversal/appconfig/titan-cached.properties)  6
-rw-r--r--  test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-realtime.properties (renamed from test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-realtime.properties)  6
-rw-r--r--  test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-cached.properties (renamed from test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-cached.properties)  6
-rw-r--r--  test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-realtime.properties (renamed from test/csit/plans/aai/resources/aai-traversal/appconfig/titan-realtime.properties)  6
-rw-r--r--  test/csit/plans/aai/traversal/docker-compose.yml  27
-rw-r--r--  test/csit/plans/aai/traversal/setup.sh  10
-rwxr-xr-x  test/csit/plans/dmaap/mrpubsub/setup.sh  2
-rw-r--r--  test/csit/plans/holmes-rule-management/sanity-check/setup.sh  6
-rw-r--r--  test/csit/plans/multicloud-vmware/functionality1/testplan.txt  4
-rwxr-xr-x  test/csit/plans/multicloud/functionality1/setup.sh  4
-rwxr-xr-x  test/csit/plans/multicloud/functionality1/teardown.sh  1
-rw-r--r--  test/csit/plans/music/music-distributed-kv-store-test-plan/setup.sh  60
-rw-r--r--  test/csit/plans/music/music-distributed-kv-store-test-plan/teardown.sh  21
-rw-r--r--  test/csit/plans/music/music-distributed-kv-store-test-plan/testplan.txt  3
-rwxr-xr-x  test/csit/plans/music/music-test-plan/setup.sh  30
-rw-r--r--  test/csit/plans/portal-sdk/testsuite/.env  29
-rw-r--r--  test/csit/plans/portal-sdk/testsuite/docker-compose.yml  127
-rw-r--r--  test/csit/plans/portal-sdk/testsuite/setup.sh  24
-rw-r--r--  test/csit/plans/portal/testsuite/.env  29
-rw-r--r--  test/csit/plans/portal/testsuite/docker-compose.yml  127
-rw-r--r--  test/csit/plans/portal/testsuite/setup.sh  26
-rw-r--r--  test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/setup.sh  44
-rw-r--r--  test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/teardown.sh  22
-rw-r--r--  test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/testplan.txt  3
-rw-r--r--  test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt  6
-rw-r--r--  test/csit/plans/vnfsdk-ice/sanity-check/setup.sh  2
36 files changed, 625 insertions, 186 deletions
diff --git a/test/csit/plans/aaf/aafapi/setup.sh b/test/csit/plans/aaf/aafapi/setup.sh
index bfaff925c..4a312704f 100644
--- a/test/csit/plans/aaf/aafapi/setup.sh
+++ b/test/csit/plans/aaf/aafapi/setup.sh
@@ -30,24 +30,24 @@ cd $WORKSPACE/archives/aafcsit
#unset http_proxy https_proxy
git clone --depth 1 http://gerrit.onap.org/r/aaf/authz -b master
git pull
-cd $WORKSPACE/archives/aafcsit/authz/authz-service/src/main/resources/docker-compose
+cd $WORKSPACE/archives/aafcsit/authz/auth/auth-service/src/main/resources/docker-compose
pwd
-chmod -R 777 $WORKSPACE/archives/aafcsit/authz/authz-service/src/main/resources/docker-compose
+chmod -R 777 $WORKSPACE/archives/aafcsit/authz/auth/auth-service/src/main/resources/docker-compose
# start aaf containers with docker compose and configuration from docker-compose.yml
docker-compose up -d
# Wait for initialization of Docker container for AAF & Cassandra
-for i in {1..50}; do
+for i in {1..12}; do
if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_aaf_container_1) ] && \
[ $(docker inspect --format '{{ .State.Running }}' dockercompose_cassandra_container_1) ] && \
- [ $(docker inspect --format '{{ .State.Running }}' dockercompose_aaf_container_1) ]
+ [ $(docker inspect --format '{{ .State.Running }}' dockercompose_aaf_container_1) ]
then
- echo "AAF Service Running"
- break
- else
- echo sleep $i
+ echo "AAF Service Running"
+ break
+ else
+ echo sleep $i
sleep $i
fi
done
@@ -62,11 +62,11 @@ echo CASSANDRA_IP=${CASSANDRA_IP}
# Wait for initialization of docker services
-for i in {1..50}; do
- curl -sS -m 1 ${AAF_IP}:8101 && break
+for i in {1..12}; do
+ curl -sS -m 1 ${AAF_IP}:8101 && break
echo sleep $i
sleep $i
done
#Pass any variables required by Robot test suites in ROBOT_VARIABLES
-ROBOT_VARIABLES="-v AAF_IP:${AAF_IP}"
+ROBOT_VARIABLES="-v AAF_IP:${AAF_IP}"
diff --git a/test/csit/plans/aaf/sms-test-plan/setup.sh b/test/csit/plans/aaf/sms-test-plan/setup.sh
new file mode 100755
index 000000000..9f77b698e
--- /dev/null
+++ b/test/csit/plans/aaf/sms-test-plan/setup.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Copyright 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Not sure why this is needed.
+source ${SCRIPTS}/common_functions.sh
+
+CONFIG_FILE=$(pwd)/config/smsconfig.json
+
+mkdir -p $(pwd)/config
+
+docker login -u docker -p docker nexus3.onap.org:10001
+docker pull nexus3.onap.org:10001/onap/aaf/sms
+docker pull docker.io/vault:0.9.5
+
+#
+# Running vault in dev server mode here for CSIT
+# In HELM it runs in production mode
+#
+docker run -e "VAULT_DEV_ROOT_TOKEN_ID=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" \
+ -e SKIP_SETCAP=true \
+ --name vault -d -p 8200:8200 vault:0.9.5
+
+SMSDB_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vault)
+cat << EOF > $CONFIG_FILE
+{
+ "cafile": "auth/selfsignedca.pem",
+ "servercert": "auth/server.cert",
+ "serverkey": "auth/server.key",
+
+ "smsdbaddress": "http://$SMSDB_IP:8200",
+ "vaulttoken": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
+ "disable_tls": true
+}
+EOF
+
+cat $CONFIG_FILE
+
+docker run --workdir /sms -v $CONFIG_FILE:/sms/smsconfig.json \
+ --name sms -d -p 10443:10443 nexus3.onap.org:10001/onap/aaf/sms
+
+SMS_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sms)
+
+echo "###### WAITING FOR ALL CONTAINERS TO COME UP"
+sleep 20
+for i in {1..20}; do
+ curl -sS -m 1 http://${SMSDB_IP}:8200/v1/sys/seal-status && break
+ echo sleep $i
+ sleep $i
+done
+
+#
+# add here all ROBOT_VARIABLES settings
+#
+echo "# sms robot variables settings";
+ROBOT_VARIABLES="-v SMS_HOSTNAME:http://${SMS_IP} -v SMS_PORT:10443"
+
+echo ${ROBOT_VARIABLES}
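
The setup above runs Vault in dev-server mode purely for CSIT, with a fixed root token. As a minimal sketch (not part of the plan; sys/health is a standard Vault endpoint and the token is the dev root token hard-coded above), the instance could be probed by hand like this once the containers are up:

# Sketch only: query the dev-mode Vault started by this plan.
SMSDB_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' vault)
curl -s -H "X-Vault-Token: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee" \
     "http://${SMSDB_IP}:8200/v1/sys/health"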
diff --git a/test/csit/plans/aaf/sms-test-plan/teardown.sh b/test/csit/plans/aaf/sms-test-plan/teardown.sh
new file mode 100644
index 000000000..d6fa32924
--- /dev/null
+++ b/test/csit/plans/aaf/sms-test-plan/teardown.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+docker cp sms:/sms/sms.log .
+cat sms.log
+rm sms.log
+rm -rf config
+docker stop sms vault
+docker rm sms vault
+docker rmi nexus3.onap.org:10001/onap/aaf/sms
+docker rmi docker.io/vault:0.9.5
diff --git a/test/csit/plans/aaf/sms-test-plan/testplan.txt b/test/csit/plans/aaf/sms-test-plan/testplan.txt
new file mode 100644
index 000000000..c2b3b7b1b
--- /dev/null
+++ b/test/csit/plans/aaf/sms-test-plan/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+aaf/aaf-sms-suite \ No newline at end of file
diff --git a/test/csit/plans/aai/resources/aai-resources/appconfig/titan-cached.properties b/test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-cached.properties
index 97bb81863..c2110f77c 100644
--- a/test/csit/plans/aai/resources/aai-resources/appconfig/titan-cached.properties
+++ b/test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-cached.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/traversal/aai-resources/appconfig/titan-realtime.properties b/test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-realtime.properties
index 0c97b753a..4791431a1 100644
--- a/test/csit/plans/aai/traversal/aai-resources/appconfig/titan-realtime.properties
+++ b/test/csit/plans/aai/resources/aai-resources/appconfig/janusgraph-realtime.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/traversal/aai-resources/appconfig/titan-cached.properties b/test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-cached.properties
index 97bb81863..c2110f77c 100644
--- a/test/csit/plans/aai/traversal/aai-resources/appconfig/titan-cached.properties
+++ b/test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-cached.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/resources/aai-resources/appconfig/titan-realtime.properties b/test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-realtime.properties
index 0c97b753a..4791431a1 100644
--- a/test/csit/plans/aai/resources/aai-resources/appconfig/titan-realtime.properties
+++ b/test/csit/plans/aai/resources/aai-traversal/appconfig/janusgraph-realtime.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/resources/docker-compose.yml b/test/csit/plans/aai/resources/docker-compose.yml
index 3f465c3ec..6f62e6ef2 100644
--- a/test/csit/plans/aai/resources/docker-compose.yml
+++ b/test/csit/plans/aai/resources/docker-compose.yml
@@ -6,11 +6,12 @@ services:
environment:
- LOCAL_USER_ID=${USER_ID}
- LOCAL_GROUP_ID=${GROUP_ID}
+ - SKIP_CREATE_DB_SCHEMA_AT_STARTUP=true
ports:
- 8447:8447
volumes:
- - ${CURRENT_PWD}/aai-resources/appconfig/titan-realtime.properties:/opt/app/aai-resources/resources/etc/appprops/titan-realtime.properties
- - ${CURRENT_PWD}/aai-resources/appconfig/titan-cached.properties:/opt/app/aai-resources/resources/etc/appprops/titan-cached.properties
+ - ${CURRENT_PWD}/aai-resources/appconfig/janusgraph-realtime.properties:/opt/app/aai-resources/resources/etc/appprops/janusgraph-realtime.properties
+ - ${CURRENT_PWD}/aai-resources/appconfig/janusgraph-cached.properties:/opt/app/aai-resources/resources/etc/appprops/janusgraph-cached.properties
- ${CURRENT_PWD}/aai-resources/appconfig/aaiconfig.properties:/opt/app/aai-resources/resources/etc/appprops/aaiconfig.properties
- ${CURRENT_PWD}/aai-resources/appconfig/application.properties:/opt/app/aai-resources/resources/application.properties
- ${CURRENT_PWD}/aai-resources/appconfig/logback.xml:/opt/app/aai-resources/resources/logback.xml
@@ -30,8 +31,8 @@ services:
- LOCAL_GROUP_ID=${GROUP_ID}
- DISABLE_UPDATE_QUERY=true
volumes:
- - ${CURRENT_PWD}/aai-traversal/appconfig/titan-realtime.properties:/opt/app/aai-traversal/resources/etc/appprops/titan-realtime.properties
- - ${CURRENT_PWD}/aai-traversal/appconfig/titan-cached.properties:/opt/app/aai-traversal/resources/etc/appprops/titan-cached.properties
+ - ${CURRENT_PWD}/aai-traversal/appconfig/janusgraph-realtime.properties:/opt/app/aai-traversal/resources/etc/appprops/janusgraph-realtime.properties
+ - ${CURRENT_PWD}/aai-traversal/appconfig/janusgraph-cached.properties:/opt/app/aai-traversal/resources/etc/appprops/janusgraph-cached.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/aaiconfig.properties:/opt/app/aai-traversal/resources/etc/appprops/aaiconfig.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/application.properties:/opt/app/aai-traversal/resources/application.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/logback.xml:/opt/app/aai-traversal/resources/logback.xml
@@ -61,16 +62,18 @@ services:
max-size: "30m"
max-file: "5"
aai.hbase.simpledemo.onap.org:
- image: ${HBASE_IMAGE}:${HBASE_VERSION}
+ image: cassandra:2.1
hostname: aai.hbase.simpledemo.onap.org
ports:
- - 2181:2181
- - 8080:8080
- - 8085:8085
- - 9090:9090
- - 16000:16000
- - 16010:16010
- - 16201:16201
+ - 7000:7000
+ - 7001:7001
+ - 7199:7199
+ - 9042:9042
+ environment:
+ - CASSANDRA_SEEDS=aai.hbase.simpledemo.onap.org
+ - CASSANDRA_DC=Heat
+ - CASSANDRA_RACK=Rack1
+ - CASSANDRA_AUTO_BOOTSTRAP=true
logging:
driver: "json-file"
options:
diff --git a/test/csit/plans/aai/resources/setup.sh b/test/csit/plans/aai/resources/setup.sh
index 14b567108..bd5cb5214 100644
--- a/test/csit/plans/aai/resources/setup.sh
+++ b/test/csit/plans/aai/resources/setup.sh
@@ -73,10 +73,8 @@ ${DOCKER_COMPOSE_CMD} stop
${DOCKER_COMPOSE_CMD} rm -f -v
# Start the hbase where the data will be stored
-HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8085';
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8080';
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:9095';
+CASSANDRA_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+wait_for_container $CASSANDRA_CONTAINER_NAME 'Listening for thrift clients';
USER_EXISTS=$(check_if_user_exists aaiadmin);
@@ -102,6 +100,8 @@ else
export USER_ID=$(id -u aaiadmin);
fi;
+$DOCKER_COMPOSE_CMD run --rm aai-resources.api.simpledemo.onap.org createDBSchema.sh
+
RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container $RESOURCES_CONTAINER_NAME 'Resources Microservice Started';
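
The wait_for_container calls in the hunk above come from the shared CSIT scripts and are not part of this diff; a minimal sketch of the log-polling pattern such a helper implements (hypothetical, the real helper may differ):

# Sketch only: poll a container's logs until a marker string appears or a timeout expires.
wait_for_container() {
    local container="$1" marker="$2" timeout="${3:-300}" waited=0
    until docker logs "$container" 2>&1 | grep -q "$marker"; do
        if [ "$waited" -ge "$timeout" ]; then
            echo "Timed out waiting for '$marker' in $container" >&2
            return 1
        fi
        sleep 5
        waited=$((waited + 5))
    done
    echo "$container reported: $marker"
}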
diff --git a/test/csit/plans/aai/resources/aai-traversal/appconfig/titan-cached.properties b/test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-cached.properties
index 97bb81863..c2110f77c 100644
--- a/test/csit/plans/aai/resources/aai-traversal/appconfig/titan-cached.properties
+++ b/test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-cached.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-realtime.properties b/test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-realtime.properties
index 0c97b753a..4791431a1 100644
--- a/test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-realtime.properties
+++ b/test/csit/plans/aai/traversal/aai-resources/appconfig/janusgraph-realtime.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-cached.properties b/test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-cached.properties
index 97bb81863..c2110f77c 100644
--- a/test/csit/plans/aai/traversal/aai-traversal/appconfig/titan-cached.properties
+++ b/test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-cached.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/resources/aai-traversal/appconfig/titan-realtime.properties b/test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-realtime.properties
index 0c97b753a..4791431a1 100644
--- a/test/csit/plans/aai/resources/aai-traversal/appconfig/titan-realtime.properties
+++ b/test/csit/plans/aai/traversal/aai-traversal/appconfig/janusgraph-realtime.properties
@@ -21,10 +21,14 @@
#
query.fast-property=true
+query.smart-limit=false
+
# the following parameters are not reloaded automatically and require a manual bounce
-storage.backend=hbase
+storage.backend=cassandra
storage.hostname=aai.hbase.simpledemo.onap.org
+storage.cassandra.keyspace=aaigraph
+
#schema.default=none
storage.lock.wait-time=300
storage.hbase.table=aaigraph-dev1.dev
diff --git a/test/csit/plans/aai/traversal/docker-compose.yml b/test/csit/plans/aai/traversal/docker-compose.yml
index 3f465c3ec..6f62e6ef2 100644
--- a/test/csit/plans/aai/traversal/docker-compose.yml
+++ b/test/csit/plans/aai/traversal/docker-compose.yml
@@ -6,11 +6,12 @@ services:
environment:
- LOCAL_USER_ID=${USER_ID}
- LOCAL_GROUP_ID=${GROUP_ID}
+ - SKIP_CREATE_DB_SCHEMA_AT_STARTUP=true
ports:
- 8447:8447
volumes:
- - ${CURRENT_PWD}/aai-resources/appconfig/titan-realtime.properties:/opt/app/aai-resources/resources/etc/appprops/titan-realtime.properties
- - ${CURRENT_PWD}/aai-resources/appconfig/titan-cached.properties:/opt/app/aai-resources/resources/etc/appprops/titan-cached.properties
+ - ${CURRENT_PWD}/aai-resources/appconfig/janusgraph-realtime.properties:/opt/app/aai-resources/resources/etc/appprops/janusgraph-realtime.properties
+ - ${CURRENT_PWD}/aai-resources/appconfig/janusgraph-cached.properties:/opt/app/aai-resources/resources/etc/appprops/janusgraph-cached.properties
- ${CURRENT_PWD}/aai-resources/appconfig/aaiconfig.properties:/opt/app/aai-resources/resources/etc/appprops/aaiconfig.properties
- ${CURRENT_PWD}/aai-resources/appconfig/application.properties:/opt/app/aai-resources/resources/application.properties
- ${CURRENT_PWD}/aai-resources/appconfig/logback.xml:/opt/app/aai-resources/resources/logback.xml
@@ -30,8 +31,8 @@ services:
- LOCAL_GROUP_ID=${GROUP_ID}
- DISABLE_UPDATE_QUERY=true
volumes:
- - ${CURRENT_PWD}/aai-traversal/appconfig/titan-realtime.properties:/opt/app/aai-traversal/resources/etc/appprops/titan-realtime.properties
- - ${CURRENT_PWD}/aai-traversal/appconfig/titan-cached.properties:/opt/app/aai-traversal/resources/etc/appprops/titan-cached.properties
+ - ${CURRENT_PWD}/aai-traversal/appconfig/janusgraph-realtime.properties:/opt/app/aai-traversal/resources/etc/appprops/janusgraph-realtime.properties
+ - ${CURRENT_PWD}/aai-traversal/appconfig/janusgraph-cached.properties:/opt/app/aai-traversal/resources/etc/appprops/janusgraph-cached.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/aaiconfig.properties:/opt/app/aai-traversal/resources/etc/appprops/aaiconfig.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/application.properties:/opt/app/aai-traversal/resources/application.properties
- ${CURRENT_PWD}/aai-traversal/appconfig/logback.xml:/opt/app/aai-traversal/resources/logback.xml
@@ -61,16 +62,18 @@ services:
max-size: "30m"
max-file: "5"
aai.hbase.simpledemo.onap.org:
- image: ${HBASE_IMAGE}:${HBASE_VERSION}
+ image: cassandra:2.1
hostname: aai.hbase.simpledemo.onap.org
ports:
- - 2181:2181
- - 8080:8080
- - 8085:8085
- - 9090:9090
- - 16000:16000
- - 16010:16010
- - 16201:16201
+ - 7000:7000
+ - 7001:7001
+ - 7199:7199
+ - 9042:9042
+ environment:
+ - CASSANDRA_SEEDS=aai.hbase.simpledemo.onap.org
+ - CASSANDRA_DC=Heat
+ - CASSANDRA_RACK=Rack1
+ - CASSANDRA_AUTO_BOOTSTRAP=true
logging:
driver: "json-file"
options:
diff --git a/test/csit/plans/aai/traversal/setup.sh b/test/csit/plans/aai/traversal/setup.sh
index 118a1bc9c..70dda84b6 100644
--- a/test/csit/plans/aai/traversal/setup.sh
+++ b/test/csit/plans/aai/traversal/setup.sh
@@ -72,10 +72,8 @@ ${DOCKER_COMPOSE_CMD} stop
${DOCKER_COMPOSE_CMD} rm -f -v
# Start the hbase where the data will be stored
-HBASE_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8085';
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:8080';
-wait_for_container ${HBASE_CONTAINER_NAME} ' Started SelectChannelConnector@0.0.0.0:9095';
+CASSANDRA_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai.hbase.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
+wait_for_container $CASSANDRA_CONTAINER_NAME 'Listening for thrift clients';
USER_EXISTS=$(check_if_user_exists aaiadmin);
@@ -103,12 +101,16 @@ else
export GROUP_ID=$(id -g aaiadmin);
fi;
+$DOCKER_COMPOSE_CMD run --rm aai-resources.api.simpledemo.onap.org createDBSchema.sh
+
RESOURCES_CONTAINER_NAME=$(${DOCKER_COMPOSE_CMD} up -d aai-resources.api.simpledemo.onap.org 2>&1 | grep 'Creating' | grep -v 'volume' | grep -v 'network' | awk '{ print $2; }' | head -1);
wait_for_container $RESOURCES_CONTAINER_NAME 'Resources Microservice Started';
${DOCKER_COMPOSE_CMD} up -d aai-traversal.api.simpledemo.onap.org aai.api.simpledemo.onap.org
TRAVERSAL_CONTAINER_NAME=$(echo $RESOURCES_CONTAINER_NAME | sed 's/aai-resources/aai-traversal/g');
+$DOCKER_COMPOSE_CMD run --rm aai-traversal.api.simpledemo.onap.org install/updateQueryData.sh
+
echo "A&AI Microservices, resources and traversal, are up and running along with HAProxy";
wait_for_container $TRAVERSAL_CONTAINER_NAME 'Traversal Microservice Started';
diff --git a/test/csit/plans/dmaap/mrpubsub/setup.sh b/test/csit/plans/dmaap/mrpubsub/setup.sh
index 3e8950f2b..0a1f9d359 100755
--- a/test/csit/plans/dmaap/mrpubsub/setup.sh
+++ b/test/csit/plans/dmaap/mrpubsub/setup.sh
@@ -35,6 +35,7 @@ cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/Ms
# start DMaaP MR containers with docker compose and configuration from docker-compose.yml
+docker login -u docker -p docker nexus3.onap.org:10001
docker-compose up -d
# Wait for initialization of Docker container for DMaaP MR, Kafka and Zookeeper
@@ -68,6 +69,7 @@ sed -i -e 's/<zookeeper_host>/'$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties
sed -i -e 's/<kafka_host>:<kafka_port>/'$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties
docker-compose build
+docker login -u docker -p docker nexus3.onap.org:10001
docker-compose up -d
# Wait for initialization of Docker containers
diff --git a/test/csit/plans/holmes-rule-management/sanity-check/setup.sh b/test/csit/plans/holmes-rule-management/sanity-check/setup.sh
index ae78ec49a..89688eeaf 100644
--- a/test/csit/plans/holmes-rule-management/sanity-check/setup.sh
+++ b/test/csit/plans/holmes-rule-management/sanity-check/setup.sh
@@ -60,6 +60,12 @@ for i in {1..10}; do
echo sleep $i
sleep $i
done
+
+echo sleep 30s for service registration
+sleep 30
+
+docker logs i-rulemgt
+docker logs i-engine-d
#Pass any variables required by Robot test suites in ROBOT_VARIABLES
ROBOT_VARIABLES="-v MSB_IP:${MSB_IP} -v RULEMGT_IP:${RULEMGT_IP} -v ENGINE_D_IP:${ENGINE_D_IP}"
diff --git a/test/csit/plans/multicloud-vmware/functionality1/testplan.txt b/test/csit/plans/multicloud-vmware/functionality1/testplan.txt
index 2f5ad1b6f..0a2ad45af 100644
--- a/test/csit/plans/multicloud-vmware/functionality1/testplan.txt
+++ b/test/csit/plans/multicloud-vmware/functionality1/testplan.txt
@@ -7,3 +7,7 @@ multicloud-vmware/provision/sanity_test_neutron.robot
multicloud-vmware/nova/sanity-flavor.robot
multicloud-vmware/nova/sanity-host.robot
multicloud-vmware/nova/sanity-server.robot
+multicloud-vmware/samples/sanity-sample.robot
+multicloud-vmware/hosts/sanity-host.robot
+multicloud-vmware/networks/sanity-network.robot
+multicloud-vmware/images/sanity-image.robot
diff --git a/test/csit/plans/multicloud/functionality1/setup.sh b/test/csit/plans/multicloud/functionality1/setup.sh
index 993a39f68..50118a54a 100755
--- a/test/csit/plans/multicloud/functionality1/setup.sh
+++ b/test/csit/plans/multicloud/functionality1/setup.sh
@@ -20,7 +20,9 @@
source ${SCRIPTS}/common_functions.sh
# start multivim-broker
-docker run -d --name multivim-broker nexus3.onap.org:10001/onap/multicloud/framework
+docker run -d --name multivim-vio nexus3.onap.org:10001/onap/multicloud/vio
+docker run -d --name multivim-broker --link multivim-vio -e MSB_ADDR=multivim-vio -e MSB_PORT=9004 nexus3.onap.org:10001/onap/multicloud/framework
+
BROKER_IP=`get-instance-ip.sh multivim-broker`
for i in {1..50}; do
curl -sS ${BROKER_IP}:9001 && break
diff --git a/test/csit/plans/multicloud/functionality1/teardown.sh b/test/csit/plans/multicloud/functionality1/teardown.sh
index 1732649af..a2ef0e35e 100755
--- a/test/csit/plans/multicloud/functionality1/teardown.sh
+++ b/test/csit/plans/multicloud/functionality1/teardown.sh
@@ -16,4 +16,5 @@
#
# This script is sourced by run-csit.sh after Robot test completion.
+kill-instance.sh multivim-vio
kill-instance.sh multivim-broker
diff --git a/test/csit/plans/music/music-distributed-kv-store-test-plan/setup.sh b/test/csit/plans/music/music-distributed-kv-store-test-plan/setup.sh
new file mode 100644
index 000000000..88becf69e
--- /dev/null
+++ b/test/csit/plans/music/music-distributed-kv-store-test-plan/setup.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+#
+# Copyright 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Not sure why this is needed.
+source ${SCRIPTS}/common_functions.sh
+
+# Initial Configuration.
+DATASTORE="consul"
+DATASTORE_IP="localhost"
+
+MOUNTPATH="/dkv_mount_path/configs/"
+DEFAULT_CONFIGS=$(pwd)/mountpath/default
+
+mkdir -p mountpath/default
+
+pushd mountpath/default
+cat << EOF > sampleConfig1.properties
+foo1=bar1
+hello1=world1
+key1=value1
+EOF
+cat << EOF > sampleConfig2.properties
+foo2=bar2
+hello2=world2
+key2=value2
+EOF
+popd
+
+docker login -u docker -p docker nexus3.onap.org:10001
+docker pull nexus3.onap.org:10001/onap/music/distributed-kv-store
+docker run -e DATASTORE=$DATASTORE -e DATASTORE_IP=$DATASTORE_IP -e MOUNTPATH=$MOUNTPATH -d \
+ --name dkv \
+ -v $DEFAULT_CONFIGS:/dkv_mount_path/configs/default \
+ -p 8200:8200 -p 8080:8080 nexus3.onap.org:10001/onap/music/distributed-kv-store
+
+
+echo "###### WAITING FOR DISTRIBUTED KV STORE CONTAINER TO COME UP"
+sleep 10
+
+#
+# add here all ROBOT_VARIABLES settings
+#
+echo "# music robot variables settings";
+ROBOT_VARIABLES="-v DKV_HOSTNAME:http://localhost -v DKV_PORT:8080"
+
+echo ${ROBOT_VARIABLES} \ No newline at end of file
diff --git a/test/csit/plans/music/music-distributed-kv-store-test-plan/teardown.sh b/test/csit/plans/music/music-distributed-kv-store-test-plan/teardown.sh
new file mode 100644
index 000000000..0abf3a62d
--- /dev/null
+++ b/test/csit/plans/music/music-distributed-kv-store-test-plan/teardown.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+rm -rf mountpath
+docker stop dkv
+docker rm dkv
+docker rmi nexus3.onap.org:10001/onap/music/distributed-kv-store
diff --git a/test/csit/plans/music/music-distributed-kv-store-test-plan/testplan.txt b/test/csit/plans/music/music-distributed-kv-store-test-plan/testplan.txt
new file mode 100644
index 000000000..ac37bc5a6
--- /dev/null
+++ b/test/csit/plans/music/music-distributed-kv-store-test-plan/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+music/music-distributed-kv-store-suite \ No newline at end of file
diff --git a/test/csit/plans/music/music-test-plan/setup.sh b/test/csit/plans/music/music-test-plan/setup.sh
index 63d2ef93f..ddfdfc023 100755
--- a/test/csit/plans/music/music-test-plan/setup.sh
+++ b/test/csit/plans/music/music-test-plan/setup.sh
@@ -50,14 +50,21 @@ docker network create music-net;
# Start Cassandra
docker run -d --name music-db --network music-net -p "7000:7000" -p "7001:7001" -p "7199:7199" -p "9042:9042" -p "9160:9160" -e CASSUSER=${CASS_USERNAME} -e CASSPASS=${CASS_PASSWORD} ${CASS_IMG};
+CASSA_IP=`docker inspect -f '{{ $network := index .NetworkSettings.Networks "music-net" }}{{ $network.IPAddress}}' music-db`
+echo "CASSANDRA_IP=${CASSA_IP}"
+${WORKSPACE}/test/csit/scripts/optf-has/has/wait_for_port.sh ${CASSA_IP} 9042
+
# Start Music war
docker run -d --name music-war -v music-vol:/app ${MUSIC_IMG};
# Start Zookeeper
docker run -d --name music-zk --network music-net -p "2181:2181" -p "2888:2888" -p "3888:3888" ${ZK_IMG};
-# Delay for Cassandra
-sleep 20;
+ZOO_IP=`docker inspect -f '{{ $network := index .NetworkSettings.Networks "music-net" }}{{ $network.IPAddress}}' music-zk`
+echo "ZOOKEEPER_IP=${ZOO_IP}"
+
+# Delay between Cassandra/Zookeeper and Tomcat
+sleep 60;
# Start Up tomcat - Needs to have properties,logs dir and war file volume mapped.
docker run -d --name music-tomcat --network music-net -p "8080:8080" -v music-vol:/usr/local/tomcat/webapps -v ${WORK_DIR}/properties:/opt/app/music/etc:ro -v ${WORK_DIR}/logs:/opt/app/music/logs ${TOMCAT_IMG};
@@ -65,20 +72,6 @@ docker run -d --name music-tomcat --network music-net -p "8080:8080" -v music-vo
# Connect tomcat to host bridge network so that its port can be seen.
docker network connect bridge music-tomcat;
-##################################
-
-#
-# add here below the start of all docker containers needed for music CSIT testing
-#
-
-CASSA_IP=`docker inspect --format '{{ .NetworkSettings.Networks.bridge.IPAddress}}' music-db`
-echo "CASSANDRA_IP=${CASSA_IP}"
-
-ZOO_IP=`docker inspect --format '{{ .NetworkSettings.Networks.bridge.IPAddress}}' music-zk`
-echo "ZOOKEEPER_IP=${ZOO_IP}"
-
-${WORKSPACE}/test/csit/scripts/music/music-scripts/wait_for_port.sh ${CASSA_IP} 9042
-
TOMCAT_IP=`docker inspect --format '{{ .NetworkSettings.Networks.bridge.IPAddress}}' music-tomcat`
echo "TOMCAT_IP=${TOMCAT_IP}"
@@ -93,7 +86,10 @@ docker inspect music-war
docker volume inspect music-vol
docker network inspect music-net
-
+echo "dump music content just after music is started"
+docker exec music-db /usr/bin/nodetool status
+docker exec music-db /usr/bin/cqlsh -unelson24 -pwinman123 -e 'SELECT * FROM system_schema.keyspaces'
+docker exec music-db /usr/bin/cqlsh -unelson24 -pwinman123 -e 'SELECT * FROM admin.keyspace_master'
#
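
The wait_for_port.sh script invoked in the hunk above lives under test/csit/scripts and is not shown in this change; a minimal sketch of an equivalent port-wait loop (assumption: nc is available on the host):

# Sketch only: block until host:port accepts TCP connections, or give up after ~5 minutes.
host="$1"; port="$2"
for i in $(seq 1 60); do
    if nc -z "$host" "$port"; then
        echo "$host:$port is reachable"
        exit 0
    fi
    echo "waiting for $host:$port ($i)"
    sleep 5
done
echo "Timed out waiting for $host:$port" >&2
exit 1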
diff --git a/test/csit/plans/portal-sdk/testsuite/.env b/test/csit/plans/portal-sdk/testsuite/.env
index 0b7de9560..27e9aa3fb 100644
--- a/test/csit/plans/portal-sdk/testsuite/.env
+++ b/test/csit/plans/portal-sdk/testsuite/.env
@@ -2,25 +2,34 @@
# used by docker-compose AND by other shell scripts
# Host directory with config files
-LOGS_DIR=./logs
-PROPS_DIR=./properties
-
-
-# Directory within containers
-WEBAPPS_DIR=/opt/apache-tomcat-8.0.37/webapps
-
# Following are ALSO used in demo/boot/portal_vm_init.sh
-EP_IMG_NAME=onap/portal-apps
+EP_IMG_NAME=onap/portal-app
+SDK_IMG_NAME=onap/portal-sdk
DB_IMG_NAME=onap/portal-db
+CDR_IMG_NAME=onap/music/cassandra_music
+ZK_IMG_NAME=zookeeper
WMS_IMG_NAME=onap/portal-wms
+# Deployed with portal; built elsewhere
CLI_IMG_NAME=onap/cli
# Tag all images with this
-PORTAL_TAG=1.3.0
-DOCKER_IMAGE_VERSION=1.3-STAGING-latest
+DOCKER_IMAGE_VERSION=2.1-STAGING-latest
CLI_DOCKER_VERSION=1.1-STAGING-latest
+CDR_IMAGE_VERSION=latest
+ZK_IMAGE_VERSION=3.4
NEXUS_DOCKER_REPO=nexus3.onap.org:10003
+# This is used during builds and in docker-compose;
+# it is never published to the ONAP registry.
+PORTAL_TAG=beijing
+
+# Name of directory in apps container (NOT host)
+WEBAPPS_DIR=/opt/apache-tomcat-8.0.37/webapps
+
+# Required settings with default values.
+# Export shell environment variables on ALL hosts.
+LOGS_DIR=./logs
+PROPS_DIR=./properties
# Optional settings with no defaults.
EXTRA_HOST_IP=""
diff --git a/test/csit/plans/portal-sdk/testsuite/docker-compose.yml b/test/csit/plans/portal-sdk/testsuite/docker-compose.yml
index 7a9fb8caa..dda74c91a 100644
--- a/test/csit/plans/portal-sdk/testsuite/docker-compose.yml
+++ b/test/csit/plans/portal-sdk/testsuite/docker-compose.yml
@@ -1,11 +1,12 @@
# docker-compose for ONAP portal containers: database, microservice, portal apps.
-# Relies on .env file in current directory.
+# Relies on .env file, which CANNOT be specified via command-line option
# Works in multiple environments; does not pull from a Nexus registry.
# Exposes the portal apps docker (but not DB nor WMS dockers) on the host network.
# Images must be pulled from ONAP Nexus registry after logging in like this:
# docker login -u USER -p PASS nexus3.onap.org:10001
+# Uses healthcheck feature added in docker-compose v2.1
-version: '2.0'
+version: '2.1'
services:
@@ -29,11 +30,51 @@ services:
volumes:
# Just specify a path and let the Engine create a volume
- /var/lib/mysql
+ # Inject the onboarding script at start time
+ - ./Apps_Users_OnBoarding_Script.sql:/docker-entrypoint-initdb.d/zzz_apps_users_onboarding.sql
logging:
driver: json-file
+ healthcheck:
+ test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
+ timeout: 10s
+ retries: 30
+
+ # Config files may use hostname "portal-cassandra"
+ portal-cassandra:
+ image: ${CDR_IMG_NAME}:${PORTAL_TAG}
+ environment:
+ - CASSUSER=root
+ - CASSPASS=Aa123456
+ expose:
+ - 7000
+ - 7001
+ - 7199
+ - 9042
+ - 9160
+ ports:
+ - 7000:7000
+ - 7001:7001
+ - 7199:7199
+ - 9042:9042
+ - 9160:9160
+ volumes:
+ - ./portal.cql:/docker-entrypoint-initdb.d/zzz_portal.cql
+ - ./portalsdk.cql:/docker-entrypoint-initdb.d/zzz_portalsdk.cql
+ links:
+ - portal-db
+ depends_on:
+ portal-db:
+ condition: service_healthy
+
+ # Config files may use hostname "portal-zk"
+ portal-zk:
+ image: ${ZK_IMG_NAME}:${PORTAL_TAG}
+ expose:
+ - 2181
+ ports:
+ - 2181:2181
- # An environment variable here CAN override the database URL;
- # instead the value in the config file uses hostname from above
+ # The app config file uses the docker name above
portal-wms:
image: ${WMS_IMG_NAME}:${PORTAL_TAG}
expose:
@@ -41,25 +82,18 @@ services:
links:
- portal-db
depends_on:
- - portal-db
+ portal-db:
+ condition: service_healthy
volumes:
- - ${PROPS_DIR}/ECOMPWIDGETMS/application.properties:/application.properties
+ - ${PROPS_DIR}/ONAPWIDGETMS/application.properties:/application.properties
+ - ${PROPS_DIR}/ONAPWIDGETMS/application.yml:/application.yml
command:
- - /wait-for.sh
- - -t
- - "420"
- - portal-db:3306
- - --
- - /start-wms-cmd.sh
+ - /start-wms.sh
logging:
driver: json-file
- # Environment variables here CANNOT override the database URL because
- # two apps use identical configuration keys with different values
- portal-apps:
+ portal-app:
image: ${EP_IMG_NAME}:${PORTAL_TAG}
- expose:
- - 8989
ports:
- 8989:8080
- 8010:8009
@@ -67,32 +101,51 @@ services:
links:
- portal-db
- portal-wms
+ - portal-zk
+ - portal-cassandra
depends_on:
- - portal-db
- - portal-wms
+ portal-db:
+ condition: service_healthy
+ portal-wms:
+ condition: service_started
volumes:
- - ${PROPS_DIR}/ECOMPPORTALAPP/system.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/fusion/conf/fusion.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/portal.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/openid-connect.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/openid-connect.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/logback.xml:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/logback.xml
- - ${PROPS_DIR}/ECOMPSDKAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/fusion/conf/fusion.properties
- - ${PROPS_DIR}/ECOMPSDKAPP/system.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPSDKAPP/portal.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/system.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/portal.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/dbcapp.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/dbcapp/dbcapp.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTAL/system.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/conf/system.properties
+ - ${PROPS_DIR}/ONAPPORTAL/fusion.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTAL/portal.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/portal.properties
+ - ${PROPS_DIR}/ONAPPORTAL/music.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/music.properties
+ - ${PROPS_DIR}/ONAPPORTAL/openid-connect.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/openid-connect.properties
+ - ${PROPS_DIR}/ONAPPORTAL/logback.xml:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/logback.xml
- ${LOGS_DIR}:/opt/apache-tomcat-8.0.37/logs
command:
- - /wait-for.sh
- - -t
- - "420"
- - portal-db:3306
- - --
- - /start-apps-cmd.sh
+ - /start-apache-tomcat.sh
# see comments in .env file
+ - -i
- $EXTRA_HOST_IP
+ - -n
- $EXTRA_HOST_NAME
logging:
driver: json-file
+
+ portal-sdk:
+ image: ${SDK_IMG_NAME}:${PORTAL_TAG}
+ ports:
+ - 8990:8080
+ links:
+ - portal-db
+ - portal-wms
+ - portal-zk
+ - portal-cassandra
+ depends_on:
+ portal-db:
+ condition: service_healthy
+ volumes:
+ - ${PROPS_DIR}/ONAPPORTALSDK/fusion.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/system.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/conf/system.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/portal.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/portal.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/music.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/music.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/logback.xml:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/logback.xml
+ - ${LOGS_DIR}:/opt/apache-tomcat-8.0.37/logs
+ command:
+ - /start-apache-tomcat.sh
+ logging:
+ driver: json-file
diff --git a/test/csit/plans/portal-sdk/testsuite/setup.sh b/test/csit/plans/portal-sdk/testsuite/setup.sh
index 0c90dc66b..6510311cb 100644
--- a/test/csit/plans/portal-sdk/testsuite/setup.sh
+++ b/test/csit/plans/portal-sdk/testsuite/setup.sh
@@ -48,7 +48,7 @@ NEXUS_DOCKER_REPO=nexus3.onap.org:10003
CURR="$(pwd)"
-git clone http://gerrit.onap.org/r/portal -b "release-1.3.0"
+git clone http://gerrit.onap.org/r/portal -b "master"
# Refresh configuration and scripts
cd portal
@@ -80,15 +80,21 @@ mkdir -p $LOGS_DIR
# Refresh images
docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
-docker pull $NEXUS_DOCKER_REPO/${DB_IMG_NAME}:$DOCKER_IMAGE_VERSION
-docker pull $NEXUS_DOCKER_REPO/${EP_IMG_NAME}:$DOCKER_IMAGE_VERSION
-docker pull $NEXUS_DOCKER_REPO/${WMS_IMG_NAME}:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$DB_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$EP_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$SDK_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$CDR_IMG_NAME:$CDR_IMAGE_VERSION
+docker pull $ZK_IMG_NAME:$ZK_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$WMS_IMG_NAME:$DOCKER_IMAGE_VERSION
docker pull $NEXUS_DOCKER_REPO/$CLI_IMG_NAME:$CLI_DOCKER_VERSION
# Tag them as expected by docker-compose file
-docker tag $NEXUS_DOCKER_REPO/${DB_IMG_NAME}:$DOCKER_IMAGE_VERSION $DB_IMG_NAME:$PORTAL_TAG
-docker tag $NEXUS_DOCKER_REPO/${EP_IMG_NAME}:$DOCKER_IMAGE_VERSION $EP_IMG_NAME:$PORTAL_TAG
-docker tag $NEXUS_DOCKER_REPO/${WMS_IMG_NAME}:$DOCKER_IMAGE_VERSION $WMS_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$DB_IMG_NAME:$DOCKER_IMAGE_VERSION $DB_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$EP_IMG_NAME:$DOCKER_IMAGE_VERSION $EP_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$SDK_IMG_NAME:$DOCKER_IMAGE_VERSION $SDK_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$CDR_IMG_NAME:$CDR_IMAGE_VERSION $CDR_IMG_NAME:$PORTAL_TAG
+docker tag $ZK_IMG_NAME:$ZK_IMAGE_VERSION $ZK_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$WMS_IMG_NAME:$DOCKER_IMAGE_VERSION $WMS_IMG_NAME:$PORTAL_TAG
docker tag $NEXUS_DOCKER_REPO/$CLI_IMG_NAME:$CLI_DOCKER_VERSION $CLI_IMG_NAME:$PORTAL_TAG
@@ -130,7 +136,7 @@ fi
-sleep 3m
+sleep 6m
# WAIT 5 minutes maximum and test every 5 seconds if Portal up using HealthCheck API
TIME_OUT=500
@@ -170,7 +176,7 @@ HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
export HOST_IP=${HOST_IP}
#docker logs deliveries_portal-db_1
-docker logs deliveries_portal-apps_1
+docker logs deliveries_portal-app_1
docker logs deliveries_portal-wms_1
diff --git a/test/csit/plans/portal/testsuite/.env b/test/csit/plans/portal/testsuite/.env
index 0b7de9560..27e9aa3fb 100644
--- a/test/csit/plans/portal/testsuite/.env
+++ b/test/csit/plans/portal/testsuite/.env
@@ -2,25 +2,34 @@
# used by docker-compose AND by other shell scripts
# Host directory with config files
-LOGS_DIR=./logs
-PROPS_DIR=./properties
-
-
-# Directory within containers
-WEBAPPS_DIR=/opt/apache-tomcat-8.0.37/webapps
-
# Following are ALSO used in demo/boot/portal_vm_init.sh
-EP_IMG_NAME=onap/portal-apps
+EP_IMG_NAME=onap/portal-app
+SDK_IMG_NAME=onap/portal-sdk
DB_IMG_NAME=onap/portal-db
+CDR_IMG_NAME=onap/music/cassandra_music
+ZK_IMG_NAME=zookeeper
WMS_IMG_NAME=onap/portal-wms
+# Deployed with portal; built elsewhere
CLI_IMG_NAME=onap/cli
# Tag all images with this
-PORTAL_TAG=1.3.0
-DOCKER_IMAGE_VERSION=1.3-STAGING-latest
+DOCKER_IMAGE_VERSION=2.1-STAGING-latest
CLI_DOCKER_VERSION=1.1-STAGING-latest
+CDR_IMAGE_VERSION=latest
+ZK_IMAGE_VERSION=3.4
NEXUS_DOCKER_REPO=nexus3.onap.org:10003
+# This is used during builds and in docker-compose;
+# it is never published to the ONAP registry.
+PORTAL_TAG=beijing
+
+# Name of directory in apps container (NOT host)
+WEBAPPS_DIR=/opt/apache-tomcat-8.0.37/webapps
+
+# Required settings with default values.
+# Export shell environment variables on ALL hosts.
+LOGS_DIR=./logs
+PROPS_DIR=./properties
# Optional settings with no defaults.
EXTRA_HOST_IP=""
diff --git a/test/csit/plans/portal/testsuite/docker-compose.yml b/test/csit/plans/portal/testsuite/docker-compose.yml
index 7a9fb8caa..dda74c91a 100644
--- a/test/csit/plans/portal/testsuite/docker-compose.yml
+++ b/test/csit/plans/portal/testsuite/docker-compose.yml
@@ -1,11 +1,12 @@
# docker-compose for ONAP portal containers: database, microservice, portal apps.
-# Relies on .env file in current directory.
+# Relies on .env file, which CANNOT be specified via command-line option
# Works in multiple environments; does not pull from a Nexus registry.
# Exposes the portal apps docker (but not DB nor WMS dockers) on the host network.
# Images must be pulled from ONAP Nexus registry after logging in like this:
# docker login -u USER -p PASS nexus3.onap.org:10001
+# Uses healthcheck feature added in docker-compose v2.1
-version: '2.0'
+version: '2.1'
services:
@@ -29,11 +30,51 @@ services:
volumes:
# Just specify a path and let the Engine create a volume
- /var/lib/mysql
+ # Inject the onboarding script at start time
+ - ./Apps_Users_OnBoarding_Script.sql:/docker-entrypoint-initdb.d/zzz_apps_users_onboarding.sql
logging:
driver: json-file
+ healthcheck:
+ test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
+ timeout: 10s
+ retries: 30
+
+ # Config files may use hostname "portal-cassandra"
+ portal-cassandra:
+ image: ${CDR_IMG_NAME}:${PORTAL_TAG}
+ environment:
+ - CASSUSER=root
+ - CASSPASS=Aa123456
+ expose:
+ - 7000
+ - 7001
+ - 7199
+ - 9042
+ - 9160
+ ports:
+ - 7000:7000
+ - 7001:7001
+ - 7199:7199
+ - 9042:9042
+ - 9160:9160
+ volumes:
+ - ./portal.cql:/docker-entrypoint-initdb.d/zzz_portal.cql
+ - ./portalsdk.cql:/docker-entrypoint-initdb.d/zzz_portalsdk.cql
+ links:
+ - portal-db
+ depends_on:
+ portal-db:
+ condition: service_healthy
+
+ # Config files may use hostname "portal-zk"
+ portal-zk:
+ image: ${ZK_IMG_NAME}:${PORTAL_TAG}
+ expose:
+ - 2181
+ ports:
+ - 2181:2181
- # An environment variable here CAN override the database URL;
- # instead the value in the config file uses hostname from above
+ # The app config file uses the docker name above
portal-wms:
image: ${WMS_IMG_NAME}:${PORTAL_TAG}
expose:
@@ -41,25 +82,18 @@ services:
links:
- portal-db
depends_on:
- - portal-db
+ portal-db:
+ condition: service_healthy
volumes:
- - ${PROPS_DIR}/ECOMPWIDGETMS/application.properties:/application.properties
+ - ${PROPS_DIR}/ONAPWIDGETMS/application.properties:/application.properties
+ - ${PROPS_DIR}/ONAPWIDGETMS/application.yml:/application.yml
command:
- - /wait-for.sh
- - -t
- - "420"
- - portal-db:3306
- - --
- - /start-wms-cmd.sh
+ - /start-wms.sh
logging:
driver: json-file
- # Environment variables here CANNOT override the database URL because
- # two apps use identical configuration keys with different values
- portal-apps:
+ portal-app:
image: ${EP_IMG_NAME}:${PORTAL_TAG}
- expose:
- - 8989
ports:
- 8989:8080
- 8010:8009
@@ -67,32 +101,51 @@ services:
links:
- portal-db
- portal-wms
+ - portal-zk
+ - portal-cassandra
depends_on:
- - portal-db
- - portal-wms
+ portal-db:
+ condition: service_healthy
+ portal-wms:
+ condition: service_started
volumes:
- - ${PROPS_DIR}/ECOMPPORTALAPP/system.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/fusion/conf/fusion.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/portal.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/openid-connect.properties:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/openid-connect.properties
- - ${PROPS_DIR}/ECOMPPORTALAPP/logback.xml:${WEBAPPS_DIR}/ECOMPPORTAL/WEB-INF/classes/logback.xml
- - ${PROPS_DIR}/ECOMPSDKAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/fusion/conf/fusion.properties
- - ${PROPS_DIR}/ECOMPSDKAPP/system.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPSDKAPP/portal.properties:${WEBAPPS_DIR}/ECOMPSDKAPP/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/system.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/conf/system.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/portal.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/classes/portal.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/dbcapp.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/dbcapp/dbcapp.properties
- - ${PROPS_DIR}/ECOMPDBCAPP/fusion.properties:${WEBAPPS_DIR}/ECOMPDBCAPP/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTAL/system.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/conf/system.properties
+ - ${PROPS_DIR}/ONAPPORTAL/fusion.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTAL/portal.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/portal.properties
+ - ${PROPS_DIR}/ONAPPORTAL/music.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/music.properties
+ - ${PROPS_DIR}/ONAPPORTAL/openid-connect.properties:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/openid-connect.properties
+ - ${PROPS_DIR}/ONAPPORTAL/logback.xml:${WEBAPPS_DIR}/ONAPPORTAL/WEB-INF/classes/logback.xml
- ${LOGS_DIR}:/opt/apache-tomcat-8.0.37/logs
command:
- - /wait-for.sh
- - -t
- - "420"
- - portal-db:3306
- - --
- - /start-apps-cmd.sh
+ - /start-apache-tomcat.sh
# see comments in .env file
+ - -i
- $EXTRA_HOST_IP
+ - -n
- $EXTRA_HOST_NAME
logging:
driver: json-file
+
+ portal-sdk:
+ image: ${SDK_IMG_NAME}:${PORTAL_TAG}
+ ports:
+ - 8990:8080
+ links:
+ - portal-db
+ - portal-wms
+ - portal-zk
+ - portal-cassandra
+ depends_on:
+ portal-db:
+ condition: service_healthy
+ volumes:
+ - ${PROPS_DIR}/ONAPPORTALSDK/fusion.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/fusion/conf/fusion.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/system.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/conf/system.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/portal.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/portal.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/music.properties:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/music.properties
+ - ${PROPS_DIR}/ONAPPORTALSDK/logback.xml:${WEBAPPS_DIR}/ONAPPORTALSDK/WEB-INF/classes/logback.xml
+ - ${LOGS_DIR}:/opt/apache-tomcat-8.0.37/logs
+ command:
+ - /start-apache-tomcat.sh
+ logging:
+ driver: json-file
diff --git a/test/csit/plans/portal/testsuite/setup.sh b/test/csit/plans/portal/testsuite/setup.sh
index 0c90dc66b..76cf5f373 100644
--- a/test/csit/plans/portal/testsuite/setup.sh
+++ b/test/csit/plans/portal/testsuite/setup.sh
@@ -48,7 +48,7 @@ NEXUS_DOCKER_REPO=nexus3.onap.org:10003
CURR="$(pwd)"
-git clone http://gerrit.onap.org/r/portal -b "release-1.3.0"
+git clone http://gerrit.onap.org/r/portal -b "master"
# Refresh configuration and scripts
cd portal
@@ -80,15 +80,21 @@ mkdir -p $LOGS_DIR
# Refresh images
docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO
-docker pull $NEXUS_DOCKER_REPO/${DB_IMG_NAME}:$DOCKER_IMAGE_VERSION
-docker pull $NEXUS_DOCKER_REPO/${EP_IMG_NAME}:$DOCKER_IMAGE_VERSION
-docker pull $NEXUS_DOCKER_REPO/${WMS_IMG_NAME}:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$DB_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$EP_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$SDK_IMG_NAME:$DOCKER_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$CDR_IMG_NAME:$CDR_IMAGE_VERSION
+docker pull $ZK_IMG_NAME:$ZK_IMAGE_VERSION
+docker pull $NEXUS_DOCKER_REPO/$WMS_IMG_NAME:$DOCKER_IMAGE_VERSION
docker pull $NEXUS_DOCKER_REPO/$CLI_IMG_NAME:$CLI_DOCKER_VERSION
# Tag them as expected by docker-compose file
-docker tag $NEXUS_DOCKER_REPO/${DB_IMG_NAME}:$DOCKER_IMAGE_VERSION $DB_IMG_NAME:$PORTAL_TAG
-docker tag $NEXUS_DOCKER_REPO/${EP_IMG_NAME}:$DOCKER_IMAGE_VERSION $EP_IMG_NAME:$PORTAL_TAG
-docker tag $NEXUS_DOCKER_REPO/${WMS_IMG_NAME}:$DOCKER_IMAGE_VERSION $WMS_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$DB_IMG_NAME:$DOCKER_IMAGE_VERSION $DB_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$EP_IMG_NAME:$DOCKER_IMAGE_VERSION $EP_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$SDK_IMG_NAME:$DOCKER_IMAGE_VERSION $SDK_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$CDR_IMG_NAME:$CDR_IMAGE_VERSION $CDR_IMG_NAME:$PORTAL_TAG
+docker tag $ZK_IMG_NAME:$ZK_IMAGE_VERSION $ZK_IMG_NAME:$PORTAL_TAG
+docker tag $NEXUS_DOCKER_REPO/$WMS_IMG_NAME:$DOCKER_IMAGE_VERSION $WMS_IMG_NAME:$PORTAL_TAG
docker tag $NEXUS_DOCKER_REPO/$CLI_IMG_NAME:$CLI_DOCKER_VERSION $CLI_IMG_NAME:$PORTAL_TAG
@@ -130,7 +136,7 @@ fi
-sleep 3m
+sleep 6m
# WAIT 5 minutes maximum and test every 5 seconds if Portal up using HealthCheck API
TIME_OUT=500
@@ -169,8 +175,8 @@ fi
HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
export HOST_IP=${HOST_IP}
-#docker logs deliveries_portal-db_1
-docker logs deliveries_portal-apps_1
+docker logs deliveries_portal-db_1
+docker logs deliveries_portal-app_1
docker logs deliveries_portal-wms_1
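
The "WAIT 5 minutes maximum" comment above refers to a polling loop elsewhere in setup.sh that is not part of this hunk; a minimal sketch of that pattern follows (the health-check URL is a placeholder assumption, not taken from the plan):

# Sketch only: poll an HTTP health endpoint every 5 seconds, up to TIME_OUT seconds.
TIME_OUT=500
INTERVAL=5
TIME=0
while [ "$TIME" -lt "$TIME_OUT" ]; do
    status=$(curl -s -o /dev/null -w '%{http_code}' "http://${HOST_IP}:8989/ONAPPORTAL/portalApi/healthCheck")
    if [ "$status" = "200" ]; then
        echo "Portal health check passed"
        break
    fi
    sleep $INTERVAL
    TIME=$((TIME + INTERVAL))
done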
diff --git a/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/setup.sh
new file mode 100644
index 000000000..4d97f33eb
--- /dev/null
+++ b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/setup.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Place the scripts in run order:
+# Start all process required for executing test case
+
+#login to the onap nexus docker repo
+docker login -u docker -p docker nexus3.onap.org:10001
+
+# Start MSB
+docker run -d -p 8500:8500 --name msb_consul consul:0.9.3
+CONSUL_IP=`get-instance-ip.sh msb_consul`
+echo CONSUL_IP=${CONSUL_IP}
+docker run -d -p 10081:10081 -e CONSUL_IP=$CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery
+DISCOVERY_IP=`get-instance-ip.sh msb_discovery`
+echo DISCOVERY_IP=${DISCOVERY_IP}
+docker run -d -p 80:80 -e CONSUL_IP=$CONSUL_IP -e SDCLIENT_IP=$DISCOVERY_IP --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway
+MSB_IP=`get-instance-ip.sh msb_internal_apigateway`
+echo MSB_IP=${MSB_IP}
+
+# Start resmgr
+docker run -d --name vfc-multivimproxy -e MSB_ADDR=${MSB_IP}:80 nexus3.onap.org:10001/onap/vfc/multivimproxy
+RESMGR_IP=`get-instance-ip.sh vfc-multivimproxy`
+for i in {1..20}; do
+ curl -sS ${RESMGR_IP}:8486 && break
+ echo sleep $i
+ sleep $i
+done
+
+# Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="-v MSB_IP:${MSB_IP} -v RESMGR_IP:${RESMGR_IP}"
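
get-instance-ip.sh, used throughout the setup above, is another shared CSIT script not included in this diff; a minimal sketch of the docker inspect call it presumably wraps (hypothetical):

#!/bin/bash
# Sketch only: print the network IP of the named container.
docker inspect --format '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$1"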
diff --git a/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/teardown.sh b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/teardown.sh
new file mode 100644
index 000000000..c158d1997
--- /dev/null
+++ b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/teardown.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2018 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This script is sourced by run-csit.sh after Robot test completion.
+kill-instance.sh msb_internal_apigateway
+kill-instance.sh msb_discovery
+kill-instance.sh msb_consul
+kill-instance.sh vfc-multivimproxy
diff --git a/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/testplan.txt b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/testplan.txt
new file mode 100644
index 000000000..f81be8ecf
--- /dev/null
+++ b/test/csit/plans/vfc-nfvo-multivimproxy/sanity-check/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+vfc/nfvo-multivimproxy/test.robot \ No newline at end of file
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
index 5f6910bdd..ff9f4d5d6 100644
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/testplan.txt
@@ -1,4 +1,4 @@
-# Test suites are relative paths under [integration.git]/test/csit/tests/.
-# Place the suites in run order.
-
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+
vfc/nfvo-wfengine/workflow.robot \ No newline at end of file
diff --git a/test/csit/plans/vnfsdk-ice/sanity-check/setup.sh b/test/csit/plans/vnfsdk-ice/sanity-check/setup.sh
index 7e777a133..22d9acddc 100644
--- a/test/csit/plans/vnfsdk-ice/sanity-check/setup.sh
+++ b/test/csit/plans/vnfsdk-ice/sanity-check/setup.sh
@@ -18,7 +18,7 @@
#Start ice server
-docker run --rm --name vnfsdk-ice -d -p 5000:5000 onap/vnfsdk/ice
+docker run --name vnfsdk-ice -d -p 5000:5000 nexus3.onap.org:10001/onap/vnfsdk/ice:latest
# Wait for server initialization
echo Wait for vnfsdk-ice initialization