Diffstat (limited to 'test/csit/plans')
10 files changed, 186 insertions, 28 deletions
diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh b/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh
new file mode 100644
index 000000000..530d97da1
--- /dev/null
+++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+# Place the scripts in run order:
+# Make sure python-uuid is installed
+
+# Place the scripts in run order:
+source ${SCRIPTS}/dcae-bulkpm/xNFSimulator.sh
+
+# Place the scripts in run order:
+source ${SCRIPTS}/common_functions.sh
+
+# Get current host IP address
+HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
+
+VESC_IMAGE=nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.3.1
+echo VESC_IMAGE=${VESC_IMAGE}
+
+# Start DCAE VES Collector
+docker run -d -p 8080:8080/tcp -p 8443:8443/tcp -P --name vesc -e DMAAPHOST=${HOST_IP} ${VESC_IMAGE}
+
+# Clone DMaaP Message Router repo
+mkdir -p $WORKSPACE/archives/dmaapmr
+cd $WORKSPACE/archives/dmaapmr
+#unset http_proxy https_proxy
+git clone --depth 1 http://gerrit.onap.org/r/dmaap/messagerouter/messageservice -b master
+git pull
+cd $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose
+cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/MsgRtrApi.properties /var/tmp/
+
+# Update Kafka and Zookeeper properties in MsgRtrApi.properties, which will be copied to the DMaaP container
+sed -i -e 's#nexus3.onap.org:10001/onap/dmaap/kafka01101:0.0.1#wurstmeister/kafka:1.1.0#' $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose/docker-compose.yml
+
+# Start DMaaP MR containers with docker-compose and configuration from docker-compose.yml
+docker login -u docker -p docker nexus3.onap.org:10001
+docker-compose up -d
+
+# Wait for initialization of the Docker containers for DMaaP MR, Kafka and Zookeeper
+for i in {1..50}; do
+if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_kafka_1) ] && \
+[ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \
+[ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ]
+then
+    echo "DMaaP Service Running"
+    break
+else
+    echo sleep $i
+    sleep $i
+fi
+done
+
+# Get IP addresses of DMAAP, KAFKA, Zookeeper
+DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1)
+KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1)
+ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1)
+
+echo DMAAP_MR_IP=${DMAAP_MR_IP}
+echo KAFKA_IP=${KAFKA_IP}
+echo ZOOKEEPER_IP=${ZOOKEEPER_IP}
+
+# Shutdown DMAAP Container
+docker kill dockercompose_dmaap_1
+
+# Initial docker-compose up and down is for populating kafka and zookeeper IPs in /var/tmp/MsgRtrApi.properties
+sed -i -e '/config.zk.servers=/ s/=.*/='$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties
+sed -i -e '/kafka.metadata.broker.list=/ s/=.*/='$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties
+
+# Start DMaaP MR containers with docker-compose and configuration from docker-compose.yml
+docker-compose build
+docker login -u docker -p docker nexus3.onap.org:10001
+docker-compose up -d
+sleep 5
+
+# Get IP addresses of DMAAP, KAFKA, Zookeeper and VESC
+DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1)
+KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1)
+ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1)
+
+VESC_IP=`get-instance-ip.sh vesc`
+export VESC_IP=${VESC_IP}
+export HOST_IP=${HOST_IP}
+export DMAAP_MR_IP=${DMAAP_MR_IP}
+
+ROBOT_VARIABLES="-v DMAAP_MR_IP:${DMAAP_MR_IP} -v VESC_IP:${VESC_IP}"
+
+pip install jsonschema uuid
+# Wait for the container to be ready
+sleep 2
diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh b/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh
new file mode 100644
index 000000000..85428dad9
--- /dev/null
+++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+echo "Starting teardown script"
+kill-instance.sh vesc
+kill-instance.sh dockercompose_dmaap_1
+kill-instance.sh dockercompose_kafka_1
+kill-instance.sh dockercompose_zookeeper_1
+
+
diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt b/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt
new file mode 100644
index 000000000..25a5d6e8b
--- /dev/null
+++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+dcae-bulkpm/testcases
diff --git a/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml b/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml
index 66cbde22f..1673715cb
--- a/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml
+++ b/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml
@@ -60,6 +60,25 @@ services:
     networks:
       - ves-hv-default
 
+  unencrypted-ves-hv-collector:
+    image: $DOCKER_REGISTRY/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:latest
+    ports:
+      - "7060:6060"
+      - "7061:6061/tcp"
+    entrypoint: ["java", "-Dio.netty.leakDetection.level=paranoid", "-cp", "*:", "org.onap.dcae.collectors.veshv.main.MainKt"]
+    command: ["--listen-port", "6061", "--config-url", "http://consul:8500/v1/kv/veshv-config", "--ssl-disable"]
+    healthcheck:
+      interval: 10s
+      timeout: 5s
+      retries: 2
+      test: "curl --request GET --fail --silent --show-error localhost:6060/health/ready && nc -vz localhost 6061"
+    depends_on:
+      - kafka
+    volumes:
+      - ./ssl/:/etc/ves-hv/
+    networks:
+      - ves-hv-default
+
   dcae-app-simulator:
     image: $DOCKER_REGISTRY/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-dcae-app-simulator:latest
     ports:
diff --git a/test/csit/plans/dcaegen2/prh-testsuites/setup.sh b/test/csit/plans/dcaegen2/prh-testsuites/setup.sh
index a5ce48b52..52167bf5c
--- a/test/csit/plans/dcaegen2/prh-testsuites/setup.sh
+++ b/test/csit/plans/dcaegen2/prh-testsuites/setup.sh
@@ -8,26 +8,11 @@ export AAI_SIMULATOR="aai_simulator"
 
 cd ${WORKSPACE}/test/csit/tests/dcaegen2/prh-testcases/resources/
 
-docker login -u docker -p docker nexus3.onap.org:10001
 pip uninstall -y docker-py
 pip uninstall -y docker
 pip install -U docker
 
 docker-compose up -d --build
 
-# Wait for initialization of Docker containers
-for i in {1..10}; do
-    if [ $(docker inspect --format '{{ .State.Running }}' ${PRH_SERVICE}) ] && \
-       [ $(docker inspect --format '{{ .State.Running }}' ${DMAAP_SIMULATOR}) ] && \
-       [ $(docker inspect --format '{{ .State.Running }}' ${AAI_SIMULATOR}) ]
-    then
-        echo "dmaap_simulator, aai_simulator and prh services are running"
-        break
-    else
-        echo sleep ${i}
-        sleep ${i}
-    fi
-done
-
 PRH_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${PRH_SERVICE})
 DMAAP_SIMULATOR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${DMAAP_SIMULATOR})
 AAI_SIMULATOR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${AAI_SIMULATOR})
@@ -47,12 +32,5 @@ for i in {1..10}; do
     sleep ${i}
 done
-docker stop prh
-docker cp prh:/config/prh_endpoints.json ${WORKDIR}
-sed -i -e 's/"dmaapHostName":.*/"dmaapHostName": "'${DMAAP_SIMULATOR_IP}'",/g' ${WORKDIR}/prh_endpoints.json
-sed -i -e 's/"aaiHost":.*/"aaiHost": "'${AAI_SIMULATOR_IP}'",/g' ${WORKDIR}/prh_endpoints.json
-docker cp ${WORKDIR}/prh_endpoints.json prh:/config/
-docker start prh
-
 #
 #Pass any variables required by Robot test suites in ROBOT_VARIABLES
 ROBOT_VARIABLES="-v DMAAP_SIMULATOR:${DMAAP_SIMULATOR_IP}:2222 -v AAI_SIMULATOR:${AAI_SIMULATOR_IP}:3333 -v PRH:${PRH_IP}:8100"
diff --git a/test/csit/plans/policy/apex-pdp/setup.sh b/test/csit/plans/policy/apex-pdp/setup.sh
new file mode 100644
index 000000000..7ab5b9e22
--- /dev/null
+++ b/test/csit/plans/policy/apex-pdp/setup.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+docker run -d --name apex -p 12561:12561 -p 23324:23324 -it nexus3.onap.org:10001/onap/policy-apex-pdp:2.0-SNAPSHOT-latest /bin/bash -c "/opt/app/policy/apex-pdp/bin/apexEngine.sh -c /opt/app/policy/apex-pdp/examples/config/SampleDomain/RESTServerJsonEvent.json"
+
+APEX_IP=`get-instance-ip.sh apex`
+echo APEX IP IS ${APEX_IP}
+# Wait for initialization
+for i in {1..10}; do
+    curl -sS ${APEX_IP}:23324 && break
+    echo sleep $i
+    sleep $i
+done
+
+ROBOT_VARIABLES="-v APEX_IP:${APEX_IP}"
diff --git a/test/csit/plans/policy/apex-pdp/teardown.sh b/test/csit/plans/policy/apex-pdp/teardown.sh
new file mode 100644
index 000000000..ca8e92e6c
--- /dev/null
+++ b/test/csit/plans/policy/apex-pdp/teardown.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# ============LICENSE_START=======================================================
+# Copyright (C) 2018 Ericsson. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: Apache-2.0
+# ============LICENSE_END=========================================================
+
+kill-instance.sh apex
diff --git a/test/csit/plans/policy/apex-pdp/testplan.txt b/test/csit/plans/policy/apex-pdp/testplan.txt
new file mode 100644
index 000000000..cee9abda5
--- /dev/null
+++ b/test/csit/plans/policy/apex-pdp/testplan.txt
@@ -0,0 +1,3 @@
+# Test suites are relative paths under [integration.git]/test/csit/tests/.
+# Place the suites in run order.
+policy/apex-pdp/apex-pdp-test.robot
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
index 5a578230b..f990aa5a7
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh
@@ -24,10 +24,10 @@ source ${SCRIPTS}/common_functions.sh
 docker run -d -p 8500:8500 --name msb_consul consul:0.9.3
 MSB_CONSUL_IP=`get-instance-ip.sh msb_consul`
 echo MSB_CONSUL_IP=${MSB_CONSUL_IP}
-docker run -d -p 10081:10081 -e CONSUL_IP=$MSB_CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery
+docker run -d -p 10081:10081 -e CONSUL_IP=$MSB_CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery:1.1.0
 MSB_DISCOVERY_IP=`get-instance-ip.sh msb_discovery`
 echo MSB_DISCOVERY_IP=${MSB_DISCOVERY_IP}
-docker run -d -p 80:80 -e CONSUL_IP=$MSB_CONSUL_IP -e SDCLIENT_IP=$MSB_DISCOVERY_IP -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway
+docker run -d -p 80:80 -e CONSUL_IP=$MSB_CONSUL_IP -e SDCLIENT_IP=$MSB_DISCOVERY_IP --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway:1.1.0
 MSB_IAG_IP=`get-instance-ip.sh msb_internal_apigateway`
 echo MSB_IAG_IP=${MSB_IAG_IP}
 
@@ -39,8 +39,8 @@ for i in {1..10}; do
 done
 
 # wait for container initialization
-echo sleep 60
-sleep 60
+echo sleep 30
+sleep 30
 
 ORG="onap"
 PROJECT="vfc"
@@ -52,7 +52,8 @@ IMAGE_ACTIVITI_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"
 SERVICE_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}')
 
 # start wfengine-activiti
-docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8804 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME}
+# docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8804 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME}
+docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_PORT=8080 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME}
 WFENGINE_ACTIVITI_IP=`get-instance-ip.sh vfc_wfengine_activiti`
 
 # Wait for initialization
@@ -72,7 +73,10 @@ IMAGE="wfengine-mgrservice"
 IMAGE_MGRSERVICE_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}"
 
 # Start wfengine-mgrservice
-docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8805 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME}
+#docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8805 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME}
+# docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_PORT=10550 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME}
+docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_PORT=10550 -e OPENPALETTE_MSB_IP=${WFENGINE_ACTIVITI_IP} -e OPENPALETTE_MSB_PORT=8080 ${IMAGE_MGRSERVICE_NAME}
+
 ##docker run -d --name ${IMAGE} -e OPENPALETTE_MSB_IP=${WFENGINEACTIVITIR_IP} -e OPENPALETTE_MSB_PORT=8080 ${IMAGE_MGRSERVICE_NAME}
 WFENGINE_MGRSERVICE_IP=`get-instance-ip.sh vfc_wfengine_mgrservice`
 for i in {1..10}; do
diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
index 384bc3935..bca33569b
--- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
+++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh
@@ -16,6 +16,12 @@
 #
 # This script is sourced by run-csit.sh after Robot test completion.
 
+echo === logs vfc_wfengine_activiti ===
+docker logs vfc_wfengine_activiti
+
+echo === logs vfc_wfengine_mgrservice ===
+docker logs vfc_wfengine_mgrservice
+
 kill-instance.sh msb_internal_apigateway
 kill-instance.sh msb_discovery
 kill-instance.sh msb_consul