authorHerbert Eiselt <herbert.eiselt@highstreet-technologies.com>2021-07-20 13:53:16 +0000
committerGerrit Code Review <gerrit@onap.org>2021-07-20 13:53:16 +0000
commit0bb4a58d0710eb065e9df2ba1c739990d8932a38 (patch)
treeac25d3e6c9294fc01688efc209af113c98955b11
parent3f0cf04efd86ee181e2358488390b408a2e1b4d3 (diff)
parentcb75097bcd353161aa91fdbf420ec92d01e31ec1 (diff)
Merge "[SNDC-CSIT] Provide test deployment for SDNC/R"
Former-commit-id: c3ff30e996b138db0cf25789ca734fc189c395b5
-rwxr-xr-x  csit/plans/sdnr/setup.sh  29
-rwxr-xr-x  csit/plans/sdnr/teardown.sh  22
-rw-r--r--  csit/plans/sdnr/testdata/localhost.py  49
-rw-r--r--  csit/plans/sdnr/testdata/nts-networkfunctions.csv  4
-rw-r--r--  csit/plans/sdnr/testplan.txt  5
-rwxr-xr-x  csit/prepare-csit.sh  4
-rwxr-xr-x  csit/run-csit.sh  6
-rw-r--r--  csit/scripts/sdnr/docker-compose/.env  44
-rw-r--r--  csit/scripts/sdnr/docker-compose/docker-compose-nts-networkfunction.yaml  47
-rw-r--r--  csit/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml  98
-rw-r--r--  csit/scripts/sdnr/docker-compose/docker-compose-sdnrdb-elasticsearch.yaml  22
-rw-r--r--  csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml  52
-rw-r--r--  csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml  43
-rwxr-xr-x  csit/scripts/sdnr/docker-compose/kafka/zk_client_jaas.conf  5
-rw-r--r--  csit/scripts/sdnr/docker-compose/mr/MsgRtrApi.properties  169
-rw-r--r--  csit/scripts/sdnr/docker-compose/mr/cadi.properties  19
-rw-r--r--  csit/scripts/sdnr/docker-compose/mr/logback.xml  208
-rwxr-xr-x  csit/scripts/sdnr/docker-compose/nts-networkfunctions-launch.sh  73
-rw-r--r--  csit/scripts/sdnr/docker-compose/sdnr/certs/certs.properties  2
-rw-r--r--  csit/scripts/sdnr/docker-compose/sdnr/certs/keys0.zip  bin 0 -> 3917 bytes
-rw-r--r--  csit/scripts/sdnr/docker-compose/sdnr/mountpoint-registrar.properties  43
-rw-r--r--  csit/scripts/sdnr/docker-compose/vesc/collector.properties  64
-rw-r--r--  csit/scripts/sdnr/docker-compose/zk/zk_server_jaas.conf  4
-rwxr-xr-x  csit/scripts/sdnr/sdnr-launch.sh  152
-rwxr-xr-x  csit/scripts/sdnr/sdnr-teardown.sh  62
-rw-r--r--  csit/tests/sdnr/functional/dummy.robot  10
-rw-r--r--  csit/tests/sdnr/healthcheck/20_healthcheckSUT.robot  39
27 files changed, 1271 insertions, 4 deletions
diff --git a/csit/plans/sdnr/setup.sh b/csit/plans/sdnr/setup.sh
new file mode 100755
index 00000000..08babea5
--- /dev/null
+++ b/csit/plans/sdnr/setup.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+# Modifications copyright (c) 2021 highstreet technologies GmbH Property
+#
+
+source ${WORKSPACE}/scripts/sdnr/sdnr-launch.sh
+onap_dependent_components_launch
+nts_networkfunctions_launch ${WORKSPACE}/plans/sdnr/testdata/nts-networkfunctions.csv
+sdnr_launch
+
+#Pass any variables required by Robot test suites in ROBOT_VARIABLES
+ROBOT_VARIABLES="--variablefile=${WORKSPACE}/plans/sdnr/testdata/localhost.py"
+ROBOT_IMAGE="hightec/sdnc-test-lib:latest"
+
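For orientation (not part of the patch): the plan is meant to be driven through the existing CSIT wrapper, which sources setup.sh, runs the suites listed in testplan.txt and finally teardown.sh. A minimal sketch, assuming the usual calling convention where the plan path is given relative to the csit/ directory:

    cd integration/csit          # repository checkout, path assumed
    ./run-csit.sh plans/sdnr     # sources plans/sdnr/setup.sh, runs the suites, then teardown.sh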
diff --git a/csit/plans/sdnr/teardown.sh b/csit/plans/sdnr/teardown.sh
new file mode 100755
index 00000000..005f6f31
--- /dev/null
+++ b/csit/plans/sdnr/teardown.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2017 AT&T Intellectual Property
+# Modifications copyright (c) 2021 highstreet technologies GmbH Property
+#
+
+source ${WORKSPACE}/scripts/sdnr/sdnr-teardown.sh
+sdnr_teardown
diff --git a/csit/plans/sdnr/testdata/localhost.py b/csit/plans/sdnr/testdata/localhost.py
new file mode 100644
index 00000000..0de4bac5
--- /dev/null
+++ b/csit/plans/sdnr/testdata/localhost.py
@@ -0,0 +1,49 @@
+#!python
+
+# This file describes all test parameters for a specific test environment and system under test.
+# SDNR custom keywords and test suites use this file to stay independent of the environment.
+# The file is created once per test system.
+# On the robot command line, pass the file with '--variablefile <my_environment>.py'.
+
+## Access SDNR cluster
+SDNR_PROTOCOL = "http://"
+SDNR_HOST = "127.0.0.1"
+SDNR_PORT = "8181"
+SDNR_USER = "admin"
+SDNR_PASSWORD = "Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U"
+
+# for odlux gui testing
+WEBDRIVER_PATH = "/usr/local/bin/chromedriver"
+
+# Access to elastic search SDNRDB
+SDNRDB = {'PROTOCOL': 'http', 'IP': '127.0.0.1', 'PORT': '8181', 'USE_API_GATEWAY': True, 'USE_SSL': False,
+ 'VERIFY_CERTS': False}
+# elastic DB used to store statistics data
+ELASTIC_LOG_DB = SDNRDB
+RESTCONF_TIMEOUT = '90 s'
+# RESTCONF response times longer than VALID_RESPONSE_TIME (in seconds) are reported as warnings in the robot logs
+VALID_RESPONSE_TIME = 5
+
+
+VESCOLLECTOR = {"SCHEME": "https", "IP": "172.40.0.1", "PORT": 8443, "AUTHMETHOD": "basic-auth", "USERNAME": "sample1",
+ "PASSWORD": "sample1"}
+
+NTS_SSH_CONNECTIONS = 10
+NTS_TLS_CONNECTIONS = 10
+# ssh settings for karaf-shell
+# list of default log topics, short name (defined in ...) or long name
+KARAF_CONSOLE = {'KARAF_USER': "karaf", 'KARAF_PASSWORD': "karaf", 'KARAF_LOG_LEVEL': "DEFAULT",
+ 'KARAF_LOGGER_LIST': ['netconf', 'wtfeatures'],
+ 'HOST_LIST': [{'KARAF_HOST': "127.0.0.1", 'KARAF_PORT': 8101}
+ ]}
+# define log level used by default
+KARAF_LOG_LEVEL = "DEFAULT"
+# save karaf logs after test execution
+KARAF_GET_LOG = True
+KARAF_LOG_FILE_PATH = '/opt/opendaylight/data/log/'
+# KARAF_LOG_FILE_PATH = '/var/log/onap/sdnc/karaf.log'
+# write useful statistics in background
+WRITE_STATISTICS_BACKGROUND = False
+WRITE_STATISTICS_BACKGROUND_INTERVAL = 5
+
+GLOBAL_SUITE_SETUP_CONFIG = {'setup_ssh_lib': True}
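For illustration only: outside of run-csit.sh the same variable file can be handed to Robot Framework directly. A minimal sketch; the output directory and suite path are assumptions:

    robot --variablefile ${WORKSPACE}/plans/sdnr/testdata/localhost.py \
          --outputdir /tmp/sdnr-robot-logs \
          ${WORKSPACE}/tests/sdnr/healthcheck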
diff --git a/csit/plans/sdnr/testdata/nts-networkfunctions.csv b/csit/plans/sdnr/testdata/nts-networkfunctions.csv
new file mode 100644
index 00000000..3a9219aa
--- /dev/null
+++ b/csit/plans/sdnr/testdata/nts-networkfunctions.csv
@@ -0,0 +1,4 @@
+NAME,NTS_NF_DOCKER_REPOSITORY,NTS_NF_IMAGE_NAME,NTS_NF_IMAGE_TAG,NTSFUNC-IP,NTS_HOST_NETCONF_SSH_BASE_PORT,NTS_HOST_NETCONF_TLS_BASE_PORT,NTS_NF_SSH_CONNECTIONS,NTS_NF_TLS_CONNECTIONS
+ONF-CORE-1-4,docker.io/hightec/,nts-ng-onf-core-1-4,1.3.1,172.40.0.31,31000,31500,1,1
+O-RAN-FH,docker.io/hightec/,nts-ng-o-ran-fh,1.3.1,172.40.0.40,40000,40500,1,1
+X-RAN,docker.io/hightec/,nts-ng-x-ran,1.3.1,172.40.0.42,42000,42500,1,1
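Each data row is consumed by nts-networkfunctions-launch.sh further below. A hedged sketch of how the first row maps to environment variables and port ranges, following the arithmetic used in that script:

    # ONF-CORE-1-4 with 1 SSH and 1 TLS connection (values from the row above)
    export NTS_NF_IMAGE_NAME=nts-ng-onf-core-1-4
    export NTS_NF_IP=172.40.0.31
    export NTS_HOST_NETCONF_SSH_BASE_PORT=31000   # host 31000-31000 -> container 830-830
    export NTS_HOST_NETCONF_TLS_BASE_PORT=31500   # host 31500-31500 -> container 831-831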
diff --git a/csit/plans/sdnr/testplan.txt b/csit/plans/sdnr/testplan.txt
new file mode 100644
index 00000000..24966d0c
--- /dev/null
+++ b/csit/plans/sdnr/testplan.txt
@@ -0,0 +1,5 @@
+# Test suites are relative paths under [integration/csit.git]/tests/.
+# Place the suites in run order.
+#echo "Successfully triggered the Test plan"
+sdnr/healthcheck
+sdnr/functional
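The run order matters because the wrapper script turns every non-comment line into a suite directory under csit/tests/. A hedged sketch of that expansion (variable names are illustrative, not the actual run-csit.sh code):

    SUITES=""
    while read -r line; do
        [[ -z "$line" || "$line" == \#* ]] && continue    # skip blanks and comments
        SUITES="${SUITES} tests/${line}"                  # -> tests/sdnr/healthcheck tests/sdnr/functional
    done < plans/sdnr/testplan.txt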
diff --git a/csit/prepare-csit.sh b/csit/prepare-csit.sh
index 3cc143de..dc59c58f 100755
--- a/csit/prepare-csit.sh
+++ b/csit/prepare-csit.sh
@@ -18,12 +18,12 @@
#
if [ -z "$WORKSPACE" ]; then
- export WORKSPACE=`git rev-parse --show-toplevel`
+ export WORKSPACE=`git rev-parse --show-toplevel`/csit
fi
TESTPLANDIR=${WORKSPACE}/${TESTPLAN}
-# Assume that if ROBOT_VENV is set and virtualenv with system site packages can be activated,
+# Assume that if ROBOT_VENV is set and virtualenv with system site packages can be activated,
# ci-management/jjb/integration/include-raw-integration-install-robotframework.sh has already
# been executed
diff --git a/csit/run-csit.sh b/csit/run-csit.sh
index eb255e93..e1b831af 100755
--- a/csit/run-csit.sh
+++ b/csit/run-csit.sh
@@ -154,12 +154,14 @@ TESTPLANDIR="${WORKSPACE}/${TESTPLAN}"
# Set env variables
source_safely "${WORKSPACE}/sdnc-csit.env"
+if [[ -z $ROBOT_IMAGE ]]; then
+ # Run installation of prerequired libraries
+ source_safely "${WORKSPACE}/prepare-csit.sh"
-# Run installation of prerequired libraries
-source_safely "${WORKSPACE}/prepare-csit.sh"
# Activate the virtualenv containing all the required libraries installed by prepare-csit.sh
source_safely "${ROBOT_VENV}/bin/activate"
+fi
WORKDIR=$(mktemp -d --suffix=-robot-workdir)
cd "${WORKDIR}"
diff --git a/csit/scripts/sdnr/docker-compose/.env b/csit/scripts/sdnr/docker-compose/.env
new file mode 100644
index 00000000..53039467
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/.env
@@ -0,0 +1,44 @@
+COMPOSE_PROJECT_NAME=integration
+
+# network
+# all components are reachable via docker network
+# IP addresses are defined within each section
+NETWORK_NAME=integration
+NETWORK_SUBNET=172.40.0.0/16
+GATEWAY_IP=172.40.0.1
+IPV6_ENABLED="false"
+
+# sdnc/r
+NEXUS_DOCKER_REPO=nexus3.onap.org:10001
+NEXUS_DOCKER_IMAGE_NAME=onap/sdnc-image
+NEXUS_DOCKER_IMAGE_TAG=2.2-STAGING-latest
+ODL_ADMIN_USERNAME=admin
+ODL_ADMIN_PASSWORD=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
+ODL_CERT_DIR=/opt/opendaylight/current/certs
+SDN_CONTROLLER_PROTOCOL="http"
+SDNR_IP=172.40.0.21
+SDNRPORT=8181
+SDNR_DM=true
+
+# sdnrdb
+ES_VERSION=7.9.3
+ES_IMAGE=docker.elastic.co/elasticsearch/elasticsearch-oss
+ESDB_IP=172.40.0.30
+
+# sdnc-web
+#NEXUS_DOCKER_REPO see sdnc section
+NEXUS_DOCKER_WEB_IMAGE_NAME=onap/sdnc-web-image
+NEXUS_DOCKER_WEB_IMAGE_TAG=2.2-STAGING-latest
+SDNR_WEB_IP=172.40.0.25
+SDNC_WEB_PORT=8282
+
+
+# onap dependent components
+VESC_IMAGE=nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.8.0
+DMAAP_IMAGE=nexus3.onap.org:10001/onap/dmaap/dmaap-mr:1.1.18
+KAFKA_IMAGE=nexus3.onap.org:10001/onap/dmaap/kafka111:1.0.4
+ZOOKEEPER_IMAGE=nexus3.onap.org:10001/onap/dmaap/zookeeper:6.0.3
+ZOOKEEPER_IP=172.40.0.60
+KAFKA_IP=172.40.0.70
+DMAAP_IP=172.40.0.80
+VESCOLLECTOR_IP=172.40.0.90
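All compose files below are started against this file via --env-file, so addresses and image tags stay in one place. A minimal sketch for inspecting the resolved configuration (compose file name taken from this patch):

    cd csit/scripts/sdnr/docker-compose
    docker-compose --env-file .env -f docker-compose-single-sdnr.yaml config   # print the rendered compose file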
diff --git a/csit/scripts/sdnr/docker-compose/docker-compose-nts-networkfunction.yaml b/csit/scripts/sdnr/docker-compose/docker-compose-nts-networkfunction.yaml
new file mode 100644
index 00000000..05ab6a98
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/docker-compose-nts-networkfunction.yaml
@@ -0,0 +1,47 @@
+version: "3"
+services:
+ nts-function:
+ image: ${NTS_NF_DOCKER_REPOSITORY}${NTS_NF_IMAGE_NAME}:${NTS_NF_IMAGE_TAG}
+ container_name: ${NTS_NF_IMAGE_NAME}
+ stop_grace_period: 5m
+ ports:
+ - "::${NTS_HOST_NETCONF_SSH_BASE_PORT}-${NTS_HOST_NETCONF_SSH_BASE_PORT_PLUS_SSH_CON}:${EXPOSE_PORT_SSH}-${EXPOSE_PORT_SSH_PLUS_CON}"
+ - "::${NTS_HOST_NETCONF_TLS_BASE_PORT}-${NTS_HOST_NETCONF_TLS_BASE_PORT_PLUS_TLS_CON}:${EXPOSE_PORT_TLS}-${EXPOSE_PORT_TLS_PLUS_CON}"
+ environment:
+ NTS_NF_STANDALONE_START_FEATURES: "datastore-populate ves-heartbeat ves-pnf-registration web-cut-through manual-notification-generation netconf-call-home"
+ NTS_NF_MOUNT_POINT_ADDRESSING_METHOD: "host-mapping"
+ NTS_HOST_IP: ${NTS_HOST_IP}
+ HOSTNAME: ${NTS_NF_CONTAINER_NAME}
+ IPv6Enabled: ${IPV6_ENABLED}
+
+ SSH_CONNECTIONS: ${NTS_NF_SSH_CONNECTIONS}
+ TLS_CONNECTIONS: ${NTS_NF_TLS_CONNECTIONS}
+ NTS_HOST_NETCONF_SSH_BASE_PORT: ${NTS_HOST_NETCONF_SSH_BASE_PORT}
+ NTS_HOST_NETCONF_TLS_BASE_PORT: ${NTS_HOST_NETCONF_TLS_BASE_PORT}
+
+ SDN_CONTROLLER_PROTOCOL: ${SDN_CONTROLLER_PROTOCOL}
+ SDN_CONTROLLER_IP: ${NTS_NF_SDN_CONTROLLER_IP}
+ SDN_CONTROLLER_PORT: ${NTS_NF_SDN_CONTROLLER_PORT}
+ SDN_CONTROLLER_CALLHOME_PORT: 6666
+ SDN_CONTROLLER_USERNAME: ${ODL_ADMIN_USERNAME}
+ SDN_CONTROLLER_PASSWORD: ${ODL_ADMIN_PASSWORD}
+
+ VES_ENDPOINT_PROTOCOL: "https"
+ VES_ENDPOINT_IP: "127.0.0.1"
+ VES_ENDPOINT_PORT: 1234
+ VES_ENDPOINT_AUTH_METHOD: "no-auth"
+ VES_ENDPOINT_USERNAME: "admin"
+ VES_ENDPOINT_PASSWORD: "admin"
+ networks:
+ integration:
+ ipv4_address: ${NTS_NF_IP}
+
+networks:
+ integration:
+ name: ${NETWORK_NAME}
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: ${NETWORK_SUBNET}
+ gateway: ${GATEWAY_IP}
diff --git a/csit/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml b/csit/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml
new file mode 100644
index 00000000..f931450b
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml
@@ -0,0 +1,98 @@
+version: '2'
+services:
+ zookeeper:
+ image: ${ZOOKEEPER_IMAGE}
+ container_name: zookeeper
+ ports:
+ - "2181:2181"
+ environment:
+ ZOOKEEPER_REPLICAS: 1
+ ZOOKEEPER_TICK_TIME: 2000
+ ZOOKEEPER_SYNC_LIMIT: 5
+ ZOOKEEPER_INIT_LIMIT: 10
+ ZOOKEEPER_MAX_CLIENT_CNXNS: 200
+ ZOOKEEPER_AUTOPURGE_SNAP_RETAIN_COUNT: 3
+ ZOOKEEPER_AUTOPURGE_PURGE_INTERVAL: 24
+ ZOOKEEPER_CLIENT_PORT: 2181
+ KAFKA_OPTS: -Djava.security.auth.login.config=/etc/zookeeper/secrets/jaas/zk_server_jaas.conf -Dzookeeper.kerberos.removeHostFromPrincipal=true -Dzookeeper.kerberos.removeRealmFromPrincipal=true -Dzookeeper.authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider -Dzookeeper.requireClientAuthScheme=sasl
+ ZOOKEEPER_SERVER_ID:
+ volumes:
+ - ./zk/zk_server_jaas.conf:/etc/zookeeper/secrets/jaas/zk_server_jaas.conf
+ networks:
+ integration:
+ aliases:
+ - zookeeper
+ ipv4_address: ${ZOOKEEPER_IP}
+ kafka:
+ image: ${KAFKA_IMAGE}
+ container_name: kafka
+ ports:
+ - "9092:9092"
+ environment:
+ enableCadi: 'false'
+ KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+ KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 40000
+ KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: 40000
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL_PLAINTEXT:PLAINTEXT,EXTERNAL_PLAINTEXT:PLAINTEXT
+ KAFKA_ADVERTISED_LISTENERS: INTERNAL_PLAINTEXT://kafka:9092
+ KAFKA_LISTENERS: INTERNAL_PLAINTEXT://0.0.0.0:9092
+ KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL_PLAINTEXT
+ KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
+ KAFKA_OPTS: -Djava.security.auth.login.config=/etc/kafka/secrets/jaas/zk_client_jaas.conf
+ KAFKA_ZOOKEEPER_SET_ACL: 'true'
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ # Reduced the number of partitions only to avoid the timeout error for the first subscribe call in slow environment
+ KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: 1
+ volumes:
+ - ./kafka/zk_client_jaas.conf:/etc/kafka/secrets/jaas/zk_client_jaas.conf
+ networks:
+ integration:
+ aliases:
+ - kafka
+ ipv4_address: ${KAFKA_IP}
+
+ depends_on:
+ - zookeeper
+ onap-dmaap:
+ container_name: onap-dmaap
+ image: ${DMAAP_IMAGE}
+ ports:
+ - "3904:3904"
+ - "3905:3905"
+ environment:
+ enableCadi: 'false'
+ volumes:
+ - ./mr/MsgRtrApi.properties:/appl/dmaapMR1/bundleconfig/etc/appprops/MsgRtrApi.properties
+ - ./mr/logback.xml:/appl/dmaapMR1/bundleconfig/etc/logback.xml
+ - ./mr/cadi.properties:/appl/dmaapMR1/etc/cadi.properties
+ networks:
+ integration:
+ aliases:
+ - dmaap
+ ipv4_address: ${DMAAP_IP}
+
+ depends_on:
+ - zookeeper
+ - kafka
+ vesc:
+ image: ${VESC_IMAGE}
+ container_name: vescollector
+ environment:
+ DMAAPHOST: "dmaap"
+ ports:
+ - "8080:8080"
+ - "8443:8443"
+ volumes:
+ - ./vesc/collector.properties:/opt/app/VESCollector/etc/collector.properties
+ networks:
+ integration:
+ ipv4_address: ${VESCOLLECTOR_IP}
+
+networks:
+ integration:
+ name: ${NETWORK_NAME}
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: ${NETWORK_SUBNET}
diff --git a/csit/scripts/sdnr/docker-compose/docker-compose-sdnrdb-elasticsearch.yaml b/csit/scripts/sdnr/docker-compose/docker-compose-sdnrdb-elasticsearch.yaml
new file mode 100644
index 00000000..0072b5bc
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/docker-compose-sdnrdb-elasticsearch.yaml
@@ -0,0 +1,22 @@
+version: "2.2"
+services:
+ sdnrdb:
+ image: ${ES_IMAGE}:${ES_VERSION}
+ container_name: sdnrdb
+ environment:
+ - discovery.type=single-node
+ networks:
+ integration:
+ ipv4_address: ${ESDB_IP}
+ sdnr:
+ environment:
+ - SDNRDBURL=http://sdnrdb:9200
+networks:
+ integration:
+ name: ${NETWORK_NAME}
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: ${NETWORK_SUBNET}
+ gateway: ${GATEWAY_IP}
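The database is only reachable on the integration docker network (no host port mapping). A hedged way to verify it, assuming curl is available inside the elasticsearch image:

    docker exec sdnrdb curl -s 'http://localhost:9200/_cluster/health?pretty'   # expect status green or yellow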
diff --git a/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml b/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml
new file mode 100644
index 00000000..c8d1728a
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml
@@ -0,0 +1,52 @@
+version: "2.2"
+services:
+ sdnr:
+ environment:
+ - SDNRDM="true"
+ topology-api:
+ image: ${TOPOLOGY_DOCKER_REPO}/${TOPOLOGY_DOCKER_IMAGE_NAME}:${TOPOLOGY_DOCKER_IMAGE_TAG}
+ container_name: topology-api
+ ports:
+ - "3001:3001"
+ environment:
+ - AUTH_ENABLED=${AUTH_ENABLED}
+ - AUTH_HOST_URL=${IDENTITY_PROVIDER_URL}
+ # - AUTH_CONFIG_FILE
+ - PROVIDERS=${TOPOLOGY_PROVIDERS}
+ - LOAD_PACKAGES=${LOAD_PACKAGES}
+ depends_on:
+ - sdnr
+ networks:
+ integration:
+ ipv4_address: ${TOPOLOGY_IP}
+ sdnr-web:
+ image: ${NEXUS_DOCKER_REPO}/${NEXUS_DOCKER_WEB_IMAGE_NAME}:${NEXUS_DOCKER_IMAGE_TAG}
+ container_name: sdnr-web
+ ports:
+ - "${SDNC_WEB_PORT}:${SDNC_WEB_PORT}"
+ environment:
+ - WEBPROTOCOL=HTTP
+ - WEBPORT=${SDNC_WEB_PORT}
+ - SDNRPROTOCOL=HTTP
+ - SDNRHOST=sdnr
+ - SDNRPORT=${SDNRPORT}
+ - TOPOURL=${TOPOURL}
+ - TILEURL=${TILEURL}
+ depends_on:
+ - topology-api
+ - sdnr
+ command: ["/wait-for-sdnc.sh", "sdnr:${SDNRPORT}/ready", "/opt/bitnami/nginx/sbin/run.sh"]
+ volumes:
+ - ./wait-for-sdnc.sh:/wait-for-sdnc.sh
+ networks:
+ integration:
+ ipv4_address: ${SDNR_WEB_IP}
+networks:
+ integration:
+ name: ${NETWORK_NAME}
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: ${NETWORK_SUBNET}
+ gateway: ${GATEWAY_IP}
diff --git a/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml b/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml
new file mode 100644
index 00000000..05724cff
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml
@@ -0,0 +1,43 @@
+version: "2.2"
+services:
+ sdnr:
+ image: ${NEXUS_DOCKER_REPO}/${NEXUS_DOCKER_IMAGE_NAME}:${NEXUS_DOCKER_IMAGE_TAG}
+ container_name: sdnr
+ ports:
+ - "8181:8181"
+ - "8101:8101"
+#entrypoint: ["/bin/bash", "/opt/onap/sdnc/bin/startODL.oom.sh"]
+ environment:
+ - SDNC_CONFIG_DIR=/opt/onap/ccsdk/data/properties
+ - ODL_CERT_DIR=${ODL_CERT_DIR}
+ - ODL_ADMIN_PASSWORD=${ODL_ADMIN_PASSWORD}
+ - ENABLE_ODL_CLUSTER=false
+ - SDNC_REPLICAS=0
+ - CCSDK_REPLICAS=0
+ - DOMAIN=""
+ - SDNRWT=true
+ - SDNRINIT=true
+ - SDNRONLY=true
+ - JAVA_OPTS=-Xms256m -Xmx2g
+ volumes:
+ - ./sdnr/mountpoint-registrar.properties:/opt/opendaylight/etc/mountpoint-registrar.properties
+ - ./sdnr/certs/certs.properties:${ODL_CERT_DIR}/certs.properties
+ - ./sdnr/certs/keys0.zip:${ODL_CERT_DIR}/keys0.zip
+ networks:
+ integration:
+ ipv4_address: ${SDNR_IP}
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "30m"
+ max-file: "5"
+
+networks:
+ integration:
+ name: ${NETWORK_NAME}
+ driver: bridge
+ ipam:
+ driver: default
+ config:
+ - subnet: ${NETWORK_SUBNET}
+ gateway: ${GATEWAY_IP}
diff --git a/csit/scripts/sdnr/docker-compose/kafka/zk_client_jaas.conf b/csit/scripts/sdnr/docker-compose/kafka/zk_client_jaas.conf
new file mode 100755
index 00000000..d4ef1eb0
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/kafka/zk_client_jaas.conf
@@ -0,0 +1,5 @@
+Client {
+ org.apache.zookeeper.server.auth.DigestLoginModule required
+ username="kafka"
+ password="kafka_secret";
+ }; \ No newline at end of file
diff --git a/csit/scripts/sdnr/docker-compose/mr/MsgRtrApi.properties b/csit/scripts/sdnr/docker-compose/mr/MsgRtrApi.properties
new file mode 100644
index 00000000..33ff0fdb
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/mr/MsgRtrApi.properties
@@ -0,0 +1,169 @@
+###############################################################################
+# ============LICENSE_START=======================================================
+# org.onap.dmaap
+# ================================================================================
+# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+# ECOMP is a trademark and service mark of AT&T Intellectual Property.
+#
+###############################################################################
+###############################################################################
+##
+## Cambria API Server config
+##
+## - Default values are shown as commented settings.
+##
+
+###############################################################################
+##
+## HTTP service
+##
+## - 3904 is standard as of 7/29/14.
+#
+## Zookeeper Connection
+##
+## Both Cambria and Kafka make use of Zookeeper.
+##
+#config.zk.servers=172.18.1.1
+config.zk.servers=zookeeper
+#config.zk.root=/fe3c/cambria/config
+
+
+###############################################################################
+##
+## Kafka Connection
+##
+## Items below are passed through to Kafka's producer and consumer
+## configurations (after removing "kafka.")
+## if you want to change request.required.acks it can take this one value
+#kafka.metadata.broker.list=localhost:9092,localhost:9093
+kafka.metadata.broker.list=kafka:9092
+##kafka.request.required.acks=-1
+#kafka.client.zookeeper=${config.zk.servers}
+consumer.timeout.ms=100
+zookeeper.connection.timeout.ms=6000
+zookeeper.session.timeout.ms=20000
+zookeeper.sync.time.ms=2000
+auto.commit.interval.ms=1000
+fetch.message.max.bytes =1000000
+auto.commit.enable=false
+
+#(backoff*retries > zksessiontimeout)
+kafka.rebalance.backoff.ms=10000
+kafka.rebalance.max.retries=6
+
+
+###############################################################################
+##
+## Secured Config
+##
+## Some data stored in the config system is sensitive -- API keys and secrets,
+## for example. to protect it, we use an encryption layer for this section
+## of the config.
+##
+## The key is a base64 encoded AES key. This must be created/configured for
+## each installation.
+#cambria.secureConfig.key=
+##
+## The initialization vector is a 16 byte value specific to the secured store.
+## This must be created/configured for each installation.
+#cambria.secureConfig.iv=
+
+## Southfield Sandbox
+cambria.secureConfig.key=b/7ouTn9FfEw2PQwL0ov/Q==
+cambria.secureConfig.iv=wR9xP5k5vbz/xD0LmtqQLw==
+authentication.adminSecret=fe3cCompound
+#cambria.secureConfig.key[pc569h]=YT3XPyxEmKCTLI2NK+Sjbw==
+#cambria.secureConfig.iv[pc569h]=rMm2jhR3yVnU+u2V9Ugu3Q==
+
+
+###############################################################################
+##
+## Consumer Caching
+##
+## Kafka expects live connections from the consumer to the broker, which
+## obviously doesn't work over connectionless HTTP requests. The Cambria
+## server proxies HTTP requests into Kafka consumer sessions that are kept
+## around for later re-use. Not doing so is costly for setup per request,
+## which would substantially impact a high volume consumer's performance.
+##
+## This complicates Cambria server failover, because we often need server
+## A to close its connection before server B brings up the replacement.
+##
+
+## The consumer cache is normally enabled.
+#cambria.consumer.cache.enabled=true
+
+## Cached consumers are cleaned up after a period of disuse. The server inspects
+## consumers every sweepFreqSeconds and will clean up any connections that are
+## dormant for touchFreqMs.
+#cambria.consumer.cache.sweepFreqSeconds=15
+cambria.consumer.cache.touchFreqMs=120000
+##stickforallconsumerrequests=false
+## The cache is managed through ZK. The default value for the ZK connection
+## string is the same as config.zk.servers.
+#cambria.consumer.cache.zkConnect=${config.zk.servers}
+
+##
+## Shared cache information is associated with this node's name. The default
+## name is the hostname plus the HTTP service port this host runs on. (The
+## hostname is determined via InetAddress.getLocalHost ().getCanonicalHostName(),
+## which is not always adequate.) You can set this value explicitly here.
+##
+#cambria.api.node.identifier=<use-something-unique-to-this-instance>
+
+#cambria.rateLimit.maxEmptyPollsPerMinute=30
+#cambria.rateLimitActual.delay.ms=10
+
+###############################################################################
+##
+## Metrics Reporting
+##
+## This server can report its metrics periodically on a topic.
+##
+#metrics.send.cambria.enabled=true
+#metrics.send.cambria.topic=cambria.apinode.metrics #msgrtr.apinode.metrics.dmaap
+#metrics.send.cambria.sendEverySeconds=60
+
+cambria.consumer.cache.zkBasePath=/fe3c/cambria/consumerCache
+consumer.timeout=17
+
+##############################################################################
+#100mb
+maxcontentlength=10000
+
+
+##############################################################################
+#AAF Properties
+msgRtr.namespace.aaf=org.onap.dmaap.mr.topic
+msgRtr.topicfactory.aaf=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+enforced.topic.name.AAF=org.onap.dmaap.mr
+forceAAF=false
+transidUEBtopicreqd=false
+defaultNSforUEB=org.onap.dmaap.mr
+##############################################################################
+#Mirror Maker Agent
+msgRtr.mirrormakeradmin.aaf=org.onap.dmaap.mr.mirrormaker|*|admin
+msgRtr.mirrormakeruser.aaf=org.onap.dmaap.mr.mirrormaker|*|user
+msgRtr.mirrormakeruser.aaf.create=org.onap.dmaap.mr.topicFactory|:org.onap.dmaap.mr.topic:
+msgRtr.mirrormaker.timeout=15000
+msgRtr.mirrormaker.topic=org.onap.dmaap.mr.mmagent
+msgRtr.mirrormaker.consumergroup=mmagentserver
+msgRtr.mirrormaker.consumerid=1
+
+kafka.max.poll.interval.ms=300000
+kafka.heartbeat.interval.ms=60000
+kafka.session.timeout.ms=240000
+kafka.max.poll.records=1000 \ No newline at end of file
diff --git a/csit/scripts/sdnr/docker-compose/mr/cadi.properties b/csit/scripts/sdnr/docker-compose/mr/cadi.properties
new file mode 100644
index 00000000..dca56c82
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/mr/cadi.properties
@@ -0,0 +1,19 @@
+aaf_locate_url=https://aaf-locate.{{ include "common.namespace" . }}:8095
+aaf_url=https://AAF_LOCATE_URL/onap.org.osaaf.aaf.service:2.1
+aaf_env=DEV
+aaf_lur=org.onap.aaf.cadi.aaf.v2_0.AAFLurPerm
+
+cadi_truststore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.trust.jks
+cadi_truststore_password=enc:mN6GiIzFQxKGDzAXDOs7b4j8DdIX02QrZ9QOWNRpxV3rD6whPCfizSMZkJwxi_FJ
+
+cadi_keyfile=/appl/dmaapMR1/etc/org.onap.dmaap.mr.keyfile
+
+cadi_alias=dmaapmr@mr.dmaap.onap.org
+cadi_keystore=/appl/dmaapMR1/etc/org.onap.dmaap.mr.p12
+cadi_keystore_password=enc:_JJT2gAEkRzXla5xfDIHal8pIoIB5iIos3USvZQT6sL-l14LpI5fRFR_QIGUCh5W
+cadi_x509_issuers=CN=intermediateCA_1, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_7, OU=OSAAF, O=ONAP, C=US:CN=intermediateCA_9, OU=OSAAF, O=ONAP, C=US
+
+cadi_loglevel=INFO
+cadi_protocols=TLSv1.1,TLSv1.2
+cadi_latitude=37.78187
+cadi_longitude=-122.26147 \ No newline at end of file
diff --git a/csit/scripts/sdnr/docker-compose/mr/logback.xml b/csit/scripts/sdnr/docker-compose/mr/logback.xml
new file mode 100644
index 00000000..f02a2db7
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/mr/logback.xml
@@ -0,0 +1,208 @@
+<!--
+ ============LICENSE_START=======================================================
+ Copyright © 2019 AT&T Intellectual Property. All rights reserved.
+ ================================================================================
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ============LICENSE_END=========================================================
+ -->
+
+<configuration scan="true" scanPeriod="3 seconds" debug="false">
+ <contextName>${module.ajsc.namespace.name}</contextName>
+ <jmxConfigurator />
+ <property name="logDirectory" value="${AJSC_HOME}/log" />
+ <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <encoder>
+ <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{1024} - %msg%n
+ </pattern>
+ </encoder>
+ </appender>
+
+ <appender name="INFO" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>INFO</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ </appender>
+
+ <appender name="DEBUG" class="ch.qos.logback.core.ConsoleAppender">
+
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+
+  <appender name="ERROR" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.LevelFilter">
+ <level>ERROR</level>
+ <onMatch>ACCEPT</onMatch>
+ <onMismatch>DENY</onMismatch>
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+
+
+ <!-- Msgrtr related loggers -->
+ <logger name="org.onap.dmaap.dmf.mr.service" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.service.impl" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.resources" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.resources.streamReaders" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.backends" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.backends.kafka" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.backends.memory" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.beans" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.constants" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.exception" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.listener" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metrics.publisher" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.metrics.publisher.impl" level="INFO" />
+
+
+
+ <logger name="org.onap.dmaap.dmf.mr.security" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.security.impl" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.transaction" level="INFO" />
+ <logger name="com.att.dmf.mr.transaction.impl" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+ <logger name="org.onap.dmaap.dmf.mr.metabroker" level="INFO" />
+
+ <logger name="org.onap.dmaap.dmf.mr.utils" level="INFO" />
+ <logger name="org.onap.dmaap.mr.filter" level="INFO" />
+
+ <!--<logger name="com.att.nsa.cambria.*" level="INFO" />-->
+
+ <!-- Msgrtr loggers in ajsc -->
+ <logger name="org.onap.dmaap.service" level="INFO" />
+ <logger name="org.onap.dmaap" level="INFO" />
+
+
+ <!-- Spring related loggers -->
+ <logger name="org.springframework" level="WARN" additivity="false"/>
+ <logger name="org.springframework.beans" level="WARN" additivity="false"/>
+ <logger name="org.springframework.web" level="WARN" additivity="false" />
+ <logger name="com.blog.spring.jms" level="WARN" additivity="false" />
+
+ <!-- AJSC Services (bootstrap services) -->
+ <logger name="ajsc" level="WARN" additivity="false"/>
+ <logger name="ajsc.RouteMgmtService" level="INFO" additivity="false"/>
+ <logger name="ajsc.ComputeService" level="INFO" additivity="false" />
+ <logger name="ajsc.VandelayService" level="WARN" additivity="false"/>
+ <logger name="ajsc.FilePersistenceService" level="WARN" additivity="false"/>
+ <logger name="ajsc.UserDefinedJarService" level="WARN" additivity="false" />
+ <logger name="ajsc.UserDefinedBeansDefService" level="WARN" additivity="false" />
+ <logger name="ajsc.LoggingConfigurationService" level="WARN" additivity="false" />
+
+ <!-- AJSC related loggers (DME2 Registration, csi logging, restlet, servlet
+ logging) -->
+ <logger name="ajsc.utils" level="WARN" additivity="false"/>
+ <logger name="ajsc.utils.DME2Helper" level="INFO" additivity="false" />
+ <logger name="ajsc.filters" level="DEBUG" additivity="false" />
+ <logger name="ajsc.beans.interceptors" level="DEBUG" additivity="false" />
+ <logger name="ajsc.restlet" level="DEBUG" additivity="false" />
+ <logger name="ajsc.servlet" level="DEBUG" additivity="false" />
+ <logger name="com.att" level="WARN" additivity="false" />
+ <logger name="com.att.ajsc.csi.logging" level="WARN" additivity="false" />
+ <logger name="com.att.ajsc.filemonitor" level="WARN" additivity="false"/>
+
+ <logger name="com.att.nsa.dmaap.util" level="INFO" additivity="false"/>
+ <logger name="com.att.cadi.filter" level="INFO" additivity="false" />
+
+
+ <!-- Other Loggers that may help troubleshoot -->
+ <logger name="net.sf" level="WARN" additivity="false" />
+ <logger name="org.apache.commons.httpclient" level="WARN" additivity="false"/>
+ <logger name="org.apache.commons" level="WARN" additivity="false" />
+ <logger name="org.apache.coyote" level="WARN" additivity="false"/>
+ <logger name="org.apache.jasper" level="WARN" additivity="false"/>
+
+ <!-- Camel Related Loggers (including restlet/servlet/jaxrs/cxf logging.
+ May aid in troubleshooting) -->
+ <logger name="org.apache.camel" level="WARN" additivity="false" />
+ <logger name="org.apache.cxf" level="WARN" additivity="false" />
+ <logger name="org.apache.camel.processor.interceptor" level="WARN" additivity="false"/>
+ <logger name="org.apache.cxf.jaxrs.interceptor" level="WARN" additivity="false" />
+ <logger name="org.apache.cxf.service" level="WARN" additivity="false" />
+ <logger name="org.restlet" level="DEBUG" additivity="false" />
+ <logger name="org.apache.camel.component.restlet" level="DEBUG" additivity="false" />
+ <logger name="org.apache.kafka" level="DEBUG" additivity="false" />
+ <logger name="org.apache.zookeeper" level="INFO" additivity="false" />
+ <logger name="org.I0Itec.zkclient" level="DEBUG" additivity="false" />
+
+ <!-- logback internals logging -->
+ <logger name="ch.qos.logback.classic" level="INFO" additivity="false"/>
+ <logger name="ch.qos.logback.core" level="INFO" additivity="false" />
+
+ <!-- logback jms appenders & loggers definition starts here -->
+ <!-- logback jms appenders & loggers definition starts here -->
+ <appender name="auditLogs" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <appender name="perfLogs" class="ch.qos.logback.core.ConsoleAppender">
+ <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+ </filter>
+ <encoder>
+ <pattern>"%d [%thread] %-5level %logger{1024} - %msg%n"</pattern>
+ </encoder>
+ </appender>
+ <appender name="ASYNC-audit" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>1000</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Audit-Record-Queue" />
+ </appender>
+
+ <logger name="AuditRecord" level="INFO" additivity="FALSE">
+ <appender-ref ref="STDOUT" />
+ </logger>
+ <logger name="AuditRecord_DirectCall" level="INFO" additivity="FALSE">
+ <appender-ref ref="STDOUT" />
+ </logger>
+ <appender name="ASYNC-perf" class="ch.qos.logback.classic.AsyncAppender">
+ <queueSize>1000</queueSize>
+ <discardingThreshold>0</discardingThreshold>
+ <appender-ref ref="Performance-Tracker-Queue" />
+ </appender>
+ <logger name="PerfTrackerRecord" level="INFO" additivity="FALSE">
+ <appender-ref ref="ASYNC-perf" />
+ <appender-ref ref="perfLogs" />
+ </logger>
+ <!-- logback jms appenders & loggers definition ends here -->
+
+ <root level="DEBUG">
+ <appender-ref ref="DEBUG" />
+ <appender-ref ref="ERROR" />
+ <appender-ref ref="INFO" />
+ <appender-ref ref="STDOUT" />
+ </root>
+
+</configuration>
diff --git a/csit/scripts/sdnr/docker-compose/nts-networkfunctions-launch.sh b/csit/scripts/sdnr/docker-compose/nts-networkfunctions-launch.sh
new file mode 100755
index 00000000..1f0939c6
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/nts-networkfunctions-launch.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+# *******************************************************************************
+# * ============LICENSE_START========================================================================
+# * Copyright (C) 2021 highstreet technologies GmbH Intellectual Property. All rights reserved.
+# * =================================================================================================
+# * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+# * in compliance with the License. You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software distributed under the License
+# * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+# * or implied. See the License for the specific language governing permissions and limitations under
+# * the License.
+# * ============LICENSE_END==========================================================================
+
+set -o xtrace
+csvfile=$1
+export DOCKER_ENGINE_VERSION=$(docker version --format '{{.Server.APIVersion}}')
+
+CUR_PATH="`dirname \"$0\"`" # relative path
+CUR_PATH="`( cd \"$CUR_PATH\" && pwd )`" # absolutized and normalized
+if [ -z "$CUR_PATH" ] ; then
+ echo "Permission error!"
+ exit 1
+fi
+
+# define location of workspace based on where the current script is
+WORKSPACE=$(cd $CUR_PATH/../../../ && pwd)
+if [ $# -lt 1 ]; then
+ echo "No arguments provided. Using default 'nts-networkfunctions.csv'"
+ csvfile="$CUR_PATH/nts-networkfunctions.csv"
+fi
+
+firstline=0
+# read each line of nts-networkfunctions.csv and put it into the corresponding variables
+while IFS=',' read NAME NTS_NF_DOCKER_REPOSITORY NTS_NF_IMAGE_NAME NTS_NF_IMAGE_TAG NTS_NF_IP NTS_HOST_NETCONF_SSH_BASE_PORT NTS_HOST_NETCONF_TLS_BASE_PORT NTS_NF_SSH_CONNECTIONS NTS_NF_TLS_CONNECTIONS; do
+ if [ $firstline -eq 0 ]; then
+ firstline=1
+ continue
+ fi
+ if [ -n "${NTS_NF_GLOBAL_TAG}" ]; then
+ NTS_NF_IMAGE_TAG=${NTS_NF_GLOBAL_TAG}
+ fi
+ if [[ -z ${USE_DEFAULT_REPO} ]]; then
+ export NTS_NF_DOCKER_REPOSITORY=$NTS_NF_DOCKER_REPOSITORY
+ fi
+ export NTS_NF_IMAGE_NAME=$NTS_NF_IMAGE_NAME
+ export NTS_NF_IMAGE_TAG=$NTS_NF_IMAGE_TAG
+ export NTS_NF_IP=$NTS_NF_IP
+ export NTS_HOST_NETCONF_SSH_BASE_PORT=$NTS_HOST_NETCONF_SSH_BASE_PORT
+ export NTS_HOST_NETCONF_TLS_BASE_PORT=$NTS_HOST_NETCONF_TLS_BASE_PORT
+ export NTS_HOST_NETCONF_SSH_BASE_PORT_PLUS_SSH_CON=$(expr $NTS_HOST_NETCONF_SSH_BASE_PORT + $NTS_NF_SSH_CONNECTIONS - 1)
+ export NTS_HOST_NETCONF_TLS_BASE_PORT_PLUS_TLS_CON=$(expr $NTS_HOST_NETCONF_TLS_BASE_PORT + $NTS_NF_TLS_CONNECTIONS - 1)
+ EXPOSE_PORT=830
+ export EXPOSE_PORT_SSH=$EXPOSE_PORT
+ EXPOSE_PORT=$(expr $EXPOSE_PORT + $NTS_NF_SSH_CONNECTIONS)
+ export EXPOSE_PORT_SSH_PLUS_CON=$(expr $EXPOSE_PORT - 1)
+ export EXPOSE_PORT_TLS=$EXPOSE_PORT
+ EXPOSE_PORT=$(expr $EXPOSE_PORT + $NTS_NF_TLS_CONNECTIONS)
+ export EXPOSE_PORT_TLS_PLUS_CON=$(expr $EXPOSE_PORT - 1)
+ export NTS_NF_CONTAINER_NAME=$NAME
+ export NTS_NF_SSH_CONNECTIONS=$NTS_NF_SSH_CONNECTIONS
+ export NTS_NF_TLS_CONNECTIONS=$NTS_NF_TLS_CONNECTIONS
+
+ SCRIPTDIR=${CUR_PATH}/$NAME/scripts
+ export SCRIPTDIR=$SCRIPTDIR
+
+ mkdir -p $SCRIPTDIR
+
+ docker-compose -p ${NAME} --env-file $CUR_PATH/.env -f $CUR_PATH/docker-compose-nts-networkfunction.yaml up -d
+done <$csvfile
+docker ps -a --format "table |{{.Names}}\t|{{.Image}}\t|{{printf \"%.70s\" .Ports}}|"| { head -1; sort --field-separator='|' -k 4;}
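Standalone usage of the script above (outside setup.sh) would look roughly like this; the CSV path is the one added by this patch:

    ./csit/scripts/sdnr/docker-compose/nts-networkfunctions-launch.sh \
        csit/plans/sdnr/testdata/nts-networkfunctions.csv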
diff --git a/csit/scripts/sdnr/docker-compose/sdnr/certs/certs.properties b/csit/scripts/sdnr/docker-compose/sdnr/certs/certs.properties
new file mode 100644
index 00000000..32373a4d
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/sdnr/certs/certs.properties
@@ -0,0 +1,2 @@
+keys0.zip
+***********
diff --git a/csit/scripts/sdnr/docker-compose/sdnr/certs/keys0.zip b/csit/scripts/sdnr/docker-compose/sdnr/certs/keys0.zip
new file mode 100644
index 00000000..588315f2
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/sdnr/certs/keys0.zip
Binary files differ
diff --git a/csit/scripts/sdnr/docker-compose/sdnr/mountpoint-registrar.properties b/csit/scripts/sdnr/docker-compose/sdnr/mountpoint-registrar.properties
new file mode 100644
index 00000000..df0b562e
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/sdnr/mountpoint-registrar.properties
@@ -0,0 +1,43 @@
+[general]
+dmaapEnabled=true
+baseUrl=http://localhost:8181
+sdnrUser=admin
+sdnrPasswd=${ODL_ADMIN_PASSWORD}
+
+[fault]
+TransportType=HTTPNOAUTH
+Protocol=http
+username=${DMAAP_FAULT_TOPIC_USERNAME}
+password=${DMAAP_FAULT_TOPIC_PASSWORD}
+host=onap-dmaap:3904
+topic=unauthenticated.SEC_FAULT_OUTPUT
+contenttype=application/json
+group=myG
+id=C1
+timeout=20000
+limit=10000
+fetchPause=5000
+jersey.config.client.readTimeout=25000
+jersey.config.client.connectTimeout=25000
+jersey.config.client.proxy.username=${HTTP_PROXY_USERNAME}
+jersey.config.client.proxy.password=${HTTP_PROXY_PASSWORD}
+jersey.config.client.proxy.uri=${HTTP_PROXY_URI}
+
+[pnfRegistration]
+TransportType=HTTPNOAUTH
+Protocol=http
+username=${DMAAP_PNFREG_TOPIC_USERNAME}
+password=${DMAAP_PNFREG_TOPIC_PASSWORD}
+host=onap-dmaap:3904
+topic=unauthenticated.VES_PNFREG_OUTPUT
+contenttype=application/json
+group=myG
+id=C1
+timeout=20000
+limit=10000
+fetchPause=5000
+jersey.config.client.readTimeout=25000
+jersey.config.client.connectTimeout=25000
+jersey.config.client.proxy.username=${HTTP_PROXY_USERNAME}
+jersey.config.client.proxy.password=${HTTP_PROXY_PASSWORD}
+jersey.config.client.proxy.uri=${HTTP_PROXY_URI} \ No newline at end of file
diff --git a/csit/scripts/sdnr/docker-compose/vesc/collector.properties b/csit/scripts/sdnr/docker-compose/vesc/collector.properties
new file mode 100644
index 00000000..fd9bce5c
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/vesc/collector.properties
@@ -0,0 +1,64 @@
+###############################################################################
+##
+## Collector Server config
+##
+## - Default values are shown as commented settings.
+##
+###############################################################################
+##
+###############################################################################
+##
+## HTTP(S) service
+##
+## Normally:
+##
+## - 8080 is http service
+## - https is disabled by default
+##
+## - At this time, the server always binds to 0.0.0.0
+##
+##
+#collector.service.port=8080
+## Authentication is only supported via secure port
+## When enabled - require valid keystore defined
+collector.service.secure.port=8443
+
+# auth.method flags:
+#
+# noAuth - default option - no security (http)
+# certBasicAuth - auth by certificate and basic auth username / password (https)
+auth.method=certBasicAuth
+
+## Combination of userid,hashPassword encoded pwd list to be supported
+## userid and pwd comma separated; pipe delimitation between each pair
+## Passwords are generated with the crypt-password library (BCrypt algorithm) from the dcaegen2/sdk package
+## or https://nexus.onap.org/#nexus-search;quick~crypt-password
+header.authlist=sample1,$2a$10$0buh.2WeYwN868YMwnNNEuNEAMNYVU9.FSMJGyIKV3dGET/7oGOi6
+
+## The keystore must be setup per installation when secure port is configured
+collector.keystore.file.location=etc/keystore
+collector.keystore.passwordfile=etc/passwordfile
+
+collector.cert.subject.matcher=etc/certSubjectMatcher.properties
+
+## The truststore must be setup per installation when mutual tls support is configured
+collector.truststore.file.location=etc/truststore
+collector.truststore.passwordfile=etc/trustpasswordfile
+
+## Schema Validation checkflag
+## default no validation checkflag (-1)
+## If enabled (1) - schemafile location must be specified
+collector.schema.checkflag=1
+collector.schema.file={\"v1\":\"./etc/CommonEventFormat_27.2.json\",\"v2\":\"./etc/CommonEventFormat_27.2.json\",\"v3\":\"./etc/CommonEventFormat_27.2.json\",\"v4\":\"./etc/CommonEventFormat_27.2.json\",\"v5\":\"./etc/CommonEventFormat_28.4.1.json\",\"v7\":\"./etc/CommonEventFormat_30.2.1_ONAP.json\"}
+
+## List all streamid per domain to be supported. The streamid should match to channel name on dmaapfile
+collector.dmaap.streamid=fault=ves-fault|syslog=ves-syslog|heartbeat=ves-heartbeat|measurementsForVfScaling=ves-measurement|mobileFlow=ves-mobileflow|other=ves-other|stateChange=ves-statechange|thresholdCrossingAlert=ves-thresholdCrossingAlert|voiceQuality=ves-voicequality|sipSignaling=ves-sipsignaling|notification=ves-notification|pnfRegistration=ves-pnfRegistration
+collector.dmaapfile=./etc/DmaapConfig.json
+
+## Event transformation Flag - when set expects configurable transformation
+## defined under ./etc/eventTransform.json
+## Enabled by default; to disable set to 0
+event.transform.flag=1
+
+# Describes at what frequency (in minutes) the application should try to fetch config from CBS
+collector.dynamic.config.update.frequency=5
diff --git a/csit/scripts/sdnr/docker-compose/zk/zk_server_jaas.conf b/csit/scripts/sdnr/docker-compose/zk/zk_server_jaas.conf
new file mode 100644
index 00000000..26bf4601
--- /dev/null
+++ b/csit/scripts/sdnr/docker-compose/zk/zk_server_jaas.conf
@@ -0,0 +1,4 @@
+Server {
+ org.apache.zookeeper.server.auth.DigestLoginModule required
+ user_kafka=kafka_secret;
+}; \ No newline at end of file
diff --git a/csit/scripts/sdnr/sdnr-launch.sh b/csit/scripts/sdnr/sdnr-launch.sh
new file mode 100755
index 00000000..e79a6ada
--- /dev/null
+++ b/csit/scripts/sdnr/sdnr-launch.sh
@@ -0,0 +1,152 @@
+#!/bin/bash
+#
+# ============LICENSE_START=======================================================
+# ONAP : ccsdk feature sdnr wt
+# ================================================================================
+# Copyright (C) 2021 highstreet technologies GmbH Intellectual Property.
+# All rights reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+#
+docker version
+docker-compose version
+# update installed docker compose version
+sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
+sudo chmod +x /usr/local/bin/docker-compose
+sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
+which docker-compose
+docker version
+docker-compose version
+# Workaround: no space left on device.
+docker system prune -f -a
+
+if [[ -z $WORKSPACE ]]; then
+ CUR_PATH="`dirname \"$0\"`" # relative path
+ CUR_PATH="`( cd \"$CUR_PATH\" && pwd )`" # absolutized and normalized
+ if [ -z "$CUR_PATH" ] ; then
+ echo "Permission error!"
+ exit 1
+ fi
+
+    # define location of workspace based on where the current script is
+ WORKSPACE=$(cd $CUR_PATH/../../ && pwd)
+fi
+
+if [[ -z $SCRIPTS ]]; then
+ SCRIPTS=$(cd $WORKSPACE/scripts && pwd)
+fi
+
+HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $7}')
+SDNC_WEB_PORT=${SDNC_WEB_PORT:-8282}
+
+env_file="--env-file ${SCRIPTS}/sdnr/docker-compose/.env"
+echo $env_file
+
+# Define sdnrdb type
+# default: ESDB
+# alternative: MARIADB
+SDNRDB_TYPE="${SDNRDB_TYPE:-ESDB}"
+if [[ "$SDNRDB_TYPE" == "ESDB" ]]; then
+ sdnrdb_compose_file="docker-compose-sdnrdb-elasticsearch.yaml"
+else
+ sdnrdb_compose_file="docker-compose-sdnrdb-mariadb.yaml"
+fi
+docker ps -a
+
+function onap_dependent_components_launch() {
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml pull
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-onap-addons.yaml up -d
+}
+function netconfserver_simulator_launch() {
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-netconfserver-simulator.yaml pull
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-netconfserver-simulator.yaml up -d
+}
+
+function nts_manager_launch() {
+ # starts all ntsim managers defined in the csv file
+ ${SCRIPTS}/sdnr/docker-compose/nts-manager-launch.sh $1
+}
+
+function nts_networkfunctions_launch() {
+    # starts all ntsim network functions defined in the csv file
+ ${SCRIPTS}/sdnr/docker-compose/nts-networkfunctions-launch.sh $1
+}
+
+
+function sdnr_launch() {
+ #if [ -n "${CALLHOME}" ] ; then
+ #sdnrwtbootfeatures="-e SDNRWT_BOOTFEATURES=odl-netconf-callhome-ssh,sdnr-wt-feature-aggregator "
+ #callhomeport="-p ${CALL_HOME_PORT}:6666 "
+ #fi
+ if [ $SDNR_CLUSTER_MODE == "true" ]; then
+ sdnr_launch_cluster $1
+ else
+ sdnr_launch_single_node $1
+ fi
+ cd $WORKSPACE
+ ./getAllInfo.sh -c sdnr -kp
+}
+
+
+function sdnr_launch_single_node() {
+
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/$sdnrdb_compose_file \
+ pull
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/$sdnrdb_compose_file \
+ up -d
+ for i in {1..50}; do
+ curl -sS -m 1 -D - ${HOST_IP}:8181/ready | grep 200 && break
+ echo sleep $i
+ sleep $i
+ if [ $i == 50 ]; then
+ echo "[ERROR] SDNC/R container not ready"
+ docker ps -a
+ # exit 1
+ fi
+ done
+}
+
+function sdnr_web_launch() {
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/$sdnrdb_compose_file \
+ pull
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr-web.override.yaml \
+ -f ${WORKSPACE}/scripts/sdnr/docker-compose/$sdnrdb_compose_file \
+ up -d
+ for i in {1..50}; do
+ curl -sS -m 1 -D - ${HOST_IP}:${SDNC_WEB_PORT}/ready | grep 200 && break
+ echo sleep $i
+ sleep $i
+ done
+}
+
+function sdnr_launch_cluster() {
+ # source ${SCRIPTS}/sdnr/sdnrEnv_Cluster.sh
+ SDNRDM="false"
+ [[ -n "$1" ]] && SDNRDM="true" && echo "SDNRDM arg detected - running in headless mode"
+ echo "SDNR being launched in Cluster mode"
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose/cluster-sdnr.yaml pull
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose/cluster-sdnr.yaml up -d
+ # Wait for initialization of docker services. At the moment its the master SDNR node
+ HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $7}')
+ for i in {1..50}; do
+ curl -sS -m 1 -D - ${HOST_IP}:${ODLUXPORT}/ready | grep 200 && break
+ echo sleep $i
+ sleep $i
+ done
+}
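For local debugging the functions above can be used the same way setup.sh does. A minimal sketch, assuming WORKSPACE points at the csit/ directory of the checkout:

    export WORKSPACE=$(pwd)/csit
    source ${WORKSPACE}/scripts/sdnr/sdnr-launch.sh
    onap_dependent_components_launch
    sdnr_launch                # single-node unless SDNR_CLUSTER_MODE=true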
diff --git a/csit/scripts/sdnr/sdnr-teardown.sh b/csit/scripts/sdnr/sdnr-teardown.sh
new file mode 100755
index 00000000..eeb729e0
--- /dev/null
+++ b/csit/scripts/sdnr/sdnr-teardown.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# Copyright 2016-2017 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modifications copyright (c) 2018 AT&T Intellectual Property
+#
+
+if [[ -z $WORKSPACE ]]; then
+ CUR_PATH="`dirname \"$0\"`" # relative path
+ CUR_PATH="`( cd \"$CUR_PATH\" && pwd )`" # absolutized and normalized
+ if [ -z "$CUR_PATH" ] ; then
+ echo "Permission error!"
+ exit 1
+ fi
+
+    # define location of workspace based on where the current script is
+ WORKSPACE=$(cd $CUR_PATH/../../ && pwd)
+fi
+
+if [[ -z $SCRIPTS ]]; then
+ SCRIPTS=$(cd $WORKSPACE/scripts && pwd)
+fi
+
+source ${SCRIPTS}/sdnr/sdnrEnv_Common.sh
+env_file="--env-file ${SCRIPTS}/sdnr/docker-compose/.env"
+
+function sdnr_teardown() {
+ running_containers=$(docker ps -aq)
+ if [ -z "$running_containers" ]
+ then
+ echo "No containers to get logs from!"
+ else
+ echo "Getting logs from containers!"
+ running_containers_array=($(echo "$running_containers" | tr ' ' '\n'))
+ mkdir -p ${WORKSPACE}/archives/getallinfo
+ for i in "${running_containers_array[@]}"
+ do
+ echo "Getting logs from container $i"
+ docker logs $i >> ${WORKSPACE}/archives/getallinfo/$i.log 2>&1
+ done
+ fi
+ echo "Starting teardown!"
+ # removes sdnrdb, sdnr AND all of the rest of the containers (--remove-orphans)
+ docker rm -f $(docker ps -aq -f name=ntsim*)
+ docker rm -f $(docker ps -aq -f name=nts-*)
+ docker rm -f $(docker ps -aq -f name=NTS_Manager*)
+ docker rm -f $(docker ps -aq -f name=NTS-Manager*)
+ docker-compose $env_file -f ${WORKSPACE}/scripts/sdnr/docker-compose/docker-compose-single-sdnr.yaml down --remove-orphans
+ docker network rm integration
+}
diff --git a/csit/tests/sdnr/functional/dummy.robot b/csit/tests/sdnr/functional/dummy.robot
new file mode 100644
index 00000000..13687861
--- /dev/null
+++ b/csit/tests/sdnr/functional/dummy.robot
@@ -0,0 +1,10 @@
+*** Settings ***
+Documentation Dummy test case for debugging purposes
+
+Library BuiltIn
+
+
+*** Test Cases ***
+Dummy Test
+ [Documentation] dummy test case passing by intention
+ Pass Execution Passed dummy Test for setup debugging purposes
diff --git a/csit/tests/sdnr/healthcheck/20_healthcheckSUT.robot b/csit/tests/sdnr/healthcheck/20_healthcheckSUT.robot
new file mode 100644
index 00000000..dd3364c7
--- /dev/null
+++ b/csit/tests/sdnr/healthcheck/20_healthcheckSUT.robot
@@ -0,0 +1,39 @@
+*** Settings ***
+Documentation healthcheck of system under test: sdnc server, sdnrdb are available
+Library ConnectLibrary
+Library SDNCBaseLibrary
+Library Collections
+Library ElasticSearchLibrary
+Library ConnectApp
+Library RequestsLibrary
+
+Suite Setup global suite setup &{GLOBAL_SUITE_SETUP_CONFIG}
+Suite Teardown global suite teardown
+
+*** Variables ***
+&{headers} Content-Type=application/json Authorization=Basic
+
+*** Test Cases ***
+Test Is SDNR Node Available
+ ${server_status}= server is ready ${SDNR_PROTOCOL}${SDNR_HOST} ${SDNR_PORT}
+ should be true ${server_status}
+
+Test Is SDNRDB Available
+ ${es_version_info}= get elastic search version info as dict
+ ${length_of_response}= get length ${es_version_info}
+ should be true ${length_of_response}>${0}
+
+Test Is SDNRDB Initialized
+ ${res}= check aliases
+ Log ${res} level=INFO html=False console=False repr=False
+ Run Keyword If not ${res} Fatal Error
+
+Test Is VES Collector available
+ # curl -k -u sample1:sample1 https://172.40.0.1:8443
+ ${auth}= Create List ${VESCOLLECTOR}[USERNAME] ${VESCOLLECTOR}[PASSWORD]
+ RequestsLibrary.Create Session alias=ves url=${VESCOLLECTOR}[SCHEME]://${VESCOLLECTOR}[IP]:${VESCOLLECTOR}[PORT] headers=${headers} auth=${auth}
+ ${resp}= RequestsLibrary.GET On Session ves /
+ Should Be Equal As Strings ${resp.text} Welcome to VESCollector
+ Should Be Equal As Strings ${resp.status_code} 200
+ RequestsLibrary.Delete All Sessions
+