Diffstat (limited to 'test/csit'): 59 files changed, 7211 insertions, 56 deletions
diff --git a/test/csit/plans/appc/healthcheck/bundle_query.sh b/test/csit/plans/appc/healthcheck/bundle_query.sh index 7224ae9c5..f163ce5c2 100755 --- a/test/csit/plans/appc/healthcheck/bundle_query.sh +++ b/test/csit/plans/appc/healthcheck/bundle_query.sh @@ -24,8 +24,8 @@ failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current echo "There are $num_failed_bundles failed bundles out of $num_bundles installed bundles." -if [ "$num_failed_bundles" -ge 1 ] || [ $num_bundles -le 393 ]; then - echo "There are $num_bundles bundles out of 394 with $num_failed_bundles in a failed state. " +if [ "$num_failed_bundles" -ge 1 ] || [ $num_bundles -le 400 ]; then + echo "There are $num_bundles bundles with $num_failed_bundles in a failed state. " echo "The following bundle(s) are in a failed state: " echo " $failed_bundles" exit 1; diff --git a/test/csit/plans/appc/healthcheck/db_query.sh b/test/csit/plans/appc/healthcheck/db_query.sh index 87e0ac397..70829a13a 100755 --- a/test/csit/plans/appc/healthcheck/db_query.sh +++ b/test/csit/plans/appc/healthcheck/db_query.sh @@ -41,20 +41,20 @@ else exit 1; fi -if [ "$NODE_TYPES" -eq "0" ]; then - echo "There is no data in table NODE_TYPES. " - exit 1; -fi +#if [ "$NODE_TYPES" -eq "0" ]; then +# echo "There is no data in table NODE_TYPES. " +# exit 1; +#fi -if [ "$SVC_LOGIC" -eq "0" ] ; then - echo "There is no data in table SVC_LOGIC. " - exit 1; -fi +#if [ "$SVC_LOGIC" -eq "0" ] ; then +# echo "There is no data in table SVC_LOGIC. " +# exit 1; +#fi -if [ "$VNF_DG_MAPPING" -eq "0" ]; then - echo "There is no data in table VNF_DG_MAPPING. " - exit 1; -fi +#if [ "$VNF_DG_MAPPING" -eq "0" ]; then +# echo "There is no data in table VNF_DG_MAPPING. " +# exit 1; +#fi echo "Expected table data is present." 
exit 0 ) diff --git a/test/csit/plans/appc/healthcheck/setup.sh b/test/csit/plans/appc/healthcheck/setup.sh index 3c57cefac..eaf488a65 100755 --- a/test/csit/plans/appc/healthcheck/setup.sh +++ b/test/csit/plans/appc/healthcheck/setup.sh @@ -20,6 +20,12 @@ SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" source ${WORKSPACE}/test/csit/scripts/appc/script1.sh +export NEXUS_USERNAME=docker +export NEXUS_PASSWD=docker +export NEXUS_DOCKER_REPO=nexus3.onap.org:10001 +export DMAAP_TOPIC=AUTO +export DOCKER_IMAGE_VERSION=1.1.0-SNAPSHOT-latest + export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1) if [ "$MTU" == "" ]; then @@ -36,14 +42,14 @@ git pull unset http_proxy https_proxy cd $WORKSPACE/archives/appc/docker-compose -sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="AUTO"/g" docker-compose.yml -docker login -u docker -p docker nexus3.onap.org:10001 +sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="$DMAAP_TOPIC"/g" docker-compose.yml +docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO -docker pull nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest -docker tag nexus3.onap.org:10001/openecomp/appc-image:1.1-STAGING-latest openecomp/appc-image:latest +docker pull $NEXUS_DOCKER_REPO/openecomp/appc-image:$DOCKER_IMAGE_VERSION +docker tag $NEXUS_DOCKER_REPO/openecomp/appc-image:$DOCKER_IMAGE_VERSION openecomp/appc-image:latest -docker pull nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest -docker tag nexus3.onap.org:10001/openecomp/dgbuilder-sdnc-image:1.1-STAGING-latest openecomp/dgbuilder-sdnc-image:latest +docker pull $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:latest +docker tag $NEXUS_DOCKER_REPO/onap/ccsdk-dgbuilder-image:latest onap/ccsdk-dgbuilder-image:latest # start APPC containers with docker compose and configuration from docker-compose.yml docker-compose up -d @@ -71,7 +77,7 @@ fi #sleep 800 -TIME_OUT=1500 +TIME_OUT=1000 INTERVAL=60 TIME=0 while [ "$TIME" -lt "$TIME_OUT" ]; do @@ -79,7 +85,7 @@ while [ "$TIME" -lt "$TIME_OUT" ]; do response=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level) num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) - if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 394 ]; then + if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 400 ]; then echo APPC karaf started in $TIME seconds break; fi @@ -96,7 +102,7 @@ fi response=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level) num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) - if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 394 ]; then + if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 400 ]; then num_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) num_failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l) failed_bundles=$(docker exec appc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure) @@ -111,3 +117,9 @@ fi # Pass any variables required by Robot test suites in ROBOT_VARIABLES ROBOT_VARIABLES="-v SCRIPTS:${SCRIPTS}" +if [ "$response" == "" ] || [ "$num_bundles" == "" ]; then + echo "Docker container 
appc_controller_container is not available. Exiting." + exit 1 +fi + + diff --git a/test/csit/plans/appc/healthcheck/testplan.txt b/test/csit/plans/appc/healthcheck/testplan.txt index 2a8c1ea84..fbf2319a7 100644 --- a/test/csit/plans/appc/healthcheck/testplan.txt +++ b/test/csit/plans/appc/healthcheck/testplan.txt @@ -1,4 +1,5 @@ # Test suites are relative paths under [integration.git]/test/csit/tests/. # Place the suites in run order. appc/healthcheck +#appc/testsuite diff --git a/test/csit/plans/cli/sanity-check/setup.sh b/test/csit/plans/cli/sanity-check/setup.sh index ca18f5176..d1d0ab6f0 100644 --- a/test/csit/plans/cli/sanity-check/setup.sh +++ b/test/csit/plans/cli/sanity-check/setup.sh @@ -17,7 +17,30 @@ # Place the scripts in run order: source ${SCRIPTS}/common_functions.sh -# Start auth +#start msb +docker run -d -p 8500:8500 --name msb_consul consul +MSB_CONSUL_IP=`get-instance-ip.sh msb_consul` +echo MSB_CONSUL_IP=${MSB_CONSUL_IP} + +docker run -d -p 10081:10081 -e CONSUL_IP=$MSB_CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery +MSB_DISCOVERY_IP=`get-instance-ip.sh msb_discovery` +echo DISCOVERY_IP=${MSB_DISCOVERY_IP} + +docker run -d -p 80:80 -e CONSUL_IP=$MSB_CONSUL_IP -e SDCLIENT_IP=$MSB_DISCOVERY_IP -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway +MSB_IAG_IP=`get-instance-ip.sh msb_internal_apigateway` +echo MSB_IAG_IP=${MSB_IAG_IP} + +# Wait for initialization(8500 Consul, 10081 Service Registration & Discovery, 80 api gateway) +for i in {1..10}; do + curl -sS -m 1 ${MSB_CONSUL_IP}:8500 && curl -sS -m 1 ${MSB_DISCOVERY_IP}:10081 && curl -sS -m 1 ${MSB_IAG_IP}:80 && break + echo sleep $i + sleep $i +done + +#Need some time for the initialization of MSB services +sleep 60 + +# Start cli docker run -d --name cli -e CLI_MODE=daemon nexus3.onap.org:10001/onap/cli:1.1-STAGING-latest # Wait for cli initialization @@ -29,4 +52,4 @@ done CLI_IP=`get-instance-ip.sh cli` # Pass any variables required by Robot test suites in ROBOT_VARIABLES -ROBOT_VARIABLES="-v CLI_IP:${CLI_IP}" +ROBOT_VARIABLES="-v CLI_IP:${CLI_IP} -v MSB_IAG_IP:${MSB_IAG_IP}" diff --git a/test/csit/plans/cli/sanity-check/teardown.sh b/test/csit/plans/cli/sanity-check/teardown.sh index 70fb6ff18..2135d001e 100644 --- a/test/csit/plans/cli/sanity-check/teardown.sh +++ b/test/csit/plans/cli/sanity-check/teardown.sh @@ -14,4 +14,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -kill-instance.sh cli
\ No newline at end of file +kill-instance.sh cli msb_consul msb_discovery msb_internal_apigateway diff --git a/test/csit/plans/dcae/testsuites/setup.sh b/test/csit/plans/dcae/testsuites/setup.sh new file mode 100755 index 000000000..d7f02bcdf --- /dev/null +++ b/test/csit/plans/dcae/testsuites/setup.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Copyright 2017 ZTE Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Place the scripts in run order: +#Make sure python-uuid is installed + + +#get current host IP addres +HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}') +echo HOST_IP=${HOST_IP} + +VESC_IMAGE=nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.1 +echo VESC_IMAGE=${VESC_IMAGE} + +# Start DCAE VES Collector +docker run -d -p 8080:8080/tcp -p 8443:8443/tcp -P --name vesc -e DMAAPHOST=${HOST_IP} ${VESC_IMAGE} + +VESC_IP=`get-instance-ip.sh vesc` +export VESC_IP=${VESC_IP} + +export ROBOT_VARIABLES="--pythonpath ${WORKSPACE}/test/csit/tests/dcae/testcases/resources" + +# Wait container ready +sleep 5 + diff --git a/test/csit/plans/dcae/testsuites/teardown.sh b/test/csit/plans/dcae/testsuites/teardown.sh new file mode 100755 index 000000000..2dc14870f --- /dev/null +++ b/test/csit/plans/dcae/testsuites/teardown.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright 2017 ZTE, Inc. and others. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +kill-instance.sh vesc
\ No newline at end of file diff --git a/test/csit/plans/dcae/testsuites/testplan.txt b/test/csit/plans/dcae/testsuites/testplan.txt new file mode 100755 index 000000000..777842a3a --- /dev/null +++ b/test/csit/plans/dcae/testsuites/testplan.txt @@ -0,0 +1,3 @@ +# Test suites are relative paths under [integration.git]/test/csit/tests/. +# Place the suites in run order. +dcae/testcases diff --git a/test/csit/plans/dmaap/mrpubsub/setup.sh b/test/csit/plans/dmaap/mrpubsub/setup.sh new file mode 100755 index 000000000..3e8950f2b --- /dev/null +++ b/test/csit/plans/dmaap/mrpubsub/setup.sh @@ -0,0 +1,95 @@ +#!/bin/bash +# +# ============LICENSE_START======================================================= +# ONAP DMAAP MR +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights +# reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END============================================ +# =================================================================== +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# +# Place the scripts in run order: +source ${SCRIPTS}/common_functions.sh + +# Clone DMaaP Message Router repo +mkdir -p $WORKSPACE/archives/dmaapmr +cd $WORKSPACE/archives/dmaapmr +#unset http_proxy https_proxy +git clone --depth 1 http://gerrit.onap.org/r/dmaap/messagerouter/messageservice -b master +git pull +cd $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose +cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/MsgRtrApi.properties /var/tmp/ + + +# start DMaaP MR containers with docker compose and configuration from docker-compose.yml +docker-compose up -d + +# Wait for initialization of Docker contaienr for DMaaP MR, Kafka and Zookeeper +for i in {1..50}; do + if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \ + [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \ + [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] + then + echo "DMaaP Service Running" + break + else + echo sleep $i + sleep $i + fi +done + + +DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1) +KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1) +ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1) + +echo DMAAP_MR_IP=${DMAAP_MR_IP} +echo KAFKA_IP=${KAFKA_IP} +echo ZOOKEEPER_IP=${ZOOKEEPER_IP} + +# Initial docker-compose up and down is for populating kafka and zookeeper IPs in /var/tmp/MsgRtrApi.properites +docker-compose down + +# Update kafkfa and zookeeper properties in MsgRtrApi.propeties which will be copied to DMaaP Container +sed -i -e 
's/<zookeeper_host>/'$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties +sed -i -e 's/<kafka_host>:<kafka_port>/'$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties + +docker-compose build +docker-compose up -d + +# Wait for initialization of Docker containers +for i in {1..50}; do + if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] && \ + [ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \ + [ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] + then + echo "DMaaP Service Running" + break + else + echo sleep $i + sleep $i + fi +done + +# Wait for initialization of docker services +for i in {1..50}; do + curl -sS -m 1 ${DMAAP_MR_IP}:3904/events/TestTopic && break + echo sleep $i + sleep $i +done + +#Pass any variables required by Robot test suites in ROBOT_VARIABLES +ROBOT_VARIABLES="-v DMAAP_MR_IP:${DMAAP_MR_IP}" diff --git a/test/csit/plans/dmaap/mrpubsub/teardown.sh b/test/csit/plans/dmaap/mrpubsub/teardown.sh new file mode 100755 index 000000000..1b4303240 --- /dev/null +++ b/test/csit/plans/dmaap/mrpubsub/teardown.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# + +kill-instance.sh dockercompose_dmaap_1 +kill-instance.sh dockercompose_kafka_1 +kill-instance.sh dockercompose_zookeeper_1 diff --git a/test/csit/plans/dmaap/mrpubsub/testplan.txt b/test/csit/plans/dmaap/mrpubsub/testplan.txt new file mode 100644 index 000000000..6a98eb790 --- /dev/null +++ b/test/csit/plans/dmaap/mrpubsub/testplan.txt @@ -0,0 +1,2 @@ +# Place the suites in run order. +dmaap/mrpubsub diff --git a/test/csit/plans/multicloud-ocata/functionality1/setup.sh b/test/csit/plans/multicloud-ocata/functionality1/setup.sh index 5630849cf..75411781e 100644 --- a/test/csit/plans/multicloud-ocata/functionality1/setup.sh +++ b/test/csit/plans/multicloud-ocata/functionality1/setup.sh @@ -20,7 +20,7 @@ source ${SCRIPTS}/common_functions.sh # start multicloud-ocata docker run -d --name multicloud-ocata nexus3.onap.org:10001/onap/multicloud/openstack-ocata SERVICE_IP=`get-instance-ip.sh multicloud-ocata` -SERVICE_PORT=9004 +SERVICE_PORT=9006 for i in {1..50}; do curl -sS ${SERVICE_IP}:${SERVICE_PORT} && break diff --git a/test/csit/plans/sdc/healthCheck/setup.sh b/test/csit/plans/sdc/healthCheck/setup.sh new file mode 100644 index 000000000..f247be656 --- /dev/null +++ b/test/csit/plans/sdc/healthCheck/setup.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# +# Place the scripts in run order: + + +source ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh + +source ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh + + +BE_IP=`get-instance-ip.sh sdc-BE` +echo BE_IP=${BE_IP} + + +# Pass any variables required by Robot test suites in ROBOT_VARIABLES +ROBOT_VARIABLES="-v BE_IP:${BE_IP}" + diff --git a/test/csit/plans/sdc/healthCheck/teardown.sh b/test/csit/plans/sdc/healthCheck/teardown.sh new file mode 100644 index 000000000..a5f69819e --- /dev/null +++ b/test/csit/plans/sdc/healthCheck/teardown.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# + +source ${WORKSPACE}/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh + +# $WORKSPACE/archives/clamp-clone deleted with archives folder when tests starts so we keep it at the end for debugging diff --git a/test/csit/plans/sdc/healthCheck/testplan.txt b/test/csit/plans/sdc/healthCheck/testplan.txt new file mode 100644 index 000000000..2b2db1ede --- /dev/null +++ b/test/csit/plans/sdc/healthCheck/testplan.txt @@ -0,0 +1,3 @@ +# Test suites are relative paths under [integration.git]/test/csit/tests/. +# Place the suites in run order. +sdc/healthCheck diff --git a/test/csit/plans/sdnc/healthcheck/health_check.sh b/test/csit/plans/sdnc/healthcheck/health_check.sh new file mode 100644 index 000000000..96c99848a --- /dev/null +++ b/test/csit/plans/sdnc/healthcheck/health_check.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +############################################################################### +# Copyright 2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+############################################################################### +SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +echo $SCRIPTS + +response=$(curl --write-out '%{http_code}' --silent --output /dev/null -H "Authorization: Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==" -X POST -H "X-FromAppId: csit-sdnc" -H "X-TransactionId: csit-sdnc" -H "Accept: application/json" -H "Content-Type: application/json" http://localhost:8282/restconf/operations/SLI-API:healthcheck ) + +if [ "$response" == "200" ]; then + echo "SDNC health check passed." + exit 0; +fi + +echo "SDNC health check failed with response code ${response}." +exit 1 diff --git a/test/csit/plans/sdnc/healthcheck/setup.sh b/test/csit/plans/sdnc/healthcheck/setup.sh new file mode 100644 index 000000000..643d54fe9 --- /dev/null +++ b/test/csit/plans/sdnc/healthcheck/setup.sh @@ -0,0 +1,118 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# +# Place the scripts in run order: +SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +source ${WORKSPACE}/test/csit/scripts/sdnc/script1.sh + +export MTU=$(/sbin/ifconfig | grep MTU | sed 's/.*MTU://' | sed 's/ .*//' | sort -n | head -1) + +if [ "$MTU" == "" ]; then + export MTU="1450" +fi + + +# Clone SDNC repo to get docker-compose for SDNC +mkdir -p $WORKSPACE/archives/sdnc +cd $WORKSPACE/archives +git clone -b master --single-branch http://gerrit.onap.org/r/sdnc/oam.git sdnc +cd $WORKSPACE/archives/sdnc +git pull +unset http_proxy https_proxy +cd $WORKSPACE/archives/sdnc/installation/src/main/yaml + +sed -i "s/DMAAP_TOPIC_ENV=.*/DMAAP_TOPIC_ENV="AUTO"/g" docker-compose.yml +docker login -u docker -p docker nexus3.onap.org:10001 + +docker pull nexus3.onap.org:10001/onap/sdnc-image:latest +docker tag nexus3.onap.org:10001/onap/sdnc-image:latest onap/sdnc-image:latest + +docker pull nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:latest +docker tag nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:latest onap/ccsdk-dgbuilder-image:latest + +docker pull nexus3.onap.org:10001/onap/admportal-sdnc-image:latest +docker tag nexus3.onap.org:10001/onap/admportal-sdnc-image:latest onap/admportal-sdnc-image:latest + +# start SDNC containers with docker compose and configuration from docker-compose.yml +curl -L https://github.com/docker/compose/releases/download/1.9.0/docker-compose-`uname -s`-`uname -m` > docker-compose +chmod +x docker-compose +./docker-compose up -d + +# WAIT 5 minutes maximum and test every 5 seconds if SDNC is up using HealthCheck API +TIME_OUT=500 +INTERVAL=30 +TIME=0 +while [ "$TIME" -lt "$TIME_OUT" ]; do + response=$(curl --write-out '%{http_code}' --silent --output /dev/null -H "Authorization: Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ==" -X POST -H "X-FromAppId: csit-sdnc" -H "X-TransactionId: csit-sdnc" -H "Accept: application/json" -H "Content-Type: 
application/json" http://localhost:8282/restconf/operations/SLI-API:healthcheck ); echo $response + + if [ "$response" == "200" ]; then + echo SDNC started in $TIME seconds + break; + fi + + echo Sleep: $INTERVAL seconds before testing if SDNC is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds + sleep $INTERVAL + TIME=$(($TIME+$INTERVAL)) +done + +if [ "$TIME" -ge "$TIME_OUT" ]; then + echo TIME OUT: Docker containers not started in $TIME_OUT seconds... Could cause problems for testing activities... +fi + +#sleep 800 + +TIME_OUT=1500 +INTERVAL=60 +TIME=0 +while [ "$TIME" -lt "$TIME_OUT" ]; do + +response=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level) +num_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) + + if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 333 ]; then + echo SDNC karaf started in $TIME seconds + break; + fi + + echo Sleep: $INTERVAL seconds before testing if SDNC is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds + sleep $INTERVAL + TIME=$(($TIME+$INTERVAL)) +done + +if [ "$TIME" -ge "$TIME_OUT" ]; then + echo TIME OUT: karaf session not started in $TIME_OUT seconds... Could cause problems for testing activities... +fi + +response=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf system:start-level) +num_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) + + if [ "$response" == "Level 100" ] && [ "$num_bundles" -ge 333 ]; then + num_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | tail -1 | cut -d\| -f1) + num_failed_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure | wc -l) + failed_bundles=$(docker exec sdnc_controller_container /opt/opendaylight/current/bin/client -u karaf bundle:list | grep Failure) + echo There is/are $num_failed_bundles failed bundles out of $num_bundles installed bundles. + fi + +if [ "$num_failed_bundles" -ge 1 ]; then + echo "The following bundle(s) are in a failed state: " + echo " $failed_bundles" +fi + +# Pass any variables required by Robot test suites in ROBOT_VARIABLES +ROBOT_VARIABLES="-v SCRIPTS:${SCRIPTS}" + diff --git a/test/csit/plans/sdnc/healthcheck/teardown.sh b/test/csit/plans/sdnc/healthcheck/teardown.sh new file mode 100644 index 000000000..4d99b9f31 --- /dev/null +++ b/test/csit/plans/sdnc/healthcheck/teardown.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# + +kill-instance.sh sdnc_controller_container +kill-instance.sh sdnc_dgbuilder_container +kill-instance.sh sdnc_portal_container +kill-instance.sh sdnc_db_container + +# $WORKSPACE/archives/appc deleted with archives folder when tests starts so we keep it at the end for debugging diff --git a/test/csit/plans/sdnc/healthcheck/testplan.txt b/test/csit/plans/sdnc/healthcheck/testplan.txt new file mode 100644 index 000000000..3fa8dde53 --- /dev/null +++ b/test/csit/plans/sdnc/healthcheck/testplan.txt @@ -0,0 +1,4 @@ +# Test suites are relative paths under [integration.git]/test/csit/tests/. +# Place the suites in run order. +sdnc/healthcheck + diff --git a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh index 382cb7a8d..64fa5a4d0 100644 --- a/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh +++ b/test/csit/plans/vfc-nfvo-driver-vnfm-svnfm/sanity-check/setup.sh @@ -64,4 +64,4 @@ for i in {1..10}; do done # Pass any variables required by Robot test suites in ROBOT_VARIABLES -ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVMANAGERDRIVER_IP:${ZTEVMANAGERDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP}" +ROBOT_VARIABLES="-v MSB_IAG_IP:${MSB_IAG_IP} -v ZTEVMANAGERDRIVER_IP:${ZTEVMANAGERDRIVER_IP} -v MSB_IP:${MSB_IAG_IP} -v SERVICE_IP:${SERVICE_IP} -v SCRIPTS:${SCRIPTS}" diff --git a/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh b/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh index 91e931e91..e04feac41 100644 --- a/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh +++ b/test/csit/plans/vnfsdk-refrepo/sanity-check/setup.sh @@ -17,9 +17,13 @@ # These scripts are sourced by run-csit.sh. +#Start postgres database +docker run -d -i -t --name=postgres -p 5432:5432 nexus3.onap.org:10001/onap/refrepo/postgres:latest + +POSTGRES=`docker inspect --format '{{ .NetworkSettings.IPAddress }}' postgres` #Start market place -docker run -d -i -t --name=refrepo -p 8702:8702 nexus3.onap.org:10001/onap/refrepo:1.0-STAGING-latest +docker run -d -i -t --name=refrepo -e POSTGRES_IP=$POSTGRES -p 8702:8702 nexus3.onap.org:10001/onap/refrepo:1.0-STAGING-latest # Wait for Market place initialization echo Wait for VNF Repository initialization diff --git a/test/csit/plans/vnfsdk-refrepo/sanity-check/teardown.sh b/test/csit/plans/vnfsdk-refrepo/sanity-check/teardown.sh index a42634303..19440bc79 100644 --- a/test/csit/plans/vnfsdk-refrepo/sanity-check/teardown.sh +++ b/test/csit/plans/vnfsdk-refrepo/sanity-check/teardown.sh @@ -18,4 +18,6 @@ kill-instance.sh refrepo +kill-instance.sh postgres + diff --git a/test/csit/run-csit.sh b/test/csit/run-csit.sh index 5c094f834..3070239b7 100755 --- a/test/csit/run-csit.sh +++ b/test/csit/run-csit.sh @@ -93,12 +93,24 @@ if ! type pybot > /dev/null; then source ${ROBOT_VENV}/bin/activate fi +# install required Robot libraries +pip install --upgrade robotframework-extendedselenium2library + # install eteutils mkdir -p ${ROBOT_VENV}/src/onap rm -rf ${ROBOT_VENV}/src/onap/testsuite git clone https://gerrit.onap.org/r/testsuite/python-testing-utils.git ${ROBOT_VENV}/src/onap/testsuite/python-testing-utils pip install --upgrade ${ROBOT_VENV}/src/onap/testsuite/python-testing-utils +# install chrome driver +if [ ! 
-x ${ROBOT_VENV}/bin/chromedriver ]; then + pushd ${ROBOT_VENV}/bin + wget -N http://chromedriver.storage.googleapis.com/2.27/chromedriver_linux64.zip + unzip chromedriver_linux64.zip + chmod +x chromedriver + popd +fi + WORKDIR=`mktemp -d --suffix=-robot-workdir` cd ${WORKDIR} @@ -109,7 +121,7 @@ set -x # Add csit scripts to PATH -export PATH=${PATH}:${WORKSPACE}/test/csit/docker/scripts:${WORKSPACE}/test/csit/scripts +export PATH=${PATH}:${WORKSPACE}/test/csit/docker/scripts:${WORKSPACE}/test/csit/scripts:${ROBOT_VENV}/bin export SCRIPTS=${WORKSPACE}/test/csit/scripts export ROBOT_VARIABLES= diff --git a/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh new file mode 100644 index 000000000..da421e4cf --- /dev/null +++ b/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# +# ============LICENSE_START======================================================= +# ONAP CLAMP +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights +# reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END============================================ +# =================================================================== +# ECOMP is a trademark and service mark of AT&T Intellectual Property. +# + +echo "This is ${WORKSPACE}/test/csit/scripts/sdc/clone_and_setup_sdc_data.sh" + +# Clone sdc enviroment template +mkdir -p ${WORKSPACE}/data/environments/ +mkdir -p ${WORKSPACE}/data/clone/ + +cd ${WORKSPACE}/data/clone +git clone --depth 1 http://gerrit.onap.org/r/sdc -b master + + +# set enviroment variables + +ENV_NAME=CSIT +MR_IP_ADDR=10.0.0.1 + +if [ -e /opt/config/public_ip.txt ] + then + IP_ADDRESS=$(cat /opt/config/public_ip.txt) + else + IP_ADDRESS=$(ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d' ' -f3 | cut -d':' -f2) + fi + + cat ${WORKSPACE}/data/clone/sdc/sdc-os-chef/environments/Template.json | sed "s/yyy/"$IP_ADDRESS"/g" > ${WORKSPACE}/data/environments/$ENV_NAME.json + sed -i "s/xxx/"$ENV_NAME"/g" ${WORKSPACE}/data/environments/$ENV_NAME.json + sed -i "s/\"ueb_url_list\":.*/\"ueb_url_list\": \""$MR_IP_ADDR","$MR_IP_ADDR"\",/g" ${WORKSPACE}/data/environments/$ENV_NAME.json + sed -i "s/\"fqdn\":.*/\"fqdn\": [\""$MR_IP_ADDR"\", \""$MR_IP_ADDR"\"]/g" ${WORKSPACE}/data/environments/$ENV_NAME.json + + diff --git a/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh new file mode 100644 index 000000000..e03284248 --- /dev/null +++ b/test/csit/scripts/sdc/kill_containers_and_remove_dataFolders.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# + +echo "This is ${WORKSPACE}/test/csit/scripts/sdc/kill_and_remove_dataFolder.sh" + +#kill and remove all sdc dockers +docker stop $(docker ps -a -q --filter="name=sdc") +docker rm $(docker ps -a -q --filter="name=sdc") + + +#delete data folder + +rm -rf ${WORKSPACE}/data/* + + diff --git a/test/csit/scripts/sdc/start_sdc_containers.sh b/test/csit/scripts/sdc/start_sdc_containers.sh new file mode 100644 index 000000000..31105acb0 --- /dev/null +++ b/test/csit/scripts/sdc/start_sdc_containers.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# +# ============LICENSE_START======================================================= +# ONAP CLAMP +# ================================================================================ +# Copyright (C) 2017 AT&T Intellectual Property. All rights +# reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============LICENSE_END============================================ +# =================================================================== +# ECOMP is a trademark and service mark of AT&T Intellectual Property. 
+# + +echo "This is ${WORKSPACE}/test/csit/scripts/sdc/start_sdc_containers.sh" + + +RELEASE=latest +LOCAL=false +SKIPTESTS=false +DEP_ENV=CSIT +#[ -f /opt/config/nexus_username.txt ] && NEXUS_USERNAME=$(cat /opt/config/nexus_username.txt) || NEXUS_USERNAME=release +#[ -f /opt/config/nexus_password.txt ] && NEXUS_PASSWD=$(cat /opt/config/nexus_password.txt) || NEXUS_PASSWD=sfWU3DFVdBr7GVxB85mTYgAW +#[ -f /opt/config/nexus_docker_repo.txt ] && NEXUS_DOCKER_REPO=$(cat /opt/config/nexus_docker_repo.txt) || NEXUS_DOCKER_REPO=ecomp-nexus:${PORT} +#[ -f /opt/config/nexus_username.txt ] && docker login -u $NEXUS_USERNAME -p $NEXUS_PASSWD $NEXUS_DOCKER_REPO +export IP=`ifconfig eth0 | awk -F: '/inet addr/ {gsub(/ .*/,"",$2); print $2}'` +#export PREFIX=${NEXUS_DOCKER_REPO}'/openecomp' +export PREFIX='nexus3.onap.org:10001/openecomp' + +#start Elastic-Search +docker run --detach --name sdc-es --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --memory 1g --memory-swap=1g --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro -e ES_HEAP_SIZE=1024M --volume ${WORKSPACE}/data/ES:/usr/share/elasticsearch/data --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9200:9200 --publish 9300:9300 ${PREFIX}/sdc-elasticsearch:${RELEASE} + +#start cassandra +docker run --detach --name sdc-cs --env RELEASE="${RELEASE}" --env ENVNAME="${DEP_ENV}" --env HOST_IP=${IP} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/CS:/var/lib/cassandra --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9042:9042 --publish 9160:9160 ${PREFIX}/sdc-cassandra:${RELEASE} + +echo "please wait while CS is starting..." +echo "" +c=120 # seconds to wait +REWRITE="\e[25D\e[1A\e[K" +while [ $c -gt 0 ]; do + c=$((c-1)) + sleep 1 + echo -e "${REWRITE}$c" +done +echo -e "" + + +#start kibana +docker run --detach --name sdc-kbn --env ENVNAME="${DEP_ENV}" --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 5601:5601 ${PREFIX}/sdc-kibana:${RELEASE} + +#start sdc-backend +docker run --detach --name sdc-BE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 4g --memory-swap=4g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/logs/BE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 8443:8443 --publish 8080:8080 ${PREFIX}/sdc-backend:${RELEASE} + +echo "please wait while BE is starting..." 
+echo "" +c=120 # seconds to wait +REWRITE="\e[45D\e[1A\e[K" +while [ $c -gt 0 ]; do + c=$((c-1)) + sleep 1 + echo -e "${REWRITE}$c" +done +echo -e "" + +#start Front-End +docker run --detach --name sdc-FE --env HOST_IP=${IP} --env ENVNAME="${DEP_ENV}" --env http_proxy=${http_proxy} --env https_proxy=${https_proxy} --env no_proxy=${no_proxy} --log-driver=json-file --log-opt max-size=100m --log-opt max-file=10 --ulimit memlock=-1:-1 --memory 2g --memory-swap=2g --ulimit nofile=4096:100000 --volume /etc/localtime:/etc/localtime:ro --volume ${WORKSPACE}/data/logs/FE/:/var/lib/jetty/logs --volume ${WORKSPACE}/data/environments:/root/chef-solo/environments --publish 9443:9443 --publish 8181:8181 ${PREFIX}/sdc-frontend:${RELEASE} + +echo "please wait while FE is starting..." +echo "" +c=120 # seconds to wait +REWRITE="\e[45D\e[1A\e[K" +while [ $c -gt 0 ]; do + c=$((c-1)) + sleep 1 + echo -e "${REWRITE}$c" +done +echo -e "" + + + + +#TIME=0 +#while [ "$TIME" -lt "$TIME_OUT" ]; do +# response=$(curl --write-out '%{http_code}' --silent --output /dev/null http://localhost:8080/restservices/clds/v1/clds/healthcheck); echo $response + +# if [ "$response" == "200" ]; then +# echo Clamp and its database well started in $TIME seconds +# break; +# fi + +# echo Sleep: $INTERVAL seconds before testing if Clamp is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds +# sleep $INTERVAL +# TIME=$(($TIME+$INTERVAL)) +#done + +#if [ "$TIME" -ge "$TIME_OUT" ]; then +# echo TIME OUT: Docker containers not started in $TIME_OUT seconds... Could cause problems for tests... +#fi diff --git a/test/csit/scripts/sdnc/script1.sh b/test/csit/scripts/sdnc/script1.sh new file mode 100644 index 000000000..b87e3c197 --- /dev/null +++ b/test/csit/scripts/sdnc/script1.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright 2016-2017 Huawei Technologies Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Modifications copyright (c) 2017 AT&T Intellectual Property +# + +echo "This is ${WORKSPACE}/test/csit/scripts/sdnc/script1.sh" diff --git a/test/csit/tests/cli/startup/startup_check.robot b/test/csit/tests/cli/startup/startup_check.robot index 053a6d115..5c0bf8b76 100644 --- a/test/csit/tests/cli/startup/startup_check.robot +++ b/test/csit/tests/cli/startup/startup_check.robot @@ -5,9 +5,18 @@ Library Process *** Variables *** ${cli_exec} docker exec cli onap -${cli_exec_onap_11} docker exec -e CLI_PRODUCT_VERSION=onap-1.1 onap -${cli_exec_onap_10} docker exec -e CLI_PRODUCT_VERSION=onap-1.0 onap -${cli_exec_cli_10} docker exec -e CLI_PRODUCT_VERSION=cli-1.0 onap +${cli_exec_cli_10_help} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap --help" +${cli_exec_cli_10_version} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap --version" +${cli_exec_cli_10_schema_refresh} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap schema-refresh" +${cli_exec_cli_10_schema_validate} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap schema-validate -i -l schema-refresh.yaml" +${cli_exec_cli_10_schema_validate_invalid} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap schema-validate -i -l invalid-yaml-path.yaml" +${cli_exec_cli_10_schema_validate_empty} docker exec cli bash -c "export CLI_PRODUCT_VERSION=cli-1.0 && onap schema-validate" + +${cli_exec_onap_11} docker exec cli bash -c "export CLI_PRODUCT_VERSION=onap-1.1 && onap" +${cli_exec_onap_11_microservice_create} docker exec cli bash -c "export CLI_PRODUCT_VERSION=onap-1.1 && onap microservice-create --service-name test-service --service-version v1 --service-url /api/test/v1 --host-url http://${MSB_IAG_IP}:80 23.14.15.156 80" +${cli_exec_onap_11_microservice_list} docker exec cli bash -c "export CLI_PRODUCT_VERSION=onap-1.1 && onap microservice-list --host-url http://${MSB_IAG_IP}:80 --long" +${cli_exec_onap_11_microservice_show} docker exec cli bash -c "export CLI_PRODUCT_VERSION=onap-1.1 && onap microservice-show --service-name test-service --service-version v1 --host-url http://${MSB_IAG_IP}:80" +${cli_exec_onap_11_microservice_delete} docker exec cli bash -c "export CLI_PRODUCT_VERSION=onap-1.1 && onap microservice-delete --service-name test-service --service-version v1 --host-url http://${MSB_IAG_IP}:80 --node-ip 23.14.15.156 --node-port 80" *** Test Cases *** Liveness Test @@ -17,21 +26,21 @@ Liveness Test Check Cli help [Documentation] check cli help command - ${cli_cmd_output}= Run Process ${cli_exec} --help shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_help} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 0 Should Contain ${cli_cmd_output.stdout} CLI version Check Cli Version Default [Documentation] check cli default version - ${cli_cmd_output}= Run Process ${cli_exec} --version shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_version} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 0 Should Contain ${cli_cmd_output.stdout} : cli-1.0 Check Cli Scheam Refresh [Documentation] check cli schema-refresh command - ${cli_cmd_output}= Run Process ${cli_exec} schema-refresh shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_schema_refresh} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 0 Should Contain ${cli_cmd_output.stdout} sl-no @@ -42,7 +51,7 @@ Check Cli Scheam Refresh Check Cli Schema Validate With 
Valid Path [Documentation] check cli schema-validate command with valid path - ${cli_cmd_output}= Run Process ${cli_exec} schema-validate -i -l schema-refresh.yaml shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_schema_validate} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 0 Should Contain ${cli_cmd_output.stdout} sl-no @@ -50,18 +59,43 @@ Check Cli Schema Validate With Valid Path Check Cli Scheam Validate With Invalid Path [Documentation] check cli version - ${cli_cmd_output}= Run Process ${cli_exec} schema-validate -i -l invalid-yaml-path.yaml shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_schema_validate_invalid} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 1 Should Contain ${cli_cmd_output.stdout} 0x0007 Check Cli Scheam Validate Empty Argument [Documentation] check cli schema validate with empty argument - ${cli_cmd_output}= Run Process ${cli_exec} schema-validate shell=yes + ${cli_cmd_output}= Run Process ${cli_exec_cli_10_schema_validate_empty} shell=yes Log ${cli_cmd_output.stdout} Should Be Equal As Strings ${cli_cmd_output.rc} 1 Should Contain ${cli_cmd_output.stdout} 0x0015 +Check Cli create microservice + [Documentation] check create microservice + ${cli_cmd_output}= Run Process ${cli_exec_onap_11_microservice_create} shell=yes + Log ${cli_cmd_output.stdout} + Should Be Equal As Strings ${cli_cmd_output.rc} 0 + +Check Cli list microservice + [Documentation] check list microservice + ${cli_cmd_output}= Run Process ${cli_exec_onap_11_microservice_list} shell=yes + Log ${cli_cmd_output.stdout} + Should Be Equal As Strings ${cli_cmd_output.rc} 0 + +Check Cli show microservice + [Documentation] check show microservice + ${cli_cmd_output}= Run Process ${cli_exec_onap_11_microservice_show} shell=yes + Log ${cli_cmd_output.stdout} + Should Be Equal As Strings ${cli_cmd_output.rc} 0 + +Check Cli delete microservice + [Documentation] check delete microservice + ${cli_cmd_output}= Run Process ${cli_exec_onap_11_microservice_delete} shell=yes + Log ${cli_cmd_output.stdout} + Should Be Equal As Strings ${cli_cmd_output.rc} 0 + + *** Keywords *** CheckUrl diff --git a/test/csit/tests/dcae/testcases/__init__.robot b/test/csit/tests/dcae/testcases/__init__.robot new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/test/csit/tests/dcae/testcases/__init__.robot diff --git a/test/csit/tests/dcae/testcases/assets/json_events/CommonEventFormat_28.3.json b/test/csit/tests/dcae/testcases/assets/json_events/CommonEventFormat_28.3.json new file mode 100644 index 000000000..90f6d81a5 --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/CommonEventFormat_28.3.json @@ -0,0 +1,1866 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "definitions": { + "attCopyrightNotice": { + "description": "Copyright (c) <2017>, AT&T Intellectual Property. All rights reserved. 
Licensed under the Apache License, Version 2.0 (the License)", + "type": "object", + "properties": { + "useAndRedistribution": { + "description": "You may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", + "type": "string" + }, + "licenseLink": { + "description": "http://www.apache.org/licenses/LICENSE-2.0", + "type":"string" + }, + "condition1": { + "description": "Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an AS IS BASIS,", + "type": "string" + }, + "condition2": { + "description": "Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.", + "type": "string" + }, + "condition3": { + "description": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", + "type": "string" + }, + "condition4": { + "description": "See the License for the specific language governing permissions and limitations under the License.", + "type": "string" + }, + "Trademarks": { + "description": "ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.", + "type": "string" + } + } + }, + + "codecsInUse": { + "description": "number of times an identified codec was used over the measurementInterval", + "type": "object", + "properties": { + "codecIdentifier": { "type": "string" }, + "numberInUse": { "type": "integer" } + }, + "required": [ "codecIdentifier", "numberInUse" ] + }, + "command": { + "description": "command from an event collector toward an event source", + "type": "object", + "properties": { + "commandType": { + "type": "string", + "enum": [ + "heartbeatIntervalChange", + "measurementIntervalChange", + "provideThrottlingState", + "throttlingSpecification" + ] + }, + "eventDomainThrottleSpecification": { "$ref": "#/definitions/eventDomainThrottleSpecification" }, + "heartbeatInterval": { "type": "integer" }, + "measurementInterval": { "type": "integer" } + }, + "required": [ "commandType" ] + }, + "commandList": { + "description": "array of commands from an event collector toward an event source", + "type": "array", + "items": { + "$ref": "#/definitions/command" + }, + "minItems": 0 + }, + "commonEventHeader": { + "description": "fields common to all events", + "type": "object", + "properties": { + "domain": { + "description": "the eventing domain associated with the event", + "type": "string", + "enum": [ + "fault", + "heartbeat", + "measurementsForVfScaling", + "mobileFlow", + "other", + "sipSignaling", + "stateChange", + "syslog", + "thresholdCrossingAlert", + "voiceQuality" + ] + }, + "eventId": { + "description": "event key that is unique to the event source", + "type": "string" + }, + "eventName": { + "description": "unique event name", + "type": "string" + }, + "eventType": { + "description": "for example - applicationVnf, guestOS, hostOS, platform", + "type": "string" + }, + "internalHeaderFields": { "$ref": "#/definitions/internalHeaderFields" }, + "lastEpochMicrosec": { + "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "nfcNamingCode": { + "description": "3 character network function component type, aligned with vfc naming standards", + "type": "string" + }, + "nfNamingCode": { + "description": "4 character network function type, aligned with vnf naming standards", + 
"type": "string" + }, + "priority": { + "description": "processing priority", + "type": "string", + "enum": [ + "High", + "Medium", + "Normal", + "Low" + ] + }, + "reportingEntityId": { + "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process", + "type": "string" + }, + "reportingEntityName": { + "description": "name of the entity reporting the event, for example, an EMS name; may be the same as sourceName", + "type": "string" + }, + "sequence": { + "description": "ordering of events communicated by an event source instance or 0 if not needed", + "type": "integer" + }, + "sourceId": { + "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process", + "type": "string" + }, + "sourceName": { + "description": "name of the entity experiencing the event issue", + "type": "string" + }, + "startEpochMicrosec": { + "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "version": { + "description": "version of the event header", + "type": "number" + } + }, + "required": [ "domain", "eventId", "eventName", "lastEpochMicrosec", + "priority", "reportingEntityName", "sequence", "sourceName", + "startEpochMicrosec", "version" ] + }, + "counter": { + "description": "performance counter", + "type": "object", + "properties": { + "criticality": { "type": "string", "enum": [ "CRIT", "MAJ" ] }, + "name": { "type": "string" }, + "thresholdCrossed": { "type": "string" }, + "value": { "type": "string"} + }, + "required": [ "criticality", "name", "thresholdCrossed", "value" ] + }, + "cpuUsage": { + "description": "usage of an identified CPU", + "type": "object", + "properties": { + "cpuIdentifier": { + "description": "cpu identifer", + "type": "string" + }, + "cpuIdle": { + "description": "percentage of CPU time spent in the idle task", + "type": "number" + }, + "cpuUsageInterrupt": { + "description": "percentage of time spent servicing interrupts", + "type": "number" + }, + "cpuUsageNice": { + "description": "percentage of time spent running user space processes that have been niced", + "type": "number" + }, + "cpuUsageSoftIrq": { + "description": "percentage of time spent handling soft irq interrupts", + "type": "number" + }, + "cpuUsageSteal": { + "description": "percentage of time spent in involuntary wait which is neither user, system or idle time and is effectively time that went missing", + "type": "number" + }, + "cpuUsageSystem": { + "description": "percentage of time spent on system tasks running the kernel", + "type": "number" + }, + "cpuUsageUser": { + "description": "percentage of time spent running un-niced user space processes", + "type": "number" + }, + "cpuWait": { + "description": "percentage of CPU time spent waiting for I/O operations to complete", + "type": "number" + }, + "percentUsage": { + "description": "aggregate cpu usage of the virtual machine on which the VNFC reporting the event is running", + "type": "number" + } + }, + "required": [ "cpuIdentifier", "percentUsage" ] + }, + "diskUsage": { + "description": "usage of an identified disk", + "type": "object", + "properties": { + "diskIdentifier": { + "description": "disk identifier", + "type": "string" + }, + "diskIoTimeAvg": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 
1000ms matches 100% load; provide the average over the measurement interval", + "type": "number" + }, + "diskIoTimeLast": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMax": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMin": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadAvg": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadLast": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMax": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMin": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteAvg": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteLast": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMax": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMin": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadAvg": { + "description": "number of octets per second read from a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadLast": { + "description": "number of octets per second read from a disk or partition; provide the last measurement within the 
measurement interval", + "type": "number" + }, + "diskOctetsReadMax": { + "description": "number of octets per second read from a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadMin": { + "description": "number of octets per second read from a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteAvg": { + "description": "number of octets per second written to a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteLast": { + "description": "number of octets per second written to a disk or partition; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMax": { + "description": "number of octets per second written to a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMin": { + "description": "number of octets per second written to a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadAvg": { + "description": "number of read operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadLast": { + "description": "number of read operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMax": { + "description": "number of read operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMin": { + "description": "number of read operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteAvg": { + "description": "number of write operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteLast": { + "description": "number of write operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMax": { + "description": "number of write operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMin": { + "description": "number of write operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsAvg": { + "description": "queue size of pending I/O operations per second; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsLast": { + "description": "queue size of pending I/O operations per second; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMax": { + "description": "queue size of pending I/O operations per second; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMin": { + "description": "queue size of pending I/O operations per second; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadAvg": { + "description": "milliseconds a read operation took to complete; 
provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadLast": { + "description": "milliseconds a read operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMax": { + "description": "milliseconds a read operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMin": { + "description": "milliseconds a read operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteAvg": { + "description": "milliseconds a write operation took to complete; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteLast": { + "description": "milliseconds a write operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMax": { + "description": "milliseconds a write operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMin": { + "description": "milliseconds a write operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + } + }, + "required": [ "diskIdentifier" ] + }, + "endOfCallVqmSummaries": { + "description": "provides end of call voice quality metrics", + "type": "object", + "properties": { + "adjacencyName": { + "description": " adjacency name", + "type": "string" + }, + "endpointDescription": { + "description": "Either Caller or Callee", + "type": "string", + "enum": ["Caller", "Callee"] + }, + "endpointJitter": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsSent": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsSent": { + "description": "", + "type": "number" + }, + "localJitter": { + "description": "", + "type": "number" + }, + "localRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "localRtpOctetsSent": { + "description": "", + "type": "number" + }, + "localRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "localRtpPacketsSent": { + "description": "", + "type": "number" + }, + "mosCqe": { + "description": "1-5 1dp", + "type": "number" + }, + "packetsLost": { + "description": "", + "type": "number" + }, + "packetLossPercent": { + "description" : "Calculated percentage packet loss based on Endpoint RTP packets lost (as reported in RTCP) and Local RTP packets sent. Direction is based on Endpoint description (Caller, Callee). 
Decimal (2 dp)", + "type": "number" + }, + "rFactor": { + "description": "0-100", + "type": "number" + }, + "roundTripDelay": { + "description": "millisecs", + "type": "number" + } + }, + "required": [ "adjacencyName", "endpointDescription" ] + }, + "event": { + "description": "the root level of the common event format", + "type": "object", + "properties": { + "commonEventHeader": { "$ref": "#/definitions/commonEventHeader" }, + "faultFields": { "$ref": "#/definitions/faultFields" }, + "heartbeatFields": { "$ref": "#/definitions/heartbeatFields" }, + "measurementsForVfScalingFields": { "$ref": "#/definitions/measurementsForVfScalingFields" }, + "mobileFlowFields": { "$ref": "#/definitions/mobileFlowFields" }, + "otherFields": { "$ref": "#/definitions/otherFields" }, + "sipSignalingFields": { "$ref": "#/definitions/sipSignalingFields" }, + "stateChangeFields": { "$ref": "#/definitions/stateChangeFields" }, + "syslogFields": { "$ref": "#/definitions/syslogFields" }, + "thresholdCrossingAlertFields": { "$ref": "#/definitions/thresholdCrossingAlertFields" }, + "voiceQualityFields": { "$ref": "#/definitions/voiceQualityFields" } + }, + "required": [ "commonEventHeader" ] + }, + "eventDomainThrottleSpecification": { + "description": "specification of what information to suppress within an event domain", + "type": "object", + "properties": { + "eventDomain": { + "description": "Event domain enum from the commonEventHeader domain field", + "type": "string" + }, + "suppressedFieldNames": { + "description": "List of optional field names in the event block that should not be sent to the Event Listener", + "type": "array", + "items": { + "type": "string" + } + }, + "suppressedNvPairsList": { + "description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field", + "type": "array", + "items": { + "$ref": "#/definitions/suppressedNvPairs" + } + } + }, + "required": [ "eventDomain" ] + }, + "eventDomainThrottleSpecificationList": { + "description": "array of eventDomainThrottleSpecifications", + "type": "array", + "items": { + "$ref": "#/definitions/eventDomainThrottleSpecification" + }, + "minItems": 0 + }, + "eventList": { + "description": "array of events", + "type": "array", + "items": { + "$ref": "#/definitions/event" + } + }, + "eventThrottlingState": { + "description": "reports the throttling in force at the event source", + "type": "object", + "properties": { + "eventThrottlingMode": { + "description": "Mode the event manager is in", + "type": "string", + "enum": [ + "normal", + "throttled" + ] + }, + "eventDomainThrottleSpecificationList": { "$ref": "#/definitions/eventDomainThrottleSpecificationList" } + }, + "required": [ "eventThrottlingMode" ] + }, + "faultFields": { + "description": "fields specific to fault events", + "type": "object", + "properties": { + "alarmAdditionalInformation": { + "description": "additional alarm information", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "alarmCondition": { + "description": "alarm condition reported by the device", + "type": "string" + }, + "alarmInterfaceA": { + "description": "card, port, channel or interface name of the device generating the alarm", + "type": "string" + }, + "eventCategory": { + "description": "Event category, for example: license, link, routing, security, signaling", + "type": "string" + }, + "eventSeverity": { + "description": "event severity", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventSourceType": { 
+ "description": "type of event source; examples: card, host, other, port, portThreshold, router, slotThreshold, switch, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "faultFieldsVersion": { + "description": "version of the faultFields block", + "type": "number" + }, + "specificProblem": { + "description": "short description of the alarm or problem", + "type": "string" + }, + "vfStatus": { + "description": "virtual function status enumeration", + "type": "string", + "enum": [ + "Active", + "Idle", + "Preparing to terminate", + "Ready to terminate", + "Requesting termination" + ] + } + }, + "required": [ "alarmCondition", "eventSeverity", "eventSourceType", + "faultFieldsVersion", "specificProblem", "vfStatus" ] + }, + "featuresInUse": { + "description": "number of times an identified feature was used over the measurementInterval", + "type": "object", + "properties": { + "featureIdentifier": { "type": "string" }, + "featureUtilization": { "type": "integer" } + }, + "required": [ "featureIdentifier", "featureUtilization" ] + }, + "field": { + "description": "name value pair", + "type": "object", + "properties": { + "name": { "type": "string" }, + "value": { "type": "string" } + }, + "required": [ "name", "value" ] + }, + "filesystemUsage": { + "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second", + "type": "object", + "properties": { + "blockConfigured": { "type": "number" }, + "blockIops": { "type": "number" }, + "blockUsed": { "type": "number" }, + "ephemeralConfigured": { "type": "number" }, + "ephemeralIops": { "type": "number" }, + "ephemeralUsed": { "type": "number" }, + "filesystemName": { "type": "string" } + }, + "required": [ "blockConfigured", "blockIops", "blockUsed", "ephemeralConfigured", + "ephemeralIops", "ephemeralUsed", "filesystemName" ] + }, + "gtpPerFlowMetrics": { + "description": "Mobility GTP Protocol per flow metrics", + "type": "object", + "properties": { + "avgBitErrorRate": { + "description": "average bit error rate", + "type": "number" + }, + "avgPacketDelayVariation": { + "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "avgPacketLatency": { + "description": "average delivery latency", + "type": "number" + }, + "avgReceiveThroughput": { + "description": "average receive throughput", + "type": "number" + }, + "avgTransmitThroughput": { + "description": "average transmit throughput", + "type": "number" + }, + "durConnectionFailedStatus": { + "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval", + "type": "number" + }, + "durTunnelFailedStatus": { + "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval", + "type": "number" + }, + "flowActivatedBy": { + "description": "Endpoint activating the flow", + "type": "string" + }, + "flowActivationEpoch": { + "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available", + "type": "number" + }, + "flowActivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + 
"type": "number" + }, + "flowActivationTime": { + "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowDeactivatedBy": { + "description": "Endpoint deactivating the flow", + "type": "string" + }, + "flowDeactivationEpoch": { + "description": "Time for the start of the flow connection, in integer UTC epoch time aka UNIX time", + "type": "number" + }, + "flowDeactivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowDeactivationTime": { + "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowStatus": { + "description": "connection status at reporting time as a working / inactive / failed indicator value", + "type": "string" + }, + "gtpConnectionStatus": { + "description": "Current connection state at reporting time", + "type": "string" + }, + "gtpTunnelStatus": { + "description": "Current tunnel state at reporting time", + "type": "string" + }, + "ipTosCountList": { + "description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "ipTosList": { + "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'", + "type": "array", + "items": { + "type": "string" + } + }, + "largePacketRtt": { + "description": "large packet round trip time", + "type": "number" + }, + "largePacketThreshold": { + "description": "large packet threshold being applied", + "type": "number" + }, + "maxPacketDelayVariation": { + "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "maxReceiveBitRate": { + "description": "maximum receive bit rate", + "type": "number" + }, + "maxTransmitBitRate": { + "description": "maximum transmit bit rate", + "type": "number" + }, + "mobileQciCosCountList": { + "description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "mobileQciCosList": { + "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "numActivationFailures": { + "description": "Number of failed activation requests, as observed by the reporting node", + "type": "number" + }, + "numBitErrors": { + "description": "number of errored bits", + "type": "number" + }, + "numBytesReceived": { + "description": "number of bytes received, including retransmissions", + "type": "number" + }, + "numBytesTransmitted": { + "description": "number of bytes transmitted, including retransmissions", + "type": "number" + }, + "numDroppedPackets": { + "description": "number of 
received packets dropped due to errors per virtual interface", + "type": "number" + }, + "numGtpEchoFailures": { + "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2", + "type": "number" + }, + "numGtpTunnelErrors": { + "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1", + "type": "number" + }, + "numHttpErrors": { + "description": "Http error count", + "type": "number" + }, + "numL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, including retransmissions", + "type": "number" + }, + "numL7BytesTransmitted": { + "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions", + "type": "number" + }, + "numLostPackets": { + "description": "number of lost packets", + "type": "number" + }, + "numOutOfOrderPackets": { + "description": "number of out-of-order packets", + "type": "number" + }, + "numPacketErrors": { + "description": "number of errored packets", + "type": "number" + }, + "numPacketsReceivedExclRetrans": { + "description": "number of packets received, excluding retransmission", + "type": "number" + }, + "numPacketsReceivedInclRetrans": { + "description": "number of packets received, including retransmission", + "type": "number" + }, + "numPacketsTransmittedInclRetrans": { + "description": "number of packets transmitted, including retransmissions", + "type": "number" + }, + "numRetries": { + "description": "number of packet retries", + "type": "number" + }, + "numTimeouts": { + "description": "number of packet timeouts", + "type": "number" + }, + "numTunneledL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, excluding retransmissions", + "type": "number" + }, + "roundTripTime": { + "description": "round trip time", + "type": "number" + }, + "tcpFlagCountList": { + "description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { "type": "string" }, + { "type": "number" } + ] + } + }, + "tcpFlagList": { + "description": "Array of unique TCP Flags observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "timeToFirstByte": { + "description": "Time in milliseconds between the connection activation and first byte received", + "type": "number" + } + }, + "required": [ "avgBitErrorRate", "avgPacketDelayVariation", "avgPacketLatency", + "avgReceiveThroughput", "avgTransmitThroughput", + "flowActivationEpoch", "flowActivationMicrosec", + "flowDeactivationEpoch", "flowDeactivationMicrosec", + "flowDeactivationTime", "flowStatus", + "maxPacketDelayVariation", "numActivationFailures", + "numBitErrors", "numBytesReceived", "numBytesTransmitted", + "numDroppedPackets", "numL7BytesReceived", + "numL7BytesTransmitted", "numLostPackets", + "numOutOfOrderPackets", "numPacketErrors", + "numPacketsReceivedExclRetrans", + "numPacketsReceivedInclRetrans", + "numPacketsTransmittedInclRetrans", + "numRetries", "numTimeouts", "numTunneledL7BytesReceived", + "roundTripTime", "timeToFirstByte" + ] + }, + "heartbeatFields": { + "description": "optional field block for fields specific to heartbeat events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional heartbeat fields if needed", + "type": "array", + "items": { + "$ref": 
"#/definitions/field" + } + }, + "heartbeatFieldsVersion": { + "description": "version of the heartbeatFields block", + "type": "number" + }, + "heartbeatInterval": { + "description": "current heartbeat interval in seconds", + "type": "integer" + } + }, + "required": [ "heartbeatFieldsVersion", "heartbeatInterval" ] + }, + "internalHeaderFields": { + "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources", + "type": "object" + }, + "jsonObject": { + "description": "json object schema, name and other meta-information along with one or more object instances", + "type": "object", + "properties": { + "objectInstances": { + "description": "one or more instances of the jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObjectInstance" + } + }, + "objectName": { + "description": "name of the JSON Object", + "type": "string" + }, + "objectSchema": { + "description": "json schema for the object", + "type": "string" + }, + "objectSchemaUrl": { + "description": "Url to the json schema for the object", + "type": "string" + }, + "nfSubscribedObjectName": { + "description": "name of the object associated with the nfSubscriptonId", + "type": "string" + }, + "nfSubscriptionId": { + "description": "identifies an openConfig telemetry subscription on a network function, which configures the network function to send complex object data associated with the jsonObject", + "type": "string" + } + }, + "required": [ "objectInstances", "objectName" ] + }, + "jsonObjectInstance": { + "description": "meta-information about an instance of a jsonObject along with the actual object instance", + "type": "object", + "properties": { + "objectInstance": { + "description": "an instance conforming to the jsonObject schema", + "type": "object" + }, + "objectInstanceEpochMicrosec": { + "description": "the unix time aka epoch time associated with this objectInstance--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "objectKeys": { + "description": "an ordered set of keys that identifies this particular instance of jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/key" + } + } + }, + "required": [ "objectInstance" ] + }, + "key": { + "description": "tuple which provides the name of a key along with its value and relative order", + "type": "object", + "properties": { + "keyName": { + "description": "name of the key", + "type": "string" + }, + "keyOrder": { + "description": "relative sequence or order of the key with respect to other keys", + "type": "integer" + }, + "keyValue": { + "description": "value of the key", + "type": "string" + } + }, + "required": [ "keyName" ] + }, + "latencyBucketMeasure": { + "description": "number of counts falling within a defined latency bucket", + "type": "object", + "properties": { + "countsInTheBucket": { "type": "number" }, + "highEndOfLatencyBucket": { "type": "number" }, + "lowEndOfLatencyBucket": { "type": "number" } + }, + "required": [ "countsInTheBucket" ] + }, + "measurementsForVfScalingFields": { + "description": "measurementsForVfScaling fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional name-value-pair fields", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalMeasurements": { + "description": "array of named name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "additionalObjects": { + 
"description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "codecUsageArray": { + "description": "array of codecs in use", + "type": "array", + "items": { + "$ref": "#/definitions/codecsInUse" + } + }, + "concurrentSessions": { + "description": "peak concurrent sessions for the VM or VNF over the measurementInterval", + "type": "integer" + }, + "configuredEntities": { + "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF", + "type": "integer" + }, + "cpuUsageArray": { + "description": "usage of an array of CPUs", + "type": "array", + "items": { + "$ref": "#/definitions/cpuUsage" + } + }, + "diskUsageArray": { + "description": "usage of an array of disks", + "type": "array", + "items": { + "$ref": "#/definitions/diskUsage" + } + }, + "featureUsageArray": { + "description": "array of features in use", + "type": "array", + "items": { + "$ref": "#/definitions/featuresInUse" + } + }, + "filesystemUsageArray": { + "description": "filesystem usage of the VM on which the VNFC reporting the event is running", + "type": "array", + "items": { + "$ref": "#/definitions/filesystemUsage" + } + }, + "latencyDistribution": { + "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges", + "type": "array", + "items": { + "$ref": "#/definitions/latencyBucketMeasure" + } + }, + "meanRequestLatency": { + "description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "measurementInterval": { + "description": "interval over which measurements are being reported in seconds", + "type": "number" + }, + "measurementsForVfScalingVersion": { + "description": "version of the measurementsForVfScaling block", + "type": "number" + }, + "memoryUsageArray": { + "description": "memory usage of an array of VMs", + "type": "array", + "items": { + "$ref": "#/definitions/memoryUsage" + } + }, + "numberOfMediaPortsInUse": { + "description": "number of media ports in use", + "type": "integer" + }, + "requestRate": { + "description": "peak rate of service requests per second to the VNF over the measurementInterval", + "type": "number" + }, + "vnfcScalingMetric": { + "description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC", + "type": "integer" + }, + "vNicPerformanceArray": { + "description": "usage of an array of virtual network interface cards", + "type": "array", + "items": { + "$ref": "#/definitions/vNicPerformance" + } + } + }, + "required": [ "measurementInterval", "measurementsForVfScalingVersion" ] + }, + "memoryUsage": { + "description": "memory usage of an identified virtual machine", + "type": "object", + "properties": { + "memoryBuffered": { + "description": "kibibytes of temporary storage for raw disk blocks", + "type": "number" + }, + "memoryCached": { + "description": "kibibytes of memory used for cache", + "type": "number" + }, + "memoryConfigured": { + "description": "kibibytes of memory configured in the virtual machine on which the VNFC reporting the event is running", + "type": "number" + }, + "memoryFree": { + "description": "kibibytes of physical RAM left unused by the system", + "type": "number" + }, + "memorySlabRecl": { + "description": "the part of the slab that can be reclaimed such as 
caches measured in kibibytes", + "type": "number" + }, + "memorySlabUnrecl": { + "description": "the part of the slab that cannot be reclaimed even when lacking memory measured in kibibytes", + "type": "number" + }, + "memoryUsed": { + "description": "total memory minus the sum of free, buffered, cached and slab memory measured in kibibytes", + "type": "number" + }, + "vmIdentifier": { + "description": "virtual machine identifier associated with the memory metrics", + "type": "string" + } + }, + "required": [ "memoryFree", "memoryUsed", "vmIdentifier" ] + }, + "mobileFlowFields": { + "description": "mobileFlow fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional mobileFlow fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "applicationType": { + "description": "Application type inferred", + "type": "string" + }, + "appProtocolType": { + "description": "application protocol", + "type": "string" + }, + "appProtocolVersion": { + "description": "application protocol version", + "type": "string" + }, + "cid": { + "description": "cell id", + "type": "string" + }, + "connectionType": { + "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc", + "type": "string" + }, + "ecgi": { + "description": "Evolved Cell Global Id", + "type": "string" + }, + "flowDirection": { + "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow", + "type": "string" + }, + "gtpPerFlowMetrics": { "$ref": "#/definitions/gtpPerFlowMetrics" }, + "gtpProtocolType": { + "description": "GTP protocol", + "type": "string" + }, + "gtpVersion": { + "description": "GTP protocol version", + "type": "string" + }, + "httpHeader": { + "description": "HTTP request header, if the flow connects to a node referenced by HTTP", + "type": "string" + }, + "imei": { + "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "imsi": { + "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "ipProtocolType": { + "description": "IP protocol type e.g., TCP, UDP, RTP...", + "type": "string" + }, + "ipVersion": { + "description": "IP protocol version e.g., IPv4, IPv6", + "type": "string" + }, + "lac": { + "description": "location area code", + "type": "string" + }, + "mcc": { + "description": "mobile country code", + "type": "string" + }, + "mnc": { + "description": "mobile network code", + "type": "string" + }, + "mobileFlowFieldsVersion": { + "description": "version of the mobileFlowFields block", + "type": "number" + }, + "msisdn": { + "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device", + "type": "string" + }, + "otherEndpointIpAddress": { + "description": "IP address for the other endpoint, as used for the flow being reported on", + "type": "string" + }, + "otherEndpointPort": { + "description": "IP Port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "otherFunctionalRole": { + "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...", + "type": "string" + }, + "rac": { + "description": "routing area code", + "type": "string" + }, + "radioAccessTechnology": { + "description": "Radio Access Technology e.g., 2G, 3G, LTE", + "type": "string" + }, + 
"reportingEndpointIpAddr": { + "description": "IP address for the reporting entity, as used for the flow being reported on", + "type": "string" + }, + "reportingEndpointPort": { + "description": "IP port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "sac": { + "description": "service area code", + "type": "string" + }, + "samplingAlgorithm": { + "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied", + "type": "integer" + }, + "tac": { + "description": "transport area code", + "type": "string" + }, + "tunnelId": { + "description": "tunnel identifier", + "type": "string" + }, + "vlanId": { + "description": "VLAN identifier used by this flow", + "type": "string" + } + }, + "required": [ "flowDirection", "gtpPerFlowMetrics", "ipProtocolType", "ipVersion", + "mobileFlowFieldsVersion", "otherEndpointIpAddress", "otherEndpointPort", + "reportingEndpointIpAddr", "reportingEndpointPort" ] + }, + "namedArrayOfFields": { + "description": "an array of name value pairs along with a name for the array", + "type": "object", + "properties": { + "name": { "type": "string" }, + "arrayOfFields": { + "description": "array of name value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + } + }, + "required": [ "name", "arrayOfFields" ] + }, + "otherFields": { + "description": "fields for events belonging to the 'other' domain of the commonEventHeader domain enumeration", + "type": "object", + "properties": { + "hashOfNameValuePairArrays": { + "description": "array of named name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "jsonObjects": { + "description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "nameValuePairs": { + "description": "array of name-value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "otherFieldsVersion": { + "description": "version of the otherFields block", + "type": "number" + } + }, + "required": [ "otherFieldsVersion" ] + }, + "requestError": { + "description": "standard request error data structure", + "type": "object", + "properties": { + "messageId": { + "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception", + "type": "string" + }, + "text": { + "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1", + "type": "string" + }, + "url": { + "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents", + "type": "string" + }, + "variables": { + "description": "List of zero or more strings that represent the contents of the variables used by the message text", + "type": "string" + } + }, + "required": [ "messageId", "text" ] + }, + "sipSignalingFields": { + "description": "sip signaling fields", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional sip signaling fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "compressedSip": { + "description": "the full SIP request/response including headers and bodies", + "type": "string" + }, + "correlator": { + "description": 
"this is the same for all events on this call", + "type": "string" + }, + "localIpAddress": { + "description": "IP address on VNF", + "type": "string" + }, + "localPort": { + "description": "port on VNF", + "type": "string" + }, + "remoteIpAddress": { + "description": "IP address of peer endpoint", + "type": "string" + }, + "remotePort": { + "description": "port of peer endpoint", + "type": "string" + }, + "sipSignalingFieldsVersion": { + "description": "version of the sipSignalingFields block", + "type": "number" + }, + "summarySip": { + "description": "the SIP Method or Response (‘INVITE’, ‘200 OK’, ‘BYE’, etc)", + "type": "string" + }, + "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + } + }, + "required": [ "correlator", "localIpAddress", "localPort", "remoteIpAddress", + "remotePort", "sipSignalingFieldsVersion", "vendorVnfNameFields" ] + }, + "stateChangeFields": { + "description": "stateChange fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional stateChange fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "newState": { + "description": "new state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "oldState": { + "description": "previous state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "stateChangeFieldsVersion": { + "description": "version of the stateChangeFields block", + "type": "number" + }, + "stateInterface": { + "description": "card or port name of the entity that changed state", + "type": "string" + } + }, + "required": [ "newState", "oldState", "stateChangeFieldsVersion", "stateInterface" ] + }, + "suppressedNvPairs": { + "description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling", + "type": "object", + "properties": { + "nvPairFieldName": { + "description": "Name of the field within which are the nvpair names to suppress", + "type": "string" + }, + "suppressedNvPairNames": { + "description": "Array of nvpair names to suppress within the nvpairFieldName", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ "nvPairFieldName", "suppressedNvPairNames" ] + }, + "syslogFields": { + "description": "sysLog fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional syslog fields if needed provided as name=value delimited by a pipe ‘|’ symbol, for example: 'name1=value1|name2=value2|…'", + "type": "string" + }, + "eventSourceHost": { + "description": "hostname of the device", + "type": "string" + }, + "eventSourceType": { + "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "syslogFacility": { + "description": "numeric code from 0 to 23 for facility--see table in documentation", + "type": "integer" + }, + "syslogFieldsVersion": { + "description": "version of the syslogFields block", + "type": "number" + }, + "syslogMsg": { + "description": "syslog message", + "type": "string" + }, + "syslogPri": { + "description": "0-192 combined severity and facility", + "type": "integer" + }, + "syslogProc": { + "description": "identifies the application that originated the message", + "type": "string" + }, + "syslogProcId": { + "description": "a change in the value of this field indicates a discontinuity in 
syslog reporting", + "type": "number" + }, + "syslogSData": { + "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs", + "type": "string" + }, + "syslogSdId": { + "description": "0-32 char in format name@number for example ourSDID@32473", + "type": "string" + }, + "syslogSev": { + "description": "numerical Code for severity derived from syslogPri as remaider of syslogPri / 8", + "type": "string", + "enum": [ + "Alert", + "Critical", + "Debug", + "Emergency", + "Error", + "Info", + "Notice", + "Warning" + ] + }, + "syslogTag": { + "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided", + "type": "string" + }, + "syslogVer": { + "description": "IANA assigned version of the syslog protocol specification - typically 1", + "type": "number" + } + }, + "required": [ "eventSourceType", "syslogFieldsVersion", "syslogMsg", "syslogTag" ] + }, + "thresholdCrossingAlertFields": { + "description": "fields specific to threshold crossing alert events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional threshold crossing alert fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalParameters": { + "description": "performance counters", + "type": "array", + "items": { + "$ref": "#/definitions/counter" + } + }, + "alertAction": { + "description": "Event action", + "type": "string", + "enum": [ + "CLEAR", + "CONT", + "SET" + ] + }, + "alertDescription": { + "description": "Unique short alert description such as IF-SHUB-ERRDROP", + "type": "string" + }, + "alertType": { + "description": "Event type", + "type": "string", + "enum": [ + "CARD-ANOMALY", + "ELEMENT-ANOMALY", + "INTERFACE-ANOMALY", + "SERVICE-ANOMALY" + ] + }, + "alertValue": { + "description": "Calculated API value (if applicable)", + "type": "string" + }, + "associatedAlertIdList": { + "description": "List of eventIds associated with the event being reported", + "type": "array", + "items": { "type": "string" } + }, + "collectionTimestamp": { + "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "dataCollector": { + "description": "Specific performance collector instance used", + "type": "string" + }, + "elementType": { + "description": "type of network element - internal ATT field", + "type": "string" + }, + "eventSeverity": { + "description": "event severity or priority", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventStartTimestamp": { + "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "interfaceName": { + "description": "Physical or logical port or card (if applicable)", + "type": "string" + }, + "networkService": { + "description": "network name - internal ATT field", + "type": "string" + }, + "possibleRootCause": { + "description": "Reserved for future use", + "type": "string" + }, + "thresholdCrossingFieldsVersion": { + "description": "version of the thresholdCrossingAlertFields block", + "type": "number" + } + }, + "required": [ + "additionalParameters", + "alertAction", + "alertDescription", + "alertType", + "collectionTimestamp", + "eventSeverity", + "eventStartTimestamp", + "thresholdCrossingFieldsVersion" + ] + }, + 
"vendorVnfNameFields": { + "description": "provides vendor, vnf and vfModule identifying information", + "type": "object", + "properties": { + "vendorName": { + "description": "VNF vendor name", + "type": "string" + }, + "vfModuleName": { + "description": "ASDC vfModuleName for the vfModule generating the event", + "type": "string" + }, + "vnfName": { + "description": "ASDC modelName for the VNF generating the event", + "type": "string" + } + }, + "required": [ "vendorName" ] + }, + "vNicPerformance": { + "description": "describes the performance and errors of an identified virtual network interface card", + "type": "object", + "properties": { + "receivedBroadcastPacketsAccumulated": { + "description": "Cumulative count of broadcast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedBroadcastPacketsDelta": { + "description": "Count of broadcast packets received within the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsDelta": { + "description": "Count of discarded packets received within the measurement interval", + "type": "number" + }, + "receivedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedErrorPacketsDelta": { + "description": "Count of error packets received within the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsDelta": { + "description": "Count of multicast packets received within the measurement interval", + "type": "number" + }, + "receivedOctetsAccumulated": { + "description": "Cumulative count of octets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedOctetsDelta": { + "description": "Count of octets received within the measurement interval", + "type": "number" + }, + "receivedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedTotalPacketsDelta": { + "description": "Count of all packets received within the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsDelta": { + "description": "Count of unicast packets received within the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsAccumulated": { + "description": "Cumulative count of broadcast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsDelta": { + "description": "Count of broadcast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsDelta": { + "description": "Count of discarded packets transmitted within the measurement interval", + "type": "number" 
+ }, + "transmittedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedErrorPacketsDelta": { + "description": "Count of error packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsDelta": { + "description": "Count of multicast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedOctetsAccumulated": { + "description": "Cumulative count of octets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedOctetsDelta": { + "description": "Count of octets transmitted within the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsDelta": { + "description": "Count of all packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsDelta": { + "description": "Count of unicast packets transmitted within the measurement interval", + "type": "number" + }, + "valuesAreSuspect": { + "description": "Indicates whether vNicPerformance values are likely inaccurate due to counter overflow or other condtions", + "type": "string", + "enum": [ "true", "false" ] + }, + "vNicIdentifier": { + "description": "vNic identification", + "type": "string" + } + }, + "required": [ "valuesAreSuspect", "vNicIdentifier" ] + }, + "voiceQualityFields": { + "description": "provides statistics related to customer facing voice products", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional voice quality fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "calleeSideCodec": { + "description": "callee codec for the call", + "type": "string" + }, + "callerSideCodec": { + "description": "caller codec for the call", + "type": "string" + }, + "correlator": { + "description": "this is the same for all events on this call", + "type": "string" + }, + "endOfCallVqmSummaries": { + "$ref": "#/definitions/endOfCallVqmSummaries" + }, + "phoneNumber": { + "description": "phone number associated with the correlator", + "type": "string" + }, + "midCallRtcp": { + "description": "Base64 encoding of the binary RTCP data excluding Eth/IP/UDP headers", + "type": "string" + }, + "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + }, + "voiceQualityFieldsVersion": { + "description": "version of the voiceQualityFields block", + "type": "number" + } + }, + "required": [ "calleeSideCodec", "callerSideCodec", "correlator", "midCallRtcp", + "vendorVnfNameFields", "voiceQualityFieldsVersion" ] + } + }, + "title": "Event Listener", + "type": "object", + "properties": { + "event": {"$ref": "#/definitions/event"} + } +} diff --git a/test/csit/tests/dcae/testcases/assets/json_events/dcae_healthcheck.json b/test/csit/tests/dcae/testcases/assets/json_events/dcae_healthcheck.json new file mode 100644 
index 000000000..1c8f2e7ce --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/dcae_healthcheck.json @@ -0,0 +1,5 @@ +{
+ "path": "/reports/dcae/service-instances",
+ "start": "-24hour",
+ "end": "now"
+}
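
The healthcheck asset above is only a query body (a report path plus a relative time window). As a rough illustration of how such a payload could be exercised outside Robot, the sketch below POSTs it with Python requests; the target host, port, and endpoint path are placeholders and are not defined anywhere in this patch.

# Illustrative only: submit the dcae_healthcheck.json query body.
# The URL below is a placeholder, not a value taken from the suite.
import json
import requests

with open("dcae_healthcheck.json") as f:
    query = json.load(f)   # {"path": "/reports/dcae/service-instances", "start": "-24hour", "end": "now"}

resp = requests.post("http://dcae.example.org:8080/reports/query", json=query, timeout=30)
print(resp.status_code, resp.text)
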
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/assets/json_events/ves_vfirewall_measurement.json b/test/csit/tests/dcae/testcases/assets/json_events/ves_vfirewall_measurement.json new file mode 100644 index 000000000..5dbedd3c5 --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/ves_vfirewall_measurement.json @@ -0,0 +1,36 @@ +{
+ "event": {
+ "commonEventHeader": {
+ "reportingEntityName": "VM name will be provided by ECOMP",
+ "startEpochMicrosec": 1506008587564787,
+ "lastEpochMicrosec": 1506008587564787,
+ "eventName": "Measurement_VFirewall_VNicStat",
+ "eventId": "0b2b5790-3673-480a-a4bd-5a00b88e5af6",
+ "sourceName": "Dummy VM name - No Metadata available",
+ "sequence": 18123,
+ "priority": "Normal",
+ "functionalRole": "vFirewall",
+ "domain": "measurementsForVfScaling",
+ "reportingEntityId": "VM UUID will be provided by ECOMP",
+ "sourceId": "Dummy VM UUID - No Metadata available",
+ "version": 1.1
+ },
+ "measurementsForVfScalingFields": {
+ "measurementInterval": 10,
+ "measurementsForVfScalingVersion": 1.1,
+ "vNicUsageArray": [{
+ "multicastPacketsIn": 0,
+ "bytesIn": 3896,
+ "unicastPacketsIn": 0,
+ "multicastPacketsOut": 0,
+ "broadcastPacketsOut": 0,
+ "packetsOut": 28,
+ "bytesOut": 12178,
+ "broadcastPacketsIn": 0,
+ "packetsIn": 58,
+ "unicastPacketsOut": 0,
+ "vNicIdentifier": "eth0"
+ }]
+ }
+ }
+}
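
The measurement asset carries fixed identifiers and epoch timestamps. If a test needed to resend it repeatedly, those values would typically be refreshed first; the helper below is a hypothetical sketch of that step and is not part of the suite -- only the file name and field names come from the asset above.

# Hypothetical helper: reload the measurement asset and refresh its eventId and
# epoch timestamps so repeated publishes do not reuse identical header values.
import json
import time
import uuid

def refresh_measurement(path="ves_vfirewall_measurement.json"):
    with open(path) as f:
        body = json.load(f)
    header = body["event"]["commonEventHeader"]
    now_us = int(time.time() * 1_000_000)
    header["eventId"] = str(uuid.uuid4())
    header["startEpochMicrosec"] = now_us
    header["lastEpochMicrosec"] = now_us
    return body
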
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json new file mode 100644 index 000000000..9a711507a --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json @@ -0,0 +1,62 @@ +{
+ "eventList": [
+ {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546016",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ },
+ {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_RecordingServerUnreachable",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546025",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000010,
+ "lastEpochMicrosec": 1413378172000010
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "RecordingServerUnreachable",
+ "eventSourceType": "other",
+ "specificProblem": "Recording server unreachable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active"
+ }
+ }
+ ]
+}
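
The batch asset wraps two fault events in an eventList. A rough sketch of publishing it directly, bypassing Robot, is shown below; the collector address mirrors the ${VESC_URL} and ${VES_BATCH_EVENT_PATH} variables defined in dcae_ves.robot further down, while the credentials and the accepted status codes are assumptions.

# Sketch only: publish the two-event fault batch to the VES collector.
# Host/port mirror ${VESC_URL}; the basic-auth credentials are placeholders.
import json
import requests

with open("ves_volte_fault_eventlist_batch.json") as f:
    batch = json.load(f)

resp = requests.post(
    "http://VESC_IP:8080/eventListener/v5/eventBatch",  # substitute the real collector IP
    json=batch,
    auth=("dcae_user", "dcae_password"),                 # placeholder credentials
    headers={"content-type": "application/json"},
)
print(resp.status_code)
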
diff --git a/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_provide_throttle_state.json b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_provide_throttle_state.json new file mode 100644 index 000000000..d9893a7ae --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_provide_throttle_state.json @@ -0,0 +1,30 @@ +{
+ "eventThrottlingState": {
+ "eventThrottlingMode": "throttled",
+ "eventDomainThrottleSpecificationList": [
+ {
+ "eventDomain": "fault",
+ "suppressedFieldNames": [
+ "alarmInterfaceA",
+ "alarmAdditionalInformation"
+ ]
+ },
+ {
+ "eventDomain": "thresholdCrossingAlert",
+ "suppressedFieldNames": [
+ "associatedAlertIdList",
+ "possibleRootCause"
+ ],
+ "suppressedNvPairsList": [
+ {
+ "nvPairFieldName": "additionalParameters",
+ "suppressedNvPairNames": [
+ "someCounterName",
+ "someOtherCounterName"
+ ]
+ }
+ ]
+ }
+ ]
+ }
+}
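
The throttling-state asset describes which optional fields an event source should stop sending for each domain. Purely as an illustration of that contract, the function below strips the suppressed field names from an event before it is published; the function name and the simplification of ignoring suppressedNvPairsList are mine, not part of the collector or the test code.

# Illustration only: drop suppressed optional fields from an event according to
# one eventDomainThrottleSpecification. NV-pair level suppression is ignored here.
def apply_throttle_spec(event, spec):
    header = event["commonEventHeader"]
    if spec["eventDomain"] != header["domain"]:
        return event
    domain_block = event.get(header["domain"] + "Fields", {})
    for field_name in spec.get("suppressedFieldNames", []):
        domain_block.pop(field_name, None)
    return event
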
diff --git a/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event.json b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event.json new file mode 100644 index 000000000..a45c51dd9 --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event.json @@ -0,0 +1,34 @@ +{
+ "event": {
+ "commonEventHeader": {
+ "version": 3.0,
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ }
+}
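
This event is the "good" counterpart used by the positive test cases. Assuming the jsonschema package is available, the sketch below shows how it could be checked against the CommonEventFormat schema added earlier in this change; the schema file name used here is an assumption.

# Sketch, not part of the suite: validate the single fault event against the
# VES CommonEventFormat schema. The schema file name is assumed.
import json
from jsonschema import validate

with open("CommonEventFormat.json") as f:
    schema = json.load(f)
with open("ves_volte_single_fault_event.json") as f:
    event = json.load(f)

validate(instance=event, schema=schema)  # raises jsonschema.ValidationError if the event does not conform
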
diff --git a/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event_bad.json b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event_bad.json new file mode 100644 index 000000000..fd831cee2 --- /dev/null +++ b/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event_bad.json @@ -0,0 +1,34 @@ +{
+ "event": {
+ "commonEventHeader": {
+ "version": 3.0
+ "domain": "fault",
+ "eventName": "Fault_MobileCallRecording_PilotNumberPoolExhaustion",
+ "eventId": "ab305d54-85b4-a31b-7db2-fb6b9e546015",
+ "sequence": 0,
+ "priority": "High",
+ "reportingEntityId": "cc305d54-75b4-431b-adb2-eb6b9e541234",
+ "reportingEntityName": "EricssonOamVf",
+ "sourceId": "de305d54-75b4-431b-adb2-eb6b9e546014",
+ "sourceName": "scfx0001vm002cap001",
+ "nfNamingCode": "scfx",
+ "nfcNamingCode": "ssc",
+ "startEpochMicrosec": 1413378172000000,
+ "lastEpochMicrosec": 1413378172000000
+ },
+ "faultFields": {
+ "faultFieldsVersion": 2.0,
+ "alarmCondition": "PilotNumberPoolExhaustion",
+ "eventSourceType": "other",
+ "specificProblem": "Calls cannot complete - pilot numbers are unavailable",
+ "eventSeverity": "CRITICAL",
+ "vfStatus": "Active",
+ "alarmAdditionalInformation": [
+ {
+ "name": "PilotNumberPoolSize",
+ "value": "1000"
+ }
+ ]
+ }
+ }
+}
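
The "_bad" variant is the same event as the valid one except that the comma after "version": 3.0 is missing, so it is not parseable JSON at all; that is what the negative test cases rely on. A quick way to confirm the malformation:

# The asset is intentionally malformed (missing comma after "version": 3.0),
# so loading it raises a decode error.
import json

try:
    with open("ves_volte_single_fault_event_bad.json") as f:
        json.load(f)
except json.JSONDecodeError as err:
    print("rejected as expected:", err)
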
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/dcae_ves.robot b/test/csit/tests/dcae/testcases/dcae_ves.robot new file mode 100644 index 000000000..1df67ae6f --- /dev/null +++ b/test/csit/tests/dcae/testcases/dcae_ves.robot @@ -0,0 +1,188 @@ +*** Settings ***
+Documentation Testing DCAE VES Listener with various event feeds from the VoLTE, vDNS, vFW and cCPE use cases
+
+Library RequestsLibrary
+Library OperatingSystem
+Library Collections
+Library DcaeLibrary
+Resource resources/dcae_keywords.robot
+Test Setup Cleanup VES Events
+Suite Setup VES Collector Suite Setup DMaaP
+Suite Teardown VES Collector Suite Shutdown DMaaP
+
+
+
+
+*** Variables ***
+${VESC_URL_HTTPS} https://%{VESC_IP}:8443
+${VESC_URL} http://%{VESC_IP}:8080
+${VES_ANY_EVENT_PATH} /eventListener/v5
+${VES_BATCH_EVENT_PATH} /eventListener/v5/eventBatch
+${VES_THROTTLE_STATE_EVENT_PATH} /eventListener/v5/clientThrottlingState
+${HEADER_STRING} content-type=application/json
+${EVENT_DATA_FILE} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event.json
+${EVENT_MEASURE_FILE} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/ves_vfirewall_measurement.json
+${EVENT_DATA_FILE_BAD} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_single_fault_event_bad.json
+${EVENT_BATCH_DATA_FILE} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_eventlist_batch.json
+${EVENT_THROTTLING_STATE_DATA_FILE} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/ves_volte_fault_provide_throttle_state.json
+
+
+#DCAE Health Check
+${CONFIG_BINDING_URL} http://localhost:8443
+${CB_HEALTHCHECK_PATH} /healthcheck
+${CB_SERVICE_COMPONENT_PATH} /service_component/
+${VES_Service_Name1} dcae-controller-ves-collector
+${VES_Service_Name2} ves-collector-not-exist
+
+*** Comment out from R1 release ***
+DCAE Health Check
+ [Tags] DCAE-HealthCheck
+ [Documentation] Get DCAE Overall Status
+ ${auth}= Create List ${GLOBAL_DCAE_USERNAME} ${GLOBAL_DCAE_PASSWORD}
+ ${session}= Create Session dcae-health-check ${CONFIG_BINDING_URL} auth=${auth}
+ ${resp}= Get Request dcae-health-check ${CB_HEALTHCHECK_PATH}
+ Should Be Equal As Strings ${resp.status_code} 200
+
+
+Get VES Collector Service Status
+ [Tags] DCAE-HealthCheck
+ [Documentation] Get the status of a VES Collector Service Component based on service name
+ ${urlpath}= Catenate SEPARATOR= ${CB_SERVICE_COMPONENT_PATH} ${VES_Service_Name1}
+ Log Service component name for status query: ${urlpath}
+ ${resp}= Get DCAE Service Component Status ${CONFIG_BINDING_URL} ${CB_SERVICE_COMPONENT_PATH} ${GLOBAL_DCAE_USERNAME} ${GLOBAL_DCAE_PASSWORD}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${isEmpty}= Is Json Empty ${resp}
+ Run Keyword If '${isEmpty}' == False Log ${resp.json()}
+
+
+
+#*** Comment out from R1 release ***
+Publish VES VoLTE Fault Provide Throttling State
+ [Tags] DCAE-D1
+ ${evtdata}= Get Event Data From File ${EVENT_THROTTLING_STATE_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ ${resp}= Publish Event To VES Collector ${VES_VOLTE_URL} ${VES_THROTTLE_STATE_EVENT_PATH} ${headers} ${evtdata} ${GLOBAL_DCAE_USERNAME} ${GLOBAL_DCAE_PASSWORD}
+ Should Be Equal As Strings ${resp.status_code} 204
+
+Publish VES Event With Invalid Method
+ [Tags] DCAE-D1
+ [Documentation] Use the invalid PUT method instead of POST and expect a 405 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid method Put instead of Post
+ ${resp}= Publish Event To VES Collector With Put Method ${VES_VOLTE_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata} ${GLOBAL_DCAE_USERNAME} ${GLOBAL_DCAE_PASSWORD}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 405
+
+
+Publish VES Event With Invalid URL Path
+ [Tags] DCAE-D1
+ [Documentation] Use an invalid URL path and expect a 404 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid /listener/v5/ instead of /eventListener/v5 path
+ ${resp}= Publish Event To VES Collector ${VES_VOLTE_URL} /listener/v5/ ${headers} ${evtdata} ${GLOBAL_DCAE_USERNAME} ${GLOBAL_DCAE_PASSWORD}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 404
+
+Publish VES Event With Invalid Login
+ [Tags] DCAE-D1
+ [Documentation] Use an invalid user or password and expect a 401 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid User: BadUserName
+ ${resp}= Publish Event To VES Collector ${VES_VOLTE_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata} BadUserName ${GLOBAL_DCAE_PASSWORD}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 401
+
+*** Test Cases ***
+VES Collector Health Check
+ [Tags] DCAE-VESC-R1
+ [Documentation] VES Collector Health Check
+ ${uuid}= Generate UUID
+ ${session}= Create Session dcae ${VESC_URL}
+ ${headers}= Create Dictionary Accept=*/* X-TransactionId=${GLOBAL_APPLICATION_ID}-${uuid} X-FromAppId=${GLOBAL_APPLICATION_ID}
+ ${resp}= Get Request dcae /healthcheck headers=${headers}
+ Should Be Equal As Strings ${resp.status_code} 200
+
+
+Publish Single VES VoLTE Fault Event
+ [Tags] DCAE-VESC-R1
+ [Documentation] Post a single event and expect a 200 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ ${resp}= Publish Event To VES Collector No Auth ${VESC_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${isEmpty}= Is Json Empty ${resp}
+ Run Keyword If '${isEmpty}' == False Log ${resp.json()}
+ ${ret}= DMaaP Message Receive ab305d54-85b4-a31b-7db2-fb6b9e546015
+ Should Be Equal As Strings ${ret} true
+
+Publish Single VES VNF Measurement Event
+ [Tags] DCAE-VESC-R1
+ [Documentation] Post a single event and expect a 200 response
+ ${evtdata}= Get Event Data From File ${EVENT_MEASURE_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ ${resp}= Publish Event To VES Collector No Auth ${VESC_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${isEmpty}= Is Json Empty ${resp}
+ Run Keyword If '${isEmpty}' == False Log ${resp.json()}
+ ${ret}= DMaaP Message Receive 0b2b5790-3673-480a-a4bd-5a00b88e5af6
+ Should Be Equal As Strings ${ret} true
+
+Publish VES VoLTE Fault Batch Events
+ [Tags] DCAE-VESC-R1
+ [Documentation] Post batched events and expect a 202 response
+ ${evtdata}= Get Event Data From File ${EVENT_BATCH_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ ${resp}= Publish Event To VES Collector No Auth ${VESC_URL} ${VES_BATCH_EVENT_PATH} ${headers} ${evtdata}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${ret}= DMaaP Message Receive ab305d54-85b4-a31b-7db2-fb6b9e546016
+ Should Be Equal As Strings ${ret} true
+
+
+Publish Single VES VoLTE Fault Event With Bad Data
+ [Tags] DCAE-VESC-R1
+ [Documentation] Run with a JSON event with a missing comma and expect a 400 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE_BAD}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid Json Event Data
+ ${resp}= Publish Event To VES Collector No Auth ${VESC_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 400
+ ${isEmpty}= Is Json Empty ${resp}
+ Run Keyword If '${isEmpty}' == False Log ${resp.json()}
+
+Publish VES Event With Invalid Method
+ [Tags] DCAE-VESC-R1
+ [Documentation] Use the invalid PUT method instead of POST and expect a 405 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid method Put instead of Post
+ ${resp}= Publish Event To VES Collector With Put Method No Auth ${VESC_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 404
+
+
+Publish VES Event With Invalid URL Path
+ [Tags] DCAE-VESC-R1
+ [Documentation] Use an invalid URL path and expect a 404 response
+ ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE}
+ ${headers}= Create Header From String ${HEADER_STRING}
+ Log Send HTTP Request with invalid /listener/v5/ instead of /eventListener/v5 path
+ ${resp}= Publish Event To VES Collector No Auth ${VESC_URL} /listener/v5/ ${headers} ${evtdata}
+ Log Receive HTTP Status code ${resp.status_code}
+ Should Be Equal As Strings ${resp.status_code} 404
+
+
+
+
+
+
+
+
+
+
diff --git a/test/csit/tests/dcae/testcases/resources/DMaaP.py b/test/csit/tests/dcae/testcases/resources/DMaaP.py new file mode 100644 index 000000000..63e4e8c6b --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/DMaaP.py @@ -0,0 +1,423 @@ +'''
+Created on Aug 15, 2017
+
+@author: sw6830
+'''
+import os
+import posixpath
+import BaseHTTPServer
+import urllib
+import urlparse
+import cgi, sys, shutil, mimetypes
+from jsonschema import validate
+import jsonschema, json
+import DcaeVariables
+import SimpleHTTPServer
+from robot.api import logger
+
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+EvtSchema = None
+DMaaPHttpd = None
+
+
+def cleanUpEvent():
+ sz = DcaeVariables.VESEventQ.qsize()
+ for i in range(sz):
+ try:
+ DcaeVariables.VESEventQ.get_nowait()
+ except:
+ pass
+
+def enqueEvent(evt):
+ if DcaeVariables.VESEventQ != None:
+ try:
+ DcaeVariables.VESEventQ.put(evt)
+ if DcaeVariables.IsRobotRun:
+ logger.console("DMaaP Event enqued - size=" + str(len(evt)))
+ else:
+ print ("DMaaP Event enqueued - size=" + str(len(evt)))
+ return True
+ except Exception as e:
+ print (str(e))
+ return False
+ return False
+
+def dequeEvent(waitSec=25):
+ if DcaeVariables.IsRobotRun:
+ logger.console("Enter DequeEvent")
+ try:
+ evt = DcaeVariables.VESEventQ.get(True, waitSec)
+ if DcaeVariables.IsRobotRun:
+ logger.console("DMaaP Event dequeued - size=" + str(len(evt)))
+ else:
+ print("DMaaP Event dequeued - size=" + str(len(evt)))
+ return evt
+ except Exception as e:
+ if DcaeVariables.IsRobotRun:
+ logger.console(str(e))
+ logger.console("DMaaP Event dequeue timeout")
+ else:
+ print("DMaaP Event dequeue timeout")
+ return None
+
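+# Usage sketch: enqueEvent() is called by the POST handler below for each event the
+# mock collector receives, and dequeEvent() is polled by the dmaap_message_receive
+# keyword in DcaeLibrary. Assuming DcaeVariables.VESEventQ has been initialised to a
+# Queue() (as setup_dmaap_server does), the round trip looks like:
+#
+#     enqueEvent('{"event": {"commonEventHeader": {"eventId": "abc"}}}')
+#     evt = dequeEvent(waitSec=5)
+#     assert evt is not None and "abc" in evt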
+class DMaaPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+ def do_PUT(self):
+ self.send_response(405)
+ return
+
+ def do_POST(self):
+
+ respCode = 0
+ # Parse the form data posted
+ '''
+ form = cgi.FieldStorage(
+ fp=self.rfile,
+ headers=self.headers,
+ environ={'REQUEST_METHOD':'POST',
+ 'CONTENT_TYPE':self.headers['Content-Type'],
+ })
+
+
+ form = cgi.FieldStorage(
+ fp=self.rfile,
+ headers=self.headers,
+ environ={"REQUEST_METHOD": "POST"})
+
+ for item in form.list:
+ print "%s=%s" % (item.name, item.value)
+
+ '''
+
+ if 'POST' not in self.requestline:
+ respCode = 405
+
+ '''
+ if respCode == 0:
+ if '/eventlistener/v5' not in self.requestline and '/eventlistener/v5/eventBatch' not in self.requestline and \
+ '/eventlistener/v5/clientThrottlingState' not in self.requestline:
+ respCode = 404
+
+
+ if respCode == 0:
+ if 'Y29uc29sZTpaakprWWpsbE1qbGpNVEkyTTJJeg==' not in str(self.headers):
+ respCode = 401
+ '''
+
+ if respCode == 0:
+ content_len = int(self.headers.getheader('content-length', 0))
+ post_body = self.rfile.read(content_len)
+
+ if DcaeVariables.IsRobotRun:
+ logger.console("\n" + "DMaaP Receive Event:\n" + post_body)
+ else:
+ print("\n" + "DMaaP Receive Event:")
+ print (post_body)
+
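+ # keep only the JSON payload: drop anything before the first '{' so the raw
+ # event body is what gets enqueued to DMaaP and validated against the schema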
+ indx = post_body.index("{")
+ if indx != 0:
+ post_body = post_body[indx:]
+
+ if enqueEvent(post_body) == False:
+ print "enque event fails"
+
+ global EvtSchema
+ try:
+ if EvtSchema == None:
+ with open(DcaeVariables.CommonEventSchemaV5) as file:
+ EvtSchema = json.load(file)
+ decoded_body = json.loads(post_body)
+ jsonschema.validate(decoded_body, EvtSchema)
+ except:
+ respCode = 400
+
+ # Begin the response
+ if DcaeVariables.IsRobotRun == False:
+ print ("Response Message:")
+
+ '''
+ {
+ "200" : {
+ "description" : "Success",
+ "schema" : {
+ "$ref" : "#/definitions/DR_Pub"
+ }
+ }
+
+ rspStr = "{'responses' : {'200' : {'description' : 'Success'}}}"
+ rspStr1 = "{'count': 1, 'serverTimeMs': 3}"
+
+ '''
+
+ if respCode == 0:
+ if 'clientThrottlingState' in self.requestline:
+ self.send_response(204)
+ else:
+ self.send_response(200)
+ self.send_header('Content-Type', 'application/json')
+ self.end_headers()
+ #self.wfile.write("{'responses' : {'200' : {'description' : 'Success'}}}")
+ self.wfile.write("{'count': 1, 'serverTimeMs': 3}")
+ self.wfile.close()
+ else:
+ self.send_response(respCode)
+
+ '''
+ self.end_headers()
+ self.wfile.write('Client: %s\n' % str(self.client_address))
+ self.wfile.write('User-agent: %s\n' % str(self.headers['user-agent']))
+ self.wfile.write('Path: %s\n' % self.path)
+ self.wfile.write('Form data:\n')
+ self.wfile.close()
+
+ # Echo back information about what was posted in the form
+ for field in form.keys():
+ field_item = form[field]
+ if field_item.filename:
+ # The field contains an uploaded file
+ file_data = field_item.file.read()
+ file_len = len(file_data)
+ del file_data
+ self.wfile.write('\tUploaded %s as "%s" (%d bytes)\n' % \
+ (field, field_item.filename, file_len))
+ else:
+ # Regular form value
+ self.wfile.write('\t%s=%s\n' % (field, form[field].value))
+ '''
+ return
+
+
+ def do_GET(self):
+ """Serve a GET request."""
+ f = self.send_head()
+ if f:
+ try:
+ self.copyfile(f, self.wfile)
+ finally:
+ f.close()
+
+ def do_HEAD(self):
+ """Serve a HEAD request."""
+ f = self.send_head()
+ if f:
+ f.close()
+
+ def send_head(self):
+ """Common code for GET and HEAD commands.
+
+ This sends the response code and MIME headers.
+
+ Return value is either a file object (which has to be copied
+ to the outputfile by the caller unless the command was HEAD,
+ and must be closed by the caller under all circumstances), or
+ None, in which case the caller has nothing further to do.
+
+ """
+ path = self.translate_path(self.path)
+ f = None
+ if os.path.isdir(path):
+ parts = urlparse.urlsplit(self.path)
+ if not parts.path.endswith('/'):
+ # redirect browser - doing basically what apache does
+ self.send_response(301)
+ new_parts = (parts[0], parts[1], parts[2] + '/',
+ parts[3], parts[4])
+ new_url = urlparse.urlunsplit(new_parts)
+ self.send_header("Location", new_url)
+ self.end_headers()
+ return None
+ for index in "index.html", "index.htm":
+ index = os.path.join(path, index)
+ if os.path.exists(index):
+ path = index
+ break
+ else:
+ return self.list_directory(path)
+ ctype = self.guess_type(path)
+ try:
+ # Always read in binary mode. Opening files in text mode may cause
+ # newline translations, making the actual size of the content
+ # transmitted *less* than the content-length!
+ f = open(path, 'rb')
+ except IOError:
+ self.send_error(404, "File not found")
+ return None
+ try:
+ self.send_response(200)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+ except:
+ f.close()
+ raise
+
+ def list_directory(self, path):
+ """Helper to produce a directory listing (absent index.html).
+
+ Return value is either a file object, or None (indicating an
+ error). In either case, the headers are sent, making the
+ interface the same as for send_head().
+
+ """
+ try:
+ list = os.listdir(path)
+ except os.error:
+ self.send_error(404, "No permission to list directory")
+ return None
+ list.sort(key=lambda a: a.lower())
+ f = StringIO()
+ displaypath = cgi.escape(urllib.unquote(self.path))
+ f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
+ f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
+ f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
+ f.write("<hr>\n<ul>\n")
+ for name in list:
+ fullname = os.path.join(path, name)
+ displayname = linkname = name
+ # Append / for directories or @ for symbolic links
+ if os.path.isdir(fullname):
+ displayname = name + "/"
+ linkname = name + "/"
+ if os.path.islink(fullname):
+ displayname = name + "@"
+ # Note: a link to a directory displays with @ and links with /
+ f.write('<li><a href="%s">%s</a>\n'
+ % (urllib.quote(linkname), cgi.escape(displayname)))
+ f.write("</ul>\n<hr>\n</body>\n</html>\n")
+ length = f.tell()
+ f.seek(0)
+ self.send_response(200)
+ encoding = sys.getfilesystemencoding()
+ self.send_header("Content-type", "text/html; charset=%s" % encoding)
+ self.send_header("Content-Length", str(length))
+ self.end_headers()
+ return f
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ """
+ # abandon query parameters
+ path = path.split('?',1)[0]
+ path = path.split('#',1)[0]
+ # Don't forget explicit trailing slash when normalizing. Issue17324
+ trailing_slash = path.rstrip().endswith('/')
+ path = posixpath.normpath(urllib.unquote(path))
+ words = path.split('/')
+ words = filter(None, words)
+ path = os.getcwd()
+ for word in words:
+ if os.path.dirname(word) or word in (os.curdir, os.pardir):
+ # Ignore components that are not a simple file/directory name
+ continue
+ path = os.path.join(path, word)
+ if trailing_slash:
+ path += '/'
+ return path
+
+ def copyfile(self, source, outputfile):
+ """Copy all data between two file objects.
+
+ The SOURCE argument is a file object open for reading
+ (or anything with a read() method) and the DESTINATION
+ argument is a file object open for writing (or
+ anything with a write() method).
+
+ The only reason for overriding this would be to change
+ the block size or perhaps to replace newlines by CRLF
+ -- note however that the default server uses this
+ to copy binary data as well.
+
+ """
+ shutil.copyfileobj(source, outputfile)
+
+ def guess_type(self, path):
+ """Guess the type of a file.
+
+ Argument is a PATH (a filename).
+
+ Return value is a string of the form type/subtype,
+ usable for a MIME Content-type header.
+
+ The default implementation looks the file's extension
+ up in the table self.extensions_map, using application/octet-stream
+ as a default; however it would be permissible (if
+ slow) to look inside the data to make a better guess.
+
+ """
+
+ base, ext = posixpath.splitext(path)
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ ext = ext.lower()
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ else:
+ return self.extensions_map['']
+
+ if not mimetypes.inited:
+ mimetypes.init() # try to read system mime.types
+ extensions_map = mimetypes.types_map.copy()
+ extensions_map.update({
+ '': 'application/octet-stream', # Default
+ '.py': 'text/plain',
+ '.c': 'text/plain',
+ '.h': 'text/plain',
+ })
+
+def test(HandlerClass = DMaaPHandler,
+ ServerClass = BaseHTTPServer.HTTPServer, protocol="HTTP/1.0", port=3904):
+ print "Load event schema file: " + DcaeVariables.CommonEventSchemaV5
+ with open(DcaeVariables.CommonEventSchemaV5) as file:
+ global EvtSchema
+ EvtSchema = json.load(file)
+
+ server_address = ('', port)
+
+ HandlerClass.protocol_version = protocol
+ httpd = ServerClass(server_address, HandlerClass)
+
+ global DMaaPHttpd
+ DMaaPHttpd = httpd
+ DcaeVariables.HTTPD = httpd
+
+ sa = httpd.socket.getsockname()
+ print "Serving HTTP on", sa[0], "port", sa[1], "..."
+ #httpd.serve_forever()
+
+def _main_ (HandlerClass = DMaaPHandler,
+ ServerClass = BaseHTTPServer.HTTPServer, protocol="HTTP/1.0"):
+
+ if sys.argv[1:]:
+ port = int(sys.argv[1])
+ else:
+ port = 3904
+
+ print "Load event schema file: " + DcaeVariables.CommonEventSchemaV5
+ with open(DcaeVariables.CommonEventSchemaV5) as file:
+ global EvtSchema
+ EvtSchema = json.load(file)
+
+ server_address = ('', port)
+
+ HandlerClass.protocol_version = protocol
+ httpd = ServerClass(server_address, HandlerClass)
+
+ sa = httpd.socket.getsockname()
+ print "Serving HTTP on", sa[0], "port", sa[1], "..."
+ httpd.serve_forever()
+
+if __name__ == '__main__':
+ _main_()
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/resources/DcaeLibrary.py b/test/csit/tests/dcae/testcases/resources/DcaeLibrary.py new file mode 100644 index 000000000..650f8fef8 --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/DcaeLibrary.py @@ -0,0 +1,135 @@ +'''
+Created on Aug 18, 2017
+
+@author: sw6830
+'''
+from robot.api import logger
+from Queue import Queue
+import uuid, time, datetime, json, threading
+import DcaeVariables
+import DMaaP
+
+class DcaeLibrary(object):
+
+ def __init__(self):
+ pass
+
+ def setup_dmaap_server(self, portNum=3904):
+ if DcaeVariables.HttpServerThread != None:
+ DMaaP.cleanUpEvent()
+ logger.console("Clean up event from event queue before test")
+ logger.info("DMaaP Server already started")
+ return "true"
+
+ DcaeVariables.IsRobotRun = True
+ DMaaP.test(port=portNum)
+ try:
+ DcaeVariables.VESEventQ = Queue()
+ DcaeVariables.HttpServerThread = threading.Thread(name='DMAAP_HTTPServer', target=DMaaP.DMaaPHttpd.serve_forever)
+ DcaeVariables.HttpServerThread.start()
+ logger.console("DMaaP Mockup Sever started")
+ time.sleep(2)
+ return "true"
+ except Exception as e:
+ print (str(e))
+ return "false"
+
+ def shutdown_dmaap(self):
+ if DcaeVariables.HTTPD != None:
+ DcaeVariables.HTTPD.shutdown()
+ logger.console("DMaaP Server shut down")
+ time.sleep(3)
+ return "true"
+ else:
+ return "false"
+
+ def cleanup_ves_events(self):
+ if DcaeVariables.HttpServerThread != None:
+ DMaaP.cleanUpEvent()
+ logger.console("DMaaP event queue is cleaned up")
+ return "true"
+ logger.console("DMaaP server not started yet")
+ return "false"
+
+ def dmaap_message_receive(self, evtobj, action='contain'):
+
+ evtStr = DMaaP.dequeEvent()
+ while evtStr != None:
+ logger.console("DMaaP receive VES Event:\n" + evtStr)
+ if action == 'contain':
+ if evtobj in evtStr:
+ logger.info("DMaaP Receive Expected Publish Event:\n" + evtStr)
+ return 'true'
+ if action == 'sizematch':
+ if len(evtobj) == len(evtStr):
+ return 'true'
+ if action == 'dictmatch':
+ evtDict = json.loads(evtStr)
+ if cmp(evtobj, evtDict) == 0:
+ return 'true'
+ evtStr = DMaaP.dequeEvent()
+ return 'false'
+
+ def create_header_from_string(self, dictStr):
+ logger.info("Enter create_header_from_string: dictStr")
+ return dict(u.split("=") for u in dictStr.split(","))
+
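+ # Usage sketch: the suite's ${HEADER_STRING} value "content-type=application/json"
+ # becomes the requests-style dict {'content-type': 'application/json'}; several
+ # headers can be passed comma-separated, e.g.
+ # "content-type=application/json,X-FromAppId=csit" yields two dict entries.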
+ def is_json_empty(self, resp):
+ logger.info("Enter is_json_empty: resp.text: " + resp.text)
+ if resp.text == None or len(resp.text) < 2:
+ return 'True'
+ return 'False'
+
+ def Generate_UUID(self):
+ """generate a uuid"""
+ return uuid.uuid4()
+
+ def get_json_value_list(self, jsonstr, keyval):
+ logger.info("Enter Get_Json_Key_Value_List")
+ if jsonstr == None or len(jsonstr) < 2:
+ logger.info("No Json data found")
+ return []
+ try:
+ data = json.loads(jsonstr)
+ nodelist = []
+ for item in data:
+ nodelist.append(item[keyval])
+ return nodelist
+ except Exception as e:
+ logger.info("Json data parsing fails")
+ print str(e)
+ return []
+
+ def generate_MilliTimestamp_UUID(self):
+ """generate a millisecond timestamp uuid"""
+ then = datetime.datetime.now()
+ return int(time.mktime(then.timetuple())*1e3 + then.microsecond/1e3)
+
+ def test (self):
+ import json
+ from pprint import pprint
+
+ with open('robot/assets/dcae/ves_volte_single_fault_event.json') as data_file:
+ data = json.load(data_file)
+
+ data['event']['commonEventHeader']['version'] = '5.0'
+ pprint(data)
+
+
+
+if __name__ == '__main__':
+ '''
+ dictStr = "action=getTable,Accept=application/json,Content-Type=application/json,X-FromAppId=1234908903284"
+ cls = DcaeLibrary()
+ #dict = cls.create_header_from_string(dictStr)
+ #print str(dict)
+ jsonStr = "[{'Node': 'onapfcnsl00', 'CheckID': 'serfHealth', 'Name': 'Serf Health Status', 'ServiceName': '', 'Notes': '', 'ModifyIndex': 6, 'Status': 'passing', 'ServiceID': '', 'ServiceTags': [], 'Output': 'Agent alive and reachable', 'CreateIndex': 6}]"
+ lsObj = cls.get_json_value_list(jsonStr, 'Status')
+ print lsObj
+ '''
+
+ lib = DcaeLibrary()
+ ret = lib.setup_dmaap_server()
+ print ret
+ time.sleep(10000000000)
+
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/resources/DcaeVariables.py b/test/csit/tests/dcae/testcases/resources/DcaeVariables.py new file mode 100644 index 000000000..e2f1867e1 --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/DcaeVariables.py @@ -0,0 +1,16 @@ + +import os, time + +def GetEnvironmentVariable(envVarstr): + return os.environ.get(envVarstr) + +DCAE_HEALTH_CHECK_URL = "http://135.205.228.129:8500" +DCAE_HEALTH_CHECK_URL1 = "http://135.205.228.170:8500" + +CommonEventSchemaV5 = GetEnvironmentVariable('WORKSPACE') + "/test/csit/tests/dcae/testcases/assets/json_events/CommonEventFormat_28.3.json" + +HttpServerThread = None +HTTPD = None +VESEventQ = None +IsRobotRun = False + diff --git a/test/csit/tests/dcae/testcases/resources/VES-4.27.2-dataformat.json b/test/csit/tests/dcae/testcases/resources/VES-4.27.2-dataformat.json new file mode 100644 index 000000000..d4e3e4944 --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/VES-4.27.2-dataformat.json @@ -0,0 +1,1176 @@ +{
+ "self": {
+ "name": "VES_specification",
+ "version": "4.27.2",
+ "description": "VES spec from v4.1 and 27.2 spec"
+
+ },
+ "dataformatversion": "1.0.0",
+ "jsonschema":
+ {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+
+ "definitions": {
+ "attCopyrightNotice": {
+ "description": "Copyright (c) <2016>, AT&T Intellectual Property. All other rights reserved",
+ "type": "object",
+ "properties": {
+ "useAndRedistribution": {
+ "description": "Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:",
+ "type": "string"
+ },
+ "condition1": {
+ "description": "Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.",
+ "type": "string"
+ },
+ "condition2": {
+ "description": "Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.",
+ "type": "string"
+ },
+ "condition3": {
+ "description": "All advertising materials mentioning features or use of this software must display the following acknowledgement: This product includes software developed by the AT&T.",
+ "type": "string"
+ },
+ "condition4": {
+ "description": "Neither the name of AT&T nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.",
+ "type": "string"
+ },
+ "disclaimerLine1": {
+ "description": "THIS SOFTWARE IS PROVIDED BY AT&T INTELLECTUAL PROPERTY AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
+ "type": "string"
+ },
+ "disclaimerLine2": {
+ "description": "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AT&T INTELLECTUAL PROPERTY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES",
+ "type": "string"
+ },
+ "disclaimerLine3": {
+ "description": "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,",
+ "type": "string"
+ },
+ "disclaimerLine4": {
+ "description": "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.",
+ "type": "string"
+ }
+ }
+ },
+ "codecsInUse": {
+ "description": "number of times an identified codec was used over the measurementInterval",
+ "type": "object",
+ "properties": {
+ "codecIdentifier": { "type": "string" },
+ "numberInUse": { "type": "number" }
+ },
+ "required": [ "codecIdentifier", "numberInUse" ]
+ },
+ "command": {
+ "description": "command from an event collector toward an event source",
+ "type": "object",
+ "properties": {
+ "commandType": {
+ "type": "string",
+ "enum": [
+ "heartbeatIntervalChange",
+ "measurementIntervalChange",
+ "provideThrottlingState",
+ "throttlingSpecification"
+ ]
+ },
+ "eventDomainThrottleSpecification": { "$ref": "#/definitions/eventDomainThrottleSpecification" },
+ "measurementInterval": { "type": "number" }
+ },
+ "required": [ "commandType" ]
+ },
+ "commandList": {
+ "description": "array of commands from an event collector toward an event source",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/commandListEntry"
+ },
+ "minItems": 0
+ },
+ "commandListEntry": {
+ "description": "reference to a command object",
+ "type": "object",
+ "properties": {
+ "command": {"$ref": "#/definitions/command"}
+ },
+ "required": [ "command" ]
+ },
+ "commonEventHeader": {
+ "description": "fields common to all events",
+ "type": "object",
+ "properties": {
+ "domain": {
+ "description": "the eventing domain associated with the event",
+ "type": "string",
+ "enum": [
+ "fault",
+ "heartbeat",
+ "measurementsForVfScaling",
+ "mobileFlow",
+ "other",
+ "stateChange",
+ "syslog",
+ "thresholdCrossingAlert"
+ ]
+ },
+ "eventId": {
+ "description": "event key that is unique to the event source",
+ "type": "string"
+ },
+ "eventType": {
+ "description": "unique event topic name",
+ "type": "string"
+ },
+ "functionalRole": {
+ "description": "function of the event source e.g., eNodeB, MME, PCRF",
+ "type": "string"
+ },
+ "internalHeaderFields": { "$ref": "#/definitions/internalHeaderFields" },
+ "lastEpochMicrosec": {
+ "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
+ "type": "number"
+ },
+ "priority": {
+ "description": "processing priority",
+ "type": "string",
+ "enum": [
+ "High",
+ "Medium",
+ "Normal",
+ "Low"
+ ]
+ },
+ "reportingEntityId": {
+ "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process",
+ "type": "string"
+ },
+ "reportingEntityName": {
+ "description": "name of the entity reporting the event, for example, an OAM VM",
+ "type": "string"
+ },
+ "sequence": {
+ "description": "ordering of events communicated by an event source instance or 0 if not needed",
+ "type": "integer"
+ },
+ "sourceId": {
+ "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process",
+ "type": "string"
+ },
+ "sourceName": {
+ "description": "name of the entity experiencing the event issue",
+ "type": "string"
+ },
+ "startEpochMicrosec": {
+ "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds",
+ "type": "number"
+ },
+ "version": {
+ "description": "version of the event header",
+ "type": "number"
+ }
+ },
+ "required": [ "domain", "eventId", "functionalRole", "lastEpochMicrosec",
+ "priority", "reportingEntityName", "sequence",
+ "sourceName", "startEpochMicrosec" ]
+ },
+ "counter": {
+ "description": "performance counter",
+ "type": "object",
+ "properties": {
+ "criticality": { "type": "string", "enum": [ "CRIT", "MAJ" ] },
+ "name": { "type": "string" },
+ "thresholdCrossed": { "type": "string" },
+ "value": { "type": "string"}
+ },
+ "required": [ "criticality", "name", "thresholdCrossed", "value" ]
+ },
+ "cpuUsage": {
+ "description": "percent usage of an identified CPU",
+ "type": "object",
+ "properties": {
+ "cpuIdentifier": { "type": "string" },
+ "percentUsage": { "type": "number" }
+ },
+ "required": [ "cpuIdentifier", "percentUsage" ]
+ },
+ "errors": {
+ "description": "receive and transmit errors for the measurements domain",
+ "type": "object",
+ "properties": {
+ "receiveDiscards": { "type": "number" },
+ "receiveErrors": { "type": "number" },
+ "transmitDiscards": { "type": "number" },
+ "transmitErrors": { "type": "number" }
+ },
+ "required": [ "receiveDiscards", "receiveErrors", "transmitDiscards", "transmitErrors" ]
+ },
+ "event": {
+ "description": "the root level of the common event format",
+ "type": "object",
+ "properties": {
+ "commonEventHeader": { "$ref": "#/definitions/commonEventHeader" },
+ "faultFields": { "$ref": "#/definitions/faultFields" },
+ "measurementsForVfScalingFields": { "$ref": "#/definitions/measurementsForVfScalingFields" },
+ "mobileFlowFields": { "$ref": "#/definitions/mobileFlowFields" },
+ "otherFields": { "$ref": "#/definitions/otherFields" },
+ "stateChangeFields": { "$ref": "#/definitions/stateChangeFields" },
+ "syslogFields": { "$ref": "#/definitions/syslogFields" },
+ "thresholdCrossingAlertFields": { "$ref": "#/definitions/thresholdCrossingAlertFields" }
+ },
+ "required": [ "commonEventHeader" ]
+ },
+ "eventDomainThrottleSpecification": {
+ "description": "specification of what information to suppress within an event domain",
+ "type": "object",
+ "properties": {
+ "eventDomain": {
+ "description": "Event domain enum from the commonEventHeader domain field",
+ "type": "string"
+ },
+ "suppressedFieldNames": {
+ "description": "List of optional field names in the event block that should not be sent to the Event Listener",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "suppressedNvPairsList": {
+ "description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/suppressedNvPairs"
+ }
+ }
+ },
+ "required": [ "eventDomain" ]
+ },
+ "eventDomainThrottleSpecificationList": {
+ "description": "array of eventDomainThrottleSpecifications",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/eventDomainThrottleSpecification"
+ },
+ "minItems": 0
+ },
+ "eventList": {
+ "description": "array of events",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/event"
+ }
+ },
+ "eventThrottlingState": {
+ "description": "reports the throttling in force at the event source",
+ "type": "object",
+ "properties": {
+ "eventThrottlingMode": {
+ "description": "Mode the event manager is in",
+ "type": "string",
+ "enum": [
+ "normal",
+ "throttled"
+ ]
+ },
+ "eventDomainThrottleSpecificationList": { "$ref": "#/definitions/eventDomainThrottleSpecificationList" }
+ },
+ "required": [ "eventThrottlingMode" ]
+ },
+ "faultFields": {
+ "description": "fields specific to fault events",
+ "type": "object",
+ "properties": {
+ "alarmAdditionalInformation": {
+ "description": "additional alarm information",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "alarmCondition": {
+ "description": "alarm condition reported by the device",
+ "type": "string"
+ },
+ "alarmInterfaceA": {
+ "description": "card, port, channel or interface name of the device generating the alarm",
+ "type": "string"
+ },
+ "eventSeverity": {
+ "description": "event severity or priority",
+ "type": "string",
+ "enum": [
+ "CRITICAL",
+ "MAJOR",
+ "MINOR",
+ "WARNING",
+ "NORMAL"
+ ]
+ },
+ "eventSourceType": {
+ "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction",
+ "type": "string"
+ },
+ "faultFieldsVersion": {
+ "description": "version of the faultFields block",
+ "type": "number"
+ },
+ "specificProblem": {
+ "description": "short description of the alarm or problem",
+ "type": "string"
+ },
+ "vfStatus": {
+ "description": "virtual function status enumeration",
+ "type": "string",
+ "enum": [
+ "Active",
+ "Idle",
+ "Preparing to terminate",
+ "Ready to terminate",
+ "Requesting termination"
+ ]
+ }
+ },
+ "required": [ "alarmCondition", "eventSeverity",
+ "eventSourceType", "specificProblem", "vfStatus" ]
+ },
+ "featuresInUse": {
+ "description": "number of times an identified feature was used over the measurementInterval",
+ "type": "object",
+ "properties": {
+ "featureIdentifier": { "type": "string" },
+ "featureUtilization": { "type": "number" }
+ },
+ "required": [ "featureIdentifier", "featureUtilization" ]
+ },
+ "field": {
+ "description": "name value pair",
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" },
+ "value": { "type": "string" }
+ },
+ "required": [ "name", "value" ]
+ },
+ "filesystemUsage": {
+ "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second",
+ "type": "object",
+ "properties": {
+ "blockConfigured": { "type": "number" },
+ "blockIops": { "type": "number" },
+ "blockUsed": { "type": "number" },
+ "ephemeralConfigured": { "type": "number" },
+ "ephemeralIops": { "type": "number" },
+ "ephemeralUsed": { "type": "number" },
+ "filesystemName": { "type": "string" }
+ },
+ "required": [ "blockConfigured", "blockIops", "blockUsed", "ephemeralConfigured",
+ "ephemeralIops", "ephemeralUsed", "filesystemName" ]
+ },
+ "gtpPerFlowMetrics": {
+ "description": "Mobility GTP Protocol per flow metrics",
+ "type": "object",
+ "properties": {
+ "avgBitErrorRate": {
+ "description": "average bit error rate",
+ "type": "number"
+ },
+ "avgPacketDelayVariation": {
+ "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets",
+ "type": "number"
+ },
+ "avgPacketLatency": {
+ "description": "average delivery latency",
+ "type": "number"
+ },
+ "avgReceiveThroughput": {
+ "description": "average receive throughput",
+ "type": "number"
+ },
+ "avgTransmitThroughput": {
+ "description": "average transmit throughput",
+ "type": "number"
+ },
+ "durConnectionFailedStatus": {
+ "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval",
+ "type": "number"
+ },
+ "durTunnelFailedStatus": {
+ "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval",
+ "type": "number"
+ },
+ "flowActivatedBy": {
+ "description": "Endpoint activating the flow",
+ "type": "string"
+ },
+ "flowActivationEpoch": {
+ "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available",
+ "type": "number"
+ },
+ "flowActivationMicrosec": {
+ "description": "Integer microseconds for the start of the flow connection",
+ "type": "number"
+ },
+ "flowActivationTime": {
+ "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+ "type": "string"
+ },
+ "flowDeactivatedBy": {
+ "description": "Endpoint deactivating the flow",
+ "type": "string"
+ },
+ "flowDeactivationEpoch": {
+ "description": "Time for the start of the flow connection, in integer UTC epoch time aka UNIX time",
+ "type": "number"
+ },
+ "flowDeactivationMicrosec": {
+ "description": "Integer microseconds for the start of the flow connection",
+ "type": "number"
+ },
+ "flowDeactivationTime": {
+ "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+ "type": "string"
+ },
+ "flowStatus": {
+ "description": "connection status at reporting time as a working / inactive / failed indicator value",
+ "type": "string"
+ },
+ "gtpConnectionStatus": {
+ "description": "Current connection state at reporting time",
+ "type": "string"
+ },
+ "gtpTunnelStatus": {
+ "description": "Current tunnel state at reporting time",
+ "type": "string"
+ },
+ "ipTosCountList": {
+ "description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": [
+ { "type": "string" },
+ { "type": "number" }
+ ]
+ }
+ },
+ "ipTosList": {
+ "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "largePacketRtt": {
+ "description": "large packet round trip time",
+ "type": "number"
+ },
+ "largePacketThreshold": {
+ "description": "large packet threshold being applied",
+ "type": "number"
+ },
+ "maxPacketDelayVariation": {
+ "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets",
+ "type": "number"
+ },
+ "maxReceiveBitRate": {
+ "description": "maximum receive bit rate",
+ "type": "number"
+ },
+ "maxTransmitBitRate": {
+ "description": "maximum transmit bit rate",
+ "type": "number"
+ },
+ "mobileQciCosCountList": {
+ "description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": [
+ { "type": "string" },
+ { "type": "number" }
+ ]
+ }
+ },
+ "mobileQciCosList": {
+ "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "numActivationFailures": {
+ "description": "Number of failed activation requests, as observed by the reporting node",
+ "type": "number"
+ },
+ "numBitErrors": {
+ "description": "number of errored bits",
+ "type": "number"
+ },
+ "numBytesReceived": {
+ "description": "number of bytes received, including retransmissions",
+ "type": "number"
+ },
+ "numBytesTransmitted": {
+ "description": "number of bytes transmitted, including retransmissions",
+ "type": "number"
+ },
+ "numDroppedPackets": {
+ "description": "number of received packets dropped due to errors per virtual interface",
+ "type": "number"
+ },
+ "numGtpEchoFailures": {
+ "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2",
+ "type": "number"
+ },
+ "numGtpTunnelErrors": {
+ "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1",
+ "type": "number"
+ },
+ "numHttpErrors": {
+ "description": "Http error count",
+ "type": "number"
+ },
+ "numL7BytesReceived": {
+ "description": "number of tunneled layer 7 bytes received, including retransmissions",
+ "type": "number"
+ },
+ "numL7BytesTransmitted": {
+ "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions",
+ "type": "number"
+ },
+ "numLostPackets": {
+ "description": "number of lost packets",
+ "type": "number"
+ },
+ "numOutOfOrderPackets": {
+ "description": "number of out-of-order packets",
+ "type": "number"
+ },
+ "numPacketErrors": {
+ "description": "number of errored packets",
+ "type": "number"
+ },
+ "numPacketsReceivedExclRetrans": {
+ "description": "number of packets received, excluding retransmission",
+ "type": "number"
+ },
+ "numPacketsReceivedInclRetrans": {
+ "description": "number of packets received, including retransmission",
+ "type": "number"
+ },
+ "numPacketsTransmittedInclRetrans": {
+ "description": "number of packets transmitted, including retransmissions",
+ "type": "number"
+ },
+ "numRetries": {
+ "description": "number of packet retries",
+ "type": "number"
+ },
+ "numTimeouts": {
+ "description": "number of packet timeouts",
+ "type": "number"
+ },
+ "numTunneledL7BytesReceived": {
+ "description": "number of tunneled layer 7 bytes received, excluding retransmissions",
+ "type": "number"
+ },
+ "roundTripTime": {
+ "description": "round trip time",
+ "type": "number"
+ },
+ "tcpFlagCountList": {
+ "description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": [
+ { "type": "string" },
+ { "type": "number" }
+ ]
+ }
+ },
+ "tcpFlagList": {
+ "description": "Array of unique TCP Flags observed in the flow",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "timeToFirstByte": {
+ "description": "Time in milliseconds between the connection activation and first byte received",
+ "type": "number"
+ }
+ },
+ "required": [ "avgBitErrorRate", "avgPacketDelayVariation", "avgPacketLatency",
+ "avgReceiveThroughput", "avgTransmitThroughput",
+ "flowActivationEpoch", "flowActivationMicrosec",
+ "flowDeactivationEpoch", "flowDeactivationMicrosec",
+ "flowDeactivationTime", "flowStatus",
+ "maxPacketDelayVariation", "numActivationFailures",
+ "numBitErrors", "numBytesReceived", "numBytesTransmitted",
+ "numDroppedPackets", "numL7BytesReceived",
+ "numL7BytesTransmitted", "numLostPackets",
+ "numOutOfOrderPackets", "numPacketErrors",
+ "numPacketsReceivedExclRetrans",
+ "numPacketsReceivedInclRetrans",
+ "numPacketsTransmittedInclRetrans",
+ "numRetries", "numTimeouts", "numTunneledL7BytesReceived",
+ "roundTripTime", "timeToFirstByte"
+ ]
+ },
+ "internalHeaderFields": {
+ "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources",
+ "type": "object"
+ },
+ "latencyBucketMeasure": {
+ "description": "number of counts falling within a defined latency bucket",
+ "type": "object",
+ "properties": {
+ "countsInTheBucket": { "type": "number" },
+ "highEndOfLatencyBucket": { "type": "number" },
+ "lowEndOfLatencyBucket": { "type": "number" }
+ },
+ "required": [ "countsInTheBucket" ]
+ },
+ "measurementGroup": {
+ "description": "measurement group",
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" },
+ "measurements": {
+ "description": "array of name value pair measurements",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ }
+ },
+ "required": [ "name", "measurements" ]
+ },
+ "measurementsForVfScalingFields": {
+ "description": "measurementsForVfScaling fields",
+ "type": "object",
+ "properties": {
+ "additionalMeasurements": {
+ "description": "additional measurement fields",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/measurementGroup"
+ }
+ },
+ "aggregateCpuUsage": {
+ "description": "aggregate CPU usage of the VM on which the VNFC reporting the event is running",
+ "type": "number"
+ },
+ "codecUsageArray": {
+ "description": "array of codecs in use",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/codecsInUse"
+ }
+ },
+ "concurrentSessions": {
+ "description": "peak concurrent sessions for the VM or VNF over the measurementInterval",
+ "type": "number"
+ },
+ "configuredEntities": {
+ "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF",
+ "type": "number"
+ },
+ "cpuUsageArray": {
+ "description": "usage of an array of CPUs",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/cpuUsage"
+ }
+ },
+ "errors": { "$ref": "#/definitions/errors" },
+ "featureUsageArray": {
+ "description": "array of features in use",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/featuresInUse"
+ }
+ },
+ "filesystemUsageArray": {
+ "description": "filesystem usage of the VM on which the VNFC reporting the event is running",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/filesystemUsage"
+ }
+ },
+ "latencyDistribution": {
+ "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/latencyBucketMeasure"
+ }
+ },
+ "meanRequestLatency": {
+ "description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running",
+ "type": "number"
+ },
+ "measurementInterval": {
+ "description": "interval over which measurements are being reported in seconds",
+ "type": "number"
+ },
+ "measurementsForVfScalingVersion": {
+ "description": "version of the measurementsForVfScaling block",
+ "type": "number"
+ },
+ "memoryConfigured": {
+ "description": "memory in MB configured in the VM on which the VNFC reporting the event is running",
+ "type": "number"
+ },
+ "memoryUsed": {
+ "description": "memory usage in MB of the VM on which the VNFC reporting the event is running",
+ "type": "number"
+ },
+ "numberOfMediaPortsInUse": {
+ "description": "number of media ports in use",
+ "type": "number"
+ },
+ "requestRate": {
+ "description": "peak rate of service requests per second to the VNF over the measurementInterval",
+ "type": "number"
+ },
+ "vnfcScalingMetric": {
+ "description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC",
+ "type": "number"
+ },
+ "vNicUsageArray": {
+ "description": "usage of an array of virtual network interface cards",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/vNicUsage"
+ }
+ }
+ },
+ "required": [ "measurementInterval" ]
+ },
+ "mobileFlowFields": {
+ "description": "mobileFlow fields",
+ "type": "object",
+ "properties": {
+ "additionalFields": {
+ "description": "additional mobileFlow fields if needed",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "applicationType": {
+ "description": "Application type inferred",
+ "type": "string"
+ },
+ "appProtocolType": {
+ "description": "application protocol",
+ "type": "string"
+ },
+ "appProtocolVersion": {
+ "description": "application protocol version",
+ "type": "string"
+ },
+ "cid": {
+ "description": "cell id",
+ "type": "string"
+ },
+ "connectionType": {
+ "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc",
+ "type": "string"
+ },
+ "ecgi": {
+ "description": "Evolved Cell Global Id",
+ "type": "string"
+ },
+ "flowDirection": {
+ "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow",
+ "type": "string"
+ },
+ "gtpPerFlowMetrics": { "$ref": "#/definitions/gtpPerFlowMetrics" },
+ "gtpProtocolType": {
+ "description": "GTP protocol",
+ "type": "string"
+ },
+ "gtpVersion": {
+ "description": "GTP protocol version",
+ "type": "string"
+ },
+ "httpHeader": {
+ "description": "HTTP request header, if the flow connects to a node referenced by HTTP",
+ "type": "string"
+ },
+ "imei": {
+ "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device",
+ "type": "string"
+ },
+ "imsi": {
+ "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device",
+ "type": "string"
+ },
+ "ipProtocolType": {
+ "description": "IP protocol type e.g., TCP, UDP, RTP...",
+ "type": "string"
+ },
+ "ipVersion": {
+ "description": "IP protocol version e.g., IPv4, IPv6",
+ "type": "string"
+ },
+ "lac": {
+ "description": "location area code",
+ "type": "string"
+ },
+ "mcc": {
+ "description": "mobile country code",
+ "type": "string"
+ },
+ "mnc": {
+ "description": "mobile network code",
+ "type": "string"
+ },
+ "mobileFlowFieldsVersion": {
+ "description": "version of the mobileFlowFields block",
+ "type": "number"
+ },
+ "msisdn": {
+ "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device",
+ "type": "string"
+ },
+ "otherEndpointIpAddress": {
+ "description": "IP address for the other endpoint, as used for the flow being reported on",
+ "type": "string"
+ },
+ "otherEndpointPort": {
+ "description": "IP Port for the reporting entity, as used for the flow being reported on",
+ "type": "number"
+ },
+ "otherFunctionalRole": {
+ "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...",
+ "type": "string"
+ },
+ "rac": {
+ "description": "routing area code",
+ "type": "string"
+ },
+ "radioAccessTechnology": {
+ "description": "Radio Access Technology e.g., 2G, 3G, LTE",
+ "type": "string"
+ },
+ "reportingEndpointIpAddr": {
+ "description": "IP address for the reporting entity, as used for the flow being reported on",
+ "type": "string"
+ },
+ "reportingEndpointPort": {
+ "description": "IP port for the reporting entity, as used for the flow being reported on",
+ "type": "number"
+ },
+ "sac": {
+ "description": "service area code",
+ "type": "string"
+ },
+ "samplingAlgorithm": {
+ "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied",
+ "type": "number"
+ },
+ "tac": {
+ "description": "transport area code",
+ "type": "string"
+ },
+ "tunnelId": {
+ "description": "tunnel identifier",
+ "type": "string"
+ },
+ "vlanId": {
+ "description": "VLAN identifier used by this flow",
+ "type": "string"
+ }
+ },
+ "required": [ "flowDirection", "gtpPerFlowMetrics", "ipProtocolType",
+ "ipVersion", "otherEndpointIpAddress", "otherEndpointPort",
+ "reportingEndpointIpAddr", "reportingEndpointPort" ]
+ },
+ "otherFields": {
+ "description": "additional fields not reported elsewhere",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "requestError": {
+ "description": "standard request error data structure",
+ "type": "object",
+ "properties": {
+ "messageId": {
+ "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception",
+ "type": "string"
+ },
+ "text": {
+ "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1",
+ "type": "string"
+ },
+ "url": {
+ "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents",
+ "type": "string"
+ },
+ "variables": {
+ "description": "List of zero or more strings that represent the contents of the variables used by the message text",
+ "type": "string"
+ }
+ },
+ "required": [ "messageId", "text" ]
+ },
+ "stateChangeFields": {
+ "description": "stateChange fields",
+ "type": "object",
+ "properties": {
+ "additionalFields": {
+ "description": "additional stateChange fields if needed",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "newState": {
+ "description": "new state of the entity",
+ "type": "string",
+ "enum": [
+ "inService",
+ "maintenance",
+ "outOfService"
+ ]
+ },
+ "oldState": {
+ "description": "previous state of the entity",
+ "type": "string",
+ "enum": [
+ "inService",
+ "maintenance",
+ "outOfService"
+ ]
+ },
+ "stateChangeFieldsVersion": {
+ "description": "version of the stateChangeFields block",
+ "type": "number"
+ },
+ "stateInterface": {
+ "description": "card or port name of the entity that changed state",
+ "type": "string"
+ }
+ },
+ "required": [ "newState", "oldState", "stateInterface" ]
+ },
+ "suppressedNvPairs": {
+ "description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling",
+ "type": "object",
+ "properties": {
+ "nvPairFieldName": {
+ "description": "Name of the field within which are the nvpair names to suppress",
+ "type": "string"
+ },
+ "suppressedNvPairNames": {
+ "description": "Array of nvpair names to suppress within the nvpairFieldName",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ },
+ "required": [ "nvPairFieldName", "suppressedNvPairNames" ]
+ },
+ "syslogFields": {
+ "description": "sysLog fields",
+ "type": "object",
+ "properties": {
+ "additionalFields": {
+ "description": "additional syslog fields if needed",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "eventSourceHost": {
+ "description": "hostname of the device",
+ "type": "string"
+ },
+ "eventSourceType": {
+ "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction",
+ "type": "string"
+ },
+ "syslogFacility": {
+ "description": "numeric code from 0 to 23 for facility--see table in documentation",
+ "type": "number"
+ },
+ "syslogFieldsVersion": {
+ "description": "version of the syslogFields block",
+ "type": "number"
+ },
+ "syslogMsg": {
+ "description": "syslog message",
+ "type": "string"
+ },
+ "syslogPri": {
+ "description": "0-192 combined severity and facility",
+ "type": "number"
+ },
+ "syslogProc": {
+ "description": "identifies the application that originated the message",
+ "type": "string"
+ },
+ "syslogProcId": {
+ "description": "a change in the value of this field indicates a discontinuity in syslog reporting",
+ "type": "number"
+ },
+ "syslogSData": {
+ "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs",
+ "type": "string"
+ },
+ "syslogSdId": {
+ "description": "0-32 char in format name@number for example ourSDID@32473",
+ "type": "string"
+ },
+ "syslogSev": {
+ "description": "numerical Code for severity derived from syslogPri as remaider of syslogPri / 8",
+ "type": "string"
+ },
+ "syslogTag": {
+ "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided",
+ "type": "string"
+ },
+ "syslogVer": {
+ "description": "IANA assigned version of the syslog protocol specification - typically 1",
+ "type": "number"
+ }
+ },
+ "required": [ "eventSourceType", "syslogMsg", "syslogTag" ]
+ },
+ "thresholdCrossingAlertFields": {
+ "description": "fields specific to threshold crossing alert events",
+ "type": "object",
+ "properties": {
+ "additionalFields": {
+ "description": "additional threshold crossing alert fields if needed",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/field"
+ }
+ },
+ "additionalParameters": {
+ "description": "performance counters",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/counter"
+ }
+ },
+ "alertAction": {
+ "description": "Event action",
+ "type": "string",
+ "enum": [
+ "CLEAR",
+ "CONT",
+ "SET"
+ ]
+ },
+ "alertDescription": {
+ "description": "Unique short alert description such as IF-SHUB-ERRDROP",
+ "type": "string"
+ },
+ "alertType": {
+ "description": "Event type",
+ "type": "string",
+ "enum": [
+ "CARD-ANOMALY",
+ "ELEMENT-ANOMALY",
+ "INTERFACE-ANOMALY",
+ "SERVICE-ANOMALY"
+ ]
+ },
+ "alertValue": {
+ "description": "Calculated API value (if applicable)",
+ "type": "string"
+ },
+ "associatedAlertIdList": {
+ "description": "List of eventIds associated with the event being reported",
+ "type": "array",
+ "items": { "type": "string" }
+ },
+ "collectionTimestamp": {
+ "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+ "type": "string"
+ },
+ "dataCollector": {
+ "description": "Specific performance collector instance used",
+ "type": "string"
+ },
+ "elementType": {
+ "description": "type of network element - internal ATT field",
+ "type": "string"
+ },
+ "eventSeverity": {
+ "description": "event severity or priority",
+ "type": "string",
+ "enum": [
+ "CRITICAL",
+ "MAJOR",
+ "MINOR",
+ "WARNING",
+ "NORMAL"
+ ]
+ },
+ "eventStartTimestamp": {
+ "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800",
+ "type": "string"
+ },
+ "interfaceName": {
+ "description": "Physical or logical port or card (if applicable)",
+ "type": "string"
+ },
+ "networkService": {
+ "description": "network name - internal ATT field",
+ "type": "string"
+ },
+ "possibleRootCause": {
+ "description": "Reserved for future use",
+ "type": "string"
+ },
+ "thresholdCrossingFieldsVersion": {
+ "description": "version of the thresholdCrossingAlertFields block",
+ "type": "number"
+ }
+ },
+ "required": [
+ "additionalParameters",
+ "alertAction",
+ "alertDescription",
+ "alertType",
+ "collectionTimestamp",
+ "eventSeverity",
+ "eventStartTimestamp"
+ ]
+ },
+ "vNicUsage": {
+ "description": "usage of identified virtual network interface card",
+ "type": "object",
+ "properties": {
+ "broadcastPacketsIn": { "type": "number" },
+ "broadcastPacketsOut": { "type": "number" },
+ "bytesIn": { "type": "number" },
+ "bytesOut": { "type": "number" },
+ "multicastPacketsIn": { "type": "number" },
+ "multicastPacketsOut": { "type": "number" },
+ "packetsIn": { "type": "number" },
+ "packetsOut": { "type": "number" },
+ "unicastPacketsIn": { "type": "number" },
+ "unicastPacketsOut": { "type": "number" },
+ "vNicIdentifier": { "type": "string" }
+ },
+ "required": [ "bytesIn", "bytesOut", "packetsIn", "packetsOut", "vNicIdentifier"]
+ }
+ },
+ "title": "Event Listener",
+ "type": "object",
+ "properties": {
+ "event": {"$ref": "#/definitions/event"}
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/test/csit/tests/dcae/testcases/resources/VES-5.28.3-dataformat.json b/test/csit/tests/dcae/testcases/resources/VES-5.28.3-dataformat.json
new file mode 100644
index 000000000..766323368
--- /dev/null
+++ b/test/csit/tests/dcae/testcases/resources/VES-5.28.3-dataformat.json
@@ -0,0 +1,2105 @@
+{
+ "self": {
+ "name": "VES_specification",
+ "version": "5.28.3",
+ "description": "VES spec for 5.3"
+ },
+ "dataformatversion": "1.0.0",
+ "jsonschema": {
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "definitions": {
+ "attCopyrightNotice": {
+ "description": "Copyright (c) <2017>, AT&T Intellectual Property. All rights reserved. Licensed under the Apache License, Version 2.0 (the License)",
+ "type": "object",
+ "properties": {
+ "useAndRedistribution": {
+ "description": "You may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0",
+ "type": "string"
+ },
+ "licenseLink": "http://www.apache.org/licenses/LICENSE-2.0",
+ "condition1": {
+ "description": "Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an AS IS BASIS,",
+ "type": "string"
+ },
+ "condition2": {
+ "description": "Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.",
+ "type": "string"
+ },
+ "condition3": {
+ "description": "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
+ "type": "string"
+ },
+ "condition4": {
+ "description": "See the License for the specific language governing permissions and limitations under the License.",
+ "type": "string"
+ },
+ "Trademarks": {
+ "description": "ECOMP and OpenECOMP are trademarks and service marks of AT&T Intellectual Property.",
+ "type": "string"
+ }
+ }
+ },
+ "codecsInUse": {
+ "description": "number of times an identified codec was used over the measurementInterval",
+ "type": "object",
+ "properties": {
+ "codecIdentifier": {
+ "type": "string"
+ },
+ "numberInUse": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "codecIdentifier",
+ "numberInUse"
+ ]
+ },
+ "command": {
+ "description": "command from an event collector toward an event source",
+ "type": "object",
+ "properties": {
+ "commandType": {
+ "type": "string",
+ "enum": [
+ "heartbeatIntervalChange",
+ "measurementIntervalChange",
+ "provideThrottlingState",
+ "throttlingSpecification"
+ ]
+ },
+ "eventDomainThrottleSpecification": {
+ "$ref": "#/definitions/eventDomainThrottleSpecification"
+ },
+ "heartbeatInterval": {
+ "type": "integer"
+ },
+ "measurementInterval": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "commandType"
+ ]
+ },
+ "commandList": {
+ "description": "array of commands from an event collector toward an event source",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/command"
+ },
+ "minItems": 0
+ },
+ "commonEventHeader": {
+ "description": "fields common to all events",
+ "type": "object",
+ "properties": {
+ "domain": {
+ "description": "the eventing domain associated with the event",
+ "type": "string",
+ "enum": [
+ "fault",
+ "heartbeat",
+ "measurementsForVfScaling",
+ "mobileFlow",
+ "other",
+ "sipSignaling",
+ "stateChange",
+ "syslog",
+ "thresholdCrossingAlert",
+ "voiceQuality"
+ ]
+ },
+ "eventId": {
+ "description": "event key that is unique to the event source",
+ "type": "string"
+ },
+ "eventName": {
+ "description": "unique event name",
+ "type": "string"
+ },
+
"eventType": { + "description": "for example - applicationVnf, guestOS, hostOS, platform", + "type": "string" + }, + "internalHeaderFields": { + "$ref": "#/definitions/internalHeaderFields" + }, + "lastEpochMicrosec": { + "description": "the latest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "nfcNamingCode": { + "description": "3 character network function component type, aligned with vfc naming standards", + "type": "string" + }, + "nfNamingCode": { + "description": "4 character network function type, aligned with vnf naming standards", + "type": "string" + }, + "priority": { + "description": "processing priority", + "type": "string", + "enum": [ + "High", + "Medium", + "Normal", + "Low" + ] + }, + "reportingEntityId": { + "description": "UUID identifying the entity reporting the event, for example an OAM VM; must be populated by the ATT enrichment process", + "type": "string" + }, + "reportingEntityName": { + "description": "name of the entity reporting the event, for example, an EMS name; may be the same as sourceName", + "type": "string" + }, + "sequence": { + "description": "ordering of events communicated by an event source instance or 0 if not needed", + "type": "integer" + }, + "sourceId": { + "description": "UUID identifying the entity experiencing the event issue; must be populated by the ATT enrichment process", + "type": "string" + }, + "sourceName": { + "description": "name of the entity experiencing the event issue", + "type": "string" + }, + "startEpochMicrosec": { + "description": "the earliest unix time aka epoch time associated with the event from any component--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "version": { + "description": "version of the event header", + "type": "number" + } + }, + "required": [ + "domain", + "eventId", + "eventName", + "lastEpochMicrosec", + "priority", + "reportingEntityName", + "sequence", + "sourceName", + "startEpochMicrosec", + "version" + ] + }, + "counter": { + "description": "performance counter", + "type": "object", + "properties": { + "criticality": { + "type": "string", + "enum": [ + "CRIT", + "MAJ" + ] + }, + "name": { + "type": "string" + }, + "thresholdCrossed": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "criticality", + "name", + "thresholdCrossed", + "value" + ] + }, + "cpuUsage": { + "description": "usage of an identified CPU", + "type": "object", + "properties": { + "cpuIdentifier": { + "description": "cpu identifer", + "type": "string" + }, + "cpuIdle": { + "description": "percentage of CPU time spent in the idle task", + "type": "number" + }, + "cpuUsageInterrupt": { + "description": "percentage of time spent servicing interrupts", + "type": "number" + }, + "cpuUsageNice": { + "description": "percentage of time spent running user space processes that have been niced", + "type": "number" + }, + "cpuUsageSoftIrq": { + "description": "percentage of time spent handling soft irq interrupts", + "type": "number" + }, + "cpuUsageSteal": { + "description": "percentage of time spent in involuntary wait which is neither user, system or idle time and is effectively time that went missing", + "type": "number" + }, + "cpuUsageSystem": { + "description": "percentage of time spent on system tasks running the kernel", + "type": "number" + }, + "cpuUsageUser": { + "description": "percentage of time spent running un-niced user 
space processes", + "type": "number" + }, + "cpuWait": { + "description": "percentage of CPU time spent waiting for I/O operations to complete", + "type": "number" + }, + "percentUsage": { + "description": "aggregate cpu usage of the virtual machine on which the VNFC reporting the event is running", + "type": "number" + } + }, + "required": [ + "cpuIdentifier", + "percentUsage" + ] + }, + "diskUsage": { + "description": "usage of an identified disk", + "type": "object", + "properties": { + "diskIdentifier": { + "description": "disk identifier", + "type": "string" + }, + "diskIoTimeAvg": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the average over the measurement interval", + "type": "number" + }, + "diskIoTimeLast": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMax": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskIoTimeMin": { + "description": "milliseconds spent doing input/output operations over 1 sec; treat this metric as a device load percentage where 1000ms matches 100% load; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadAvg": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadLast": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMax": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedReadMin": { + "description": "number of logical read operations that were merged into physical read operations, e.g., two logical reads were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteAvg": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteLast": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the last value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMax": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; 
provide the maximum value measurement within the measurement interval", + "type": "number" + }, + "diskMergedWriteMin": { + "description": "number of logical write operations that were merged into physical write operations, e.g., two logical writes were served by one physical disk access; provide the minimum value measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadAvg": { + "description": "number of octets per second read from a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadLast": { + "description": "number of octets per second read from a disk or partition; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadMax": { + "description": "number of octets per second read from a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsReadMin": { + "description": "number of octets per second read from a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteAvg": { + "description": "number of octets per second written to a disk or partition; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteLast": { + "description": "number of octets per second written to a disk or partition; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMax": { + "description": "number of octets per second written to a disk or partition; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOctetsWriteMin": { + "description": "number of octets per second written to a disk or partition; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadAvg": { + "description": "number of read operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadLast": { + "description": "number of read operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMax": { + "description": "number of read operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsReadMin": { + "description": "number of read operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteAvg": { + "description": "number of write operations per second issued to the disk; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteLast": { + "description": "number of write operations per second issued to the disk; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMax": { + "description": "number of write operations per second issued to the disk; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskOpsWriteMin": { + "description": "number of write operations per second issued to the disk; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsAvg": { + "description": "queue size of pending I/O operations per second; provide the average 
measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsLast": { + "description": "queue size of pending I/O operations per second; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMax": { + "description": "queue size of pending I/O operations per second; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskPendingOperationsMin": { + "description": "queue size of pending I/O operations per second; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadAvg": { + "description": "milliseconds a read operation took to complete; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadLast": { + "description": "milliseconds a read operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMax": { + "description": "milliseconds a read operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeReadMin": { + "description": "milliseconds a read operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteAvg": { + "description": "milliseconds a write operation took to complete; provide the average measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteLast": { + "description": "milliseconds a write operation took to complete; provide the last measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMax": { + "description": "milliseconds a write operation took to complete; provide the maximum measurement within the measurement interval", + "type": "number" + }, + "diskTimeWriteMin": { + "description": "milliseconds a write operation took to complete; provide the minimum measurement within the measurement interval", + "type": "number" + } + }, + "required": [ + "diskIdentifier" + ] + }, + "endOfCallVqmSummaries": { + "description": "provides end of call voice quality metrics", + "type": "object", + "properties": { + "adjacencyName": { + "description": " adjacency name", + "type": "string" + }, + "endpointDescription": { + "description": "Either Caller or Callee", + "type": "string", + "enum": [ + "Caller", + "Callee" + ] + }, + "endpointJitter": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpOctetsSent": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "endpointRtpPacketsSent": { + "description": "", + "type": "number" + }, + "localJitter": { + "description": "", + "type": "number" + }, + "localRtpOctetsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpOctetsReceived": { + "description": "", + "type": "number" + }, + "localRtpOctetsSent": { + "description": "", + "type": "number" + }, + "localRtpPacketsDiscarded": { + "description": "", + "type": "number" + }, + "localRtpPacketsReceived": { + "description": "", + "type": "number" + }, + "localRtpPacketsSent": { + "description": "", + "type": "number" + }, + "mosCqe": { + "description": "1-5 1dp", + 
"type": "number" + }, + "packetsLost": { + "description": "", + "type": "number" + }, + "packetLossPercent": { + "description": "Calculated percentage packet loss based on Endpoint RTP packets lost (as reported in RTCP) and Local RTP packets sent. Direction is based on Endpoint description (Caller, Callee). Decimal (2 dp)", + "type": "number" + }, + "rFactor": { + "description": "0-100", + "type": "number" + }, + "roundTripDelay": { + "description": "millisecs", + "type": "number" + } + }, + "required": [ + "adjacencyName", + "endpointDescription" + ] + }, + "event": { + "description": "the root level of the common event format", + "type": "object", + "properties": { + "commonEventHeader": { + "$ref": "#/definitions/commonEventHeader" + }, + "faultFields": { + "$ref": "#/definitions/faultFields" + }, + "heartbeatFields": { + "$ref": "#/definitions/heartbeatFields" + }, + "measurementsForVfScalingFields": { + "$ref": "#/definitions/measurementsForVfScalingFields" + }, + "mobileFlowFields": { + "$ref": "#/definitions/mobileFlowFields" + }, + "otherFields": { + "$ref": "#/definitions/otherFields" + }, + "sipSignalingFields": { + "$ref": "#/definitions/sipSignalingFields" + }, + "stateChangeFields": { + "$ref": "#/definitions/stateChangeFields" + }, + "syslogFields": { + "$ref": "#/definitions/syslogFields" + }, + "thresholdCrossingAlertFields": { + "$ref": "#/definitions/thresholdCrossingAlertFields" + }, + "voiceQualityFields": { + "$ref": "#/definitions/voiceQualityFields" + } + }, + "required": [ + "commonEventHeader" + ] + }, + "eventDomainThrottleSpecification": { + "description": "specification of what information to suppress within an event domain", + "type": "object", + "properties": { + "eventDomain": { + "description": "Event domain enum from the commonEventHeader domain field", + "type": "string" + }, + "suppressedFieldNames": { + "description": "List of optional field names in the event block that should not be sent to the Event Listener", + "type": "array", + "items": { + "type": "string" + } + }, + "suppressedNvPairsList": { + "description": "Optional list of specific NvPairsNames to suppress within a given Name-Value Field", + "type": "array", + "items": { + "$ref": "#/definitions/suppressedNvPairs" + } + } + }, + "required": [ + "eventDomain" + ] + }, + "eventDomainThrottleSpecificationList": { + "description": "array of eventDomainThrottleSpecifications", + "type": "array", + "items": { + "$ref": "#/definitions/eventDomainThrottleSpecification" + }, + "minItems": 0 + }, + "eventList": { + "description": "array of events", + "type": "array", + "items": { + "$ref": "#/definitions/event" + } + }, + "eventThrottlingState": { + "description": "reports the throttling in force at the event source", + "type": "object", + "properties": { + "eventThrottlingMode": { + "description": "Mode the event manager is in", + "type": "string", + "enum": [ + "normal", + "throttled" + ] + }, + "eventDomainThrottleSpecificationList": { + "$ref": "#/definitions/eventDomainThrottleSpecificationList" + } + }, + "required": [ + "eventThrottlingMode" + ] + }, + "faultFields": { + "description": "fields specific to fault events", + "type": "object", + "properties": { + "alarmAdditionalInformation": { + "description": "additional alarm information", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "alarmCondition": { + "description": "alarm condition reported by the device", + "type": "string" + }, + "alarmInterfaceA": { + "description": "card, port, channel or interface name of 
the device generating the alarm", + "type": "string" + }, + "eventCategory": { + "description": "Event category, for example: license, link, routing, security, signaling", + "type": "string" + }, + "eventSeverity": { + "description": "event severity", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventSourceType": { + "description": "type of event source; examples: card, host, other, port, portThreshold, router, slotThreshold, switch, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + "faultFieldsVersion": { + "description": "version of the faultFields block", + "type": "number" + }, + "specificProblem": { + "description": "short description of the alarm or problem", + "type": "string" + }, + "vfStatus": { + "description": "virtual function status enumeration", + "type": "string", + "enum": [ + "Active", + "Idle", + "Preparing to terminate", + "Ready to terminate", + "Requesting termination" + ] + } + }, + "required": [ + "alarmCondition", + "eventSeverity", + "eventSourceType", + "faultFieldsVersion", + "specificProblem", + "vfStatus" + ] + }, + "featuresInUse": { + "description": "number of times an identified feature was used over the measurementInterval", + "type": "object", + "properties": { + "featureIdentifier": { + "type": "string" + }, + "featureUtilization": { + "type": "integer" + } + }, + "required": [ + "featureIdentifier", + "featureUtilization" + ] + }, + "field": { + "description": "name value pair", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "required": [ + "name", + "value" + ] + }, + "filesystemUsage": { + "description": "disk usage of an identified virtual machine in gigabytes and/or gigabytes per second", + "type": "object", + "properties": { + "blockConfigured": { + "type": "number" + }, + "blockIops": { + "type": "number" + }, + "blockUsed": { + "type": "number" + }, + "ephemeralConfigured": { + "type": "number" + }, + "ephemeralIops": { + "type": "number" + }, + "ephemeralUsed": { + "type": "number" + }, + "filesystemName": { + "type": "string" + } + }, + "required": [ + "blockConfigured", + "blockIops", + "blockUsed", + "ephemeralConfigured", + "ephemeralIops", + "ephemeralUsed", + "filesystemName" + ] + }, + "gtpPerFlowMetrics": { + "description": "Mobility GTP Protocol per flow metrics", + "type": "object", + "properties": { + "avgBitErrorRate": { + "description": "average bit error rate", + "type": "number" + }, + "avgPacketDelayVariation": { + "description": "Average packet delay variation or jitter in milliseconds for received packets: Average difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "avgPacketLatency": { + "description": "average delivery latency", + "type": "number" + }, + "avgReceiveThroughput": { + "description": "average receive throughput", + "type": "number" + }, + "avgTransmitThroughput": { + "description": "average transmit throughput", + "type": "number" + }, + "durConnectionFailedStatus": { + "description": "duration of failed state in milliseconds, computed as the cumulative time between a failed echo request and the next following successful error request, over this reporting interval", + "type": "number" + }, + "durTunnelFailedStatus": { + "description": "Duration of errored state, computed as the cumulative time between a tunnel error indicator and the next following non-errored indicator, over this reporting interval", 
+ "type": "number" + }, + "flowActivatedBy": { + "description": "Endpoint activating the flow", + "type": "string" + }, + "flowActivationEpoch": { + "description": "Time the connection is activated in the flow (connection) being reported on, or transmission time of the first packet if activation time is not available", + "type": "number" + }, + "flowActivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowActivationTime": { + "description": "time the connection is activated in the flow being reported on, or transmission time of the first packet if activation time is not available; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowDeactivatedBy": { + "description": "Endpoint deactivating the flow", + "type": "string" + }, + "flowDeactivationEpoch": { + "description": "Time for the start of the flow connection, in integer UTC epoch time aka UNIX time", + "type": "number" + }, + "flowDeactivationMicrosec": { + "description": "Integer microseconds for the start of the flow connection", + "type": "number" + }, + "flowDeactivationTime": { + "description": "Transmission time of the first packet in the flow connection being reported on; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "flowStatus": { + "description": "connection status at reporting time as a working / inactive / failed indicator value", + "type": "string" + }, + "gtpConnectionStatus": { + "description": "Current connection state at reporting time", + "type": "string" + }, + "gtpTunnelStatus": { + "description": "Current tunnel state at reporting time", + "type": "string" + }, + "ipTosCountList": { + "description": "array of key: value pairs where the keys are drawn from the IP Type-of-Service identifiers which range from '0' to '255', and the values are the count of packets that had those ToS identifiers in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + } + }, + "ipTosList": { + "description": "Array of unique IP Type-of-Service values observed in the flow where values range from '0' to '255'", + "type": "array", + "items": { + "type": "string" + } + }, + "largePacketRtt": { + "description": "large packet round trip time", + "type": "number" + }, + "largePacketThreshold": { + "description": "large packet threshold being applied", + "type": "number" + }, + "maxPacketDelayVariation": { + "description": "Maximum packet delay variation or jitter in milliseconds for received packets: Maximum of the difference between the packet timestamp and time received for all pairs of consecutive packets", + "type": "number" + }, + "maxReceiveBitRate": { + "description": "maximum receive bit rate", + "type": "number" + }, + "maxTransmitBitRate": { + "description": "maximum transmit bit rate", + "type": "number" + }, + "mobileQciCosCountList": { + "description": "array of key: value pairs where the keys are drawn from LTE QCI or UMTS class of service strings, and the values are the count of packets that had those strings in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + } + }, + "mobileQciCosList": { + "description": "Array of unique LTE QCI or UMTS class-of-service values observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "numActivationFailures": { + "description": "Number of 
failed activation requests, as observed by the reporting node", + "type": "number" + }, + "numBitErrors": { + "description": "number of errored bits", + "type": "number" + }, + "numBytesReceived": { + "description": "number of bytes received, including retransmissions", + "type": "number" + }, + "numBytesTransmitted": { + "description": "number of bytes transmitted, including retransmissions", + "type": "number" + }, + "numDroppedPackets": { + "description": "number of received packets dropped due to errors per virtual interface", + "type": "number" + }, + "numGtpEchoFailures": { + "description": "Number of Echo request path failures where failed paths are defined in 3GPP TS 29.281 sec 7.2.1 and 3GPP TS 29.060 sec. 11.2", + "type": "number" + }, + "numGtpTunnelErrors": { + "description": "Number of tunnel error indications where errors are defined in 3GPP TS 29.281 sec 7.3.1 and 3GPP TS 29.060 sec. 11.1", + "type": "number" + }, + "numHttpErrors": { + "description": "Http error count", + "type": "number" + }, + "numL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, including retransmissions", + "type": "number" + }, + "numL7BytesTransmitted": { + "description": "number of tunneled layer 7 bytes transmitted, excluding retransmissions", + "type": "number" + }, + "numLostPackets": { + "description": "number of lost packets", + "type": "number" + }, + "numOutOfOrderPackets": { + "description": "number of out-of-order packets", + "type": "number" + }, + "numPacketErrors": { + "description": "number of errored packets", + "type": "number" + }, + "numPacketsReceivedExclRetrans": { + "description": "number of packets received, excluding retransmission", + "type": "number" + }, + "numPacketsReceivedInclRetrans": { + "description": "number of packets received, including retransmission", + "type": "number" + }, + "numPacketsTransmittedInclRetrans": { + "description": "number of packets transmitted, including retransmissions", + "type": "number" + }, + "numRetries": { + "description": "number of packet retries", + "type": "number" + }, + "numTimeouts": { + "description": "number of packet timeouts", + "type": "number" + }, + "numTunneledL7BytesReceived": { + "description": "number of tunneled layer 7 bytes received, excluding retransmissions", + "type": "number" + }, + "roundTripTime": { + "description": "round trip time", + "type": "number" + }, + "tcpFlagCountList": { + "description": "array of key: value pairs where the keys are drawn from TCP Flags and the values are the count of packets that had that TCP Flag in the flow", + "type": "array", + "items": { + "type": "array", + "items": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + } + }, + "tcpFlagList": { + "description": "Array of unique TCP Flags observed in the flow", + "type": "array", + "items": { + "type": "string" + } + }, + "timeToFirstByte": { + "description": "Time in milliseconds between the connection activation and first byte received", + "type": "number" + } + }, + "required": [ + "avgBitErrorRate", + "avgPacketDelayVariation", + "avgPacketLatency", + "avgReceiveThroughput", + "avgTransmitThroughput", + "flowActivationEpoch", + "flowActivationMicrosec", + "flowDeactivationEpoch", + "flowDeactivationMicrosec", + "flowDeactivationTime", + "flowStatus", + "maxPacketDelayVariation", + "numActivationFailures", + "numBitErrors", + "numBytesReceived", + "numBytesTransmitted", + "numDroppedPackets", + "numL7BytesReceived", + "numL7BytesTransmitted", + "numLostPackets", + "numOutOfOrderPackets", 
+ "numPacketErrors", + "numPacketsReceivedExclRetrans", + "numPacketsReceivedInclRetrans", + "numPacketsTransmittedInclRetrans", + "numRetries", + "numTimeouts", + "numTunneledL7BytesReceived", + "roundTripTime", + "timeToFirstByte" + ] + }, + "heartbeatFields": { + "description": "optional field block for fields specific to heartbeat events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional heartbeat fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "heartbeatFieldsVersion": { + "description": "version of the heartbeatFields block", + "type": "number" + }, + "heartbeatInterval": { + "description": "current heartbeat interval in seconds", + "type": "integer" + } + }, + "required": [ + "heartbeatFieldsVersion", + "heartbeatInterval" + ] + }, + "internalHeaderFields": { + "description": "enrichment fields for internal VES Event Listener service use only, not supplied by event sources", + "type": "object" + }, + "jsonObject": { + "description": "json object schema, name and other meta-information along with one or more object instances", + "type": "object", + "properties": { + "objectInstances": { + "description": "one or more instances of the jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObjectInstance" + } + }, + "objectName": { + "description": "name of the JSON Object", + "type": "string" + }, + "objectSchema": { + "description": "json schema for the object", + "type": "string" + }, + "objectSchemaUrl": { + "description": "Url to the json schema for the object", + "type": "string" + }, + "nfSubscribedObjectName": { + "description": "name of the object associated with the nfSubscriptonId", + "type": "string" + }, + "nfSubscriptionId": { + "description": "identifies an openConfig telemetry subscription on a network function, which configures the network function to send complex object data associated with the jsonObject", + "type": "string" + } + }, + "required": [ + "objectInstances", + "objectName" + ] + }, + "jsonObjectInstance": { + "description": "meta-information about an instance of a jsonObject along with the actual object instance", + "type": "object", + "properties": { + "objectInstance": { + "description": "an instance conforming to the jsonObject schema", + "type": "object" + }, + "objectInstanceEpochMicrosec": { + "description": "the unix time aka epoch time associated with this objectInstance--as microseconds elapsed since 1 Jan 1970 not including leap seconds", + "type": "number" + }, + "objectKeys": { + "description": "an ordered set of keys that identifies this particular instance of jsonObject", + "type": "array", + "items": { + "$ref": "#/definitions/key" + } + } + }, + "required": [ + "objectInstance" + ] + }, + "key": { + "description": "tuple which provides the name of a key along with its value and relative order", + "type": "object", + "properties": { + "keyName": { + "description": "name of the key", + "type": "string" + }, + "keyOrder": { + "description": "relative sequence or order of the key with respect to other keys", + "type": "integer" + }, + "keyValue": { + "description": "value of the key", + "type": "string" + } + }, + "required": [ + "keyName" + ] + }, + "latencyBucketMeasure": { + "description": "number of counts falling within a defined latency bucket", + "type": "object", + "properties": { + "countsInTheBucket": { + "type": "number" + }, + "highEndOfLatencyBucket": { + "type": "number" + }, + "lowEndOfLatencyBucket": { + "type": "number" + 
} + }, + "required": [ + "countsInTheBucket" + ] + }, + "measurementsForVfScalingFields": { + "description": "measurementsForVfScaling fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional name-value-pair fields", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalMeasurements": { + "description": "array of named name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "additionalObjects": { + "description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "codecUsageArray": { + "description": "array of codecs in use", + "type": "array", + "items": { + "$ref": "#/definitions/codecsInUse" + } + }, + "concurrentSessions": { + "description": "peak concurrent sessions for the VM or VNF over the measurementInterval", + "type": "integer" + }, + "configuredEntities": { + "description": "over the measurementInterval, peak total number of: users, subscribers, devices, adjacencies, etc., for the VM, or subscribers, devices, etc., for the VNF", + "type": "integer" + }, + "cpuUsageArray": { + "description": "usage of an array of CPUs", + "type": "array", + "items": { + "$ref": "#/definitions/cpuUsage" + } + }, + "diskUsageArray": { + "description": "usage of an array of disks", + "type": "array", + "items": { + "$ref": "#/definitions/diskUsage" + } + }, + "featureUsageArray": { + "description": "array of features in use", + "type": "array", + "items": { + "$ref": "#/definitions/featuresInUse" + } + }, + "filesystemUsageArray": { + "description": "filesystem usage of the VM on which the VNFC reporting the event is running", + "type": "array", + "items": { + "$ref": "#/definitions/filesystemUsage" + } + }, + "latencyDistribution": { + "description": "array of integers representing counts of requests whose latency in milliseconds falls within per-VNF configured ranges", + "type": "array", + "items": { + "$ref": "#/definitions/latencyBucketMeasure" + } + }, + "meanRequestLatency": { + "description": "mean seconds required to respond to each request for the VM on which the VNFC reporting the event is running", + "type": "number" + }, + "measurementInterval": { + "description": "interval over which measurements are being reported in seconds", + "type": "number" + }, + "measurementsForVfScalingVersion": { + "description": "version of the measurementsForVfScaling block", + "type": "number" + }, + "memoryUsageArray": { + "description": "memory usage of an array of VMs", + "type": "array", + "items": { + "$ref": "#/definitions/memoryUsage" + } + }, + "numberOfMediaPortsInUse": { + "description": "number of media ports in use", + "type": "integer" + }, + "requestRate": { + "description": "peak rate of service requests per second to the VNF over the measurementInterval", + "type": "number" + }, + "vnfcScalingMetric": { + "description": "represents busy-ness of the VNF from 0 to 100 as reported by the VNFC", + "type": "integer" + }, + "vNicPerformanceArray": { + "description": "usage of an array of virtual network interface cards", + "type": "array", + "items": { + "$ref": "#/definitions/vNicPerformance" + } + } + }, + "required": [ + "measurementInterval", + "measurementsForVfScalingVersion" + ] + }, + "memoryUsage": { + "description": "memory usage of an identified virtual machine", + "type": "object", + "properties": { + "memoryBuffered": { + "description": "kibibytes 
of temporary storage for raw disk blocks", + "type": "number" + }, + "memoryCached": { + "description": "kibibytes of memory used for cache", + "type": "number" + }, + "memoryConfigured": { + "description": "kibibytes of memory configured in the virtual machine on which the VNFC reporting the event is running", + "type": "number" + }, + "memoryFree": { + "description": "kibibytes of physical RAM left unused by the system", + "type": "number" + }, + "memorySlabRecl": { + "description": "the part of the slab that can be reclaimed such as caches measured in kibibytes", + "type": "number" + }, + "memorySlabUnrecl": { + "description": "the part of the slab that cannot be reclaimed even when lacking memory measured in kibibytes", + "type": "number" + }, + "memoryUsed": { + "description": "total memory minus the sum of free, buffered, cached and slab memory measured in kibibytes", + "type": "number" + }, + "vmIdentifier": { + "description": "virtual machine identifier associated with the memory metrics", + "type": "string" + } + }, + "required": [ + "memoryFree", + "memoryUsed", + "vmIdentifier" + ] + }, + "mobileFlowFields": { + "description": "mobileFlow fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional mobileFlow fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "applicationType": { + "description": "Application type inferred", + "type": "string" + }, + "appProtocolType": { + "description": "application protocol", + "type": "string" + }, + "appProtocolVersion": { + "description": "application protocol version", + "type": "string" + }, + "cid": { + "description": "cell id", + "type": "string" + }, + "connectionType": { + "description": "Abbreviation referencing a 3GPP reference point e.g., S1-U, S11, etc", + "type": "string" + }, + "ecgi": { + "description": "Evolved Cell Global Id", + "type": "string" + }, + "flowDirection": { + "description": "Flow direction, indicating if the reporting node is the source of the flow or destination for the flow", + "type": "string" + }, + "gtpPerFlowMetrics": { + "$ref": "#/definitions/gtpPerFlowMetrics" + }, + "gtpProtocolType": { + "description": "GTP protocol", + "type": "string" + }, + "gtpVersion": { + "description": "GTP protocol version", + "type": "string" + }, + "httpHeader": { + "description": "HTTP request header, if the flow connects to a node referenced by HTTP", + "type": "string" + }, + "imei": { + "description": "IMEI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "imsi": { + "description": "IMSI for the subscriber UE used in this flow, if the flow connects to a mobile device", + "type": "string" + }, + "ipProtocolType": { + "description": "IP protocol type e.g., TCP, UDP, RTP...", + "type": "string" + }, + "ipVersion": { + "description": "IP protocol version e.g., IPv4, IPv6", + "type": "string" + }, + "lac": { + "description": "location area code", + "type": "string" + }, + "mcc": { + "description": "mobile country code", + "type": "string" + }, + "mnc": { + "description": "mobile network code", + "type": "string" + }, + "mobileFlowFieldsVersion": { + "description": "version of the mobileFlowFields block", + "type": "number" + }, + "msisdn": { + "description": "MSISDN for the subscriber UE used in this flow, as an integer, if the flow connects to a mobile device", + "type": "string" + }, + "otherEndpointIpAddress": { + "description": "IP address for the other endpoint, as used for the 
flow being reported on", + "type": "string" + }, + "otherEndpointPort": { + "description": "IP Port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "otherFunctionalRole": { + "description": "Functional role of the other endpoint for the flow being reported on e.g., MME, S-GW, P-GW, PCRF...", + "type": "string" + }, + "rac": { + "description": "routing area code", + "type": "string" + }, + "radioAccessTechnology": { + "description": "Radio Access Technology e.g., 2G, 3G, LTE", + "type": "string" + }, + "reportingEndpointIpAddr": { + "description": "IP address for the reporting entity, as used for the flow being reported on", + "type": "string" + }, + "reportingEndpointPort": { + "description": "IP port for the reporting entity, as used for the flow being reported on", + "type": "integer" + }, + "sac": { + "description": "service area code", + "type": "string" + }, + "samplingAlgorithm": { + "description": "Integer identifier for the sampling algorithm or rule being applied in calculating the flow metrics if metrics are calculated based on a sample of packets, or 0 if no sampling is applied", + "type": "integer" + }, + "tac": { + "description": "transport area code", + "type": "string" + }, + "tunnelId": { + "description": "tunnel identifier", + "type": "string" + }, + "vlanId": { + "description": "VLAN identifier used by this flow", + "type": "string" + } + }, + "required": [ + "flowDirection", + "gtpPerFlowMetrics", + "ipProtocolType", + "ipVersion", + "mobileFlowFieldsVersion", + "otherEndpointIpAddress", + "otherEndpointPort", + "reportingEndpointIpAddr", + "reportingEndpointPort" + ] + }, + "namedArrayOfFields": { + "description": "an array of name value pairs along with a name for the array", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "arrayOfFields": { + "description": "array of name value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + } + }, + "required": [ + "name", + "arrayOfFields" + ] + }, + "otherFields": { + "description": "fields for events belonging to the 'other' domain of the commonEventHeader domain enumeration", + "type": "object", + "properties": { + "hashOfNameValuePairArrays": { + "description": "array of named name-value-pair arrays", + "type": "array", + "items": { + "$ref": "#/definitions/namedArrayOfFields" + } + }, + "jsonObjects": { + "description": "array of JSON objects described by name, schema and other meta-information", + "type": "array", + "items": { + "$ref": "#/definitions/jsonObject" + } + }, + "nameValuePairs": { + "description": "array of name-value pairs", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "otherFieldsVersion": { + "description": "version of the otherFields block", + "type": "number" + } + }, + "required": [ + "otherFieldsVersion" + ] + }, + "requestError": { + "description": "standard request error data structure", + "type": "object", + "properties": { + "messageId": { + "description": "Unique message identifier of the format ABCnnnn where ABC is either SVC for Service Exceptions or POL for Policy Exception", + "type": "string" + }, + "text": { + "description": "Message text, with replacement variables marked with %n, where n is an index into the list of <variables> elements, starting at 1", + "type": "string" + }, + "url": { + "description": "Hyperlink to a detailed error resource e.g., an HTML page for browser user agents", + "type": "string" + }, + "variables": { + "description": "List of zero or 
more strings that represent the contents of the variables used by the message text", + "type": "string" + } + }, + "required": [ + "messageId", + "text" + ] + }, + "sipSignalingFields": { + "description": "sip signaling fields", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional sip signaling fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "compressedSip": { + "description": "the full SIP request/response including headers and bodies", + "type": "string" + }, + "correlator": { + "description": "this is the same for all events on this call", + "type": "string" + }, + "localIpAddress": { + "description": "IP address on VNF", + "type": "string" + }, + "localPort": { + "description": "port on VNF", + "type": "string" + }, + "remoteIpAddress": { + "description": "IP address of peer endpoint", + "type": "string" + }, + "remotePort": { + "description": "port of peer endpoint", + "type": "string" + }, + "sipSignalingFieldsVersion": { + "description": "version of the sipSignalingFields block", + "type": "number" + }, + "summarySip": { + "description": "the SIP Method or Response (‘INVITE’, ‘200 OK’, ‘BYE’, etc)", + "type": "string" + }, + "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + } + }, + "required": [ + "correlator", + "localIpAddress", + "localPort", + "remoteIpAddress", + "remotePort", + "sipSignalingFieldsVersion", + "vendorVnfNameFields" + ] + }, + "stateChangeFields": { + "description": "stateChange fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional stateChange fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "newState": { + "description": "new state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "oldState": { + "description": "previous state of the entity", + "type": "string", + "enum": [ + "inService", + "maintenance", + "outOfService" + ] + }, + "stateChangeFieldsVersion": { + "description": "version of the stateChangeFields block", + "type": "number" + }, + "stateInterface": { + "description": "card or port name of the entity that changed state", + "type": "string" + } + }, + "required": [ + "newState", + "oldState", + "stateChangeFieldsVersion", + "stateInterface" + ] + }, + "suppressedNvPairs": { + "description": "List of specific NvPairsNames to suppress within a given Name-Value Field for event Throttling", + "type": "object", + "properties": { + "nvPairFieldName": { + "description": "Name of the field within which are the nvpair names to suppress", + "type": "string" + }, + "suppressedNvPairNames": { + "description": "Array of nvpair names to suppress within the nvpairFieldName", + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "nvPairFieldName", + "suppressedNvPairNames" + ] + }, + "syslogFields": { + "description": "sysLog fields", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional syslog fields if needed provided as name=value delimited by a pipe ‘|’ symbol, for example: 'name1=value1|name2=value2|…'", + "type": "string" + }, + "eventSourceHost": { + "description": "hostname of the device", + "type": "string" + }, + "eventSourceType": { + "description": "type of event source; examples: other, router, switch, host, card, port, slotThreshold, portThreshold, virtualMachine, virtualNetworkFunction", + "type": "string" + }, + 
"syslogFacility": { + "description": "numeric code from 0 to 23 for facility--see table in documentation", + "type": "integer" + }, + "syslogFieldsVersion": { + "description": "version of the syslogFields block", + "type": "number" + }, + "syslogMsg": { + "description": "syslog message", + "type": "string" + }, + "syslogPri": { + "description": "0-192 combined severity and facility", + "type": "integer" + }, + "syslogProc": { + "description": "identifies the application that originated the message", + "type": "string" + }, + "syslogProcId": { + "description": "a change in the value of this field indicates a discontinuity in syslog reporting", + "type": "number" + }, + "syslogSData": { + "description": "syslog structured data consisting of a structured data Id followed by a set of key value pairs", + "type": "string" + }, + "syslogSdId": { + "description": "0-32 char in format name@number for example ourSDID@32473", + "type": "string" + }, + "syslogSev": { + "description": "numerical Code for severity derived from syslogPri as remaider of syslogPri / 8", + "type": "string", + "enum": [ + "Alert", + "Critical", + "Debug", + "Emergency", + "Error", + "Info", + "Notice", + "Warning" + ] + }, + "syslogTag": { + "description": "msgId indicating the type of message such as TCPOUT or TCPIN; NILVALUE should be used when no other value can be provided", + "type": "string" + }, + "syslogVer": { + "description": "IANA assigned version of the syslog protocol specification - typically 1", + "type": "number" + } + }, + "required": [ + "eventSourceType", + "syslogFieldsVersion", + "syslogMsg", + "syslogTag" + ] + }, + "thresholdCrossingAlertFields": { + "description": "fields specific to threshold crossing alert events", + "type": "object", + "properties": { + "additionalFields": { + "description": "additional threshold crossing alert fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "additionalParameters": { + "description": "performance counters", + "type": "array", + "items": { + "$ref": "#/definitions/counter" + } + }, + "alertAction": { + "description": "Event action", + "type": "string", + "enum": [ + "CLEAR", + "CONT", + "SET" + ] + }, + "alertDescription": { + "description": "Unique short alert description such as IF-SHUB-ERRDROP", + "type": "string" + }, + "alertType": { + "description": "Event type", + "type": "string", + "enum": [ + "CARD-ANOMALY", + "ELEMENT-ANOMALY", + "INTERFACE-ANOMALY", + "SERVICE-ANOMALY" + ] + }, + "alertValue": { + "description": "Calculated API value (if applicable)", + "type": "string" + }, + "associatedAlertIdList": { + "description": "List of eventIds associated with the event being reported", + "type": "array", + "items": { + "type": "string" + } + }, + "collectionTimestamp": { + "description": "Time when the performance collector picked up the data; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "dataCollector": { + "description": "Specific performance collector instance used", + "type": "string" + }, + "elementType": { + "description": "type of network element - internal ATT field", + "type": "string" + }, + "eventSeverity": { + "description": "event severity or priority", + "type": "string", + "enum": [ + "CRITICAL", + "MAJOR", + "MINOR", + "WARNING", + "NORMAL" + ] + }, + "eventStartTimestamp": { + "description": "Time closest to when the measurement was made; with RFC 2822 compliant format: Sat, 13 Mar 2010 11:29:05 -0800", + "type": "string" + }, + "interfaceName": { + 
"description": "Physical or logical port or card (if applicable)", + "type": "string" + }, + "networkService": { + "description": "network name - internal ATT field", + "type": "string" + }, + "possibleRootCause": { + "description": "Reserved for future use", + "type": "string" + }, + "thresholdCrossingFieldsVersion": { + "description": "version of the thresholdCrossingAlertFields block", + "type": "number" + } + }, + "required": [ + "additionalParameters", + "alertAction", + "alertDescription", + "alertType", + "collectionTimestamp", + "eventSeverity", + "eventStartTimestamp", + "thresholdCrossingFieldsVersion" + ] + }, + "vendorVnfNameFields": { + "description": "provides vendor, vnf and vfModule identifying information", + "type": "object", + "properties": { + "vendorName": { + "description": "VNF vendor name", + "type": "string" + }, + "vfModuleName": { + "description": "ASDC vfModuleName for the vfModule generating the event", + "type": "string" + }, + "vnfName": { + "description": "ASDC modelName for the VNF generating the event", + "type": "string" + } + }, + "required": [ + "vendorName" + ] + }, + "vNicPerformance": { + "description": "describes the performance and errors of an identified virtual network interface card", + "type": "object", + "properties": { + "receivedBroadcastPacketsAccumulated": { + "description": "Cumulative count of broadcast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedBroadcastPacketsDelta": { + "description": "Count of broadcast packets received within the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedDiscardedPacketsDelta": { + "description": "Count of discarded packets received within the measurement interval", + "type": "number" + }, + "receivedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedErrorPacketsDelta": { + "description": "Count of error packets received within the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedMulticastPacketsDelta": { + "description": "Count of multicast packets received within the measurement interval", + "type": "number" + }, + "receivedOctetsAccumulated": { + "description": "Cumulative count of octets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedOctetsDelta": { + "description": "Count of octets received within the measurement interval", + "type": "number" + }, + "receivedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedTotalPacketsDelta": { + "description": "Count of all packets received within the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets received as read at the end of the measurement interval", + "type": "number" + }, + "receivedUnicastPacketsDelta": { + "description": "Count of unicast packets received within the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsAccumulated": 
{ + "description": "Cumulative count of broadcast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedBroadcastPacketsDelta": { + "description": "Count of broadcast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsAccumulated": { + "description": "Cumulative count of discarded packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedDiscardedPacketsDelta": { + "description": "Count of discarded packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedErrorPacketsAccumulated": { + "description": "Cumulative count of error packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedErrorPacketsDelta": { + "description": "Count of error packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsAccumulated": { + "description": "Cumulative count of multicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedMulticastPacketsDelta": { + "description": "Count of multicast packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedOctetsAccumulated": { + "description": "Cumulative count of octets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedOctetsDelta": { + "description": "Count of octets transmitted within the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsAccumulated": { + "description": "Cumulative count of all packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedTotalPacketsDelta": { + "description": "Count of all packets transmitted within the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsAccumulated": { + "description": "Cumulative count of unicast packets transmitted as read at the end of the measurement interval", + "type": "number" + }, + "transmittedUnicastPacketsDelta": { + "description": "Count of unicast packets transmitted within the measurement interval", + "type": "number" + }, + "valuesAreSuspect": { + "description": "Indicates whether vNicPerformance values are likely inaccurate due to counter overflow or other condtions", + "type": "string", + "enum": [ + "true", + "false" + ] + }, + "vNicIdentifier": { + "description": "vNic identification", + "type": "string" + } + }, + "required": [ + "valuesAreSuspect", + "vNicIdentifier" + ] + }, + "voiceQualityFields": { + "description": "provides statistics related to customer facing voice products", + "type": "object", + "properties": { + "additionalInformation": { + "description": "additional voice quality fields if needed", + "type": "array", + "items": { + "$ref": "#/definitions/field" + } + }, + "calleeSideCodec": { + "description": "callee codec for the call", + "type": "string" + }, + "callerSideCodec": { + "description": "caller codec for the call", + "type": "string" + }, + "correlator": { + "description": "this is the same for all events on this call", + "type": "string" + }, + "endOfCallVqmSummaries": { + "$ref": "#/definitions/endOfCallVqmSummaries" + }, + "phoneNumber": { + "description": "phone number associated with the correlator", + "type": "string" + }, + "midCallRtcp": { + "description": "Base64 encoding of the binary RTCP data excluding Eth/IP/UDP headers", + "type": "string" + }, 
+ "vendorVnfNameFields": { + "$ref": "#/definitions/vendorVnfNameFields" + }, + "voiceQualityFieldsVersion": { + "description": "version of the voiceQualityFields block", + "type": "number" + } + }, + "required": [ + "calleeSideCodec", + "callerSideCodec", + "correlator", + "midCallRtcp", + "vendorVnfNameFields", + "voiceQualityFieldsVersion" + ] + } + }, + "title": "Event Listener", + "type": "object", + "properties": { + "event": { + "$ref": "#/definitions/event" + } + } + } +}
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/resources/dcae_keywords.robot b/test/csit/tests/dcae/testcases/resources/dcae_keywords.robot new file mode 100644 index 000000000..a30a1ba01 --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/dcae_keywords.robot @@ -0,0 +1,161 @@ + *** Settings ***
+Documentation    The main interface for interacting with DCAE. It handles low-level details such as managing the HTTP request library and the DCAE-required fields
+Library RequestsLibrary
+Library DcaeLibrary.py
+Library OperatingSystem
+Library Collections
+Variables ../resources/DcaeVariables.py
+Resource ../resources/dcae_properties.robot
+
+
+*** Variables ***
+${DCAE_HEALTH_CHECK_BODY} %{WORKSPACE}/test/csit/tests/dcae/testcases/assets/json_events/dcae_healthcheck.json
+
+*** Keywords ***
+Get DCAE Nodes
+ [Documentation] Get DCAE Nodes from Consul Catalog
+ #Log Creating session ${GLOBAL_DCAE_CONSUL_URL}
+ ${session}= Create Session dcae ${GLOBAL_DCAE_CONSUL_URL}
+ ${uuid}= Generate UUID
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json X-Consul-Token=abcd1234 X-TransactionId=${GLOBAL_APPLICATION_ID}-${uuid} X-FromAppId=${GLOBAL_APPLICATION_ID}
+ ${resp}= Get Request dcae /v1/catalog/nodes headers=${headers}
+ Log Received response from dcae consul: ${resp.json()}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${NodeList}= Get Json Value List ${resp.text} Node
+    ${len}=    Get Length    ${NodeList}
+ Should Not Be Equal As Integers ${len} 0
+ [return] ${NodeList}
+
+
+DCAE Node Health Check
+ [Documentation] Perform DCAE Node Health Check
+ [Arguments] ${NodeName}
+ ${session}= Create Session dcae-${NodeName} ${GLOBAL_DCAE_CONSUL_URL}
+ ${uuid}= Generate UUID
+ ${headers}= Create Dictionary Accept=application/json Content-Type=application/json X-Consul-Token=abcd1234 X-TransactionId=${GLOBAL_APPLICATION_ID}-${uuid} X-FromAppId=${GLOBAL_APPLICATION_ID}
+ ${hcpath}= Catenate SEPARATOR= /v1/health/node/ ${NodeName}
+ ${resp}= Get Request dcae-${NodeName} ${hcpath} headers=${headers}
+ Log Received response from dcae consul: ${resp.json()}
+ Should Be Equal As Strings ${resp.status_code} 200
+ ${StatusList}= Get Json Value List ${resp.text} Status
+ ${len}= Get Length ${StatusList}
+ Should Not Be Equal As Integers ${len} 0
+ DCAE Check Health Status ${NodeName} ${StatusList[0]} Serf Health Status
+ #Run Keyword if ${len} > 1 DCAE Check Health Status ${NodeName} ${StatusList[1]} Serf Health Status
+
+
+DCAE Check Health Status
+ [Arguments] ${NodeName} ${ItemStatus} ${CheckType}
+ Should Be Equal As Strings ${ItemStatus} passing
+    Log    Node: ${NodeName} ${CheckType} check passed
+
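The two Consul keywords above reduce to plain HTTP GETs against the Consul catalog and health APIs; a rough Python equivalent, using the URL and headers defined in this file (the node name is whatever the catalog returns), might be:

    import uuid
    import requests

    CONSUL_URL = "http://135.205.228.129:8500"   # ${GLOBAL_DCAE_CONSUL_URL}
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "X-Consul-Token": "abcd1234",
        "X-TransactionId": "robot-dcae-" + str(uuid.uuid4()),
        "X-FromAppId": "robot-dcae",
    }

    # Get DCAE Nodes: list every node registered in the Consul catalog
    nodes = requests.get(CONSUL_URL + "/v1/catalog/nodes", headers=headers).json()

    # DCAE Node Health Check: the first check (Serf Health Status) must be "passing"
    node_name = nodes[0]["Node"]
    checks = requests.get(CONSUL_URL + "/v1/health/node/" + node_name, headers=headers).json()
    assert checks[0]["Status"] == "passing"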
+VES Collector Suite Setup DMaaP
+ [Documentation] Start DMaaP Mockup Server
+ ${ret}= Setup DMaaP Server
+ Should Be Equal As Strings ${ret} true
+
+VES Collector Suite Shutdown DMaaP
+ [Documentation] Shutdown DMaaP Mockup Server
+ ${ret}= Shutdown DMaap
+ Should Be Equal As Strings ${ret} true
+
+
+
+
+Check DCAE Results
+ [Documentation] Parse DCAE JSON response and make sure all rows have healthTestStatus=GREEN
+ [Arguments] ${json}
+ @{rows}= Get From Dictionary ${json['returns']} rows
+ @{headers}= Get From Dictionary ${json['returns']} columns
+
+ # Retrieve column names from headers
+ ${columns}= Create List
+ :for ${header} in @{headers}
+ \ ${colName}= Get From Dictionary ${header} colName
+ \ Append To List ${columns} ${colName}
+
+ # Process each row making sure status=GREEN
+ :for ${row} in @{rows}
+ \ ${cells}= Get From Dictionary ${row} cells
+ \ ${dict}= Make A Dictionary ${cells} ${columns}
+ \ Dictionary Should Contain Item ${dict} healthTestStatus GREEN
+
+
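Check DCAE Results (and the Make A Dictionary helper below) assume a response whose 'returns' object carries parallel 'columns' and per-row 'cells' arrays; in plain Python the row check amounts to something like the following (the payload values are illustrative assumptions):

    # Assumed shape of the health-check response consumed by Check DCAE Results;
    # hostnames and statuses below are made-up sample data.
    payload = {
        "returns": {
            "columns": [{"colName": "hostname"}, {"colName": "healthTestStatus"}],
            "rows": [
                {"cells": [{"value": "dcae-node-0"}, {"value": "GREEN"}]},
                {"cells": [{"value": "dcae-node-1"}, {"value": "GREEN"}]},
            ],
        }
    }

    columns = [header["colName"] for header in payload["returns"]["columns"]]
    for row in payload["returns"]["rows"]:
        # Make A Dictionary: pair each column name with the row's cell value
        record = {name: cell["value"] for name, cell in zip(columns, row["cells"])}
        assert record["healthTestStatus"] == "GREEN"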
+Make A Dictionary
+    [Documentation]    Given a list of cell dictionaries and a list of column names, map columnName=value for each column
+    [Arguments]      ${cells}    ${columns}    ${valuename}=value
+    ${dict}=    Create Dictionary
+    ${celllength}=    Get Length    ${cells}
+    ${collength}=    Get Length    ${columns}
+    :for    ${index}    in range    0   ${celllength}
+    \    ${name}=    Evaluate     ${columns}[${index}]
+    \    ${valued}=    Evaluate     ${cells}[${index}]
+    \    ${value}=    Get From Dictionary    ${valued}    ${valuename}
+    \    Set To Dictionary    ${dict}   ${name}    ${value}
+    [Return]     ${dict}
+
+
+Get Event Data From File
+ [Arguments] ${jsonfile}
+ ${data}= OperatingSystem.Get File ${jsonfile}
+ #Should Not Be_Equal ${data} None
+ [return] ${data}
+
+Json String To Dictionary
+ [Arguments] ${json_string}
+ ${json_dict}= evaluate json.loads('''${json_string}''') json
+ [return] ${json_dict}
+
+Dictionary To Json String
+ [Arguments] ${json_dict}
+ ${json_string}= evaluate json.dumps(${json_dict}) json
+ [return] ${json_string}
+
+
+Get DCAE Service Component Status
+ [Documentation] Get the status of a DCAE Service Component
+ [Arguments] ${url} ${urlpath} ${usr} ${passwd}
+ ${auth}= Create List ${usr} ${passwd}
+ ${session}= Create Session dcae-service-component ${url} auth=${auth}
+ ${resp}= Get Request dcae-service-component ${urlpath}
+ [return] ${resp}
+
+Publish Event To VES Collector No Auth
+    [Documentation]    Send an event to the VES Collector without authentication
+ [Arguments] ${url} ${evtpath} ${httpheaders} ${evtdata}
+ Log Creating session ${url}
+ ${session}= Create Session dcae-d1 ${url}
+ ${resp}= Post Request dcae-d1 ${evtpath} data=${evtdata} headers=${httpheaders}
+ #Log Received response from dcae ${resp.json()}
+ [return] ${resp}
+
+Publish Event To VES Collector
+ [Documentation] Send an event to VES Collector
+ [Arguments] ${url} ${evtpath} ${httpheaders} ${evtdata} ${user} ${pd}
+ ${auth}= Create List ${user} ${pd}
+ Log Creating session ${url}
+ ${session}= Create Session dcae-d1 ${url} auth=${auth}
+ ${resp}= Post Request dcae-d1 ${evtpath} data=${evtdata} headers=${httpheaders}
+ #Log Received response from dcae ${resp.json()}
+ [return] ${resp}
+
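The publish keywords above are thin wrappers over a basic-auth POST (or, below, PUT) of the event JSON; roughly, in Python, using the collector URL and credentials from dcae_properties.robot (the event file name is a placeholder):

    import requests

    VES_URL = "http://localhost:8443/eventlistener/v5"   # ${GLOBAL_DCAE_VES_URL}
    headers = {"Content-Type": "application/json"}

    with open("sample_ves_event.json") as f:              # placeholder event file
        event_data = f.read()

    # Publish Event To VES Collector: POST the event body with basic auth
    resp = requests.post(VES_URL, data=event_data, headers=headers,
                         auth=("console", "ZjJkYjllMjljMTI2M2Iz"))
    print(resp.status_code, resp.text)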
+Publish Event To VES Collector With Put Method
+    [Documentation]    Send an event to the VES Collector using the PUT method
+ [Arguments] ${url} ${evtpath} ${httpheaders} ${evtdata} ${user} ${pd}
+ ${auth}= Create List ${user} ${pd}
+ Log Creating session ${url}
+ ${session}= Create Session dcae-d1 ${url} auth=${auth}
+ ${resp}= Put Request dcae-d1 ${evtpath} data=${evtdata} headers=${httpheaders}
+ #Log Received response from dcae ${resp.json()}
+ [return] ${resp}
+
+Publish Event To VES Collector With Put Method No Auth
+    [Documentation]    Send an event to the VES Collector using the PUT method without authentication
+ [Arguments] ${url} ${evtpath} ${httpheaders} ${evtdata}
+ Log Creating session ${url}
+ ${session}= Create Session dcae-d1 ${url}
+ ${resp}= Put Request dcae-d1 ${evtpath} data=${evtdata} headers=${httpheaders}
+ #Log Received response from dcae ${resp.json()}
+ [return] ${resp}
+
\ No newline at end of file diff --git a/test/csit/tests/dcae/testcases/resources/dcae_properties.robot b/test/csit/tests/dcae/testcases/resources/dcae_properties.robot new file mode 100644 index 000000000..be182d4aa --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/dcae_properties.robot @@ -0,0 +1,14 @@ +*** Settings ***
+Documentation    store all properties that can change or are used in multiple places here
+...    format is all caps with underscores between words, prefixed with GLOBAL
+...    make sure you prefix them with GLOBAL so that other files can easily see that they come from this file.
+
+
+
+*** Variables ***
+${GLOBAL_APPLICATION_ID} robot-dcae
+${GLOBAL_DCAE_CONSUL_URL} http://135.205.228.129:8500
+${GLOBAL_DCAE_CONSUL_URL1} http://135.205.228.170:8500
+${GLOBAL_DCAE_VES_URL} http://localhost:8443/eventlistener/v5
+${GLOBAL_DCAE_USERNAME} console
+${GLOBAL_DCAE_PASSWORD} ZjJkYjllMjljMTI2M2Iz
diff --git a/test/csit/tests/dcae/testcases/resources/index.htm b/test/csit/tests/dcae/testcases/resources/index.htm new file mode 100644 index 000000000..5ab2f8a43 --- /dev/null +++ b/test/csit/tests/dcae/testcases/resources/index.htm @@ -0,0 +1 @@ +Hello
\ No newline at end of file diff --git a/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot b/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot new file mode 100755 index 000000000..c711bd754 --- /dev/null +++ b/test/csit/tests/dmaap/mrpubsub/mrpubsub.robot @@ -0,0 +1,73 @@ +*** Settings *** +Library OperatingSystem +Library RequestsLibrary +Library requests +Library Collections +Library String + +*** Variables *** +${TARGETURL_PUBLISH} http://${DMAAP_MR_IP}:3904/events/TestTopic1 +${TARGETURL_TOPICS} http://${DMAAP_MR_IP}:3904/topics +${TARGETURL_SUBSCR} http://${DMAAP_MR_IP}:3904/events/TestTopic1/CG1/C1?timeout=1000 +${TEST_DATA} {"topicName": "TestTopic1"} +${TOPIC_DATA} {"topicName":"FirstTopic","topicDescription":"This is a TestTopic","partitionCount":"1","replicationCount":"3","transactionEnabled":"true"} + +*** Test Cases *** +Run Topic Creation and Publish + [Documentation] Topic Creation + [Timeout] 1 minute + ${resp}= PostCall ${TARGETURL_PUBLISH} ${TEST_DATA} + log ${TARGETURL_PUBLISH} + log ${resp.text} + Should Be Equal As Strings ${resp.status_code} 200 + ${count}= Evaluate $resp.json().get('count') + log 'JSON Response Code:'${resp} + +Run Subscribing a message status + [Documentation] Subscribide message status + [Timeout] 1 minute + ${resp}= GetCall ${TARGETURL_SUBSCR} + log ${TARGETURL_SUBSCR} + Should Be Equal As Strings ${resp.status_code} 200 + log 'JSON Response Code :'${resp} + +Run check topics are exisiting + [Documentation] Get the count of the Topics + [Timeout] 1 minute + ${resp}= GetCall ${TARGETURL_TOPICS} + log ${TARGETURL_TOPICS} + Should Be Equal As Strings ${resp.status_code} 200 + log 'JSON Response Code :'${resp} + ${topics}= Evaluate $resp.json().get('topics') + log ${topics} + ${ListLength}= Get Length ${topics} + log ${ListLength} + List Should Contain Value ${topics} TestTopic1 + +Run Publich and Subscribe a message + [Documentation] Publish and Subscribe the message + [Timeout] 1 minute + ${resp}= PostCall ${TARGETURL_PUBLISH} ${TEST_DATA} + log ${TARGETURL_PUBLISH} + log ${resp.text} + Should Be Equal As Strings ${resp.status_code} 200 + ${sub_resp}= GetCall ${TARGETURL_SUBSCR} + log ${TARGETURL_SUBSCR} + Should Be Equal As Strings ${sub_resp.status_code} 200 + log 'JSON Response Code :'${sub_resp} + ${ListLength}= Get Length ${sub_resp.json()} + log ${ListLength} + List Should Contain Value ${sub_resp.json()} {"topicName":"TestTopic1"} case_insensitive=yes + +*** Keywords *** +PostCall + [Arguments] ${url} ${data} + ${headers}= Create Dictionary Accept=application/json Content-Type=application/json + ${resp}= Evaluate requests.post('${url}',data='${data}', headers=${headers},verify=False) requests + [Return] ${resp} + +GetCall + [Arguments] ${url} + ${headers}= Create Dictionary Accept=application/json Content-Type=application/json + ${resp}= Evaluate requests.get('${url}', headers=${headers}, verify=False) requests + [Return] ${resp} diff --git a/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot b/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot index 5b8417404..45bec5ef5 100644 --- a/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot +++ b/test/csit/tests/holmes/testcase/CommonKeywords/HttpRequest.robot @@ -30,8 +30,8 @@ httpPost [Return] ${postResponse} httpDelete - [Arguments] ${restHost} ${restUrl} ${data} + [Arguments] ${restHost} ${restUrl} ${headers} create dictionary Content-Type=application/json Accept=application/json create session microservices ${restHost} ${headers} - ${deleteResponse} delete 
request microservices ${restUrl} ${data} + ${deleteResponse} delete request microservices ${restUrl} [Return] ${deleteResponse} diff --git a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot index 03f840904..182737f54 100644 --- a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot +++ b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Keywords.robot @@ -72,8 +72,8 @@ modifyRule [Return] ${response} deleteRule - [Arguments] ${jsonParam} ${codeFlag}=1 - ${response} httpDelete ${ruleMgtHost} ${ruleMgtUrl} ${jsonParam} + [Arguments] ${ruleId} ${codeFlag}=1 + ${response} httpDelete ${ruleMgtHost} ${ruleMgtUrl}/${ruleId} log ${response.content} run keyword if ${codeFlag}==1 Should be equal as strings ${response.status_code} 200 run keyword if ${codeFlag}!=1 Should be equal as strings ${response.status_code} 499 diff --git a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot index 03ee70849..ad2a540fd 100644 --- a/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot +++ b/test/csit/tests/holmes/testcase/RuleMgt/Rule-Mgt.robot @@ -120,8 +120,8 @@ modify_rule_with_description delete_existing_rule [Documentation] Delete an existing rule. should not be empty ${RULEID} - deleteRule {"ruleid":"${RULEID}"} + deleteRule ${RULEID} delete_non_existing_rule [Documentation] Delete a non-existing rule. - deleteRule {"ruleid":"${RULEID}"} -1 + deleteRule ${RULEID} -1 diff --git a/test/csit/tests/sdc/healthCheck/__init__.robot b/test/csit/tests/sdc/healthCheck/__init__.robot new file mode 100644 index 000000000..8ee10d5f6 --- /dev/null +++ b/test/csit/tests/sdc/healthCheck/__init__.robot @@ -0,0 +1,2 @@ +*** Settings *** +Documentation Sdc - HealthCheck diff --git a/test/csit/tests/sdc/healthCheck/test1.robot b/test/csit/tests/sdc/healthCheck/test1.robot new file mode 100644 index 000000000..6d4dc242d --- /dev/null +++ b/test/csit/tests/sdc/healthCheck/test1.robot @@ -0,0 +1,16 @@ +*** Settings *** +Library Collections +Library OperatingSystem +Library RequestsLibrary +Library json + +*** Test Cases *** +Get Requests health check ok + [Tags] get + CreateSession sdc-be http://localhost:8080 + ${headers}= Create Dictionary Accept=application/json Content-Type=application/json + ${resp}= Get Request sdc-be /sdc2/rest/healthCheck headers=&{headers} + Should Be Equal As Strings ${resp.status_code} 200 + @{ITEMS}= Copy List ${resp.json()['componentsInfo']} + : FOR ${ELEMENT} IN @{ITEMS} + \ Log ${ELEMENT['healthCheckComponent']} ${ELEMENT['healthCheckStatus']} diff --git a/test/csit/tests/sdnc/healthcheck/__init__.robot b/test/csit/tests/sdnc/healthcheck/__init__.robot new file mode 100644 index 000000000..8dac1b6ac --- /dev/null +++ b/test/csit/tests/sdnc/healthcheck/__init__.robot @@ -0,0 +1,2 @@ +*** Settings *** +Documentation SDNC - healthcheck diff --git a/test/csit/tests/sdnc/healthcheck/test1.robot b/test/csit/tests/sdnc/healthcheck/test1.robot new file mode 100644 index 000000000..1adb9a6b3 --- /dev/null +++ b/test/csit/tests/sdnc/healthcheck/test1.robot @@ -0,0 +1,16 @@ +*** Settings *** +Library OperatingSystem +Library Process + +*** Variables *** + +${health_check} ${SCRIPTS}/health_check.sh + + +*** Test Cases *** +Health check test case for SDNC + [Documentation] Health check + ${result_hc}= Run Process bash ${health_check} > log_hc.txt shell=yes + Should Be Equal As Integers ${result_hc.rc} 0 + + diff --git a/test/csit/tests/so/sanity-check/sanity_test_so.robot 
b/test/csit/tests/so/sanity-check/sanity_test_so.robot index 2e05c50f1..73a9f3f1f 100644 --- a/test/csit/tests/so/sanity-check/sanity_test_so.robot +++ b/test/csit/tests/so/sanity-check/sanity_test_so.robot @@ -13,59 +13,59 @@ Create ServiceInstance for invalid input Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}createService.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v2 data=${data} headers=${headers} + ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v3 data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Create ServiceInstance for invalid user Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}createService.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI= Content-Type=application/json Accept=application/json - ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v2 data=${data} headers=${headers} + ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v3 data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Delete ServiceInstance for invalid input Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}deleteService.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000 data=${data} headers=${headers} + ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000 data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Delete ServiceInstance for invalid user Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}deleteService.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI== Content-Type=application/json Accept=application/json - ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000 data=${data} headers=${headers} + ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000 data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result SO ServiceInstance health check Create Session refrepo http://${REPO_IP}:8080 &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Get Request refrepo /ecomp/mso/infra/orchestrationRequests/v2/rq1234d1-5a33-55df-13ab-12abad84e333 headers=${headers} + ${resp}= Get Request refrepo /ecomp/mso/infra/orchestrationRequests/v3/rq1234d1-5a33-55df-13ab-12abad84e333 headers=${headers} Should Not Contain ${resp.content} null Create 
VnfInstance for invalid input Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}createVnf.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs data=${data} headers=${headers} + ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Create VnfInstance for invalid credential Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}createVnf.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQxOnBhc3N3b3JkMTI= Content-Type=application/json Accept=application/json - ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs data=${data} headers=${headers} + ${resp}= Post Request refrepo /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Delete VnfInstance for invalid input Create Session refrepo http://${REPO_IP}:8080 ${data}= Get Binary File ${CURDIR}${/}data${/}deleteVnf.json &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v2/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs/aca51b0a-710d-4155-bc7c-7cef19d9a94e data=${data} headers=${headers} + ${resp}= Delete Request refrepo /ecomp/mso/infra/serviceInstances/v3/ff305d54-75b4-431b-adb2-eb6b9e5ff000/vnfs/aca51b0a-710d-4155-bc7c-7cef19d9a94e data=${data} headers=${headers} Run Keyword If '${resp.status_code}' == '400' or '${resp.status_code}' == '404' or '${resp.status_code}' == '405' log to console \nexecuted with expected result Get Orchestration Requests Create Session refrepo http://${REPO_IP}:8080 &{headers}= Create Dictionary Authorization=Basic SW5mcmFQb3J0YWxDbGllbnQ6cGFzc3dvcmQxJA== Content-Type=application/json Accept=application/json - ${resp}= Get Request refrepo /ecomp/mso/infra/orchestrationRequests/v2 headers=${headers} + ${resp}= Get Request refrepo /ecomp/mso/infra/orchestrationRequests/v3 headers=${headers} Should Not Contain ${resp.content} null
\ No newline at end of file diff --git a/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot b/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot index 3277e7782..e0679fbd8 100644 --- a/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot +++ b/test/csit/tests/vfc/nfvo-driver-svnfm/huawei.robot @@ -1,4 +1,5 @@ *** settings *** +Resource ../../common.robot Library Collections Library RequestsLibrary Library simplejson @@ -7,10 +8,32 @@ Library json Library HttpLibrary.HTTP *** Variables *** -@{return_ok_list}= 200 201 202 +@{return_ok_list}= 200 201 202 204 ${queryswagger_url} /api/hwvnfm/v1/swagger.json +${createauthtoken_url} /rest/plat/smapp/v1/oauth/token + +#json files +${hwvnfm_createtoken_json} ${SCRIPTS}/../tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json *** Test Cases *** SwaggerFuncTest [Documentation] query swagger info rest test - Should Be Equal 2.0 2.0 + ${headers} Create Dictionary Content-Type=application/json Accept=application/json + Create Session web_session http://${SERVICE_IP}:8482 headers=${headers} + ${resp}= Get Request web_session ${queryswagger_url} + ${responese_code}= Convert To String ${resp.status_code} + List Should Contain Value ${return_ok_list} ${responese_code} + ${response_json} json.loads ${resp.content} + ${swagger_version}= Convert To String ${response_json['swagger']} + Should Be Equal ${swagger_version} 2.0 + +AuthTokenFuncTest + [Documentation] create auth token rest test + ${json_value}= json_from_file ${hwvnfm_createtoken_json} + ${json_string}= string_from_json ${json_value} + ${headers} Create Dictionary Content-Type=application/json Accept=application/json + Create Session web_session http://${SERVICE_IP}:8482 headers=${headers} + Set Request Body ${json_string} + ${resp}= Put Request web_session ${createauthtoken_url} ${json_string} + ${responese_code}= Convert To String ${resp.status_code} + List Should Contain Value ${return_ok_list} ${responese_code}
\ No newline at end of file diff --git a/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json b/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json new file mode 100644 index 000000000..e9a6c3e92 --- /dev/null +++ b/test/csit/tests/vfc/nfvo-driver-svnfm/jsoninput/hwvnfm_createtoken.json @@ -0,0 +1,13 @@ +{
+ "auth": {
+ "identity": {
+ "methods": ["password"],
+ "password": {
+ "user": {
+ "name": "admin",
+ "password": "User@12345"
+ }
+ }
+ }
+ }
+}
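This body is what AuthTokenFuncTest above PUTs to the driver's token endpoint; a rough Python equivalent, with the host shown as a placeholder for the Robot ${SERVICE_IP} variable, would be:

    import json
    import requests

    SERVICE_IP = "127.0.0.1"   # placeholder for the Robot ${SERVICE_IP} variable
    TOKEN_URL = "http://" + SERVICE_IP + ":8482/rest/plat/smapp/v1/oauth/token"

    with open("hwvnfm_createtoken.json") as f:
        body = json.load(f)

    resp = requests.put(TOKEN_URL, json=body,
                        headers={"Content-Type": "application/json",
                                 "Accept": "application/json"})
    # The suite accepts any code in @{return_ok_list}: 200, 201, 202 or 204
    assert resp.status_code in (200, 201, 202, 204)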
\ No newline at end of file diff --git a/test/csit/tests/vnfsdk-marketplace/provision/enterprise2DC.csar b/test/csit/tests/vnfsdk-marketplace/provision/enterprise2DC.csar Binary files differindex 29e08c162..f27947955 100644 --- a/test/csit/tests/vnfsdk-marketplace/provision/enterprise2DC.csar +++ b/test/csit/tests/vnfsdk-marketplace/provision/enterprise2DC.csar diff --git a/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot b/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot index b06d5b544..c57642a2e 100644 --- a/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot +++ b/test/csit/tests/vnfsdk-marketplace/provision/sanity_test_vnfsdktestfunction.robot @@ -25,6 +25,15 @@ Get VNF Package Information from Repository Create Session refrepo http://${REPO_IP}:8702 &{headers}= Create Dictionary Content-Type=application/json ${resp}= Get Request refrepo /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId} headers=${headers} + ${response_json} json.loads ${resp.content} + ${downloadUri}= Convert To String ${response_json['downloadUri']} + Should Contain ${downloadUri} ${csarId} + Should Be Equal As Strings ${resp.status_code} 200 + +Get List Of Requests + Create Session refrepo http://${REPO_IP}:8702 + &{headers}= Create Dictionary Content-Type=application/json + ${resp}= Get Request refrepo /openoapi/vnfsdk-marketplace/v1/PackageResource/csars?name=enterprise2DC&version=1.0&type=SSAR&provider=huawei headers=${headers} Should Be Equal As Strings ${resp.status_code} 200 Download VNF Package from Repository @@ -32,9 +41,15 @@ Download VNF Package from Repository &{headers}= Create Dictionary Content-Type=application/json ${resp}= Get Request refrepo /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}/files headers=${headers} Should Be Equal As Strings ${resp.status_code} 200 + ${downloadUri}= Convert To String ${resp.content} + ${downloadUri1}= Run curl http://${REPO_IP}:8702/openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId}/files + ${string}= Convert To String ${downloadUri1} + Should Contain ${downloadUri1} ' % Total % Received % Xferd Average + Should Contain ${string} ' % Total % Received % Xferd Average Delete VNF Package from Repository Create Session refrepo http://${REPO_IP}:8702 &{headers}= Create Dictionary Content-Type=application/json ${resp}= Delete Request refrepo /openoapi/vnfsdk-marketplace/v1/PackageResource/csars/${csarId} headers=${headers} Should Be Equal As Strings ${resp.status_code} 200 + |