54 files changed, 862 insertions, 236 deletions
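The deploy.sh rework further down adds -n, -s, -m, -r and -q options (see its usage() text in the diff). As a rough sketch only, with an illustrative stack name, VM count and working directory that are not part of this change, the new flags compose like this against the windriver onap-oom-light.env file added in this commit:

    # Sketch, assuming it is run from deployment/heat/onap-oom in a repo checkout:
    # create a stack named "oom-light" with 5 k8s VMs using the staging docker manifest.
    ./scripts/deploy.sh -n 5 -s oom-light -m docker-manifest-staging.csv env/windriver/onap-oom-light.env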
diff --git a/deployment/heat/onap-oom/env/windriver/onap-oom-light.env b/deployment/heat/onap-oom/env/windriver/onap-oom-light.env new file mode 100644 index 000000000..c37cf757c --- /dev/null +++ b/deployment/heat/onap-oom/env/windriver/onap-oom-light.env @@ -0,0 +1,81 @@ +parameters: + + ubuntu_1604_image: ubuntu-16-04-cloud-amd64 + + apt_proxy: 10.12.5.2:3142 + docker_proxy: 10.12.5.2:5000 + + rancher_vm_flavor: m1.large + # use a smaller image for k8 hosts + k8s_vm_flavor: m1.xlarge + + public_net_id: 971040b2-7059-49dc-b220-4fab50cb2ad4 + + oam_network_cidr: 10.0.0.0/16 + + integration_override_yaml: > + global: + repository: __docker_proxy__ + pullPolicy: IfNotPresent + robot: + openStackKeyStoneUrl: "http://10.12.25.2:5000" + openStackPublicNetId: "__public_net_id__" + openStackPassword: "${OS_PASSWORD}" + openStackTenantId: "${OS_PROJECT_ID}" + openStackUserName: "${OS_USERNAME}" + ubuntu14Image: "ubuntu-14-04-cloud-amd64" + ubuntu16Image: "ubuntu-16-04-cloud-amd64" + openStackPrivateNetId: "__oam_network_id__" + openStackPrivateSubnetId: "__oam_subnet_id__" + openStackPrivateNetCidr: "__oam_network_cidr__" + openStackOamNetworkCidrPrefix: "10.0" + dcaeCollectorIp: "__k8s_1_vm_ip__" + vnfPubKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKXDgoo3+WOqcUG8/5uUbk81+yczgwC4Y8ywTmuQqbNxlY1oQ0YxdMUqUnhitSXs5S/yRuAVOYHwGg2mCs20oAINrP+mxBI544AMIb9itPjCtgqtE2EWo6MmnFGbHB4Sx3XioE7F4VPsh7japsIwzOjbrQe+Mua1TGQ5d4nfEOQaaglXLLPFfuc7WbhbJbK6Q7rHqZfRcOwAMXgDoBqlyqKeiKwnumddo2RyNT8ljYmvB6buz7KnMinzo7qB0uktVT05FH9Rg0CTWH5norlG5qXgP2aukL0gk1ph8iAt7uYLf1ktp+LJI2gaF6L0/qli9EmVCSLr1uJ38Q8CBflhkh" + demoArtifactsVersion: "1.2.2" + scriptVersion: "1.2.1" + rancherIpAddress: "__rancher_ip_addr__" + so: + config: + openStackUserName: "${OS_USERNAME}" + openStackKeyStoneUrl: "http://10.12.25.2:5000" + openStackEncryptedPasswordHere: "${OS_PASSWORD_ENCRYPTED}" + appc: + replicaCount: 1 + config: + enableClustering: false + sdnc: + replicaCount: 1 + config: + enableClustering: false + clamp: + enabled: false + pomba: + enabled: false + cli: + enabled: false + consul: + enabled: false + dcaegen2: + enabled: false + esr: + enabled: false + log: + enabled: false + mock: + enabled: false + msb: + enabled: false + multicloud: + enabled: false + nbi: + enabled: false + oof: + enabled: false + policy: + enabled: false + uui: + enabled: false + vfc: + enabled: false + vnfsdk: + enabled: false diff --git a/deployment/heat/onap-oom/onap-oom.yaml b/deployment/heat/onap-oom/onap-oom.yaml index 227e884d7..e8b0ffa7d 100644 --- a/deployment/heat/onap-oom/onap-oom.yaml +++ b/deployment/heat/onap-oom/onap-oom.yaml @@ -138,6 +138,8 @@ resources: router: type: OS::Neutron::Router properties: + name: + list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']] external_gateway_info: network: { get_param: public_net_id } @@ -164,7 +166,8 @@ resources: rancher_vm: type: OS::Nova::Server properties: - name: rancher + name: + list_join: ['-', [{ get_param: 'OS::stack_name' }, 'rancher']] image: { get_param: ubuntu_1604_image } flavor: { get_param: rancher_vm_flavor } key_name: { get_param: key_name } @@ -201,12 +204,14 @@ resources: get_attr: [k8s_2_floating_ip, floating_ip_address], get_attr: [k8s_3_floating_ip, floating_ip_address], get_attr: [k8s_4_floating_ip, floating_ip_address], + get_attr: [k8s_5_floating_ip, floating_ip_address], ] __k8s_private_ips__: [ get_attr: [k8s_1_floating_ip, fixed_ip_address], get_attr: [k8s_2_floating_ip, fixed_ip_address], get_attr: [k8s_3_floating_ip, fixed_ip_address], get_attr: 
[k8s_4_floating_ip, fixed_ip_address], + get_attr: [k8s_5_floating_ip, fixed_ip_address], ] k8s_1_private_port: type: OS::Neutron::Port @@ -225,7 +230,8 @@ resources: k8s_1_vm: type: OS::Nova::Server properties: - name: k8s_1 + name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s_1']] image: { get_param: ubuntu_1604_image } flavor: { get_param: k8s_vm_flavor } key_name: { get_param: key_name } @@ -260,7 +266,8 @@ resources: k8s_2_vm: type: OS::Nova::Server properties: - name: k8s_2 + name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s_2']] image: { get_param: ubuntu_1604_image } flavor: { get_param: k8s_vm_flavor } key_name: { get_param: key_name } @@ -295,7 +302,8 @@ resources: k8s_3_vm: type: OS::Nova::Server properties: - name: k8s_3 + name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s_3']] image: { get_param: ubuntu_1604_image } flavor: { get_param: k8s_vm_flavor } key_name: { get_param: key_name } @@ -330,7 +338,8 @@ resources: k8s_4_vm: type: OS::Nova::Server properties: - name: k8s_4 + name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s_4']] image: { get_param: ubuntu_1604_image } flavor: { get_param: k8s_vm_flavor } key_name: { get_param: key_name } @@ -348,6 +357,42 @@ resources: template: get_file: k8s_vm_entrypoint.sh + k8s_5_private_port: + type: OS::Neutron::Port + properties: + network: { get_resource: oam_network } + fixed_ips: [{"subnet": { get_resource: oam_subnet }}] + security_groups: + - { get_resource: onap_sg } + + k8s_5_floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network_id: { get_param: public_net_id } + port_id: { get_resource: k8s_5_private_port } + + k8s_5_vm: + type: OS::Nova::Server + properties: + name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, 'k8s_5']] + image: { get_param: ubuntu_1604_image } + flavor: { get_param: k8s_vm_flavor } + key_name: { get_param: key_name } + networks: + - port: { get_resource: k8s_5_private_port } + user_data_format: RAW + user_data: + str_replace: + params: + __docker_proxy__: { get_param: docker_proxy } + __apt_proxy__: { get_param: apt_proxy } + __docker_version__: { get_param: docker_version } + __rancher_ip_addr__: { get_attr: [rancher_floating_ip, floating_ip_address] } + __rancher_private_ip_addr__: { get_attr: [rancher_floating_ip, fixed_ip_address] } + template: + get_file: k8s_vm_entrypoint.sh + outputs: rancher_vm_ip: description: The IP address of the rancher instance @@ -369,3 +414,7 @@ outputs: description: The IP address of the k8s_4 instance value: { get_attr: [k8s_4_floating_ip, floating_ip_address] } + k8s_5_vm_ip: + description: The IP address of the k8s_5 instance + value: { get_attr: [k8s_5_floating_ip, floating_ip_address] } + diff --git a/deployment/heat/onap-oom/parts/onap-oom-1.yaml b/deployment/heat/onap-oom/parts/onap-oom-1.yaml index 8031505b7..ab79b1ed5 100644 --- a/deployment/heat/onap-oom/parts/onap-oom-1.yaml +++ b/deployment/heat/onap-oom/parts/onap-oom-1.yaml @@ -135,6 +135,8 @@ resources: router: type: OS::Neutron::Router properties: + name: + list_join: ['-', [{ get_param: 'OS::stack_name' }, 'router']] external_gateway_info: network: { get_param: public_net_id } diff --git a/deployment/heat/onap-oom/parts/onap-oom-2.yaml b/deployment/heat/onap-oom/parts/onap-oom-2.yaml index 463635b8a..e01ba132d 100644 --- a/deployment/heat/onap-oom/parts/onap-oom-2.yaml +++ b/deployment/heat/onap-oom/parts/onap-oom-2.yaml @@ -15,7 +15,8 @@ ${K8S_VM_NAME}_vm: type: OS::Nova::Server properties: - name: ${K8S_VM_NAME} + 
name: + list_join: ['-', [ { get_param: 'OS::stack_name' }, '${K8S_VM_NAME}']] image: { get_param: ubuntu_1604_image } flavor: { get_param: k8s_vm_flavor } key_name: { get_param: key_name } diff --git a/deployment/heat/onap-oom/scripts/deploy.sh b/deployment/heat/onap-oom/scripts/deploy.sh index c9cd005c7..e97c5a3f5 100755 --- a/deployment/heat/onap-oom/scripts/deploy.sh +++ b/deployment/heat/onap-oom/scripts/deploy.sh @@ -1,4 +1,4 @@ -#!/bin/bash -x +#!/bin/bash # # Copyright 2018 Huawei Technologies Co., Ltd. # @@ -9,18 +9,53 @@ # http://www.apache.org/licenses/LICENSE-2.0 # -install_name="onap-oom" +stack_name="oom" full_deletion=false if [ -z "$WORKSPACE" ]; then export WORKSPACE=`git rev-parse --show-toplevel` fi -usage() { echo "Usage: $0 [ -r ] <env-name>" 1>&2; exit 1; } +usage() { + echo "Usage: $0 [ -n <number of VMs {2-15}> ][ -s <stack name> ][ -m <manifest> ][ -r ][ -q ] <env>" 1>&2; + echo "n: Set the number of VM's that will be installed. This number must be between 2 and 15" 1>&2; + echo "s: Set the name to be used for stack. This name will be used for naming of resources" 1>&2; + echo "m: The docker manifest to apply; must be either \"docker-manifest-staging.csv\" or \"docker-manifest.csv\"." 1>&2; + echo "r: Delete all resources relating to ONAP within enviroment." 1>&2; + echo "q: Quiet Delete of all ONAP resources." 1>&2; -while getopts ":rq" o; do + exit 1; +} + + +while getopts ":n:s:m:rq" o; do case "${o}" in + n) + if [[ ${OPTARG} =~ ^[0-9]+$ ]];then + if [ ${OPTARG} -ge 2 -a ${OPTARG} -le 15 ]; then + vm_num=${OPTARG} + else + usage + fi + else + usage + fi + ;; + s) + if [[ ! ${OPTARG} =~ ^[0-9]+$ ]];then + stack_name=${OPTARG} + else + usage + fi + ;; + m) + if [ -f $WORKSPACE/version-manifest/src/main/resources/${OPTARG} ]; then + docker_manifest=${OPTARG} + else + usage + fi + ;; r) echo "The following command will delete all information relating to onap within your enviroment" read -p "Are you certain this is what you want? (type y to confirm):" answer @@ -56,6 +91,13 @@ fi ENV_FILE=$1 +if [ ! -f $ENV_FILE ];then + echo ENV file does not exist or was not given + exit 1 +fi + +set -x + SSH_KEY=~/.ssh/onap_key source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh @@ -65,23 +107,28 @@ export OS_PASSWORD_ENCRYPTED=$(echo -n "$OS_PASSWORD" | openssl aes-128-ecb -e - for n in $(seq 1 5); do if [ $full_deletion = true ] ; then - $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $install_name -q + $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $stack_name -q else - $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $install_name + $WORKSPACE/test/ete/scripts/teardown-onap.sh -n $stack_name fi cd $WORKSPACE/deployment/heat/onap-oom envsubst < $ENV_FILE > $ENV_FILE~ + if [ -z "$vm_num" ]; then + cp onap-oom.yaml onap-oom.yaml~ + else + ./scripts/gen-onap-oom-yaml.sh $vm_num > onap-oom.yaml~ + fi - if ! openstack stack create -t ./$install_name.yaml -e $ENV_FILE~ $install_name; then + if ! 
openstack stack create -t ./onap-oom.yaml~ -e $ENV_FILE~ $stack_name --parameter docker_manifest=$docker_manifest; then break fi - while [ "CREATE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $install_name)" ]; do + while [ "CREATE_IN_PROGRESS" == "$(openstack stack show -c stack_status -f value $stack_name)" ]; do sleep 20 done - STATUS=$(openstack stack show -c stack_status -f value $install_name) + STATUS=$(openstack stack show -c stack_status -f value $stack_name) echo $STATUS if [ "CREATE_COMPLETE" != "$STATUS" ]; then break @@ -89,8 +136,8 @@ for n in $(seq 1 5); do for i in $(seq 1 30); do sleep 30 - RANCHER_IP=$(openstack stack output show $install_name rancher_vm_ip -c output_value -f value) - K8S_IP=$(openstack stack output show $install_name k8s_1_vm_ip -c output_value -f value) + RANCHER_IP=$(openstack stack output show $stack_name rancher_vm_ip -c output_value -f value) + K8S_IP=$(openstack stack output show $stack_name k8s_1_vm_ip -c output_value -f value) timeout 1 ping -c 1 "$RANCHER_IP" && break done diff --git a/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh b/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh index 092b2a1fc..41c5de16a 100755 --- a/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh +++ b/deployment/heat/onap-oom/scripts/gen-onap-oom-yaml.sh @@ -16,7 +16,6 @@ if [ "$#" -ne 1 ]; then fi NUM_K8S_VMS=$1 - if [ -z "$WORKSPACE" ]; then export WORKSPACE=`git rev-parse --show-toplevel` fi @@ -34,7 +33,8 @@ cat <<EOF rancher_vm: type: OS::Nova::Server properties: - name: rancher + name: + list_join: ['-', [{ get_param: 'OS::stack_name' }, 'rancher']] image: { get_param: ubuntu_1604_image } flavor: { get_param: rancher_vm_flavor } key_name: { get_param: key_name } diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh b/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh new file mode 100644 index 000000000..530d97da1 --- /dev/null +++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/setup.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Place the scripts in run order: +#Make sure python-uuid is installed + +# Place the scripts in run order: +source ${SCRIPTS}/dcae-bulkpm/xNFSimulator.sh + +# Place the scripts in run order: +source ${SCRIPTS}/common_functions.sh + +#get current host IP addres +HOST_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}') + +VESC_IMAGE=nexus3.onap.org:10001/onap/org.onap.dcaegen2.collectors.ves.vescollector:1.3.1 +echo VESC_IMAGE=${VESC_IMAGE} + +# Start DCAE VES Collector +docker run -d -p 8080:8080/tcp -p 8443:8443/tcp -P --name vesc -e DMAAPHOST=${HOST_IP} ${VESC_IMAGE} + +# Clone DMaaP Message Router repo +mkdir -p $WORKSPACE/archives/dmaapmr +cd $WORKSPACE/archives/dmaapmr +#unset http_proxy https_proxy +git clone --depth 1 http://gerrit.onap.org/r/dmaap/messagerouter/messageservice -b master +git pull +cd $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose +cp $WORKSPACE/archives/dmaapmr/messageservice/bundleconfig-local/etc/appprops/MsgRtrApi.properties /var/tmp/ + +# Update kafkfa and zookeeper properties in MsgRtrApi.propeties which will be copied to DMaaP Container +sed -i -e 's#nexus3.onap.org:10001/onap/dmaap/kafka01101:0.0.1#wurstmeister/kafka:1.1.0#' $WORKSPACE/archives/dmaapmr/messageservice/src/main/resources/docker-compose/docker-compose.yml + +# start DMaaP MR containers with docker compose and configuration from docker-compose.yml +docker login -u docker -p docker nexus3.onap.org:10001 +docker-compose up -d + +# Wait for initialization of Docker contaienr for DMaaP MR, 
Kafka and Zookeeper +for i in {1..50}; do +if [ $(docker inspect --format '{{ .State.Running }}' dockercompose_kafka_1) ] && \ +[ $(docker inspect --format '{{ .State.Running }}' dockercompose_zookeeper_1) ] && \ +[ $(docker inspect --format '{{ .State.Running }}' dockercompose_dmaap_1) ] +then + echo "DMaaP Service Running" + break +else + echo sleep $i + sleep $i +fi +done + +# Get IP address of DMAAP, KAFKA, Zookeeper +DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1) +KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1) +ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1) + +echo DMAAP_MR_IP=${DMAAP_MR_IP} +echo KAFKA_IP=${KAFKA_IP} +echo ZOOKEEPER_IP=${ZOOKEEPER_IP} + +# Shutdown DMAAP Container +docker kill dockercompose_dmaap_1 + +# Initial docker-compose up and down is for populating kafka and zookeeper IPs in /var/tmp/MsgRtrApi.properites +sed -i -e '/config.zk.servers=/ s/=.*/='$ZOOKEEPER_IP'/' /var/tmp/MsgRtrApi.properties +sed -i -e '/kafka.metadata.broker.list=/ s/=.*/='$KAFKA_IP':9092/' /var/tmp/MsgRtrApi.properties + +# Start DMaaP MR containers with docker compose and configuration from docker-compose.yml +docker-compose build +docker login -u docker -p docker nexus3.onap.org:10001 +docker-compose up -d +sleep 5 + +# Get IP address of DMAAP, KAFKA, Zookeeper and VESC +DMAAP_MR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_dmaap_1) +KAFKA_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_kafka_1) +ZOOKEEPER_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockercompose_zookeeper_1) + +VESC_IP=`get-instance-ip.sh vesc` +export VESC_IP=${VESC_IP} +export HOST_IP=${HOST_IP} +export DMAAP_MR_IP=${DMAAP_MR_IP} + +ROBOT_VARIABLES="-v DMAAP_MR_IP:${DMAAP_MR_IP} -v VESC_IP:${VESC_IP}" + +pip install jsonschema uuid +# Wait container ready +sleep 2 diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh b/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh new file mode 100644 index 000000000..85428dad9 --- /dev/null +++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/teardown.sh @@ -0,0 +1,8 @@ +#!/bin/bash +echo "Starting teardown script" +kill-instance.sh vesc +kill-instance.sh dockercompose_dmaap_1 +kill-instance.sh dockercompose_kafka_1 +kill-instance.sh dockercompose_zookeeper_1 + + diff --git a/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt b/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt new file mode 100644 index 000000000..25a5d6e8b --- /dev/null +++ b/test/csit/plans/dcae-bulkpm/bulkpm-suite/testplan.txt @@ -0,0 +1,3 @@ +# Test suites are relative paths under [integration.git]/test/csit/tests/. +# Place the suites in run order. 
+dcae-bulkpm/testcases diff --git a/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml b/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml index 66cbde22f..1673715cb 100644 --- a/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml +++ b/test/csit/plans/dcaegen2-collectors-hv-ves/testsuites/docker-compose.yml @@ -60,6 +60,25 @@ services: networks: - ves-hv-default + unencrypted-ves-hv-collector: + image: $DOCKER_REGISTRY/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-main:latest + ports: + - "7060:6060" + - "7061:6061/tcp" + entrypoint: ["java", "-Dio.netty.leakDetection.level=paranoid", "-cp", "*:", "org.onap.dcae.collectors.veshv.main.MainKt"] + command: ["--listen-port", "6061","--config-url", "http://consul:8500/v1/kv/veshv-config", "--ssl-disable"] + healthcheck: + interval: 10s + timeout: 5s + retries: 2 + test: "curl --request GET --fail --silent --show-error localhost:6060/health/ready && nc -vz localhost 6061" + depends_on: + - kafka + volumes: + - ./ssl/:/etc/ves-hv/ + networks: + - ves-hv-default + dcae-app-simulator: image: $DOCKER_REGISTRY/onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-dcae-app-simulator:latest ports: diff --git a/test/csit/plans/dcaegen2/prh-testsuites/setup.sh b/test/csit/plans/dcaegen2/prh-testsuites/setup.sh index a5ce48b52..52167bf5c 100644 --- a/test/csit/plans/dcaegen2/prh-testsuites/setup.sh +++ b/test/csit/plans/dcaegen2/prh-testsuites/setup.sh @@ -8,26 +8,11 @@ export AAI_SIMULATOR="aai_simulator" cd ${WORKSPACE}/test/csit/tests/dcaegen2/prh-testcases/resources/ -docker login -u docker -p docker nexus3.onap.org:10001 pip uninstall -y docker-py pip uninstall -y docker pip install -U docker docker-compose up -d --build -# Wait for initialization of Docker containers -for i in {1..10}; do - if [ $(docker inspect --format '{{ .State.Running }}' ${PRH_SERVICE}) ] && \ - [ $(docker inspect --format '{{ .State.Running }}' ${DMAAP_SIMULATOR}) ] && \ - [ $(docker inspect --format '{{ .State.Running }}' ${AAI_SIMULATOR}) ] - then - echo "dmaap_simulator, aai_simulator and prh services are running" - break - else - echo sleep ${i} - sleep ${i} - fi -done - PRH_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${PRH_SERVICE}) DMAAP_SIMULATOR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${DMAAP_SIMULATOR}) AAI_SIMULATOR_IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${AAI_SIMULATOR}) @@ -47,12 +32,5 @@ for i in {1..10}; do sleep ${i} done -docker stop prh -docker cp prh:/config/prh_endpoints.json ${WORKDIR} -sed -i -e 's/"dmaapHostName":.*/"dmaapHostName": "'${DMAAP_SIMULATOR_IP}'",/g' ${WORKDIR}/prh_endpoints.json -sed -i -e 's/"aaiHost":.*/"aaiHost": "'${AAI_SIMULATOR_IP}'",/g' ${WORKDIR}/prh_endpoints.json -docker cp ${WORKDIR}/prh_endpoints.json prh:/config/ -docker start prh - # #Pass any variables required by Robot test suites in ROBOT_VARIABLES ROBOT_VARIABLES="-v DMAAP_SIMULATOR:${DMAAP_SIMULATOR_IP}:2222 -v AAI_SIMULATOR:${AAI_SIMULATOR_IP}:3333 -v PRH:${PRH_IP}:8100" diff --git a/test/csit/plans/policy/apex-pdp/setup.sh b/test/csit/plans/policy/apex-pdp/setup.sh new file mode 100644 index 000000000..7ab5b9e22 --- /dev/null +++ b/test/csit/plans/policy/apex-pdp/setup.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# ============LICENSE_START======================================================= +# Copyright (C) 2018 Ericsson. All rights reserved. 
+# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# ============LICENSE_END========================================================= + +docker run -d --name apex -p 12561:12561 -p 23324:23324 -it nexus3.onap.org:10001/onap/policy-apex-pdp:2.0-SNAPSHOT-latest /bin/bash -c "/opt/app/policy/apex-pdp/bin/apexEngine.sh -c /opt/app/policy/apex-pdp/examples/config/SampleDomain/RESTServerJsonEvent.json" + +APEX_IP=`get-instance-ip.sh apex` +echo APEX IP IS ${APEX_IP} +Wait for initialization +for i in {1..10}; do + curl -sS ${APEX_IP}:23324 && break + echo sleep $i + sleep $i +done + +ROBOT_VARIABLES="-v APEX_IP:${APEX_IP}" diff --git a/test/csit/plans/policy/apex-pdp/teardown.sh b/test/csit/plans/policy/apex-pdp/teardown.sh new file mode 100644 index 000000000..ca8e92e6c --- /dev/null +++ b/test/csit/plans/policy/apex-pdp/teardown.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# ============LICENSE_START======================================================= +# Copyright (C) 2018 Ericsson. All rights reserved. +# ================================================================================ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# ============LICENSE_END========================================================= + +kill-instance.sh apex diff --git a/test/csit/plans/policy/apex-pdp/testplan.txt b/test/csit/plans/policy/apex-pdp/testplan.txt new file mode 100644 index 000000000..cee9abda5 --- /dev/null +++ b/test/csit/plans/policy/apex-pdp/testplan.txt @@ -0,0 +1,3 @@ +# Test suites are relative paths under [integration.git]/test/csit/tests/. +# Place the suites in run order. 
+policy/apex-pdp/apex-pdp-test.robot diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh index 5a578230b..f990aa5a7 100644 --- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh +++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/setup.sh @@ -24,10 +24,10 @@ source ${SCRIPTS}/common_functions.sh docker run -d -p 8500:8500 --name msb_consul consul:0.9.3 MSB_CONSUL_IP=`get-instance-ip.sh msb_consul` echo MSB_CONSUL_IP=${MSB_CONSUL_IP} -docker run -d -p 10081:10081 -e CONSUL_IP=$MSB_CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery +docker run -d -p 10081:10081 -e CONSUL_IP=$MSB_CONSUL_IP --name msb_discovery nexus3.onap.org:10001/onap/msb/msb_discovery:1.1.0 MSB_DISCOVERY_IP=`get-instance-ip.sh msb_discovery` echo MSB_DISCOVERY_IP=${MSB_DISCOVERY_IP} -docker run -d -p 80:80 -e CONSUL_IP=$MSB_CONSUL_IP -e SDCLIENT_IP=$MSB_DISCOVERY_IP -e "ROUTE_LABELS=visualRange:1" --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway +docker run -d -p 80:80 -e CONSUL_IP=$MSB_CONSUL_IP -e SDCLIENT_IP=$MSB_DISCOVERY_IP --name msb_internal_apigateway nexus3.onap.org:10001/onap/msb/msb_apigateway:1.1.0 MSB_IAG_IP=`get-instance-ip.sh msb_internal_apigateway` echo MSB_IAG_IP=${MSB_IAG_IP} @@ -39,8 +39,8 @@ for i in {1..10}; do done # wait for container initalization -echo sleep 60 -sleep 60 +echo sleep 30 +sleep 30 ORG="onap" PROJECT="vfc" @@ -52,7 +52,8 @@ IMAGE_ACTIVITI_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}" SERVICE_IP=$(ip route get 8.8.8.8 | awk '/8.8.8.8/ {print $NF}') # start wfengine-activiti -docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8804 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME} +# docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8804 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME} +docker run -d --name vfc_wfengine_activiti -p 8804:8080 -e SERVICE_PORT=8080 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_ACTIVITI_NAME} WFENGINE_ACTIVITI_IP=`get-instance-ip.sh vfc_wfengine_activiti` # Wait for initialization @@ -72,7 +73,10 @@ IMAGE="wfengine-mgrservice" IMAGE_MGRSERVICE_NAME="${DOCKER_REPOSITORY}/${ORG}/${PROJECT}/${IMAGE}" # Start wfengine-mgrservice -docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8805 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME} +#docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_IP=$SERVICE_IP -e SERVICE_PORT=8805 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME} +# docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_PORT=10550 -e OPENPALETTE_MSB_IP=${MSB_IAG_IP} -e OPENPALETTE_MSB_PORT=80 ${IMAGE_MGRSERVICE_NAME} +docker run -d --name vfc_wfengine_mgrservice -p 8805:10550 -e SERVICE_PORT=10550 -e OPENPALETTE_MSB_IP=${WFENGINE_ACTIVITI_IP} -e OPENPALETTE_MSB_PORT=8080 ${IMAGE_MGRSERVICE_NAME} + ##docker run -d --name ${IMAGE} -e OPENPALETTE_MSB_IP=${WFENGINEACTIVITIR_IP} -e OPENPALETTE_MSB_PORT=8080 ${IMAGE_MGRSERVICE_NAME} WFENGINE_MGRSERVICE_IP=`get-instance-ip.sh vfc_wfengine_mgrservice` for i in {1..10}; do diff --git a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh index 
384bc3935..bca33569b 100644 --- a/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh +++ b/test/csit/plans/vfc-nfvo-wfengine/sanity-check/teardown.sh @@ -16,6 +16,12 @@ # # This script is sourced by run-csit.sh after Robot test completion. +echo === logs vfc_wfengine_activiti === +docker logs vfc_wfengine_activiti + +echo === logs vfc_wfengine_mgrservice === +docker logs vfc_wfengine_mgrservice + kill-instance.sh msb_internal_apigateway kill-instance.sh msb_discovery kill-instance.sh msb_consul diff --git a/test/csit/scripts/clamp/clone_clamp_and_change_dockercompose.sh b/test/csit/scripts/clamp/clone_clamp_and_change_dockercompose.sh index baffc17d1..e564e637e 100755 --- a/test/csit/scripts/clamp/clone_clamp_and_change_dockercompose.sh +++ b/test/csit/scripts/clamp/clone_clamp_and_change_dockercompose.sh @@ -24,6 +24,10 @@ echo "This is ${WORKSPACE}/test/csit/scripts/clamp/clone_clamp_and_change_dockercompose.sh" +firefox --version +which firefox + + # Clone Clamp repo to get extra folder that has all needed to run docker with docker-compose to start DB and Clamp mkdir -p $WORKSPACE/archives/clamp-clone cd $WORKSPACE/archives/clamp-clone @@ -34,7 +38,7 @@ cd clamp/extra/docker/clamp/ sed -i '/image: onap\/clamp/c\ image: nexus3.onap.org:10001\/onap\/clamp' docker-compose.yml # Change config to take third_party_proxy:8085 for SDC, Policy and DCAE simulator -sed -i 's/}/,\"clamp.config.policy.pdpUrl1\":\"http:\/\/third_party_proxy:8085\/pdp\/ , testpdp, alpha123\",\"clamp.config.policy.pdpUrl2\":\"http:\/\/third_party_proxy:8085\/pdp\/ , testpdp, alpha123\",\"clamp.config.policy.papUrl\":\"http:\/\/third_party_proxy:8085\/pap\/ , testpap, alpha123\",\"clamp.config.policy.clientId\":\"python\",\"clamp.config.policy.clientKey\":\"dGVzdA==\",\"clamp.config.sdc.catalog.url\":\"http:\/\/third_party_proxy:8085\/sdc\/v1\/catalog\/\",\"clamp.config.sdc.hostUrl\":\"http:\/\/third_party_proxy:8085\",\"clamp.config.sdc.serviceUrl\":\"http:\/\/third_party_proxy:8085\/sdc\/v1\/catalog\/services\",\"clamp.config.dcae.inventory.url\":\"http:\/\/third_party_proxy:8085\",\"clamp.config.dcae.dispatcher.url\":\"http:\/\/third_party_proxy:8085\",\"spring.profiles.active\":\"clamp-default,clamp-default-user,clamp-sdc-controller\"}/g' clamp.env +sed -i 's/}/,\"clamp.config.policy.pdpUrl1\":\"http:\/\/third_party_proxy:8085\/pdp\/ , testpdp, alpha123\",\"clamp.config.policy.pdpUrl2\":\"http:\/\/third_party_proxy:8085\/pdp\/ , testpdp, alpha123\",\"clamp.config.policy.papUrl\":\"http:\/\/third_party_proxy:8085\/pap\/ , testpap, alpha123\",\"clamp.config.policy.clientId\":\"python\",\"clamp.config.policy.clientKey\":\"dGVzdA==\",\"clamp.config.sdc.catalog.url\":\"http:\/\/third_party_proxy:8085\/sdc\/v1\/catalog\/\",\"clamp.config.sdc.hostUrl\":\"http:\/\/third_party_proxy:8085\",\"clamp.config.sdc.serviceUrl\":\"http:\/\/third_party_proxy:8085\/sdc\/v1\/catalog\/services\",\"clamp.config.dcae.inventory.url\":\"http:\/\/third_party_proxy:8085\",\"clamp.config.dcae.dispatcher.url\":\"http:\/\/third_party_proxy:8085\",\"spring.profiles.active\":\"clamp-default,clamp-default-user,clamp-sdc-controller\",\"server.ssl.client-auth\":\"want\"}/g' clamp.env # Add the sql to create template so it is played by docker-compose later cp ../../../src/test/resources/sql/four_templates_only.sql ../../sql/bulkload/ diff --git a/test/csit/scripts/dcae-bulkpm/xNFSimulator.sh b/test/csit/scripts/dcae-bulkpm/xNFSimulator.sh new file mode 100644 index 000000000..1728ef75b --- /dev/null +++ 
b/test/csit/scripts/dcae-bulkpm/xNFSimulator.sh @@ -0,0 +1,28 @@ +#!/bin/bash +#This scritt will simulate xNF ftpes functionality. +#This script will automatic install vsftpd and it will make necessary changes to vsftpd.conf +sudo apt-get install vsftpd -y +sudo useradd -m -u 12345 -g users -d /home/ftpuser -s /bin/bash -p "$(echo ftpuser | openssl passwd -1 -stdin)" ftpuser +sudo chown root:root /home/ftpuser +sudo mkdir -p /tmp/ftp/rop +sudo chown nobody:nogroup /tmp/ftp/rop +sudo openssl req -x509 -nodes -days 365 -newkey rsa:1024 -keyout /etc/ssl/private/vsftpd.pem -out /etc/ssl/private/vsftpd.pem -subj "/C=IE/ST=ftp/L=Springfield/O=Dis/CN=www.onap.org" +sudo sed -i -e '/anonymous_enable=/ s/=.*/=NO/' /etc/vsftpd.conf +sudo sed -i -e '/local_enable=/ s/=.*/=NO/' /etc/vsftpd.conf +sudo sed -i -e '/write_enable=/ s/=.*/=YES/' /etc/vsftpd.conf +sudo sed -i -e '/#write_enable=/ s/#write_enable=.*/write_enable=YES/' /etc/vsftpd.conf +sudo sed -i -e '/chroot_local_user=/ s/=.*/=YES/' /etc/vsftpd.conf +sudo sed -i -e '0,/#chroot_local_user=/ s/#chroot_local_user=.*/chroot_local_user=YES/' /etc/vsftpd.conf +sudo sed -i -e '/ssl_enable=/ s/=.*/=YES/' /etc/vsftpd.conf +sudo sed -i -e "/ssl_enable=YES/a\\allow_anon_ssl=YES" /etc/vsftpd.conf +sudo sed -i -e "/allow_anon_ssl=NO/a\\force_local_data_ssl=NO" /etc/vsftpd.conf +sudo sed -i -e "/force_local_data_ssl=NO/a\\force_local_logins_ssl=NO" /etc/vsftpd.conf +sudo sed -i -e "/force_local_logins_ssl=NO/a\\ssl_tlsv1=YES" /etc/vsftpd.conf +sudo sed -i -e "/ssl_tlsv1=YES/a\\ssl_sslv2=NO" /etc/vsftpd.conf +sudo sed -i -e "/ssl_sslv2=NO/a\\ssl_sslv3=NO" /etc/vsftpd.conf +sudo sed -i -e "/ssl_sslv3=NO/a\\require_ssl_reuse=NO" /etc/vsftpd.conf +sudo sed -i -e "/require_ssl_reuse=NO/a\\ssl_ciphers=HIGH" /etc/vsftpd.conf +sudo sed -i -e "/ssl_ciphers=HIGH/a\\hide_ids=YES" /etc/vsftpd.conf +sudo sed -i -e "/ssl_ciphers=HIGH/a\\anon_root=/var/ftp/" /etc/vsftpd.conf +sudo sed -i -e "/ssl_ciphers=HIGH/a\\no_anon_password=YES" /etc/vsftpd.conf +sudo service vsftpd restart
\ No newline at end of file diff --git a/test/csit/scripts/optf-has/has/has-properties/conductor.conf.onap b/test/csit/scripts/optf-has/has/has-properties/conductor.conf.onap index 1f644264b..2c5b8d6c5 100644 --- a/test/csit/scripts/optf-has/has/has-properties/conductor.conf.onap +++ b/test/csit/scripts/optf-has/has/has-properties/conductor.conf.onap @@ -252,7 +252,7 @@ music_new_version = True # Base URL for Music REST API without a trailing slash. (string value) server_url = http://localhost:8080/MUSIC/rest/v2 version = v2 -music_version = "3.0.3" +music_version = "2.5.3" aafuser = conductor aafpass = c0nduct0r aafns = conductor diff --git a/test/csit/scripts/optf-has/has/has_script.sh b/test/csit/scripts/optf-has/has/has_script.sh index 08bf0bcb0..2d2eff3b7 100755 --- a/test/csit/scripts/optf-has/has/has_script.sh +++ b/test/csit/scripts/optf-has/has/has_script.sh @@ -31,6 +31,7 @@ cd ${DIR} COND_CONF=/tmp/conductor/properties/conductor.conf LOG_CONF=/tmp/conductor/properties/log.conf IMAGE_NAME=nexus3.onap.org:10001/onap/optf-has +IMAGE_VER=1.2.1-SNAPSHOT-latest CERT=/tmp/conductor/properties/cert.cer KEY=/tmp/conductor/properties/cert.key BUNDLE=/tmp/conductor/properties/cert.pem @@ -69,16 +70,16 @@ curl -vvvvv --noproxy "*" --request GET http://${MUSIC_IP}:8080/MUSIC/rest/v2/ve echo "Onboard conductor into music" curl -vvvvv --noproxy "*" --request POST http://${MUSIC_IP}:8080/MUSIC/rest/v2/admin/onboardAppWithMusic -H "Content-Type: application/json" --data @${WORKSPACE}/test/csit/tests/optf-has/has/data/onboard.json -docker run -d --name cond-cont -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:latest python /usr/local/bin/conductor-controller --config-file=/usr/local/bin/conductor.conf -sleep 2 -docker run -d --name cond-api -p "8091:8091" -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:latest python /usr/local/bin/conductor-api --port=8091 -- --config-file=/usr/local/bin/conductor.conf -sleep 2 -docker run -d --name cond-solv -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:latest python /usr/local/bin/conductor-solver --config-file=/usr/local/bin/conductor.conf -sleep 2 -docker run -d --name cond-resv -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:latest python /usr/local/bin/conductor-reservation --config-file=/usr/local/bin/conductor.conf -sleep 2 -docker run -d --name cond-data -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf -v ${CERT}:/usr/local/bin/cert.cer -v ${KEY}:/usr/local/bin/cert.key -v ${BUNDLE}:/usr/local/bin/cert.pem ${IMAGE_NAME}:latest python /usr/local/bin/conductor-data --config-file=/usr/local/bin/conductor.conf -sleep 2 +docker run -d --name cond-cont -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:${IMAGE_VER} python /usr/local/bin/conductor-controller --config-file=/usr/local/bin/conductor.conf +sleep 20 +docker run -d --name cond-api -p "8091:8091" -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:${IMAGE_VER} python /usr/local/bin/conductor-api --port=8091 -- --config-file=/usr/local/bin/conductor.conf +sleep 20 +docker run -d --name cond-solv -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:${IMAGE_VER} python /usr/local/bin/conductor-solver 
--config-file=/usr/local/bin/conductor.conf +sleep 20 +docker run -d --name cond-resv -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf ${IMAGE_NAME}:${IMAGE_VER} python /usr/local/bin/conductor-reservation --config-file=/usr/local/bin/conductor.conf +sleep 20 +docker run -d --name cond-data -v ${COND_CONF}:/usr/local/bin/conductor.conf -v ${LOG_CONF}:/usr/local/bin/log.conf -v ${CERT}:/usr/local/bin/cert.cer -v ${KEY}:/usr/local/bin/cert.key -v ${BUNDLE}:/usr/local/bin/cert.pem ${IMAGE_NAME}:${IMAGE_VER} python /usr/local/bin/conductor-data --config-file=/usr/local/bin/conductor.conf +sleep 20 COND_IP=`docker inspect --format '{{ .NetworkSettings.Networks.bridge.IPAddress}}' cond-api` ${WORKSPACE}/test/csit/scripts/optf-has/has/wait_for_port.sh ${COND_IP} 8091 diff --git a/test/csit/scripts/optf-has/has/music_script.sh b/test/csit/scripts/optf-has/has/music_script.sh index 1e978c2f9..7693d7b30 100755 --- a/test/csit/scripts/optf-has/has/music_script.sh +++ b/test/csit/scripts/optf-has/has/music_script.sh @@ -27,7 +27,7 @@ echo "# music configuration step"; CASS_IMG=nexus3.onap.org:10001/onap/music/cassandra_music:latest TOMCAT_IMG=nexus3.onap.org:10001/library/tomcat:8.0 ZK_IMG=nexus3.onap.org:10001/library/zookeeper:3.4 -MUSIC_IMG=nexus3.onap.org:10001/onap/music/music:latest +MUSIC_IMG=nexus3.onap.org:10001/onap/music/music:2.5.3 WORK_DIR=/tmp/music CASS_USERNAME=nelson24 CASS_PASSWORD=winman123 @@ -51,10 +51,10 @@ docker run -d --name music-db --network music-net -p "7000:7000" -p "7001:7001" CASSA_IP=`docker inspect -f '{{ $network := index .NetworkSettings.Networks "music-net" }}{{ $network.IPAddress}}' music-db` echo "CASSANDRA_IP=${CASSA_IP}" ${WORKSPACE}/test/csit/scripts/optf-has/has/wait_for_port.sh ${CASSA_IP} 9042 -sleep 60 +sleep 150 # Start Music war docker run -d --name music-war -v music-vol:/app ${MUSIC_IMG}; -sleep 15 +sleep 30 # Start Zookeeper docker run -d --name music-zk --network music-net -p "2181:2181" -p "2888:2888" -p "3888:3888" ${ZK_IMG}; #ZOO_IP=`docker inspect --format '{{ .NetworkSettings.Networks.bridge.IPAddress}}' music-zk` @@ -62,7 +62,7 @@ ZOO_IP=`docker inspect -f '{{ $network := index .NetworkSettings.Networks "music echo "ZOOKEEPER_IP=${ZOO_IP}" # Delay between Cassandra/Zookeeper and Tomcat -sleep 60; +sleep 120 # Start Up tomcat - Needs to have properties,logs dir and war file volume mapped. docker run -d --name music-tomcat --network music-net -p "8080:8080" -v music-vol:/usr/local/tomcat/webapps -v ${WORK_DIR}/properties:/opt/app/music/etc:ro -v ${WORK_DIR}/logs:/opt/app/music/logs ${TOMCAT_IMG}; @@ -80,7 +80,7 @@ echo "TOMCAT_IP=${TOMCAT_IP}" ${WORKSPACE}/test/csit/scripts/optf-has/has/wait_for_port.sh ${TOMCAT_IP} 8080 # wait a while to make sure music is totally up and configured -sleep 60 +sleep 90 echo "inspect docker things for tracing purpose" docker inspect music-db diff --git a/test/csit/scripts/policy/script1.sh b/test/csit/scripts/policy/script1.sh index 7bb9731c8..12509eda7 100755 --- a/test/csit/scripts/policy/script1.sh +++ b/test/csit/scripts/policy/script1.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Copyright 2017 AT&T Intellectual Property. All rights reserved. +# Copyright 2017-2018 AT&T Intellectual Property. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -147,7 +147,7 @@ TIME_OUT=600 INTERVAL=20 TIME=0 while [ "$TIME" -lt "$TIME_OUT" ]; do - curl -i --user healthcheck:zb!XztG34 -H "ContentType: application/json" -H "Accept: application/json" ${POLICY_IP}:6969/healthcheck && break + curl -k -i --user healthcheck:zb!XztG34 -H "ContentType: application/json" -H "Accept: application/json" https://${POLICY_IP}:6969/healthcheck && break echo Sleep: $INTERVAL seconds before testing if Policy is up. Total wait time up now is: $TIME seconds. Timeout is: $TIME_OUT seconds sleep $INTERVAL diff --git a/test/csit/tests/clamp/UIs/01__Create_Holmes_model.robot b/test/csit/tests/clamp/UIs/01__Create_Holmes_model.robot index 305044cb0..e8b1429d0 100644 --- a/test/csit/tests/clamp/UIs/01__Create_Holmes_model.robot +++ b/test/csit/tests/clamp/UIs/01__Create_Holmes_model.robot @@ -60,6 +60,7 @@ Set Properties for HolmesModel1 Select From List By Label id=vf vFirewall 0 Select From List By Label id=actionSet VNF Select From List By Label id=location Data Center 2 Data Center 3 + Input Text locator=deployParameters text={} Click Button locator=Save Set Policy Box properties for HolmesModel1 diff --git a/test/csit/tests/clamp/UIs/02__Create_TCA_model.robot b/test/csit/tests/clamp/UIs/02__Create_TCA_model.robot index 0dc0a8abb..bdc537eab 100644 --- a/test/csit/tests/clamp/UIs/02__Create_TCA_model.robot +++ b/test/csit/tests/clamp/UIs/02__Create_TCA_model.robot @@ -53,6 +53,7 @@ Set Properties for TCAModel1 Select From List By Label id=vf vLoadBalancer 0 Select From List By Label id=actionSet VNF Select From List By Label id=location Data Center 1 Data Center 3 + Input Text locator=deployParameters text={} Click Button locator=Save Set Policy Box properties for TCAModel1 diff --git a/test/csit/tests/dcae-bulkpm/testcases/__init__.robot b/test/csit/tests/dcae-bulkpm/testcases/__init__.robot new file mode 100644 index 000000000..7114fd447 --- /dev/null +++ b/test/csit/tests/dcae-bulkpm/testcases/__init__.robot @@ -0,0 +1,2 @@ +*** Settings *** +Documentation 5G Bulk PM E2E Testcases diff --git a/test/csit/tests/dcae-bulkpm/testcases/assets/json_events/FileExistNotification.json b/test/csit/tests/dcae-bulkpm/testcases/assets/json_events/FileExistNotification.json new file mode 100644 index 000000000..96068e39a --- /dev/null +++ b/test/csit/tests/dcae-bulkpm/testcases/assets/json_events/FileExistNotification.json @@ -0,0 +1,30 @@ +{ + "event": { + "commonEventHeader": { + "version": "4.0.1", + "vesEventListenerVersion": "7.0.1", + "domain": "notification", + "eventName": "Noti_RnNode-Ericsson_FileReady", + "eventId": "FileReady_1797490e-10ae-4d48-9ea7-3d7d790b25e1", + "lastEpochMicrosec": 8745745764578, + "priority": "Normal", + "reportingEntityName": "otenb5309", + "sequence": 0, + "sourceName": "oteNB5309", + "startEpochMicrosec": 8745745764578, + "timeZoneOffset": "UTC+05.30" + }, + "notificationFields": { + "changeIdentifier": "PM_MEAS_FILES", + "changeType": "FileReady", + "notificationFieldsVersion": "2.0", + "additionalFields": + { + "location": "ftpes://192.168.0.101:22/ftp/rop/A20161224.1030-1045.bin.gz", + "compression": "gzip", + "fileformatType": "org.3GPP.32.435#measCollec", + "fileFormatVersion": "V10" + } + } + } + }
\ No newline at end of file diff --git a/test/csit/tests/dcae-bulkpm/testcases/e2e.robot b/test/csit/tests/dcae-bulkpm/testcases/e2e.robot new file mode 100644 index 000000000..69c795341 --- /dev/null +++ b/test/csit/tests/dcae-bulkpm/testcases/e2e.robot @@ -0,0 +1,40 @@ +*** Settings *** +Documentation Testing E2E VES,Dmaap,DFC,DR with File Ready event feed from xNF +Library RequestsLibrary +Library OperatingSystem +Library Collections +Resource resources/ves_keywords.robot + + +*** Variables *** +${VESC_URL} http://%{VESC_IP}:8080 +${GLOBAL_APPLICATION_ID} robot-ves +${VES_ANY_EVENT_PATH} /eventListener/v7 +${HEADER_STRING} content-type=application/json +${EVENT_DATA_FILE} %{WORKSPACE}/test/csit/tests/dcae-bulkpm/testcases/assets/json_events/FileExistNotification.json + +${TARGETURL_TOPICS} http://${DMAAP_MR_IP}:3904/topics +${TARGETURL_SUBSCR} http://${DMAAP_MR_IP}:3904/events/unauthenticated.VES_NOTIFICATION_OUTPUT/OpenDcae-c12/C12?timeout=1000 +*** Test Cases *** + +Send VES File Ready Event to VES Collector + [Tags] DCAE-VESC-R1 + [Documentation] Post single event and expect 200 Response + ${evtdata}= Get Event Data From File ${EVENT_DATA_FILE} + ${headers}= Create Header From String ${HEADER_STRING} + ${resp}= Publish Event To VES Collector ${VESC_URL} ${VES_ANY_EVENT_PATH} ${headers} ${evtdata} + Log Receive HTTP Status code ${resp.status_code} + Should Be Equal As Strings ${resp.status_code} 202 + +Check VES Notification Topic is existing in Message Router + [Documentation] Get the count of the Topics + [Timeout] 1 minute + Sleep 10s + ${resp}= GetCall ${TARGETURL_TOPICS} + log ${TARGETURL_TOPICS} + log 'JSON Response Code :'${resp} + ${topics}= Evaluate $resp.json().get('topics') + log ${topics} + ${ListLength}= Get Length ${topics} + log ${ListLength} + List Should Contain Value ${topics} unauthenticated.VES_NOTIFICATION_OUTPUT diff --git a/test/csit/tests/dcae-bulkpm/testcases/resources/VesLibrary.py b/test/csit/tests/dcae-bulkpm/testcases/resources/VesLibrary.py new file mode 100644 index 000000000..d1ec9811d --- /dev/null +++ b/test/csit/tests/dcae-bulkpm/testcases/resources/VesLibrary.py @@ -0,0 +1,25 @@ +''' +Created on Aug 18, 2017 + +@author: sw6830 +''' +from robot.api import logger +from Queue import Queue +import uuid, time, json, threading,os, platform, subprocess,paramiko + +class VesLibrary(object): + + def __init__(self): + pass + + def create_header_from_string(self, dictStr): + logger.info("Enter create_header_from_string: dictStr") + return dict(u.split("=") for u in dictStr.split(",")) + + def Generate_UUID(self): + """generate a uuid""" + return uuid.uuid4() + +if __name__ == '__main__': + lib = VesLibrary() + time.sleep(100000)
\ No newline at end of file diff --git a/test/csit/tests/dcae-bulkpm/testcases/resources/ves_keywords.robot b/test/csit/tests/dcae-bulkpm/testcases/resources/ves_keywords.robot new file mode 100644 index 000000000..76bc33f47 --- /dev/null +++ b/test/csit/tests/dcae-bulkpm/testcases/resources/ves_keywords.robot @@ -0,0 +1,39 @@ + *** Settings *** +Documentation The main interface for interacting with VES. It handles low level stuff like managing the http request library and VES required fields +Library RequestsLibrary +Library ../resources/VesLibrary.py +Library OperatingSystem +Library Collections +Library requests +Library Collections +Library String + +*** Variables *** + +*** Keywords *** + +Get Event Data From File + [Arguments] ${jsonfile} + ${data}= OperatingSystem.Get File ${jsonfile} + #Should Not Be_Equal ${data} None + [return] ${data} + +Publish Event To VES Collector + [Documentation] Send an event to VES Collector + [Arguments] ${url} ${evtpath} ${httpheaders} ${evtdata} + Log Creating session ${url} + ${session}= Create Session dcaegen2-d1 ${url} + ${resp}= Post Request dcaegen2-d1 ${evtpath} data=${evtdata} headers=${httpheaders} + #Log Received response from dcae ${resp.json()} + [return] ${resp} +PostCall + [Arguments] ${url} ${data} + ${headers}= Create Dictionary Accept=application/json Content-Type=application/json + ${resp}= Evaluate requests.post('${url}',data='${data}', headers=${headers},verify=False) requests + [Return] ${resp} + +GetCall + [Arguments] ${url} + ${resp}= Evaluate requests.get('${url}') requests + [Return] ${resp} +
\ No newline at end of file diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/authorization.robot b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/authorization.robot index 1b832f27d..15c1c4896 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/authorization.robot +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/authorization.robot @@ -11,21 +11,47 @@ Test Teardown VES-HV Collector Test Shutdown Client Authorization Suite Setup Log Started Suite: VES-HV Client Authorization ${XNF_PORTS_LIST}= Create List 7000 - Configure Invalid xNF Simulators On Ports ${XNF_PORTS_LIST} + ${XNF_WITH_INVALID_CERTIFICATES}= Configure xNF Simulators ${XNF_PORTS_LIST} + ... should_use_valid_certs=${false} + Set Suite Variable ${XNF_WITH_INVALID_CERTIFICATES} + ${XNF_PORTS_LIST}= Create List 7001 + ${XNF_WITHOUT_SSL}= Configure xNF Simulators ${XNF_PORTS_LIST} + ... should_disable_ssl=${true} + Set Suite Variable ${XNF_WITHOUT_SSL} + ${XNF_PORTS_LIST}= Create List 7002 + ${XNF_WITHOUT_SSL_CONNECTING_TO_UNENCRYPTED_HV_VES}= Configure xNF Simulators ${XNF_PORTS_LIST} + ... should_disable_ssl=${true} + ... should_connect_to_unencrypted_hv_ves=${true} + Set Suite Variable ${XNF_WITHOUT_SSL_CONNECTING_TO_UNENCRYPTED_HV_VES} Log Suite setup finished - *** Test Cases *** Authorization [Documentation] VES-HV Collector should not authorize XNF with invalid certificate and not route any message ... to topics - ${SIMULATORS_LIST}= Get Invalid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_VALID_MESSAGES_REQUEST} + Send Messages From xNF Simulators ${XNF_WITH_INVALID_CERTIFICATES} ${XNF_VALID_MESSAGES_REQUEST} + + Wait until keyword succeeds 60 sec 5 sec + ... Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_0} + +Unencrypted connection from client + [Documentation] VES-HV Collector should not authorize XNF trying to connect through unencrypted connection + + Send Messages From xNF Simulators ${XNF_WITHOUT_SSL} ${XNF_VALID_MESSAGES_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_0} +Unencrypted connection on both ends + [Documentation] When run without SSL turned on, VES-HV Collector should route all valid messages + ... from xNF trying to connect through unencrypted connection + + Send Messages From xNF Simulators ${XNF_WITHOUT_SSL_CONNECTING_TO_UNENCRYPTED_HV_VES} ${XNF_VALID_MESSAGES_REQUEST} + + Wait until keyword succeeds 60 sec 5 sec + ... 
Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_5000} + *** Variables *** ${VES_HV_SCENARIOS} %{WORKSPACE}/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios @@ -33,3 +59,4 @@ ${VES_HV_SCENARIOS} %{WORKSPACE}/test/csit/tests/dcae ${XNF_VALID_MESSAGES_REQUEST} ${VES_HV_SCENARIOS}/authorization/xnf-valid-messages-request.json ${AMOUNT_0} 0 +${AMOUNT_5000} 5000 diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/HttpRequests.py b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/HttpRequests.py index 0d1d928b5..c0dcd81d4 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/HttpRequests.py +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/HttpRequests.py @@ -1,12 +1,19 @@ import requests from robot.api import logger +valid_status_codes = [ + requests.codes.ok, + requests.codes.accepted +] + + def session_without_env(): session = requests.Session() session.trust_env = False return session + def checkStatusCode(status_code, server_name): - if status_code != 200: + if status_code not in valid_status_codes: logger.error("Response status code from " + server_name + ": " + str(status_code)) - raise (Exception(server_name + " returned status code " + status_code))
\ No newline at end of file + raise (Exception(server_name + " returned status code " + status_code)) diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/XnfSimulatorLibrary.py b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/XnfSimulatorLibrary.py index b2466d7ca..26d5a91c2 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/XnfSimulatorLibrary.py +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/libraries/XnfSimulatorLibrary.py @@ -1,4 +1,3 @@ -from VesHvContainersUtilsLibrary import copy_to_container import HttpRequests import os import docker @@ -16,13 +15,22 @@ ONE_SECOND_IN_NANOS = 10 ** 9 class XnfSimulatorLibrary: - def start_xnf_simulators(self, list_of_ports, valid_certs=True): + def start_xnf_simulators(self, list_of_ports, + should_use_valid_certs=True, + should_disable_ssl=False, + should_connect_to_unencrypted_hv_ves=False): logger.info("Creating " + str(len(list_of_ports)) + " xNF Simulator containers") dockerClient = docker.from_env() - cert_name_prefix = "" if valid_certs else "invalid_" + self.pullImageIfAbsent(dockerClient) logger.info("Using image: " + SIMULATOR_IMAGE_FULL_NAME) - simulators_addresses = self.create_simulators(dockerClient, list_of_ports, cert_name_prefix) + + simulators_addresses = self.create_containers(dockerClient, + list_of_ports, + should_use_valid_certs, + should_disable_ssl, + should_connect_to_unencrypted_hv_ves) + self.assert_containers_startup_was_successful(dockerClient) dockerClient.close() return simulators_addresses @@ -35,34 +43,24 @@ class XnfSimulatorLibrary: "This can take a while.") dockerClient.images.pull(SIMULATOR_IMAGE_FULL_NAME) - def create_simulators(self, dockerClient, list_of_ports, cert_name_prefix): + def create_containers(self, + dockerClient, + list_of_ports, + should_use_valid_certs, + should_disable_ssl, + should_connect_to_unencrypted_hv_ves): simulators_addresses = [] for port in list_of_ports: - container = self.run_simulator(dockerClient, port, - collector_certs_lookup_dir + cert_name_prefix + "client.crt", - collector_certs_lookup_dir + cert_name_prefix + "client.key", - collector_certs_lookup_dir + cert_name_prefix + "trust.crt" - ) - + xnf = XnfSimulator(port, should_use_valid_certs, should_disable_ssl, should_connect_to_unencrypted_hv_ves) + container = self.run_simulator(dockerClient, xnf) logger.info("Started container: " + container.name + " " + container.id) - simulators_addresses.append(container.name + ":" + port) + simulators_addresses.append(container.name + ":" + xnf.port) return simulators_addresses - def run_simulator(self, dockerClient, port, client_crt_path, client_key_path, client_trust_store): - xNF_startup_command = ["--listen-port", port, - "--ves-host", "ves-hv-collector", - "--ves-port", "6061", - "--cert-file", client_crt_path, - "--private-key-file", client_key_path, - "--trust-cert-file", client_trust_store] - xNF_healthcheck_command = { - "interval": 5 * ONE_SECOND_IN_NANOS, - "timeout": 3 * ONE_SECOND_IN_NANOS, - "retries": 1, - "test": ["CMD", "curl", "--request", "GET", - "--fail", "--silent", "--show-error", - "localhost:" + port + "/healthcheck"] - } + def run_simulator(self, dockerClient, xnf): + xNF_startup_command = xnf.get_startup_command() + xNF_healthcheck_command = xnf.get_healthcheck_command() + port = xnf.port logger.info("Startup command: " + str(xNF_startup_command)) logger.info("Healthcheck command: " + str(xNF_healthcheck_command)) return dockerClient.containers.run(SIMULATOR_IMAGE_FULL_NAME, @@ 
-72,7 +70,7 @@ class XnfSimulatorLibrary: network="ves-hv-default", ports={port + "/tcp": port}, volumes=self.container_volumes(), - name="ves-hv-collector-xnf-simulator" + port) + name=xnf.container_name_prefix + port) def container_volumes(self): return {certificates_dir_path: {"bind": collector_certs_lookup_dir, "mode": 'rw'}} @@ -121,6 +119,46 @@ class XnfSimulatorLibrary: HttpRequests.checkStatusCode(resp.status_code, XNF_SIMULATOR_NAME) +class XnfSimulator: + container_name_prefix = "ves-hv-collector-xnf-simulator" + + def __init__(self, + port, + should_use_valid_certs, + should_disable_ssl, + should_connect_to_unencrypted_hv_ves): + self.port = port + cert_name_prefix = "" if should_use_valid_certs else "invalid_" + certificates_path_with_file_prefix = collector_certs_lookup_dir + cert_name_prefix + self.cert_path = certificates_path_with_file_prefix + "client.crt" + self.key_path = certificates_path_with_file_prefix + "client.key" + self.trust_cert_path = certificates_path_with_file_prefix + "trust.crt" + self.disable_ssl = should_disable_ssl + self.hv_collector_host = "unencrypted-ves-hv-collector" \ + if should_connect_to_unencrypted_hv_ves else "ves-hv-collector" + + def get_startup_command(self): + startup_command = ["--listen-port", self.port, + "--ves-host", self.hv_collector_host, + "--ves-port", "6061", + "--cert-file", self.cert_path, + "--private-key-file", self.key_path, + "--trust-cert-file", self.trust_cert_path] + if (self.disable_ssl): + startup_command.append("--ssl-disable") + return startup_command + + def get_healthcheck_command(self): + return { + "interval": 5 * ONE_SECOND_IN_NANOS, + "timeout": 3 * ONE_SECOND_IN_NANOS, + "retries": 1, + "test": ["CMD", "curl", "--request", "GET", + "--fail", "--silent", "--show-error", + "localhost:" + self.port + "/healthcheck"] + } + + class ContainerException(Exception): def __init__(self, message): super(ContainerException, self).__init__(message) diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/message-routing.robot b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/message-routing.robot index 6153afa0a..89208e456 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/message-routing.robot +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/message-routing.robot @@ -14,7 +14,7 @@ Test Teardown VES-HV Collector Test Shutdown Message Routing Suite Setup Log Started Suite: VES-HV Message Routing ${XNF_PORTS_LIST}= Create List 7000 - Configure Valid xNF Simulators On Ports ${XNF_PORTS_LIST} + Configure xNF Simulators Using Valid Certificates On Ports ${XNF_PORTS_LIST} Log Suite setup finished *** Test Cases *** @@ -22,8 +22,8 @@ Correct Messages Routing [Documentation] VES-HV Collector should route all valid messages to topics specified in configuration ... and do not change message payload generated in XNF simulator - ${SIMULATORS_LIST}= Get Valid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_FIXED_PAYLOAD_REQUEST} + ${XNF_SIMULATOR}= Get xNF Simulators Using Valid Certificates + Send Messages From xNF Simulators ${XNF_SIMULATOR} ${XNF_FIXED_PAYLOAD_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... 
Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_25000} @@ -33,8 +33,8 @@ Correct Messages Routing Too big payload message handling [Documentation] VES-HV Collector should interrupt the stream when encountered message with too big payload - ${SIMULATORS_LIST}= Get Valid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_TOO_BIG_PAYLOAD_REQUEST} + ${XNF_SIMULATOR}= Get xNF Simulators Using Valid Certificates + Send Messages From xNF Simulators ${XNF_SIMULATOR} ${XNF_TOO_BIG_PAYLOAD_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... Assert Dcae App Consumed Less Equal Than ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_25000} @@ -43,8 +43,8 @@ Too big payload message handling Invalid wire frame message handling [Documentation] VES-HV Collector should skip messages with invalid wire frame - ${SIMULATORS_LIST}= Get Valid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_INVALID_WIRE_FRAME_REQUEST} + ${XNF_SIMULATOR}= Get xNF Simulators Using Valid Certificates + Send Messages From xNF Simulators ${XNF_SIMULATOR} ${XNF_INVALID_WIRE_FRAME_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_50000} @@ -54,8 +54,8 @@ Invalid wire frame message handling Invalid GPB data message handling [Documentation] VES-HV Collector should skip messages with invalid GPB data - ${SIMULATORS_LIST}= Get Valid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_INVALID_GPB_DATA_REQUEST} + ${XNF_SIMULATOR}= Get xNF Simulators Using Valid Certificates + Send Messages From xNF Simulators ${XNF_SIMULATOR} ${XNF_INVALID_GPB_DATA_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_50000} @@ -65,8 +65,8 @@ Invalid GPB data message handling Unsupported domain message handling [Documentation] VES-HV Collector should skip messages with unsupported domain - ${SIMULATORS_LIST}= Get Valid xNF Simulators 1 - Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_UNSUPPORTED_DOMAIN_REQUEST} + ${XNF_SIMULATOR}= Get xNF Simulators Using Valid Certificates + Send Messages From xNF Simulators ${XNF_SIMULATOR} ${XNF_UNSUPPORTED_DOMAIN_REQUEST} Wait until keyword succeeds 60 sec 5 sec ... 
Assert Dcae App Consumed ${DCAE_APP_API_MESSAGES_COUNT_URL} ${AMOUNT_50000} diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/multiple-clients.robot b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/multiple-clients.robot index 862a2bc6a..9b1982a6b 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/multiple-clients.robot +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/multiple-clients.robot @@ -11,14 +11,14 @@ Test Teardown VES-HV Collector Test Shutdown Multiple Clients Handling Suite Setup Log Started Suite: VES-HV Multiple Clients Handling ${XNF_PORTS_LIST}= Create List 7000 7001 7002 - Configure Valid xNF Simulators On Ports ${XNF_PORTS_LIST} + Configure xNF Simulators Using Valid Certificates On Ports ${XNF_PORTS_LIST} Log Suite setup finished *** Test Cases *** Handle Multiple Connections [Documentation] VES-HV Collector should handle multiple incoming transmissions - ${SIMULATORS_LIST}= Get Valid xNF Simulators 3 + ${SIMULATORS_LIST}= Get xNF Simulators Using Valid Certificates 3 Send Messages From xNF Simulators ${SIMULATORS_LIST} ${XNF_SMALLER_PAYLOAD_REQUEST} Wait until keyword succeeds 60 sec 5 sec diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot index bc03de232..58f5cbc16 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/common-keywords.robot @@ -4,30 +4,28 @@ Library VesHvContainersUtilsLibrary Library Collections *** Keywords *** -Configure Valid xNF Simulators On Ports +Configure xNF Simulators Using Valid Certificates On Ports [Arguments] ${XNF_PORTS_LIST} - ${VALID_XNF_SIMULATORS_ADDRESSES}= Start Xnf Simulators ${XNF_PORTS_LIST} ${true} + ${VALID_XNF_SIMULATORS_ADDRESSES}= Configure xNF Simulators ${XNF_PORTS_LIST} Set Suite Variable ${VALID_XNF_SIMULATORS_ADDRESSES} - -Configure Invalid xNF Simulators On Ports +Configure xNF Simulators [Arguments] ${XNF_PORTS_LIST} - ${INVALID_XNF_SIMULATORS_ADDRESSES}= Start Xnf Simulators ${XNF_PORTS_LIST} ${false} - Set Suite Variable ${INVALID_XNF_SIMULATORS_ADDRESSES} - - -Get Valid xNF Simulators - [Arguments] ${AMOUNT} + ... ${should_use_valid_certs}=${true} + ... ${should_disable_ssl}=${false} + ... ${should_connect_to_unencrypted_hv_ves}=${false} + ${XNF_SIMULATORS_ADDRESSES}= Start Xnf Simulators ${XNF_PORTS_LIST} + ... ${should_use_valid_certs} + ... ${should_disable_ssl} + ... 
${should_connect_to_unencrypted_hv_ves} + [Return] ${XNF_SIMULATORS_ADDRESSES} + +Get xNF Simulators Using Valid Certificates + [Arguments] ${AMOUNT}=1 ${SIMULATORS}= Get Slice From List ${VALID_XNF_SIMULATORS_ADDRESSES} 0 ${AMOUNT} [Return] ${SIMULATORS} -Get Invalid xNF Simulators - [Arguments] ${AMOUNT} - ${SIMULATORS}= Get Slice From List ${INVALID_XNF_SIMULATORS_ADDRESSES} 0 ${AMOUNT} - [Return] ${SIMULATORS} - - Send Messages From xNF Simulators [Arguments] ${XNF_HOSTS_LIST} ${MESSAGE_FILEPATH} :FOR ${HOST} IN @{XNF_HOSTS_LIST} diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json index c71793d7d..75d938766 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/scenarios/authorization/xnf-valid-messages-request.json @@ -18,6 +18,6 @@ "sourceName": "sample-source-name" }, "messageType": "VALID", - "messagesAmount": 500000 + "messagesAmount": 5000 } ]
\ No newline at end of file diff --git a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/ves-hv-configuration.json b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/ves-hv-configuration.json index 3235a0c0e..88a70b0db 100644 --- a/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/ves-hv-configuration.json +++ b/test/csit/tests/dcaegen2-collectors-hv-ves/testcases/resources/ves-hv-configuration.json @@ -1,6 +1,6 @@ { - "kafkaBootstrapServers": "kafka:9092", - "routing": [ + "dmaap.kafkaBootstrapServers": "kafka:9092", + "collector.routing": [ { "fromDomain": 11, "toTopic": "test-hv-ran-meas" diff --git a/test/csit/tests/dcaegen2/prh-testcases/__init__.robot b/test/csit/tests/dcaegen2/prh-testcases/__init__.robot index e69de29bb..f13ba6df8 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/__init__.robot +++ b/test/csit/tests/dcaegen2/prh-testcases/__init__.robot @@ -0,0 +1,2 @@ +*** Settings *** +Documentation Integration - PRH suite
\ No newline at end of file diff --git a/test/csit/tests/dcaegen2/prh-testcases/prh_tests.robot b/test/csit/tests/dcaegen2/prh-testcases/prh_tests.robot index b7013c4a2..5150a4b35 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/prh_tests.robot +++ b/test/csit/tests/dcaegen2/prh-testcases/prh_tests.robot @@ -10,7 +10,7 @@ Resource resources/prh_library.robot ${DMAAP_SIMULATOR_URL} http://${DMAAP_SIMULATOR} ${AAI_SIMULATOR_URL} http://${AAI_SIMULATOR} ${PRH_URL} http://${PRH} -${EVENT_WITH_ALL_VALID_REQUIRED_FIELDS} {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} +${EVENT_WITH_ALL_VALID_REQUIRED_FIELDS} {"event": {"commonEventHeader": {"sourceName":"NOK6061ZW1"}, "pnfRegistrationFields": {"oamV4IpAddress":"10.16.123.234", "oamV6IpAddress":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} ${Not_json_format} "" *** Test Cases *** @@ -19,28 +19,18 @@ Valid DMaaP event can be converted to PNF_READY notification [Tags] PRH Valid event [Template] Valid event processing ${EVENT_WITH_ALL_VALID_REQUIRED_FIELDS} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"QTFCOC540002G", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"QTFCOC540002F", "pnfOamIpv4Address":"", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"Ericsson", "pnfSerialNumber":"QTFCOC5400000", "pnfOamIpv4Address":"", "pnfOamIpv6Address":"2001:0db8:85b3:0000:0000:8a2e:0370:7334"}}} + {"event": {"commonEventHeader": {"sourceName":"NOK6061ZW2"}, "pnfRegistrationFields": {"oamV4IpAddress":"10.17.123.234", "oamV6IpAddress":""}}} + {"event": {"commonEventHeader": {"sourceName":"ERI6061ZW3"}, "pnfRegistrationFields": {"oamV4IpAddress":"", "oamV6IpAddress":"2001:0db8:85a3:0000:0000:8b2e:0370:7334"}}} Invalid DMaaP event cannot be converted to PNF_READY notification [Documentation] PRH get invalid event from DMaaP with missing required fields - PRH does not produce PNF_READY notification [Tags] PRH Invalid event [Template] Invalid event processing - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"", "pnfOamIpv4Address":"", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"Nokia", "pnfSerialNumber":"", "pnfOamIpv4Address":"", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"", 
"pnfSerialNumber":"QTFCOC540002E", "pnfOamIpv4Address":"", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"", "pnfOamIpv4Address":"10.16.123.234", "pnfOamIpv6Address":""}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"", "pnfOamIpv4Address":"", "pnfOamIpv6Address":"2001:0db8:85a3:0000:0000:8a2e:0370:7334"}}} - {"event": {"otherFields": {"pnfVendorName":"", "pnfSerialNumber":"", "pnfOamIpv4Address":"", "pnfOamIpv6Address":""}}} - ${Not_json_format} + {"event": {"commonEventHeader": {"sourceName":"NOK6061ZW4"}, "pnfRegistrationFields": {"oamV4IpAddress":"", "oamV6IpAddress":""}}} + {"event": {"commonEventHeader": {"sourceName":""}, "pnfRegistrationFields": {"oamV4IpAddress":"10.18.123.234", "oamV6IpAddress":"2001:0db8:85a3:0000:0000:8a2a:0370:7334"}}} + {"event": {"commonEventHeader": {"sourceName":""}, "pnfRegistrationFields": {"oamV4IpAddress":"10.17.163.234", "oamV6IpAddress":""}}} + {"event": {"commonEventHeader": {"sourceName":""}, "pnfRegistrationFields": {"oamV4IpAddress":"", "oamV6IpAddress":"2001:0db8:85a3:0000:0000:8b2f:0370:7334"}}} + {"event": {"commonEventHeader": {"sourceName":""}, "pnfRegistrationFields": {"oamV4IpAddress":"", "oamV6IpAddress":""}}} Get valid event from DMaaP and record in AAI does not exist [Documentation] PRH get valid event from DMaaP with all required fields and in AAI record doesn't exist - PRH does not produce PNF_READY notification @@ -48,7 +38,13 @@ Get valid event from DMaaP and record in AAI does not exist [Timeout] 30s Set PNF name in AAI wrong_aai_record Set event in DMaaP ${EVENT_WITH_ALL_VALID_REQUIRED_FIELDS} - Wait Until Keyword Succeeds 100x 300ms Check PRH log org.onap.dcaegen2.services.prh.exceptions.AAINotFoundException: Incorrect response code for continuation of tasks workflow + Wait Until Keyword Succeeds 100x 300ms Check PRH log java.io.IOException: Connection closed prematurely + +Event in DMaaP is not JSON format + [Documentation] PRH get not JSON format event from DMaaP - PRH does not produce PNF_READY notification + [Tags] PRH + Set event in DMaaP ${Not_json_format} + Wait Until Keyword Succeeds 100x 300ms Check PRH log |java.lang.IllegalStateException: Not a JSON Array: Get valid event from DMaaP and AAI is not responding [Documentation] PRH get valid event from DMaaP with all required fields and AAI is not responding - PRH does not produce PNF_READY notification @@ -56,4 +52,4 @@ Get valid event from DMaaP and AAI is not responding [Timeout] 180s Stop AAI Set event in DMaaP ${EVENT_WITH_ALL_VALID_REQUIRED_FIELDS} - Wait Until Keyword Succeeds 100x 300ms Check PRH log java.net.NoRouteToHostException: Host is unreachable (Host unreachable) + Wait Until Keyword Succeeds 100x 300ms Check PRH log java.net.UnknownHostException: aai diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/PrhLibrary.py b/test/csit/tests/dcaegen2/prh-testcases/resources/PrhLibrary.py index ac3fba46e..7c52f5430 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/PrhLibrary.py +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/PrhLibrary.py @@ -21,16 +21,18 @@ class PrhLibrary(object): @staticmethod def create_pnf_ready_notification(json_file): json_to_python = json.loads(json_file) - ipv4 = json_to_python["event"]["otherFields"]["pnfOamIpv4Address"] - ipv6 = 
json_to_python["event"]["otherFields"]["pnfOamIpv6Address"] - pnf_name = _create_pnf_name(json_file) - str_json = '{"pnf-name":"' + pnf_name + '","ipaddress-v4-oam":"' + ipv4 + '","ipaddress-v6-oam":"' + ipv6 + '"}' + ipv4 = json_to_python["event"]["pnfRegistrationFields"]["oamV4IpAddress"] + ipv6 = json_to_python["event"]["pnfRegistrationFields"]["oamV6IpAddress"] + header = json_to_python["event"]["commonEventHeader"]["sourceName"] + str_json = '{"sourceName":"' + header + '","ipaddress-v4-oam":"' + ipv4 + '","ipaddress-v6-oam":"' + ipv6 + '"}' python_to_json = json.dumps(str_json) return python_to_json.replace("\\", "")[1:-1] @staticmethod def create_pnf_name(json_file): - return _create_pnf_name(json_file) + json_to_python = json.loads(json_file) + header = json_to_python["event"]["commonEventHeader"]["sourceName"] + return header @staticmethod def stop_aai(): @@ -38,9 +40,7 @@ class PrhLibrary(object): container = client.containers.get('aai_simulator') container.stop() - -def _create_pnf_name(json_file): - json_to_python = json.loads(json_file) - vendor = json_to_python["event"]["otherFields"]["pnfVendorName"] - serial_number = json_to_python["event"]["otherFields"]["pnfSerialNumber"] - return vendor[:3].upper() + serial_number + def create_invalid_notification(self, json_file): + return self.create_pnf_ready_notification(json_file).replace("\":", "\": ")\ + .replace("ipaddress-v4-oam", "oamV4IpAddress").replace("ipaddress-v6-oam", "oamV6IpAddress")\ + .replace("}", "\\n}") diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/docker-compose.yml b/test/csit/tests/dcaegen2/prh-testcases/resources/docker-compose.yml index b1f84fda2..67921e8e0 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/docker-compose.yml +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/docker-compose.yml @@ -1,12 +1,15 @@ version: '3' services: prh: - image: nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.prh.prh-app-server + image: nexus3.onap.org:10001/onap/org.onap.dcaegen2.services.prh.prh-app-server:latest command: > - --dmaap.dmaapConsumerConfiguration.dmaapPortNumber=2222 - --dmaap.dmaapProducerConfiguration.dmaapPortNumber=2222 - --aai.aaiClientConfiguration.aaiHostPortNumber=3333 - --aai.aaiClientConfiguration.aaiProtocol=http + --dmaap.dmaapConsumerConfiguration.dmaapHostName=dmaap + --dmaap.dmaapConsumerConfiguration.dmaapPortNumber=2222 + --dmaap.dmaapProducerConfiguration.dmaapHostName=dmaap + --dmaap.dmaapProducerConfiguration.dmaapPortNumber=2222 + --aai.aaiClientConfiguration.aaiHostPortNumber=3333 + --aai.aaiClientConfiguration.aaiHost=aai + --aai.aaiClientConfiguration.aaiProtocol=http entrypoint: - java - -Dspring.profiles.active=dev @@ -18,10 +21,10 @@ services: - "8433:8433" container_name: prh depends_on: - - dmaap_simulator - - aai_simulator + - dmaap + - aai - dmaap_simulator: + dmaap: build: context: simulator dockerfile: DMaaP_simulator @@ -29,7 +32,7 @@ services: - "2222:2222" container_name: dmaap_simulator - aai_simulator: + aai: build: context: simulator dockerfile: AAI_simulator diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/prh_library.robot b/test/csit/tests/dcaegen2/prh-testcases/resources/prh_library.robot index 10bc26c18..b7e339bc9 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/prh_library.robot +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/prh_library.robot @@ -1,6 +1,7 @@ *** Settings *** Library RequestsLibrary Library Collections +Library PrhLibrary.py *** Keywords *** Create header @@ -17,8 +18,10 @@ 
Invalid event processing [Arguments] ${input_invalid_event_in_dmaap} [Timeout] 30s Set event in DMaaP ${input_invalid_event_in_dmaap} - Wait Until Keyword Succeeds 100x 100ms Check PRH log INFO 1 --- [pool-2-thread-1] o.o.d.s.prh.tasks.DmaapConsumerTaskImpl \ : Consumed model from DmaaP: ${input_invalid_event_in_dmaap} - + ${invalid_notification}= Create invalid notification ${input_invalid_event_in_dmaap} + ${notification}= Catenate SEPARATOR= \\n |org.onap.dcaegen2.services.prh.exceptions.DmaapNotFoundException: Incorrect json, consumerDmaapModel can not be created: ${invalid_notification} + Wait Until Keyword Succeeds 100x 100ms Check PRH log ${notification} + Valid event processing [Arguments] ${input_valid_event_in_dmaap} [Timeout] 30s diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI.py b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI.py index e70d8d30f..c57903c30 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI.py +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI.py @@ -7,6 +7,7 @@ pnfs = 'Empty' class AAIHandler(BaseHTTPRequestHandler): + def do_PUT(self): if re.search('/set_pnfs', self.path): global pnfs diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI_simulator b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI_simulator index 013cd0a65..89a266ebe 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI_simulator +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/AAI_simulator @@ -1,4 +1,12 @@ -FROM python:3 +FROM alpine:3.8 + +RUN apk add --no-cache python3 && \ + python3 -m ensurepip && \ + rm -r /usr/lib/python*/ensurepip && \ + pip3 install --upgrade pip setuptools && \ + if [ ! -e /usr/bin/pip ]; then ln -s pip3 /usr/bin/pip ; fi && \ + if [[ ! -e /usr/bin/python ]]; then ln -sf /usr/bin/python3 /usr/bin/python; fi && \ + rm -r /root/.cache ADD AAI.py / diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP.py b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP.py index 210378421..96e22a141 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP.py +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP.py @@ -8,6 +8,7 @@ received_event_to_get_method = 'Empty' class DMaaPHandler(BaseHTTPRequestHandler): + def do_PUT(self): if re.search('/set_get_event', self.path): global received_event_to_get_method @@ -27,7 +28,7 @@ class DMaaPHandler(BaseHTTPRequestHandler): return def do_GET(self): - if re.search('/events/unauthenticated.SEC_OTHER_OUTPUT/OpenDcae-c12/c12', self.path): + if re.search('/events/unauthenticated.VES_PNFREG_OUTPUT/OpenDcae-c12/c12', self.path): _header_200_and_json(self) self.wfile.write(received_event_to_get_method) elif re.search('/events/pnfReady', self.path): diff --git a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP_simulator b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP_simulator index cf4160c89..9cf21dc92 100644 --- a/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP_simulator +++ b/test/csit/tests/dcaegen2/prh-testcases/resources/simulator/DMaaP_simulator @@ -1,4 +1,12 @@ -FROM python:3 +FROM alpine:3.8 + +RUN apk add --no-cache python3 && \ + python3 -m ensurepip && \ + rm -r /usr/lib/python*/ensurepip && \ + pip3 install --upgrade pip setuptools && \ + if [ ! -e /usr/bin/pip ]; then ln -s pip3 /usr/bin/pip ; fi && \ + if [[ ! 
-e /usr/bin/python ]]; then ln -sf /usr/bin/python3 /usr/bin/python; fi && \ + rm -r /root/.cache ADD DMaaP.py / diff --git a/test/csit/tests/policy/apex-pdp/apex-pdp-test.robot b/test/csit/tests/policy/apex-pdp/apex-pdp-test.robot new file mode 100644 index 000000000..f1dea17d1 --- /dev/null +++ b/test/csit/tests/policy/apex-pdp/apex-pdp-test.robot @@ -0,0 +1,14 @@ +*** Settings *** +Library Collections +Library RequestsLibrary +Library OperatingSystem +Library json + +*** Test Cases *** + +Call Apex Policy + Create Session apexSession http://${APEX_IP}:23324 max_retries=3 + ${data}= Get Binary File ${CURDIR}${/}data${/}event.json + &{headers}= Create Dictionary Content-Type=application/json Accept=application/json + ${resp}= Put Request apexSession /apex/FirstConsumer/EventIn data=${data} headers=${headers} + Should Be Equal As Strings ${resp.status_code} 200 diff --git a/test/csit/tests/policy/apex-pdp/data/event.json b/test/csit/tests/policy/apex-pdp/data/event.json new file mode 100644 index 000000000..9dbf2790b --- /dev/null +++ b/test/csit/tests/policy/apex-pdp/data/event.json @@ -0,0 +1,11 @@ +{ + "nameSpace": "org.onap.policy.apex.sample.events", + "name": "Event0000", + "version": "0.0.1", + "source": "REST_0", + "target": "apex", + "TestSlogan": "Test slogan for External Event0", + "TestMatchCase": 3, + "TestTimestamp": 1536363522018, + "TestTemperature": 9080.866 +} diff --git a/test/csit/tests/policy/suite1/global_properties.robot b/test/csit/tests/policy/suite1/global_properties.robot index 911fdaff9..60826bbc3 100644 --- a/test/csit/tests/policy/suite1/global_properties.robot +++ b/test/csit/tests/policy/suite1/global_properties.robot @@ -25,6 +25,6 @@ ${GLOBAL_VM_PRIVATE_KEY} ${EXECDIR}/robot/assets/keys/robot_ssh_private_key.pv ${GLOBAL_POLICY_SERVER_URL} https://%{PDP_IP}:8081 ${GLOBAL_POLICY_AUTH} dGVzdHBkcDphbHBoYTEyMw== ${GLOBAL_POLICY_CLIENTAUTH} cHl0aG9uOnRlc3Q= -${GLOBAL_POLICY_HEALTHCHECK_URL} http://%{POLICY_IP}:6969 +${GLOBAL_POLICY_HEALTHCHECK_URL} https://%{POLICY_IP}:6969 ${GLOBAL_POLICY_USERNAME} healthcheck ${GLOBAL_POLICY_PASSWORD} zb!XztG34 diff --git a/test/csit/tests/sdnc/healthcheck/test1.robot b/test/csit/tests/sdnc/healthcheck/test1.robot index 4bf3d25e7..c002a1892 100644 --- a/test/csit/tests/sdnc/healthcheck/test1.robot +++ b/test/csit/tests/sdnc/healthcheck/test1.robot @@ -14,32 +14,31 @@ ${PRELOAD_VNF_TOPOLOGY_OPERATION_PATH} /operations/VNF-API:preload-vnf-topology Healthcheck API Create Session sdnc http://localhost:8282/restconf - ${data}= Get Binary File ${CURDIR}${/}data${/}data.json + ${data}= Get File ${CURDIR}${/}data${/}data.json &{headers}= Create Dictionary Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ== Content-Type=application/json Accept=application/json ${resp}= Post Request sdnc ${SDN_HEALTHCHECK_OPERATION_PATH} data=${data} headers=${headers} Should Be Equal As Strings ${resp.status_code} 200 Should Be Equal As Strings ${resp.json()['output']['response-code']} 200 - + Check SLI-API Create Session sdnc http://localhost:8282 &{headers}= Create Dictionary Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ== Content-Type=application/json Accept=application/json ${resp}= Get Request sdnc ${SDN_APIDOCS_URI} headers=${headers} - Log ${resp.content} - Should Contain ${resp.content} SLI-API + Log ${resp.text} + Should Contain ${resp.text} SLI-API Check VNF-API Create Session sdnc http://localhost:8282 &{headers}= Create Dictionary Authorization=Basic 
YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ== Content-Type=application/json Accept=application/json ${resp}= Get Request sdnc ${SDN_APIDOCS_URI} headers=${headers} - Log ${resp.content} - Should Contain ${resp.content} VNF-API + Log ${resp.text} + Should Contain ${resp.text} VNF-API Test Preload Create Session sdnc http://localhost:8282/restconf - ${data}= Get Binary File ${CURDIR}${/}data${/}preload.json + ${data}= Get File ${CURDIR}${/}data${/}preload.json &{headers}= Create Dictionary Authorization=Basic YWRtaW46S3A4Yko0U1hzek0wV1hsaGFrM2VIbGNzZTJnQXc4NHZhb0dHbUp2VXkyVQ== Content-Type=application/json Accept=application/json ${resp}= Post Request sdnc ${PRELOAD_VNF_TOPOLOGY_OPERATION_PATH} data=${data} headers=${headers} - Log ${resp.content} + Log ${resp.text} Should Be Equal As Strings ${resp.status_code} 200 Should Be Equal As Strings ${resp.json()['output']['response-code']} 200 - diff --git a/test/csit/tests/vfc/nfvo-wfengine/workflow.robot b/test/csit/tests/vfc/nfvo-wfengine/workflow.robot index c9dbe6c46..8039ae177 100644 --- a/test/csit/tests/vfc/nfvo-wfengine/workflow.robot +++ b/test/csit/tests/vfc/nfvo-wfengine/workflow.robot @@ -80,34 +80,34 @@ UnDeploy BPMN File Testt On MgrService ${resp}= Delete Request web_session /api/workflow/v1/package/${deployedId} Should Be Equal ${resp.status_code} ${200} -Deploy BPMN File Test On MSB - [Documentation] Check if the test bpmn file can be deployed in activiti engine - ${auth}= Create List kermit kermit - ${headers}= Create Dictionary Accept=application/json - Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth} - ${files}= evaluate {"file":open('${bmpfilepath}','rb')} - ${resp}= Post Request web_session api/workflow/v1/package files=${files} - Should Be Equal ${resp.status_code} ${200} - Log ${resp.json()} - ${deployedId}= Set Variable ${resp.json()["deployedId"]} - Set Global Variable ${deployedId} +# Deploy BPMN File Test On MSB +# [Documentation] Check if the test bpmn file can be deployed in activiti engine +# ${auth}= Create List kermit kermit +# ${headers}= Create Dictionary Accept=application/json +# Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth} +# ${files}= evaluate {"file":open('${bmpfilepath}','rb')} +# ${resp}= Post Request web_session api/workflow/v1/package files=${files} +# Should Be Equal ${resp.status_code} ${200} +# Log ${resp.json()} +# ${deployedId}= Set Variable ${resp.json()["deployedId"]} +# Set Global Variable ${deployedId} -Exectue BPMN File Testt On MSB - [Documentation] Check if the test bpmn file can be exectued in MSB - ${headers} Create Dictionary Content-Type=application/json Accept=application/json Authorization=Basic a2VybWl0Omtlcm1pdA== - Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} - ${body} Create Dictionary processDefinitionKey=${processId} - ${body} dumps ${body} - ${resp}= Post Request web_session api/workflow/v1/process/instance ${body} - Should Be Equal ${resp.status_code} ${200} - Log ${resp.json()} - Should Be Equal ${resp.json()["processDefinitionKey"]} ${processId} +# Exectue BPMN File Testt On MSB +# [Documentation] Check if the test bpmn file can be exectued in MSB +# ${headers} Create Dictionary Content-Type=application/json Accept=application/json Authorization=Basic a2VybWl0Omtlcm1pdA== +# Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} +# ${body} Create Dictionary processDefinitionKey=${processId} +# ${body} dumps ${body} +# 
${resp}= Post Request web_session api/workflow/v1/process/instance ${body} +# Should Be Equal ${resp.status_code} ${200} +# Log ${resp.json()} +# Should Be Equal ${resp.json()["processDefinitionKey"]} ${processId} -UnDeploy BPMN File Testt On MSB - [Documentation] Check if the test bpmn file can be undeployed in MSB - log ${deployedId} - ${auth}= Create List kermit kermit - ${headers} Create Dictionary Content-Type=application/json Accept=application/json - Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth} - ${resp}= Delete Request web_session /api/workflow/v1/package/${deployedId} - Should Be Equal ${resp.status_code} ${200} +# UnDeploy BPMN File Testt On MSB +# [Documentation] Check if the test bpmn file can be undeployed in MSB +# log ${deployedId} +# ${auth}= Create List kermit kermit +# ${headers} Create Dictionary Content-Type=application/json Accept=application/json +# Create Session web_session http://${MSB_IP}:${MSB_PORT} headers=${headers} auth=${auth} +# ${resp}= Delete Request web_session /api/workflow/v1/package/${deployedId} +# Should Be Equal ${resp.status_code} ${200} diff --git a/test/csit/tests/vid/resources/docker-compose.yml b/test/csit/tests/vid/resources/docker-compose.yml index 93b317001..879c23d47 100644 --- a/test/csit/tests/vid/resources/docker-compose.yml +++ b/test/csit/tests/vid/resources/docker-compose.yml @@ -1,7 +1,7 @@ version: '3' services: vid-server: - image: nexus3.onap.org:10001/onap/vid:latest + image: nexus3.onap.org:10001/onap/vid:3.0-STAGING-latest environment: - VID_MYSQL_DBNAME=vid_openecomp_epsdk - VID_MYSQL_PASS=Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U @@ -23,7 +23,6 @@ services: container_name: vid-mariadb volumes: - ${WORKSPACE}/data/clone/vid/lf_config/vid-my.cnf:/etc/mysql/my.cnf - - ${WORKSPACE}/data/clone/vid/lf_config/vid-schema.sql:/docker-entrypoint-initdb.d/vid-schema.sql - /var/lib/mysql sdc_simulator: diff --git a/test/ete/scripts/teardown-onap.sh b/test/ete/scripts/teardown-onap.sh index 61e643b64..77b8233fa 100755 --- a/test/ete/scripts/teardown-onap.sh +++ b/test/ete/scripts/teardown-onap.sh @@ -14,14 +14,14 @@ while getopts ":rqn:" o; do if [ $answer = "y" ] || [ $answer = "Y" ] || [ $answer = "yes" ] || [ $answer = "Yes"]; then echo "This may delete the work of other colleages within the same enviroment" read -p "Are you certain this is what you want? (type y to confirm):" answer2 - + if [ $answer2 = "y" ] || [ $answer2 = "Y" ] || [ $answer2 = "yes" ] || [ $answer2 = "Yes"]; then full_deletion=true - else + else echo "Ending program" exit 1 fi - else + else echo "Ending program" exit 1 fi @@ -46,7 +46,7 @@ fi source $WORKSPACE/test/ete/scripts/install_openstack_cli.sh -if [ "$full_deletion" = true ];then +if [ "$full_deletion" = true ];then echo "Commencing delete, press CRTL-C to stop" sleep 10 @@ -92,19 +92,21 @@ if [ "$full_deletion" = true ];then echo "No existing stacks to delete." fi -else - #Restrained teardown +else + #Restrained teardown echo "Restrained teardown" - + STACK=$install_name - if [ ! -z "${STACK}" ]; then + STATUS=$(openstack stack check $STACK) + + if [ "Stack not found: $install_name" != "$STATUS" ]; then openstack stack delete $STACK - + until [ "DELETE_IN_PROGRESS" != "$(openstack stack show -c stack_status -f value $STACK)" ]; do sleep 2 done else echo "No existing stack with the name $install_name." fi -fi
\ No newline at end of file +fi diff --git a/version-manifest/src/main/resources/docker-manifest-staging.csv b/version-manifest/src/main/resources/docker-manifest-staging.csv index 0b338ab3f..0515ca653 100644 --- a/version-manifest/src/main/resources/docker-manifest-staging.csv +++ b/version-manifest/src/main/resources/docker-manifest-staging.csv @@ -45,20 +45,20 @@ onap/music/prom,1.0.5-latest onap/oom/kube2msb,1.1.0 onap/optf-has,1.2.1-STAGING-latest onap/optf-osdf,1.2.1-STAGING-latest -onap/org.onap.dcaegen2.collectors.snmptrap,1.3.0 -onap/org.onap.dcaegen2.collectors.ves.vescollector,1.2.0 +onap/org.onap.dcaegen2.collectors.snmptrap,1.4.0 +onap/org.onap.dcaegen2.collectors.ves.vescollector,1.3.1 onap/org.onap.dcaegen2.deployments.bootstrap,1.1.3 onap/org.onap.dcaegen2.deployments.cm-container,1.3.0 onap/org.onap.dcaegen2.deployments.healthcheck-container,1.1.0 -onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container,1.1.11 +onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container,1.4.1 onap/org.onap.dcaegen2.deployments.redis-cluster-container,1.0.0 onap/org.onap.dcaegen2.deployments.tca-cdap-container,1.1.0 onap/org.onap.dcaegen2.platform.cdapbroker,4.1.0 -onap/org.onap.dcaegen2.platform.configbinding,2.1.5 -onap/org.onap.dcaegen2.platform.deployment-handler,2.1.5 -onap/org.onap.dcaegen2.platform.inventory-api,3.0.1 -onap/org.onap.dcaegen2.platform.policy-handler,2.4.5 -onap/org.onap.dcaegen2.platform.servicechange-handler,1.1.4 +onap/org.onap.dcaegen2.platform.configbinding.app-app,2.2.3 +onap/org.onap.dcaegen2.platform.deployment-handler,3.0.0 +onap/org.onap.dcaegen2.platform.inventory-api,3.0.3 +onap/org.onap.dcaegen2.platform.policy-handler,4.2.0 +onap/org.onap.dcaegen2.platform.servicechange-handler,1.1.5 onap/policy-apex-pdp,2.0-SNAPSHOT-latest onap/policy-drools,1.3-SNAPSHOT-latest onap/policy-pe,1.3-SNAPSHOT-latest |
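The staging manifest touched above is a plain two-column CSV of image name and tag. A small sanity check such as the following can be run after version bumps like the DCAE ones in this patch; it is a hypothetical helper, not part of this repository, and the path plus the assumption of exactly two non-empty columns per row are mine.

import csv

MANIFEST = "version-manifest/src/main/resources/docker-manifest-staging.csv"  # assumed location

def validate_manifest(path=MANIFEST):
    # Return (line_number, row) pairs for rows that are not "image,tag" with both fields non-empty.
    problems = []
    with open(path, newline="") as manifest_file:
        for line_no, row in enumerate(csv.reader(manifest_file), start=1):
            if len(row) != 2 or not row[0].strip() or not row[1].strip():
                problems.append((line_no, row))
    return problems

if __name__ == "__main__":
    for line_no, row in validate_manifest():
        print("malformed row %d: %r" % (line_no, row))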
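For reference, the flag handling introduced earlier in this patch by the new XnfSimulator class in XnfSimulatorLibrary.py boils down to the mapping sketched below. This is an illustrative, standalone mirror of that logic, not the test library itself; CERTS_DIR stands in for the module-level collector_certs_lookup_dir, whose value is assumed here.

# Standalone sketch of the flag-to-startup-command mapping used by XnfSimulator.
CERTS_DIR = "/etc/ves-hv/"  # assumed value of collector_certs_lookup_dir

def build_startup_command(port,
                          should_use_valid_certs=True,
                          should_disable_ssl=False,
                          should_connect_to_unencrypted_hv_ves=False):
    cert_prefix = "" if should_use_valid_certs else "invalid_"
    collector_host = ("unencrypted-ves-hv-collector"
                      if should_connect_to_unencrypted_hv_ves
                      else "ves-hv-collector")
    command = ["--listen-port", port,
               "--ves-host", collector_host,
               "--ves-port", "6061",
               "--cert-file", CERTS_DIR + cert_prefix + "client.crt",
               "--private-key-file", CERTS_DIR + cert_prefix + "client.key",
               "--trust-cert-file", CERTS_DIR + cert_prefix + "trust.crt"]
    if should_disable_ssl:
        command.append("--ssl-disable")
    return command

if __name__ == "__main__":
    # An xNF simulator talking plain TCP to the unencrypted collector instance.
    print(build_startup_command("7000",
                                should_disable_ssl=True,
                                should_connect_to_unencrypted_hv_ves=True))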
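Similarly, the updated PRH test templates imply a simple validity rule: an event maps to a PNF_READY notification only when commonEventHeader.sourceName is non-empty and at least one of the two OAM addresses is non-empty. The sketch below encodes that rule as read from the test data only; it is not the PRH service implementation.

import json

def is_registerable(event_json):
    # Mirror of the valid/invalid split in prh_tests.robot: require a sourceName
    # plus at least one non-empty OAM address; anything unparseable is invalid.
    try:
        event = json.loads(event_json)["event"]
        source_name = event["commonEventHeader"]["sourceName"]
        fields = event["pnfRegistrationFields"]
        ipv4 = fields["oamV4IpAddress"]
        ipv6 = fields["oamV6IpAddress"]
    except (ValueError, KeyError, TypeError):
        return False
    return bool(source_name) and (bool(ipv4) or bool(ipv6))

if __name__ == "__main__":
    valid = ('{"event": {"commonEventHeader": {"sourceName":"NOK6061ZW1"}, '
             '"pnfRegistrationFields": {"oamV4IpAddress":"10.16.123.234", '
             '"oamV6IpAddress":""}}}')
    invalid = ('{"event": {"commonEventHeader": {"sourceName":""}, '
               '"pnfRegistrationFields": {"oamV4IpAddress":"", "oamV6IpAddress":""}}}')
    print(is_registerable(valid), is_registerable(invalid))  # True False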