summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoradheli.tavares <adheli.tavares@est.tech>2024-12-19 09:45:08 +0000
committeradheli.tavares <adheli.tavares@est.tech>2025-01-10 11:47:16 +0000
commit7a6c3faeb0134776d46352f51f02cf6b2aef72b7 (patch)
treee9fcbc4e1ad8ca7c0ad32b780402f59803801a24
parentff37c033e7c25e1dd4ab6a1cc5b3913d27a37b0b (diff)
General improvements on CSIT scripts
- some issues with functions order - clear waiting times for docker containers to come up - --local flag added to make it easier to run tests with local images Issue-ID: POLICY-5239 Change-Id: I3bcbfd88f45110436b2b0fda16c61936ef919f95 Signed-off-by: adheli.tavares <adheli.tavares@est.tech>
-rw-r--r--compose/README.md27
-rwxr-xr-xcompose/get-versions.sh7
-rwxr-xr-xcompose/start-compose.sh6
-rw-r--r--csit/README.md43
-rwxr-xr-xcsit/resources/scripts/build-csit-docker-image.sh3
-rwxr-xr-xcsit/resources/scripts/get-cluster-info.sh76
-rwxr-xr-xcsit/resources/scripts/wait_for_rest.sh6
-rw-r--r--csit/resources/tests/apex-slas-3.robot6
-rwxr-xr-xcsit/run-k8s-csit.sh135
-rwxr-xr-xcsit/run-project-csit.sh105
10 files changed, 251 insertions, 163 deletions
diff --git a/compose/README.md b/compose/README.md
index 290bdf4e..dcc21c05 100644
--- a/compose/README.md
+++ b/compose/README.md
@@ -53,10 +53,12 @@ policy-clamp-runtime-acm)
## Docker image download localization
The docker images are always downloaded from nexus repository, but if needed to build a local
-image, edit the ``get-versions.sh`` script and change the variable ``LOCAL_IMAGES``
-to `true` or edit the image tag in the docker compose file.
-Changing the variable to `true` will ensure that the newly built images locally are being used
-by not requesting a download from nexus and using the image tagged as latest.
+image, do an export ``export USE_LOCAL_IMAGES=true`` or edit the image tag in the docker compose
+file. That will ensure that the newly built images locally are being used by not requesting a
+download from nexus and using the image tagged as latest.
+
+> When using the export command way, keep in mind that all policy images will need to be available
+> locally.
## Docker image versions
@@ -82,7 +84,8 @@ Use ``docker compose logs`` or `docker logs ${container_name}` instructions on h
## Uninstall
-Simply run the ``stop-compose.sh`` script. This will also generate logs from the services started with compose.
+Simply run the ``stop-compose.sh`` script. This will also generate logs from the services started
+with compose.
```sh
./stop-compose.sh
@@ -90,8 +93,8 @@ Simply run the ``stop-compose.sh`` script. This will also generate logs from the
## Database support
-From Oslo version onwards, this docker compose setup uses Postgres database as default; MariaDB is still available,
-but support might be limited.
+From Oslo version onwards, this docker compose setup uses Postgres database as default; MariaDB is
+still available, but support might be limited.
To start docker compose with MariaDB, add a flag to use it:
@@ -104,16 +107,16 @@ To start docker compose with MariaDB, add a flag to use it:
### Docker compose files
-To make it easier and clear how the docker compose system works, there are three files describing the services
+To make it easier and clear how the docker compose system works, there are three files describing
+the services:
- compose.common.yml
- - Has policy services that don't connect directly to database: apex-pdp and distribution
- Simulator service
- - ACM-R Participants that don't connect directly to database.
+ - ACM-R Participants that don't connect directly to database
- Messaging services (kafka, zookeeper)
- Metrics services (prometheus, grafana, jaeger)
- compose.postgres.yml
- - All policy services that connect directly to database with Postgres configurations
- Postgres database and policy-db-migrator working towards it
- compose.mariadb.yml
- - All policy services that connect directly to database with MariaDB configurations
- MariaDB database and policy-db-migrator working towards it
+- compose.yml
+ - All the policy components.
diff --git a/compose/get-versions.sh b/compose/get-versions.sh
index 0c2ef30e..9d9b42b7 100755
--- a/compose/get-versions.sh
+++ b/compose/get-versions.sh
@@ -1,7 +1,7 @@
#! /bin/bash
# ============LICENSE_START====================================================
# Copyright (C) 2020-2021 AT&T Intellectual Property. All rights reserved.
-# Modification Copyright 2021-2024 Nordix Foundation.
+# Modification Copyright 2021-2025 Nordix Foundation.
# Modifications Copyright (C) 2021 Bell Canada. All rights reserved.
# Modifications Copyright 2024-2025 Deutsche Telekom
# =============================================================================
@@ -25,8 +25,6 @@ if [ -z "${WORKSPACE}" ]; then
export WORKSPACE
fi
-LOCAL_IMAGES=false
-
#default values
export POLICY_MARIADB_VER=10.10.2
echo POLICY_MARIADB_VER=${POLICY_MARIADB_VER}
@@ -34,7 +32,7 @@ echo POLICY_MARIADB_VER=${POLICY_MARIADB_VER}
export POLICY_POSTGRES_VER=16.4
echo POLICY_POSTGRES_VER=${POLICY_POSTGRES_VER}
-if [ -n "$LOCAL_IMAGES" ] && [ "$LOCAL_IMAGES" = "true" ]; then
+if [ -n "${USE_LOCAL_IMAGES}" ] && [ "${USE_LOCAL_IMAGES}" = "true" ]; then
echo "Running with local images."
export POLICY_DOCKER_VERSION="latest"
export POLICY_MODELS_VERSION="latest"
@@ -51,6 +49,7 @@ if [ -n "$LOCAL_IMAGES" ] && [ "$LOCAL_IMAGES" = "true" ]; then
export CONTAINER_LOCATION=""
else
+ echo "Downloading latest released images..."
export CONTAINER_LOCATION="nexus3.onap.org:10001/"
GERRIT_BRANCH=$(awk -F= '$1 == "defaultbranch" { print $2 }' \
"${WORKSPACE}"/.gitreview)
diff --git a/compose/start-compose.sh b/compose/start-compose.sh
index 013aba6f..115b30e4 100755
--- a/compose/start-compose.sh
+++ b/compose/start-compose.sh
@@ -1,7 +1,7 @@
#!/bin/bash
#
# ============LICENSE_START====================================================
-# Copyright (C) 2022-2024 Nordix Foundation.
+# Copyright (C) 2022-2025 Nordix Foundation.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -53,6 +53,10 @@ do
database=postgres
shift
;;
+ --local)
+ export USE_LOCAL_IMAGES=true
+ shift
+ ;;
*)
component="$1"
shift
diff --git a/csit/README.md b/csit/README.md
new file mode 100644
index 00000000..f0c44823
--- /dev/null
+++ b/csit/README.md
@@ -0,0 +1,43 @@
+# Running Policy Framework CSIT
+
+## Using Docker Compose environment
+
+Policy Framework Continuous System and Integration Tests are executed daily on jenkins jobs
+targeting master branch. The runs are available at https://jenkins.onap.org/view/policy/
+
+The CSIT suites are also used by developers as another guarantee that new code or changes
+delivered on main code do not affect the expected behaviour for the already delivered
+functionalities or new tests are added when a new functionality is added.
+
+To execute the tests on a local environment, the steps are the following:
+
+> all the instructions assume docker repository was cloned to /git/policy/docker
+
+- after cloning the project, go to ../docker/csit
+- to run a test, execute the run-project-csit.sh script
+
+`./run-project-csit.sh <policy-component>`
+
+The options for `<policy-component>` are:
+- api
+- pap
+- apex-pdp
+- clamp (for runtime-acm and participants)
+- drools-pdp
+- drools-applications
+- xacml-pdp
+- distribution
+- opa-pdp
+
+The command above will download the latest SNAPSHOT version available for the policy-component
+being tested. Version is collected from [PF Release Data](https://github.com/onap/policy-parent/blob/master/integration/src/main/resources/release/pf_release_data.csv)
+
+To start the containers with images generated in local environment, the script can be run with the
+flag `--local`
+
+`./run-project-csit.sh api --local`
+
+The command above will start the docker containers for `policy-api` and `policy-db-migrator` using
+the latest image created at the local environment. When using the flag `--local` it will look for
+all the policy components needed for the test suites to be executed. The support services like
+PostgreSQL, Kafka, Prometheus, Grafana will always be downloaded if not present.
diff --git a/csit/resources/scripts/build-csit-docker-image.sh b/csit/resources/scripts/build-csit-docker-image.sh
index c80afc4f..8b5aa7d6 100755
--- a/csit/resources/scripts/build-csit-docker-image.sh
+++ b/csit/resources/scripts/build-csit-docker-image.sh
@@ -1,6 +1,6 @@
#!/bin/bash -x
#
-# Copyright 2024 Nordix Foundation.
+# Copyright 2024-2025 Nordix Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -37,7 +37,6 @@ fi
GERRIT_BRANCH=$(awk -F= '$1 == "defaultbranch" { print $2 }' "${WORKSPACE}"/.gitreview)
export ROBOT_DOCKER_IMAGE="policy-csit-robot"
-echo "Build docker image for robot framework"
cd ${WORKSPACE}/csit/resources || exit
docker image rm -f ${ROBOT_DOCKER_IMAGE}
diff --git a/csit/resources/scripts/get-cluster-info.sh b/csit/resources/scripts/get-cluster-info.sh
index 92e92ee8..75fe7193 100755
--- a/csit/resources/scripts/get-cluster-info.sh
+++ b/csit/resources/scripts/get-cluster-info.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# ============LICENSE_START=======================================================
-# Copyright (C) 2023-2024 Nordix Foundation. All rights reserved.
+# Copyright (C) 2023-2025 Nordix Foundation. All rights reserved.
# Modifications Copyright © 2024 Deutsche Telekom
# ================================================================================
#
@@ -39,6 +39,10 @@ export OPA_PORT=30012
export SIMULATOR_PORT=30904
# Retrieve pod names
+function get_pod_name() {
+ microk8s kubectl get pods --no-headers -o custom-columns=':metadata.name' | grep $1
+}
+
function get_pod_names() {
export APEX_POD=$(get_pod_name apex)
export PAP_POD=$(get_pod_name pap)
@@ -56,6 +60,10 @@ function get_pod_names() {
}
# Retrieve service names
+function get_svc_name() {
+ microk8s kubectl get svc --no-headers -o custom-columns=':metadata.name' | grep $1
+}
+
function get_svc_names() {
export APEX_SVC=$(get_svc_name policy-apex-pdp)
export PAP_SVC=$(get_svc_name policy-pap)
@@ -72,47 +80,11 @@ function get_svc_names() {
export POLICY_K8S_SVC=$(get_svc_name policy-clamp-ac-k8s-ppnt)
}
-# Expose services in order to perform tests from JMeter
-function expose_services() {
- expose_service $APEX_SVC
- expose_service $PAP_SVC
- expose_service $API_SVC
- expose_service $XACML_SVC
- expose_service_opa_pdp $OPA_SVC
- expose_service $DROOLS_SVC
- expose_service $DIST_SVC
- expose_service $ACM_SVC
- expose_service $POLICY_PPNT_SVC
- expose_service $POLICY_HTTP_SVC
- expose_service $POLICY_SIM_SVC
- expose_service $POLICY_K8S_SVC
-
- setup_message_router_svc
- sleep 2
- patch_ports
-}
-
-function get_pod_name() {
- microk8s kubectl get pods --no-headers -o custom-columns=':metadata.name' | grep $1
-}
-
-function get_svc_name() {
- microk8s kubectl get svc --no-headers -o custom-columns=':metadata.name' | grep $1
-}
-
-function expose_service_opa_pdp() {
- microk8s kubectl expose service $1 --name $1"-svc" --type NodePort --protocol TCP --port 8282 --target-port 8282
-}
-
-function expose_service() {
- microk8s kubectl expose service $1 --name $1"-svc" --type NodePort --protocol TCP --port 6969 --target-port 6969
-}
-
+# Assign set port values
function patch_port() {
microk8s kubectl patch service "$1-svc" --namespace=default --type='json' --patch='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value":'"$2"'}]'
}
-# Assign set port values
function patch_ports() {
patch_port "$APEX_SVC" $APEX_PORT
patch_port "$API_SVC" $API_PORT
@@ -133,6 +105,34 @@ function setup_message_router_svc() {
microk8s kubectl patch service message-router-svc --namespace=default --type='json' --patch='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value":'"$SIMULATOR_PORT"'}]'
}
+# Expose services in order to perform tests from JMeter
+function expose_service() {
+ microk8s kubectl expose service $1 --name $1"-svc" --type NodePort --protocol TCP --port 6969 --target-port 6969
+}
+
+function expose_service_opa_pdp() {
+ microk8s kubectl expose service $1 --name $1"-svc" --type NodePort --protocol TCP --port 8282 --target-port 8282
+}
+
+function expose_services() {
+ expose_service $APEX_SVC
+ expose_service $PAP_SVC
+ expose_service $API_SVC
+ expose_service $XACML_SVC
+ expose_service $DROOLS_SVC
+ expose_service $DIST_SVC
+ expose_service $ACM_SVC
+ expose_service $POLICY_PPNT_SVC
+ expose_service $POLICY_HTTP_SVC
+ expose_service $POLICY_SIM_SVC
+ expose_service $POLICY_K8S_SVC
+ expose_service_opa_pdp $OPA_SVC
+
+ setup_message_router_svc
+ sleep 2
+ patch_ports
+}
+
####MAIN###
get_pod_names
get_svc_names
diff --git a/csit/resources/scripts/wait_for_rest.sh b/csit/resources/scripts/wait_for_rest.sh
index 9732bc54..b51a7fe0 100755
--- a/csit/resources/scripts/wait_for_rest.sh
+++ b/csit/resources/scripts/wait_for_rest.sh
@@ -1,6 +1,6 @@
#!/bin/sh
# ============LICENSE_START====================================================
-# Copyright (C) 2023-2024 Nordix Foundation.
+# Copyright (C) 2023-2025 Nordix Foundation.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -49,7 +49,7 @@ do
export port="$2"
shift
shift
- echo "Waiting for REST to come up on $host port $port..."
+ echo "Checking if REST port $port is open on $host ..."
while [ "$tmout" -gt 0 ]
do
if command -v docker > /dev/null 2>&1
@@ -67,7 +67,7 @@ do
fi
done
if [ $rc -ne 0 ]; then
- echo "$host port $port REST cannot be detected"
+ echo "REST port $port cannot be detected on host $host"
exit $rc
fi
done
diff --git a/csit/resources/tests/apex-slas-3.robot b/csit/resources/tests/apex-slas-3.robot
index 1ceb9b77..c8fc2582 100644
--- a/csit/resources/tests/apex-slas-3.robot
+++ b/csit/resources/tests/apex-slas-3.robot
@@ -17,7 +17,7 @@ Healthcheck
Set Suite Variable ${pdpName} ${resp.json()['name']}
ValidatePolicyExecutionAndEventRateLowComplexity
- [Documentation] Validate that a moderate complexity policity can be executed in less than 100ms and minimum 30 events triggered per second
+ [Documentation] Validate that a moderate complexity policy can be executed in less than 100ms and minimum 30 events triggered per second
Set Test Variable ${policyName} onap.policies.apex.pnf.Test
${postjson}= Get File ${CURDIR}/data/${policyName}.json
CreatePolicySuccessfully /policy/api/v1/policytypes/onap.policies.native.Apex/versions/1.0.0/policies ${postjson} ${policyName} 1.0.0
@@ -31,7 +31,7 @@ ValidatePolicyExecutionAndEventRateLowComplexity
ValidateEventExecution ${eventStartTime} ${eventEndTime} 30
ValidatePolicyExecutionAndEventRateHighComplexity
- [Documentation] Validate that a high complexity policity can be executed in less than 5000ms and minimum 0.6 events triggered per second
+ [Documentation] Validate that a high complexity policy can be executed in less than 5000ms and minimum 0.6 events triggered per second
Set Test Variable ${policyName} onap.policies.apex.pnf.metadataSet.Test
${postjson}= Get File ${CURDIR}/data/${policyName}.json
CreatePolicySuccessfully /policy/api/v1/policytypes/onap.policies.native.Apex/versions/1.0.0/policies ${postjson} ${policyName} 1.0.0
@@ -47,7 +47,7 @@ ValidatePolicyExecutionAndEventRateHighComplexity
ValidateEventExecution ${eventStartTime} ${eventEndTime} 0.6
ValidatePolicyExecutionAndEventRateModerateComplexity
- [Documentation] Validate that a low complexity policity can be executed in less than 1000ms and minimum 3 events triggered per second
+ [Documentation] Validate that a low complexity policy can be executed in less than 1000ms and minimum 3 events triggered per second
Set Test Variable ${policyName} onap.policies.native.apex.Sampledomain
${postjson}= Get File ${CURDIR}/data/${policyName}.json
CreatePolicySuccessfully /policy/api/v1/policytypes/onap.policies.native.Apex/versions/1.0.0/policies ${postjson} ${policyName} 1.0.0
diff --git a/csit/run-k8s-csit.sh b/csit/run-k8s-csit.sh
index 3f6e5a44..7d93fa8d 100755
--- a/csit/run-k8s-csit.sh
+++ b/csit/run-k8s-csit.sh
@@ -1,10 +1,9 @@
#!/bin/bash
#
# ============LICENSE_START====================================================
-# Copyright (C) 2022-2024 Nordix Foundation.
+# Copyright (C) 2022-2025 Nordix Foundation.
# Modifications Copyright © 2024 Deutsche Telekom
# =============================================================================
-#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -170,6 +169,32 @@ function teardown_cluster() {
echo "MicroK8s Cluster removed"
}
+function clone_models() {
+
+ # download models examples
+ git clone -b "${GERRIT_BRANCH}" --single-branch https://github.com/onap/policy-models.git "${WORKSPACE}"/csit/resources/tests/models
+
+ # create a couple of variations of the policy definitions
+ sed -e 's!Measurement_vGMUX!ADifferentValue!' \
+ tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.json \
+ >tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.v1_2.json
+
+ sed -e 's!"version": "1.0.0"!"version": "2.0.0"!' \
+ -e 's!"policy-version": 1!"policy-version": 2!' \
+ tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.json \
+ >tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.v2.json
+}
+
+function copy_csar_file() {
+ zip -F ${DISTRIBUTION_CSAR}/sample_csar_with_apex_policy.csar \
+ --out ${DISTRIBUTION_CSAR}/csar_temp.csar -q
+ # Remake temp directory
+ sudo rm -rf "${DIST_TEMP_FOLDER}"
+ sudo mkdir "${DIST_TEMP_FOLDER}"
+ sudo cp ${DISTRIBUTION_CSAR}/csar_temp.csar ${DISTRIBUTION_CSAR}/temp.csar
+ sudo mv ${DISTRIBUTION_CSAR}/temp.csar ${DIST_TEMP_FOLDER}/sample_csar_with_apex_policy.csar
+}
+
function build_robot_image() {
echo "Build docker image for robot framework"
cd ${WORKSPACE}/csit/resources || exit
@@ -186,6 +211,37 @@ function build_robot_image() {
echo "---------------------------------------------"
}
+function push_acelement_chart() {
+ echo "Pushing acelement chart to the chartmuseum repo..."
+ helm repo add policy-chartmuseum http://localhost:30208
+
+ # download clamp repo
+ git clone -b "${GERRIT_BRANCH}" --single-branch https://github.com/onap/policy-clamp.git "${WORKSPACE}"/csit/resources/tests/clamp
+ ACELEMENT_CHART=${WORKSPACE}/csit/resources/tests/clamp/examples/src/main/resources/clamp/acm/acelement-helm/acelement
+ helm cm-push $ACELEMENT_CHART policy-chartmuseum
+ helm repo update
+ rm -rf ${WORKSPACE}/csit/resources/tests/clamp/
+ echo "-------------------------------------------"
+}
+
+function print_robot_log() {
+ count_pods=0
+ while [[ ${count_pods} -eq 0 ]]; do
+ echo "Waiting for pods to come up..."
+ sleep 5
+ count_pods=$(kubectl get pods --output name | wc -l)
+ done
+ robotpod=$(kubectl get po | grep policy-csit)
+ podName=$(echo "$robotpod" | awk '{print $1}')
+ echo "The robot tests will begin once the policy components {${READINESS_CONTAINERS[*]}} are up and running..."
+ kubectl wait --for=jsonpath='{.status.phase}'=Running --timeout=18m pod/"$podName"
+ echo "Policy deployment status:"
+ kubectl get po
+ kubectl get all -A
+ echo "Robot Test logs:"
+ kubectl logs -f "$podName"
+}
+
function start_csit() {
build_robot_image
if [ "${?}" -eq 0 ]; then
@@ -212,48 +268,14 @@ function start_csit() {
fi
}
-function print_robot_log() {
- count_pods=0
- while [[ ${count_pods} -eq 0 ]]; do
- echo "Waiting for pods to come up..."
- sleep 5
- count_pods=$(kubectl get pods --output name | wc -l)
- done
- robotpod=$(kubectl get po | grep policy-csit)
- podName=$(echo "$robotpod" | awk '{print $1}')
- echo "The robot tests will begin once the policy components {${READINESS_CONTAINERS[*]}} are up and running..."
- kubectl wait --for=jsonpath='{.status.phase}'=Running --timeout=18m pod/"$podName"
- echo "Policy deployment status:"
- kubectl get po
- kubectl get all -A
- echo "Robot Test logs:"
- kubectl logs -f "$podName"
-}
-
-function clone_models() {
-
- # download models examples
- git clone -b "${GERRIT_BRANCH}" --single-branch https://github.com/onap/policy-models.git "${WORKSPACE}"/csit/resources/tests/models
-
- # create a couple of variations of the policy definitions
- sed -e 's!Measurement_vGMUX!ADifferentValue!' \
- tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.json \
- >tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.v1_2.json
-
- sed -e 's!"version": "1.0.0"!"version": "2.0.0"!' \
- -e 's!"policy-version": 1!"policy-version": 2!' \
- tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.json \
- >tests/models/models-examples/src/main/resources/policies/vCPE.policy.monitoring.input.tosca.v2.json
-}
-
-function copy_csar_file() {
- zip -F ${DISTRIBUTION_CSAR}/sample_csar_with_apex_policy.csar \
- --out ${DISTRIBUTION_CSAR}/csar_temp.csar -q
- # Remake temp directory
- sudo rm -rf "${DIST_TEMP_FOLDER}"
- sudo mkdir "${DIST_TEMP_FOLDER}"
- sudo cp ${DISTRIBUTION_CSAR}/csar_temp.csar ${DISTRIBUTION_CSAR}/temp.csar
- sudo mv ${DISTRIBUTION_CSAR}/temp.csar ${DIST_TEMP_FOLDER}/sample_csar_with_apex_policy.csar
+function install_chartmuseum () {
+ echo "---------------------------------------------"
+ echo "Installing Chartmuseum helm repository..."
+ helm repo add chartmuseum-git https://chartmuseum.github.io/charts
+ helm repo update
+ helm install policy-chartmuseum chartmuseum-git/chartmuseum --set env.open.DISABLE_API=false --set service.type=NodePort --set service.nodePort=30208
+ helm plugin install https://github.com/chartmuseum/helm-push
+ echo "---------------------------------------------"
}
function set_project_config() {
@@ -328,36 +350,13 @@ function set_project_config() {
}
-function install_chartmuseum () {
- echo "---------------------------------------------"
- echo "Installing Chartmuseum helm repository..."
- helm repo add chartmuseum-git https://chartmuseum.github.io/charts
- helm repo update
- helm install policy-chartmuseum chartmuseum-git/chartmuseum --set env.open.DISABLE_API=false --set service.type=NodePort --set service.nodePort=30208
- helm plugin install https://github.com/chartmuseum/helm-push
- echo "---------------------------------------------"
-}
-
-function push_acelement_chart() {
- echo "Pushing acelement chart to the chartmuseum repo..."
- helm repo add policy-chartmuseum http://localhost:30208
-
- # download clamp repo
- git clone -b "${GERRIT_BRANCH}" --single-branch https://github.com/onap/policy-clamp.git "${WORKSPACE}"/csit/resources/tests/clamp
- ACELEMENT_CHART=${WORKSPACE}/csit/resources/tests/clamp/examples/src/main/resources/clamp/acm/acelement-helm/acelement
- helm cm-push $ACELEMENT_CHART policy-chartmuseum
- helm repo update
- rm -rf ${WORKSPACE}/csit/resources/tests/clamp/
- echo "-------------------------------------------"
-}
-
function get_pod_name() {
pods=$(kubectl get pods --no-headers -o custom-columns=':metadata.name' | grep $1)
read -rd '' -a pod_array <<< "$pods"
echo "${pod_array[@]}"
}
-wait_for_pods_running() {
+function wait_for_pods_running() {
local namespace="$1"
shift
local timeout_seconds="$1"
diff --git a/csit/run-project-csit.sh b/csit/run-project-csit.sh
index 1c5c2f12..9b2c7695 100755
--- a/csit/run-project-csit.sh
+++ b/csit/run-project-csit.sh
@@ -3,7 +3,7 @@
# Copyright 2016-2017 Huawei Technologies Co., Ltd.
# Modification Copyright 2019 © Samsung Electronics Co., Ltd.
# Modification Copyright 2021 © AT&T Intellectual Property.
-# Modification Copyright 2021-2024 Nordix Foundation.
+# Modification Copyright 2021-2025 Nordix Foundation.
# Modifications Copyright 2024 Deutsche Telekom
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,20 +19,26 @@
# limitations under the License.
#
+SKIP_BUILDING_ROBOT_IMG=false
+DO_NOT_TEARDOWN=false
+
# even with forced finish, clean up docker containers
function on_exit(){
rm -rf ${CSAR_DIR}/csar_temp.csar
- # teardown of compose containers for acm-replicas doesn't work with normal stop-compose script
- if [ "${ACM_REPLICA_TEARDOWN}" = true ]; then
- source ${DOCKER_COMPOSE_DIR}/start-acm-replica.sh --stop --replicas=2
- elif [ "${APEX_REPLICA_TEARDOWN}" = true ]; then
- source ${DOCKER_COMPOSE_DIR}/start-multiple-pdp.sh --stop --replicas=2
- else
- source ${DOCKER_COMPOSE_DIR}/stop-compose.sh ${PROJECT}
+ if [ "${DO_NOT_TEARDOWN}" = false ]; then
+ # teardown of compose containers for acm-replicas doesn't work with normal stop-compose script
+ if [ "${ACM_REPLICA_TEARDOWN}" = true ]; then
+ source ${DOCKER_COMPOSE_DIR}/start-acm-replica.sh --stop --replicas=2
+ elif [ "${APEX_REPLICA_TEARDOWN}" = true ]; then
+ source ${DOCKER_COMPOSE_DIR}/start-multiple-pdp.sh --stop --replicas=2
+ else
+ source ${DOCKER_COMPOSE_DIR}/stop-compose.sh ${PROJECT}
+ fi
+
+ mv ${DOCKER_COMPOSE_DIR}/*.log ${ROBOT_LOG_DIR}
fi
- mv ${DOCKER_COMPOSE_DIR}/*.log ${ROBOT_LOG_DIR}
exit $RC
}
@@ -91,7 +97,8 @@ function check_rest_endpoint() {
function setup_clamp() {
export ROBOT_FILES="policy-clamp-test.robot clamp-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh policy-clamp-runtime-acm --grafana
- sleep 30
+ echo "Waiting 2 minutes acm-runtime and participants to start..."
+ sleep 120
check_rest_endpoint "${ACM_PORT}"
}
@@ -101,8 +108,8 @@ function setup_clamp_replica() {
export TEST_ENV="docker"
export PROJECT=clamp
source ${DOCKER_COMPOSE_DIR}/start-acm-replica.sh --start --replicas=2
- echo "Waiting a minute for the replicas to be started..."
- sleep 60
+ echo "Waiting 2 minutes for the replicas to be started..."
+ sleep 120
# checking on apex-pdp status because acm-r replicas only start after apex-pdp is running
check_rest_endpoint ${PAP_PORT}
check_rest_endpoint ${APEX_PORT}
@@ -113,31 +120,37 @@ function setup_clamp_replica() {
function setup_api() {
export ROBOT_FILES="api-test.robot api-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh api --grafana
- sleep 10
+ echo "Waiting 1 minute for policy-api to start..."
+ sleep 60
check_rest_endpoint ${API_PORT}
}
function setup_pap() {
export ROBOT_FILES="pap-test.robot pap-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh apex-pdp --grafana
- sleep 10
+ echo "Waiting 1 minute for policy-pap to start..."
+ sleep 60
check_rest_endpoint ${PAP_PORT}
+ check_rest_endpoint ${APEX_PORT}
+ apex_healthcheck
}
function setup_apex() {
export ROBOT_FILES="apex-pdp-test.robot apex-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh apex-pdp --grafana
- sleep 10
+ echo "Waiting 1 minute for apex-pdp to start..."
+ sleep 60
check_rest_endpoint ${PAP_PORT}
check_rest_endpoint ${APEX_PORT}
apex_healthcheck
}
function setup_apex_medium() {
- export SUITES="apex-slas-3.robot"
+ export ROBOT_FILES="apex-slas-3.robot"
export APEX_REPLICA_TEARDOWN=true
source ${DOCKER_COMPOSE_DIR}/start-multiple-pdp.sh --start --replicas=3
- sleep 10
+ echo "Waiting 1 minute for apex-pdp to start..."
+ sleep 60
check_rest_endpoint ${PAP_PORT}
check_rest_endpoint ${APEX_PORT}
apex_healthcheck
@@ -147,7 +160,8 @@ function setup_apex_large() {
export ROBOT_FILES="apex-slas-10.robot"
export APEX_REPLICA_TEARDOWN=true
source ${DOCKER_COMPOSE_DIR}/start-multiple-pdp.sh --start --replicas=10
- sleep 10
+ echo "Waiting 1 minute for apex-pdp to start..."
+ sleep 60
check_rest_endpoint ${PAP_PORT}
check_rest_endpoint ${APEX_PORT}
apex_healthcheck
@@ -156,33 +170,35 @@ function setup_apex_large() {
function setup_drools_apps() {
export ROBOT_FILES="drools-applications-test.robot drools-applications-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh drools-applications --grafana
- sleep 10
+ echo "Waiting 1 minute for drools-pdp and drools-applications to start..."
+ sleep 60
check_rest_endpoint ${PAP_PORT}
- sleep 10
check_rest_endpoint ${DROOLS_APPS_PORT}
- sleep 10
check_rest_endpoint ${DROOLS_APPS_TELEMETRY_PORT}
}
function setup_xacml_pdp() {
export ROBOT_FILES="xacml-pdp-test.robot xacml-pdp-slas.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh xacml-pdp --grafana
- sleep 10
+ echo "Waiting 1 minute for xacml-pdp to start..."
+ sleep 60
check_rest_endpoint "${XACML_PORT}"
}
function setup_opa_pdp() {
export ROBOT_FILES="opa-pdp-test.robot"
export PROJECT="opa-pdp"
- source ${DOCKER_COMPOSE_DIR}/start-compose.sh opa-pdp --grafana
+ source ${DOCKER_COMPOSE_DIR}/start-compose.sh opa-pdp
+ echo "Waiting 3 minutes for OPA-PDP to start..."
sleep 180
- bash ${SCRIPTS}/wait_for_rest.sh localhost "${OPA_PDP_PORT}"
+ check_rest_endpoint "${OPA_PDP_PORT}"
}
function setup_drools_pdp() {
export ROBOT_FILES="drools-pdp-test.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh drools-pdp --grafana
- sleep 30
+ echo "Waiting 1 minute for drools-pdp to start..."
+ sleep 60
check_rest_endpoint ${DROOLS_TELEMETRY_PORT}
}
@@ -195,8 +211,11 @@ function setup_distribution() {
export ROBOT_FILES="distribution-test.robot"
source ${DOCKER_COMPOSE_DIR}/start-compose.sh distribution --grafana
- sleep 10
+ echo "Waiting 1 minute for distribution to start..."
+ sleep 60
check_rest_endpoint "${DIST_PORT}"
+ check_rest_endpoint ${APEX_PORT}
+ apex_healthcheck
}
function build_robot_image() {
@@ -233,10 +252,6 @@ function set_project_config() {
setup_apex
;;
- apex-pdp-postgres | policy-apex-pdp-postgres)
- setup_apex
- ;;
-
apex-pdp-medium | policy-apex-pdp-medium)
setup_apex_medium
;;
@@ -275,6 +290,33 @@ function set_project_config() {
# ensure that teardown and other finalizing steps are always executed
trap on_exit EXIT
+# start the script
+
+# Parse the command-line arguments
+while [[ $# -gt 0 ]]
+do
+ key="$1"
+
+ case $key in
+ --skip-build-csit)
+ export SKIP_BUILDING_ROBOT_IMG=true
+ shift
+ ;;
+ --local)
+ export USE_LOCAL_IMAGES=true
+ shift
+ ;;
+ --no-exit)
+ export DO_NOT_TEARDOWN=true
+ shift
+ ;;
+ *)
+ export PROJECT="${1}"
+ shift
+ ;;
+ esac
+done
+
# setup all directories used for test resources
if [ -z "${WORKSPACE}" ]; then
WORKSPACE=$(git rev-parse --show-toplevel)
@@ -282,7 +324,6 @@ if [ -z "${WORKSPACE}" ]; then
fi
export GERRIT_BRANCH=$(awk -F= '$1 == "defaultbranch" { print $2 }' "${WORKSPACE}"/.gitreview)
-export PROJECT="${1}"
export ROBOT_LOG_DIR="${WORKSPACE}/csit/archives/${PROJECT}"
export SCRIPTS="${WORKSPACE}/csit/resources/scripts"
export CSAR_DIR="${WORKSPACE}/csit/resources/tests/data/csar"
@@ -319,7 +360,7 @@ unset http_proxy https_proxy
export ROBOT_FILES
# use a separate script to build a CSIT docker image, to isolate the test run
-if [ "${2}" == "--skip-build-csit" ]; then
+if [ "${SKIP_BUILDING_ROBOT_IMG}" == "true" ]; then
echo "Skipping build csit robot image"
else
build_robot_image