aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFilip Krzywka <filip.krzywka@nokia.com>2019-01-10 07:59:56 +0100
committerFilip Krzywka <filip.krzywka@nokia.com>2019-01-10 08:43:59 +0100
commit23ebedbae53f8cb91332744133c99ef502c595ec (patch)
treebbf54ff9d6d2ec83a8b786742e16fb81236d9b49
parent867f482cbb9241df483b815c16be01d9ff391f09 (diff)
Update development scripts
This changeset is mostly follow-up after merges in DCAEGEN2-1063:
- new options for hv-ves hostname and port in xnf simulations scripts
- new option for disabling SSL in xnf simulations scripts
- fail start-simulation script on not completing all simulations
- use docker-healthchecks to determine xNF simulator readiness in development scripts
- fix logging in xNF simulator on closing connection
- minor corrections in logs and usage for development scripts

Change-Id: Ic939a139fdaf86ad487e0b31d6abcfc6c8a28885
Issue-ID: DCAEGEN2-1063
Signed-off-by: Filip Krzywka <filip.krzywka@nokia.com>
-rwxr-xr-xdevelopment/bin/run-xnf-simulator.sh39
-rwxr-xr-xdevelopment/bin/start-simulation.sh66
-rw-r--r--sources/hv-collector-xnf-simulator/src/main/kotlin/org/onap/dcae/collectors/veshv/simulators/xnf/impl/adapters/VesHvClient.kt2
3 files changed, 76 insertions, 31 deletions
diff --git a/development/bin/run-xnf-simulator.sh b/development/bin/run-xnf-simulator.sh
index d8de0097..e4d8d94a 100755
--- a/development/bin/run-xnf-simulator.sh
+++ b/development/bin/run-xnf-simulator.sh
@@ -21,7 +21,13 @@ set -euo pipefail
usage() {
echo "Start xnf-simulator container on given port and inside of given docker-network"
- echo "Usage: $0 [-h|--help] [-v|--verbose] <xnf listen port> [<hv ves docker network>]"
+ echo "Usage: $0 [-h|--help] [-v|--verbose] [--ssl-disable] <xnf listen port> [<hv ves hostname> <hv ves port> <hv ves docker network>]"
+ echo ""
+ echo "Optional parameters:"
+ echo " - ssl-disable : Should xNF simulator be configured without using SSL/TLS connections"
+ echo "Default values:"
+ echo " - hv ves hostname: ves-hv-collector"
+ echo " - hv ves port: 6061"
exit 1
}
@@ -32,6 +38,8 @@ while getopts "$optspec" arg; do
case "${OPTARG}" in
verbose)
VERBOSE=True ;;
+ ssl-disable)
+ SSL_DISABLE=True ;;
help)
usage ;;
*)
@@ -53,27 +61,42 @@ shift $((OPTIND-1))
LISTEN_PORT=$1
-if [ $# -gt 1 ]; then
- HV_VES_NETWORK=${2}
+HV_VES_HOSTNAME=${2:-ves-hv-collector}
+HV_VES_PORT=${3:-6061}
+if [ $# -gt 3 ]; then
+ HV_VES_NETWORK=${4}
fi
PORTS="${LISTEN_PORT}:${LISTEN_PORT}/tcp"
HV_VES_REPO_HOME=$(realpath $(dirname "$0"))/..
+if [ -n "${SSL_DISABLE+x}" ]; then
+ SSL_CONFIGURATION="--ssl-disable"
+else
+ SSL_CONFIGURATION="--key-store-password onaponap --trust-store-password onaponap"
+fi
+
if [ -n "${VERBOSE+x}" ]; then
- echo "Starting xnf-simulator with ports configuration: ${PORTS}"
+ echo "Starting xnf-simulator with "
+ echo " - ports configuration: ${PORTS}"
+ echo " - SSL configuration: ${SSL_CONFIGURATION}"
echo "Container id:"
fi
+
XNF_CONTAINER_ID=$(docker run -d \
-v ${HV_VES_REPO_HOME}/ssl/:/etc/ves-hv/ \
+ --health-cmd='curl -s -f http://localhost:6063/health/ready || exit 1' \
+ --health-interval=5s \
+ --health-retries=3 \
+ --health-start-period='10s' \
-p ${PORTS} \
onap/org.onap.dcaegen2.collectors.hv-ves.hv-collector-xnf-simulator \
--listen-port ${LISTEN_PORT} \
- --ves-host ves-hv-collector \
- --ves-port 6061 \
- --key-store-password onaponap \
- --trust-store-password onaponap)
+ --health-check-api-port 6063 \
+ --ves-host ${HV_VES_HOSTNAME} \
+ --ves-port ${HV_VES_PORT} \
+ ${SSL_CONFIGURATION})
echo $XNF_CONTAINER_ID
diff --git a/development/bin/start-simulation.sh b/development/bin/start-simulation.sh
index dfb63e65..8c63ddbb 100755
--- a/development/bin/start-simulation.sh
+++ b/development/bin/start-simulation.sh
@@ -23,21 +23,25 @@ set -euo pipefail
function usage() {
echo ""
echo "Send messages to hv-ves from multiple xNF simulators"
- echo "Usage: $0 [-h|--help] [-v|--verbose] [--messages-in-batch] [--docker-network] [--xnf-logs-directory]"
+ echo "Usage: $0 [-h|--help] [-v|--verbose] [--ssl-disable]"
+ echo " [--messages-in-batch=ARG] [--docker-network=ARG] [--xnf-logs-directory=ARG]"
echo " <hv ves hostname> <hv ves port> <simulators amount> <messages batches amount per simulator> <messages sending interval>"
echo ""
echo " - hv ves hostname : HighVolume VES Collector network hostname"
echo " - hv ves port : HighVolume VES Collector network port"
echo " - simulators amount : Amount of xNF simulators to be launched"
- echo " - messages amount per simulator : Amount of messages to be sent from each xNF simulator to HV-VES"
- echo " - messages sending interval : interval in seconds between sending messages from xNFs"
+ echo " - messages batches amount per simulator : Amount of batches of messages to be sent from each xNF simulator to HV-VES"
+ echo " - messages sending interval : interval in seconds between sending batches of messages from xNFs"
echo "Optional parameters:"
+ echo " - ssl-disable : Should xNF simulator be configured without using SSL/TLS connections"
echo " - messages-in-batch : Amount of messages sent on each request"
echo " - docker-network : Docker network to which xNF simulators should be added"
echo " - xnf-logs-directory : Path to directory where logs from all xNF simulators should be stored"
echo "Example invocations:"
echo "./start-simulation.sh --messages-in-batch=5 --docker-network=development_default ves-hv-collector 6061 10 20 0.5"
echo "./start-simulation.sh --messages-in-batch=5 --xnf-logs-directory=/tmp/xnf-simulation localhost 6061 10 20 0.5"
+ echo "Invocation with remote HV-VES host (Kubernetes slave IP given with default K8S NodePort for HV-VES service):"
+ echo "./start-simulation.sh --ssl-disable --xnf-logs-directory=/tmp/xnf-simulation 10.183.36.78 30222 5 100 5"
exit 1
}
@@ -56,10 +60,12 @@ function create_logs_dir() {
}
function create_xNFs_simulators() {
+ echo "Creating ${XNFS_AMOUNT} xNFs simulators"
+ [ -n "${SSL_DISABLE+x}" ] && verbose_log "--ssl-disable flag will be set inside containers."
for i in $(seq 1 ${XNFS_AMOUNT}); do
local XNF_PORT=$(get_unoccupied_port 32000 65000)
verbose_log "Starting xNF simulator container on port ${XNF_PORT} using run-xnf-simulator script"
- XNF_CONTAINER_ID=$(${DEVELOPMENT_BIN_DIRECTORY}/run-xnf-simulator.sh $XNF_PORT ${DOCKER_NETWORK:-})
+ XNF_CONTAINER_ID=$(${DEVELOPMENT_BIN_DIRECTORY}/run-xnf-simulator.sh ${SSL_DISABLE} $XNF_PORT ${HV_VES_HOSTNAME} ${HV_VES_PORT} ${DOCKER_NETWORK:-})
CREATED_XNF_SIMULATORS_PORTS+=(${XNF_PORT})
verbose_log "Container id: ${XNF_CONTAINER_ID}"
CREATED_XNF_SIMULATORS_IDS+=(${XNF_CONTAINER_ID})
@@ -80,19 +86,20 @@ function get_unoccupied_port() {
}
function wait_for_containers_startup_or_fail() {
- local seconds_to_wait=10
+ local intervals_amount=30
+ local wait_interval=5
local all_containers_healthy=1
- verbose_log "Waiting ${seconds_to_wait}s for containers startup"
+ verbose_log "Waiting up to ${intervals_amount} times with interval of ${wait_interval}s for containers startup"
set +e
- for i in $(seq 1 ${seconds_to_wait}); do
+ for i in $(seq 1 ${intervals_amount}); do
verbose_log "Try no. ${i}"
all_containers_healthy=1
- for port in ${CREATED_XNF_SIMULATORS_PORTS[@]}; do
- verbose_log "Checking container on port ${port}"
- local status_code=$(curl -s -o /dev/null -I -w "%{http_code}" localhost:${port}/healthcheck)
- if [ $status_code -ne 200 ]; then
- verbose_log "Container on port ${port} is unhealthy "
+ for id in ${CREATED_XNF_SIMULATORS_IDS[@]}; do
+ verbose_log "Checking container with id ${id}"
+ health=$(docker inspect --format='{{json .State.Health.Status}}' ${id})
+ if [ ${health} != "\"healthy\"" ]; then
+ verbose_log "Container ${id} is not in healthy state. Actual status: ${health}"
all_containers_healthy=0
break
fi
@@ -100,7 +107,8 @@ function wait_for_containers_startup_or_fail() {
if [ $all_containers_healthy -eq 1 ]; then
break
fi
- sleep 1
+ verbose_log "Sleeping for ${wait_interval}s"
+ sleep $wait_interval
done
set -e
@@ -113,8 +121,8 @@ function wait_for_containers_startup_or_fail() {
}
function start_simulation() {
- verbose_log "Simulation: every xNF will send ${MESSAGES_IN_BATCH} messages to hv-ves
- ${MESSAGE_BATCHES_AMOUNT} times, once every ${MESSAGES_SENDING_INTERVAL}s"
+ verbose_log "Simulation: every xNF will send ${MESSAGES_IN_BATCH} messages to hv-ves ( running on
+ ${HV_VES_HOSTNAME}:${HV_VES_PORT} ) ${MESSAGE_BATCHES_AMOUNT} times, once every ${MESSAGES_SENDING_INTERVAL}s"
for port in ${CREATED_XNF_SIMULATORS_PORTS[@]}; do
start_single_simulation $port $MESSAGES_IN_BATCH &
done
@@ -144,11 +152,12 @@ function wait_for_simulators_to_finish_sending_messages() {
for i in $(seq 1 ${seconds_to_wait}); do
verbose_log "Wait no. ${i}"
all_containers_finished=1
- for port in ${CREATED_XNF_SIMULATORS_PORTS[@]}; do
- local container_status=$(curl --request GET -s localhost:${port}/healthcheck | jq -r '.["Detailed status"]')
+ for id in ${CREATED_XNF_SIMULATORS_IDS[@]}; do
+ verbose_log "Checking container ${id}"
+ local container_status=$(docker inspect --format='{{json .State.Health.Log }}' ${id} | jq '.[-1] | .Output')
- verbose_log "Container on port ${port} status: ${container_status}"
- if [ "${container_status}" = "Busy" ]; then
+ verbose_log "Container ${id} status: ${container_status}"
+ if [ "${container_status}" != "\"UP\\nNo simulation is in progress at the moment\"" ]; then
all_containers_finished=0
break
fi
@@ -157,8 +166,18 @@ function wait_for_simulators_to_finish_sending_messages() {
echo "All containers finished sending messages"
break
fi
+ verbose_log "Sleeping for 1s"
sleep 1
done
+
+
+ if [ $all_containers_finished -ne 1 ]; then
+ echo "[ERROR] Some xNFs simulators failed to finish sending messages - simulation probably failed"
+ echo "For debug output rerun simulation with -v and --xnf-logs-directory command line options"
+ cleanup
+ echo "Exitting..."
+ exit 3
+ fi
}
function cleanup() {
@@ -170,7 +189,7 @@ function cleanup() {
if [ -n "${XNF_LOGS_DIRECTORY+x}" ]; then
local log_file=${XNF_LOGS_DIRECTORY}/${container_id}.log
verbose_log "Writing container logs to: ${log_file}"
- docker logs ${container_id} > $log_file
+ docker logs ${container_id} &> $log_file
fi
verbose_log "Removing container: ${container_id}"
docker rm $container_id > /dev/null
@@ -202,6 +221,8 @@ while getopts "$optspec" arg; do
case "${OPTARG}" in
verbose)
VERBOSE=True ;;
+ ssl-disable)
+ SSL_DISABLE="--ssl-disable" ;;
help)
usage ;;
*)
@@ -239,14 +260,14 @@ MESSAGE_BATCHES_AMOUNT=${4}
MESSAGES_SENDING_INTERVAL=${5}
# set defaults if absent
-[ -z "${MESSAGES_IN_BATCH}" ] && MESSAGES_IN_BATCH=1
+[ -z "${MESSAGES_IN_BATCH+x}" ] && MESSAGES_IN_BATCH=1
+[ -z "${SSL_DISABLE+x}" ] && SSL_DISABLE=""
create_logs_dir
CREATED_XNF_SIMULATORS_PORTS=()
CREATED_XNF_SIMULATORS_IDS=()
-echo "Creating ${XNFS_AMOUNT} xNFs simulators"
trap cleanup SIGINT SIGTERM
create_xNFs_simulators
@@ -259,6 +280,7 @@ assure_all_xNFs_requests_were_sent
assumed_message_sending_time=$(echo ";0.00025 * $XNFS_AMOUNT" | bc)
seconds_to_wait=$(echo ";$assumed_message_sending_time * $MESSAGE_BATCHES_AMOUNT * $MESSAGES_IN_BATCH" | bc)
+seconds_to_wait=$(echo ";if($seconds_to_wait > 2) $seconds_to_wait else 2" | bc)
wait_for_simulators_to_finish_sending_messages $seconds_to_wait
# there might be network lag between moment when xNF finished sending messages and they actually are received by hv-ves
# thus we cannot start removing xNFs immediately to prevent closing socket channels
diff --git a/sources/hv-collector-xnf-simulator/src/main/kotlin/org/onap/dcae/collectors/veshv/simulators/xnf/impl/adapters/VesHvClient.kt b/sources/hv-collector-xnf-simulator/src/main/kotlin/org/onap/dcae/collectors/veshv/simulators/xnf/impl/adapters/VesHvClient.kt
index 6e707c3a..8de7da32 100644
--- a/sources/hv-collector-xnf-simulator/src/main/kotlin/org/onap/dcae/collectors/veshv/simulators/xnf/impl/adapters/VesHvClient.kt
+++ b/sources/hv-collector-xnf-simulator/src/main/kotlin/org/onap/dcae/collectors/veshv/simulators/xnf/impl/adapters/VesHvClient.kt
@@ -95,7 +95,7 @@ class VesHvClient(private val configuration: SimulatorConfiguration) {
private fun NettyOutbound.logConnectionClosed() =
withConnection { conn ->
- conn.onTerminate().subscribe {
+ conn.onDispose {
logger.info { "Connection to ${conn.address()} has been closed" }
}
}