From b44416a72f3ce23a3254229c9f469c21df86c82f Mon Sep 17 00:00:00 2001
From: Tomasz Wrobel
Date: Fri, 29 May 2020 08:02:39 +0200
Subject: Refactor performance scripts

-change scripts to use deployment instead of pod
-add function for instant, interval mode
-add config files
-remove unused variable from test.properties

Issue-ID: DCAEGEN2-1576
Change-Id: Ice889f7e8e1dd2d787c7b59a9701669a93a7cfb1
Signed-off-by: Tomasz Wrobel
---
 .../cloud/cloud-based-performance-test.sh | 302 +++++++++++++--------
 tools/performance/cloud/producer-deployment.yaml | 2 +-
 tools/performance/cloud/producer-proxy.yaml | 42 +++
 .../cloud/producers-config/instant-config.json | 3 +
 .../cloud/producers-config/interval-config.json | 12 +
 .../cloud/producers-config/producer-config.json | 4 +
 tools/performance/cloud/reboot-test-environment.sh | 4 +
 tools/performance/cloud/test.properties | 8 -
 8 files changed, 254 insertions(+), 123 deletions(-)
 create mode 100755 tools/performance/cloud/producer-proxy.yaml
 create mode 100644 tools/performance/cloud/producers-config/instant-config.json
 create mode 100644 tools/performance/cloud/producers-config/interval-config.json
 create mode 100644 tools/performance/cloud/producers-config/producer-config.json

diff --git a/tools/performance/cloud/cloud-based-performance-test.sh b/tools/performance/cloud/cloud-based-performance-test.sh
index 2365cc4d..a4ec99b5 100755
--- a/tools/performance/cloud/cloud-based-performance-test.sh
+++ b/tools/performance/cloud/cloud-based-performance-test.sh
@@ -18,12 +18,13 @@
 # ============LICENSE_END=========================================================
 
 SCRIPT_DIRECTORY="$(pwd "$0")"
-CONTAINERS_COUNT=1
-COMPLETED_PRODUCERS_SUM=0
-LOAD_TEST="false"
+PRODUCERS_COUNT=1
 TEST_CONFIG_MAP=performance-test-config
 PROPERTIES_FILE=${SCRIPT_DIRECTORY}/test.properties
 PRODUCER_APPS_LABEL=hv-collector-producer
+PRODUCER_APPS_DEPLOYMENT=hv-collector-producer-deployment
+PRODUCER_SERVICE=hv-collector-producer
+PRODUCER_PROXY_APPS_LABEL=hv-collector-producer-proxy
 CONSUMER_APPS_LABEL=hv-collector-kafka-consumer
 PROMETHEUS_CONF_LABEL=prometheus-server-conf
 PROMETHEUS_APPS_LABEL=hv-collector-prometheus
@@ -32,9 +33,6 @@ GRAFANA_DATASOURCE=grafana-datasources
 GRAFANA_DASHBOARDS=grafana-dashboards
 GRAFANA_DASHBOARD_PROVIDERS=grafana-dashboards-providers
 ONAP_NAMESPACE=onap
-MAXIMUM_BACK_OFF_CHECK_ITERATIONS=30
-CHECK_NUMBER=0
-PRODUCERS_TO_RECREATE=0
 NAME_REASON_PATTERN="custom-columns=NAME:.metadata.name,REASON:.status.containerStatuses[].state.waiting.reason"
 HVVES_POD_NAME=$(kubectl -n ${ONAP_NAMESPACE} get pods --no-headers=true -o custom-columns=:metadata.name | grep hv-ves-collector)
 HVVES_CERT_PATH=/etc/ves-hv/ssl/server
@@ -44,6 +42,10 @@ CALC_RETENTION_TIME_IN_MS_CMD='expr $KAFKA_RETENTION_TIME_MINUTES \* $MILISECOND
 KAFKA_ROUTER_0_POD_NAME=$(kubectl -n ${ONAP_NAMESPACE} get pods --no-headers=true -o custom-columns=:metadata.name | grep router-kafka-0)
 KAFKA_SET_TOPIC_RETENTION_TIME_CMD='kafka-topics --zookeeper message-router-zookeeper:2181 --alter --topic HV_VES_PERF3GPP --config retention.ms='
 HIDE_OUTPUT='grep abc | grep 123'
+CONTENT_TYPE_HEADER='Content-Type: application/json'
+REQUEST_ENDPOINT="/"
+REQUEST_JSON_DATA="{}"
+PRODUCER_INTERNAL_PORT=8080
 
 function clean() {
 echo "Cleaning up environment"
@@ -72,9 +74,6 @@ function clean() {
 echo "Attempting to delete consumer deployments"
 kubectl delete deployments -l app=${CONSUMER_APPS_LABEL} -n ${ONAP_NAMESPACE}
 
- echo "Attempting to delete producer pods"
- kubectl delete pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE}
-
 echo "Attempting to delete client certs secret"
 kubectl delete secret cert -n ${ONAP_NAMESPACE}
 
@@ -84,6 +83,16 @@ function clean() {
 echo "Environment clean up finished!"
 }
 
+function clean_all() {
+ clean
+ echo "Attempting to delete producer pods"
+ kubectl delete service,deployments -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE}
+ echo "Attempting to delete producer-proxy pod"
+ kubectl delete deployments -l app=${PRODUCER_PROXY_APPS_LABEL} -n ${ONAP_NAMESPACE}
+
+ echo "Producers deleted"
+}
+
 function copy_certs_to_hvves() {
 cd ../../ssl
 echo "Attempting to create certs directory in HV-VES"
@@ -100,52 +109,20 @@ function set_kafka_retention_time() {
 kubectl exec -it ${KAFKA_ROUTER_0_POD_NAME} -n ${ONAP_NAMESPACE} -- ${KAFKA_SET_TOPIC_RETENTION_TIME_CMD}$(eval $CALC_RETENTION_TIME_IN_MS_CMD) | eval $HIDE_OUTPUT
 }
 
-function create_producers() {
- echo "Recreating test properties ConfigMap from: $PROPERTIES_FILE"
- kubectl delete configmap ${TEST_CONFIG_MAP} -n ${ONAP_NAMESPACE}
- kubectl create configmap ${TEST_CONFIG_MAP} --from-env-file=${PROPERTIES_FILE} -n ${ONAP_NAMESPACE}
-
- set -e
- for i in $(seq 1 ${CONTAINERS_COUNT});
- do
- echo "Creating ${i}/${CONTAINERS_COUNT} producer"
- kubectl create -f producer-pod.yaml -n ${ONAP_NAMESPACE}
- done
- echo "Producers created"
- set +e
-}
-
 function generate_certs() {
 echo "Generation of certs"
 cd ../../ssl
 ./gen-certs.sh
 }
 
-function handle_backoffs() {
- IMAGE_PULL_BACK_OFFS=$(kubectl get pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE} -o ${NAME_REASON_PATTERN} | grep -c "ImagePullBackOff \| ErrImagePull")
- if [[ ${IMAGE_PULL_BACK_OFFS} -gt 0 ]]; then
- CHECK_NUMBER=$((CHECK_NUMBER + 1))
- if [[ ${CHECK_NUMBER} -gt ${MAXIMUM_BACK_OFF_CHECK_ITERATIONS} ]]; then
- echo "Error: Image pull problem"
- exit 1
- fi
- fi
-}
-
-function handle_key_interrupt() {
- trap SIGINT
- echo "Script interrupted, attempt to delete producers"
- echo "Wait with patience"
- COMPLETED_PRODUCERS_SUM=$(($(kubectl delete pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE} | grep producer | wc -l) + COMPLETED_PRODUCERS_SUM))
- echo "Total number of completed producers: ${COMPLETED_PRODUCERS_SUM}"
- exit 0
-}
-
 function print_test_setup_info() {
+ RUNNING_PRODUCERS=$(kubectl -n ${ONAP_NAMESPACE} get pods -l app=${PRODUCER_APPS_LABEL} | grep -c "Running")
 echo "Starting cloud based performance tests"
 echo "________________________________________"
 echo "Test configuration:"
- echo "Producer containers count: ${CONTAINERS_COUNT}"
+ echo "Running producers: ${RUNNING_PRODUCERS}"
+ echo "Sending scheme:"
+ echo ${REQUEST_JSON_DATA}
 echo "Properties file path: ${PROPERTIES_FILE}"
 echo "Retention time of kafka messages in minutes: ${KAFKA_RETENTION_TIME_MINUTES}"
 echo "________________________________________"
@@ -154,26 +131,36 @@ function usage() {
 echo ""
 echo "Run cloud based HV-VES performance test"
- echo "Usage $0 gen_certs|setup|start|clean|help"
- echo " gen_certs: generate certs in ../../ssl directory"
- echo " setup : set up ConfigMap and consumers"
- echo " start : create producers - start the performance test"
- echo " Optional parameters:"
- echo " --load : should test keep defined containers number till script interruption (false)"
- echo " --containers : number of producer containers to create (1)"
- echo " --properties-file : path to file with benchmark properties (./test.properties)"
- echo " --retention-time-minutes : messages retention time on kafka in minutes (60)"
- echo " clean : remove ConfigMap, HV-VES consumers and producers"
- echo " help : print usage"
+ echo "Usage $0 gen_certs|setup|setup_all|send_config|start_interval|start_instant|scale_producers|stop|reset_producers|clean|clean_all|help"
+ echo " gen_certs : generate certs in ../../ssl directory"
+ echo " setup : set up ConfigMaps and consumers"
+ echo " setup_all : set up ConfigMaps, consumers and producers"
+ echo " send_config : send producer configuration (message interval and payload), located in producers-config/producer-config.json, to each producer"
+ echo " start_interval : start interval mode, config file is located in producers-config/interval-config.json"
+ echo " Optional parameters:"
+ echo " --producers : number of producers in deployment (10)"
+ echo " --retention-time-minutes : messages retention time on kafka in minutes (60)"
+ echo " start_instant : start instant mode, config file is located in producers-config/instant-config.json"
+ echo " Optional parameters:"
+ echo " --producers : number of producers in deployment (10)"
+ echo " --retention-time-minutes : messages retention time on kafka in minutes (60)"
+ echo " scale_producers : scale producer deployment to the number provided as an argument"
+ echo " stop : stop all producers"
+ echo " reset_producers : reset all metrics on each producer"
+ echo " clean : remove ConfigMap, HV-VES consumers"
+ echo " clean_all : remove ConfigMap, HV-VES consumers and producers"
+ echo " help : print usage"
 echo "Example invocations:"
 echo "./cloud-based-performance-test.sh gen_certs"
 echo "./cloud-based-performance-test.sh setup"
- echo "./cloud-based-performance-test.sh start"
- echo "./cloud-based-performance-test.sh start --containers 10"
- echo "./cloud-based-performance-test.sh start --load true --containers 10"
- echo "./cloud-based-performance-test.sh start --load true --containers 10 --retention-time-minutes 50"
- echo "./cloud-based-performance-test.sh start --properties-file ~/other_test.properties"
+ echo "./cloud-based-performance-test.sh setup_all"
+ echo "./cloud-based-performance-test.sh send_config"
+ echo "./cloud-based-performance-test.sh start_interval"
+ echo "./cloud-based-performance-test.sh start_interval --producers 8"
+ echo "./cloud-based-performance-test.sh start_instant"
+ echo "./cloud-based-performance-test.sh scale_producers 8"
 echo "./cloud-based-performance-test.sh clean"
+ echo "./cloud-based-performance-test.sh clean_all"
 exit 1
 }
 
@@ -221,62 +208,112 @@ function setup_environment() {
 echo "Setting up environment finished!"
 }
 
-function start_load_tests() {
- print_test_setup_info
+function setup_all() {
+ setup_environment
+ echo "Creating producer deployment"
+ kubectl apply -f producer-deployment.yaml
 
- set_kafka_retention_time
+ echo "Creating producer-proxy pod"
+ kubectl apply -f producer-proxy.yaml
 
- echo "CTRL + C to stop/interrupt this script"
- create_producers
-
- trap "handle_key_interrupt" INT
+ echo "Creating producers finished"
+}
 
- echo "Constant producer number keeper started working"
- while :; do
- PRODUCERS_TO_RECREATE=$((CONTAINERS_COUNT-$(kubectl get pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE} | grep -c "Running")))
- handle_backoffs
+function scale_producer_deployment() {
+ echo "Scaling producer deployment to ${PRODUCERS_COUNT}"
+ kubectl scale --replicas=${PRODUCERS_COUNT} deployment ${PRODUCER_APPS_DEPLOYMENT} -n ${ONAP_NAMESPACE}
+ RUNNING_PRODUCERS=""
+ while [ "${RUNNING_PRODUCERS}" != "${PRODUCERS_COUNT}" ]; do
+ RUNNING_PRODUCERS=$(kubectl -n ${ONAP_NAMESPACE} get pods -l app=${PRODUCER_APPS_LABEL} | grep -c "Running")
+ sleep 1s
+ done
+ echo "Producers are ready"
+}
 
- set -e
- for i in $(seq 1 ${PRODUCERS_TO_RECREATE});
- do
- echo "Recreating ${i}/${PRODUCERS_TO_RECREATE} producer"
- kubectl create -f producer-pod.yaml -n ${ONAP_NAMESPACE}
- done
- set +e
- COMPLETED_PRODUCERS_SUM=$((COMPLETED_PRODUCERS_SUM + PRODUCERS_TO_RECREATE))
- echo "Attempting to clear completed producers"
- kubectl delete pod --field-selector=status.phase==Succeeded -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE}
+function set_producers_array_internal_IP() {
+ PRODUCER_IP_ARRAY=$(kubectl -n ${ONAP_NAMESPACE} get endpoints ${PRODUCER_SERVICE} -o jsonpath="{.subsets[*].addresses[*].ip}")
+}
 
- [[ ${CHECK_NUMBER} -gt ${MAXIMUM_BACK_OFF_CHECK_ITERATIONS} ]] && break
- sleep 1
+function send_post_request_to_each_producer() {
+ set_producers_array_internal_IP
+ PROXY_POD=$(kubectl -n ${ONAP_NAMESPACE} get pods -l app=${PRODUCER_PROXY_APPS_LABEL} -o name)
+ echo "Sending POST request to each producer, endpoint: ${REQUEST_ENDPOINT}"
+ REQUEST_ARRAY=""
+ for item in ${PRODUCER_IP_ARRAY[*]}
+ do
+ URL="${item}:${PRODUCER_INTERNAL_PORT}${REQUEST_ENDPOINT}"
+ echo ${URL}
+ REQUEST_ARRAY="curl -H '${CONTENT_TYPE_HEADER}' -X POST -d '${REQUEST_JSON_DATA}' ${URL} ; ${REQUEST_ARRAY}"
 done
+ kubectl -n onap exec -it ${PROXY_POD} -- /bin/sh -c "${REQUEST_ARRAY}"
+ echo "Request was sent to each producer"
+}
 
- trap SIGINT
- exit 0
+function send_get_request_to_each_producer() {
+ set_producers_array_internal_IP
+ PROXY_POD=$(kubectl -n ${ONAP_NAMESPACE} get pods -l app=${PRODUCER_PROXY_APPS_LABEL} -o name)
+ echo "Sending GET request to each producer, endpoint: ${REQUEST_ENDPOINT}"
+ REQUEST_ARRAY=""
+ for item in ${PRODUCER_IP_ARRAY[*]}
+ do
+ URL="${item}:${PRODUCER_INTERNAL_PORT}${REQUEST_ENDPOINT}"
+ echo ${URL}
+ REQUEST_ARRAY="curl -X GET ${URL} ; ${REQUEST_ARRAY}"
+ done
+ kubectl -n onap exec -it ${PROXY_POD} -- /bin/sh -c "${REQUEST_ARRAY}"
+ echo "Request was sent to each producer"
 }
 
-function start_performance_test() {
- print_test_setup_info
+function send_configuration() {
+ REQUEST_ENDPOINT="/configuration"
+ REQUEST_JSON_DATA=$(cat producers-config/producer-config.json)
+ echo "Sending producer configuration: "
+ echo ${REQUEST_JSON_DATA}
+ send_post_request_to_each_producer
+ echo "Configuration was sent to each producer pod"
+ exit 0
+}
 
- set_kafka_retention_time
+function start_interval_mode() {
+ set_kafka_retention_time
+ REQUEST_ENDPOINT="/interval"
+ REQUEST_JSON_DATA=$(cat producers-config/interval-config.json)
+ print_test_setup_info
+ echo "Sending start interval command to producer pods"
+ send_post_request_to_each_producer
+ echo "Command was sent to each producer pod"
+ exit 0
+}
 
- create_producers
+function start_instant_mode() {
+ set_kafka_retention_time
+ REQUEST_ENDPOINT="/instant"
+ REQUEST_JSON_DATA=$(cat producers-config/instant-config.json)
+ print_test_setup_info
 
- echo "Waiting for producers completion"
- while :; do
- COMPLETED_PRODUCERS=$(kubectl get pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE} | grep -c "Completed")
- handle_backoffs
+ echo "Sending start instant command to producer pods"
+ send_post_request_to_each_producer
+ echo "Command was sent to each producer pod"
+ exit 0
+}
+function stop_producer_pods() {
+ REQUEST_ENDPOINT="/cancel"
 
- [[ ${COMPLETED_PRODUCERS} -eq ${CONTAINERS_COUNT} || ${CHECK_NUMBER} -gt ${MAXIMUM_BACK_OFF_CHECK_ITERATIONS} ]] && break
- sleep 1
- done
+ echo "Sending stop command"
+ send_get_request_to_each_producer
+ echo "Stop command was sent to each producer pod"
+ exit 0
+}
+function reset_producer_pods() {
+ REQUEST_ENDPOINT="/monitoring/prometheus/reset"
 
- echo "Attempting to delete producer pods"
- kubectl delete pods -l app=${PRODUCER_APPS_LABEL} -n ${ONAP_NAMESPACE}
- echo "Performance test finished"
- exit 0
+ echo "Sending reset command"
+ send_get_request_to_each_producer
+ echo "Reset command was sent to each producer pod"
+ exit 0
 }
+
 cd ${SCRIPT_DIRECTORY}
 
 if [[ $# -eq 0 ]]; then
@@ -291,18 +328,50 @@ else
 setup)
 setup_environment
 ;;
+ setup_all)
+ setup_all
+ ;;
 start)
+ echo "Option start is deprecated. Last supported commit: 3e7de0deb033e485d519c74feaffecc02e7e9dc7"
+ ;;
+ clean)
+ clean
+ ;;
+ clean_all)
+ clean_all
+ ;;
+ send_config)
+ send_configuration
+ ;;
+ start_interval)
 shift 1
 while [[ $(($#)) -gt 0 ]]; do
 case "${1}" in
- --load)
- LOAD_TEST=${2}
+ --producers)
+ PRODUCERS_COUNT=${2}
+ scale_producer_deployment
+ send_configuration
 ;;
- --containers)
- CONTAINERS_COUNT=${2}
+ --retention-time-minutes)
+ KAFKA_RETENTION_TIME_MINUTES=${2}
+ ;;
+ *)
+ echo "Unknown option: ${1}"
+ usage
 ;;
- --properties-file)
- PROPERTIES_FILE=${2}
+ esac
+ shift 2
+ done
+ start_interval_mode
+ ;;
+ start_instant)
+ shift 1
+ while [[ $(($#)) -gt 0 ]]; do
+ case "${1}" in
+ --producers)
+ PRODUCERS_COUNT=${2}
+ scale_producer_deployment
+ send_configuration
 ;;
 --retention-time-minutes)
 KAFKA_RETENTION_TIME_MINUTES=${2}
@@ -314,14 +383,19 @@ else
 esac
 shift 2
 done
- if [ ${LOAD_TEST} == "true" ] ; then
- start_load_tests
- else
- start_performance_test
- fi
+ start_instant_mode
 ;;
- clean)
- clean
+ stop)
+ stop_producer_pods
+ ;;
+ reset_producers)
+ reset_producer_pods
+ ;;
+ scale_producers)
+ shift 1
+ PRODUCERS_COUNT=${1}
+ scale_producer_deployment
+ exit 0
 ;;
 help)
 usage
diff --git a/tools/performance/cloud/producer-deployment.yaml b/tools/performance/cloud/producer-deployment.yaml
index f13760a6..e81fd7f6 100755
--- a/tools/performance/cloud/producer-deployment.yaml
+++ b/tools/performance/cloud/producer-deployment.yaml
@@ -80,7 +80,7 @@ metadata:
     prometheus.io/path: '/monitoring/prometheus'
 spec:
   selector:
-    name: hv-collector-producer
+    app: hv-collector-producer
   type: NodePort
   ports:
     - protocol: TCP
diff --git a/tools/performance/cloud/producer-proxy.yaml b/tools/performance/cloud/producer-proxy.yaml
new file mode 100755
index 00000000..ace46a63
--- /dev/null
+++ b/tools/performance/cloud/producer-proxy.yaml
@@ -0,0 +1,42 @@
+# ============LICENSE_START=======================================================
+# dcaegen2-collectors-veshv
+# ================================================================================
+# Copyright (C) 2020 NOKIA
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: hv-collector-producer-proxy-deployment
+  namespace: onap
+  labels:
+    app: hv-collector-producer-proxy
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: hv-collector-producer-proxy
+  template:
+    metadata:
+      labels:
+        app: hv-collector-producer-proxy
+      name: hv-collector-producer-proxy
+    spec:
+      containers:
+        - name: hv-collector-producer-proxy
+          imagePullPolicy: IfNotPresent
+          image: alpine:latest
+          command: [ "/bin/sh", "-c" ]
+          args: [ "apk add --update curl;rm -rf /var/cache/apk/*; while true; do sleep 30; done;" ]
diff --git a/tools/performance/cloud/producers-config/instant-config.json b/tools/performance/cloud/producers-config/instant-config.json
new file mode 100644
index 00000000..486bb077
--- /dev/null
+++ b/tools/performance/cloud/producers-config/instant-config.json
@@ -0,0 +1,3 @@
+{
+  "connections": 500
+}
diff --git a/tools/performance/cloud/producers-config/interval-config.json b/tools/performance/cloud/producers-config/interval-config.json
new file mode 100644
index 00000000..62deb8c4
--- /dev/null
+++ b/tools/performance/cloud/producers-config/interval-config.json
@@ -0,0 +1,12 @@
+{
+  "intervalConfigs": [
+    {
+      "duration": 1,
+      "interval": 100
+    },
+    {
+      "duration": 2,
+      "interval": 100
+    }
+  ]
+}
diff --git a/tools/performance/cloud/producers-config/producer-config.json b/tools/performance/cloud/producers-config/producer-config.json
new file mode 100644
index 00000000..74972e61
--- /dev/null
+++ b/tools/performance/cloud/producers-config/producer-config.json
@@ -0,0 +1,4 @@
+{
+  "payloadSize": 8192,
+  "messageIntervalMs": 500
+}
diff --git a/tools/performance/cloud/reboot-test-environment.sh b/tools/performance/cloud/reboot-test-environment.sh
index 0fb916f0..3d9a5a1e 100755
--- a/tools/performance/cloud/reboot-test-environment.sh
+++ b/tools/performance/cloud/reboot-test-environment.sh
@@ -63,6 +63,10 @@ function rebootEnvironment(){
 ./cloud-based-performance-test.sh setup | formatOutput
 
+ ./cloud-based-performance-test.sh stop | formatOutput
+
+ ./cloud-based-performance-test.sh reset_producers | formatOutput
+
 echo "${GREEN}Environment ready!${NO_COLOR}"
 }
diff --git a/tools/performance/cloud/test.properties b/tools/performance/cloud/test.properties
index 53a38e23..05a50055 100644
--- a/tools/performance/cloud/test.properties
+++ b/tools/performance/cloud/test.properties
@@ -2,14 +2,6 @@
 # HV-VES address
 producer.hvVesAddress=dcae-hv-ves-collector.onap:6061
-# Number of clients per pod
-producer.client.count=1
-# Size in bytes of a single message
-producer.message.size=16384
-# Amount of messages to sent by one client in a single pod
-producer.message.count=1000
-# Interval between messages in milliseconds
-producer.message.interval=1
 # Path to client cert file
 client.cert.path=/ssl/client.p12
 # Path to cert pass file
--
cgit 1.2.3-korg
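
Editor's note: for reviewers who want to exercise the refactored workflow end to end, the sketch below shows one plausible sequence of the commands introduced by this change. It assumes the script is invoked from tools/performance/cloud and that the JSON files under producers-config/ keep the values added in this patch; the producer count of 8 is purely illustrative.

    # Plausible end-to-end run of the refactored test script (illustrative values)
    ./cloud-based-performance-test.sh gen_certs                      # generate certs in ../../ssl
    ./cloud-based-performance-test.sh setup_all                      # ConfigMaps, consumers, producer deployment and producer-proxy
    ./cloud-based-performance-test.sh send_config                    # POST producers-config/producer-config.json to every producer
    ./cloud-based-performance-test.sh start_interval --producers 8   # scale producers, resend config, then POST interval-config.json
    ./cloud-based-performance-test.sh stop                           # send /cancel to every producer
    ./cloud-based-performance-test.sh reset_producers                # hit /monitoring/prometheus/reset on every producer
    ./cloud-based-performance-test.sh clean_all                      # remove consumers, producers and the proxy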