aboutsummaryrefslogtreecommitdiffstats
path: root/kubernetes/consul/resources/config/consul-agent-config/scripts
diff options
context:
space:
mode:
authorjasmineWen <jasmine.wen@amdocs.com>2018-03-06 16:13:35 +0000
committerjasmineWen <jasmine.wen@amdocs.com>2018-03-19 15:42:08 +0000
commit85d9d73270f01d0770c29e8516dd0154669e9cf5 (patch)
tree8f135e6dbdc2359f9329352aa7bda94c2e40be76 /kubernetes/consul/resources/config/consul-agent-config/scripts
parent0947cdf7e780641e1604ef3752483fa4d29efb3e (diff)
Move consul config from shared NFS to configmap
Issue-ID: OOM-597 Change-Id: I708c3e9df16003a54462f76c6ffe513b270faae8 Signed-off-by: jasmineWen <jasmine.wen@amdocs.com>
Diffstat (limited to 'kubernetes/consul/resources/config/consul-agent-config/scripts')
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt9
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh17
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh13
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh16
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh16
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh13
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh13
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh15
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh15
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh15
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh14
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh18
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh18
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh18
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh18
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh13
-rw-r--r--kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh45
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh16
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh20
-rwxr-xr-xkubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh14
20 files changed, 336 insertions, 0 deletions
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt
new file mode 100644
index 0000000000..a6e084cfea
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-doc.txt
@@ -0,0 +1,9 @@
+{
+ "vnfId" : "testwrite",
+ "device" : "10.198.1.31",
+ "timestamp" : "2017-08-23T19:13:56Z",
+ "jdmTotalMem" : "2097152",
+ "jdmAvailableMem" : "1877272",
+ "jdmUserCpu" : "16",
+ "jdmSystemCpu" : "3"
+}
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
new file mode 100755
index 0000000000..3d26f6e71e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/aai-search-storage-write-script.sh
@@ -0,0 +1,17 @@
+if curl -s -X PUT http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite -d @/consul/scripts/aai-search-storage-write-doc.txt | grep '\"created\":true'; then
+   if curl -s -X DELETE http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"failed\":0'; then
+      if curl -s -X GET http://aai-elasticsearch.{{ .Values.nsPrefix }}:9200/searchhealth/stats/testwrite | grep '\"found\":false'; then
+         echo "Successful PUT, DELETE, GET from Search Document Storage"
+         exit 0
+      else
+         echo "Failed GET from Search Document Storage"
+         exit 1
+      fi
+   else
+      echo "Failed DELETE from Search Document Storage"
+      exit 1
+   fi
+else
+   echo "Failed PUT from Search Document Storage"
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
new file mode 100755
index 0000000000..5f91c5e89c
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/appc-dbhost-script.sh
@@ -0,0 +1,13 @@
+APPC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "appc-dbhost-[^[:space:]]*" | head -n 1)
+if [ -n "$APPC_DBHOST_POD" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$APPC_DBHOST_POD" -- ./healthcheck.sh | grep -i "mysqld is alive"; then
+      echo "Success. APPC DBHost is running."
+      exit 0
+   else
+      echo "Failed. APPC DBHost is not running."
+      exit 1
+   fi
+else
+   echo "Failed. APPC DBHost is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
new file mode 100755
index 0000000000..035e7c8a38
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/data-router-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-data-router[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- ps -efww | grep 'java' | grep 'data-router' > /dev/null; then
+
+      echo "Success. Synapse process is running."
+      exit 0
+   else
+      echo "Failed. Synapse process is not running."
+      exit 1
+   fi
+else
+   echo "Failed. Synapse container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
new file mode 100755
index 0000000000..9a4b4df28f
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/model-loader-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-model-loader[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- ps -efww | grep 'java' | grep 'model-loader' > /dev/null; then
+
+      echo "Success. Model Loader process is running."
+      exit 0
+   else
+      echo "Failed. Model Loader process is not running."
+      exit 1
+   fi
+else
+   echo "Failed. Model Loader container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
new file mode 100755
index 0000000000..a109032d3b
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-kafka-health.sh
@@ -0,0 +1,13 @@
+kafkapod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-global-kafka-[^[:space:]]*" | head -n 1)
+if [ -n "$kafkapod" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$kafkapod" -- ps ef | grep -i kafka; then
+      echo "Success. Kafka process is running."
+      exit 0
+   else
+      echo "Failed. Kafka is not running."
+      exit 1
+   fi
+else
+   echo "Failed. Kafka container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
new file mode 100755
index 0000000000..47c42d54ef
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mr-zookeeper-health.sh
@@ -0,0 +1,13 @@
+zkpod=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "message-router-zookeeper-[^[:space:]]*" | head -n 1)
+if [ -n "$zkpod" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$zkpod" -- ps ef | grep -i zookeeper; then
+      echo "Success. Zookeeper process is running."
+      exit 0
+   else
+      echo "Failed. Zookeeper is not running."
+      exit 1
+   fi
+else
+   echo "Failed. Zookeeper container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh
new file mode 100755
index 0000000000..8f3f85ce5e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-api-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/ecomp/mso/infra/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | grep "Application ready")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh
new file mode 100755
index 0000000000..341ff193e9
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-camunda-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/mso/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | grep "Application ready")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh
new file mode 100755
index 0000000000..beeb289517
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-jra-script.sh
@@ -0,0 +1,15 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://mso.{{ .Values.nsPrefix }}:8080/networks/rest/healthcheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | grep "Application ready")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh
new file mode 100755
index 0000000000..aa73a73050
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/mso-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "mso-mariadb[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+      echo "Success. mariadb process is running."
+      exit 0
+   else
+      echo "Failed. mariadb process is not running."
+      exit 1
+   fi
+else
+   echo "Failed. mariadb container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
new file mode 100755
index 0000000000..00a05648d3
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-be-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component BE) and check to see if
+## the BE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "BE" | grep "UP")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
new file mode 100755
index 0000000000..9950cc9fdf
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-cs-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component CASSANDRA) and check to see if
+## the CASSANDRA component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "CASSANDRA" | grep "UP")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
new file mode 100755
index 0000000000..27f3b224c6
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-fe-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component FE) and check to see if
+## the FE component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "FE" | grep "UP")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
new file mode 100755
index 0000000000..c5955f3be3
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdc-titan-script.sh
@@ -0,0 +1,18 @@
+## Query the health check API.
+HEALTH_CHECK_ENDPOINT="http://sdc-fe.{{ .Values.nsPrefix }}:8181/sdc1/rest/healthCheck"
+HEALTH_CHECK_RESPONSE=$(curl -s "$HEALTH_CHECK_ENDPOINT")
+
+## Strip out the ON_BOARDING section from the response (otherwise we will
+## get duplicate results when we search for component TITAN) and check to see if
+## the TITAN component is reported as up.
+READY=$(echo "$HEALTH_CHECK_RESPONSE" | sed '/ON_BOARDING/,/]/d' | grep -A 1 "TITAN" | grep "UP")
+
+if [ -z "$READY" ]; then
+  echo "Query against health check endpoint: $HEALTH_CHECK_ENDPOINT"
+  echo "Produces response: $HEALTH_CHECK_RESPONSE"
+  echo "Application is not in an available state"
+  exit 2
+else
+  echo "Application is available."
+  exit 0
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
new file mode 100755
index 0000000000..27b9b9f608
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sdnc-dbhost-script.sh
@@ -0,0 +1,13 @@
+SDNC_DBHOST_POD=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "sdnc-dbhost-[^[:space:]]*" | head -n 1)
+if [ -n "$SDNC_DBHOST_POD" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$SDNC_DBHOST_POD" -- ./healthcheck.sh | grep -i "mysqld is alive"; then
+      echo "Success. SDNC DBHost is running."
+      exit 0
+   else
+      echo "Failed. SDNC DBHost is not running."
+      exit 1
+   fi
+else
+   echo "Failed. SDNC DBHost is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
new file mode 100644
index 0000000000..d5118736cd
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/search-data-service-availability.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+SEARCH_SERVICE_NAME="search-data-service.{{ .Values.nsPrefix }}"
+SEARCH_SERVICE_PORT=9509
+HEALTH_CHECK_INDEX="healthcheck"
+
+# 'Document Index' REST Endpoint
+INDEX_URL="https://$SEARCH_SERVICE_NAME:$SEARCH_SERVICE_PORT/services/search-data-service/v1/search/indexes/$HEALTH_CHECK_INDEX"
+INDEX_SCHEMA="{\"fields\":[{\"name\": \"field1\", \"data-type\": \"string\"}]}"
+
+SEARCH_CERT_FILE="/consul/certs/client-cert-onap.crt.pem"
+SEARCH_KEY_FILE="/consul/certs/client-cert-onap.key.pem"
+
+## Try to create an index via the Search Data Service API.
+CREATE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert "$SEARCH_CERT_FILE" --cert-type PEM --key "$SEARCH_KEY_FILE" --key-type PEM -d "$INDEX_SCHEMA" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X PUT "$INDEX_URL")
+
+RESULT_STRING=" "
+
+if [ "$CREATE_INDEX_RESP" -eq 201 ]; then
+  RESULT_STRING="Service Is Able To Communicate With Back End"
+elif [ "$CREATE_INDEX_RESP" -eq 400 ]; then
+  # A 400 response could mean that the index already exists (ie: we didn't
+  # clean up after ourselves on a previous check), so log the response but
+  # don't exit yet.  If we fail on the delete then we can consider the
+  # check a failure, otherwise, we are good.
+  RESULT_STRING="$RESULT_STRING Create Index [FAIL - 400 (possible index already exists)] "
+else
+  RESULT_STRING="Service API Failure - $CREATE_INDEX_RESP"
+  echo "$RESULT_STRING"
+  exit 1
+fi
+
+## Now, clean up after ourselves.
+DELETE_INDEX_RESP=$(curl -s -o /dev/null -w "%{http_code}" -k --cert "$SEARCH_CERT_FILE" --cert-type PEM --key "$SEARCH_KEY_FILE" --key-type PEM -d "{ }" --header "Content-Type: application/json" --header "X-TransactionId: ConsulHealthCheck" -X DELETE "$INDEX_URL")
+
+if [ "$DELETE_INDEX_RESP" -eq 200 ]; then
+  RESULT_STRING="Service Is Able To Communicate With Back End"
+else
+  RESULT_STRING="Service API Failure - $DELETE_INDEX_RESP"
+  echo "$RESULT_STRING"
+  exit 1
+fi
+
+echo "$RESULT_STRING"
+exit 0
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
new file mode 100755
index 0000000000..7796681902
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/sparky-be-script.sh
@@ -0,0 +1,16 @@
+
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "aai-sparky-be[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- ps -efww | grep 'java' | grep 'sparky' > /dev/null; then
+
+      echo "Success. UI Backend Service process is running."
+      exit 0
+   else
+      echo "Failed. UI Backend Service process is not running."
+      exit 1
+   fi
+else
+   echo "Failed. UI Backend Service container is offline."
+   exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
new file mode 100755
index 0000000000..dc7768fc6e
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/tabular-db-availability.sh
@@ -0,0 +1,20 @@
+
+# Query the Hbase service for the cluster status.
+GET_CLUSTER_STATUS_RESPONSE=$(curl -si -X GET -H "Accept: text/xml" http://hbase.{{ .Values.nsPrefix }}:8080/status/cluster)
+
+if [ -z "$GET_CLUSTER_STATUS_RESPONSE" ]; then
+  echo "Tabular store is unreachable."
+  exit 2
+fi
+
+# Check the status XML for an empty <DeadNodes/> element, which indicates
+# that no region servers are reported dead.
+DEAD_NODES=$(echo "$GET_CLUSTER_STATUS_RESPONSE" | grep "<DeadNodes/>")
+
+if [ -n "$DEAD_NODES" ]; then
+  echo "Tabular store is up and accessible."
+  exit 0
+else
+  echo "Tabular store is up but is reporting dead nodes - cluster may be in degraded state."
+  exit 1
+fi
diff --git a/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
new file mode 100755
index 0000000000..bbb080f44d
--- /dev/null
+++ b/kubernetes/consul/resources/config/consul-agent-config/scripts/vid-mariadb-script.sh
@@ -0,0 +1,14 @@
+NAME=$(/consul/bin/kubectl -n {{ .Values.nsPrefix }} get pod | grep -o "vid-mariadb[^[:space:]]*" | head -n 1)
+
+if [ -n "$NAME" ]; then
+   if /consul/bin/kubectl -n {{ .Values.nsPrefix }} exec -it "$NAME" -- bash -c 'mysqladmin status -u root -p$MYSQL_ROOT_PASSWORD' > /dev/null; then
+      echo "Success. mariadb process is running."
+      exit 0
+   else
+      echo "Failed. mariadb process is not running."
+      exit 1
+   fi
+else
+   echo "Failed. mariadb container is offline."
+   exit 1
+fi