aboutsummaryrefslogtreecommitdiffstats
path: root/infra-healthcheck/scripts/check_onap_k8s.sh
diff options
context:
space:
mode:
authormrichomme <morgan.richomme@orange.com>2019-12-10 08:47:28 +0100
committermrichomme <morgan.richomme@orange.com>2019-12-11 12:00:31 +0100
commit1f997a66f658ff11809f44f4630fc678eb091b83 (patch)
treed29e08adfc2a5d193310d9dfc640175b0cf76fc6 /infra-healthcheck/scripts/check_onap_k8s.sh
parent78b4cdd2c5aa084ee6b8cc0f768187be907ae68c (diff)
Move integration xtesting Dockerfile to ONAP
All the Dockerfiles and xtesting configurations were hosted in gitlab.com [1]. The goal of this patch is to host these assets in ONAP. A jenkins jjb shall be created to generate the dockers and push them to the nexus (today the built-in registry of ONAP was used). These xtesting dockers reference the integration categories [2] and the integration use cases [3]. These xtesting dockers shall also simplify the way new use cases are integrated in any CI chain (jenkins or gitlab-ci based). [1]: https://gitlab.com/Orange-OpenSource/lfn/onap/integration/xtesting [2]: https://wiki.onap.org/pages/viewpage.action?pageId=71835330 [3]: http://testresults.opnfv.org/onap/api/v1/projects/integration/cases Issue-ID: INT-1366 Signed-off-by: mrichomme <morgan.richomme@orange.com> Change-Id: Iba0fc0b0415731a7a81ba0225a70ae16391dd129 Signed-off-by: mrichomme <morgan.richomme@orange.com>
Diffstat (limited to 'infra-healthcheck/scripts/check_onap_k8s.sh')
-rw-r--r--infra-healthcheck/scripts/check_onap_k8s.sh108
1 files changed, 108 insertions, 0 deletions
diff --git a/infra-healthcheck/scripts/check_onap_k8s.sh b/infra-healthcheck/scripts/check_onap_k8s.sh
new file mode 100644
index 0000000..2dffd7e
--- /dev/null
+++ b/infra-healthcheck/scripts/check_onap_k8s.sh
@@ -0,0 +1,108 @@
#!/bin/bash
#
# ONAP kubernetes infrastructure healthcheck.
# Dumps the main resources of the "onap" namespace for the CI logs, then
# verifies that every pod (functest pods excluded) is Running or Completed.
# Exit code: 0 when the cluster looks healthy, 1 otherwise.

echo "------------------------------------------------------------------------"
echo "-------------------- ONAP Check kubernetes ----------------------------"
echo "------------------------------------------------------------------------"

code=0

# get the pod list
echo "List of ONAP pods"
echo "*****************"
kubectl get pods -n onap

# show deployments
echo "Show ONAP kubernetes deployments"
echo "********************************"
kubectl get deployments -n onap
echo "------------------------------------------------------------------------"

# show SVC
echo "Show ONAP kubernetes SVC"
echo "************************"
kubectl get svc -n onap
echo "------------------------------------------------------------------------"

# show ONAP events
echo "Show ONAP kubernetes events"
echo "***************************"
kubectl get events -n onap
echo "------------------------------------------------------------------------"

# show ONAP config maps
echo "Show ONAP kubernetes config maps"
echo "********************************"
kubectl get cm -n onap
echo "------------------------------------------------------------------------"

# show ONAP jobs
echo "Show ONAP kubernetes jobs"
echo "*************************"
kubectl get jobs -n onap
echo "------------------------------------------------------------------------"

# show ONAP statefulsets
echo "Show ONAP kubernetes statefulset"
echo "********************************"
kubectl get sts -n onap
echo "------------------------------------------------------------------------"

# Count the Running pods, functest pods excluded.
# NB: --no-headers makes the count exact; the previous "wc -l  - 1" tried to
# compensate for the header line, but the header never matches "Running",
# so the total was off by one.
nb_pods=$(kubectl get pods -n onap --no-headers | grep -v functest | grep -c Running)

# Candidate failures: pods that are neither Running nor Completed
# (functest pods excluded).
list_failed_pods=$(kubectl get pods -n onap --no-headers \
    | grep -v Running | grep -v functest | grep -v Completed \
    | awk '{print $1}')
list_filtered_failed_pods=()

for pod in $list_failed_pods; do
  status=$(kubectl get pods -n onap "$pod" --no-headers | awk '{print $3}')
  # In case of Error or Init:Error we check that another instance of the
  # same job is not already Completed (kubernetes keeps failed job pods
  # around next to the successful retry).
  if [[ "$status" == "Error" || "$status" == "Init:Error" ]]; then
    echo "$pod in Status Error or Init Error found for the pods, is it really true...."
    # By default pod naming is similar: strip the random suffix to find
    # sibling instances (guard against names too short to strip).
    if (( ${#pod} > 6 )); then
      root_name=${pod::-6}
    else
      root_name=$pod
    fi
    # Use grep -q so the if tests a real exit status; the previous
    # "[ $? ]" was always true ($? expands to a non-empty string).
    if kubectl get pods -n onap | grep -- "$root_name" | grep -q Completed; then
      echo "Instance Completed found."
    else
      echo "No Completed instance found."
      list_filtered_failed_pods+=("$pod")
    fi
  else
    # Other statuses are genuinely not running / not completed pods.
    list_filtered_failed_pods+=("$pod")
  fi
done

nb_pods_not_running=${#list_filtered_failed_pods[@]}
# Comma-separated list for the summary. Joining via IFS is safe on an
# empty array; the previous ${var::-1} aborted when nothing had failed.
nice_list=$(IFS=,; echo "${list_filtered_failed_pods[*]}")

if [ "$nb_pods_not_running" -ne 0 ]; then
  echo "$nb_pods_not_running pods (on $nb_pods) are not in Running state"
  echo "---------------------------------------------------------------------"
  kubectl get pods -n onap | grep -v Running | grep -v functest | grep -v Completed
  echo "--------------------------------------------------------------------"
  echo "Describe non running pods"
  echo "*************************"
  for pod in "${list_filtered_failed_pods[@]}"; do
    echo "****************************************************************"
    kubectl describe pod "$pod" -n onap
    kubectl logs --all-containers=true -n onap "$pod"
  done
  code=1
else
  echo "all pods ($nb_pods) are running well"
fi

echo "------------------------------------------------"
echo "------- ONAP kubernetes tests ------------------"
echo "------------------------------------------------"
echo ">>> Nb Pods: $nb_pods"
echo ">>> Nb Failed Pods: $nb_pods_not_running"
echo ">>> List of Failed Pods: [$nice_list]"
echo "------------------------------------------------"
echo "------------------------------------------------"
echo "------------------------------------------------"

exit $code