summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael O'Brien <michael@obrienlabs.org>2019-01-15 16:49:51 -0500
committerMichael O'Brien <michael@obrienlabs.org>2019-01-15 17:41:08 -0500
commit1d330491d358c6483a9d3e231354783c697d2d4c (patch)
treef9763c67b14212d1ed6948b357c74a6e1cc8f856
parenta16f913d92feb0e3ce4fefb15cb63564dcc48f7c (diff)
enable kubectl del --force for rogue pods
Change-Id: Ic60262dd5b471f64bee2b1c9581edf54ab1eddd6 Issue-ID: LOG-914 Signed-off-by: Michael O'Brien <michael@obrienlabs.org>
-rwxr-xr-xdeploy/cd.sh31
1 file changed, 16 insertions, 15 deletions
diff --git a/deploy/cd.sh b/deploy/cd.sh
index ebb1241..ae8e45a 100755
--- a/deploy/cd.sh
+++ b/deploy/cd.sh
@@ -17,7 +17,7 @@
#############################################################################
# v20190115
# https://wiki.onap.org/display/DW/ONAP+on+Kubernetes
-# source from https://jira.onap.org/browse/OOM-320, 326, 321, 898, 925
+# source from https://jira.onap.org/browse/OOM-320, 326, 321, 898, 925, 914
# Michael O'Brien
#
@@ -90,9 +90,7 @@ deploy_onap() {
if [ "$BRANCH" == "amsterdam" ]; then
oom/kubernetes/oneclick/deleteAll.bash -n $ENVIRON
else
- # run undeploy for completeness of the deploy/undeploy cycle - note that pv/pvcs are not deleted in all cases
- # this will fail as expected on a clean first run of the deployment - the plugin will be installed for run n+1
- sudo helm undeploy $ENVIRON --purge
+ echo "kubectl delete namespace $ENVIRON"
# workaround for secondary orchestration in dcae
kubectl delete namespace $ENVIRON
echo "sleep for 4 min to allow the delete to finish pod terminations before trying a helm delete"
@@ -101,21 +99,23 @@ deploy_onap() {
fi
# verify
- DELETED=$(kubectl get pods --all-namespaces | grep -E '0/|1/2|1/3|2/3' | wc -l)
+ DELETED=$(kubectl get pods --namespace $ENVIRON | grep -E '0/|1/2|1/3|2/3' | wc -l)
echo "showing $DELETED undeleted pods"
+ kubectl get pods --namespace $ENVIRON | grep -E '0/|1/2|1/3|2/3'
echo "verify deletion is finished."
+
+ # this block: for very infrequent rogue pods that are out-of-band of kubernetes - they don't delete without a force delete
# max number of cycles exits to --force block next
- local MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE=120 # 30 min
- local DELETE_COUNTER=0
- while [ $(kubectl get pods --all-namespaces | grep -E '0/|1/2|1/3|2/3' | wc -l) -gt 0 ]; do
+ local MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE=40 # 10 min
+ while [ $(kubectl get pods --namespace $ENVIRON | grep -E '0/|1/2|1/3|2/3' | wc -l) -gt 0 ]; do
sleep 15
- echo "waiting for deletions to complete, iteration $DELETE_COUNTER of $MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE"
+ echo "waiting for deletions to complete, iterations left: $MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE"
# addressing rare occurrence of Terminating instances requiring scripted --force in next merge for LOG-914
- COUNTER=$((COUNTER + 1 ))
MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE=$((MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE - 1))
if [ "$MAX_DELETION_WAIT_PERIODS_BEFORE_RUNNING_FORCE" -eq 0 ]; then
#https://wiki.onap.org/display/DW/ONAP+Development#ONAPDevelopment-WorkingwithJSONPath
- export POD_NAMES=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
+ #export POD_NAMES=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
+ export POD_NAMES=$(kubectl get pods --namespace $ENVIRON -o jsonpath="{.items[*].metadata.name}")
echo "--force delete on pods: $POD_NAMES"
for pod in $POD_NAMES; do
echo "running: kubectl delete pods $pod --grace-period=0 --force -n $ENVIRON"
@@ -123,8 +123,9 @@ deploy_onap() {
done
fi
done
- echo "Pod deletions completed"
-
+
+ echo "Pod deletions completed - running helm undeploy then kubectl delete pv,pvc,secrets,clusterrolebindings"
+ sudo helm undeploy $ENVIRON --purge
# specific to when there is no helm release
kubectl delete pv --all
kubectl delete pvc --all
@@ -222,7 +223,7 @@ deploy_onap() {
# account for pods that have varying deploy times or replicaset sizes
# don't count the 0/1 completed pods - and skip most of the ResultSet instances except 1
# dcae bootstrap is problematic
- DEPLOY_NUMBER_PODS_PARTIAL_ARRAY=(2 5 13 9 11 1 11 2 6 16 10 12 11 2 8 6 3 18 2 5 5 5 1 9 11 3 1)
+ DEPLOY_NUMBER_PODS_PARTIAL_ARRAY=(2 5 11 9 11 1 11 2 6 16 10 12 11 2 8 6 3 18 2 5 5 5 1 9 11 3 1)
else
# casablanca branches
DEPLOY_ORDER_POD_NAME_ARRAY=('consul msb dmaap dcaegen2 aaf robot aai esr multicloud oof so sdc sdnc vid policy portal log vfc uui vnfsdk appc clamp cli pomba vvp contrib sniro-emulator')
@@ -231,7 +232,7 @@ deploy_onap() {
# account for pods that have varying deploy times or replicaset sizes
# don't count the 0/1 completed pods - and skip most of the ResultSet instances except 1
# dcae bootstrap is problematic
- DEPLOY_NUMBER_PODS_PARTIAL_ARRAY=(2 5 13 9 11 1 11 2 6 16 10 12 11 2 8 6 3 18 2 5 5 5 1 9 11 3 1)
+ DEPLOY_NUMBER_PODS_PARTIAL_ARRAY=(2 5 11 9 11 1 11 2 6 16 10 12 11 2 8 6 3 18 2 5 5 5 1 9 11 3 1)
fi
echo "deploying for $BRANCH using profile $DEPLOY_ORDER_POD_NAME_ARRAY"
else