author    Bartek Grzybowski <b.grzybowski@partner.samsung.com>  2021-06-28 15:06:58 +0200
committer Bartek Grzybowski <b.grzybowski@partner.samsung.com>  2021-07-01 11:53:44 +0000
commit    504c2354a791aeaee6faf746ef8831671f57a7da (patch)
tree      8f7fbed1a6ea78bc6843e897d2b0c0cc416d1a4d
parent    71c385b0762310a5c764c260f164b2fefafd352b (diff)
[TOOLS] Use release's chart manifest to collect kubernetes objects
Since Helm v3 no longer shows the kubernetes objects associated with a
release, the release's chart manifest is used to collect them instead.
Ref.: https://github.com/helm/helm/issues/5952

Change-Id: Idf16124663186b8d5b8ce2b408bdf7d399b12b48
Issue-ID: OOM-2770
Signed-off-by: Bartek Grzybowski <b.grzybowski@partner.samsung.com>
-rwxr-xr-x  tools/helm-healer.sh  39
1 file changed, 5 insertions(+), 34 deletions(-)
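For reference, the replacement pipeline can be exercised on its own: it renders the release's chart manifest with helm get manifest and has kubectl get -f - resolve every object defined in it. The sketch below restates that pipeline outside the script; the namespace "onap" and the release name "my-release" are illustrative placeholders, not values taken from this change.

#!/bin/sh
# Minimal sketch of the manifest-based resource listing used by the new
# get_resources_for_component(). NAMESPACE and RELEASE are example values.
NAMESPACE=onap
RELEASE=my-release

# 'helm get manifest' prints the rendered kubernetes manifests of the release;
# 'kubectl get -f -' looks up each of those objects and prints '<kind>/<name>'
# in its NAME column. awk keeps that first column, and the two greps drop the
# header rows and the blank lines separating resource kinds.
helm -n "${NAMESPACE}" get manifest "${RELEASE}" \
    | kubectl -n "${NAMESPACE}" get -f - \
    | awk '{print $1}' \
    | grep -v NAME \
    | grep -v '^$'

Output is one entry per object in the fully qualified form kubectl prints, e.g. deployment.apps/<name> or job.batch/<name>, which is why the case patterns in undeploy_component() in the diff below are updated to match those lowercase, group-qualified kinds.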
diff --git a/tools/helm-healer.sh b/tools/helm-healer.sh
index a6870fe2..650c7d12 100755
--- a/tools/helm-healer.sh
+++ b/tools/helm-healer.sh
@@ -222,34 +222,7 @@ delete_job()
#arg: <component>
get_resources_for_component()
{
-
-helm -n ${NAMESPACE} status $1 | awk -f <(cat - <<-'EOD'
-BEGIN {
- work="no"
- kind=""
- a["dummy"]=""
-}
-
-$1 ~ ":" {
- if ( $1 == "RESOURCES:" ) {
- work="yes"
-} else {
- work="no"
-}
-
-}
-
-$1 == "==>" {
- split($2, a, "[/(]")
- kind=a[2]
-}
-
-$1 != "NAME" && $1 != "==>" && work == "yes" && $1 !~ ":" && $1 != "" {
- printf "%s/%s\n", kind, $1
-}
-
-EOD
-)
+ helm -n ${NAMESPACE} get manifest $1 | kubectl -n ${NAMESPACE} get -f - | awk '{print $1}' | grep -v NAME | grep -v ^$
}
# arg: <resource>
@@ -259,8 +232,7 @@ delete_resource()
local _kind="${_resource%/*}"
local _name="${_resource#*/}"
-
- if kubectl get ${_resource} >/dev/null 2>&1; then
+ if kubectl -n ${NAMESPACE} get ${_resource} >/dev/null 2>&1; then
msg "${_resource} has not been removed with helm undeploy, manual removal is required. Proceeding"
kubectl delete ${_resource} -n ${NAMESPACE} \
--cascade=true \
@@ -378,19 +350,18 @@ undeploy_component()
for resource in ${_component_resources[@]}; do
case $resource in
- CronJob/* | Job/* | Secret/* | ConfigMap/* | Pod/* | Service/* | Deployment/* | StatefulSet/*)
+ cronjob/* | job.batch/* | secret/* | configmap/* | service/* | deployment.apps/* | statefulset.apps/* | serviceaccount/* | rolebinding.rbac.authorization.k8s.io/* | role.rbac.authorization.k8s.io/* | poddisruptionbudget.policy/* | clusterrolebinding.rbac.authorization.k8s.io/*)
_standard+=(${resource});;
#Ignoring PVC, they will be handled along with PV as 'helm' status does not return them for some components
- PersistentVolumeClaim/*)
+ persistentvolumeclaim/*)
;;
- PersistentVolume/*)
+ persistentvolume/*)
_persistent_volumes+=(${resource});;
*)
_unknown_kinds+=(${resource})
esac
done
-
#Gathering physical location of directories for persistent volumes to delete them after undeploy
declare -a _physical_locations
for volume in ${_persistent_volumes[@]}; do