#!/bin/bash
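# Post-upgrade helper for the {{ include "common.fullname" . }} Galera cluster:
# copy the current credentials from the temporary upgrade pod into the chart
# secrets, scale the StatefulSet back up, wait until the cluster is Synced and
# then remove the temporary upgrade deployment and its secrets.
# Locate the pod owned by a ReplicaSet, i.e. the temporary upgrade deployment pod.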
TEMP_POD=$(kubectl get pod -n $NAMESPACE_ENV --selector \
app='{{ include "common.fullname" . }}' -o \
jsonpath='{.items[?(@.metadata.ownerReferences[].kind=="ReplicaSet")].metadata.name}')
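# Read the MySQL user and root passwords from the temporary pod and
# base64-encode them so they can be patched into the Kubernetes secrets.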
tmp_MYSQL_PASSWORD=$(echo -n $(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv \
MYSQL_PASSWORD) | base64)
tmp_ROOT_PASSWORD=$(echo -n $(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv \
MYSQL_ROOT_PASSWORD) | base64)
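# Resolve the chart-managed secret names and patch the captured passwords into them.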
FLAG_EX_ROOT_SEC='{{ include "common.secret.getSecretNameFast" (dict "global" . "uid" (include "common.mariadb.secret.rootPassUID" .)) }}'
FLAG_EX_SEC='{{ include "common.secret.getSecretNameFast" (dict "global" . "uid" (include "common.mariadb.secret.userCredentialsUID" .)) }}'
kubectl patch secret -n $NAMESPACE_ENV $FLAG_EX_ROOT_SEC -p \
'{"data":{"password":"'"$tmp_ROOT_PASSWORD"'"}}'
kubectl patch secret -n $NAMESPACE_ENV $FLAG_EX_SEC -p \
'{"data":{"password":"'"$tmp_MYSQL_PASSWORD"'"}}'
MYSQL_USER=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv MYSQL_USER)
MYSQL_PASSWORD=$(echo -n $(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv MYSQL_PASSWORD))
MYSQL_ROOT_PASSWORD=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- printenv MYSQL_ROOT_PASSWORD)
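# Current replica counts of the Galera StatefulSet and of the temporary upgrade deployment.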
CURRENT_STS_REPLICA=$(kubectl get statefulsets -n $NAMESPACE_ENV \
{{ include "common.fullname" . }} -o jsonpath='{.status.replicas}')
DEPLOYMENT_REPLICA=$(kubectl get deployment -n $NAMESPACE_ENV \
{{ include "common.fullname" . }}-upgrade-deployment -o \
jsonpath='{.status.replicas}')
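# If the StatefulSet is still scaled down to 0, scale it back up to the configured replica count.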
if [[ $CURRENT_STS_REPLICA == "0" ]]
then
echo "It seems there was no upgrade of the cluster; scaling cluster replicas back up to $REPLICA_COUNT now"
kubectl scale statefulsets -n $NAMESPACE_ENV {{ include "common.fullname" . }} --replicas=$REPLICA_COUNT
fi
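# Wait until the StatefulSet reports the expected number of replicas.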
MY_REPLICA_NUMBER=$(kubectl get statefulsets -n $NAMESPACE_ENV \
{{ include "common.fullname" . }} -o jsonpath='{.status.replicas}')
while [[ ! $MY_REPLICA_NUMBER == $REPLICA_COUNT ]]
do
echo "The cluster is not scaled up to $REPLICA_COUNT yet. Please wait ..."
MY_REPLICA_NUMBER=$(kubectl get statefulsets -n $NAMESPACE_ENV \
{{ include "common.fullname" . }} -o jsonpath='{.status.replicas}')
echo "The current status of the cluster is $MY_REPLICA_NUMBER"
sleep 2
if [[ $MY_REPLICA_NUMBER == $REPLICA_COUNT ]]
then
break
fi
done
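# Wait until the Galera cluster size covers both the StatefulSet members and the
# temporary upgrade deployment pods, and the queried node reports the Synced state.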
CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- \
mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" | \
awk '{print $2}')
CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- \
mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \
| awk '{print $2}')
while [[ ! $CLUSTER_NO == $((REPLICA_COUNT+DEPLOYMENT_REPLICA)) ]] \
|| [[ ! $CLUSTER_STATE == "Synced" ]]
do
echo "$CLUSTER_NO and $CLUSTER_STATE"
CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysql \
--skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" \
| awk '{print $2}')
CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV $TEMP_POD -- mysql \
--skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \
| awk '{print $2}')
sleep 2
if [[ $CLUSTER_NO == $((REPLICA_COUNT+DEPLOYMENT_REPLICA)) ]] \
&& [[ $CLUSTER_STATE == "Synced" ]]
then
echo "The cluster has $CLUSTER_NO members and $CLUSTER_STATE state."
break
fi
done
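# Wait until mysqld on the first StatefulSet pod answers mysqladmin ping.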
MYSQL_STATUS=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- mysqladmin \
-uroot -p$MYSQL_ROOT_PASSWORD ping)
while [[ ! $MYSQL_STATUS == "mysqld is alive" ]]
do
echo "Mariadb deployment is not ready yet."
sleep 2
MYSQL_STATUS=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- mysqladmin \
-uroot -p$MYSQL_ROOT_PASSWORD ping)
if [[ $MYSQL_STATUS == "mysqld is alive" ]]
then
echo "Mariadb deployment is ready and cluster size is $CLUSTER_NO"
break
fi
done
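# Remove the temporary upgrade deployment and the temporary credential secrets.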
echo "Deleting upgrade deployment now"
kubectl delete deployment -n $NAMESPACE_ENV {{ include "common.fullname" . }}-upgrade-deployment
kubectl delete secret -n $NAMESPACE_ENV {{ include "common.fullname" . }}-temp-upgrade-root
kubectl delete secret -n $NAMESPACE_ENV {{ include "common.fullname" . }}-temp-upgrade-usercred
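# Final check: the cluster size should now match the StatefulSet replica count
# and the node should report Synced before the upgrade is declared finished.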
CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- \
mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" | \
awk '{print $2}')
CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- \
mysql --skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \
| awk '{print $2}')
while [[ ! $CLUSTER_NO == $REPLICA_COUNT ]] \
|| [[ ! $CLUSTER_STATE == "Synced" ]]
do
echo "$CLUSTER_NO and $CLUSTER_STATE"
CLUSTER_NO=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- mysql \
--skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';" \
| awk '{print $2}')
CLUSTER_STATE=$(kubectl exec -n $NAMESPACE_ENV {{ include "common.fullname" . }}-0 -- mysql \
--skip-column-names -h{{ $.Values.service.name }} -u$MYSQL_USER \
-p$MYSQL_PASSWORD -e "SHOW GLOBAL STATUS LIKE 'wsrep_local_state_comment';" \
| awk '{print $2}')
sleep 2
if [[ $CLUSTER_NO == $REPLICA_COUNT ]] \
&& [[ $CLUSTER_STATE == "Synced" ]]
then
echo "The cluster has $CLUSTER_NO members and $CLUSTER_STATE state."
break
fi
done
echo "The cluster upgrade is finished now"