#!/bin/bash
#############################################################################
# Copyright © 2019 Bell.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
#
# This script performs an RKE install of Kubernetes;
# after it completes, run the standard OOM install.
# It can be run on any Ubuntu 16.04/18.04 VM, RHEL 7.6 (root only), physical host, or Azure/AWS cloud host.
# https://wiki.onap.org/display/DW/OOM+RKE+Kubernetes+Deployment
# source from https://jira.onap.org/browse/OOM-1598
#
# master/dublin
# RKE 0.1.16 Kubernetes 1.11.6, kubectl 1.11.6, Helm 2.9.1, Docker 18.06
# 20190428 RKE 0.2.1, Kubernetes 1.13.5, kubectl 1.13.5, Helm 2.12.3, Docker 18.09.5
# single node install, HA pending
usage() {
cat <<EOF
Usage: $0 [PARAMs]
example
sudo ./rke_setup.sh -b dublin -s rke.onap.cloud -e onap -l amdocs -v true
-u : Display usage
-b [branch] : branch = master or dublin (required)
-s [server] : server = IP or DNS name (required)
-e [environment] : use the default (onap)
-k [key] : ssh key name
-l [username] : login username account (use ubuntu for example)
-v [validate] : true|false (optional, default false)
EOF
}
install_onap() {
#constants
PORT=8880
KUBERNETES_VERSION=
RKE_VERSION=0.2.1
KUBECTL_VERSION=1.13.5
HELM_VERSION=2.12.3
DOCKER_VERSION=18.09
# copy your private ssh key and cluster.yml file to the vm
# on your dev machine
#sudo cp ~/.ssh/onap_rsa .
#sudo chmod 777 onap_rsa
#scp onap_rsa ubuntu@192.168.241.132:~/
# on this vm
#sudo chmod 400 onap_rsa
#sudo cp onap_rsa ~/.ssh
# make sure the public key is set up correctly in
# sudo vi ~/.ssh/authorized_keys
echo "please supply your ssh key as provided by the -k keyname - it must be be chmod 400 and chown user:user in ~/.ssh/"
echo "The RKE version specific cluster.yaml is already integrated in this script for 0.2.1 no need for below generation..."
echo "rke config --name cluster.yml"
echo "specifically"
echo "address: $SERVER"
echo "user: $USERNAME"
echo "ssh_key_path: $SSHPATH_PREFIX/$SSHKEY"
# image versions matching RKE 0.2.1 / Kubernetes 1.13.5
RKETOOLS=0.1.27
HYPERCUBE=1.13.5-rancher1
POD_INFRA_CONTAINER=rancher/pause:3.1
cat > cluster.yml <<EOF
# generated from rke_setup.sh
nodes:
- address: $SERVER
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - worker
  - etcd
  hostname_override: ""
  user: $USERNAME
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
  kubelet:
    image: ""
    extra_args:
      max-pods: 900
    extra_binds: []
    extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
network:
  plugin: canal
  options: {}
authentication:
  strategy: x509
  sans: []
  webhook: null
system_images:
  etcd: rancher/coreos-etcd:v3.2.24-rancher1
  alpine: rancher/rke-tools:v$RKETOOLS
  nginx_proxy: rancher/rke-tools:v$RKETOOLS
  cert_downloader: rancher/rke-tools:v$RKETOOLS
  kubernetes_services_sidecar: rancher/rke-tools:v$RKETOOLS
  kubedns: rancher/k8s-dns-kube-dns:1.15.0
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
  kubernetes: rancher/hyperkube:v$HYPERCUBE
  flannel: rancher/coreos-flannel:v0.10.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
  calico_node: rancher/calico-node:v3.4.0
  calico_cni: rancher/calico-cni:v3.4.0
  calico_controllers: ""
  calico_ctl: rancher/calico-ctl:v2.0.0
  canal_node: rancher/calico-node:v3.4.0
  canal_cni: rancher/calico-cni:v3.4.0
  canal_flannel: rancher/coreos-flannel:v0.10.0
  weave_node: weaveworks/weave-kube:2.5.0
  weave_cni: weaveworks/weave-npc:2.5.0
  pod_infra_container: $POD_INFRA_CONTAINER
  ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
  metrics_server: rancher/metrics-server:v0.3.1
ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: false
kubernetes_version: "$KUBERNETES_VERSION"
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
restore:
  restore: false
  snapshot_name: ""
dns: null
EOF
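# Optional sanity check (not part of the original flow; assumes python3 with PyYAML
# is available): confirm the generated cluster.yml parses before handing it to rke.
#python3 -c 'import yaml; yaml.safe_load(open("cluster.yml"))' && echo "cluster.yml parses OK"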
echo "Installing on ${SERVER} for ${BRANCH}: RKE: ${RKE_VERSION} Kubectl: ${KUBECTL_VERSION} Helm: ${HELM_VERSION} Docker: ${DOCKER_VERSION} username: ${USERNAME}"
sudo echo "127.0.0.1 ${SERVER}" >> /etc/hosts
echo "Install docker - If you must install as non-root - comment out the docker install below - run it separately, run the user mod, logout/login and continue this script"
curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
sudo usermod -aG docker $USERNAME
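# Note: docker group membership only applies to new login sessions; in the current
# shell either keep using sudo for docker or switch group interactively, e.g.
#newgrp docker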
echo "Install RKE"
sudo wget https://github.com/rancher/rke/releases/download/v$RKE_VERSION/rke_linux-amd64
mv rke_linux-amd64 rke
sudo chmod +x rke
sudo mv ./rke /usr/local/bin/rke
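# optional check that the rke binary is installed and on the PATH
rke --version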
echo "Install make - required for beijing+ - installed via yum groupinstall Development Tools in RHEL"
# ubuntu specific
sudo apt-get install make -y
sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
sudo chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
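# optional check of the kubectl client binary (the server side is not up yet)
kubectl version --client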
sudo mkdir -p ~/.kube
wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm
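# optional check of the helm client binary (tiller is installed later)
helm version --client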
echo "Bringing RKE up - using supplied cluster.yml"
sudo rke up
echo "wait 2 extra min for the cluster"
sleep 60
echo "1 more min"
sleep 60
echo "copy kube_config_cluter.yaml generated - to ~/.kube/config"
sudo cp kube_config_cluster.yml ~/.kube/config
# avoid using sudo for kubectl
sudo chmod 777 ~/.kube/config
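# Alternative (equivalent): point kubectl at the RKE-generated kubeconfig without copying it
#export KUBECONFIG=$(pwd)/kube_config_cluster.yml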
echo "Verify all pods up on the kubernetes system - will return localhost:8080 until a host is added"
echo "kubectl get pods --all-namespaces"
kubectl get pods --all-namespaces
echo "install tiller/helm"
kubectl -n kube-system create serviceaccount tiller
kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
helm init --service-account tiller
kubectl -n kube-system rollout status deploy/tiller-deploy
echo "upgrade server side of helm in kubernetes"
if [ "$USERNAME" == "root" ]; then
helm version
else
sudo helm version
fi
echo "sleep 30"
sleep 30
if [ "$USERNAME" == "root" ]; then
helm init --upgrade
else
sudo helm init --upgrade
fi
echo "sleep 30"
sleep 30
echo "verify both versions are the same below"
if [ "$USERNAME" == "root" ]; then
helm version
else
sudo helm version
fi
echo "start helm server"
if [ "$USERNAME" == "root" ]; then
helm serve &
else
sudo helm serve &
fi
echo "sleep 30"
sleep 30
echo "add local helm repo"
if [ "$USERNAME" == "root" ]; then
helm repo add local http://127.0.0.1:8879
helm repo list
else
sudo helm repo add local http://127.0.0.1:8879
sudo helm repo list
fi
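# Optional (illustrative): once charts have been built and pushed to the local repo
# (e.g. by the OOM helm chart build), they can be listed with:
#helm search local/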
echo "To enable grafana dashboard - do this after running cd.sh which brings up onap - or you may get a 302xx port conflict"
echo "kubectl expose -n kube-system deployment monitoring-grafana --type=LoadBalancer --name monitoring-grafana-client"
echo "to get the nodeport for a specific VM running grafana"
echo "kubectl get services --all-namespaces | grep graf"
sudo docker version
helm version
kubectl version
kubectl get services --all-namespaces
kubectl get pods --all-namespaces
echo "finished!"
}
BRANCH=
SERVER=
ENVIRON=
VALIDATE=false
USERNAME=ubuntu
SSHPATH_PREFIX=~/.ssh
while getopts ":ub:s:e:l:k:v:" PARAM; do
  case $PARAM in
    u)
      usage
      exit 1
      ;;
    b)
      BRANCH=${OPTARG}
      ;;
    e)
      ENVIRON=${OPTARG}
      ;;
    s)
      SERVER=${OPTARG}
      ;;
    l)
      USERNAME=${OPTARG}
      ;;
    k)
      SSHKEY=${OPTARG}
      ;;
    v)
      VALIDATE=${OPTARG}
      ;;
    ?)
      usage
      exit
      ;;
  esac
done
if [[ -z $BRANCH ]] || [[ -z $SERVER ]]; then
  usage
  exit 1
fi
install_onap $BRANCH $SERVER $ENVIRON $USERNAME $SSHPATH_PREFIX $SSHKEY $VALIDATE