summaryrefslogtreecommitdiffstats
path: root/kud/hosting_providers
diff options
context:
space:
mode:
authorRitu Sood <Ritu.Sood@intel.com>2019-10-17 15:46:33 +0000
committerGerrit Code Review <gerrit@onap.org>2019-10-17 15:46:33 +0000
commit07eb93be83349a4da6b41c5a0a1dc98284462098 (patch)
treedcb115d66ee58a2845710f343c7c4070ca656788 /kud/hosting_providers
parent17183fa19ce4cca8e7768ab6da1b46c51fd0d589 (diff)
parent3c1c267a44fb959b8bd87e56655935bb12b58b1a (diff)
Merge "Adding kud containerized features"
Diffstat (limited to 'kud/hosting_providers')
-rw-r--r--kud/hosting_providers/containerized/README.md139
l---------kud/hosting_providers/containerized/installer1
-rwxr-xr-xkud/hosting_providers/containerized/installer.sh223
-rw-r--r--kud/hosting_providers/containerized/inventory/group_vars/all.yml2
-rw-r--r--kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml73
5 files changed, 438 insertions, 0 deletions
diff --git a/kud/hosting_providers/containerized/README.md b/kud/hosting_providers/containerized/README.md
new file mode 100644
index 00000000..4119ca78
--- /dev/null
+++ b/kud/hosting_providers/containerized/README.md
@@ -0,0 +1,139 @@
+# Multi cluster installation
+
+## Introduction
+
+Multi-cluster installation is an important feature for production deployments.
+
+Most projects use Kubernetes as the undercloud orchestration layer, so deployment of multiple clusters across multiple cloud regions should also be managed through Kubernetes.
+
+This section explains how to deploy the Multi cluster of Kubernetes from a containerized KUD running as a Kubernetes Job.
+
+## How it works
+
+The KUD installer is divided into two stages, selected with the args `--install_pkg` and `--cluster <cluster-name>`
+
+### Args
+**--install_pkg** - Installs the packages required to run the installer script itself inside a container, plus the kubespray packages
+
+**--cluster < cluster-name >** - Installs the k8s cluster, addons and plugins, and stores the artifacts on the host machine
+
+### Internal Mechanism
+
+* The container image is built using the `installer --install_pkg` arg, and a Kubernetes job is used to install the cluster using `installer --cluster <cluster-name>`. The installer will invoke the kubespray cluster.yml, kud-addons and plugins ansible playbooks.
+
+The installer script finds the `hosts.ini` for each cluster in `/opt/kud/multi-cluster/<cluster-name>`
+
+Kubernetes jobs(a cluster per job) are used to install multiple clusters and logs of each cluster deployments are stored in the `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts are stored as follows `/opt/kud/multi-cluster/<cluster-name>/artifacts`
+
+## Quickstart Installation Guide
+
+Build the kud docker images as follows:
+
+```
+$ git clone https://github.com/onap/multicloud-k8s.git && cd multicloud-k8s
+$ docker build --rm \
+ --build-arg http_proxy=${http_proxy} \
+ --build-arg HTTP_PROXY=${HTTP_PROXY} \
+ --build-arg https_proxy=${https_proxy} \
+ --build-arg HTTPS_PROXY=${HTTPS_PROXY} \
+ --build-arg no_proxy=${no_proxy} \
+ --build-arg NO_PROXY=${NO_PROXY} \
+ -t github.com/onap/multicloud-k8s:latest . -f build/Dockerfile
+```
+Let's create a cluster-101 and cluster-102 hosts.ini as follows
+
+```
+$ mkdir -p /opt/kud/multi-cluster/{cluster-101,cluster-102}
+```
+
+Create hosts.ini as follows in the directory cluster-101(c01 IP address 10.10.10.3) and cluster-102(c02 IP address 10.10.10.5)
+
+```
+/opt/kud/multi-cluster/cluster-101/hosts.ini
+[all]
+c01 ansible_ssh_host=10.10.10.3 ansible_ssh_port=22
+
+[kube-master]
+c01
+
+[kube-node]
+c01
+
+[etcd]
+c01
+
+[ovn-central]
+c01
+
+[ovn-controller]
+c01
+
+[virtlet]
+c01
+
+[k8s-cluster:children]
+kube-node
+kube-master
+```
+Do the same for the cluster-102 with c02 and IP address 10.10.10.5.
+
+Create the SSH secret for bare-metal or VM deployments, and launch the Kubernetes job as follows
+```
+$ kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub
+$ CLUSTER_NAME=cluster-101
+$ cat <<EOF | kubectl create -f -
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: kud-$CLUSTER_NAME
+spec:
+ template:
+ spec:
+ hostNetwork: true
+ containers:
+ - name: kud
+ image: github.com/onap/multicloud-k8s:latest
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: multi-cluster
+ mountPath: /opt/kud/multi-cluster
+ - name: secret-volume
+ mountPath: "/.ssh"
+ command: ["/bin/sh","-c"]
+ args: ["cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster $CLUSTER_NAME"]
+ securityContext:
+ privileged: true
+ volumes:
+ - name: multi-cluster
+ hostPath:
+ path: /opt/kud/multi-cluster
+ - name: secret-volume
+ secret:
+ secretName: ssh-key-secret
+ restartPolicy: Never
+ backoffLimit: 0
+
+EOF
+```
+
+Multi-cluster information from the host machine:
+
+```
+$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-101/artifacts/admin.conf cluster-info
+Kubernetes master is running at https://192.168.121.2:6443
+coredns is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
+kubernetes-dashboard is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-102/artifacts/admin.conf cluster-info
+Kubernetes master is running at https://192.168.121.6:6443
+coredns is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
+kubernetes-dashboard is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+```
+
+
+## License
+
+Apache-2.0
diff --git a/kud/hosting_providers/containerized/installer b/kud/hosting_providers/containerized/installer
new file mode 120000
index 00000000..2b6cb163
--- /dev/null
+++ b/kud/hosting_providers/containerized/installer
@@ -0,0 +1 @@
+installer.sh \ No newline at end of file
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
new file mode 100755
index 00000000..426c89a0
--- /dev/null
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -0,0 +1,223 @@
+#!/bin/bash
+#SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Fail fast: abort on any command error, on use of unset variables, and on
+# failures anywhere in a pipeline; -x traces each command for CI logs.
+set -o errexit
+set -o nounset
+set -o pipefail
+set -ex
+
+# Absolute directory of this script, with symlinks resolved (the repo ships
+# an "installer" symlink pointing at installer.sh).
+INSTALLER_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
+
+# install_prerequisites() - Install the base OS packages needed inside the
+# installer container image: common tools, python-pip (used later to install
+# ansible), and golang + rsync.
+function install_prerequisites {
+#install package for docker images
+    apt-get update
+    apt-get install -y curl vim wget git \
+        software-properties-common python-pip
+    # Ubuntu's stock golang is too old; pull a newer one from this PPA
+    add-apt-repository ppa:longsleep/golang-backports
+    apt-get update
+    apt-get install -y golang-go rsync
+}
+
+# _install_ansible() - Install and Configure Ansible program
+# Reads the pinned ansible version from kud-vars.yml so the container matches
+# the version used by the rest of KUD, then installs it via pip.
+function _install_ansible {
+    # kud-vars.yml holds a line like "ansible_version: X.Y.Z"; take the value
+    local version=$(grep "ansible_version" ${kud_playbooks}/kud-vars.yml | \
+        awk -F ': ' '{print $2}')
+    mkdir -p /etc/ansible/
+    pip install ansible==$version
+}
+
+# install_kubespray() - Download and prepare the kubespray tool
+# Fetches the pinned kubespray release tarball, unpacks it under
+# $dest_folder, installs its python requirements and the mitogen ansible
+# strategy, then (re)generates inventory/group_vars/all.yml with log level,
+# kubeadm_enabled and any proxy settings from the environment.
+function install_kubespray {
+    echo "Deploying kubernetes"
+    # Pinned kubespray release, e.g. "kubespray_version: X.Y.Z" in kud-vars.yml
+    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
+        awk -F ': ' '{print $2}')
+    # Extract the quoted value of local_release_dir from k8s-cluster.yml
+    local_release_dir=$(grep "local_release_dir" \
+        $kud_inventory_folder/group_vars/k8s-cluster.yml | \
+        awk -F "\"" '{print $2}')
+    local tarball=v$version.tar.gz
+    # install make to run mitogen target & unzip is mitogen playbook dependency
+    apt-get install -y sshpass make unzip
+    _install_ansible
+    wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
+    tar -C $dest_folder -xzf $tarball
+    mv $dest_folder/kubespray-$version/ansible.cfg /etc/ansible/ansible.cfg
+    chown -R root:root $dest_folder/kubespray-$version
+    mkdir -p ${local_release_dir}/containers
+    rm $tarball
+
+    pushd $dest_folder/kubespray-$version/
+    pip install -r ./requirements.txt
+    make mitogen
+    popd
+    # Rebuild all.yml from scratch below; ignore error if it does not exist
+    rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null
+    if [[ -n "${verbose:-}" ]]; then
+        echo "kube_log_level: 5" | tee \
+            $kud_inventory_folder/group_vars/all.yml
+    else
+        echo "kube_log_level: 2" | tee \
+            $kud_inventory_folder/group_vars/all.yml
+    fi
+    echo "kubeadm_enabled: true" | \
+        tee --append $kud_inventory_folder/group_vars/all.yml
+    # Propagate proxy settings from the build environment into the inventory
+    if [[ -n "${http_proxy:-}" ]]; then
+        echo "http_proxy: \"$http_proxy\"" | tee --append \
+            $kud_inventory_folder/group_vars/all.yml
+    fi
+    if [[ -n "${https_proxy:-}" ]]; then
+        echo "https_proxy: \"$https_proxy\"" | tee --append \
+            $kud_inventory_folder/group_vars/all.yml
+    fi
+}
+
+# install_k8s() - Install a Kubernetes cluster via the kubespray cluster.yml
+# playbook. $1 is the cluster name; logs go to $cluster_log and the kubespray
+# artifacts (admin.conf, kubectl) are copied to
+# /opt/kud/multi-cluster/<cluster-name>/ on the host mount.
+function install_k8s {
+    version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \
+        awk -F ': ' '{print $2}')
+    local cluster_name=$1
+    ansible-playbook $verbose -i \
+        $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
+        -e cluster_name=$cluster_name --become --become-user=root | \
+        tee $cluster_log/setup-kubernetes.log
+
+    # Configure environment
+    mkdir -p $HOME/.kube
+    cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
+    # Copy Kubespray kubectl to be usable in host running Ansible.
+    # Requires kubectl_localhost: true in inventory/group_vars/k8s-cluster.yml
+    if !(which kubectl); then
+        cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
+    fi
+
+    # Persist artifacts outside the container via the host-mounted volume
+    cp -rf $kud_inventory_folder/artifacts \
+        /opt/kud/multi-cluster/$cluster_name/
+}
+
+# install_addons() - Install Kubernetes AddOns
+# Runs the configure-kud.yml playbook, then one configure-<addon>.yml playbook
+# per addon in KUD_ADDONS (default: virtlet ovn4nfv nfd). When
+# testing_enabled=true, each addon's smoke test from $kud_tests is executed.
+function install_addons {
+    source /etc/environment
+    echo "Installing Kubernetes AddOns"
+    # Galaxy roles are best-effort: --ignore-errors keeps going on failures
+    ansible-galaxy install $verbose -r \
+        $kud_infra_folder/galaxy-requirements.yml --ignore-errors
+
+    ansible-playbook $verbose -i \
+        $kud_inventory $kud_playbooks/configure-kud.yml | \
+        tee $cluster_log/setup-kud.log
+    for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd}; do
+        echo "Deploying $addon using configure-$addon.yml playbook.."
+        ansible-playbook $verbose -i \
+            $kud_inventory $kud_playbooks/configure-${addon}.yml | \
+            tee $cluster_log/setup-${addon}.log
+        if [[ "${testing_enabled}" == "true" ]]; then
+            pushd $kud_tests
+            bash ${addon}.sh
+            popd
+        fi
+    done
+}
+
+# install_plugin() - Install ONAP Multicloud Kubernetes plugin
+# Builds the plugin from the repo's deployments directory; when
+# testing_enabled=true it also starts the plugin and runs its functional
+# tests (plugin, plugin_edgex, plugin_fw).
+function install_plugin {
+    echo "Installing multicloud/k8s plugin"
+    mkdir -p /opt/{kubeconfig,consul/config}
+    # Expose the freshly-generated kubeconfig where the plugin expects it
+    cp $HOME/.kube/config /opt/kubeconfig/kud
+
+    pushd $kud_folder/../../../deployments
+    ./build.sh
+    if [[ "${testing_enabled}" == "true" ]]; then
+        ./start.sh
+        pushd $kud_tests
+        for functional_test in plugin plugin_edgex plugin_fw; do
+            bash ${functional_test}.sh
+        done
+        popd
+    fi
+    popd
+}
+
+# _print_kubernetes_info() - Prints the login Kubernetes information
+# Exposes the dashboard service on a fixed NodePort and writes access details
+# (URL, basic-auth user/password from group_vars) to $k8s_info_file.
+function _print_kubernetes_info {
+    # Silently skip when no cluster is reachable from this environment
+    if ! $(kubectl version &>/dev/null); then
+        return
+    fi
+
+    # Expose Dashboard using NodePort
+    node_port=30080
+    # KUBE_EDITOR trick: run sed as the "editor" so `kubectl edit` patches
+    # the service non-interactively
+    KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" \
+        kubectl -n kube-system edit service kubernetes-dashboard
+    KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \
+        kubectl -n kube-system edit service kubernetes-dashboard
+
+    master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
+        awk -F ":" '{print $2}')
+
+    # Credentials match kube_users in inventory/group_vars/k8s-cluster.yml
+    printf "Kubernetes Info\n===============\n" > $k8s_info_file
+    echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+    echo "Admin user: kube" >> $k8s_info_file
+    echo "Admin password: secret" >> $k8s_info_file
+}
+
+verbose=""
+# KUD_DEBUG (any non-empty value) turns on shell xtrace and passes -vvv to
+# every ansible-playbook invocation via $verbose.
+if [[ -n "${KUD_DEBUG:-}" ]]; then
+    set -o xtrace
+    verbose="-vvv"
+fi
+
+# Configuration values
+# dest_folder      - where the kubespray release is unpacked
+# kud_folder       - directory containing this script
+# kud_infra_folder - KUD deployment_infra tree (playbooks, galaxy reqs)
+# testing_enabled  - run smoke/functional tests when KUD_ENABLE_TESTS=true
+dest_folder=/opt
+kud_folder=${INSTALLER_DIR}
+kud_infra_folder=$kud_folder/../../deployment_infra
+kud_playbooks=$kud_infra_folder/playbooks
+kud_tests=$kud_folder/../../tests
+k8s_info_file=$kud_folder/k8s_info.log
+testing_enabled=${KUD_ENABLE_TESTS:-false}
+
+# NOTE(review): /opt/csar appears to be consumed by other KUD components;
+# its use is not visible in this script.
+mkdir -p /opt/csar
+
+# install_pkg() - Image-build stage (invoked via --install_pkg): install all
+# packages and the kubespray distribution needed to later deploy clusters
+# from inside the container.
+function install_pkg {
+# Install dependencies
+    apt-get update
+    install_prerequisites
+    install_kubespray
+}
+
+# install_cluster() - Runtime stage (invoked via --cluster): deploy the k8s
+# cluster named $1, install the KUD addons, optionally the multicloud/k8s
+# plugin (KUD_PLUGIN_ENABLED=true), then print cluster access info.
+function install_cluster {
+    install_k8s $1
+    install_addons
+    echo "installed the addons"
+    if ${KUD_PLUGIN_ENABLED:-false}; then
+        install_plugin
+        echo "installed the install_plugin"
+    fi
+    _print_kubernetes_info
+}
+
+
+# Entry point: dispatch on the first CLI argument.
+# NOTE(review): the README documents this flag as --install-pkg (hyphen) but
+# the script matches --install_pkg (underscore) — confirm which is intended.
+# NOTE(review): unrecognized or missing arguments fall through with no usage
+# message; with `set -o nounset` a missing $1 aborts the script.
+if [ "$1" == "--install_pkg" ]; then
+    export kud_inventory_folder=$kud_folder/inventory
+    kud_inventory=$kud_inventory_folder/hosts.ini
+    install_pkg
+    exit 0
+fi
+
+if [ "$1" == "--cluster" ]; then
+    cluster_name=$2
+    # Per-cluster layout on the host-mounted volume:
+    #   /opt/kud/multi-cluster/<name>/hosts.ini  (input inventory)
+    #   /opt/kud/multi-cluster/<name>/log        (deployment logs)
+    kud_multi_cluster_path=/opt/kud/multi-cluster
+    cluster_path=$kud_multi_cluster_path/$cluster_name
+    cluster_log=$kud_multi_cluster_path/$cluster_name/log
+    export kud_inventory_folder=$kud_folder/inventory/$cluster_name
+    kud_inventory=$kud_inventory_folder/hosts.ini
+
+    # Build a private inventory for this cluster from the host-provided
+    # hosts.ini plus the shared group_vars shipped with the image
+    mkdir -p $kud_inventory_folder
+    mkdir -p $cluster_log
+    cp $kud_multi_cluster_path/$cluster_name/hosts.ini $kud_inventory_folder/
+    cp -rf $kud_folder/inventory/group_vars $kud_inventory_folder/
+
+    install_cluster $cluster_name
+    exit 0
+fi
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/all.yml b/kud/hosting_providers/containerized/inventory/group_vars/all.yml
new file mode 100644
index 00000000..528430c1
--- /dev/null
+++ b/kud/hosting_providers/containerized/inventory/group_vars/all.yml
@@ -0,0 +1,2 @@
+kube_log_level: 2
+kubeadm_enabled: true
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
new file mode 100644
index 00000000..bc085b4e
--- /dev/null
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -0,0 +1,73 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location and namespace.
+# Editing those values will almost surely break something.
+system_namespace: kube-system
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+kube_api_anonymous_auth: true
+
+# Users to create for basic auth in Kubernetes API via HTTP
+# Optionally add groups for user
+kube_api_pwd: "secret"
+kube_users:
+ kube:
+ pass: "{{kube_api_pwd}}"
+ role: admin
+ groups:
+ - system:masters
+
+## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
+#kube_oidc_auth: false
+kube_basic_auth: true
+kube_token_auth: true
+
+# Choose network plugin (calico, contiv, weave or flannel)
+# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
+kube_network_plugin: flannel
+
+# Make a copy of kubeconfig (admin.conf) on the host that runs Ansible to inventory/artifacts
+kubeconfig_localhost: true
+# Copy kubectl binary on the host that runs Ansible to inventory/artifacts
+kubectl_localhost: true
+
+# Enable MountPropagation gate feature
+local_volumes_enabled: true
+local_volume_provisioner_enabled: true
+
+## Change this to use another Kubernetes version, e.g. a current beta release
+kube_version: v1.14.3
+
+# Helm deployment
+helm_enabled: true
+
+# Kube-proxy proxyMode configuration.
+# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
+# works in the kernel space
+# https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-ipvs
+#kube_proxy_mode: ipvs
+
+# Download container images only once then push to cluster nodes in batches
+download_run_once: false
+
+# Where the binaries will be downloaded.
+# Note: ensure that you've enough disk space (about 1G)
+local_release_dir: "/tmp/releases"
+
+# Makes the installer node a delegate for pushing images while running
+# the deployment with ansible. This maybe the case if cluster nodes
+# cannot access each over via ssh or you want to use local docker
+# images as a cache for multiple clusters.
+download_localhost: false