Diffstat (limited to 'kud/hosting_providers/containerized')
-rw-r--r--  kud/hosting_providers/containerized/README.md                            |  23
-rw-r--r--  kud/hosting_providers/containerized/addons/README.md.tmpl                |  45
-rw-r--r--  kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl    |  19
-rw-r--r--  kud/hosting_providers/containerized/addons/values.yaml.tmpl              |  24
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh                         | 149
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml |  11
6 files changed, 237 insertions, 34 deletions
diff --git a/kud/hosting_providers/containerized/README.md b/kud/hosting_providers/containerized/README.md
index 2f9a9e52..bd5b08a8 100644
--- a/kud/hosting_providers/containerized/README.md
+++ b/kud/hosting_providers/containerized/README.md
@@ -21,9 +21,9 @@ KUD installation installer is divided into two regions with args - `--install-pk
* Container image is built using the `installer --install-pkg` arg, and a Kubernetes job is used to install the cluster using `installer --cluster <cluster-name>`. The installer will invoke the kubespray cluster.yml, kud-addons, and plugin ansible playbooks.
-Installer script finds the `hosts.init` for each cluster in `/opt/multi-cluster/<cluster-name>`
+The installer script finds the `hosts.ini` for each cluster in `/opt/kud/multi-cluster/<cluster-name>`
-Kubernetes jobs(a cluster per job) are used to install multiple clusters and logs of each cluster deployments are stored in the `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts are stored as follows `/opt/kud/multi-cluster/<cluster-name>/artifacts`
+Kubernetes jobs (a cluster per job) are used to install multiple clusters. Logs of each cluster deployment are stored in `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts in `/opt/kud/multi-cluster/<cluster-name>/artifacts`
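For orientation, a per-cluster directory following the paths above might look like this (an illustrative sketch; actual contents depend on the deployment):
```
/opt/kud/multi-cluster/
  cluster-101/
    hosts.ini      # inventory file the installer looks for
    logs/          # per-cluster deployment logs
    artifacts/     # kubeconfig (admin.conf) and other generated artifacts
```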
## Creating TestBed for Testing and Development
@@ -38,26 +38,31 @@ $ pushd multicloud-k8s/kud/hosting_providers/containerized/testing
$ vagrant up
$ popd
```
-Do following steps to keep note of
+Do the following steps and take note of the results:
1. Get the IP address for the Vagrant machine - <VAGRANT_IP_ADDRESS>
2. Copy the host /root/.ssh/id_rsa.pub into the vagrant /root/.ssh/authorized_keys
3. From the host, make sure you can ssh into vagrant without a password: ssh root@<VAGRANT_IP_ADDRESS> (see the sketch below)
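A minimal sketch of steps 2 and 3, assuming the default root key paths named above:
```
$ ssh-copy-id -i /root/.ssh/id_rsa.pub root@<VAGRANT_IP_ADDRESS>
$ ssh root@<VAGRANT_IP_ADDRESS> hostname   # should succeed without a password prompt
```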
## Quickstart Installation Guide
-Build the kud docker images as follows, add KUD_ENABLE_TESTS & KUD_PLUGIN_ENABLED for the testing only:
+Build the kud docker images as follows. Add `KUD_ENABLE_TESTS` & `KUD_PLUGIN_ENABLED`
+for testing only. Currently docker and containerd are the only supported CRI
+runtimes; select one with the `CONTAINER_RUNTIME` environment variable.
+To run secure containers using Kata Containers, the CRI runtime must be
+changed to containerd.
```
$ git clone https://github.com/onap/multicloud-k8s.git && cd multicloud-k8s
-$ docker build --rm \
+$ docker build --rm \
--build-arg http_proxy=${http_proxy} \
--build-arg HTTP_PROXY=${HTTP_PROXY} \
--build-arg https_proxy=${https_proxy} \
--build-arg HTTPS_PROXY=${HTTPS_PROXY} \
--build-arg no_proxy=${no_proxy} \
--build-arg NO_PROXY=${NO_PROXY} \
- --build-arg KUD_ENABLE_TESTS=true \
- --build-arg KUD_PLUGIN_ENABLED=true \
+ --build-arg KUD_ENABLE_TESTS=true \
+ --build-arg KUD_PLUGIN_ENABLED=true \
+ --build-arg CONTAINER_RUNTIME=docker \
-t github.com/onap/multicloud-k8s:latest . -f kud/build/Dockerfile
```
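To build for containerd instead, only the runtime build arg changes; a minimal sketch (proxy and test args omitted for brevity):
```
$ docker build --rm \
    --build-arg CONTAINER_RUNTIME=containerd \
    -t github.com/onap/multicloud-k8s:latest . -f kud/build/Dockerfile
```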
Let's create hosts.ini files for cluster-101 and cluster-102 as follows
@@ -66,7 +71,7 @@ Let's create a cluster-101 and cluster-102 hosts.ini as follows
$ mkdir -p /opt/kud/multi-cluster/{cluster-101,cluster-102}
```
-Create hosts.ini as follows in the direcotry cluster-101(c01 IP address 10.10.10.3) and cluster-102(c02 IP address 10.10.10.5). If user used Vagrant setup as mentioned in the above steps, replace the IP address with vagrant IP address
+Create a hosts.ini as follows in each directory: cluster-101 (c01, IP address 10.10.10.3) and cluster-102 (c02, IP address 10.10.10.5). If the user used a Vagrant setup as mentioned in the above steps, replace the IP address with the vagrant IP address.
```
$ cat /opt/kud/multi-cluster/cluster-101/hosts.ini
@@ -97,7 +102,7 @@ kube-master
```
Do the same for cluster-102 with c02 and IP address 10.10.10.5.
-Create the ssh secret for Baremetal or VM based on your deployment. and Launch the kubernetes job as follows
+Create the ssh secret for Baremetal or VM based on your deployment. Launch the Kubernetes job as follows.
```
$ kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub
$ CLUSTER_NAME=cluster-101
diff --git a/kud/hosting_providers/containerized/addons/README.md.tmpl b/kud/hosting_providers/containerized/addons/README.md.tmpl
new file mode 100644
index 00000000..8ab16104
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/README.md.tmpl
@@ -0,0 +1,45 @@
+# Installing KUD addons with emcoctl
+
+1. Customize values.yaml and values-resources.yaml as needed
+
+To create a customized profile for a specific addon, edit the profile
+as needed, and then (for example, cpu-manager):
+
+```
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/helm .
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager_profile.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/profile .
+```
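The same pattern applies to any addon, substituting the addon name; the installer's install_host_artifacts function (see installer.sh below) produces tarballs of exactly this shape:
```
 tar -czf /opt/kud/multi-cluster/addons/<addon>.tar.gz -C /opt/kud/multi-cluster/addons/<addon>/helm .
 tar -czf /opt/kud/multi-cluster/addons/<addon>_profile.tar.gz -C /opt/kud/multi-cluster/addons/<addon>/profile .
```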
+
+2. Create prerequisites to deploy addons
+
+Apply prerequisites.yaml. This step is optional. If there are
+existing resources in the cluster, it is sufficient to customize
+values.yaml with the values of those resources. The supplied
+prerequisites.yaml creates controllers, one project, one cluster, and
+one logical cloud.
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml\`
+
+3. Deploy addons
+
+Apply composite-app.yaml with each values file. This deploys the
+addons listed in the \`Apps\` value of values.yaml and
+values-resources.yaml.
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml\`
+
+# Uninstalling KUD addons with emcoctl
+
+1. Delete addons
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml\`
+
+2. Cleanup prerequisites
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml\`
+
+#### NOTE
+
+Known issue: deletion of the resources sometimes fails, as some
+resources cannot be deleted before others are deleted. This can
+happen due to a timing issue. In that case, try deleting again and
+the deletion should succeed.
diff --git a/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl b/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
new file mode 100644
index 00000000..ed568238
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
@@ -0,0 +1,19 @@
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- sriov-network
+CompositeApp: addon-resources
+CompositeProfile: addon-resources-profile
+DeploymentIntentGroup: addon-resources-deployment-intent-group
+DeploymentIntent: addon-resources-deployment-intent
+GenericPlacementIntent: addon-resources-placement-intent
diff --git a/kud/hosting_providers/containerized/addons/values.yaml.tmpl b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
new file mode 100644
index 00000000..62936beb
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
@@ -0,0 +1,24 @@
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- multus-cni
+- ovn4nfv
+- node-feature-discovery
+- sriov-network-operator
+- qat-device-plugin
+- cpu-manager
+CompositeApp: addons
+CompositeProfile: addons-profile
+DeploymentIntentGroup: addons-deployment-intent-group
+DeploymentIntent: addons-deployment-intent
+GenericPlacementIntent: addons-placement-intent
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index b2ec52af..427850ab 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -22,7 +22,7 @@ function install_prerequisites {
find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
apt-get update
apt-get install -y curl vim wget git \
- software-properties-common python-pip sudo
+ software-properties-common python-pip sudo gettext-base
add-apt-repository -y ppa:longsleep/golang-backports
apt-get update
apt-get install -y golang-go rsync
@@ -77,29 +77,44 @@ function install_kubespray {
fi
}
-# install_k8s() - Install Kubernetes using kubespray tool
+# install_k8s() - Install Kubernetes using kubespray; installs Kata Containers when the runtime is containerd
function install_k8s {
local cluster_name=$1
ansible-playbook $verbose -i \
$kud_inventory $kud_playbooks/preconfigure-kubespray.yml \
--become --become-user=root | \
tee $cluster_log/setup-kubernetes.log
- ansible-playbook $verbose -i \
- $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
- -e cluster_name=$cluster_name --become --become-user=root | \
- tee $cluster_log/setup-kubernetes.log
+ if [ "$container_runtime" == "docker" ]; then
+ echo "Docker will be used as the container runtime interface"
+ ansible-playbook $verbose -i \
+ $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
+ -e cluster_name=$cluster_name --become --become-user=root | \
+ tee $cluster_log/setup-kubernetes.log
+ elif [ "$container_runtime" == "containerd" ]; then
+ echo "Containerd will be used as the container runtime interface"
+ ansible-playbook $verbose -i \
+ $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
+ -e $kud_kata_override_variables -e cluster_name=$cluster_name \
+ --become --become-user=root | \
+ tee $cluster_log/setup-kubernetes.log
+ #Install Kata Containers in containerd scenario
+ ansible-playbook $verbose -i \
+ $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-kata.yml | \
+ tee $cluster_log/setup-kata.log
+ else
+        echo "Only docker and containerd are supported as container runtimes"
+ exit 1
+ fi
# Configure environment
+ # Requires kubeconfig_localhost and kubectl_localhost to be true
+ # in inventory/group_vars/k8s-cluster.yml
mkdir -p $HOME/.kube
cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
- # Copy Kubespray kubectl to be usable in host running Ansible.
- # Requires kubectl_localhost: true in inventory/group_vars/k8s-cluster.yml
if !(which kubectl); then
cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
fi
-
- cp -rf $kud_inventory_folder/artifacts \
- /opt/kud/multi-cluster/$cluster_name/
}
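The inventory switches mentioned in the comment above are standard kubespray variables; in inventory/group_vars/k8s-cluster.yml they would read:
```
kubeconfig_localhost: true
kubectl_localhost: true
```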
# install_addons() - Install Kubernetes AddOns
@@ -118,21 +133,37 @@ function install_addons {
$kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i \
- $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | \
- tee $cluster_log/setup-kud.log
- # The order of KUD_ADDONS is important: some plugins (sriov, qat)
- # require nfd to be enabled.
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml \
+ | tee $cluster_log/setup-kud.log
+
+ kud_addons="${KUD_ADDONS:-} ${plugins_name}"
+
+ for addon in ${kud_addons}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
ansible-playbook $verbose -i \
- $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | \
+ $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-${addon}.yml | \
tee $cluster_log/setup-${addon}.log
done
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
failed_kud_tests=""
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ # Run Kata test first if Kata was installed
+ if [ "$container_runtime" == "containerd" ]; then
+ #Install Kata webhook for test pods
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/setup-kata-webhook.log
+ kata_webhook_deployed=true
+ pushd $kud_tests
+ bash kata.sh || failed_kud_tests="${failed_kud_tests} kata"
+ popd
+ fi
+ #Run other plugin tests
+ for addon in ${kud_addons}; do
pushd $kud_tests
bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
case $addon in
@@ -144,21 +175,80 @@ function install_addons {
;;
"emco" )
echo "Test the emco plugin installation"
- for functional_test in plugin_fw_v2; do
- bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
- done
+ # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
+ # for functional_test in plugin_fw_v2; do
+ # bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
+ # done
;;
esac
popd
done
+ # Remove Kata webhook if user didn't want it permanently installed
+ if ! [ "$enable_kata_webhook" == "true" ] && [ "$kata_webhook_deployed" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook-reset.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/kata-webhook-reset.log
+ kata_webhook_deployed=false
+ fi
if [[ ! -z "$failed_kud_tests" ]]; then
echo "Test cases failed:${failed_kud_tests}"
return 1
fi
fi
+
+ # Check if Kata webhook should be installed and isn't already installed
+ if [ "$enable_kata_webhook" == "true" ] && ! [ "$kata_webhook_deployed" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/setup-kata-webhook.log
+ fi
+
echo "Add-ons deployment complete..."
}
+function master_ip {
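+    # e.g. https://10.10.10.3:6443 splits on ':' and '/' into (https, , , 10.10.10.3, 6443), so $4 is the IP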
+ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}'
+}
+
+# Copy installation artifacts to be usable in host running Ansible
+function install_host_artifacts {
+ local -r cluster_name=$1
+ local -r host_dir="/opt/kud/multi-cluster"
+ local -r host_addons_dir="${host_dir}/addons"
+ local -r host_artifacts_dir="${host_dir}/${cluster_name}/artifacts"
+
+ for addon in cpu-manager multus-cni node-feature-discovery ovn4nfv qat-device-plugin sriov-network sriov-network-operator; do
+ mkdir -p ${host_addons_dir}/${addon}/{helm,profile}
+ cp -r ${kud_infra_folder}/helm/${addon} ${host_addons_dir}/${addon}/helm
+ cp -r ${kud_infra_folder}/profiles/${addon}/* ${host_addons_dir}/${addon}/profile
+ tar -czf ${host_addons_dir}/${addon}.tar.gz -C ${host_addons_dir}/${addon}/helm .
+ tar -czf ${host_addons_dir}/${addon}_profile.tar.gz -C ${host_addons_dir}/${addon}/profile .
+ done
+
+ mkdir -p ${host_addons_dir}/tests
+ for test in _common _common_test _functions multus ovn4nfv nfd sriov-network qat cmk; do
+ cp ${kud_tests}/${test}.sh ${host_addons_dir}/tests
+ done
+
+ mkdir -p ${host_artifacts_dir}
+ cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}
+
+ mkdir -p ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/examples/prerequisites.yaml ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/composite-app.yaml ${host_artifacts_dir}/addons
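+    # Render each addons/*.tmpl with envsubst, substituting the cluster name,
+    # master IP, kubeconfig path and packages path set per invocation below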
+ for template in addons/*.tmpl; do
+ CLUSTER_NAME="${cluster_name}" \
+ HOST_IP="$(master_ip)" \
+ KUBE_PATH="${host_artifacts_dir}/admin.conf" \
+ PACKAGES_PATH="${host_addons_dir}" \
+ envsubst <${template} >${host_artifacts_dir}/${template%.tmpl}
+ done
+}
+
# _print_kubernetes_info() - Prints the login Kubernetes information
function _print_kubernetes_info {
if ! $(kubectl version &>/dev/null); then
@@ -172,11 +262,8 @@ function _print_kubernetes_info {
KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \
kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}')
-
printf "Kubernetes Info\n===============\n" > $k8s_info_file
- echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Dashboard URL: https://$(master_ip):$node_port" >> $k8s_info_file
echo "Admin user: kube" >> $k8s_info_file
echo "Admin password: secret" >> $k8s_info_file
}
@@ -195,6 +282,15 @@ kud_playbooks=$kud_infra_folder/playbooks
kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
testing_enabled=${KUD_ENABLE_TESTS:-false}
+container_runtime=${CONTAINER_RUNTIME:-docker}
+enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
+kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-qemu}
+kata_webhook_deployed=false
+# For containerd the etcd_deployment_type: docker is the default and doesn't work.
+# You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
+# See https://github.com/kubernetes-sigs/kubespray/issues/5713
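+# Note: the embedded "-e" separators rely on this variable being expanded
+# unquoted ("-e $kud_kata_override_variables" in install_k8s)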
+kud_kata_override_variables="container_manager=containerd \
+ -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs"
mkdir -p /opt/csar
export CSAR_DIR=/opt/csar
@@ -219,6 +315,8 @@ function install_cluster {
fi
echo "installed the addons"
+ install_host_artifacts $1
+
_print_kubernetes_info
}
@@ -299,6 +397,7 @@ if [ "$1" == "--cluster" ]; then
exit 0
fi
+
echo "Error: Refer the installer usage"
usage
exit 1
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 18a55035..7d0404a5 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -51,6 +51,7 @@ local_volume_provisioner_enabled: true
# Helm deployment
helm_enabled: true
+helm_stable_repo_url: "https://charts.helm.sh/stable"
# Kube-proxy proxyMode configuration.
# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
@@ -86,10 +87,20 @@ podsecuritypolicy_enabled: true
# allowedCapabilities:
# - '*'
# by
+# allowedCapabilities:
+# - NET_ADMIN
+# - SYS_ADMIN
+# - SYS_NICE
+# - SYS_PTRACE
# requiredDropCapabilities:
# - NET_RAW
podsecuritypolicy_restricted_spec:
privileged: true
+ allowedCapabilities:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_NICE
+ - SYS_PTRACE
allowPrivilegeEscalation: true
volumes:
- '*'