diff options
Diffstat (limited to 'kud/hosting_providers')
9 files changed, 577 insertions, 10 deletions
diff --git a/kud/hosting_providers/containerized/README.md b/kud/hosting_providers/containerized/README.md new file mode 100644 index 00000000..12ce1a19 --- /dev/null +++ b/kud/hosting_providers/containerized/README.md @@ -0,0 +1,141 @@ +# Multi cluster installation + +## Introduction + +Multi Cluster installation is an important feature for production deployments. + +Most of the projects are using Kubernetes as the undercloud orchestration. So deploying multiple clusters for the multi cloud region should be maintained by Kubernetes. + +This section explains how to deploy the Multi cluster of Kubernetes from a containerized KUD running as a Kubernetes Job. + +## How it works + +KUD installation installer is divided into two regions with args - `--install_pkg` and `--cluster <cluster-name>` + +### Args +**--install_pkg** - Installs packages required to run the installer script itself inside a container and kubespray packages + +**--cluster < cluster-name >** - Installs k8s cluster, addons and plugins and stores the artifacts in the host machine + +### Internal Mechanism + +* Container image is built using the `installer --install_pkg` arg and a Kubernetes job is used to install the cluster using `installer --cluster <cluster-name>`. Installer will invoke the kubespray cluster.yml, kud-addons and plugins ansible cluster. 
+ +Installer script finds the `hosts.ini` for each cluster in `/opt/kud/multi-cluster/<cluster-name>` + +Kubernetes jobs (a cluster per job) are used to install multiple clusters and logs of each cluster deployment are stored in `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts are stored in `/opt/kud/multi-cluster/<cluster-name>/artifacts` + +## Quickstart Installation Guide + +Build the kud docker images as follows, add KUD_ENABLE_TESTS & KUD_PLUGIN_ENABLED for testing only: + +``` +$ git clone https://github.com/onap/multicloud-k8s.git && cd multicloud-k8s +$ docker build --rm \ + --build-arg http_proxy=${http_proxy} \ + --build-arg HTTP_PROXY=${HTTP_PROXY} \ + --build-arg https_proxy=${https_proxy} \ + --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ + --build-arg no_proxy=${no_proxy} \ + --build-arg NO_PROXY=${NO_PROXY} \ + --build-arg KUD_ENABLE_TESTS=true \ + --build-arg KUD_PLUGIN_ENABLED=true \ + -t github.com/onap/multicloud-k8s:latest . -f build/Dockerfile +``` +Let's create a cluster-101 and cluster-102 hosts.ini as follows: + +``` +$ mkdir -p /opt/kud/multi-cluster/{cluster-101,cluster-102} +``` +Create hosts.ini as follows in the directory cluster-101 (c01 IP address 10.10.10.3) and cluster-102 (c02 IP address 10.10.10.5) + +``` +/opt/kud/multi-cluster/cluster-101/hosts.ini +[all] +c01 ansible_ssh_host=10.10.10.3 ansible_ssh_port=22 + +[kube-master] +c01 + +[kube-node] +c01 + +[etcd] +c01 + +[ovn-central] +c01 + +[ovn-controller] +c01 + +[virtlet] +c01 + +[k8s-cluster:children] +kube-node +kube-master +``` +Do the same for the cluster-102 with c02 and IP address 10.10.10.5. + +Create the ssh secret for Baremetal or VM based on your deployment. 
and Launch the kubernetes job as follows +``` +$ kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub +$ CLUSTER_NAME=cluster-101 +$ cat <<EOF | kubectl create -f - +apiVersion: batch/v1 +kind: Job +metadata: + name: kud-$CLUSTER_NAME +spec: + template: + spec: + hostNetwork: true + containers: + - name: kud + image: github.com/onap/multicloud-k8s:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - name: multi-cluster + mountPath: /opt/kud/multi-cluster + - name: secret-volume + mountPath: "/.ssh" + command: ["/bin/sh","-c"] + args: ["cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster $CLUSTER_NAME --plugins onap4k8s"] + securityContext: + privileged: true + volumes: + - name: multi-cluster + hostPath: + path: /opt/kud/multi-cluster + - name: secret-volume + secret: + secretName: ssh-key-secret + restartPolicy: Never + backoffLimit: 0 + +EOF +``` + +Multi - cluster information from the host machine; + +``` +$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-101/artifacts/admin.conf cluster-info +Kubernetes master is running at https://192.168.121.2:6443 +coredns is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy +kubernetes-dashboard is running at https://192.168.121.2:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy + +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. +$ kubectl --kubeconfig=/opt/kud/multi-cluster/cluster-102/artifacts/admin.conf cluster-info +Kubernetes master is running at https://192.168.121.6:6443 +coredns is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy +kubernetes-dashboard is running at https://192.168.121.6:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy + +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
+``` + + +## License + +Apache-2.0 diff --git a/kud/hosting_providers/containerized/installer b/kud/hosting_providers/containerized/installer new file mode 120000 index 00000000..2b6cb163 --- /dev/null +++ b/kud/hosting_providers/containerized/installer @@ -0,0 +1 @@ +installer.sh
\ No newline at end of file diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh new file mode 100755 index 00000000..f1b95acb --- /dev/null +++ b/kud/hosting_providers/containerized/installer.sh @@ -0,0 +1,312 @@ +#!/bin/bash +#SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2018 +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +set -o errexit +set -o nounset +set -o pipefail +set -ex + +INSTALLER_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")" + +function install_prerequisites { +#install package for docker images + apt-get update + apt-get install -y curl vim wget git \ + software-properties-common python-pip sudo + add-apt-repository -y ppa:longsleep/golang-backports + apt-get update + apt-get install -y golang-go rsync +} + +# _install_ansible() - Install and Configure Ansible program +function _install_ansible { + local version=$(grep "ansible_version" ${kud_playbooks}/kud-vars.yml | + awk -F ': ' '{print $2}') + mkdir -p /etc/ansible/ + pip install ansible==$version +} + +# install_k8s() - Install Kubernetes using kubespray tool +function install_kubespray { + echo "Deploying kubernetes" + version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \ + awk -F ': ' '{print $2}') + local_release_dir=$(grep "local_release_dir" \ + $kud_inventory_folder/group_vars/k8s-cluster.yml | \ + awk -F "\"" '{print $2}') + local tarball=v$version.tar.gz + # install make to run mitogen target & unzip is mitogen playbook dependency + apt-get install -y sshpass make unzip + _install_ansible + wget 
https://github.com/kubernetes-incubator/kubespray/archive/$tarball + tar -C $dest_folder -xzf $tarball + mv $dest_folder/kubespray-$version/ansible.cfg /etc/ansible/ansible.cfg + chown -R root:root $dest_folder/kubespray-$version + mkdir -p ${local_release_dir}/containers + rm $tarball + + pushd $dest_folder/kubespray-$version/ + pip install -r ./requirements.txt + make mitogen + popd + rm -f $kud_inventory_folder/group_vars/all.yml 2> /dev/null + if [[ -n "${verbose:-}" ]]; then + echo "kube_log_level: 5" | tee \ + $kud_inventory_folder/group_vars/all.yml + else + echo "kube_log_level: 2" | tee \ + $kud_inventory_folder/group_vars/all.yml + fi + echo "kubeadm_enabled: true" | \ + tee --append $kud_inventory_folder/group_vars/all.yml + if [[ -n "${http_proxy:-}" ]]; then + echo "http_proxy: \"$http_proxy\"" | tee --append \ + $kud_inventory_folder/group_vars/all.yml + fi + if [[ -n "${https_proxy:-}" ]]; then + echo "https_proxy: \"$https_proxy\"" | tee --append \ + $kud_inventory_folder/group_vars/all.yml + fi +} + +function install_k8s { + version=$(grep "kubespray_version" ${kud_playbooks}/kud-vars.yml | \ + awk -F ': ' '{print $2}') + local cluster_name=$1 + ansible-playbook $verbose -i \ + $kud_inventory $dest_folder/kubespray-$version/cluster.yml \ + -e cluster_name=$cluster_name --become --become-user=root | \ + tee $cluster_log/setup-kubernetes.log + + # Configure environment + mkdir -p $HOME/.kube + cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config + # Copy Kubespray kubectl to be usable in host running Ansible. 
+ # Requires kubectl_localhost: true in inventory/group_vars/k8s-cluster.yml + if !(which kubectl); then + cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/ + fi + + cp -rf $kud_inventory_folder/artifacts \ + /opt/kud/multi-cluster/$cluster_name/ +} + +# install_addons() - Install Kubernetes AddOns +function install_addons { + if [ ${1:+1} ]; then + local plugins_name="$1" + echo "additional addons plugins $1" + else + local plugins_name="" + echo "no additional addons plugins" + fi + + source /etc/environment + echo "Installing Kubernetes AddOns" + ansible-galaxy install $verbose -r \ + $kud_infra_folder/galaxy-requirements.yml --ignore-errors + + ansible-playbook $verbose -i \ + $kud_inventory $kud_playbooks/configure-kud.yml | \ + tee $cluster_log/setup-kud.log + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov $plugins_name}; do + echo "Deploying $addon using configure-$addon.yml playbook.." + ansible-playbook $verbose -i \ + $kud_inventory $kud_playbooks/configure-${addon}.yml | \ + tee $cluster_log/setup-${addon}.log + done + + echo "Run the test cases if testing_enabled is set to true." + if [[ "${testing_enabled}" == "true" ]]; then + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov $plugins_name}; do + pushd $kud_tests + bash ${addon}.sh + popd + done + fi + echo "Add-ons deployment complete..." 
+} + +# install_plugin() - Install ONAP Multicloud Kubernetes plugin +function install_plugin { + echo "Installing multicloud/k8s onap4k8s plugin" + if [[ "${testing_enabled}" == "true" ]]; then + pushd $kud_tests + echo "Test the onap4k8s installation" + bash onap4k8s.sh + echo "Test the onap4k8s plugin installation" + for functional_test in plugin_edgex plugin_fw; do + bash ${functional_test}.sh --external + done + popd + fi +} + +# install_controllers() - Install ONAP Multicloud Kubernetes controllers +function install_controllers { + echo "Installing multicloud/k8s onap4k8s controllers" + if [[ "${testing_enabled}" == "true" ]]; then + echo "Test controllers installation" + for controller_test in sdwan; do + pushd $kud_tests/$controller_test + ansible-playbook $verbose -i \ + $kud_inventory ${controller_test}.yml | \ + tee $cluster_log/test-${controller_test}.log + popd + done + fi +} + +# _print_kubernetes_info() - Prints the login Kubernetes information +function _print_kubernetes_info { + if ! 
$(kubectl version &>/dev/null); then + return + fi + + # Expose Dashboard using NodePort + node_port=30080 + KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" \ + kubectl -n kube-system edit service kubernetes-dashboard + KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \ + kubectl -n kube-system edit service kubernetes-dashboard + + master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \ + awk -F ":" '{print $2}') + + printf "Kubernetes Info\n===============\n" > $k8s_info_file + echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file + echo "Admin user: kube" >> $k8s_info_file + echo "Admin password: secret" >> $k8s_info_file +} + +verbose="" +if [[ -n "${KUD_DEBUG:-}" ]]; then + set -o xtrace + verbose="-vvv" +fi + +# Configuration values +dest_folder=/opt +kud_folder=${INSTALLER_DIR} +kud_infra_folder=$kud_folder/../../deployment_infra +kud_playbooks=$kud_infra_folder/playbooks +kud_tests=$kud_folder/../../tests +k8s_info_file=$kud_folder/k8s_info.log +testing_enabled=${KUD_ENABLE_TESTS:-false} + +mkdir -p /opt/csar +export CSAR_DIR=/opt/csar + +function install_pkg { +# Install dependencies + apt-get update + install_prerequisites + install_kubespray +} + +function install_cluster { + install_k8s $1 + if [ ${2:+1} ]; then + echo "install default addons and $2" + install_addons "$2" + else + install_addons + fi + + echo "installed the addons" + if ${KUD_PLUGIN_ENABLED:-false}; then + install_plugin + echo "installed the install_plugin" + install_controllers + echo "installed controllers" + fi + _print_kubernetes_info +} + +function usage { + echo "installer usage:" + echo "./installer.sh --install_pkg - Install the required softwarepackage" + echo "./installer.sh --cluster <cluster name> \ +- Install k8s cluster with default plugins" + echo "./installer.sh --cluster <cluster name> \ +--plugins <plugin_1 plugin_2> - Install k8s cluster with default plugins \ +and additional plugins such as onap4k8s." 
+} + +if [ $# -eq 0 ]; then + echo "Error: No arguments supplied" + usage + exit 1 +fi + +if [ -z "$1" ]; then + echo "Error: Null argument passed" + usage + exit 1 +fi + +if [ "$1" == "--install_pkg" ]; then + export kud_inventory_folder=$kud_folder/inventory + kud_inventory=$kud_inventory_folder/hosts.ini + install_pkg + echo "install pkg" + exit 0 +fi + +if [ "$1" == "--cluster" ]; then + if [ -z "${2-}" ]; then + echo "Error: Cluster name is null" + usage + exit 1 + fi + + cluster_name=$2 + kud_multi_cluster_path=/opt/kud/multi-cluster + cluster_path=$kud_multi_cluster_path/$cluster_name + echo $cluster_path + if [ ! -d "${cluster_path}" ]; then + echo "Error: cluster_path ${cluster_path} doesn't exit" + usage + exit 1 + fi + + cluster_log=$kud_multi_cluster_path/$cluster_name/log + export kud_inventory_folder=$kud_folder/inventory/$cluster_name + kud_inventory=$kud_inventory_folder/hosts.ini + + mkdir -p $kud_inventory_folder + mkdir -p $cluster_log + cp $kud_multi_cluster_path/$cluster_name/hosts.ini $kud_inventory_folder/ + cp -rf $kud_folder/inventory/group_vars $kud_inventory_folder/ + + if [ ${3:+1} ]; then + if [ "$3" == "--plugins" ]; then + if [ -z "${4-}" ]; then + echo "Error: plugins arguments is null; Refer the usage" + usage + exit 1 + fi + plugins_name=${@:4:$#} + install_cluster $cluster_name "$plugins_name" + exit 0 + else + echo "Error: cluster argument should have plugins; \ + Refer the usage" + usage + exit 1 + fi + fi + install_cluster $cluster_name + exit 0 +fi + +echo "Error: Refer the installer usage" +usage +exit 1 diff --git a/kud/hosting_providers/containerized/inventory/group_vars/all.yml b/kud/hosting_providers/containerized/inventory/group_vars/all.yml new file mode 100644 index 00000000..528430c1 --- /dev/null +++ b/kud/hosting_providers/containerized/inventory/group_vars/all.yml @@ -0,0 +1,2 @@ +kube_log_level: 2 +kubeadm_enabled: true diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml 
b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml new file mode 100644 index 00000000..31d0d669 --- /dev/null +++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml @@ -0,0 +1,82 @@ +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2018 +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +system_namespace: kube-system + +# Logging directory (sysvinit systems) +kube_log_dir: "/var/log/kubernetes" + +kube_api_anonymous_auth: true + +# Users to create for basic auth in Kubernetes API via HTTP +# Optionally add groups for user +kube_api_pwd: "secret" +kube_users: + kube: + pass: "{{kube_api_pwd}}" + role: admin + groups: + - system:masters + +## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) +#kube_oidc_auth: false +kube_basic_auth: true +kube_token_auth: true + +# Choose network plugin (calico, contiv, weave or flannel) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: flannel + +# Make a copy of kubeconfig (admin.conf) on the host that runs Ansible to inventory/artifacts +kubeconfig_localhost: true +# Copy kubectl binary on the host that runs Ansible to inventory/artifacts +kubectl_localhost: true + +# Enable MountPropagation gate feature +local_volumes_enabled: true 
+local_volume_provisioner_enabled: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.14.3 + +# Helm deployment +helm_enabled: true + +# Kube-proxy proxyMode configuration. +# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and +# works in the kernel space +# https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-ipvs +#kube_proxy_mode: ipvs + +# Download container images only once then push to cluster nodes in batches +download_run_once: false + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" + +# Makes the installer node a delegate for pushing images while running +# the deployment with ansible. This maybe the case if cluster nodes +# cannot access each over via ssh or you want to use local docker +# images as a cache for multiple clusters. +download_localhost: false + +# Subnet for cluster IPs +kube_service_addresses: 10.244.0.0/18 + +# Subnet for Pod IPs +kube_pods_subnet: 10.244.64.0/18 + +# disable localdns cache +enable_nodelocaldns: false diff --git a/kud/hosting_providers/vagrant/Vagrantfile b/kud/hosting_providers/vagrant/Vagrantfile index 2d1b5ab4..58251fe9 100644 --- a/kud/hosting_providers/vagrant/Vagrantfile +++ b/kud/hosting_providers/vagrant/Vagrantfile @@ -10,8 +10,8 @@ ############################################################################## box = { - :virtualbox => { :name => 'elastic/ubuntu-16.04-x86_64', :version => '20180708.0.0' }, - :libvirt => { :name => 'elastic/ubuntu-16.04-x86_64', :version=> '20180210.0.0'} + :virtualbox => { :name => 'elastic/ubuntu-18.04-x86_64', :version => '20191013.0.0' }, + :libvirt => { :name => 'peru/ubuntu-18.04-server-amd64'} } require 'yaml' diff --git a/kud/hosting_providers/vagrant/clean_sriov.sh b/kud/hosting_providers/vagrant/clean_sriov.sh new file mode 100644 index 00000000..76b8a960 --- 
/dev/null +++ b/kud/hosting_providers/vagrant/clean_sriov.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# SPDX-license-identifier: Apache-2.0 +############################################################################## +# Copyright (c) 2018 +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +modprobe -r iavf +kver=`uname -a | awk '{print $3}'` +rm -rf /lib/modules/$kver/updates/drivers/net/ethernet/intel/iavf/iavf.ko +depmod -a +sudo rm -rf /tmp/sriov +sudo rm -rf iavf-3.7.34.tar.gz diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh index 41b21f64..235736e1 100755 --- a/kud/hosting_providers/vagrant/installer.sh +++ b/kud/hosting_providers/vagrant/installer.sh @@ -21,6 +21,11 @@ function _install_go { version=$(grep "go_version" ${kud_playbooks}/kud-vars.yml | awk -F "'" '{print $2}') local tarball=go$version.linux-amd64.tar.gz + #gcc is required for go apps compilation + if ! 
which gcc; then + sudo apt-get install -y gcc + fi + if $(go version &>/dev/null); then return fi @@ -107,6 +112,7 @@ function install_k8s { local_release_dir=$(grep "local_release_dir" $kud_inventory_folder/group_vars/k8s-cluster.yml | awk -F "\"" '{print $2}') local tarball=v$version.tar.gz sudo apt-get install -y sshpass make unzip # install make to run mitogen target and unzip is mitogen playbook dependency + sudo apt-get install -y gnupg2 software-properties-common _install_docker _install_ansible wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball @@ -148,17 +154,20 @@ function install_addons { echo "Installing Kubernetes AddOns" _install_ansible sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors - ansible-playbook $verbose -i $kud_inventory $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log - for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd}; do + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov}; do echo "Deploying $addon using configure-$addon.yml playbook.." ansible-playbook $verbose -i $kud_inventory $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log - if [[ "${testing_enabled}" == "true" ]]; then + done + echo "Run the test cases if testing_enabled is set to true." + if [[ "${testing_enabled}" == "true" ]]; then + for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov}; do pushd $kud_tests bash ${addon}.sh popd - fi - done + done + fi + echo "Add-ons deployment complete..." 
} # install_plugin() - Install ONAP Multicloud Kubernetes plugin @@ -229,11 +238,9 @@ kud_playbooks=$kud_infra_folder/playbooks kud_tests=$kud_folder/../../tests k8s_info_file=$kud_folder/k8s_info.log testing_enabled=${KUD_ENABLE_TESTS:-false} - sudo mkdir -p $log_folder sudo mkdir -p /opt/csar sudo chown -R $USER /opt/csar - # Install dependencies # Setup proxy variables if [ -f $kud_folder/sources.list ]; then diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml index 14146742..fb744d0e 100644 --- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml +++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml @@ -42,7 +42,8 @@ kube_network_plugin: flannel kubeconfig_localhost: true # Copy kubectl binary on the host that runs Ansible to inventory/artifacts kubectl_localhost: true - +# Disable nodelocal dns cache +enable_nodelocaldns: false # Enable MountPropagation gate feature local_volumes_enabled: true local_volume_provisioner_enabled: true @@ -71,3 +72,8 @@ local_release_dir: "/tmp/releases" # cannot access each over via ssh or you want to use local docker # images as a cache for multiple clusters. download_localhost: true + +# Subnet for cluster IPs +kube_service_addresses: 10.244.0.0/18 +# Subnet for Pod IPs +kube_pods_subnet: 10.244.64.0/18 |