Diffstat (limited to 'vagrant')
-rwxr-xr-x  vagrant/installer.sh                          |   2
-rw-r--r--  vagrant/inventory/group_vars/k8s-cluster.yml  | 111
-rwxr-xr-x  vagrant/node.sh                               |  34
-rw-r--r--  vagrant/playbooks/krd-vars.yml                |   4
-rwxr-xr-x  vagrant/setup.sh                              |   6
5 files changed, 40 insertions, 117 deletions
diff --git a/vagrant/installer.sh b/vagrant/installer.sh
index f3f11a77..30683cb0 100755
--- a/vagrant/installer.sh
+++ b/vagrant/installer.sh
@@ -140,7 +140,7 @@ function install_k8s {
 
     # Configure environment
     mkdir -p $HOME/.kube
-    mv $HOME/admin.conf $HOME/.kube/config
+    mv $krd_inventory_folder/artifacts/admin.conf $HOME/.kube/config
 }
 
 # install_addons() - Install Kubenertes AddOns
diff --git a/vagrant/inventory/group_vars/k8s-cluster.yml b/vagrant/inventory/group_vars/k8s-cluster.yml
index cc3bba55..8a012a32 100644
--- a/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -1,56 +1,21 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-### OTHER OPTIONAL VARIABLES
 ## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
 ## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
 ## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
 ## modules.
 kubelet_load_modules: true
 
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-docker_dns_servers_strict: false
-
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
 # kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
 # Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 system_namespace: kube-system
 
 # Logging directory (sysvinit systems)
 kube_log_dir: "/var/log/kubernetes"
 
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
 kube_api_anonymous_auth: true
 
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
 kube_api_pwd: "secret"
@@ -70,84 +35,14 @@ kube_token_auth: true
 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
 kube_network_plugin: flannel
 
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
+# Settings for containerized control plane
 kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
 
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
+# NGINX Ingress Controller
+ingress_nginx_enabled: true
 
 # Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
 kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: false
-artifacts_dir: "{{ ansible_env.HOME }}"
 
 # Enable MountPropagation gate feature
 local_volumes_enabled: true
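Note on the two changes above: with kubeconfig_localhost: true kept (and the kubectl_localhost/artifacts_dir overrides dropped), Kubespray leaves admin.conf in the inventory's artifacts/ directory, which is exactly where installer.sh now copies it from. A minimal post-install sanity check along those lines (a sketch, not part of the patch; the ingress controller's namespace and pod names depend on the Kubespray defaults in use):

    # run on the host that executed installer.sh
    export KUBECONFIG=$HOME/.kube/config
    kubectl get nodes -o wide
    # the NGINX ingress controller enabled above should show up here
    kubectl get pods --all-namespaces | grep ingress-nginx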
diff --git a/vagrant/node.sh b/vagrant/node.sh
index 5edc3a3c..3bf704b1 100755
--- a/vagrant/node.sh
+++ b/vagrant/node.sh
@@ -53,7 +53,33 @@ if [[ -n "${dict_volumes+x}" ]]; then
         mount_external_partition ${kv%=*} ${kv#*=}
     done
 fi
-rmmod kvm-intel
-echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf
-modprobe kvm-intel
-grep -q -i ^Y$ /sys/module/kvm_intel/parameters/nested || { echo "Nested virtualization is not enabled"; exit 1; }
+
+vendor_id=$(lscpu|grep "Vendor ID")
+if [[ $vendor_id == *GenuineIntel* ]]; then
+    kvm_ok=$(cat /sys/module/kvm_intel/parameters/nested)
+    if [[ $kvm_ok == 'N' ]]; then
+        echo "Enable Intel Nested-Virtualization"
+        rmmod kvm-intel
+        echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf
+        modprobe kvm-intel
+    fi
+else
+    kvm_ok=$(cat /sys/module/kvm_amd/parameters/nested)
+    if [[ $kvm_ok == '0' ]]; then
+        echo "Enable AMD Nested-Virtualization"
+        rmmod kvm-amd
+        sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf"
+        modprobe kvm-amd
+    fi
+fi
+source /etc/os-release || source /usr/lib/os-release
+case ${ID,,} in
+    *suse)
+    ;;
+    ubuntu|debian)
+        apt-get install -y cpu-checker
+        kvm-ok
+    ;;
+    rhel|centos|fedora)
+    ;;
+esac
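The rewritten provisioning step only reloads kvm-intel or kvm-amd when nesting is currently off, and it no longer aborts the run if nesting stays disabled. A quick manual check on a provisioned node could look like this (sketch only; kvm-ok comes from the cpu-checker package the Debian/Ubuntu branch installs):

    # Intel exposes Y/N, AMD exposes 1/0
    cat /sys/module/kvm_intel/parameters/nested 2>/dev/null
    cat /sys/module/kvm_amd/parameters/nested 2>/dev/null
    # on Ubuntu/Debian, kvm-ok also confirms that /dev/kvm is usable
    command -v kvm-ok >/dev/null && sudo kvm-ok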
diff --git a/vagrant/playbooks/krd-vars.yml b/vagrant/playbooks/krd-vars.yml
index 89aa21db..d5bec5b6 100644
--- a/vagrant/playbooks/krd-vars.yml
+++ b/vagrant/playbooks/krd-vars.yml
@@ -35,7 +35,7 @@ criproxy_url: "https://github.com/Mirantis/criproxy/releases/download/v{{ cripro
 #criproxy_url: "https://github.com/Mirantis/criproxy"
 virtlet_dest: "{{ base_dest }}/virtlet"
 virtlet_source_type: "binary"
-virtlet_version: 1.3.0
+virtlet_version: 1.4.1
 virtlet_url: "https://github.com/Mirantis/virtlet/releases/download/v{{ virtlet_version }}/virtletctl"
 #virtlet_source_type: "source"
 #virtlet_version: 68e11b8f1db2c78b063126899f0e60910700975d
@@ -47,4 +47,4 @@ nfd_version: 175305b1ad73be7301ac94add475cec6fef797a9
 nfd_url: "https://github.com/kubernetes-incubator/node-feature-discovery"
 
 go_version: 1.10.3
-kubespray_version: 2.6.0
+kubespray_version: 2.7.0
diff --git a/vagrant/setup.sh b/vagrant/setup.sh
index 2c4ece68..85900fda 100755
--- a/vagrant/setup.sh
+++ b/vagrant/setup.sh
@@ -61,6 +61,7 @@ packages=()
 case ${ID,,} in
     *suse)
     INSTALLER_CMD="sudo -H -E zypper -q install -y --no-recommends"
+    packages+=(python-devel)
 
     # Vagrant installation
     if [[ "${enable_vagrant_install+x}" ]]; then
@@ -95,6 +96,7 @@ case ${ID,,} in
     ubuntu|debian)
     libvirt_group="libvirtd"
     INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
+    packages+=(python-dev)
 
     # Vagrant installation
     if [[ "${enable_vagrant_install+x}" ]]; then
@@ -124,6 +126,7 @@ case ${ID,,} in
     PKG_MANAGER=$(which dnf || which yum)
     sudo $PKG_MANAGER updateinfo
     INSTALLER_CMD="sudo -H -E ${PKG_MANAGER} -q -y install"
+    packages+=(python-devel)
 
     # Vagrant installation
     if [[ "${enable_vagrant_install+x}" ]]; then
@@ -150,13 +153,12 @@ case ${ID,,} in
 esac
 
+${INSTALLER_CMD} ${packages[@]}
 if ! which pip; then
     curl -sL https://bootstrap.pypa.io/get-pip.py | sudo python
 fi
 sudo -H pip install --upgrade pip
 sudo -H pip install tox
-
-${INSTALLER_CMD} ${packages[@]}
 
 if [[ ${http_proxy+x} ]]; then
     vagrant plugin install vagrant-proxyconf
 fi
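A note on the setup.sh reorder above: the distro packages, which now include the Python development headers (python-dev on Debian/Ubuntu, python-devel on the SUSE and RHEL/CentOS/Fedora branches), are installed before pip and tox are bootstrapped, so pip packages that compile C extensions have the headers available. A rough check that the headers landed (a sketch; the exact include path varies by distro and Python build):

    # Python 2 headers, matching the "sudo python" used to bootstrap pip
    ls /usr/include/python2.7/Python.h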