-rw-r--r--  .gitignore                                      1
-rwxr-xr-x  vagrant/installer.sh                            2
-rw-r--r--  vagrant/inventory/group_vars/k8s-cluster.yml  111
-rwxr-xr-x  vagrant/node.sh                                34
-rw-r--r--  vagrant/playbooks/krd-vars.yml                  2
5 files changed, 36 insertions, 114 deletions
diff --git a/.gitignore b/.gitignore
index a9a06af0..b32f4c13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,6 +30,7 @@ src/k8splugin/plugins/**/*.so
# KRD
.vagrant/
vagrant/inventory/hosts.ini
+vagrant/inventory/artifacts
vagrant/inventory/group_vars/all.yml
vagrant/config/pdf.yml
vagrant/sources.list
diff --git a/vagrant/installer.sh b/vagrant/installer.sh
index dd5b77b6..65c649e1 100755
--- a/vagrant/installer.sh
+++ b/vagrant/installer.sh
@@ -140,7 +140,7 @@ function install_k8s {
# Configure environment
mkdir -p $HOME/.kube
- mv $HOME/admin.conf $HOME/.kube/config
+ mv $krd_inventory_folder/artifacts/admin.conf $HOME/.kube/config
}
# install_addons() - Install Kubernetes AddOns
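With Kubespray's kubeconfig_localhost option enabled (kept in k8s-cluster.yml below), a copy of admin.conf lands in the inventory's artifacts directory rather than in $HOME, which is why install_k8s now takes it from $krd_inventory_folder/artifacts and why that directory is newly ignored in .gitignore. A minimal sketch for verifying the relocated kubeconfig after a run, assuming krd_inventory_folder points at vagrant/inventory and kubectl is available on the deploy host:

    # Kubespray stages admin.conf under the inventory's artifacts directory
    ls vagrant/inventory/artifacts/
    # installer.sh has already moved it into the standard kubectl location
    export KUBECONFIG=$HOME/.kube/config
    kubectl get nodes -o wide    # every node should eventually report Ready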
diff --git a/vagrant/inventory/group_vars/k8s-cluster.yml b/vagrant/inventory/group_vars/k8s-cluster.yml
index cc3bba55..8a012a32 100644
--- a/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -1,56 +1,21 @@
-# Valid bootstrap options (required): ubuntu, coreos, centos, none
-bootstrap_os: none
-
-#Directory where etcd data stored
-etcd_data_dir: /var/lib/etcd
-
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-### OTHER OPTIONAL VARIABLES
## Kubelet sometimes needs to load kernel modules, e.g. dynamic kernel services needed for mounting
## persistent volumes into containers (such as ceph and rbd backed volumes). These may not be loaded
## by the Kubernetes preinstall processes. Set to true to allow kubelet to load kernel modules.
kubelet_load_modules: true
-# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
-docker_dns_servers_strict: false
-
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
-kube_config_dir: /etc/kubernetes
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-
kube_api_anonymous_auth: true
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/tmp/releases"
-# Random shifts for retrying failed ops like pushing/downloading
-retry_stagger: 5
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "secret"
@@ -70,84 +35,14 @@ kube_token_auth: true
# Can also be set to 'cloud', which lets the cloud provider set up appropriate routing
kube_network_plugin: flannel
-# Enable kubernetes network policies
-enable_network_policy: false
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# DNS configuration.
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
-ndots: 2
-# Can be dnsmasq_kubedns, kubedns or none
-dns_mode: kubedns
-# Can be docker_dns, host_resolvconf or none
-resolvconf_mode: docker_dns
-# Deploy netchecker app to verify DNS resolve as an HTTP service
-deploy_netchecker: false
-# Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-dns_domain: "{{ cluster_name }}"
-
-# Path used to store Docker data
-docker_daemon_graph: "/var/lib/docker"
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-
-docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
-docker_bin_dir: "/usr/bin"
-
-# Settings for containerized control plane (etcd/kubelet/secrets)
-etcd_deployment_type: docker
+# Settings for containerized control plane
kubelet_deployment_type: host
-vault_deployment_type: docker
-helm_deployment_type: host
-
-# K8s image pull policy (imagePullPolicy)
-k8s_image_pull_policy: IfNotPresent
-
-# Kubernetes dashboard
-# RBAC required. see docs/getting-started.md for access details.
-dashboard_enabled: true
-
-# Monitoring apps for k8s
-efk_enabled: false
-
-# Helm deployment
-helm_enabled: false
-
-# Istio deployment
-istio_enabled: false
-# Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
-persistent_volumes_enabled: false
+# NGINX Ingress Controller
+ingress_nginx_enabled: true
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
kubeconfig_localhost: true
-# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
-kubectl_localhost: false
-artifacts_dir: "{{ ansible_env.HOME }}"
# Enable the MountPropagation feature gate
local_volumes_enabled: true
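The trimmed group_vars file now keeps only the values that differ from Kubespray's defaults and newly enables the NGINX ingress controller. A quick way to confirm the remaining toggles took effect once the cluster is up; namespace and pod names assume Kubespray 2.7 defaults:

    # ingress_nginx_enabled: the controller lands in its own namespace with Kubespray defaults
    kubectl get pods --namespace ingress-nginx
    # core add-ons left on defaults (flannel networking, kubedns) run in kube-system
    kubectl --namespace kube-system get pods | grep -E 'flannel|dns'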
diff --git a/vagrant/node.sh b/vagrant/node.sh
index 5edc3a3c..3bf704b1 100755
--- a/vagrant/node.sh
+++ b/vagrant/node.sh
@@ -53,7 +53,33 @@ if [[ -n "${dict_volumes+x}" ]]; then
mount_external_partition ${kv%=*} ${kv#*=}
done
fi
-rmmod kvm-intel
-echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf
-modprobe kvm-intel
-grep -q -i ^Y$ /sys/module/kvm_intel/parameters/nested || { echo "Nested virtualization is not enabled"; exit 1; }
+
+vendor_id=$(lscpu|grep "Vendor ID")
+if [[ $vendor_id == *GenuineIntel* ]]; then
+ kvm_ok=$(cat /sys/module/kvm_intel/parameters/nested)
+ if [[ $kvm_ok == 'N' ]]; then
+ echo "Enable Intel Nested-Virtualization"
+ rmmod kvm-intel
+ echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf
+ modprobe kvm-intel
+ fi
+else
+ kvm_ok=$(cat /sys/module/kvm_amd/parameters/nested)
+ if [[ $kvm_ok == '0' ]]; then
+ echo "Enable AMD Nested-Virtualization"
+ rmmod kvm-amd
+ sh -c "echo 'options kvm-amd nested=1' >> /etc/modprobe.d/dist.conf"
+ modprobe kvm-amd
+ fi
+fi
+source /etc/os-release || source /usr/lib/os-release
+case ${ID,,} in
+ *suse)
+ ;;
+ ubuntu|debian)
+ apt-get install -y cpu-checker
+ kvm-ok
+ ;;
+ rhel|centos|fedora)
+ ;;
+esac
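The new block generalizes the old Intel-only logic: it reads the CPU vendor from lscpu, persists the nested option through /etc/modprobe.d/dist.conf, reloads the matching kvm module only when nesting is currently off, and on Ubuntu/Debian additionally installs cpu-checker so kvm-ok can confirm KVM acceleration is usable. A small sketch for spot-checking the result on a provisioned node; the 2>/dev/null redirects are an assumption added so the absent vendor's module path does not produce an error:

    # Nested virtualization reports Y (Intel) or 1 (AMD) once the module is reloaded
    lscpu | grep "Vendor ID"
    cat /sys/module/kvm_intel/parameters/nested 2>/dev/null
    cat /sys/module/kvm_amd/parameters/nested 2>/dev/null
    # The persisted option survives reboots via the modprobe configuration
    cat /etc/modprobe.d/dist.conf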
diff --git a/vagrant/playbooks/krd-vars.yml b/vagrant/playbooks/krd-vars.yml
index 89aa21db..b259e08d 100644
--- a/vagrant/playbooks/krd-vars.yml
+++ b/vagrant/playbooks/krd-vars.yml
@@ -47,4 +47,4 @@ nfd_version: 175305b1ad73be7301ac94add475cec6fef797a9
nfd_url: "https://github.com/kubernetes-incubator/node-feature-discovery"
go_version: 1.10.3
-kubespray_version: 2.6.0
+kubespray_version: 2.7.0
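Bumping kubespray_version pins the tooling to the Kubespray 2.7.0 release. A hedged sketch for checking that the pin resolves to a real upstream tag, assuming the release tags follow the usual v<version> naming and using the kubernetes-incubator organization already referenced elsewhere in krd-vars.yml; how installer.sh consumes the variable is not shown in this diff:

    kubespray_version=2.7.0
    git ls-remote --tags https://github.com/kubernetes-incubator/kubespray "v${kubespray_version}"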