summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--ansible/inventory/hosts.yml9
-rw-r--r--ansible/rke.yml26
-rw-r--r--ansible/roles/rke/defaults/main.yml33
-rw-r--r--ansible/roles/rke/tasks/main.yml2
-rw-r--r--ansible/roles/rke/tasks/rke_config.yml46
-rw-r--r--ansible/roles/rke/tasks/rke_deploy.yml5
-rw-r--r--ansible/roles/rke/tasks/rke_node.yml11
-rw-r--r--ansible/roles/rke/templates/cluster.yml.j2145
8 files changed, 277 insertions, 0 deletions
diff --git a/ansible/inventory/hosts.yml b/ansible/inventory/hosts.yml
index a29072c5..37ae4e39 100644
--- a/ansible/inventory/hosts.yml
+++ b/ansible/inventory/hosts.yml
@@ -31,6 +31,15 @@ all:
#ip of the node that it uses for communication with k8s cluster.
cluster_ip: 10.8.8.19
+ # This is a group of hosts that are to be used as kubernetes control plane nodes.
+ # This means they host kubernetes api server, controller manager and scheduler.
+ # This example uses infra for this purpose, however note that any
+ # other host could be used including kubernetes nodes.
+ # cluster_ip needs to be set for hosts used as control planes.
+ kubernetes-control-plane:
+ hosts:
+ infrastructure-server:
+
nfs-server:
hosts:
kubernetes-node-1
diff --git a/ansible/rke.yml b/ansible/rke.yml
new file mode 100644
index 00000000..81e964d9
--- /dev/null
+++ b/ansible/rke.yml
@@ -0,0 +1,26 @@
+---
+- name: Gather facts for all hosts
+ hosts: all
+
+- name: Configure kubernetes cluster (RKE)
+ hosts: infrastructure
+ roles:
+ - role: rke
+ vars:
+ mode: config
+
+- name: Prepare kubernetes nodes (RKE)
+ hosts:
+ - kubernetes
+ - kubernetes-control-plane
+ roles:
+ - role: rke
+ vars:
+ mode: node
+
+- name: Deploy kubernetes cluster (RKE)
+ hosts: infrastructure
+ roles:
+ - role: rke
+ vars:
+ mode: deploy
diff --git a/ansible/roles/rke/defaults/main.yml b/ansible/roles/rke/defaults/main.yml
new file mode 100644
index 00000000..3e1c26a6
--- /dev/null
+++ b/ansible/roles/rke/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+rke_binary: rke
+rke_username: rke
+rke_bin_dir: /usr/local/bin
+cluster_config_dir: "{{ app_data_path }}/cluster"
+rke:
+ etcd: rancher/coreos-etcd:v3.2.24-rancher1
+ alpine: rancher/rke-tools:v0.1.27
+ nginx_proxy: rancher/rke-tools:v0.1.27
+ cert_downloader: rancher/rke-tools:v0.1.27
+ kubernetes_services_sidecar: rancher/rke-tools:v0.1.27
+ kubedns: rancher/k8s-dns-kube-dns:1.15.0
+ dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
+ kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
+ kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+ coredns: coredns/coredns:1.2.6
+ coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+ kubernetes: rancher/hyperkube:v1.13.5-rancher1
+ flannel: rancher/coreos-flannel:v0.10.0-rancher1
+ flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
+ calico_node: rancher/calico-node:v3.4.0
+ calico_cni: rancher/calico-cni:v3.4.0
+ calico_controllers: ""
+ calico_ctl: rancher/calico-ctl:v2.0.0
+ canal_node: rancher/calico-node:v3.4.0
+ canal_cni: rancher/calico-cni:v3.4.0
+ canal_flannel: rancher/coreos-flannel:v0.10.0
+ weave_node: weaveworks/weave-kube:2.5.0
+ weave_cni: weaveworks/weave-npc:2.5.0
+ pod_infra_container: rancher/pause:3.1
+ ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
+ ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
+ metrics_server: rancher/metrics-server:v0.3.1
diff --git a/ansible/roles/rke/tasks/main.yml b/ansible/roles/rke/tasks/main.yml
new file mode 100644
index 00000000..2f832973
--- /dev/null
+++ b/ansible/roles/rke/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "rke_{{ mode }}.yml"
diff --git a/ansible/roles/rke/tasks/rke_config.yml b/ansible/roles/rke/tasks/rke_config.yml
new file mode 100644
index 00000000..49503192
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_config.yml
@@ -0,0 +1,46 @@
+---
+- name: "Ensure the .ssh directory exists"
+ file:
+ path: "{{ ansible_env.HOME }}/.ssh"
+ mode: "0700"
+ state: directory
+
+- name: Add kubernetes nodes host keys to known_hosts file
+ known_hosts:
+ name: "{{ hostvars[item].cluster_ip }}"
+ key: "{{ hostvars[item].cluster_ip }} ssh-rsa {{ hostvars[item].ansible_ssh_host_key_rsa_public }}"
+ hash_host: true
+ state: present
+ loop: "{{ groups['kubernetes'] | union(groups['kubernetes-control-plane']) }}"
+
+- name: "Ensure {{ cluster_config_dir }} is present"
+ file:
+ path: "{{ cluster_config_dir }}"
+ state: directory
+ mode: "0755"
+
+- name: Generate cluster wide ssh key pair
+ command: "ssh-keygen -q -b 4096 -t rsa -N '' -f {{ cluster_config_dir }}/cluster_key"
+ args:
+ creates: "{{ cluster_config_dir }}/cluster_key"
+
+- name: Get ssh public key
+ slurp:
+ src: "{{ cluster_config_dir }}/cluster_key.pub"
+ register: cluster_public_key_out
+
+- name: Decode ssh public key
+ set_fact:
+ cluster_public_key: "{{ cluster_public_key_out.content | b64decode }}"
+
+- name: Prepare rke cluster.yml
+ template:
+ src: cluster.yml.j2
+ dest: "{{ cluster_config_dir }}/cluster.yml"
+
+- name: Install rke cli tool
+ copy:
+ src: "{{ app_data_path }}/downloads/{{ rke_binary }}"
+ dest: "{{ rke_bin_dir }}/rke"
+ remote_src: true
+ mode: "0755"
diff --git a/ansible/roles/rke/tasks/rke_deploy.yml b/ansible/roles/rke/tasks/rke_deploy.yml
new file mode 100644
index 00000000..7b3e2510
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_deploy.yml
@@ -0,0 +1,5 @@
+---
+- name: Run rke up
+ command: "{{ rke_bin_dir }}/rke up --config cluster.yml"
+ args:
+ chdir: "{{ cluster_config_dir }}"
diff --git a/ansible/roles/rke/tasks/rke_node.yml b/ansible/roles/rke/tasks/rke_node.yml
new file mode 100644
index 00000000..9ec9f073
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_node.yml
@@ -0,0 +1,11 @@
+---
+- name: Create a rke user on the node
+ user:
+ name: "{{ rke_username }}"
+ groups: docker
+ password_lock: true
+
+- name: Distribute rke user ssh public key
+ authorized_key:
+ user: "{{ rke_username }}"
+ key: "{{ hostvars[groups['infrastructure'][0]].cluster_public_key }}"
diff --git a/ansible/roles/rke/templates/cluster.yml.j2 b/ansible/roles/rke/templates/cluster.yml.j2
new file mode 100644
index 00000000..d55a486c
--- /dev/null
+++ b/ansible/roles/rke/templates/cluster.yml.j2
@@ -0,0 +1,145 @@
+nodes:
+{# Note that we iterate through all nodes in relevant groups.
+We check which groups they belong to exactly later to determine roles. #}
+{% for node in groups['kubernetes'] | union(groups['kubernetes-control-plane']) %}
+- address: "{{ hostvars[node].cluster_ip }}"
+ port: "22"
+ internal_address: "{{ hostvars[node].cluster_ip }}"
+ role:
+{% if node in groups['kubernetes-control-plane'] %}
+ - controlplane
+{% endif %}
+{% if node in groups['kubernetes'] %}
+ - worker
+ - etcd
+{% endif %}
+ hostname_override: ""
+ user: "{{ rke_username }}"
+ docker_socket: /var/run/docker.sock
+ ssh_key: ""
+ ssh_key_path: "{{ cluster_config_dir }}/cluster_key"
+ ssh_cert: ""
+ ssh_cert_path: ""
+ labels: {}
+{% endfor %}
+services:
+ etcd:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ external_urls: []
+ ca_cert: ""
+ cert: ""
+ key: ""
+ path: ""
+ snapshot: null
+ retention: ""
+ creation: ""
+ backup_config: null
+ kube-api:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ service_cluster_ip_range: 10.43.0.0/16
+ service_node_port_range: ""
+ pod_security_policy: false
+ always_pull_images: false
+ kube-controller:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ cluster_cidr: 10.42.0.0/16
+ service_cluster_ip_range: 10.43.0.0/16
+ scheduler:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ kubelet:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ cluster_domain: cluster.local
+ infra_container_image: ""
+ cluster_dns_server: 10.43.0.10
+ fail_swap_on: false
+ kubeproxy:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+network:
+ plugin: canal
+ options: {}
+authentication:
+ strategy: x509
+ sans: []
+ webhook: null
+addons: ""
+addons_include: []
+system_images:
+ etcd: "{{ rke.etcd }}"
+ alpine: "{{ rke.alpine }}"
+ nginx_proxy: "{{ rke.nginx_proxy }}"
+ cert_downloader: "{{ rke.cert_downloader }}"
+ kubernetes_services_sidecar: "{{ rke.kubernetes_services_sidecar }}"
+ kubedns: "{{ rke.kubedns }}"
+ dnsmasq: "{{ rke.dnsmasq }}"
+ kubedns_sidecar: "{{ rke.kubedns_sidecar }}"
+ kubedns_autoscaler: "{{ rke.kubedns_autoscaler }}"
+ coredns: "{{ rke.coredns }}"
+ coredns_autoscaler: "{{ rke.coredns_autoscaler }}"
+ kubernetes: "{{ rke.kubernetes }}"
+ flannel: "{{ rke.flannel }}"
+ flannel_cni: "{{ rke.flannel_cni }}"
+ calico_node: "{{ rke.calico_node }}"
+ calico_cni: "{{ rke.calico_cni }}"
+ calico_controllers: ""
+ calico_ctl: "{{ rke.calico_ctl }}"
+ canal_node: "{{ rke.canal_node }}"
+ canal_cni: "{{ rke.canal_cni }}"
+ canal_flannel: "{{ rke.canal_flannel }}"
+ weave_node: "{{ rke.weave_node }}"
+ weave_cni: "{{ rke.weave_cni }}"
+ pod_infra_container: "{{ rke.pod_infra_container }}"
+ ingress: "{{ rke.ingress }}"
+ ingress_backend: "{{ rke.ingress_backend }}"
+ metrics_server: "{{ rke.metrics_server }}"
+ssh_key_path: "{{ cluster_config_dir }}/cluster_key"
+ssh_cert_path: ""
+ssh_agent_auth: false
+authorization:
+ mode: none
+ options: {}
+ignore_docker_version: false
+kubernetes_version: ""
+private_registries: []
+ingress:
+ provider: ""
+ options: {}
+ node_selector: {}
+ extra_args: {}
+cluster_name: ""
+cloud_provider:
+ name: ""
+prefix_path: ""
+addon_job_timeout: 0
+bastion_host:
+ address: ""
+ port: ""
+ user: ""
+ ssh_key: ""
+ ssh_key_path: ""
+ ssh_cert: ""
+ ssh_cert_path: ""
+monitoring:
+ provider: ""
+ options: {}
+restore:
+ restore: false
+ snapshot_name: ""
+dns: null