| author | Michal Ptacek <m.ptacek@partner.samsung.com> | 2019-05-20 10:35:30 +0000 |
|---|---|---|
| committer | Gerrit Code Review <gerrit@onap.org> | 2019-05-20 10:35:30 +0000 |
| commit | bf11d96ff6d9908bf04335a66d0e092a736d4561 (patch) | |
| tree | baa9b4aba1f970f8fc083f44e562dfa1fa9505ff /ansible | |
| parent | 8fd23141ffc7dd2f3c02b62e8fed1ff8364319b0 (diff) | |
| parent | 119f760bc517ca037fbbc90bc741902c75c4696c (diff) | |
Merge changes from topic "rke-support"
* changes:
Add setup for kubectl and helm
Add support for RKE kubernetes implementation
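
For context, the merged `ansible/rke.yml` playbook is driven by the standard Ansible CLI. A minimal invocation sketch, assuming the sample `inventory/hosts.yml` shown in the diffstat below (the installer's actual wrapper scripts, if any, are outside this change):

```sh
# Hypothetical invocation of the playbook added by this change;
# the inventory path is the sample inventory from this repository.
ansible-playbook -i inventory/hosts.yml rke.yml
```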
Diffstat (limited to 'ansible')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | ansible/inventory/hosts.yml | 9 |
| -rw-r--r-- | ansible/rke.yml | 28 |
| -rw-r--r-- | ansible/roles/kubectl/defaults/main.yml | 3 |
| -rw-r--r-- | ansible/roles/kubectl/tasks/main.yml | 10 |
| -rw-r--r-- | ansible/roles/rancher/defaults/main.yml | 2 |
| -rw-r--r-- | ansible/roles/rancher/tasks/rancher_server.yml | 10 |
| -rw-r--r-- | ansible/roles/rancher/templates/kube_config.j2 (renamed from ansible/roles/kubectl/templates/kube_config.j2) | 0 |
| -rw-r--r-- | ansible/roles/rke/defaults/main.yml | 35 |
| -rw-r--r-- | ansible/roles/rke/tasks/main.yml | 2 |
| -rw-r--r-- | ansible/roles/rke/tasks/rke_config.yml | 46 |
| -rw-r--r-- | ansible/roles/rke/tasks/rke_deploy.yml | 17 |
| -rw-r--r-- | ansible/roles/rke/tasks/rke_node.yml | 11 |
| -rw-r--r-- | ansible/roles/rke/templates/cluster.yml.j2 | 145 |
13 files changed, 305 insertions, 13 deletions
```diff
diff --git a/ansible/inventory/hosts.yml b/ansible/inventory/hosts.yml
index a29072c5..37ae4e39 100644
--- a/ansible/inventory/hosts.yml
+++ b/ansible/inventory/hosts.yml
@@ -31,6 +31,15 @@ all:
           #ip of the node that it uses for communication with k8s cluster.
           cluster_ip: 10.8.8.19
+    # This is a group of hosts that are to be used as kubernetes control plane nodes.
+    # This means they host kubernetes api server, controller manager and scheduler.
+    # This example uses infra for this purpose, however note that any
+    # other host could be used including kubernetes nodes.
+    # cluster_ip needs to be set for hosts used as control planes.
+    kubernetes-control-plane:
+      hosts:
+        infrastructure-server
+
 
     nfs-server:
       hosts:
         kubernetes-node-1
diff --git a/ansible/rke.yml b/ansible/rke.yml
new file mode 100644
index 00000000..e0d6dcf1
--- /dev/null
+++ b/ansible/rke.yml
@@ -0,0 +1,28 @@
+---
+- name: Gather facts for all hosts
+  hosts: all
+
+- name: Configure kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: config
+
+- name: Prepare kubernetes nodes (RKE)
+  hosts:
+    - kubernetes
+    - kubernetes-control-plane
+  roles:
+    - role: rke
+      vars:
+        mode: node
+
+- name: Deploy kubernetes cluster (RKE)
+  hosts: infrastructure
+  roles:
+    - role: rke
+      vars:
+        mode: deploy
+    - kubectl
+    - helm
diff --git a/ansible/roles/kubectl/defaults/main.yml b/ansible/roles/kubectl/defaults/main.yml
index 78c15c75..b922fb58 100644
--- a/ansible/roles/kubectl/defaults/main.yml
+++ b/ansible/roles/kubectl/defaults/main.yml
@@ -1,5 +1,2 @@
 ---
 kubectl_bin_dir: /usr/local/bin
-kube_directory: ~/.kube
-# Defaulting to rancher setup
-kube_server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id | mandatory }}/kubernetes:6443"
\ No newline at end of file
diff --git a/ansible/roles/kubectl/tasks/main.yml b/ansible/roles/kubectl/tasks/main.yml
index 9ecb5c44..7c77c3c5 100644
--- a/ansible/roles/kubectl/tasks/main.yml
+++ b/ansible/roles/kubectl/tasks/main.yml
@@ -5,13 +5,3 @@
     dest: "{{ kubectl_bin_dir }}/kubectl"
     remote_src: true
     mode: 0755
-
-- name: Ensure .kube directory exists
-  file:
-    path: "{{ kube_directory }}"
-    state: directory
-
-- name: Create kube config
-  template:
-    src: kube_config.j2
-    dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/rancher/defaults/main.yml b/ansible/roles/rancher/defaults/main.yml
index 6d354e6e..e4e12d23 100644
--- a/ansible/roles/rancher/defaults/main.yml
+++ b/ansible/roles/rancher/defaults/main.yml
@@ -4,6 +4,8 @@ rancher_remove_other_env: true
 rancher_redeploy_k8s_env: true
 rancher_cluster_health_state: healthy
 rancher_cluster_health_check_retries: 30
+kube_directory: ~/.kube
+kube_server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id | mandatory }}/kubernetes:6443"
 rancher:
   # The following variables can be set via the UI under advanced/settings.
   # All of these affect tables in the cattle db and are uninteresting
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
index a0893b0b..f467ff3f 100644
--- a/ansible/roles/rancher/tasks/rancher_server.yml
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -93,3 +93,13 @@ data:
     option: audit_log.purge.after.seconds
     value: "{{ rancher.audit_log_purge_after_seconds }}"
+
+- name: Ensure .kube directory exists
+  file:
+    path: "{{ kube_directory }}"
+    state: directory
+
+- name: Create kube config
+  template:
+    src: kube_config.j2
+    dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/kubectl/templates/kube_config.j2 b/ansible/roles/rancher/templates/kube_config.j2
index 586c59d4..586c59d4 100644
--- a/ansible/roles/kubectl/templates/kube_config.j2
+++ b/ansible/roles/rancher/templates/kube_config.j2
diff --git a/ansible/roles/rke/defaults/main.yml b/ansible/roles/rke/defaults/main.yml
new file mode 100644
index 00000000..cbf03b74
--- /dev/null
+++ b/ansible/roles/rke/defaults/main.yml
@@ -0,0 +1,35 @@
+---
+rke_binary: rke
+rke_username: rke
+rke_bin_dir: /usr/local/bin
+kube_config_dir: "{{ ansible_env.HOME }}/.kube"
+cluster_config_dir: "{{ app_data_path }}/cluster"
+rke:
+  # rke (rancher) images
+  etcd: rancher/coreos-etcd:v3.2.24-rancher1
+  alpine: rancher/rke-tools:v0.1.27
+  nginx_proxy: rancher/rke-tools:v0.1.27
+  cert_downloader: rancher/rke-tools:v0.1.27
+  kubernetes_services_sidecar: rancher/rke-tools:v0.1.27
+  kubedns: rancher/k8s-dns-kube-dns:1.15.0
+  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.0
+  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.0
+  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+  coredns: coredns/coredns:1.2.6
+  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.0.0
+  kubernetes: rancher/hyperkube:v1.13.5-rancher1
+  flannel: rancher/coreos-flannel:v0.10.0-rancher1
+  flannel_cni: rancher/flannel-cni:v0.3.0-rancher1
+  calico_node: rancher/calico-node:v3.4.0
+  calico_cni: rancher/calico-cni:v3.4.0
+  calico_controllers: ""
+  calico_ctl: rancher/calico-ctl:v2.0.0
+  canal_node: rancher/calico-node:v3.4.0
+  canal_cni: rancher/calico-cni:v3.4.0
+  canal_flannel: rancher/coreos-flannel:v0.10.0
+  weave_node: weaveworks/weave-kube:2.5.0
+  weave_cni: weaveworks/weave-npc:2.5.0
+  pod_infra_container: rancher/pause:3.1
+  ingress: rancher/nginx-ingress-controller:0.21.0-rancher3
+  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4-rancher1
+  metrics_server: rancher/metrics-server:v0.3.1
diff --git a/ansible/roles/rke/tasks/main.yml b/ansible/roles/rke/tasks/main.yml
new file mode 100644
index 00000000..2f832973
--- /dev/null
+++ b/ansible/roles/rke/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "rke_{{ mode }}.yml"
diff --git a/ansible/roles/rke/tasks/rke_config.yml b/ansible/roles/rke/tasks/rke_config.yml
new file mode 100644
index 00000000..49503192
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_config.yml
@@ -0,0 +1,46 @@
+---
+- name: "Ensure the .ssh directory exists"
+  file:
+    path: "{{ ansible_env.HOME }}/.ssh"
+    mode: 0700
+    state: directory
+
+- name: Add kubernetes nodes host keys to known_hosts file
+  known_hosts:
+    name: "{{ hostvars[item].cluster_ip }}"
+    key: "{{ hostvars[item].cluster_ip }} ssh-rsa {{ hostvars[item].ansible_ssh_host_key_rsa_public }}"
+    hash_host: true
+    state: present
+  loop: "{{ groups['kubernetes'] }}"
+
+- name: "Ensure {{ cluster_config_dir }} is present"
+  file:
+    path: "{{ cluster_config_dir }}"
+    state: directory
+    mode: 0755
+
+- name: Generate cluster wide ssh key pair
+  command: "ssh-keygen -q -b 4096 -t rsa -N '' -f {{ cluster_config_dir }}/cluster_key"
+  args:
+    creates: "{{ cluster_config_dir }}/cluster_key"
+
+- name: Get ssh public key
+  slurp:
+    src: "{{ cluster_config_dir }}/cluster_key.pub"
+  register: cluster_public_key_out
+
+- name: Decode ssh public key
+  set_fact:
+    cluster_public_key: "{{ cluster_public_key_out.content | b64decode }}"
+
+- name: Prepare rke cluster.yml
+  template:
+    src: cluster.yml.j2
+    dest: "{{ cluster_config_dir }}/cluster.yml"
+
+- name: Install rke cli tool
+  copy:
+    src: "{{ app_data_path }}/downloads/{{ rke_binary }}"
+    dest: "{{ rke_bin_dir }}/rke"
+    remote_src: true
+    mode: 0755
diff --git a/ansible/roles/rke/tasks/rke_deploy.yml b/ansible/roles/rke/tasks/rke_deploy.yml
new file mode 100644
index 00000000..9983d08a
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_deploy.yml
@@ -0,0 +1,17 @@
+---
+- name: Run rke up
+  command: "{{ rke_bin_dir }}/rke up --config cluster.yml"
+  args:
+    chdir: "{{ cluster_config_dir }}"
+
+- name: Ensure .kube directory is present
+  file:
+    path: "{{ kube_config_dir }}"
+    state: directory
+
+- name: Setup kubeconfig
+  copy:
+    src: "{{ cluster_config_dir }}/kube_config_cluster.yml"
+    dest: "{{ kube_config_dir }}/config"
+    remote_src: true
+    mode: 0755
diff --git a/ansible/roles/rke/tasks/rke_node.yml b/ansible/roles/rke/tasks/rke_node.yml
new file mode 100644
index 00000000..9ec9f073
--- /dev/null
+++ b/ansible/roles/rke/tasks/rke_node.yml
@@ -0,0 +1,11 @@
+---
+- name: Create a rke user on the node
+  user:
+    name: "{{ rke_username }}"
+    groups: docker
+    password_lock: yes
+
+- name: Distribute rke user ssh public key
+  authorized_key:
+    user: "{{ rke_username }}"
+    key: "{{ hostvars[groups['infrastructure'][0]].cluster_public_key }}"
diff --git a/ansible/roles/rke/templates/cluster.yml.j2 b/ansible/roles/rke/templates/cluster.yml.j2
new file mode 100644
index 00000000..d55a486c
--- /dev/null
+++ b/ansible/roles/rke/templates/cluster.yml.j2
@@ -0,0 +1,145 @@
+nodes:
+{# Note that we iterate through all nodes in relevant groups.
+We check which groups they belong to exactly later to determine roles. #}
+{% for node in groups['kubernetes'] | union(groups['kubernetes-control-plane']) %}
+- address: "{{ hostvars[node].cluster_ip }}"
+  port: "22"
+  internal_address: "{{ hostvars[node].cluster_ip }}"
+  role:
+{% if node in groups['kubernetes-control-plane'] %}
+  - controlplane
+{% endif %}
+{% if node in groups['kubernetes'] %}
+  - worker
+  - etcd
+{% endif %}
+  hostname_override: ""
+  user: "{{ rke_username }}"
+  docker_socket: /var/run/docker.sock
+  ssh_key: ""
+  ssh_key_path: "{{ cluster_config_dir }}/cluster_key"
+  ssh_cert: ""
+  ssh_cert_path: ""
+  labels: {}
+{% endfor %}
+services:
+  etcd:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    external_urls: []
+    ca_cert: ""
+    cert: ""
+    key: ""
+    path: ""
+    snapshot: null
+    retention: ""
+    creation: ""
+    backup_config: null
+  kube-api:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    service_cluster_ip_range: 10.43.0.0/16
+    service_node_port_range: ""
+    pod_security_policy: false
+    always_pull_images: false
+  kube-controller:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    cluster_cidr: 10.42.0.0/16
+    service_cluster_ip_range: 10.43.0.0/16
+  scheduler:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+  kubelet:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+    cluster_domain: cluster.local
+    infra_container_image: ""
+    cluster_dns_server: 10.43.0.10
+    fail_swap_on: false
+  kubeproxy:
+    image: ""
+    extra_args: {}
+    extra_binds: []
+    extra_env: []
+network:
+  plugin: canal
+  options: {}
+authentication:
+  strategy: x509
+  sans: []
+  webhook: null
+addons: ""
+addons_include: []
+system_images:
+  etcd: "{{ rke.etcd }}"
+  alpine: "{{ rke.alpine }}"
+  nginx_proxy: "{{ rke.nginx_proxy }}"
+  cert_downloader: "{{ rke.cert_downloader }}"
+  kubernetes_services_sidecar: "{{ rke.kubernetes_services_sidecar }}"
+  kubedns: "{{ rke.kubedns }}"
+  dnsmasq: "{{ rke.dnsmasq }}"
+  kubedns_sidecar: "{{ rke.kubedns_sidecar }}"
+  kubedns_autoscaler: "{{ rke.kubedns_autoscaler }}"
+  coredns: "{{ rke.coredns }}"
+  coredns_autoscaler: "{{ rke.coredns_autoscaler }}"
+  kubernetes: "{{ rke.kubernetes }}"
+  flannel: "{{ rke.flannel }}"
+  flannel_cni: "{{ rke.flannel_cni }}"
+  calico_node: "{{ rke.calico_node }}"
+  calico_cni: "{{ rke.calico_cni }}"
+  calico_controllers: ""
+  calico_ctl: "{{ rke.calico_ctl }}"
+  canal_node: "{{ rke.canal_node }}"
+  canal_cni: "{{ rke.canal_cni }}"
+  canal_flannel: "{{ rke.canal_flannel }}"
+  weave_node: "{{ rke.weave_node }}"
+  weave_cni: "{{ rke.weave_cni }}"
+  pod_infra_container: "{{ rke.pod_infra_container }}"
+  ingress: "{{ rke.ingress }}"
+  ingress_backend: "{{ rke.ingress_backend }}"
+  metrics_server: "{{ rke.metrics_server }}"
+ssh_key_path: "{{ cluster_config_dir }}/cluster_key"
+ssh_cert_path: ""
+ssh_agent_auth: false
+authorization:
+  mode: none
+  options: {}
+ignore_docker_version: false
+kubernetes_version: ""
+private_registries: []
+ingress:
+  provider: ""
+  options: {}
+  node_selector: {}
+  extra_args: {}
+cluster_name: ""
+cloud_provider:
+  name: ""
+prefix_path: ""
+addon_job_timeout: 0
+bastion_host:
+  address: ""
+  port: ""
+  user: ""
+  ssh_key: ""
+  ssh_key_path: ""
+  ssh_cert: ""
+  ssh_cert_path: ""
+monitoring:
+  provider: ""
+  options: {}
+restore:
+  restore: false
+  snapshot_name: ""
+dns: null
```
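
After `rke up` succeeds, the `rke_deploy.yml` tasks above copy `kube_config_cluster.yml` into `~/.kube/config` on the infrastructure host, so the new cluster can be inspected there with plain kubectl. A minimal post-deploy sanity check sketch (standard kubectl commands, not part of this change):

```sh
# Assumed verification on the infrastructure host after the deploy play:
kubectl get nodes -o wide        # control-plane and worker/etcd nodes should report Ready
kubectl get pods -n kube-system  # canal, DNS and ingress pods from system_images
```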