summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--ansible/.gitignore1
-rw-r--r--ansible/application.yml23
-rw-r--r--ansible/application/README.md58
-rw-r--r--ansible/infrastructure.yml71
-rw-r--r--ansible/rancher_kubernetes.yml31
-rw-r--r--ansible/roles/application-install/defaults/main.yml1
-rw-r--r--ansible/roles/application-install/tasks/install.yml34
-rw-r--r--ansible/roles/application-install/tasks/main.yml5
-rwxr-xr-xansible/run_playbook.sh132
-rw-r--r--ansible/setup.yml26
-rw-r--r--ansible/upload_resources.yml49
11 files changed, 431 insertions, 0 deletions
diff --git a/ansible/.gitignore b/ansible/.gitignore
new file mode 100644
index 00000000..5cddc2eb
--- /dev/null
+++ b/ansible/.gitignore
@@ -0,0 +1 @@
+ansible_chroot
diff --git a/ansible/application.yml b/ansible/application.yml
new file mode 100644
index 00000000..bbac7e5c
--- /dev/null
+++ b/ansible/application.yml
@@ -0,0 +1,23 @@
+---
+- name: Setup nfs server
+ hosts: nfs-server
+ roles:
+ - {role: nfs, when: groups.kubernetes | length > 1 }
+
+- name: Setup nfs mounts
+ hosts: kubernetes:!nfs-server
+ roles:
+ - {role: nfs, when: groups.kubernetes | length > 1 }
+
+- name: Install Helm application {{ app_name }} into offline Kubernetes cluster
+ hosts: infrastructure
+ roles:
+ - role: application-install
+ vars:
+ phase: pre-install
+ - role: application-install
+ vars:
+ phase: install
+ - role: application-install
+ vars:
+ phase: post-install
diff --git a/ansible/application/README.md b/ansible/application/README.md
new file mode 100644
index 00000000..342240be
--- /dev/null
+++ b/ansible/application/README.md
@@ -0,0 +1,58 @@
+# Application specific configuration
+
+This directory is **empty** on purpose in git. Content in this folder is
+placed at installer packaging time and can be modified by user on target
+server where installer package is installed.
+
+## Application configuration
+
+All application related configuration variables are defined in file
+`application_configuration.yml` in this folder. The name of configuration file
+does not matter but it must be given to ansible run as command line variable file.
+
+Example:
+```
+./run_playbook.sh application.yml -i application/hosts.yml -e @application/application_configuration.yml
+```
+
+## Application Helm charts
+
+Application helm charts must be available on infra node before application playbook is executed.
+That folder on infra node is specified within `app_helm_charts_infra_directory` variable.
+
+Helm charts folder name is configured on `application_configuration.yml` file
+with `app_helm_charts_directory` variable - it is the path on remote infrastructure server.
+
+Example:
+```
+app_helm_charts_directory: /opt/application/helm_charts
+```
+
+It is expected that helm charts are available from packaging script as a part of installer SW package.
+Such source directory of helm charts is specified by `app_helm_charts_install_directory` variable
+
+Example:
+```
+app_helm_charts_install_directory: ansible/application/helm_charts/kubernetes
+```
+
+## Application specific roles
+
+Installer supports optional custom pre and post install roles. Custom roles' code folders
+need to be placed in this directory and the names of those folders are configured in
+application.yml with variables `application_pre_install_role` and `application_post_install_role`.
+
+Example:
+```
+application_pre_install_role: "{{ project_configuration }}-patch-role"
+```
+
+
+## Inventory hosts
+
+Ansible inventory file is the least application-specific, but in practice the example
+inventory file in git ansible/inventory/hosts.yml cannot be directly used anyway
+and at least ip addresses need to be changed according to target servers after
+installer installation and before starting installer execution.
+
+So it's better to place also hosts.yml to this application directory and edit it here.
diff --git a/ansible/infrastructure.yml b/ansible/infrastructure.yml
new file mode 100644
index 00000000..789f8716
--- /dev/null
+++ b/ansible/infrastructure.yml
@@ -0,0 +1,71 @@
+---
+- name: Perform common environment setup for nodes
+ hosts: infrastructure, kubernetes
+ tasks:
+ - name: Setup resolv.conf
+ lineinfile:
+ line: "nameserver {{ hostvars[groups.infrastructure[0]].ansible_host }}"
+ path: /etc/resolv.conf
+ state: present
+ insertbefore: BOF
+ become: yes
+ - name: Add application offline rpm repository
+ yum_repository:
+ name: "{{ app_name }}"
+ file: "{{ app_name | lower }}"
+ description: "{{ app_name }} offline repository"
+ baseurl: "{{ 'http://repo.infra-server' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rhel' }}"
+ gpgcheck: no
+ enabled: yes
+ when: deploy_rpm_repository
+ become: yes
+
+- name: Setup firewall
+ hosts: infrastructure, kubernetes
+ roles:
+ - role: firewall
+ vars:
+ state: disable
+
+- name: Setup infrastructure servers
+ hosts: infrastructure
+ roles:
+ - certificates
+ - docker
+ - dns
+ - vncserver
+ - role: nexus
+ vars:
+ phase: install
+ - nginx
+ tasks:
+ - name: "wait for nexus to come up"
+ uri:
+ url: "{{ nexus_url }}/service/metrics/healthcheck"
+ user: admin
+ password: admin123
+ force_basic_auth: yes
+ method: GET
+ register: nexus_wait
+ until: not nexus_wait.failed
+ retries: 30
+ delay: 10
+
+- name: Nexus changes in runtime
+ hosts: infrastructure
+ roles:
+ - role: nexus
+ vars:
+ phase: configure
+ when: populate_nexus | bool
+ - role: nexus
+ vars:
+ phase: runtime-populate
+ when: runtime_images is defined
+
+- name: Setup base for Kubernetes nodes
+ hosts: kubernetes
+ roles:
+ - docker
+ tasks:
+ - import_tasks: roles/certificates/tasks/upload_root_ca.yml
diff --git a/ansible/rancher_kubernetes.yml b/ansible/rancher_kubernetes.yml
new file mode 100644
index 00000000..196f1fc2
--- /dev/null
+++ b/ansible/rancher_kubernetes.yml
@@ -0,0 +1,31 @@
+---
+- name: Install binaries for controlling deployment
+ hosts: infrastructure
+ roles:
+ - kubectl
+ - helm
+
+- name: Deploy rancher server and create k8s env
+ hosts: infrastructure
+ roles:
+ - rancher
+ vars:
+ rancher_role: server
+
+- name: Deploy rancher agents
+ hosts: kubernetes
+ roles:
+ - rancher
+ vars:
+ rancher_role: agent
+
+- name: Wait for Kubernetes environment to be healthy
+ hosts: infrastructure
+ tasks:
+ - name: Check cluster health
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta/projects/{{ k8s_env_id }}"
+ register: env_info
+ retries: 30
+ delay: 15
+ until: "env_info.json.healthState == 'healthy'"
diff --git a/ansible/roles/application-install/defaults/main.yml b/ansible/roles/application-install/defaults/main.yml
new file mode 100644
index 00000000..473fbb80
--- /dev/null
+++ b/ansible/roles/application-install/defaults/main.yml
@@ -0,0 +1 @@
+phase: install
diff --git a/ansible/roles/application-install/tasks/install.yml b/ansible/roles/application-install/tasks/install.yml
new file mode 100644
index 00000000..54b64439
--- /dev/null
+++ b/ansible/roles/application-install/tasks/install.yml
@@ -0,0 +1,34 @@
+---
+- name: Helm init and upgrade
+ command: |
+ {{ helm_bin_dir }}/helm init
+ --upgrade
+ --skip-refresh
+
+- name: Wait for helm
+ wait_for: timeout=10
+ delegate_to: localhost
+
+- name: Get all helm repos
+ command: "{{ helm_bin_dir }}/helm repo list"
+ register: repos
+
+- name: Remove stable repo
+ command: "{{ helm_bin_dir }}/helm repo remove stable"
+ when: "'stable' in repos.stdout"
+
+- name: Helm Serve
+ shell: "{{ helm_bin_dir }}/helm serve &"
+ async: 45
+ poll: 0
+
+- name: Helm Add Repo
+ command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name }} {{ helm_repository_url }}"
+
+- name: Helm Make All
+ make:
+ chdir: "{{ app_helm_charts_directory }}"
+ target: all
+
+- name: Helm Install application {{ app_name }}
+ command: "helm install {{ helm_repository_name }}/{{ app_helm_chart_name }} --name {{ app_helm_release_name }} --namespace {{ app_kubernetes_namespace }}"
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
new file mode 100644
index 00000000..3306d9e4
--- /dev/null
+++ b/ansible/roles/application-install/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- debug:
+ msg: "phase is {{ phase }}"
+
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/run_playbook.sh b/ansible/run_playbook.sh
new file mode 100755
index 00000000..88c86bc3
--- /dev/null
+++ b/ansible/run_playbook.sh
@@ -0,0 +1,132 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+script_path=$(readlink -f "$0")
+script_name=$(basename "$script_path")
+ANSIBLE_DIR=$(dirname "$script_path")
+ANSIBLE_CHROOT="${ANSIBLE_DIR}/ansible_chroot"
+
+
+#
+# functions
+#
+
+help()
+{
+ echo "
+NAME:
+ ${script_name} - wrapper for ansible-playbook command
+
+DESCRIPTION:
+ Run ansible playbook (or other command if it is there) inside a docker
+ container or a chroot environment.
+
+ By default the chroot is used because it has less dependencies and no
+ service needs to be run (provided that chroot command is installed).
+
+ Docker support is kept for compatibility reasons.
+
+ To run ansible docker image you must set environment variable:
+ ANSIBLE_DOCKER_IMAGE
+
+ So this wrapper can know by which name you have built the included
+ Dockerfile and also to trigger this different behaviour.
+
+ For example:
+ ANSIBLE_DOCKER_IMAGE=ansible
+
+USAGE:
+ ./${script_name}
+ This help
+
+ ./${script_name} <args>
+ Run ansible-playbook command inside a chroot
+
+ ANSIBLE_DOCKER_IMAGE=<docker-image> ./${script_name} <args>
+ Run ansible-playbook command inside a docker container
+
+REQUIREMENTS:
+ For the optimal usage your system should support overlay mount. Which
+ should be available on any recent kernel at least couple of years back.
+
+ Another requirement is the 'unshare' utility which is part of 'util-linux'
+ package and also is part of system for couple of years already.
+
+ The last is 'chroot' command itself and that is also part of system
+ basically everywhere.
+"
+}
+
+
+#
+# run playbook
+#
+
+# if no arg then print help and exit
+if [ -z "$1" ] ; then
+ help
+ exit 0
+fi
+
+# we must be root
+if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "I need root privileges and you are not root: $(id -nu)" >&2
+ exit 1
+fi
+
+# if env var is set then run in docker
+if [ -n "$ANSIBLE_DOCKER_IMAGE" ] ; then
+ exec docker run --rm \
+ -v "${HOME}"/.ssh:/root/.ssh:rw \
+ -v "$ANSIBLE_DIR:/ansible:ro" \
+ -v "$ANSIBLE_DIR/application:/ansible/application:rw" \
+ -v "$ANSIBLE_DIR/certs/:/certs:rw" \
+ -it "${ANSIBLE_DOCKER_IMAGE}" "$@"
+fi
+
+# if not already there then unpack chroot
+if ! [ -d "$ANSIBLE_CHROOT" ] ; then
+ if ! [ -f "$ANSIBLE_DIR"/docker/ansible_chroot.tgz ] ; then
+ echo ERROR: "Missing chroot archive: ${ANSIBLE_DIR}/docker/ansible_chroot.tgz" >&2
+ exit 1
+ fi
+
+ echo INFO: "Unpacking chroot tar into: ${ANSIBLE_CHROOT}" >&2
+ if ! tar -C "$ANSIBLE_DIR" -xzf "$ANSIBLE_DIR"/docker/ansible_chroot.tgz ; then
+ echo ERROR: "Unpacking failed - ABORT" >&2
+ exit 1
+ fi
+fi
+
+# run chroot
+mkdir -p "$ANSIBLE_DIR"/application
+mkdir -p "$ANSIBLE_DIR"/certs
+"$ANSIBLE_DIR"/docker/run_chroot.sh \
+ --mount rw:"${HOME}/.ssh":/root/.ssh \
+ --mount ro:"$ANSIBLE_DIR":/ansible \
+ --mount rw:"$ANSIBLE_DIR"/application:/ansible/application \
+ --mount rw:"$ANSIBLE_DIR"/certs:/certs \
+ --workdir /ansible \
+ execute "$ANSIBLE_CHROOT" ansible-playbook "$@"
+
+exit 0
diff --git a/ansible/setup.yml b/ansible/setup.yml
new file mode 100644
index 00000000..ec572973
--- /dev/null
+++ b/ansible/setup.yml
@@ -0,0 +1,26 @@
+---
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: "Check and generate key if needed"
+ block:
+ - stat:
+ path: '{{ private_key }}.pub'
+ register: p
+
+ - command: ssh-keygen -f {{ private_key }} -t rsa -N ''
+ when: not p.stat.exists
+ vars:
+ private_key: /root/.ssh/offline_ssh_key
+
+- hosts: all
+ gather_facts: false
+ tasks:
+ - name: Setup authorized_keys file
+ authorized_key:
+ user: root
+ state: present
+ key: "{{ lookup('file', public_key) }}"
+ become: true
+ vars:
+ public_key: /root/.ssh/offline_ssh_key.pub
diff --git a/ansible/upload_resources.yml b/ansible/upload_resources.yml
new file mode 100644
index 00000000..68010eb1
--- /dev/null
+++ b/ansible/upload_resources.yml
@@ -0,0 +1,49 @@
+---
+- name: Check for presence of auxiliary resources tar file
+ hosts: resources[0]
+ tasks:
+ - name: Store auxiliary resources tar file info into variable
+ stat:
+ path: "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"
+ register: aux_file_presence
+
+- name: Check infrastructure server for presence of resources and requirements
+ hosts: infrastructure
+ tasks:
+ - name: Check if nfs-utils is installed
+ yum:
+ list: nfs-utils
+ register: nfs_utils_check
+
+ - name: Check if the resources are already unpacked
+ stat:
+ path: "{{ app_data_path }}"
+ register: resources_data_check
+
+ - name: Check if the auxiliary resources are already unpacked
+ stat:
+ path: "{{ aux_data_path }}"
+ register: aux_resources_data_check
+ when: aux_data_path is defined and aux_data_path is not none
+
+- name: Ensure the existence of data directory/ies on infrastructure server
+ hosts: infrastructure
+ tasks:
+ - name: Create data directory
+ file:
+ path: "{{ app_data_path }}"
+ state: directory
+
+ - name: Create auxiliary data directory
+ file:
+ path: "{{ aux_data_path }}"
+ state: directory
+ when: aux_data_path is defined and aux_data_path is not none
+
+- name: Upload resources to infrastructure server
+ hosts: infrastructure
+ roles:
+ # use nfs or ssh and unpack resources into data directory/ies
+ - role: resource-data
+ vars:
+ transport: "{{ 'nfs' if resources_on_nfs and (nfs_utils_check.results|selectattr('yumstate', 'match', 'installed')|list|length != 0) else 'ssh' }}"