-rw-r--r--   ansible/.gitignore                                  |   1
-rw-r--r--   ansible/ansible.cfg                                 |   8
-rw-r--r--   ansible/application.yml                             |  23
-rw-r--r--   ansible/application/README.md                       |  58
-rw-r--r--   ansible/docker/.gitignore                           |   3
-rw-r--r--   ansible/docker/Dockerfile                           |  34
-rwxr-xr-x   ansible/docker/build_ansible_image.sh               |  52
-rwxr-xr-x   ansible/docker/create_docker_chroot.sh              | 220
-rwxr-xr-x   ansible/docker/run_chroot.sh                        | 465
-rwxr-xr-x   ansible/group_vars/all.yml                          | 129
-rwxr-xr-x   ansible/group_vars/infrastructure.yml               |  36
-rw-r--r--   ansible/infrastructure.yml                          |  71
-rw-r--r--   ansible/inventory/hosts.yml                         |  32
-rw-r--r--   ansible/library/json_add.py                         |  90
-rw-r--r--   ansible/library/rancher_k8s_environment.py          | 341
-rw-r--r--   ansible/rancher_kubernetes.yml                      |  31
-rw-r--r--   ansible/roles/application-install/defaults/main.yml |   1
-rw-r--r--   ansible/roles/application-install/tasks/install.yml |  34
-rw-r--r--   ansible/roles/application-install/tasks/main.yml    |   5
-rw-r--r--   ansible/roles/nexus/defaults/main.yml               |   2
-rw-r--r--   ansible/roles/nexus/files/configure.groovy          |  37
-rw-r--r--   ansible/roles/nexus/tasks/configure.yml             |  34
-rw-r--r--   ansible/roles/nexus/tasks/insert-images.yml         |  19
-rw-r--r--   ansible/roles/nexus/tasks/install.yml               |  29
-rw-r--r--   ansible/roles/nexus/tasks/main.yml                  |   2
-rw-r--r--   ansible/roles/nexus/tasks/runtime-populate.yml      |  12
-rw-r--r--   ansible/roles/nexus/vars/main.yml                   |   1
-rw-r--r--   ansible/roles/nginx/tasks/main.yml                  |  37
-rw-r--r--   ansible/roles/nginx/templates/nginx.conf.j2         | 105
-rw-r--r--   ansible/roles/rancher/tasks/main.yml                |   2
-rw-r--r--   ansible/roles/rancher/tasks/rancher_agent.yml       |  13
-rw-r--r--   ansible/roles/rancher/tasks/rancher_server.yml      |  51
-rw-r--r--   ansible/roles/rancher/templates/kube_config.j2      |  19
-rw-r--r--   ansible/roles/resource-data/tasks/main.yml          |   2
-rw-r--r--   ansible/roles/resource-data/tasks/nfs-upload.yml    |  52
-rw-r--r--   ansible/roles/resource-data/tasks/ssh-upload.yml    |  59
-rw-r--r--   ansible/roles/vncserver/tasks/main.yml              |  19
-rwxr-xr-x   ansible/run_playbook.sh                             | 132
-rw-r--r--   ansible/setup.yml                                   |  26
-rw-r--r--   ansible/upload_resources.yml                        |  49
40 files changed, 2336 insertions(+), 0 deletions(-)
diff --git a/ansible/.gitignore b/ansible/.gitignore
new file mode 100644
index 00000000..5cddc2eb
--- /dev/null
+++ b/ansible/.gitignore
@@ -0,0 +1 @@
+ansible_chroot
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
new file mode 100644
index 00000000..c2ee3ff3
--- /dev/null
+++ b/ansible/ansible.cfg
@@ -0,0 +1,8 @@
+[defaults]
+
+# Define any custom roles used by applications installed by the installer.
+# This parameter tells Ansible which additional folder it should
+# search when looking up role code.
+# The relative path ./application is mapped into the ansible container under
+# /ansible/application, where application roles should be found.
+roles_path = /ansible/application
diff --git a/ansible/application.yml b/ansible/application.yml
new file mode 100644
index 00000000..bbac7e5c
--- /dev/null
+++ b/ansible/application.yml
@@ -0,0 +1,23 @@
+---
+- name: Setup nfs server
+ hosts: nfs-server
+ roles:
+ - {role: nfs, when: groups.kubernetes | length > 1 }
+
+- name: Setup nfs mounts
+ hosts: kubernetes:!nfs-server
+ roles:
+ - {role: nfs, when: groups.kubernetes | length > 1 }
+
+- name: Install Helm application {{ app_name }} into offline Kubernetes cluster
+ hosts: infrastructure
+ roles:
+ - role: application-install
+ vars:
+ phase: pre-install
+ - role: application-install
+ vars:
+ phase: install
+ - role: application-install
+ vars:
+ phase: post-install
diff --git a/ansible/application/README.md b/ansible/application/README.md
new file mode 100644
index 00000000..342240be
--- /dev/null
+++ b/ansible/application/README.md
@@ -0,0 +1,58 @@
+# Application specific configuration
+
+This directory is **empty** on purpose in git. Content in this folder is
+placed here at installer packaging time and can be modified by the user on the
+target server where the installer package is installed.
+
+## Application configuration
+
+All application-related configuration variables are defined in the file
+`application_configuration.yml` in this folder. The name of the configuration
+file does not matter, but it must be passed to the Ansible run as a
+command-line variable file.
+
+Example:
+```
+./run_playbook.sh application.yml -i application/hosts.yml -e @application/application_configuration.yml
+```
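+
+A minimal sketch of what such a configuration file might contain (the values
+below are illustrative only; the variables themselves are defined in
+`group_vars/all.yml`):
+```
+app_name: ONAP
+app_data_path: /opt/onap
+app_helm_chart_name: onap
+app_kubernetes_namespace: onap
+```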
+
+## Application Helm charts
+
+Application Helm charts must be available on the infra node before the
+application playbook is executed. That folder on the infra node is specified
+by the `app_helm_charts_infra_directory` variable.
+
+The Helm charts folder name is configured in the `application_configuration.yml`
+file with the `app_helm_charts_directory` variable - it is the path on the
+remote infrastructure server.
+
+Example:
+```
+app_helm_charts_directory: /opt/application/helm_charts
+```
+
+It is expected that the Helm charts are made available by the packaging script
+as a part of the installer SW package. The source directory of the Helm charts
+is specified by the `app_helm_charts_install_directory` variable.
+
+Example:
+```
+app_helm_charts_install_directory: ansible/application/helm_charts/kubernetes
+```
+
+## Application specific roles
+
+The installer supports optional custom pre-install and post-install roles.
+Custom role code folders need to be placed in this directory, and the names of
+those folders are configured in application.yml with the variables
+`application_pre_install_role` and `application_post_install_role`.
+An example is shown below, followed by a sketch of a role layout.
+
+Example:
+```
+application_pre_install_role: "{{ project_configuration }}-patch-role"
+```
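+
+A custom role placed here follows the standard Ansible role layout. A minimal
+sketch of a hypothetical pre-install role's `tasks/main.yml` (the task itself
+is illustrative only):
+```
+- name: Example pre-install patch task (hypothetical)
+  debug:
+    msg: "Running {{ app_name }} pre-install steps"
+```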
+
+
+## Inventory hosts
+
+The Ansible inventory file is the least application-specific part, but in
+practice the example inventory file in git (ansible/inventory/hosts.yml)
+cannot be used directly anyway: at least the IP addresses need to be changed
+according to the target servers after the installer is installed and before
+its execution is started.
+
+So it's better to also place hosts.yml in this application directory and edit it there.
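+
+For example, a minimal edited inventory could look like this (the IP address
+placeholders below must be replaced with real target server addresses):
+```
+all:
+  children:
+    infrastructure:
+      hosts:
+        infrastructure-server:
+          ansible_host: <infra-node-ip>
+    kubernetes:
+      hosts:
+        kubernetes-node-1:
+          ansible_host: <kube-node-ip>
+```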
diff --git a/ansible/docker/.gitignore b/ansible/docker/.gitignore
new file mode 100644
index 00000000..7df2d855
--- /dev/null
+++ b/ansible/docker/.gitignore
@@ -0,0 +1,3 @@
+ansible_docker.tar
+ansible_chroot.tgz
+ansible_chroot
diff --git a/ansible/docker/Dockerfile b/ansible/docker/Dockerfile
new file mode 100644
index 00000000..b0172709
--- /dev/null
+++ b/ansible/docker/Dockerfile
@@ -0,0 +1,34 @@
+FROM alpine:3.8
+
+ARG ansible_version=2.6.3
+LABEL ansible_version=$ansible_version vendor=Samsung
+
+# Install Ansible build dependencies
+RUN apk --no-cache update \
+&& apk --no-cache --update add --virtual build-dependencies \
+ gcc \
+ make \
+ musl-dev \
+ libffi-dev \
+ openssl-dev \
+ python3-dev \
+&& apk add --no-cache \
+ python3 \
+ py3-pip \
+ openssh-client \
+ openssl \
+ py3-openssl \
+ openssh \
+ sshpass \
+&& pip3 install --no-cache-dir --upgrade pip \
+&& pip3 install --no-cache-dir \
+ ansible==$ansible_version \
+ jmespath \
+&& apk del build-dependencies && rm -rf /var/cache/apk/*
+
+ENV ANSIBLE_HOST_KEY_CHECKING false
+ENV ANSIBLE_RETRY_FILES_ENABLED false
+
+WORKDIR /ansible
+
+ENTRYPOINT ["ansible-playbook"]
diff --git a/ansible/docker/build_ansible_image.sh b/ansible/docker/build_ansible_image.sh
new file mode 100755
index 00000000..d54ddc43
--- /dev/null
+++ b/ansible/docker/build_ansible_image.sh
@@ -0,0 +1,52 @@
+#! /usr/bin/env bash
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+ansible_version="$1"
+image_name="${2:-ansible:latest}"
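+
+# Usage sketch (both arguments are optional; values below are illustrative):
+#   ./build_ansible_image.sh                  # builds ansible:latest with the Dockerfile default version
+#   ./build_ansible_image.sh 2.6.3 ansible:2.6.3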
+
+script_path=$(readlink -f "$0")
+script_dir=$(dirname "$script_path")
+
+git_commit=$(git rev-parse --revs-only HEAD)
+build_date=$(date -I)
+
+if [ -z "$ansible_version" ]; then
+ docker build "$script_dir" -t "${image_name}" --label "git-commit=$git_commit" --label "build-date=$build_date"
+else
+ docker build "$script_dir" -t "${image_name}" --label "git-commit=$git_commit" --label "build-date=$build_date" --build-arg ansible_version="$ansible_version"
+fi
+
+# Export the docker image into a chroot directory and tar-archive it. It takes ~40M of space and is packaged together with the SW.
+if "${script_dir}"/create_docker_chroot.sh convert "${image_name}" "${script_dir}"/ansible_chroot ; then
+ cd "$script_dir"
+ echo INFO: "Tarring and zipping the chroot directory..." >&2
+ tar -czf ansible_chroot.tgz ansible_chroot
+ rm -rf "${script_dir}"/ansible_chroot
+ echo INFO: "Finished: ${script_dir}/ansible_chroot.tgz" >&2
+ cd -
+else
+ echo ERROR: "I failed to create a chroot environment" >&2
+ exit 1
+fi
+
+exit 0
\ No newline at end of file
diff --git a/ansible/docker/create_docker_chroot.sh b/ansible/docker/create_docker_chroot.sh
new file mode 100755
index 00000000..f8e256da
--- /dev/null
+++ b/ansible/docker/create_docker_chroot.sh
@@ -0,0 +1,220 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+CMD=$(basename "$0")
+
+help()
+{
+ echo "
+NAME:
+ ${CMD} - create a chroot directory from docker image
+
+DESCRIPTION:
+ It will export docker image into a directory capable of chrooting.
+ It needs and will run these commands (requires docker service):
+ docker create
+ docker export
+
+USAGE:
+ ${CMD} [-h|--help|help]
+ This help
+
+ ${CMD} convert <docker-name> <name-of-directory>
+
+ It will convert a docker image into a directory - no chroot yet.
+ The docker image must already be imported (it is referenced by name,
+ not by a file):
+ docker image ls
+
+ The directory will be created and so this command will fail if some
+ directory or a file of this name (filepath) already exists!
+ There is another script run_chroot.sh with which you can do chroot
+ on this newly created directory - so it is expected that this
+ directory is kept clean and as it is.
+ If you don't care about this feature (run_chroot.sh) and you know
+ what you are doing, then do the necessary mounts and execute:
+ chroot <name-of-directory>/chroot /bin/sh -l
+"
+}
+
+#
+# PLEASE DON'T TOUCH ME
+#
+
+# readme file for run_chroot.sh
+readme()
+{
+ md_codequote='```'
+
+cat > "$CHROOT_METADIR"/README.md <<EOF
+# RUN CHROOT COMMAND
+
+# usage:
+
+${md_codequote}
+run_chroot.sh help
+${md_codequote}
+
+**Don't modify insides of this directory (where this README.md lies).**
+
+The structure is needed as it is.
+
+If you wish to just run chroot by yourself, you can do:
+${md_codequote}
+chroot ./chroot /bin/sh -l
+${md_codequote}
+
+# requirements:
+
+* root privileges
+* docker service
+
+# directory structure:
+${md_codequote}
+ README.md
+ chroot/
+ .overlay
+ .workdir
+ .merged
+${md_codequote}
+EOF
+}
+
+# arg: <docker-name>
+check_docker_image()
+{
+ image="$1"
+ match=$(docker image ls --no-trunc -q "$image" | wc -l)
+
+ case $match in
+ 0)
+ echo ERROR: "Docker image does not exist: ${DOCKER_IMAGE}" >&2
+ exit 1
+ ;;
+ 1)
+ :
+ ;;
+ *)
+ echo ERROR: "Multiple results for this docker name: ${DOCKER_IMAGE}" >&2
+ exit 1
+ ;;
+ esac
+
+ return 0
+}
+
+cleanup()
+{
+ if [ -n "$DOCKER_CONTAINER" ] ; then
+ echo INFO: "Delete the export container: ${DOCKER_CONTAINER}" >&2
+ if ! docker rm "$DOCKER_CONTAINER" > /dev/null ; then
+ echo ERROR: "Failed to delete: ${DOCKER_CONTAINER}" >&2
+ fi
+ fi
+}
+
+on_exit()
+{
+ set +e
+ cleanup
+}
+
+action=nil
+case "$1" in
+ ''|-h|--help|help)
+ help
+ exit 0
+ ;;
+ convert)
+ action=convert
+ DOCKER_IMAGE="$2"
+ CHROOT_METADIR="$3"
+ ;;
+ *)
+ echo ERROR: "Bad usage" >&2
+ help >&2
+ exit 1
+ ;;
+esac
+
+
+case "$action" in
+ ''|nil)
+ echo ERROR: "Nothing to do - missing command" >&2
+ help >&2
+ exit 1
+ ;;
+ convert)
+ if [ -z "$DOCKER_IMAGE" ] || [ -z "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Missing argument" >&2
+ help >&2
+ exit 1
+ fi
+
+ if [ -e "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Filepath already exists: ${CHROOT_METADIR}" >&2
+ echo ERROR: "Please rename it, remove it or use different name" >&2
+ echo ERROR: "I need my working directory empty, thanks" >&2
+ exit 1
+ fi
+
+ # check if docker image is there
+ check_docker_image "$DOCKER_IMAGE"
+
+ # we must be root
+ if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "I need root privileges and you are not root: $(id -nu)" >&2
+ exit 1
+ fi
+
+ # making sure that CHROOT_METADIR is absolute path
+ CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
+
+ # set trap
+ trap on_exit INT QUIT TERM EXIT
+
+ # making readme
+ mkdir -p "$CHROOT_METADIR"/
+ readme
+
+ # create container
+ DOCKER_CONTAINER=$(docker create "$DOCKER_IMAGE")
+ if [ -z "$DOCKER_CONTAINER" ] ; then
+ echo ERROR: "I could not create a container from: ${DOCKER_IMAGE}" >&2
+ exit 1
+ fi
+
+ # unpacking of image
+ mkdir -p "$CHROOT_METADIR"/chroot
+ echo INFO: "Export started - it can take a while to finish..." >&2
+ if ! docker export "$DOCKER_CONTAINER" | tar -C "$CHROOT_METADIR"/chroot -xf - ; then
+ echo ERROR: "Unpacking failed - permissions?" >&2
+ exit 1
+ else
+ echo INFO: "Export success: $CHROOT_METADIR/chroot" >&2
+ echo INFO: "Checkout the README file: $CHROOT_METADIR/README.md" >&2
+ fi
+ ;;
+esac
+
+exit 0
+
diff --git a/ansible/docker/run_chroot.sh b/ansible/docker/run_chroot.sh
new file mode 100755
index 00000000..b38c1295
--- /dev/null
+++ b/ansible/docker/run_chroot.sh
@@ -0,0 +1,465 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+CMD=$(basename "$0")
+UMOUNT_TIMEOUT=120 # 2mins
+
+
+#
+# functions
+#
+
+help()
+{
+ echo "
+NAME:
+ ${CMD} - run command in chrooted directory
+
+DESCRIPTION:
+ It will do the necessary steps to be able to chroot, perform optional
+ mounts, and run commands inside the requested chroot directory.
+
+ It does an overlay mount so nothing inside the chroot is modified - if there
+ is no way to do an overlay mount it will just chroot directly - which means
+ that the user has the power to render the chroot useless - beware...
+
+ The chroot is run in its own namespace for better containerization.
+ Therefore the 'unshare' utility is a necessary requirement.
+
+ After exiting the chroot all of those necessary steps are undone.
+
+USAGE:
+ ${CMD} [-h|--help|help]
+ This help
+
+ ${CMD} [OPTIONS] execute <chroot-directory> [<command with args>...]
+
+ It will do some necessary steps, after which it will execute the chroot
+ command and give you a prompt inside the chroot. When you leave the
+ prompt it will undo those steps.
+ On top of the ordinary chroot it will create an overlay, so every change
+ inside the chroot is only temporary and the chroot is kept stateless -
+ like inside a docker container. If there is no way to do an overlay -
+ an ordinary chroot is done.
+ Default command is: /bin/sh -l
+
+ OPTIONS:
+
+ --mount (ro|rw):<src-dir>:<inner-dir>
+ This option will mount 'src-dir' which is full path on the host
+ system into the relative path 'inner-dir' within the chroot
+ directory.
+ It can be mounted as read-only (ro) or read-write (rw).
+ Multiple usage of this argument can be used to create complex
+ hierarchy. Order is significant.
+ For example:
+ --mount ro:/scripts/ANSIBLE_DIR:/ansible \
+ --mount rw:/scripts/ANSIBLE_DIR/app:/ansible/app
+ This will mount directory ansible as read-only into chroot,
+ but it's subdirectory 'app' will be writeable.
+
+ --workdir <inner-dir>
+ This will set working directory (PWD) inside the chroot.
+
+EXAMPLE:
+ ${CMD} --mount ro:/scripts/ansible:ansible \
+ --mount rw:/scripts/ansible/app:ansible/app \
+ --workdir /ansible execute /tmp/ansible_chroot
+ # pwd
+ /ansible
+ # mount
+ overlay on / type overlay ...
+ /dev/disk on /ansible type ext4 (ro,relatime,errors=remount-ro)
+ /dev/disk on /ansible/application type ext4 (rw,relatime,errors=remount-ro)
+ none on /proc type proc (rw,relatime)
+ none on /sys type sysfs (rw,relatime)
+ none on /dev/shm type tmpfs (rw,relatime)
+
+ Directory /ansible inside the chroot is not writable but subdirectory
+ /ansible/app is.
+
+ The rest of the chroot is under the overlay and all changes will be lost
+ when the chroot command ends. Only changes in the app directory persist
+ because it was bind-mounted as read-write and is not part of the overlay.
+
+ Note: as you can see, the app directory is mounted over itself but read-write.
+"
+}
+
+# arg: <directory>
+is_mounted()
+{
+ mountpoint=$(echo "$1" | sed 's#//*#/#g')
+
+ LANG=C mount | grep -q "^[^[:space:]]\+[[:space:]]\+on[[:space:]]\+${mountpoint}[[:space:]]\+type[[:space:]]\+"
+}
+
+# layers are right to left! First is on the right, top/last is on the left
+do_overlay_mount()
+{
+ if [ -d "$overlay" ] && is_mounted "$overlay" ; then
+ echo ERROR: "The overlay directory is already mounted: $overlay" >&2
+ echo ERROR: "Fix the issue - cannot proceed" >&2
+ exit 1
+ fi
+
+ # prepare dirs
+ rm -rf "$overlay" "$upperdir" "$workdir"
+ mkdir -p "$overlay"
+ mkdir -p "$upperdir"
+ mkdir -p "$workdir"
+
+ # finally overlay mount
+ if ! mount -t overlay --make-rprivate \
+ -o lowerdir="$lowerdir",upperdir="$upperdir",workdir="$workdir" \
+ overlay "$overlay" ;
+ then
+ echo ERROR: "Failed to do overlay mount!" >&2
+ echo ERROR: "Please check that your system supports overlay!" >&2
+ echo NOTE: "Continuing with the ordinary chroot without overlay!"
+
+ CHROOT_DIR="$lowerdir"
+ return 1
+ fi
+
+ CHROOT_DIR="$overlay"
+
+ return 0
+}
+
+cleanup()
+{
+ case "$OVERLAY_MOUNT" in
+ yes)
+ echo INFO: "Umounting overlay..." >&2
+ if ! umount_retry "$CHROOT_DIR" ; then
+ echo ERROR: "Cannot umount chroot: $CHROOT_DIR" >&2
+ return 1
+ fi
+
+ ;;
+ no)
+ echo INFO: "No overlay to umount" >&2
+ ;;
+ esac
+
+ if ! is_mounted "$overlay" ; then
+ echo INFO: "Deleting of temp directories..." >&2
+ rm -rf "$overlay" "$upperdir" "$workdir"
+ else
+ echo ERROR: "Overlay is still mounted: $CHROOT_DIR" >&2
+ echo ERROR: "Cannot delete: $overlay" >&2
+ echo ERROR: "Cannot delete: $upperdir" >&2
+ echo ERROR: "Cannot delete: $workdir" >&2
+ return 1
+ fi
+}
+
+check_external_mounts()
+{
+ echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+
+ case "$mount_type" in
+ ro|rw)
+ :
+ ;;
+ *)
+ echo ERROR: "Wrong mount type (should be 'ro' or 'rw') in: ${mountexpr}" >&2
+ exit 1
+ ;;
+ esac
+
+ if ! [ -d "$external" ] ; then
+ echo ERROR: "Directory for mounting does not exist: ${external}" >&2
+ exit 1
+ fi
+
+ if echo "$internal" | grep -q '^/*$' ; then
+ echo ERROR: "Unacceptable internal path: ${internal}" >&2
+ exit 1
+ fi
+ done
+}
+
+do_external_mounts()
+{
+ echo INFO: "Bind mounting of external mounts..." >&2
+ echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+
+ if is_mounted "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Mountpoint is already mounted: ${CHROOT_DIR}/${internal}" >&2
+ echo ERROR: "Fix the issue - cannot proceed" >&2
+ exit 1
+ fi
+
+ if ! mkdir -p "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Cannot create mountpoint: ${CHROOT_DIR}/${internal}" >&2
+ exit 1
+ fi
+
+ if ! mount --make-rprivate -o bind,${mount_type} "$external" "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Failed to mount: ${external} -> ${internal}" >&2
+ exit 1
+ else
+ echo INFO: "Mount: ${external} -> ${internal}" >&2
+ fi
+ done
+}
+
+# arg: <mountpoint>
+umount_retry()
+{
+ mountpoint=$(echo "$1" | sed 's#//*#/#g')
+ timeout=${UMOUNT_TIMEOUT}
+
+ umount "$mountpoint" 2>/dev/null
+ while is_mounted "$mountpoint" && [ $timeout -gt 0 ] ; do
+ umount "$mountpoint" 2>/dev/null
+ sleep 1
+ timeout=$(( timeout - 1 ))
+ done
+
+ if ! is_mounted "$mountpoint" ; then
+ return 0
+ fi
+
+ return 1
+}
+
+undo_external_mounts()
+{
+ echo INFO: "Umount external mount points..." >&2
+ echo "$EXTERNAL_MOUNTS" | tac | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+ if umount_retry "${CHROOT_DIR}/${internal}" ; then
+ echo INFO: "Unmounted: ${CHROOT_DIR}/${internal}" >&2
+ else
+ echo ERROR: "Failed to umount: ${CHROOT_DIR}/${internal}" >&2
+ fi
+ done
+}
+
+install_wrapper()
+{
+ cat > "$CHROOT_DIR"/usr/local/bin/fakeshell.sh <<EOF
+#!/bin/sh
+
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+export PATH
+
+gid_tty=\$(getent group | sed -n '/^tty:/p' | cut -d: -f 3)
+
+mount -t proc proc /proc
+mount -t sysfs none /sys
+mount -t tmpfs none /dev
+
+mkdir -p /dev/shm
+mkdir -p /dev/pts
+mount -t devpts -o gid=\${gid_tty},mode=620 none /dev/pts
+
+[ -e /dev/full ] || mknod -m 666 /dev/full c 1 7
+[ -e /dev/ptmx ] || mknod -m 666 /dev/ptmx c 5 2
+[ -e /dev/random ] || mknod -m 644 /dev/random c 1 8
+[ -e /dev/urandom ] || mknod -m 644 /dev/urandom c 1 9
+[ -e /dev/zero ] || mknod -m 666 /dev/zero c 1 5
+[ -e /dev/tty ] || mknod -m 666 /dev/tty c 5 0
+[ -e /dev/console ] || mknod -m 622 /dev/console c 5 1
+[ -e /dev/null ] || mknod -m 666 /dev/null c 1 3
+
+chown root:tty /dev/console
+chown root:tty /dev/ptmx
+chown root:tty /dev/tty
+
+mkdir -p "\$1" || exit 1
+cd "\$1" || exit 1
+shift
+
+exec "\$@"
+
+EOF
+ chmod +x "$CHROOT_DIR"/usr/local/bin/fakeshell.sh
+}
+
+on_exit()
+{
+ set +e
+ echo
+
+ if [ -n "$OVERLAY_MOUNT" ] ; then
+ undo_external_mounts
+ fi
+ cleanup
+}
+
+
+#
+# parse arguments
+#
+
+state=nil
+action=nil
+EXTERNAL_MOUNTS=''
+CHROOT_WORKDIR=''
+CHROOT_METADIR=''
+CHROOT_DIR=''
+COMMAND=''
+while [ -n "$1" ] ; do
+ case "$state" in
+ nil)
+ case "$1" in
+ ''|-h|--help|help)
+ help
+ exit 0
+ ;;
+ --mount)
+ EXTERNAL_MOUNTS=$(printf "%s\n%s\n" "$EXTERNAL_MOUNTS" "${2}")
+ state=next
+ ;;
+ --workdir)
+ if [ -z "$CHROOT_WORKDIR" ] ; then
+ CHROOT_WORKDIR="$2"
+ state=next
+ else
+ echo ERROR: "Multiple working directory argument" >&2
+ help >&2
+ exit 1
+ fi
+ ;;
+ execute)
+ action=execute
+ state=execute
+ ;;
+ *)
+ echo ERROR: "Bad usage" >&2
+ help >&2
+ exit 1
+ ;;
+ esac
+ ;;
+ next)
+ state=nil
+ ;;
+ execute)
+ CHROOT_METADIR="$1"
+ shift
+ break
+ ;;
+ esac
+ shift
+done
+
+
+case "$action" in
+ ''|nil)
+ echo ERROR: "Nothing to do - missing command" >&2
+ help >&2
+ exit 1
+ ;;
+ execute)
+ # firstly do sanity checking ...
+
+ if [ -z "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Missing argument" >&2
+ help >&2
+ exit 1
+ fi
+
+ # making sure that CHROOT_METADIR is absolute path
+ CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
+
+ if ! [ -d "$CHROOT_METADIR"/chroot ] ; then
+ echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2
+ exit 1
+ fi
+
+ # check external mounts if there are any
+ check_external_mounts
+
+ # check workdir
+ if [ -n "$CHROOT_WORKDIR" ] ; then
+ CHROOT_WORKDIR=$(echo "$CHROOT_WORKDIR" | sed -e 's#^/*##' -e 's#//*#/#g')
+ fi
+
+ # we must be root
+ if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "Need to be root and you are not: $(id -nu)" >&2
+ exit 1
+ fi
+
+ if ! which unshare >/dev/null 2>/dev/null ; then
+ echo ERROR: "'unshare' system command is missing - ABORT" >&2
+ echo INFO: "Try to install 'util-linux' package" >&2
+ exit 1
+ fi
+
+ # ... sanity checking done
+
+ # setup paths
+ lowerdir="$CHROOT_METADIR"/chroot
+ upperdir="$CHROOT_METADIR"/.overlay
+ workdir="$CHROOT_METADIR"/.workdir
+ overlay="$CHROOT_METADIR"/.merged
+
+ # set trap
+ trap on_exit QUIT TERM EXIT
+
+ # mount overlay
+ OVERLAY_MOUNT=''
+ if do_overlay_mount ; then
+ # overlay chroot
+ OVERLAY_MOUNT=yes
+ else
+ # non overlay mount
+ OVERLAY_MOUNT=no
+ fi
+
+ # do the user-specific mounts
+ do_external_mounts
+
+ # I need this wrapper to do some setup inside the chroot...
+ install_wrapper
+
+ # execute chroot
+ # copy resolv.conf
+ cp -a /etc/resolv.conf "$CHROOT_DIR"/etc/resolv.conf
+
+ if [ -n "$1" ] ; then
+ :
+ else
+ set -- /bin/sh -l
+ fi
+ unshare -mfpi --propagation private \
+ chroot "$CHROOT_DIR" /usr/local/bin/fakeshell.sh "${CHROOT_WORKDIR:-/}" "$@"
+ ;;
+esac
+
+exit 0
+
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
new file mode 100755
index 00000000..fad84e07
--- /dev/null
+++ b/ansible/group_vars/all.yml
@@ -0,0 +1,129 @@
+---
+###################################
+# Resources configuration entries #
+###################################
+
+# Resource host information
+
+# folder on resource host where tars with resources are present
+resources_dir:
+
+# tarfile name within this folder with offline infrastructure sw
+resources_filename:
+
+# the purpose of auxiliary resources is to provide the user an interface
+# for distributing another big tar to the infra node, which might be
+# useful later on in application playbooks; optional param
+aux_resources_filename:
+
+# resources can be exported via nfs
+# default is no - client will use ssh
+# if set yes but nfs-utils is missing then fallback to ssh
+resources_on_nfs: no
+
+# Infra node specific information
+
+# offline solution source data binaries will be decompressed into the following dir on the infra node
+# e.g. app_data_path: /opt/onap
+app_data_path:
+
+# additional data path for auxiliary data transfer
+# e.g. aux_data_path: /opt/onap/onap_me_docker_images
+aux_data_path:
+
+
+
+##########################################
+# Offline Infrastructure specific params #
+##########################################
+
+# information from which rootCA is created
+# e.g.
+# organization_name: Samsung
+# state_or_province_name: Poland
+# country_name: PL
+# locality_name: Krakow
+certificates:
+ organization_name:
+ state_or_province_name:
+ country_name:
+ locality_name:
+
+# Force k8s cluster redeploy if it exists already
+# Default value is to allow redeploy
+redeploy_k8s_env: yes
+
+# Distribute offline rpm repository
+# Default value is to distribute rpm
+deploy_rpm_repository: yes
+
+# The offline solution deploys an app-specific rpm repository and also
+# requires a name for the k8s cluster
+# e.g. app_name: ONAP
+app_name:
+
+# as the nexus blob is prepopulated at build time, the following block
+# of runtime_images code provides an alternative way to insert
+# specified images into nexus during infrastructure playbook execution
+# images specified there must be available inside the aux_resources_filename
+# tar file
+# if runtime_images is not specified, nothing is inserted on top of the
+# existing prebuilt nexus blob at installation time
+# Component name must match with tar filename
+# e.g.
+# aaiadapter-0.0.1.tar is expected in aux_data_path for aaiadapter image
+#runtime_images:
+# aaiadapter-0.0.1:
+# registry: "nexus3.onap.org:10001"
+# path: "/onap/aaiadapter/aaiadapter"
+# tag: "latest"
+runtime_images:
+
+
+###############################
+# Application specific params #
+###############################
+
+# Project name to utilize same codebase
+# e.g. project_configuration: onap-me
+project_configuration:
+
+# App Helm charts dir. E.g. application/helm_charts/<xxx> where xxx is a charts folder name.
+# Helm charts are expected to be inside the SW package, somewhere inside ./ansible/application;
+# they will be available to the offline installer under /ansible/application/<helm_charts_name>.
+# For the OOM project, helm charts are usually within the kubernetes sub-folder,
+# so the path for them can be:
+# e.g app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
+app_helm_charts_install_directory:
+
+# to specify target dir where helm charts should be copied into on infra node
+# this should be directory with all charts and Makefile
+# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
+app_helm_charts_infra_directory:
+
+# Main Helm chart to install
+# e.g. app_helm_chart_name: onap
+app_helm_chart_name:
+
+# Helm release name (visible in POD names) used by Helm
+# e.g. app_helm_release_name: "{{ project_configuration }}"
+app_helm_release_name:
+
+# Kubernetes namespace where application is installed
+# e.g. app_kubernetes_namespace: onap
+app_kubernetes_namespace:
+
+# Optional application custom Ansible roles name for pre and post install logic.
+# Location of additional custom roles is defined in ansible.cfg with roles_path.
+# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role"
+application_pre_install_role:
+application_post_install_role:
+
+# any other application specific params can be specified in this file
+# e.g.
+# onap_values:
+# openStackKeyStoneUrl: "http://1.2.3.4:5000"
+# openStackServiceTenantName: "services"
+# openStackDomain: "Default"
+# openStackUserName: "admin"
+# openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"
diff --git a/ansible/group_vars/infrastructure.yml b/ansible/group_vars/infrastructure.yml
new file mode 100755
index 00000000..ab314055
--- /dev/null
+++ b/ansible/group_vars/infrastructure.yml
@@ -0,0 +1,36 @@
+---
+nfs_mount_path: /dockerdata-nfs
+vnc_passwd: samsung
+simulated_hosts:
+ git:
+ - gerrit.onap.org
+ - git.rancher.io
+ - github.com
+ http:
+ - git.onap.org
+ - nexus.onap.org
+ - repo.infra-server
+ - www.getcloudify.org
+ - www.springframework.org
+ - repo.maven.apache.org
+ - repo1.maven.org
+ nexus:
+ - docker.elastic.co
+ - docker.io
+ - gcr.io
+ - nexus.{{ ansible_nodename }}
+ - nexus3.onap.org
+ - registry-1.docker.io
+ - registry.hub.docker.com
+ - registry.npmjs.org
+all_simulated_hosts:
+ "{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
+rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080"
+rancher_remove_other_env: yes
+rancher_redeploy_k8s_env: yes
+populate_nexus: no
+kube_directory: /root/.kube
+kubectl_bin_dir: /usr/local/bin
+helm_bin_dir: /usr/local/bin
+helm_repository_name: local
+helm_repository_url: http://127.0.0.1:8879
diff --git a/ansible/infrastructure.yml b/ansible/infrastructure.yml
new file mode 100644
index 00000000..789f8716
--- /dev/null
+++ b/ansible/infrastructure.yml
@@ -0,0 +1,71 @@
+---
+- name: Perform common environment setup for nodes
+ hosts: infrastructure, kubernetes
+ tasks:
+ - name: Setup resolv.conf
+ lineinfile:
+ line: "nameserver {{ hostvars[groups.infrastructure[0]].ansible_host }}"
+ path: /etc/resolv.conf
+ state: present
+ insertbefore: BOF
+ become: yes
+ - name: Add application offline rpm repository
+ yum_repository:
+ name: "{{ app_name }}"
+ file: "{{ app_name | lower }}"
+ description: "{{ app_name }} offline repository"
+ baseurl: "{{ 'http://repo.infra-server' if 'infrastructure' not in group_names else 'file://' + app_data_path + '/pkg/rhel' }}"
+ gpgcheck: no
+ enabled: yes
+ when: deploy_rpm_repository
+ become: yes
+
+- name: Setup firewall
+ hosts: infrastructure, kubernetes
+ roles:
+ - role: firewall
+ vars:
+ state: disable
+
+- name: Setup infrastructure servers
+ hosts: infrastructure
+ roles:
+ - certificates
+ - docker
+ - dns
+ - vncserver
+ - role: nexus
+ vars:
+ phase: install
+ - nginx
+ tasks:
+ - name: "wait for nexus to come up"
+ uri:
+ url: "{{ nexus_url }}/service/metrics/healthcheck"
+ user: admin
+ password: admin123
+ force_basic_auth: yes
+ method: GET
+ register: nexus_wait
+ until: not nexus_wait.failed
+ retries: 30
+ delay: 10
+
+- name: Nexus changes in runtime
+ hosts: infrastructure
+ roles:
+ - role: nexus
+ vars:
+ phase: configure
+ when: populate_nexus | bool
+ - role: nexus
+ vars:
+ phase: runtime-populate
+ when: runtime_images is defined
+
+- name: Setup base for Kubernetes nodes
+ hosts: kubernetes
+ roles:
+ - docker
+ tasks:
+ - import_tasks: roles/certificates/tasks/upload_root_ca.yml
diff --git a/ansible/inventory/hosts.yml b/ansible/inventory/hosts.yml
new file mode 100644
index 00000000..f11ef7b1
--- /dev/null
+++ b/ansible/inventory/hosts.yml
@@ -0,0 +1,32 @@
+---
+# This group contains hosts with all resources (binaries, packages, etc.)
+# in tarball.
+all:
+ vars:
+ # this key is supposed to be generated during setup.yml playbook execution
+ # change it only when you have a better one working for all nodes
+ ansible_ssh_private_key_file: /root/.ssh/offline_ssh_key
+ ansible_ssh_common_args: '-o StrictHostKeyChecking=no'
+
+ children:
+ resources:
+ hosts:
+ resource-host:
+ ansible_host: 10.8.8.5
+
+ # This is group of hosts where nexus, nginx, dns and all other required
+ # services are running.
+ infrastructure:
+ hosts:
+ infrastructure-server:
+ ansible_host: 10.8.8.13
+
+ # This is group of hosts which are/will be part of Kubernetes cluster.
+ kubernetes:
+ hosts:
+ kubernetes-node-1:
+ ansible_host: 10.8.8.19
+
+ nfs-server:
+ hosts:
+ kubernetes-node-1:
diff --git a/ansible/library/json_add.py b/ansible/library/json_add.py
new file mode 100644
index 00000000..6aad2d7c
--- /dev/null
+++ b/ansible/library/json_add.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+from ansible.module_utils.basic import AnsibleModule
+import json
+import os
+
+DOCUMENTATION="""
+---
+module: json_add
+description:
+ - This module searches top-level objects in a json file and adds the
+ specified value to the list for the specified key.
+ - If the file does not exist, the module will create it automatically.
+
+options:
+ path:
+ required: true
+ aliases: [name, destfile, dest]
+ description:
+ - The json file to modify.
+ key:
+ required: true
+ description:
+ - Top level object.
+ value:
+ required: true
+ description:
+ - Value to add to specified key.
+"""
+
+def load_json(path):
+ if os.path.exists(path):
+ with open(path, 'r') as f:
+ return json.load(f)
+ else:
+ return {}
+
+def value_is_set(path, key, value, json_obj):
+ return value in json_obj.get(key, [])
+
+def insert_to_json(path, key, value, check_mode=False):
+ json_obj = load_json(path)
+ if not value_is_set(path, key, value, json_obj):
+ if not check_mode:
+ json_obj.setdefault(key, []).append(value)
+ store_json(path, json_obj)
+ return True, 'Value %s added to %s.' % (value, key)
+ else:
+ return False, ''
+
+def store_json(path, json_obj):
+ with open(path, 'w') as f:
+ json.dump(json_obj, f, indent=4)
+
+def check_file_attrs(module, changed, message, diff):
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += ' '
+ changed = True
+ message += 'File attributes changed.'
+
+ return changed, message
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name', 'destfile', 'dest']),
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ params = module.params
+ path = params['path']
+ key = params['key']
+ value = params['value']
+ try:
+ changed, msg = insert_to_json(path, key, value, module.check_mode)
+ fs_diff = {}
+ changed, msg = check_file_attrs(module, changed, msg, fs_diff)
+ module.exit_json(changed=changed, msg=msg, file_attr_diff=fs_diff)
+ except IOError as e:
+ # IOError has no 'msg' attribute; use str(e) for the failure message
+ module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ run_module()
+
diff --git a/ansible/library/rancher_k8s_environment.py b/ansible/library/rancher_k8s_environment.py
new file mode 100644
index 00000000..d3d8ac02
--- /dev/null
+++ b/ansible/library/rancher_k8s_environment.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+
+DOCUMENTATION='''
+---
+module: rancher_k8s_environment
+description:
+ - This module will create or delete Kubernetes environment.
+ - It will also delete other environments when variables are set accordingly.
+notes:
+ - It identifies environments only by name. Expect problems with same-named environments.
+ - All hosts running the Kubernetes cluster should have the same OS, otherwise there
+ is a possibility of misbehavior.
+options:
+ server:
+ required: true
+ description:
+ - Url of rancher server i.e. "http://10.0.0.1:8080".
+ name:
+ required: true
+ description:
+ - Name of the environment to create/remove.
+ descr:
+ description:
+ - Description of environment to create.
+ state:
+ description:
+ - If "present" environment will be created or setup depending if it exists.
+ With multiple environments with same name expect error.
+ If "absent" environment will be removed. If multiple environments have same
+ name all will be deleted.
+ default: present
+ choices: [present, absent]
+ delete_not_k8s:
+ description:
+ - Indicates if environments with different orchestration than Kubernetes should
+ be deleted.
+ type: bool
+ default: yes
+ delete_other_k8s:
+ description:
+ - Indicates if environments with different name than specified should
+ be deleted.
+ type: bool
+ default: no
+ force:
+ description:
+ - Indicates if environment should be deleted and recreated.
+ type: bool
+ default: yes
+ host_os:
+ required: true
+ description:
+ - OS (family from ansible_os_family variable) of the hosts running cluster. If
+ "RedHat" then datavolume fix will be applied.
+ Fix described here:
+ https://github.com/rancher/rancher/issues/10015
+'''
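+
+# A minimal usage sketch; the server URL and names below are illustrative only:
+EXAMPLES='''
+- name: Create Kubernetes environment
+  rancher_k8s_environment:
+    server: "http://10.0.0.1:8080"
+    name: k8s-env
+    descr: "Kubernetes environment for offline deployment"
+    state: present
+    host_os: "{{ ansible_os_family }}"
+'''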
+
+import json
+import time
+
+import requests
+from ansible.module_utils.basic import AnsibleModule
+
+
+
+def get_existing_environments(rancher_address):
+ req = requests.get('{}/v2-beta/projects'.format(rancher_address))
+ envs = req.json()['data']
+ return envs
+
+
+def not_k8s_ids(environments):
+ envs = filter(lambda x: x['orchestration'] != 'kubernetes', environments)
+ return [env['id'] for env in envs]
+
+
+def other_k8s_ids(environments, name):
+ envs = filter(lambda x: x['orchestration'] == 'kubernetes' and x['name'] != name,
+ environments)
+ return [env['id'] for env in envs]
+
+
+def env_ids_by_name(environments, name):
+ envs = filter(lambda x: x['name'] == name, environments)
+ return [env['id'] for env in envs]
+
+
+def env_info_by_id(environments, env_id):
+ env = filter(lambda x: x['id'] == env_id, environments)
+ return [{'id': x['id'], 'name': x['name']} for x in env][0]
+
+
+def delete_multiple_environments(rancher_address, env_ids):
+ deleted = []
+ for env_id in env_ids:
+ deleted.append(delete_environment(rancher_address, env_id))
+ return deleted
+
+
+def delete_environment(rancher_address, env_id):
+ req = requests.delete('{}/v2-beta/projects/{}'.format(rancher_address, env_id))
+ deleted = req.json()['data'][0]
+ return {'id': deleted['id'],
+ 'name': deleted['name'],
+ 'orchestration': deleted['orchestration']}
+
+
+def create_k8s_environment(rancher_address, name, descr):
+ k8s_template_id = None
+ for _ in range(10):
+ k8s_template = requests.get(
+ '{}/v2-beta/projecttemplates?name=Kubernetes'.format(rancher_address)).json()
+ if k8s_template['data']:
+ k8s_template_id = k8s_template['data'][0]['id']
+ break
+ time.sleep(3)
+ if k8s_template_id is None:
+ raise ValueError('Template for kubernetes not found.')
+ body = {
+ 'name': name,
+ 'description': descr,
+ 'projectTemplateId': k8s_template_id,
+ 'allowSystemRole': False,
+ 'members': [],
+ 'virtualMachine': False,
+ 'servicesPortRange': None,
+ 'projectLinks': []
+ }
+
+ body_json = json.dumps(body)
+ req = requests.post('{}/v2-beta/projects'.format(rancher_address), data=body_json)
+ created = req.json()
+ return {'id': created['id'], 'name': created['name']}
+
+
+def get_kubelet_service(rancher_address, env_id):
+ for _ in range(10):
+ response = requests.get(
+ '{}/v2-beta/projects/{}/services/?name=kubelet'.format(rancher_address,
+ env_id))
+
+ if response.status_code >= 400:
+ # too early or too late for obtaining data
+ # small delay will improve our chances to collect it
+ time.sleep(1)
+ continue
+
+ content = response.json()
+
+ if content['data']:
+ return content['data'][0]
+
+ # this is unfortunate: a response from the service api was received but
+ # the data is not available; let's try again
+ time.sleep(5)
+
+ return None
+
+
+def fix_datavolume_rhel(rancher_address, env_id):
+ kubelet_svc = get_kubelet_service(rancher_address, env_id)
+ if kubelet_svc:
+ try:
+ data_volume_index = kubelet_svc['launchConfig']['dataVolumes'].index(
+ '/sys:/sys:ro,rprivate')
+ except ValueError:
+ return 'Already changed'
+ kubelet_svc['launchConfig']['dataVolumes'][
+ data_volume_index] = '/sys/fs/cgroup:/sys/fs/cgroup:ro,rprivate'
+ body = {
+ 'inServiceStrategy': {
+ 'batchSize': 1,
+ 'intervalMillis': 2000,
+ 'startFirst': False,
+ 'launchConfig': kubelet_svc['launchConfig'],
+ 'secondaryLaunchConfigs': []
+ }
+ }
+ body_json = json.dumps(body)
+ requests.post(
+ '{}/v2-beta/projects/{}/services/{}?action=upgrade'.format(rancher_address,
+ env_id,
+ kubelet_svc[
+ 'id']),
+ data=body_json)
+ for _ in range(10):
+ req_svc = requests.get(
+ '{}/v2-beta/projects/{}/services/{}'.format(rancher_address, env_id,
+ kubelet_svc['id']))
+ req_svc_content = req_svc.json()
+ if 'finishupgrade' in req_svc_content['actions']:
+ req_finish = requests.post(
+ req_svc_content['actions']['finishupgrade'])
+ return {
+ 'dataVolumes': req_finish.json()['upgrade']['inServiceStrategy'][
+ 'launchConfig']['dataVolumes']}
+ time.sleep(5)
+ else:
+ raise ValueError('Could not get kubelet service')
+
+
+def create_registration_tokens(rancher_address, env_id):
+ body = {'name': str(env_id)}
+ body_json = json.dumps(body)
+ # 'data' must be an argument of requests.post, not of str.format
+ response = requests.post(
+ '{}/v2-beta/projects/{}/registrationtokens'.format(rancher_address, env_id),
+ data=body_json)
+ for _ in range(10):
+ tokens = requests.get(response.json()['links']['self'])
+ tokens_content = tokens.json()
+ if tokens_content['image'] is not None and tokens_content[
+ 'registrationUrl'] is not None:
+ return {'image': tokens_content['image'],
+ 'reg_url': tokens_content['registrationUrl']}
+ time.sleep(3)
+ return None
+
+
+def get_registration_tokens(rancher_address, env_id):
+ reg_tokens = requests.get(
+ '{}/v2-beta/projects/{}/registrationtokens'.format(rancher_address, env_id))
+ reg_tokens_content = reg_tokens.json()
+ tokens = reg_tokens_content['data']
+ if not tokens:
+ return None
+ return {'image': tokens[0]['image'], 'reg_url': tokens[0]['registrationUrl']}
+
+
+def create_apikey(rancher_address, env_id):
+ body = {
+ 'name': 'kubectl_env_{}'.format(env_id),
+ 'description': "Provides access to kubectl"
+ }
+ body_json = json.dumps(body)
+ # 'data' must be an argument of requests.post, not of str.format
+ apikey_req = requests.post(
+ '{}/v2-beta/apikey'.format(rancher_address), data=body_json)
+ apikey_content = apikey_req.json()
+ return {'public': apikey_content['publicValue'],
+ 'private': apikey_content['secretValue']}
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ descr=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ delete_other_k8s=dict(type='bool', default=False),
+ delete_not_k8s=dict(type='bool', default=True),
+ force=dict(type='bool', default=True),
+ host_os=dict(type='str', required=True)
+ )
+ )
+
+ params = module.params
+ rancher_address = params['server']
+ name = params['name']
+ descr = params['descr']
+ delete_not_k8s = params['delete_not_k8s']
+ delete_other_k8s = params['delete_other_k8s']
+ force = params['force']
+ host_os = params['host_os']
+ state = params['state']
+
+ existing_envs = get_existing_environments(rancher_address)
+ same_name_ids = env_ids_by_name(existing_envs, name)
+
+ to_delete_ids = []
+ changes = {}
+
+ if delete_other_k8s:
+ to_delete_ids += other_k8s_ids(existing_envs, name)
+
+ if delete_not_k8s:
+ to_delete_ids += not_k8s_ids(existing_envs)
+ if force or state == 'absent':
+ to_delete_ids += same_name_ids
+
+ deleted = delete_multiple_environments(rancher_address, to_delete_ids)
+
+ if deleted:
+ changes['deleted'] = deleted
+ if state == 'absent':
+ module.exit_json(changed=True, deleted=changes['deleted'])
+ else:
+ if state == 'absent':
+ module.exit_json(changed=False)
+
+ if len(same_name_ids) > 1 and not force:
+ module.fail_json(msg='Multiple environments with same name. '
+ 'Use "force: yes" to delete '
+ 'all environments with same name.')
+
+ if same_name_ids and not force:
+ changes['environment'] = env_info_by_id(existing_envs, same_name_ids[0])
+ if host_os == 'RedHat':
+ try:
+ rhel_fix = fix_datavolume_rhel(rancher_address, same_name_ids[0])
+ changes['rhel_fix'] = rhel_fix
+ except ValueError as err:
+ module.fail_json(
+ msg='Error: {} Try to recreate k8s environment.'.format(err))
+
+ reg_tokens = get_registration_tokens(rancher_address, same_name_ids[0])
+ if not reg_tokens:
+ reg_tokens = create_registration_tokens(rancher_address, same_name_ids[0])
+ changes['registration_tokens'] = reg_tokens
+
+ apikey = create_apikey(rancher_address, same_name_ids[0])
+ changes['apikey'] = apikey
+ module.exit_json(changed=True, data=changes,
+ msg='New environment was not created. Only set up was done')
+ try:
+ new_env = create_k8s_environment(rancher_address, name, descr)
+ except ValueError as err:
+ module.fail_json(msg='Error: {} Try to recreate k8s environment.'.format(err))
+
+ if host_os == 'RedHat':
+ try:
+ rhel_fix = fix_datavolume_rhel(rancher_address, new_env['id'])
+ changes['rhel_fix'] = rhel_fix
+ except ValueError as err:
+ module.fail_json(msg='Error: {} Try to recreate k8s environment.'.format(
+ err))
+
+ reg_tokens = create_registration_tokens(rancher_address, new_env['id'])
+
+ apikey = create_apikey(rancher_address, new_env['id'])
+
+ changes['environment'] = new_env
+ changes['registration_tokens'] = reg_tokens
+ changes['apikey'] = apikey
+
+ module.exit_json(changed=True, data=changes)
+
+
+if __name__ == '__main__':
+ run_module()
+
diff --git a/ansible/rancher_kubernetes.yml b/ansible/rancher_kubernetes.yml
new file mode 100644
index 00000000..196f1fc2
--- /dev/null
+++ b/ansible/rancher_kubernetes.yml
@@ -0,0 +1,31 @@
+---
+- name: Install binaries for controlling deployment
+ hosts: infrastructure
+ roles:
+ - kubectl
+ - helm
+
+- name: Deploy rancher server and create k8s env
+ hosts: infrastructure
+ roles:
+ - rancher
+ vars:
+ rancher_role: server
+
+- name: Deploy rancher agents
+ hosts: kubernetes
+ roles:
+ - rancher
+ vars:
+ rancher_role: agent
+
+- name: Wait for Kubernetes environment to be healthy
+ hosts: infrastructure
+ tasks:
+ - name: Check cluster health
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta/projects/{{ k8s_env_id }}"
+ register: env_info
+ retries: 30
+ delay: 15
+ until: "env_info.json.healthState == 'healthy'"
diff --git a/ansible/roles/application-install/defaults/main.yml b/ansible/roles/application-install/defaults/main.yml
new file mode 100644
index 00000000..473fbb80
--- /dev/null
+++ b/ansible/roles/application-install/defaults/main.yml
@@ -0,0 +1 @@
+phase: install
diff --git a/ansible/roles/application-install/tasks/install.yml b/ansible/roles/application-install/tasks/install.yml
new file mode 100644
index 00000000..54b64439
--- /dev/null
+++ b/ansible/roles/application-install/tasks/install.yml
@@ -0,0 +1,34 @@
+---
+- name: Helm init and upgrade
+ command: |
+ {{ helm_bin_dir }}/helm init
+ --upgrade
+ --skip-refresh
+
+- name: Wait for helm
+ wait_for: timeout=10
+ delegate_to: localhost
+
+- name: Get all helm repos
+ command: "{{ helm_bin_dir }}/helm repo list"
+ register: repos
+
+- name: Remove stable repo
+ command: "{{ helm_bin_dir }}/helm repo remove stable"
+ when: "'stable' in repos.stdout"
+
+- name: Helm Serve
+ shell: "{{ helm_bin_dir }}/helm serve &"
+ async: 45
+ poll: 0
+
+- name: Helm Add Repo
+ command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name }} {{ helm_repository_url }}"
+
+- name: Helm Make All
+ make:
+ chdir: "{{ app_helm_charts_directory }}"
+ target: all
+
+- name: Helm Install application {{ app_name }}
+ command: "helm install {{ helm_repository_name }}/{{ app_helm_chart_name }} --name {{ app_helm_release_name }} --namespace {{ app_kubernetes_namespace }}"
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
new file mode 100644
index 00000000..3306d9e4
--- /dev/null
+++ b/ansible/roles/application-install/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- debug:
+ msg: "phase is {{ phase }}"
+
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/roles/nexus/defaults/main.yml b/ansible/roles/nexus/defaults/main.yml
new file mode 100644
index 00000000..57a79f95
--- /dev/null
+++ b/ansible/roles/nexus/defaults/main.yml
@@ -0,0 +1,2 @@
+# Defaults to install; can be set to configure.
+phase: install
diff --git a/ansible/roles/nexus/files/configure.groovy b/ansible/roles/nexus/files/configure.groovy
new file mode 100644
index 00000000..5691fe64
--- /dev/null
+++ b/ansible/roles/nexus/files/configure.groovy
@@ -0,0 +1,37 @@
+import org.sonatype.nexus.security.realm.RealmManager
+import org.sonatype.nexus.repository.attributes.AttributesFacet
+import org.sonatype.nexus.security.user.UserManager
+import org.sonatype.nexus.repository.manager.RepositoryManager
+import org.sonatype.nexus.security.user.UserNotFoundException
+
+/* Use the container to look up some services. */
+realmManager = container.lookup(RealmManager.class)
+userManager = container.lookup(UserManager.class, "default") //default user manager
+repositoryManager = container.lookup(RepositoryManager.class)
+
+/* Managers are used where the scripting API cannot be. Note that the scripting API can mostly only create, and that creation methods return objects of the created entities. */
+/* Perform cleanup by removing all repos and users. Realms do not need to be re-disabled; the admin and anonymous users will not be removed. */
+userManager.listUserIds().each({ id ->
+ if (id != "anonymous" && id != "admin")
+ userManager.deleteUser(id)
+})
+
+repositoryManager.browse().each {
+ repositoryManager.delete(it.getName())
+}
+
+/* Add bearer token realms at the end of realm lists... */
+realmManager.enableRealm("NpmToken")
+realmManager.enableRealm("DockerToken")
+
+/* Create the docker user. */
+security.addUser("docker", "docker", "docker", "docker@example.com", true, "docker", ["nx-anonymous"])
+
+/* Create npm and docker repositories. Their default configuration should be compliant with our requirements, except the docker registry creation. */
+repository.createNpmHosted("npm-private")
+def r = repository.createDockerHosted("docker", 8082, 0)
+
+/* forceBasicAuth is true by default; it must be set to false for the docker repo. */
+conf=r.getConfiguration()
+conf.attributes("docker").set("forceBasicAuth", false)
+repositoryManager.update(conf)
diff --git a/ansible/roles/nexus/tasks/configure.yml b/ansible/roles/nexus/tasks/configure.yml
new file mode 100644
index 00000000..66712d8f
--- /dev/null
+++ b/ansible/roles/nexus/tasks/configure.yml
@@ -0,0 +1,34 @@
+---
+- name: "check if the configuration script is uploaded"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure"
+ method: GET
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ status_code: [200, 404]
+ register: script
+- block:
+ - name: "upload the configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: json
+ body:
+ name: configure
+ type: groovy
+ content: "{{ lookup('file', 'files/configure.groovy') }}"
+ status_code: [204]
+ - name: "execute configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure/run"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: raw
+ headers: { "Content-Type": "text/plain" }
+ when: script.status == 404
diff --git a/ansible/roles/nexus/tasks/insert-images.yml b/ansible/roles/nexus/tasks/insert-images.yml
new file mode 100644
index 00000000..2e2a45c3
--- /dev/null
+++ b/ansible/roles/nexus/tasks/insert-images.yml
@@ -0,0 +1,19 @@
+---
+- name: Load docker images and push into registry
+ block:
+ - set_fact:
+ component: "{{ (item.path | basename | splitext)[0] }}"
+
+ - name: Docker login
+ docker_login:
+ registry: "{{ runtime_images[component].registry }}"
+ username: admin
+ password: admin123
+
+ - name: Load and push component {{ component }}
+ docker_image:
+ name: "{{ runtime_images[component].registry }}{{ runtime_images[component].path }}"
+ tag: "{{ runtime_images[component].tag }}"
+ push: yes
+ load_path: "{{ item.path }}"
+
diff --git a/ansible/roles/nexus/tasks/install.yml b/ansible/roles/nexus/tasks/install.yml
new file mode 100644
index 00000000..6dc82fe6
--- /dev/null
+++ b/ansible/roles/nexus/tasks/install.yml
@@ -0,0 +1,29 @@
+---
+- name: Change ownership of nexus_data
+ file:
+ path: "{{ app_data_path }}/nexus_data"
+ owner: 200
+ group: 200
+ recurse: yes
+
+- name: Load nexus image
+ docker_image:
+ name: sonatype/nexus3
+ load_path: "{{ app_data_path }}/offline_data/docker_images_infra/sonatype_nexus3_latest.tar"
+ state: present
+
+- name: Create nexus network
+ docker_network:
+ name: nexus_network
+ state: present
+
+- name: Run nexus container
+ docker_container:
+ name: nexus
+ image: sonatype/nexus3
+ networks:
+ - name: nexus_network
+ volumes:
+ - "{{ app_data_path }}/nexus_data:/nexus-data:rw"
+ state: started
+ restart_policy: unless-stopped
diff --git a/ansible/roles/nexus/tasks/main.yml b/ansible/roles/nexus/tasks/main.yml
new file mode 100644
index 00000000..c5905b13
--- /dev/null
+++ b/ansible/roles/nexus/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/roles/nexus/tasks/runtime-populate.yml b/ansible/roles/nexus/tasks/runtime-populate.yml
new file mode 100644
index 00000000..e22b650e
--- /dev/null
+++ b/ansible/roles/nexus/tasks/runtime-populate.yml
@@ -0,0 +1,12 @@
+---
+- name: Find images to be inserted into nexus in runtime
+ find:
+ paths: "{{ aux_data_path }}"
+ patterns: '*.tar'
+ register: tar_images
+
+# WA: a block of tasks can't be executed in iterations,
+# so we need to iterate over those tasks via include
+- include: "insert-images.yml"
+ with_items: "{{ tar_images.files }}"
+
diff --git a/ansible/roles/nexus/vars/main.yml b/ansible/roles/nexus/vars/main.yml
new file mode 100644
index 00000000..63944161
--- /dev/null
+++ b/ansible/roles/nexus/vars/main.yml
@@ -0,0 +1 @@
+nexus_url: "https://nexus.{{ hostvars[groups.infrastructure[0]].ansible_nodename }}"
diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml
new file mode 100644
index 00000000..5c010848
--- /dev/null
+++ b/ansible/roles/nginx/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+- name: Create configuration directory
+ file:
+ path: "{{ app_data_path }}/cfg"
+ state: directory
+
+- name: Upload configuration to server
+ template:
+ src: nginx.conf.j2
+ dest: "{{ app_data_path }}/cfg/nginx.conf"
+
+- name: Load nginx image
+ docker_image:
+ name: own_nginx
+ load_path: "{{ app_data_path }}/offline_data/docker_images_infra/own_nginx_latest.tar"
+ state: present
+ timeout: 120
+
+- name: Start nginx
+ docker_container:
+ name: own_nginx
+ image: own_nginx
+ networks:
+ - name: nexus_network
+ ports:
+ - "80:80"
+ - "443:443"
+ - "10001:443"
+ volumes:
+ - "{{ app_data_path }}/cfg/nginx.conf:/etc/nginx/nginx.conf:ro"
+ - "{{ app_data_path }}/certs:/etc/nginx/certs:ro"
+ - "{{ app_data_path }}/git-repo:/srv/git:rw"
+ - "{{ app_data_path }}/http:/srv/http:rw"
+ - "{{ app_data_path }}/pkg/rhel:/srv/http/repo.infra-server:rw"
+ - /var/log/nginx:/var/log/nginx:rw
+ state: started
+ restart_policy: unless-stopped
diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2
new file mode 100644
index 00000000..fb48565f
--- /dev/null
+++ b/ansible/roles/nginx/templates/nginx.conf.j2
@@ -0,0 +1,105 @@
+worker_processes 2;
+
+events {
+ worker_connections 1024;
+}
+
+http {
+ error_log /var/log/nginx/error.log debug;
+ access_log /var/log/nginx/access.log;
+
+ proxy_intercept_errors on;
+ proxy_send_timeout 120;
+ proxy_read_timeout 300;
+
+ upstream nexus {
+ server nexus:8081;
+ }
+
+ upstream registry {
+ server nexus:8082;
+ }
+
+# http simulations
+ server {
+ listen 80;
+ listen 443 ssl;
+ server_name _;
+ ssl_certificate /etc/nginx/certs/nexus_server.crt;
+ ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+ keepalive_timeout 5 5;
+
+ location / {
+ root /srv/http/$host;
+ index index.html;
+ }
+ }
+
+# nexus simulations
+ server {
+ listen 80;
+ listen 443 ssl;
+ server_name {% for host in simulated_hosts.nexus -%}
+ {{ host + " " }}
+ {%- endfor %};
+ ssl_certificate /etc/nginx/certs/nexus_server.crt;
+ ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+ keepalive_timeout 5 5;
+ proxy_buffering off;
+
+ # allow large uploads
+ client_max_body_size 3G;
+
+ location / {
+ # redirect to docker registry
+      if ($http_user_agent ~ docker) {
+ proxy_pass http://registry;
+ }
+ proxy_pass http://nexus;
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ }
+ }
+
+# git simulations
+ server {
+ listen 80;
+ listen 443 ssl;
+ server_name {% for host in simulated_hosts.git -%}
+ {{ host + " " }}
+ {%- endfor %};
+ ssl_certificate /etc/nginx/certs/nexus_server.crt;
+ ssl_certificate_key /etc/nginx/certs/nexus_server.key;
+
+ keepalive_timeout 5 5;
+ proxy_buffering off;
+
+ location / {
+ try_files $uri $uri/ @git;
+ }
+
+ location @git {
+
+      # Set chunks to unlimited, as request bodies can be huge
+ client_max_body_size 0;
+
+ fastcgi_param SCRIPT_FILENAME /usr/libexec/git-core/git-http-backend;
+ fastcgi_param QUERY_STRING $args;
+ fastcgi_param HTTP_HOST $server_name;
+ fastcgi_param PATH_INFO $uri;
+
+ include fastcgi_params;
+
+ fastcgi_param GIT_HTTP_EXPORT_ALL "";
+ fastcgi_param GIT_PROJECT_ROOT /srv/git/$host/;
+
+ # Forward REMOTE_USER as we want to know when we are authenticated
+ fastcgi_param REMOTE_USER $remote_user;
+
+ fastcgi_pass unix:/var/run/fcgiwrap.socket;
+ }
+ }
+}
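The server_name loops in this template expect simulated_hosts to provide one list of host names per simulated service, along these lines (host names here are purely illustrative; the real lists are presumably defined in group_vars):

    simulated_hosts:
      nexus:
        - nexus3.example.org
        - docker.example.org
      git:
        - gerrit.example.org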
diff --git a/ansible/roles/rancher/tasks/main.yml b/ansible/roles/rancher/tasks/main.yml
new file mode 100644
index 00000000..1370a39f
--- /dev/null
+++ b/ansible/roles/rancher/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "rancher_{{ rancher_role }}.yml"
diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml
new file mode 100644
index 00000000..4c9cb8dd
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_agent.yml
@@ -0,0 +1,13 @@
+---
+- name: Add Rancher Agent
+ docker_container:
+ name: rancher_agent
+ image: "{{ server_hostvars.rancher_agent_image }}"
+ command: "{{ server_hostvars.rancher_agent_reg_url }}"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock"
+ - "/var/lib/rancher:/var/lib/rancher"
+ auto_remove: yes
+ privileged: yes
+ vars:
+ server_hostvars: "{{ hostvars[groups.infrastructure.0] }}"
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
new file mode 100644
index 00000000..9abf986b
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -0,0 +1,51 @@
+---
+# DO NOT ADD SPACE AROUND ';'
+- name: Start rancher/server:v1.6.14
+ docker_container:
+ name: rancher_server
+ image: rancher/server:v1.6.14
+ command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
+    ports:
+      - "8080:8080"
+ state: started
+ restart_policy: unless-stopped
+ volumes:
+ - "{{ app_data_path }}/certs:/usr/local/share/ca-certificates/extra:ro"
+
+- name: Wait for rancher server to be ready
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta"
+ register: response
+ retries: 10
+ delay: 30
+ until: not response.failed
+
+- name: Create kubernetes environment
+ rancher_k8s_environment:
+ name: "{{ app_name }}"
+ descr: "Kubernetes environment for {{ app_name }}"
+ server: "{{ rancher_server_url }}"
+ delete_other_k8s: "{{ rancher_remove_other_env }}"
+ force: "{{ rancher_redeploy_k8s_env }}"
+ host_os: "{{ ansible_os_family }}"
+ register: env
+ retries: 10
+ delay: 5
+ until: env.data is defined
+
+- name: Set apikey values
+ set_fact:
+ k8s_env_id: "{{ env.data.environment.id }}"
+ key_public: "{{ env.data.apikey.public }}"
+ key_private: "{{ env.data.apikey.private }}"
+ rancher_agent_image: "{{ env.data.registration_tokens.image }}"
+ rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
+
+- name: Ensure .kube directory exists
+ file:
+ path: "{{ kube_directory }}"
+ state: directory
+
+- name: Create kube config
+ template:
+ src: kube_config.j2
+ dest: "{{ kube_directory }}/config"
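Judging from the set_fact task above, the custom rancher_k8s_environment module (library/rancher_k8s_environment.py) returns a payload of roughly this shape; all values below are illustrative:

    data:
      environment:
        id: "1a7"
      apikey:
        public: "ABC"
        private: "XYZ"
      registration_tokens:
        image: "rancher/agent:v1.2.9"
        reg_url: "https://infra-host:8080/v1/scripts/<registration-token>"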
diff --git a/ansible/roles/rancher/templates/kube_config.j2 b/ansible/roles/rancher/templates/kube_config.j2
new file mode 100644
index 00000000..87f332e6
--- /dev/null
+++ b/ansible/roles/rancher/templates/kube_config.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id }}/kubernetes:6443"
+ name: "{{ app_name }}"
+contexts:
+- context:
+ cluster: "{{ app_name }}"
+ user: "{{ app_name }}"
+ name: "{{ app_name }}"
+current-context: "{{ app_name }}"
+users:
+- name: "{{ app_name }}"
+ user:
+ token: "{{ (['Basic', [key_public, key_private] | join(':') | b64encode] | join(' ')) | b64encode }}"
+
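The token line nests two encodings: b64encode('Basic ' + b64encode(public + ':' + private)). Taking the illustrative keys ABC and XYZ:

    'ABC:XYZ'            | b64encode  ->  'QUJDOlhZWg=='
    'Basic QUJDOlhZWg==' | b64encode  ->  'QmFzaWMgUVVKRE9saFpXZz09'

i.e. the user token is a base64-wrapped HTTP basic-auth credential built from the rancher API key pair.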
diff --git a/ansible/roles/resource-data/tasks/main.yml b/ansible/roles/resource-data/tasks/main.yml
new file mode 100644
index 00000000..51127226
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ transport }}-upload.yml"
diff --git a/ansible/roles/resource-data/tasks/nfs-upload.yml b/ansible/roles/resource-data/tasks/nfs-upload.yml
new file mode 100644
index 00000000..825486b6
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/nfs-upload.yml
@@ -0,0 +1,52 @@
+---
+- name: Upload resources to infrastructure servers over nfs
+ block:
+ - name: Mount resources
+ mount:
+ path: /tmp/resource_data
+ src: "{{ hostvars[groups.resources.0].ansible_host }}:{{ hostvars[groups.resources.0].resources_dir }}"
+ fstype: nfs
+ state: mounted
+
+ - name: Unarchive resources
+ unarchive:
+ src: "/tmp/resource_data/{{ hostvars[groups.resources.0].resources_filename }}"
+ remote_src: yes
+ dest: "{{ app_data_path }}"
+ when: not resources_data_check.stat.exists
+
+ - name: Unarchive auxiliary resources
+ unarchive:
+ src: "/tmp/resource_data/{{ hostvars[groups.resources.0].aux_resources_filename }}"
+ remote_src: yes
+ dest: "{{ aux_data_path }}"
+ when: >
+ hostvars[groups.resources.0].aux_resources_filename is defined
+ and aux_data_path is defined and aux_data_path is not none
+ and hostvars[groups.resources.0].aux_file_presence.stat.exists
+ and not aux_resources_data_check.stat.exists
+
+ rescue:
+    - name: Flag the failed upload so that the partial data is removed and the next run can retry
+ command: /bin/false
+ register: upload_failed
+
+ always:
+    - name: Unmount resource dir
+      mount:
+        path: /tmp/resource_data
+        src: "{{ hostvars[groups.resources.0].ansible_host }}:{{ hostvars[groups.resources.0].resources_dir }}"
+ fstype: nfs
+ state: absent
+
+ - name: Remove the resource data on error
+ file:
+ path: "{{ app_data_path }}"
+ state: absent
+ when: upload_failed is defined
+
+    - name: Remove the auxiliary resource data on error
+ file:
+ path: "{{ aux_data_path }}"
+ state: absent
+ when: upload_failed is defined
diff --git a/ansible/roles/resource-data/tasks/ssh-upload.yml b/ansible/roles/resource-data/tasks/ssh-upload.yml
new file mode 100644
index 00000000..8e04d5c0
--- /dev/null
+++ b/ansible/roles/resource-data/tasks/ssh-upload.yml
@@ -0,0 +1,59 @@
+---
+- name: Upload resources to infrastructure servers over ssh
+ block:
+ - name: Upload ssh private key
+ copy:
+ src: "{{ ansible_ssh_private_key_file }}"
+ dest: /root/.ssh/infra_to_resource.privkey
+ mode: 0600
+ owner: root
+ group: root
+ remote_src: no
+
+ - name: Unarchive resources
+ shell: >
+ ssh -o StrictHostKeyChecking=no -o BatchMode=yes
+ -i /root/.ssh/infra_to_resource.privkey
+ {{ hostvars[groups.resources.0].ansible_host }}
+ 'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].resources_filename }}"'
+ | tar -C "{{ app_data_path }}" -xf -
+ args:
+ warn: False
+ when: not resources_data_check.stat.exists
+
+ - name: Unarchive auxiliary resources
+ shell: >
+ ssh -i /root/.ssh/infra_to_resource.privkey
+ {{ hostvars[groups.resources.0].ansible_host }}
+ 'cat "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"'
+ | tar -C "{{ aux_data_path }}" -xf -
+ when: >
+ hostvars[groups.resources.0].aux_resources_filename is defined
+ and aux_data_path is defined and aux_data_path is not none
+ and hostvars[groups.resources.0].aux_file_presence.stat.exists
+ and not aux_resources_data_check.stat.exists
+ args:
+ warn: False
+
+ rescue:
+    - name: Flag the failed upload so that the partial data is removed and the next run can retry
+ command: /bin/false
+ register: upload_failed
+
+ always:
+ - name: Remove the ssh private key
+ file:
+ path: /root/.ssh/infra_to_resource.privkey
+ state: absent
+
+ - name: Remove the resource data on error
+ file:
+ path: "{{ app_data_path }}"
+ state: absent
+ when: upload_failed is defined
+
+    - name: Remove the auxiliary resource data on error
+ file:
+ path: "{{ aux_data_path }}"
+ state: absent
+ when: upload_failed is defined
diff --git a/ansible/roles/vncserver/tasks/main.yml b/ansible/roles/vncserver/tasks/main.yml
new file mode 100644
index 00000000..56ae707b
--- /dev/null
+++ b/ansible/roles/vncserver/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+- name: Load VNC server image
+ docker_image:
+ name: consol/centos-icewm-vnc:latest
+ load_path: '{{ app_data_path }}/offline_data/docker_images_infra/consol_centos_icewm_vnc_latest.tar'
+ state: present
+ timeout: 120
+
+- name: Run VNC server
+ docker_container:
+ name: vnc_server
+ image: consol/centos-icewm-vnc
+ state: started
+ restart_policy: unless-stopped
+ ports:
+ - "5901:5901"
+ - "6901:6901"
+ env:
+ VNC_PW: "{{ vnc_passwd }}"
diff --git a/ansible/run_playbook.sh b/ansible/run_playbook.sh
new file mode 100755
index 00000000..88c86bc3
--- /dev/null
+++ b/ansible/run_playbook.sh
@@ -0,0 +1,132 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+script_path=$(readlink -f "$0")
+script_name=$(basename "$script_path")
+ANSIBLE_DIR=$(dirname "$script_path")
+ANSIBLE_CHROOT="${ANSIBLE_DIR}/ansible_chroot"
+
+
+#
+# functions
+#
+
+help()
+{
+ echo "
+NAME:
+ ${script_name} - wrapper for ansible-playbook command
+
+DESCRIPTION:
+    Run an ansible playbook (or another command passed as arguments) inside
+    a docker container or a chroot environment.
+
+    By default the chroot is used, because it has fewer dependencies and no
+    service needs to be running (provided that the chroot command is
+    installed).
+
+    Docker support is kept for compatibility reasons.
+
+    To run the ansible docker image instead, set this environment variable:
+    ANSIBLE_DOCKER_IMAGE
+
+    It tells the wrapper under which name you have built the included
+    Dockerfile, and setting it also triggers the docker behaviour.
+
+    For example:
+    ANSIBLE_DOCKER_IMAGE=ansible
+
+USAGE:
+ ./${script_name}
+ This help
+
+ ./${script_name} <args>
+ Run ansible-playbook command inside a chroot
+
+ ANSIBLE_DOCKER_IMAGE=<docker-image> ./${script_name} <args>
+ Run ansible-playbook command inside a docker container
+
+REQUIREMENTS:
+    For optimal usage your system should support overlay mounts, which have
+    been available in any recent kernel for at least a couple of years.
+
+    Another requirement is the 'unshare' utility, which is part of the
+    'util-linux' package and has likewise been shipped with systems for a
+    couple of years.
+
+    The last is the 'chroot' command itself, which is available basically
+    everywhere.
+"
+}
+
+
+#
+# run playbook
+#
+
+# if no arg then print help and exit
+if [ -z "$1" ] ; then
+ help
+ exit 0
+fi
+
+# we must be root
+if [ "$(id -u)" -ne 0 ] ; then
+    echo ERROR: "Root privileges are required, but this is running as: $(id -nu)" >&2
+ exit 1
+fi
+
+# if env var is set then run in docker
+if [ -n "$ANSIBLE_DOCKER_IMAGE" ] ; then
+ exec docker run --rm \
+ -v "${HOME}"/.ssh:/root/.ssh:rw \
+ -v "$ANSIBLE_DIR:/ansible:ro" \
+ -v "$ANSIBLE_DIR/application:/ansible/application:rw" \
+ -v "$ANSIBLE_DIR/certs/:/certs:rw" \
+ -it "${ANSIBLE_DOCKER_IMAGE}" "$@"
+fi
+
+# if not already there then unpack chroot
+if ! [ -d "$ANSIBLE_CHROOT" ] ; then
+ if ! [ -f "$ANSIBLE_DIR"/docker/ansible_chroot.tgz ] ; then
+        echo ERROR: "Missing chroot archive: ${ANSIBLE_DIR}/docker/ansible_chroot.tgz" >&2
+ exit 1
+ fi
+
+ echo INFO: "Unpacking chroot tar into: ${ANSIBLE_CHROOT}" >&2
+ if ! tar -C "$ANSIBLE_DIR" -xzf "$ANSIBLE_DIR"/docker/ansible_chroot.tgz ; then
+ echo ERROR: "Unpacking failed - ABORT" >&2
+ exit 1
+ fi
+fi
+
+# run chroot
+mkdir -p "$ANSIBLE_DIR"/application
+mkdir -p "$ANSIBLE_DIR"/certs
+"$ANSIBLE_DIR"/docker/run_chroot.sh \
+ --mount rw:"${HOME}/.ssh":/root/.ssh \
+ --mount ro:"$ANSIBLE_DIR":/ansible \
+ --mount rw:"$ANSIBLE_DIR"/application:/ansible/application \
+ --mount rw:"$ANSIBLE_DIR"/certs:/certs \
+ --workdir /ansible \
+ execute "$ANSIBLE_CHROOT" ansible-playbook "$@"
+
+exit 0
diff --git a/ansible/setup.yml b/ansible/setup.yml
new file mode 100644
index 00000000..ec572973
--- /dev/null
+++ b/ansible/setup.yml
@@ -0,0 +1,26 @@
+---
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: "Check and generate key if needed"
+ block:
+ - stat:
+ path: '{{ private_key }}.pub'
+ register: p
+
+ - command: ssh-keygen -f {{ private_key }} -t rsa -N ''
+ when: not p.stat.exists
+ vars:
+ private_key: /root/.ssh/offline_ssh_key
+
+- hosts: all
+ gather_facts: false
+ tasks:
+ - name: Setup authorized_keys file
+ authorized_key:
+ user: root
+ state: present
+ key: "{{ lookup('file', public_key) }}"
+ become: true
+ vars:
+ public_key: /root/.ssh/offline_ssh_key.pub
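A typical first run through the wrapper, from the ansible directory, might look as follows (illustrative; --ask-pass is only needed until the freshly generated key has been authorized on the target hosts):

    ./run_playbook.sh -i inventory/hosts.yml setup.yml --ask-pass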
diff --git a/ansible/upload_resources.yml b/ansible/upload_resources.yml
new file mode 100644
index 00000000..68010eb1
--- /dev/null
+++ b/ansible/upload_resources.yml
@@ -0,0 +1,49 @@
+---
+- name: Check for presence of auxiliary resources tar file
+ hosts: resources[0]
+ tasks:
+ - name: Store auxiliary resources tar file info into variable
+ stat:
+ path: "{{ hostvars[groups.resources.0].resources_dir }}/{{ hostvars[groups.resources.0].aux_resources_filename }}"
+ register: aux_file_presence
+
+- name: Check infrastructure server for presence of resources and requirements
+ hosts: infrastructure
+ tasks:
+ - name: Check if nfs-utils is installed
+ yum:
+ list: nfs-utils
+ register: nfs_utils_check
+
+ - name: Check if the resources are already unpacked
+ stat:
+ path: "{{ app_data_path }}"
+ register: resources_data_check
+
+    - name: Check if the auxiliary resources are already unpacked
+ stat:
+ path: "{{ aux_data_path }}"
+ register: aux_resources_data_check
+ when: aux_data_path is defined and aux_data_path is not none
+
+- name: Ensure the existence of data directories on the infrastructure server
+ hosts: infrastructure
+ tasks:
+ - name: Create data directory
+ file:
+ path: "{{ app_data_path }}"
+ state: directory
+
+ - name: Create auxiliary data directory
+ file:
+ path: "{{ aux_data_path }}"
+ state: directory
+ when: aux_data_path is defined and aux_data_path is not none
+
+- name: Upload resources to infrastructure server
+ hosts: infrastructure
+ roles:
+    # use nfs or ssh to unpack the resources into the data directories
+ - role: resource-data
+ vars:
+ transport: "{{ 'nfs' if resources_on_nfs and (nfs_utils_check.results|selectattr('yumstate', 'match', 'installed')|list|length != 0) else 'ssh' }}"
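The nfs_utils_check variable inspected by this expression is the result of the 'yum: list: nfs-utils' task above; its results attribute holds one entry per known package state, roughly (a sketch; only yumstate matters here):

    nfs_utils_check:
      results:
        - name: nfs-utils
          yumstate: installed

so the selectattr filter yields a non-empty list exactly when nfs-utils is installed, and the play falls back to the ssh transport otherwise.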