Diffstat (limited to 'ansible')
-rw-r--r--  ansible/docker/.gitignore                      |   3
-rw-r--r--  ansible/docker/Dockerfile                      |  34
-rwxr-xr-x  ansible/docker/build_ansible_image.sh          |  52
-rwxr-xr-x  ansible/docker/create_docker_chroot.sh         | 220
-rwxr-xr-x  ansible/docker/run_chroot.sh                   | 465
-rw-r--r--  ansible/library/json_add.py                    |  90
-rw-r--r--  ansible/library/rancher_k8s_environment.py     | 341
-rw-r--r--  ansible/roles/nexus/defaults/main.yml          |   2
-rw-r--r--  ansible/roles/nexus/files/configure.groovy     |  37
-rw-r--r--  ansible/roles/nexus/tasks/configure.yml        |  34
-rw-r--r--  ansible/roles/nexus/tasks/insert-images.yml    |  19
-rw-r--r--  ansible/roles/nexus/tasks/install.yml          |  29
-rw-r--r--  ansible/roles/nexus/tasks/main.yml             |   2
-rw-r--r--  ansible/roles/nexus/tasks/runtime-populate.yml |  12
-rw-r--r--  ansible/roles/nexus/vars/main.yml              |   1
-rw-r--r--  ansible/roles/rancher/tasks/main.yml           |   2
-rw-r--r--  ansible/roles/rancher/tasks/rancher_agent.yml  |  13
-rw-r--r--  ansible/roles/rancher/tasks/rancher_server.yml |  51
-rw-r--r--  ansible/roles/rancher/templates/kube_config.j2 |  19
19 files changed, 1426 insertions, 0 deletions
diff --git a/ansible/docker/.gitignore b/ansible/docker/.gitignore
new file mode 100644
index 00000000..7df2d855
--- /dev/null
+++ b/ansible/docker/.gitignore
@@ -0,0 +1,3 @@
+ansible_docker.tar
+ansible_chroot.tgz
+ansible_chroot
diff --git a/ansible/docker/Dockerfile b/ansible/docker/Dockerfile
new file mode 100644
index 00000000..b0172709
--- /dev/null
+++ b/ansible/docker/Dockerfile
@@ -0,0 +1,34 @@
+FROM alpine:3.8
+
+ARG ansible_version=2.6.3
+LABEL ansible_version=$ansible_version vendor=Samsung
+
+# Install Ansible build dependencies
+RUN apk --no-cache update \
+&& apk --no-cache --update add --virtual build-dependencies \
+ gcc \
+ make \
+ musl-dev \
+ libffi-dev \
+ openssl-dev \
+ python3-dev \
+&& apk add --no-cache \
+ python3 \
+ py3-pip \
+ openssh-client \
+ openssl \
+ py3-openssl \
+ openssh \
+ sshpass \
+&& pip3 install --no-cache-dir --upgrade pip \
+&& pip3 install --no-cache-dir \
+ ansible==$ansible_version \
+ jmespath \
+&& apk del build-dependencies && rm -rf /var/cache/apk/*
+
+ENV ANSIBLE_HOST_KEY_CHECKING false
+ENV ANSIBLE_RETRY_FILES_ENABLED false
+
+WORKDIR /ansible
+
+ENTRYPOINT ["ansible-playbook"]
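
Since the image's entrypoint is ansible-playbook and its workdir is /ansible, a quick smoke test of the built image could look like this (the tag, mounted directory, inventory and playbook names are illustrative, not part of the commit):

    docker build -t ansible:latest --build-arg ansible_version=2.6.3 .
    # any arguments after the image name go straight to ansible-playbook
    docker run --rm -v "$(pwd)/playbooks:/ansible" ansible:latest --version
    docker run --rm -v "$(pwd)/playbooks:/ansible" ansible:latest -i hosts site.yml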
diff --git a/ansible/docker/build_ansible_image.sh b/ansible/docker/build_ansible_image.sh
new file mode 100755
index 00000000..d54ddc43
--- /dev/null
+++ b/ansible/docker/build_ansible_image.sh
@@ -0,0 +1,52 @@
+#! /usr/bin/env bash
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+ansible_version="$1"
+image_name="${2:-ansible:latest}"
+
+script_path=$(readlink -f "$0")
+script_dir=$(dirname "$script_path")
+
+git_commit=$(git rev-parse --revs-only HEAD)
+build_date=$(date -I)
+
+if [ -z "$ansible_version" ]; then
+ docker build "$script_dir" -t "${image_name}" --label "git-commit=$git_commit" --label "build-date=$build_date"
+else
+ docker build "$script_dir" -t "${image_name}" --label "git-commit=$git_commit" --label "build-date=$build_date" --build-arg ansible_version="$ansible_version"
+fi
+
+# Export the docker image into a chroot directory and tar-archive it. It takes ~40M of space and is packaged together with the sw.
+if "${script_dir}"/create_docker_chroot.sh convert "${image_name}" "${script_dir}"/ansible_chroot ; then
+ cd "$script_dir"
+ echo INFO: "Tarring and zipping the chroot directory..." >&2
+ tar -czf ansible_chroot.tgz ansible_chroot
+ rm -rf "${script_dir}"/ansible_chroot
+ echo INFO: "Finished: ${script_dir}/ansible_chroot.tgz" >&2
+ cd -
+else
+ echo ERROR: "I failed to create a chroot environment" >&2
+ exit 1
+fi
+
+exit 0
\ No newline at end of file
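
A typical invocation of the build script, given the argument handling above (run as root because the chroot conversion step requires it; the version and tag arguments are optional and illustrative):

    sudo ./build_ansible_image.sh                      # default ansible_version, tag ansible:latest
    sudo ./build_ansible_image.sh 2.6.3 ansible:2.6.3  # pinned version, custom tag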
diff --git a/ansible/docker/create_docker_chroot.sh b/ansible/docker/create_docker_chroot.sh
new file mode 100755
index 00000000..f8e256da
--- /dev/null
+++ b/ansible/docker/create_docker_chroot.sh
@@ -0,0 +1,220 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+CMD=$(basename "$0")
+
+help()
+{
+ echo "
+NAME:
+    ${CMD} - create a chroot directory from a docker image

DESCRIPTION:
+    It will export a docker image into a directory suitable for chrooting.
+ It needs and will run these commands (requires docker service):
+ docker create
+ docker export
+
+USAGE:
+ ${CMD} [-h|--help|help]
+ This help
+
+ ${CMD} convert <docker-name> <name-of-directory>
+
+    It will convert a docker image into a directory - no chroot yet.
+    The docker image must already be imported under that name (it is not a file):
+        docker image ls
+
+    The directory will be created, so this command will fail if a
+    directory or a file of this name (filepath) already exists!
+    There is another script, run_chroot.sh, with which you can chroot
+    into this newly created directory - so it is expected that this
+    directory is kept clean and as it is.
+    If you don't care about this feature (run_chroot.sh) and you know
+    what you are doing, then do the necessary mounts and execute:
+        chroot <name-of-directory>/chroot /bin/sh -l
+"
+}
+
+#
+# PLEASE DON'T TOUCH ME
+#
+
+# readme file for run_chroot.sh
+readme()
+{
+ md_codequote='```'
+
+cat > "$CHROOT_METADIR"/README.md <<EOF
+# RUN CHROOT COMMAND
+
+# usage:
+
+${md_codequote}
+run_chroot.sh help
+${md_codequote}
+
+**Don't modify insides of this directory (where this README.md lies).**
+
+The structure is needed as it is.
+
+If you wish to just run chroot by yourself, you can do:
+${md_codequote}
+chroot ./chroot /bin/sh -l
+${md_codequote}
+
+# requirements:
+
+* root privileges
+* docker service
+
+# directory structure:
+${md_codequote}
+ README.md
+ chroot/
+ .overlay
+ .workdir
+ .merged
+${md_codequote}
+EOF
+}
+
+# arg: <docker-name>
+check_docker_image()
+{
+ image="$1"
+ match=$(docker image ls --no-trunc -q "$image" | wc -l)
+
+ case $match in
+ 0)
+ echo ERROR: "Docker image does not exist: ${DOCKER_IMAGE}" >&2
+ exit 1
+ ;;
+ 1)
+ :
+ ;;
+ *)
+ echo ERROR: "Multiple results for this docker name: ${DOCKER_IMAGE}" >&2
+ exit 1
+ ;;
+ esac
+
+ return 0
+}
+
+cleanup()
+{
+ if [ -n "$DOCKER_CONTAINER" ] ; then
+ echo INFO: "Delete the export container: ${DOCKER_CONTAINER}" >&2
+ if ! docker rm "$DOCKER_CONTAINER" > /dev/null ; then
+ echo ERROR: "Failed to delete: ${DOCKER_CONTAINER}" >&2
+ fi
+ fi
+}
+
+on_exit()
+{
+ set +e
+ cleanup
+}
+
+action=nil
+case "$1" in
+ ''|-h|--help|help)
+ help
+ exit 0
+ ;;
+ convert)
+ action=convert
+ DOCKER_IMAGE="$2"
+ CHROOT_METADIR="$3"
+ ;;
+ *)
+ echo ERROR: "Bad usage" >&2
+ help >&2
+ exit 1
+ ;;
+esac
+
+
+case "$action" in
+ ''|nil)
+ echo ERROR: "Nothing to do - missing command" >&2
+ help >&2
+ exit 1
+ ;;
+ convert)
+ if [ -z "$DOCKER_IMAGE" ] || [ -z "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Missing argument" >&2
+ help >&2
+ exit 1
+ fi
+
+ if [ -e "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Filepath already exists: ${CHROOT_METADIR}" >&2
+ echo ERROR: "Please rename it, remove it or use different name" >&2
+ echo ERROR: "I need my working directory empty, thanks" >&2
+ exit 1
+ fi
+
+ # check if docker image is there
+ check_docker_image "$DOCKER_IMAGE"
+
+ # we must be root
+ if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "I need root privileges and you are not root: $(id -nu)" >&2
+ exit 1
+ fi
+
+ # making sure that CHROOT_METADIR is absolute path
+ CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
+
+ # set trap
+ trap on_exit INT QUIT TERM EXIT
+
+ # making readme
+ mkdir -p "$CHROOT_METADIR"/
+ readme
+
+ # create container
+ DOCKER_CONTAINER=$(docker create "$DOCKER_IMAGE")
+ if [ -z "$DOCKER_CONTAINER" ] ; then
+ echo ERROR: "I could not create a container from: ${DOCKER_IMAGE}" >&2
+ exit 1
+ fi
+
+ # unpacking of image
+ mkdir -p "$CHROOT_METADIR"/chroot
+ echo INFO: "Export started - it can take a while to finish..." >&2
+ if ! docker export "$DOCKER_CONTAINER" | tar -C "$CHROOT_METADIR"/chroot -xf - ; then
+ echo ERROR: "Unpacking failed - permissions?" >&2
+ exit 1
+ else
+ echo INFO: "Export success: $CHROOT_METADIR/chroot" >&2
+ echo INFO: "Checkout the README file: $CHROOT_METADIR/README.md" >&2
+ fi
+ ;;
+esac
+
+exit 0
+
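
Standalone usage, following the script's own help text (the image name and target directory are illustrative); the second block sketches the plain docker commands the script wraps:

    sudo ./create_docker_chroot.sh convert ansible:latest ./ansible_chroot

    c=$(docker create ansible:latest)
    mkdir -p ./ansible_chroot/chroot
    docker export "$c" | tar -C ./ansible_chroot/chroot -xf -
    docker rm "$c"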
diff --git a/ansible/docker/run_chroot.sh b/ansible/docker/run_chroot.sh
new file mode 100755
index 00000000..b38c1295
--- /dev/null
+++ b/ansible/docker/run_chroot.sh
@@ -0,0 +1,465 @@
+#!/bin/sh
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+
+set -e
+
+CMD=$(basename "$0")
+UMOUNT_TIMEOUT=120 # 2mins
+
+
+#
+# functions
+#
+
+help()
+{
+ echo "
+NAME:
+ ${CMD} - run command in chrooted directory
+
+DESCRIPTION:
+    It will do the steps necessary to be able to chroot, do the optional
+    mounts, and run commands inside the requested chroot directory.
+
+    It does an overlay mount so nothing inside the chroot is modified - if
+    there is no way to do an overlay mount it will just chroot directly, which
+    means the user has the power to render the chroot useless - beware...
+
+    The chroot is run in its own namespace for better containerization.
+    Therefore the utility 'unshare' is a necessary requirement.
+
+ After exiting the chroot all of those necessary steps are undone.
+
+USAGE:
+ ${CMD} [-h|--help|help]
+ This help
+
+ ${CMD} [OPTIONS] execute <chroot-directory> [<command with args>...]
+
+    It will do some necessary steps, after which it will execute the
+    chroot command and give you a prompt inside the chroot. When you
+    leave the prompt it will undo those steps.
+    On top of the ordinary chroot it will make an overlay, so every change
+    inside the chroot is only temporary and the chroot is kept stateless -
+    like inside a docker container. If there is no way to do the overlay -
+    an ordinary chroot is done.
+ Default command is: /bin/sh -l
+
+ OPTIONS:
+
+ --mount (ro|rw):<src-dir>:<inner-dir>
+ This option will mount 'src-dir' which is full path on the host
+ system into the relative path 'inner-dir' within the chroot
+ directory.
+ It can be mounted as read-only (ro) or read-write (rw).
+        This argument can be used multiple times to create a complex
+        hierarchy. Order is significant.
+ For example:
+ --mount ro:/scripts/ANSIBLE_DIR:/ansible \
+ --mount rw:/scripts/ANSIBLE_DIR/app:/ansible/app
+        This will mount the directory ansible as read-only into the chroot,
+        but its subdirectory 'app' will be writable.
+
+ --workdir <inner-dir>
+ This will set working directory (PWD) inside the chroot.
+
+EXAMPLE:
+ ${CMD} --mount ro:/scripts/ansible:ansible \
+ --mount rw:/scripts/ansible/app:ansible/app \
+ --workdir /ansible execute /tmp/ansible_chroot
+ # pwd
+ /ansible
+ # mount
+ overlay on / type overlay ...
+ /dev/disk on /ansible type ext4 (ro,relatime,errors=remount-ro)
+ /dev/disk on /ansible/application type ext4 (rw,relatime,errors=remount-ro)
+ none on /proc type proc (rw,relatime)
+ none on /sys type sysfs (rw,relatime)
+ none on /dev/shm type tmpfs (rw,relatime)
+
+ Directory /ansible inside the chroot is not writable but subdirectory
+ /ansible/app is.
+
+    The rest of the chroot is under the overlay and all changes will be lost
+    when the chroot command ends. Only changes in the app directory persist,
+    because it was bind-mounted read-write and is not part of the overlay.
+
+    Note: as you can see, the app directory is mounted over itself, but read-write.
+"
+}
+
+# arg: <directory>
+is_mounted()
+{
+ mountpoint=$(echo "$1" | sed 's#//*#/#g')
+
+ LANG=C mount | grep -q "^[^[:space:]]\+[[:space:]]\+on[[:space:]]\+${mountpoint}[[:space:]]\+type[[:space:]]\+"
+}
+
+# layers are right to left! First is on the right, top/last is on the left
+do_overlay_mount()
+{
+ if [ -d "$overlay" ] && is_mounted "$overlay" ; then
+ echo ERROR: "The overlay directory is already mounted: $overlay" >&2
+ echo ERROR: "Fix the issue - cannot proceed" >&2
+ exit 1
+ fi
+
+ # prepare dirs
+ rm -rf "$overlay" "$upperdir" "$workdir"
+ mkdir -p "$overlay"
+ mkdir -p "$upperdir"
+ mkdir -p "$workdir"
+
+ # finally overlay mount
+ if ! mount -t overlay --make-rprivate \
+ -o lowerdir="$lowerdir",upperdir="$upperdir",workdir="$workdir" \
+ overlay "$overlay" ;
+ then
+ echo ERROR: "Failed to do overlay mount!" >&2
+ echo ERROR: "Please check that your system supports overlay!" >&2
+        echo NOTE: "Continuing with the ordinary chroot without overlay!" >&2
+
+ CHROOT_DIR="$lowerdir"
+ return 1
+ fi
+
+ CHROOT_DIR="$overlay"
+
+ return 0
+}
+
+cleanup()
+{
+ case "$OVERLAY_MOUNT" in
+ yes)
+ echo INFO: "Umounting overlay..." >&2
+ if ! umount_retry "$CHROOT_DIR" ; then
+ echo ERROR: "Cannot umount chroot: $CHROOT_DIR" >&2
+ return 1
+ fi
+
+ ;;
+ no)
+ echo INFO: "No overlay to umount" >&2
+ ;;
+ esac
+
+ if ! is_mounted "$overlay" ; then
+        echo INFO: "Deleting temp directories..." >&2
+ rm -rf "$overlay" "$upperdir" "$workdir"
+ else
+ echo ERROR: "Overlay is still mounted: $CHROOT_DIR" >&2
+ echo ERROR: "Cannot delete: $overlay" >&2
+ echo ERROR: "Cannot delete: $upperdir" >&2
+ echo ERROR: "Cannot delete: $workdir" >&2
+ return 1
+ fi
+}
+
+check_external_mounts()
+{
+ echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+
+ case "$mount_type" in
+ ro|rw)
+ :
+ ;;
+ *)
+ echo ERROR: "Wrong mount type (should be 'ro' or 'rw') in: ${mountexpr}" >&2
+ exit 1
+ ;;
+ esac
+
+ if ! [ -d "$external" ] ; then
+ echo ERROR: "Directory for mounting does not exist: ${external}" >&2
+ exit 1
+ fi
+
+ if echo "$internal" | grep -q '^/*$' ; then
+ echo ERROR: "Unacceptable internal path: ${internal}" >&2
+ exit 1
+ fi
+ done
+}
+
+do_external_mounts()
+{
+ echo INFO: "Bind mounting of external mounts..." >&2
+ echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+
+ if is_mounted "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Mountpoint is already mounted: ${CHROOT_DIR}/${internal}" >&2
+ echo ERROR: "Fix the issue - cannot proceed" >&2
+ exit 1
+ fi
+
+ if ! mkdir -p "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Cannot create mountpoint: ${CHROOT_DIR}/${internal}" >&2
+ exit 1
+ fi
+
+ if ! mount --make-rprivate -o bind,${mount_type} "$external" "${CHROOT_DIR}/${internal}" ; then
+ echo ERROR: "Failed to mount: ${external} -> ${internal}" >&2
+ exit 1
+ else
+ echo INFO: "Mount: ${external} -> ${internal}" >&2
+ fi
+ done
+}
+
+# arg: <mountpoint>
+umount_retry()
+{
+ mountpoint=$(echo "$1" | sed 's#//*#/#g')
+ timeout=${UMOUNT_TIMEOUT}
+
+ umount "$mountpoint" 2>/dev/null
+ while is_mounted "$mountpoint" && [ $timeout -gt 0 ] ; do
+ umount "$mountpoint" 2>/dev/null
+ sleep 1
+ timeout=$(( timeout - 1 ))
+ done
+
+ if ! is_mounted "$mountpoint" ; then
+ return 0
+ fi
+
+ return 1
+}
+
+undo_external_mounts()
+{
+ echo INFO: "Umount external mount points..." >&2
+ echo "$EXTERNAL_MOUNTS" | tac | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
+ external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+ if umount_retry "${CHROOT_DIR}/${internal}" ; then
+ echo INFO: "Unmounted: ${CHROOT_DIR}/${internal}" >&2
+ else
+ echo ERROR: "Failed to umount: ${CHROOT_DIR}/${internal}" >&2
+ fi
+ done
+}
+
+install_wrapper()
+{
+ cat > "$CHROOT_DIR"/usr/local/bin/fakeshell.sh <<EOF
+#!/bin/sh
+
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+export PATH
+
+gid_tty=\$(getent group | sed -n '/^tty:/p' | cut -d: -f 3)
+
+mount -t proc proc /proc
+mount -t sysfs none /sys
+mount -t tmpfs none /dev
+
+mkdir -p /dev/shm
+mkdir -p /dev/pts
+mount -t devpts -o gid=\${gid_tty},mode=620 none /dev/pts
+
+[ -e /dev/full ] || mknod -m 666 /dev/full c 1 7
+[ -e /dev/ptmx ] || mknod -m 666 /dev/ptmx c 5 2
+[ -e /dev/random ] || mknod -m 644 /dev/random c 1 8
+[ -e /dev/urandom ] || mknod -m 644 /dev/urandom c 1 9
+[ -e /dev/zero ] || mknod -m 666 /dev/zero c 1 5
+[ -e /dev/tty ] || mknod -m 666 /dev/tty c 5 0
+[ -e /dev/console ] || mknod -m 622 /dev/console c 5 1
+[ -e /dev/null ] || mknod -m 666 /dev/null c 1 3
+
+chown root:tty /dev/console
+chown root:tty /dev/ptmx
+chown root:tty /dev/tty
+
+mkdir -p "\$1" || exit 1
+cd "\$1" || exit 1
+shift
+
+exec "\$@"
+
+EOF
+ chmod +x "$CHROOT_DIR"/usr/local/bin/fakeshell.sh
+}
+
+on_exit()
+{
+ set +e
+ echo
+
+ if [ -n "$OVERLAY_MOUNT" ] ; then
+ undo_external_mounts
+ fi
+ cleanup
+}
+
+
+#
+# parse arguments
+#
+
+state=nil
+action=nil
+EXTERNAL_MOUNTS=''
+CHROOT_WORKDIR=''
+CHROOT_METADIR=''
+CHROOT_DIR=''
+COMMAND=''
+while [ -n "$1" ] ; do
+ case "$state" in
+ nil)
+ case "$1" in
+ ''|-h|--help|help)
+ help
+ exit 0
+ ;;
+ --mount)
+ EXTERNAL_MOUNTS=$(printf "%s\n%s\n" "$EXTERNAL_MOUNTS" "${2}")
+ state=next
+ ;;
+ --workdir)
+ if [ -z "$CHROOT_WORKDIR" ] ; then
+ CHROOT_WORKDIR="$2"
+ state=next
+ else
+ echo ERROR: "Multiple working directory argument" >&2
+ help >&2
+ exit 1
+ fi
+ ;;
+ execute)
+ action=execute
+ state=execute
+ ;;
+ *)
+ echo ERROR: "Bad usage" >&2
+ help >&2
+ exit 1
+ ;;
+ esac
+ ;;
+ next)
+ state=nil
+ ;;
+ execute)
+ CHROOT_METADIR="$1"
+ shift
+ break
+ ;;
+ esac
+ shift
+done
+
+
+case "$action" in
+ ''|nil)
+ echo ERROR: "Nothing to do - missing command" >&2
+ help >&2
+ exit 1
+ ;;
+ execute)
+ # firstly do sanity checking ...
+
+ if [ -z "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Missing argument" >&2
+ help >&2
+ exit 1
+ fi
+
+ # making sure that CHROOT_METADIR is absolute path
+ CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
+
+ if ! [ -d "$CHROOT_METADIR"/chroot ] ; then
+ echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2
+ exit 1
+ fi
+
+ # check external mounts if there are any
+ check_external_mounts
+
+ # check workdir
+ if [ -n "$CHROOT_WORKDIR" ] ; then
+ CHROOT_WORKDIR=$(echo "$CHROOT_WORKDIR" | sed -e 's#^/*##' -e 's#//*#/#g')
+ fi
+
+ # we must be root
+ if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "Need to be root and you are not: $(id -nu)" >&2
+ exit 1
+ fi
+
+ if ! which unshare >/dev/null 2>/dev/null ; then
+ echo ERROR: "'unshare' system command is missing - ABORT" >&2
+ echo INFO: "Try to install 'util-linux' package" >&2
+ exit 1
+ fi
+
+ # ... sanity checking done
+
+ # setup paths
+ lowerdir="$CHROOT_METADIR"/chroot
+ upperdir="$CHROOT_METADIR"/.overlay
+ workdir="$CHROOT_METADIR"/.workdir
+ overlay="$CHROOT_METADIR"/.merged
+
+ # set trap
+ trap on_exit QUIT TERM EXIT
+
+ # mount overlay
+ OVERLAY_MOUNT=''
+ if do_overlay_mount ; then
+ # overlay chroot
+ OVERLAY_MOUNT=yes
+ else
+ # non overlay mount
+ OVERLAY_MOUNT=no
+ fi
+
+ # do the user-specific mounts
+ do_external_mounts
+
+ # I need this wrapper to do some setup inside the chroot...
+ install_wrapper
+
+ # execute chroot
+ # copy resolv.conf
+ cp -a /etc/resolv.conf "$CHROOT_DIR"/etc/resolv.conf
+
+ if [ -n "$1" ] ; then
+ :
+ else
+ set -- /bin/sh -l
+ fi
+ unshare -mfpi --propagation private \
+ chroot "$CHROOT_DIR" /usr/local/bin/fakeshell.sh "${CHROOT_WORKDIR:-/}" "$@"
+ ;;
+esac
+
+exit 0
+
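
Combining the options above, running a playbook from the packaged chroot could look like this (the mount sources, inventory and playbook names are illustrative):

    sudo ./run_chroot.sh \
        --mount ro:/opt/playbooks:ansible \
        --mount rw:/opt/playbooks/app:ansible/app \
        --workdir /ansible \
        execute ./ansible_chroot ansible-playbook -i hosts site.yml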
diff --git a/ansible/library/json_add.py b/ansible/library/json_add.py
new file mode 100644
index 00000000..6aad2d7c
--- /dev/null
+++ b/ansible/library/json_add.py
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+
+from ansible.module_utils.basic import AnsibleModule
+import json
+import os
+
+DOCUMENTATION="""
+---
+module: json_add
+description:
+    - This module searches the top-level objects in a json file and adds the
+      specified value into the list under the specified key.
+    - If the file does not exist, the module will create it automatically.
+
+options:
+  path:
+    required: true
+    aliases: [name, destfile, dest]
+ description:
+ - The json file to modify.
+ key:
+ required: true
+ description:
+ - Top level object.
+ value:
+ required: true
+ description:
+ - Value to add to specified key.
+"""
+
+def load_json(path):
+ if os.path.exists(path):
+ with open(path, 'r') as f:
+ return json.load(f)
+ else:
+ return {}
+
+def value_is_set(path, key, value, json_obj):
+ return value in json_obj.get(key, [])
+
+def insert_to_json(path, key, value, check_mode=False):
+ json_obj = load_json(path)
+ if not value_is_set(path, key, value, json_obj):
+ if not check_mode:
+ json_obj.setdefault(key, []).append(value)
+ store_json(path, json_obj)
+ return True, 'Value %s added to %s.' % (value, key)
+ else:
+ return False, ''
+
+def store_json(path, json_obj):
+ with open(path, 'w') as f:
+ json.dump(json_obj, f, indent=4)
+
+def check_file_attrs(module, changed, message, diff):
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False, diff=diff):
+
+ if changed:
+ message += ' '
+ changed = True
+ message += 'File attributes changed.'
+
+ return changed, message
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name', 'destfile', 'dest']),
+ key=dict(type='str', required=True),
+ value=dict(type='str', required=True),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+ params = module.params
+ path = params['path']
+ key = params['key']
+ value = params['value']
+ try:
+ changed, msg = insert_to_json(path, key, value, module.check_mode)
+ fs_diff = {}
+ changed, msg = check_file_attrs(module, changed, msg, fs_diff)
+ module.exit_json(changed=changed, msg=msg, file_attr_diff=fs_diff)
+    except IOError as e:
+        module.fail_json(msg=str(e))
+
+if __name__ == '__main__':
+ run_module()
+
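
A minimal ad-hoc exercise of the module from the repository root (the file path, key and value are illustrative):

    ANSIBLE_LIBRARY=ansible/library ansible localhost -m json_add \
        -a 'path=/tmp/daemon.json key=insecure-registries value=nexus.example.com'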
diff --git a/ansible/library/rancher_k8s_environment.py b/ansible/library/rancher_k8s_environment.py
new file mode 100644
index 00000000..d3d8ac02
--- /dev/null
+++ b/ansible/library/rancher_k8s_environment.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+
+DOCUMENTATION='''
+---
+module: rancher_k8s_environment
+description:
+ - This module will create or delete Kubernetes environment.
+ - It will also delete other environments when variables are set accordingly.
+notes:
+    - It identifies an environment only by name. Expect problems with identically named environments.
+    - All hosts running the Kubernetes cluster should have the same OS, otherwise
+      there is a possibility of misbehavior.
+options:
+ server:
+ required: true
+ description:
+            - URL of the Rancher server, e.g. "http://10.0.0.1:8080".
+ name:
+ required: true
+        description:
+ - Name of the environment to create/remove.
+ descr:
+ description:
+ - Description of environment to create.
+ state:
+ description:
+ - If "present" environment will be created or setup depending if it exists.
+ With multiple environments with same name expect error.
+ If "absent" environment will be removed. If multiple environments have same
+ name all will be deleted.
+ default: present
+ choices: [present, absent]
+    delete_not_k8s:
+        description:
+            - Indicates whether environments with an orchestration other than Kubernetes
+              should be deleted.
+        type: bool
+        default: yes
+    delete_other_k8s:
+        description:
+            - Indicates whether environments with a name other than the specified one
+              should be deleted.
+        type: bool
+        default: no
+    force:
+        description:
+            - Indicates whether the environment should be deleted and recreated.
+        type: bool
+        default: yes
+ host_os:
+ required: true
+ description:
+            - OS (family, from the ansible_os_family variable) of the hosts running the cluster.
+              If "RedHat", the datavolume fix will be applied.
+              The fix is described here:
+ https://github.com/rancher/rancher/issues/10015
+'''
+
+import json
+import time
+
+import requests
+from ansible.module_utils.basic import AnsibleModule
+
+
+
+def get_existing_environments(rancher_address):
+ req = requests.get('{}/v2-beta/projects'.format(rancher_address))
+ envs = req.json()['data']
+ return envs
+
+
+def not_k8s_ids(environments):
+ envs = filter(lambda x: x['orchestration'] != 'kubernetes', environments)
+ return [env['id'] for env in envs]
+
+
+def other_k8s_ids(environments, name):
+ envs = filter(lambda x: x['orchestration'] == 'kubernetes' and x['name'] != name,
+ environments)
+ return [env['id'] for env in envs]
+
+
+def env_ids_by_name(environments, name):
+ envs = filter(lambda x: x['name'] == name, environments)
+ return [env['id'] for env in envs]
+
+
+def env_info_by_id(environments, env_id):
+ env = filter(lambda x: x['id'] == env_id, environments)
+ return [{'id': x['id'], 'name': x['name']} for x in env][0]
+
+
+def delete_multiple_environments(rancher_address, env_ids):
+ deleted = []
+ for env_id in env_ids:
+ deleted.append(delete_environment(rancher_address, env_id))
+ return deleted
+
+
+def delete_environment(rancher_address, env_id):
+ req = requests.delete('{}/v2-beta/projects/{}'.format(rancher_address, env_id))
+ deleted = req.json()['data'][0]
+ return {'id': deleted['id'],
+ 'name': deleted['name'],
+ 'orchestration': deleted['orchestration']}
+
+
+def create_k8s_environment(rancher_address, name, descr):
+ k8s_template_id = None
+ for _ in range(10):
+ k8s_template = requests.get(
+ '{}/v2-beta/projecttemplates?name=Kubernetes'.format(rancher_address)).json()
+ if k8s_template['data']:
+ k8s_template_id = k8s_template['data'][0]['id']
+ break
+ time.sleep(3)
+ if k8s_template_id is None:
+ raise ValueError('Template for kubernetes not found.')
+ body = {
+ 'name': name,
+ 'description': descr,
+ 'projectTemplateId': k8s_template_id,
+ 'allowSystemRole': False,
+ 'members': [],
+ 'virtualMachine': False,
+ 'servicesPortRange': None,
+ 'projectLinks': []
+ }
+
+ body_json = json.dumps(body)
+ req = requests.post('{}/v2-beta/projects'.format(rancher_address), data=body_json)
+ created = req.json()
+ return {'id': created['id'], 'name': created['name']}
+
+
+def get_kubelet_service(rancher_address, env_id):
+ for _ in range(10):
+ response = requests.get(
+ '{}/v2-beta/projects/{}/services/?name=kubelet'.format(rancher_address,
+ env_id))
+
+ if response.status_code >= 400:
+ # too early or too late for obtaining data
+ # small delay will improve our chances to collect it
+ time.sleep(1)
+ continue
+
+ content = response.json()
+
+ if content['data']:
+ return content['data'][0]
+
+        # this is unfortunate: a response from the service api was received
+        # but the data is not available yet, let's try again
+ time.sleep(5)
+
+ return None
+
+
+def fix_datavolume_rhel(rancher_address, env_id):
+ kubelet_svc = get_kubelet_service(rancher_address, env_id)
+ if kubelet_svc:
+ try:
+ data_volume_index = kubelet_svc['launchConfig']['dataVolumes'].index(
+ '/sys:/sys:ro,rprivate')
+ except ValueError:
+ return 'Already changed'
+ kubelet_svc['launchConfig']['dataVolumes'][
+ data_volume_index] = '/sys/fs/cgroup:/sys/fs/cgroup:ro,rprivate'
+ body = {
+ 'inServiceStrategy': {
+ 'batchSize': 1,
+ 'intervalMillis': 2000,
+ 'startFirst': False,
+ 'launchConfig': kubelet_svc['launchConfig'],
+ 'secondaryLaunchConfigs': []
+ }
+ }
+ body_json = json.dumps(body)
+    requests.post(
+        '{}/v2-beta/projects/{}/services/{}?action=upgrade'.format(
+            rancher_address, env_id, kubelet_svc['id']),
+        data=body_json)
+ for _ in range(10):
+ req_svc = requests.get(
+ '{}/v2-beta/projects/{}/services/{}'.format(rancher_address, env_id,
+ kubelet_svc['id']))
+ req_svc_content = req_svc.json()
+ if 'finishupgrade' in req_svc_content['actions']:
+ req_finish = requests.post(
+ req_svc_content['actions']['finishupgrade'])
+ return {
+ 'dataVolumes': req_finish.json()['upgrade']['inServiceStrategy'][
+ 'launchConfig']['dataVolumes']}
+ time.sleep(5)
+ else:
+ raise ValueError('Could not get kubelet service')
+
+
+def create_registration_tokens(rancher_address, env_id):
+ body = {'name': str(env_id)}
+ body_json = json.dumps(body)
+    response = requests.post(
+        '{}/v2-beta/projects/{}/registrationtokens'.format(rancher_address, env_id),
+        data=body_json)
+ for _ in range(10):
+ tokens = requests.get(response.json()['links']['self'])
+ tokens_content = tokens.json()
+ if tokens_content['image'] is not None and tokens_content[
+ 'registrationUrl'] is not None:
+ return {'image': tokens_content['image'],
+ 'reg_url': tokens_content['registrationUrl']}
+ time.sleep(3)
+ return None
+
+
+def get_registration_tokens(rancher_address, env_id):
+ reg_tokens = requests.get(
+ '{}/v2-beta/projects/{}/registrationtokens'.format(rancher_address, env_id))
+ reg_tokens_content = reg_tokens.json()
+ tokens = reg_tokens_content['data']
+ if not tokens:
+ return None
+ return {'image': tokens[0]['image'], 'reg_url': tokens[0]['registrationUrl']}
+
+
+def create_apikey(rancher_address, env_id):
+ body = {
+ 'name': 'kubectl_env_{}'.format(env_id),
+ 'description': "Provides access to kubectl"
+ }
+ body_json = json.dumps(body)
+    apikey_req = requests.post(
+        '{}/v2-beta/apikey'.format(rancher_address), data=body_json)
+ apikey_content = apikey_req.json()
+ return {'public': apikey_content['publicValue'],
+ 'private': apikey_content['secretValue']}
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ descr=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ delete_other_k8s=dict(type='bool', default=False),
+ delete_not_k8s=dict(type='bool', default=True),
+ force=dict(type='bool', default=True),
+ host_os=dict(type='str', required=True)
+ )
+ )
+
+ params = module.params
+ rancher_address = params['server']
+ name = params['name']
+ descr = params['descr']
+ delete_not_k8s = params['delete_not_k8s']
+ delete_other_k8s = params['delete_other_k8s']
+ force = params['force']
+ host_os = params['host_os']
+ state = params['state']
+
+ existing_envs = get_existing_environments(rancher_address)
+ same_name_ids = env_ids_by_name(existing_envs, name)
+
+ to_delete_ids = []
+ changes = {}
+
+ if delete_other_k8s:
+ to_delete_ids += other_k8s_ids(existing_envs, name)
+
+ if delete_not_k8s:
+ to_delete_ids += not_k8s_ids(existing_envs)
+ if force or state == 'absent':
+ to_delete_ids += same_name_ids
+
+ deleted = delete_multiple_environments(rancher_address, to_delete_ids)
+
+ if deleted:
+ changes['deleted'] = deleted
+ if state == 'absent':
+ module.exit_json(changed=True, deleted=changes['deleted'])
+ else:
+ if state == 'absent':
+ module.exit_json(changed=False)
+
+ if len(same_name_ids) > 1 and not force:
+ module.fail_json(msg='Multiple environments with same name. '
+ 'Use "force: yes" to delete '
+ 'all environments with same name.')
+
+ if same_name_ids and not force:
+ changes['environment'] = env_info_by_id(existing_envs, same_name_ids[0])
+ if host_os == 'RedHat':
+ try:
+ rhel_fix = fix_datavolume_rhel(rancher_address, same_name_ids[0])
+ changes['rhel_fix'] = rhel_fix
+ except ValueError as err:
+ module.fail_json(
+ msg='Error: {} Try to recreate k8s environment.'.format(err))
+
+ reg_tokens = get_registration_tokens(rancher_address, same_name_ids[0])
+ if not reg_tokens:
+ reg_tokens = create_registration_tokens(rancher_address, same_name_ids[0])
+ changes['registration_tokens'] = reg_tokens
+
+ apikey = create_apikey(rancher_address, same_name_ids[0])
+ changes['apikey'] = apikey
+        module.exit_json(changed=True, data=changes,
+                         msg='New environment was not created. Only setup was done.')
+ try:
+ new_env = create_k8s_environment(rancher_address, name, descr)
+ except ValueError as err:
+ module.fail_json(msg='Error: {} Try to recreate k8s environment.'.format(err))
+
+ if host_os == 'RedHat':
+ try:
+ rhel_fix = fix_datavolume_rhel(rancher_address, new_env['id'])
+ changes['rhel_fix'] = rhel_fix
+ except ValueError as err:
+ module.fail_json(msg='Error: {} Try to recreate k8s environment.'.format(
+ err))
+
+ reg_tokens = create_registration_tokens(rancher_address, new_env['id'])
+
+ apikey = create_apikey(rancher_address, new_env['id'])
+
+ changes['environment'] = new_env
+ changes['registration_tokens'] = reg_tokens
+ changes['apikey'] = apikey
+
+ module.exit_json(changed=True, data=changes)
+
+
+if __name__ == '__main__':
+ run_module()
+
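
The module can be exercised the same way; the server address and environment name here are illustrative (server, name and host_os are required, per the argument spec):

    ANSIBLE_LIBRARY=ansible/library ansible localhost -m rancher_k8s_environment \
        -a 'server=http://10.0.0.1:8080 name=onap host_os=RedHat state=present'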
diff --git a/ansible/roles/nexus/defaults/main.yml b/ansible/roles/nexus/defaults/main.yml
new file mode 100644
index 00000000..57a79f95
--- /dev/null
+++ b/ansible/roles/nexus/defaults/main.yml
@@ -0,0 +1,2 @@
+# Defaults to install; can be set to configure.
+phase: install
diff --git a/ansible/roles/nexus/files/configure.groovy b/ansible/roles/nexus/files/configure.groovy
new file mode 100644
index 00000000..5691fe64
--- /dev/null
+++ b/ansible/roles/nexus/files/configure.groovy
@@ -0,0 +1,37 @@
+import org.sonatype.nexus.security.realm.RealmManager
+import org.sonatype.nexus.repository.attributes.AttributesFacet
+import org.sonatype.nexus.security.user.UserManager
+import org.sonatype.nexus.repository.manager.RepositoryManager
+import org.sonatype.nexus.security.user.UserNotFoundException
+
+/* Use the container to look up some services. */
+realmManager = container.lookup(RealmManager.class)
+userManager = container.lookup(UserManager.class, "default") //default user manager
+repositoryManager = container.lookup(RepositoryManager.class)
+
+/* Managers are used where the scripting api falls short. Note that the scripting api can mostly only create entities, and that its creation methods return objects for the created entities. */
+/* Perform cleanup by removing all repos and users. Realms do not need to be re-disabled; the admin and anonymous users will not be removed. */
+userManager.listUserIds().each({ id ->
+ if (id != "anonymous" && id != "admin")
+ userManager.deleteUser(id)
+})
+
+repositoryManager.browse().each {
+ repositoryManager.delete(it.getName())
+}
+
+/* Add bearer token realms at the end of realm lists... */
+realmManager.enableRealm("NpmToken")
+realmManager.enableRealm("DockerToken")
+
+/* Create the docker user. */
+security.addUser("docker", "docker", "docker", "docker@example.com", true, "docker", ["nx-anonymous"])
+
+/* Create npm and docker repositories. Their default configuration should be compliant with our requirements, except the docker registry creation. */
+repository.createNpmHosted("npm-private")
+def r = repository.createDockerHosted("docker", 8082, 0)
+
+/* Force basic authentication is true by default; it must be set to false for the docker repo. */
+conf = r.getConfiguration()
+conf.attributes("docker").set("forceBasicAuth", false)
+repositoryManager.update(conf)
diff --git a/ansible/roles/nexus/tasks/configure.yml b/ansible/roles/nexus/tasks/configure.yml
new file mode 100644
index 00000000..66712d8f
--- /dev/null
+++ b/ansible/roles/nexus/tasks/configure.yml
@@ -0,0 +1,34 @@
+---
+- name: "check if the configuration script is uploaded"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure"
+ method: GET
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ status_code: [200, 404]
+ register: script
+- block:
+ - name: "upload the configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: json
+ body:
+ name: configure
+ type: groovy
+ content: "{{ lookup('file', 'files/configure.groovy') }}"
+ status_code: [204]
+ - name: "execute configuration script"
+ uri:
+ url: "{{ nexus_url }}/service/rest/v1/script/configure/run"
+ method: POST
+ force_basic_auth: yes
+ user: admin
+ password: admin123
+ body_format: raw
+ headers: { "Content-Type": "text/plain" }
+ when: script.status == 404
diff --git a/ansible/roles/nexus/tasks/insert-images.yml b/ansible/roles/nexus/tasks/insert-images.yml
new file mode 100644
index 00000000..2e2a45c3
--- /dev/null
+++ b/ansible/roles/nexus/tasks/insert-images.yml
@@ -0,0 +1,19 @@
+---
+- name: Load docker images and push into registry
+ block:
+ - set_fact:
+ component: "{{ (item.path | basename | splitext)[0] }}"
+
+ - name: Docker login
+ docker_login:
+ registry: "{{ runtime_images[component].registry }}"
+ username: admin
+ password: admin123
+
+ - name: Load and push component {{ component }}
+ docker_image:
+ name: "{{ runtime_images[component].registry }}{{ runtime_images[component].path }}"
+ tag: "{{ runtime_images[component].tag }}"
+ push: yes
+ load_path: "{{ item.path }}"
+
diff --git a/ansible/roles/nexus/tasks/install.yml b/ansible/roles/nexus/tasks/install.yml
new file mode 100644
index 00000000..6dc82fe6
--- /dev/null
+++ b/ansible/roles/nexus/tasks/install.yml
@@ -0,0 +1,29 @@
+---
+- name: Change ownership of nexus_data
+ file:
+ path: "{{ app_data_path }}/nexus_data"
+ owner: 200
+ group: 200
+ recurse: yes
+
+- name: Load nexus image
+ docker_image:
+ name: sonatype/nexus3
+ load_path: "{{ app_data_path }}/offline_data/docker_images_infra/sonatype_nexus3_latest.tar"
+ state: present
+
+- name: Create nexus network
+ docker_network:
+ name: nexus_network
+ state: present
+
+- name: Run nexus container
+ docker_container:
+ name: nexus
+ image: sonatype/nexus3
+ networks:
+ - name: nexus_network
+ volumes:
+ - "{{ app_data_path }}/nexus_data:/nexus-data:rw"
+ state: started
+ restart_policy: unless-stopped
diff --git a/ansible/roles/nexus/tasks/main.yml b/ansible/roles/nexus/tasks/main.yml
new file mode 100644
index 00000000..c5905b13
--- /dev/null
+++ b/ansible/roles/nexus/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "{{ phase }}.yml"
diff --git a/ansible/roles/nexus/tasks/runtime-populate.yml b/ansible/roles/nexus/tasks/runtime-populate.yml
new file mode 100644
index 00000000..e22b650e
--- /dev/null
+++ b/ansible/roles/nexus/tasks/runtime-populate.yml
@@ -0,0 +1,12 @@
+---
+- name: Find images to be inserted into nexus in runtime
+ find:
+ paths: "{{ aux_data_path }}"
+ patterns: '*.tar'
+ register: tar_images
+
+# Workaround: a block of tasks can't be executed in a loop,
+# so we iterate over an include of those tasks instead
+- include: "insert-images.yml"
+ with_items: "{{ tar_images.files }}"
+
diff --git a/ansible/roles/nexus/vars/main.yml b/ansible/roles/nexus/vars/main.yml
new file mode 100644
index 00000000..63944161
--- /dev/null
+++ b/ansible/roles/nexus/vars/main.yml
@@ -0,0 +1 @@
+nexus_url: "https://nexus.{{ hostvars[groups.infrastructure[0]].ansible_nodename }}"
diff --git a/ansible/roles/rancher/tasks/main.yml b/ansible/roles/rancher/tasks/main.yml
new file mode 100644
index 00000000..1370a39f
--- /dev/null
+++ b/ansible/roles/rancher/tasks/main.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: "rancher_{{ rancher_role }}.yml"
diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml
new file mode 100644
index 00000000..4c9cb8dd
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_agent.yml
@@ -0,0 +1,13 @@
+---
+- name: Add Rancher Agent
+ docker_container:
+ name: rancher_agent
+ image: "{{ server_hostvars.rancher_agent_image }}"
+ command: "{{ server_hostvars.rancher_agent_reg_url }}"
+ volumes:
+ - "/var/run/docker.sock:/var/run/docker.sock"
+ - "/var/lib/rancher:/var/lib/rancher"
+ auto_remove: yes
+ privileged: yes
+ vars:
+ server_hostvars: "{{ hostvars[groups.infrastructure.0] }}"
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
new file mode 100644
index 00000000..9abf986b
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -0,0 +1,51 @@
+---
+# DO NOT ADD SPACE AROUND ';'
+- name: Start rancher/server:v1.6.14
+ docker_container:
+ name: rancher_server
+ image: rancher/server:v1.6.14
+ command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
+    ports:
+      - "8080:8080"
+ state: started
+ restart_policy: unless-stopped
+ volumes:
+ - "{{ app_data_path }}/certs:/usr/local/share/ca-certificates/extra:ro"
+
+- name: Wait for rancher server to be ready
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta"
+ register: response
+ retries: 10
+ delay: 30
+ until: not response.failed
+
+- name: Create kubernetes environment
+ rancher_k8s_environment:
+ name: "{{ app_name }}"
+ descr: "Kubernetes environment for {{ app_name }}"
+ server: "{{ rancher_server_url }}"
+ delete_other_k8s: "{{ rancher_remove_other_env }}"
+ force: "{{ rancher_redeploy_k8s_env }}"
+ host_os: "{{ ansible_os_family }}"
+ register: env
+ retries: 10
+ delay: 5
+ until: env.data is defined
+
+- name: Set apikey values
+ set_fact:
+ k8s_env_id: "{{ env.data.environment.id }}"
+ key_public: "{{ env.data.apikey.public }}"
+ key_private: "{{ env.data.apikey.private }}"
+ rancher_agent_image: "{{ env.data.registration_tokens.image }}"
+ rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
+
+- name: Ensure .kube directory exists
+ file:
+ path: "{{ kube_directory }}"
+ state: directory
+
+- name: Create kube config
+ template:
+ src: kube_config.j2
+ dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/rancher/templates/kube_config.j2 b/ansible/roles/rancher/templates/kube_config.j2
new file mode 100644
index 00000000..87f332e6
--- /dev/null
+++ b/ansible/roles/rancher/templates/kube_config.j2
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Config
+clusters:
+- cluster:
+ api-version: v1
+ insecure-skip-tls-verify: true
+ server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id }}/kubernetes:6443"
+ name: "{{ app_name }}"
+contexts:
+- context:
+ cluster: "{{ app_name }}"
+ user: "{{ app_name }}"
+ name: "{{ app_name }}"
+current-context: "{{ app_name }}"
+users:
+- name: "{{ app_name }}"
+ user:
+ token: "{{ (['Basic', [key_public, key_private] | join(':') | b64encode] | join(' ')) | b64encode }}"
+
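The token line applies base64 twice: the inner b64encode turns the API key pair into an HTTP Basic credential, and the outer one wraps the whole "Basic ..." string as Rancher expects it in the kubeconfig. A shell equivalent (key values come from the facts set in rancher_server.yml; base64 -w0 assumes GNU coreutils):

    inner=$(printf '%s:%s' "$key_public" "$key_private" | base64 -w0)
    printf 'Basic %s' "$inner" | base64 -w0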