summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--ansible/application.yml16
-rwxr-xr-xansible/docker/run_chroot.sh420
-rwxr-xr-xansible/group_vars/all.yml18
-rwxr-xr-xansible/group_vars/infrastructure.yml9
-rw-r--r--ansible/infrastructure.yml6
-rw-r--r--ansible/rancher_kubernetes.yml32
-rw-r--r--ansible/roles/application-install/defaults/main.yml1
-rw-r--r--ansible/roles/application-install/tasks/main.yml22
-rw-r--r--ansible/roles/application-override/tasks/main.yml16
-rw-r--r--ansible/roles/application/defaults/main.yml11
-rw-r--r--ansible/roles/application/tasks/custom_role.yml (renamed from ansible/roles/application-install/tasks/custom_role.yml)2
-rw-r--r--ansible/roles/application/tasks/install.yml (renamed from ansible/roles/application-install/tasks/install.yml)28
-rw-r--r--ansible/roles/application/tasks/main.yml24
-rw-r--r--ansible/roles/application/tasks/post-install.yml (renamed from ansible/roles/application-install/tasks/post-install.yml)2
-rw-r--r--ansible/roles/application/tasks/pre-install.yml5
-rw-r--r--ansible/roles/application/tasks/transfer-helm-charts.yml (renamed from ansible/roles/application-install/tasks/pre-install.yml)9
-rw-r--r--ansible/roles/chrony/defaults/main.yml16
-rw-r--r--ansible/roles/chrony/handlers/main.yml5
-rw-r--r--ansible/roles/chrony/tasks/main.yml26
-rw-r--r--ansible/roles/chrony/templates/chrony.conf.j222
-rw-r--r--ansible/roles/docker/handlers/docker_restart.yml18
-rw-r--r--ansible/roles/docker/handlers/main.yml5
-rw-r--r--ansible/roles/kubectl/defaults/main.yml5
-rw-r--r--ansible/roles/kubectl/tasks/main.yml10
-rw-r--r--ansible/roles/kubectl/templates/kube_config.j2 (renamed from ansible/roles/rancher/templates/kube_config.j2)2
-rw-r--r--ansible/roles/nginx/templates/nginx.conf.j21
-rw-r--r--ansible/roles/package-repository-check/tasks/RedHat.yml20
-rw-r--r--ansible/roles/package-repository-check/tasks/main.yml12
-rw-r--r--ansible/roles/rancher/defaults/main.yml4
-rw-r--r--ansible/roles/rancher/tasks/main.yml2
-rw-r--r--ansible/roles/rancher/tasks/rancher_agent.yml4
-rw-r--r--ansible/roles/rancher/tasks/rancher_health.yml8
-rw-r--r--ansible/roles/rancher/tasks/rancher_server.yml14
-rw-r--r--ansible/roles/resource-data/tasks/main.yml27
-rwxr-xr-xansible/test/images/docker/build-all.sh29
-rw-r--r--ansible/test/images/docker/centos7/Dockerfile29
-rwxr-xr-xansible/test/images/docker/centos7/build.sh22
-rw-r--r--ansible/test/images/docker/centos7/dbus.service16
-rw-r--r--docs/InstallGuide.rst54
-rw-r--r--docs/TestingGuide.rst365
-rw-r--r--docs/index.rst2
41 files changed, 949 insertions, 390 deletions
diff --git a/ansible/application.yml b/ansible/application.yml
index bbac7e5c..02c654f2 100644
--- a/ansible/application.yml
+++ b/ansible/application.yml
@@ -2,22 +2,16 @@
- name: Setup nfs server
hosts: nfs-server
roles:
- - {role: nfs, when: groups.kubernetes | length > 1 }
+ - role: nfs
+ when: groups.kubernetes | length > 1
- name: Setup nfs mounts
hosts: kubernetes:!nfs-server
roles:
- - {role: nfs, when: groups.kubernetes | length > 1 }
+ - role: nfs
+ when: groups.kubernetes | length > 1
- name: Install Helm application {{ app_name }} into offline Kubernetes cluster
hosts: infrastructure
roles:
- - role: application-install
- vars:
- phase: pre-install
- - role: application-install
- vars:
- phase: install
- - role: application-install
- vars:
- phase: post-install
+ - application
diff --git a/ansible/docker/run_chroot.sh b/ansible/docker/run_chroot.sh
index 3359fdcd..8ae9c188 100755
--- a/ansible/docker/run_chroot.sh
+++ b/ansible/docker/run_chroot.sh
@@ -22,7 +22,6 @@
set -e
CMD=$(basename "$0")
-UMOUNT_TIMEOUT=120 # 2mins
#
@@ -106,31 +105,18 @@ EXAMPLE:
"
}
-# arg: <directory>
-is_mounted()
-{
- mountpoint=$(echo "$1" | sed 's#//*#/#g')
-
- LANG=C mount | grep -q "^[^[:space:]]\+[[:space:]]\+on[[:space:]]\+${mountpoint}[[:space:]]\+type[[:space:]]\+"
-}
-
# layers are right to left! First is on the right, top/last is on the left
do_overlay_mount()
{
- if [ -d "$overlay" ] && is_mounted "$overlay" ; then
- echo ERROR: "The overlay directory is already mounted: $overlay" >&2
- echo ERROR: "Fix the issue - cannot proceed" >&2
- exit 1
- fi
-
# prepare dirs
- rm -rf "$overlay" "$upperdir" "$workdir"
+mkdir -p $ovtempdir
+mount -t tmpfs -o mode=0755 tmpfs $ovtempdir
mkdir -p "$overlay"
mkdir -p "$upperdir"
mkdir -p "$workdir"
# finally overlay mount
- if ! mount -t overlay --make-rprivate \
+ if ! mount -t overlay \
-o lowerdir="$lowerdir",upperdir="$upperdir",workdir="$workdir" \
overlay "$overlay" ;
then
@@ -147,40 +133,16 @@ do_overlay_mount()
return 0
}
-cleanup()
-{
- case "$OVERLAY_MOUNT" in
- yes)
- echo INFO: "Umounting overlay..." >&2
- if ! umount_retry "$CHROOT_DIR" ; then
- echo ERROR: "Cannot umount chroot: $CHROOT_DIR" >&2
- return 1
- fi
-
- ;;
- no)
- echo INFO: "No overlay to umount" >&2
- ;;
- esac
-
- if ! is_mounted "$overlay" ; then
- echo INFO: "Deleting of temp directories..." >&2
- rm -rf "$overlay" "$upperdir" "$workdir"
- else
- echo ERROR: "Overlay is still mounted: $CHROOT_DIR" >&2
- echo ERROR: "Cannot delete: $overlay" >&2
- echo ERROR: "Cannot delete: $upperdir" >&2
- echo ERROR: "Cannot delete: $workdir" >&2
- return 1
- fi
-}
-
check_external_mounts()
{
- echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ echo "$EXTERNAL_MOUNTS" | while read -r mountexpr ; do
+ #Skip empty lines, done with if for readability.
+ if [ -z $mountexpr ]; then
+ continue
+ fi
mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
- internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}')
case "$mount_type" in
ro|rw)
@@ -203,16 +165,13 @@ check_external_mounts()
do_external_mounts()
{
echo INFO: "Bind mounting of external mounts..." >&2
- echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
+ echo "$EXTERNAL_MOUNTS" | while read -r mountexpr ; do
+ if [ -z $mountexpr ]; then
+ continue
+ fi
mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
- internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
-
- if is_mounted "${CHROOT_DIR}/${internal}" ; then
- echo ERROR: "Mountpoint is already mounted: ${CHROOT_DIR}/${internal}" >&2
- echo ERROR: "Fix the issue - cannot proceed" >&2
- exit 1
- fi
+ internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}')
# trying to follow the behaviour of docker
if ! [ -e "$external" ] || [ -d "$external" ] ; then
@@ -242,7 +201,9 @@ do_external_mounts()
exit 1
fi
- if ! mount --make-rprivate -o bind,${mount_type} "$external" "${CHROOT_DIR}/${internal}" ; then
+#Note, this double mounting is needed to support older util-linux.
+ if ! mount -o bind "${external}" "${CHROOT_DIR}/${internal}" ||
+ ! mount -o remount,bind,${mount_type} "${CHROOT_DIR}/${internal}" ; then
echo ERROR: "Failed to mount: ${external} -> ${internal}" >&2
exit 1
else
@@ -251,231 +212,166 @@ do_external_mounts()
done
}
-# arg: <mountpoint>
-umount_retry()
-{
- mountpoint=$(echo "$1" | sed 's#//*#/#g')
- timeout=${UMOUNT_TIMEOUT}
-
- umount "$mountpoint" 2>/dev/null
- while is_mounted "$mountpoint" && [ $timeout -gt 0 ] ; do
- umount "$mountpoint" 2>/dev/null
- sleep 1
- timeout=$(( timeout - 1 ))
- done
-
- if ! is_mounted "$mountpoint" ; then
- return 0
- fi
-
- return 1
-}
-
-undo_external_mounts()
-{
- echo INFO: "Umount external mount points..." >&2
- echo "$EXTERNAL_MOUNTS" | tac | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do
- mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}')
- external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}')
- internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g')
- if umount_retry "${CHROOT_DIR}/${internal}" ; then
- echo INFO: "Unmounted: ${CHROOT_DIR}/${internal}" >&2
- else
- echo ERROR: "Failed to umount: ${CHROOT_DIR}/${internal}" >&2
- fi
- done
-}
-
-install_wrapper()
-{
- cat > "$CHROOT_DIR"/usr/local/bin/fakeshell.sh <<EOF
-#!/bin/sh
-
-PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-export PATH
-
-gid_tty=\$(getent group | sed -n '/^tty:/p' | cut -d: -f 3)
-
-mount -t proc proc /proc
-mount -t sysfs none /sys
-mount -t tmpfs none /dev
-
-mkdir -p /dev/shm
-mkdir -p /dev/pts
-mount -t devpts -o gid=\${gid_tty},mode=620 none /dev/pts
-
-[ -e /dev/full ] || mknod -m 666 /dev/full c 1 7
-[ -e /dev/ptmx ] || mknod -m 666 /dev/ptmx c 5 2
-[ -e /dev/random ] || mknod -m 644 /dev/random c 1 8
-[ -e /dev/urandom ] || mknod -m 644 /dev/urandom c 1 9
-[ -e /dev/zero ] || mknod -m 666 /dev/zero c 1 5
-[ -e /dev/tty ] || mknod -m 666 /dev/tty c 5 0
-[ -e /dev/console ] || mknod -m 622 /dev/console c 5 1
-[ -e /dev/null ] || mknod -m 666 /dev/null c 1 3
-
-chown root:tty /dev/console
-chown root:tty /dev/ptmx
-chown root:tty /dev/tty
-
-mkdir -p "\$1" || exit 1
-cd "\$1" || exit 1
-shift
-
-exec "\$@"
-
-EOF
- chmod +x "$CHROOT_DIR"/usr/local/bin/fakeshell.sh
-}
-
-on_exit()
-{
- set +e
- echo
-
- if [ -n "$OVERLAY_MOUNT" ] ; then
- undo_external_mounts
- fi
- cleanup
-}
#
-# parse arguments
+# parse arguments (only on first invocation, before re-exec into the namespace)
#
-state=nil
-action=nil
-EXTERNAL_MOUNTS=''
-CHROOT_WORKDIR=''
-CHROOT_METADIR=''
-CHROOT_DIR=''
-COMMAND=''
-while [ -n "$1" ] ; do
- case "$state" in
- nil)
- case "$1" in
- ''|-h|--help|help)
- help
- exit 0
- ;;
- --mount)
- EXTERNAL_MOUNTS=$(printf "%s\n%s\n" "$EXTERNAL_MOUNTS" "${2}")
- state=next
- ;;
- --workdir)
- if [ -z "$CHROOT_WORKDIR" ] ; then
- CHROOT_WORKDIR="$2"
+if [ -z $IN_NAMESPACE ]; then
+ export state=nil
+ export action=nil
+ export EXTERNAL_MOUNTS=''
+ export CHROOT_WORKDIR=''
+ export CHROOT_METADIR=''
+ export CHROOT_DIR=''
+ export COMMAND=''
+ while [ -n "$1" ] ; do
+ case "$state" in
+ nil)
+ case "$1" in
+ ''|-h|--help|help)
+ help
+ exit 0
+ ;;
+ --mount)
+ EXTERNAL_MOUNTS=$(printf "%s\n%s" "$EXTERNAL_MOUNTS" "${2}")
state=next
- else
- echo ERROR: "Multiple working directory argument" >&2
+ ;;
+ --workdir)
+ if [ -z "$CHROOT_WORKDIR" ] ; then
+ CHROOT_WORKDIR="$2"
+ state=next
+ else
+ echo ERROR: "Multiple working directory argument" >&2
+ help >&2
+ exit 1
+ fi
+ ;;
+ execute)
+ action=execute
+ state=execute
+ ;;
+ *)
+ echo ERROR: "Bad usage" >&2
help >&2
exit 1
- fi
- ;;
- execute)
- action=execute
- state=execute
- ;;
- *)
- echo ERROR: "Bad usage" >&2
- help >&2
- exit 1
- ;;
- esac
- ;;
- next)
- state=nil
- ;;
- execute)
- CHROOT_METADIR="$1"
- shift
- break
- ;;
- esac
- shift
-done
-
-
-case "$action" in
- ''|nil)
+ ;;
+ esac
+ ;;
+ next)
+ state=nil
+ ;;
+ execute)
+ CHROOT_METADIR="$1"
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+
+
+ if [ $action = "nil" ]; then
echo ERROR: "Nothing to do - missing command" >&2
help >&2
exit 1
- ;;
- execute)
- # firstly do sanity checking ...
-
- if [ -z "$CHROOT_METADIR" ] ; then
- echo ERROR: "Missing argument" >&2
- help >&2
- exit 1
- fi
-
- # making sure that CHROOT_METADIR is absolute path
- CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
-
- if ! [ -d "$CHROOT_METADIR"/chroot ] ; then
- echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2
- exit 1
- fi
-
- # check external mounts if there are any
- check_external_mounts
-
- # check workdir
- if [ -n "$CHROOT_WORKDIR" ] ; then
- CHROOT_WORKDIR=$(echo "$CHROOT_WORKDIR" | sed -e 's#^/*##' -e 's#//*#/#g')
- fi
-
- # we must be root
- if [ "$(id -u)" -ne 0 ] ; then
- echo ERROR: "Need to be root and you are not: $(id -nu)" >&2
- exit 1
- fi
+ fi
- if ! which unshare >/dev/null 2>/dev/null ; then
- echo ERROR: "'unshare' system command is missing - ABORT" >&2
- echo INFO: "Try to install 'util-linux' package" >&2
- exit 1
- fi
+ # do sanity checking ...
- # ... sanity checking done
+ if [ -z "$CHROOT_METADIR" ] ; then
+ echo ERROR: "Missing argument" >&2
+ help >&2
+ exit 1
+ fi
- # setup paths
- lowerdir="$CHROOT_METADIR"/chroot
- upperdir="$CHROOT_METADIR"/.overlay
- workdir="$CHROOT_METADIR"/.workdir
- overlay="$CHROOT_METADIR"/.merged
+ # making sure that CHROOT_METADIR is absolute path
+ CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR")
- # set trap
- trap on_exit QUIT TERM EXIT
+ if ! [ -d "$CHROOT_METADIR"/chroot ] ; then
+ echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2
+ exit 1
+ fi
- # mount overlay
- OVERLAY_MOUNT=''
- if do_overlay_mount ; then
- # overlay chroot
- OVERLAY_MOUNT=yes
- else
- # non overlay mount
- OVERLAY_MOUNT=no
- fi
+ # check external mounts if there are any
+ check_external_mounts
- # do the user-specific mounts
- do_external_mounts
+ # we must be root
+ if [ "$(id -u)" -ne 0 ] ; then
+ echo ERROR: "Need to be root and you are not: $(id -nu)" >&2
+ exit 1
+ fi
- # I need this wrapper to do some setup inside the chroot...
- install_wrapper
+ if ! which unshare >/dev/null 2>/dev/null ; then
+ echo ERROR: "'unshare' system command is missing - ABORT" >&2
+ echo INFO: "Try to install 'util-linux' package" >&2
+ exit 1
+ fi
- # execute chroot
- if [ -n "$1" ] ; then
- :
- else
- set -- /bin/sh -l
- fi
- unshare -mfpi --propagation private \
- chroot "$CHROOT_DIR" /usr/local/bin/fakeshell.sh "${CHROOT_WORKDIR:-/}" "$@"
- ;;
-esac
+ # ... sanity checking done
+
+ #Reexec ourselves in new pid and mount namespace (isolate!).
+ #Note: newly executed shell will be pid1 in a new namespace. Killing it will kill
+ #every other process in the whole process tree with sigkill. That will in turn
+ #destroy namespaces and undo all mounts done previously.
+ IN_NAMESPACE=1 exec unshare -mpf "$0" "$@"
+fi
+
+#We are namespaced.
+# setup paths
+lowerdir="$CHROOT_METADIR"/chroot
+ovtempdir="$CHROOT_METADIR"/tmp
+upperdir="$ovtempdir"/.overlay
+workdir="$ovtempdir"/.workdir
+overlay="$CHROOT_METADIR"/.merged
+
+#In case we are using a really old unshare, make the whole tree into private mounts manually.
+mount --make-rprivate /
+#New mounts are private always from now on.
+
+do_overlay_mount
+
+# do the user-specific mounts
+do_external_mounts
+
+#And setup api filesystems.
+mount -t proc proc "${CHROOT_DIR}/proc"
+mount -t sysfs none "${CHROOT_DIR}/sys"
+mount -t tmpfs none "${CHROOT_DIR}/dev"
+
+mkdir -p "${CHROOT_DIR}/dev/shm"
+mkdir -p "${CHROOT_DIR}/dev/pts"
+mount -t devpts none "${CHROOT_DIR}/dev/pts"
+
+mknod -m 666 "${CHROOT_DIR}/dev/full" c 1 7
+mknod -m 666 "${CHROOT_DIR}/dev/ptmx" c 5 2
+mknod -m 644 "${CHROOT_DIR}/dev/random" c 1 8
+mknod -m 644 "${CHROOT_DIR}/dev/urandom" c 1 9
+mknod -m 666 "${CHROOT_DIR}/dev/zero" c 1 5
+mknod -m 666 "${CHROOT_DIR}/dev/tty" c 5 0
+mknod -m 622 "${CHROOT_DIR}/dev/console" c 5 1
+mknod -m 666 "${CHROOT_DIR}/dev/null" c 1 3
+ln -s /proc/self/fd/0 "$CHROOT_DIR/dev/stdin"
+ln -s /proc/self/fd/1 "$CHROOT_DIR/dev/stdout"
+ln -s /proc/self/fd/2 "$CHROOT_DIR/dev/stderr"
+
+# execute chroot
+if [ -z "$1" ] ; then
+ set -- /bin/sh -l
+fi
+
+#The redirection is to save our stdin, because we use it to pipe commands and we
+#may want interactivity.
+exec chroot "${CHROOT_DIR}" /bin/sh /dev/stdin "${CHROOT_WORKDIR:-/}" "$@" 3<&0 << "EOF"
+PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+export PATH
+mkdir -p $1
+cd $1
+shift
+#I intend to reset stdin back *and* close the copy.
+exec "$@" <&3 3<&-
+EOF
exit 0
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index 0d22ac5d..f9d6726f 100755
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -90,9 +90,10 @@ runtime_images: {}
# Application specific params #
###############################
-# App Helm charts directory location in installation package.
-# The path is absolute path (even locates relative inside of this sw package
-# installation folder) because it must be visible for ansible docker/chroot
+# App Helm charts directory location in installation package
+# (local path for the ansible process).
+# The path locates relative inside of this sw package
+# installation folder and must be visible for ansible docker/chroot
# process to find directory and to transfer it into machine (infra node) running
# Helm repository.
# Content of the folder must be Helm chart directories of the app with Makefile.
@@ -100,7 +101,7 @@ runtime_images: {}
# NOTE: This default value should not be changed if not really needed and it
# must match with the variable "HELM_CHARTS_DIR_IN_PACKAGE" value in package.sh
# script!
-app_helm_charts_install_directory: "/ansible/application/helm_charts"
+app_helm_charts_install_directory: application/helm_charts
# Specify target dir where helm charts are copied into on infra node.
# (same as content of "app_helm_charts_install_directory" copied by installer to this dir.)
@@ -146,3 +147,12 @@ application_post_install_role:
# openStackDomain: "Default"
# openStackUserName: "admin"
# openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"
+
+# Optional time synchronisation settings
+# timesync:
+# servers:
+# - <ip address of NTP_1>
+# - <...>
+# - <ip address of NTP_N>
+# slewclock: false
+# timezone: <timezone name from tz database>
diff --git a/ansible/group_vars/infrastructure.yml b/ansible/group_vars/infrastructure.yml
index 9fd88f25..a248a514 100755
--- a/ansible/group_vars/infrastructure.yml
+++ b/ansible/group_vars/infrastructure.yml
@@ -24,13 +24,6 @@ simulated_hosts:
- registry.npmjs.org
all_simulated_hosts:
"{{ simulated_hosts.git + simulated_hosts.http + simulated_hosts.nexus }}"
-rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080"
rancher_server_version: v1.6.22
-rancher_remove_other_env: yes
-rancher_redeploy_k8s_env: yes
-populate_nexus: no
-kube_directory: /root/.kube
-kubectl_bin_dir: /usr/local/bin
+populate_nexus: false
helm_bin_dir: /usr/local/bin
-helm_repository_name: local
-helm_repository_url: http://127.0.0.1:8879
diff --git a/ansible/infrastructure.yml b/ansible/infrastructure.yml
index 18290ae4..e49ee919 100644
--- a/ansible/infrastructure.yml
+++ b/ansible/infrastructure.yml
@@ -3,11 +3,13 @@
hosts: infrastructure, kubernetes
roles:
- package-repository
- - role: firewall
+ - firewall
- name: Setup infrastructure servers
hosts: infrastructure
roles:
+ - chrony
+ - package-repository-check
- certificates
- docker
- dns
@@ -18,6 +20,8 @@
- name: Setup base for Kubernetes nodes
hosts: kubernetes
roles:
+ - chrony
+ - package-repository-check
- docker
tasks:
- include_role:
diff --git a/ansible/rancher_kubernetes.yml b/ansible/rancher_kubernetes.yml
index 196f1fc2..e44f18d3 100644
--- a/ansible/rancher_kubernetes.yml
+++ b/ansible/rancher_kubernetes.yml
@@ -1,31 +1,23 @@
---
-- name: Install binaries for controlling deployment
+- name: Deploy rancher server and create k8s env
hosts: infrastructure
roles:
+ - role: rancher
+ vars:
+ mode: server
- kubectl
- helm
-- name: Deploy rancher server and create k8s env
- hosts: infrastructure
- roles:
- - rancher
- vars:
- rancher_role: server
-
-- name: Deploy rancher agents
+- name: Deploy rancher agents for kube nodes
hosts: kubernetes
roles:
- - rancher
- vars:
- rancher_role: agent
+ - role: rancher
+ vars:
+ mode: agent
- name: Wait for Kubernetes environment to be healthy
hosts: infrastructure
- tasks:
- - name: Check cluster health
- uri:
- url: "{{ rancher_server_url }}/v2-beta/projects/{{ k8s_env_id }}"
- register: env_info
- retries: 30
- delay: 15
- until: "env_info.json.healthState == 'healthy'"
+ roles:
+ - role: rancher
+ vars:
+ mode: health \ No newline at end of file
diff --git a/ansible/roles/application-install/defaults/main.yml b/ansible/roles/application-install/defaults/main.yml
deleted file mode 100644
index 473fbb80..00000000
--- a/ansible/roles/application-install/defaults/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-phase: install
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
deleted file mode 100644
index ba522792..00000000
--- a/ansible/roles/application-install/tasks/main.yml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- debug:
- msg: "phase is {{ phase }}"
-
-- name: Check if install needed
- block:
- - name: "Does {{ app_helm_charts_install_directory }} exist and contain Helm Charts"
- find:
- paths: "{{ app_helm_charts_install_directory }}"
- recurse: yes
- delegate_to: localhost
- register: charts_files
- - name: Set install active fact
- set_fact:
- install_needed: "{{ true if charts_files.matched | int > 0 else false }}"
- when: phase == "pre-install"
-
-- include_tasks: "{{ phase }}.yml"
- when: install_needed
-
-- debug:
- msg: "Install needed {{ install_needed }}"
diff --git a/ansible/roles/application-override/tasks/main.yml b/ansible/roles/application-override/tasks/main.yml
new file mode 100644
index 00000000..1ecf7c79
--- /dev/null
+++ b/ansible/roles/application-override/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# Role for generating Helm override.yml file
+- name: Register root certificate
+ slurp:
+ src: "{{ playbook_dir }}/certs/rootCA.crt"
+ register: root_cert
+ delegate_to: localhost
+
+- name: "Set root ca certificate"
+ set_fact:
+ merged_overrides: "{{ overrides | default({}) | combine({'global': {'cacert': root_cert.content | b64decode}}, recursive=True) }}"
+
+- name: "Create {{ app_helm_override_file }}"
+ copy:
+ dest: "{{ app_helm_override_file }}"
+ content: "{{ merged_overrides | to_nice_yaml }}"
diff --git a/ansible/roles/application/defaults/main.yml b/ansible/roles/application/defaults/main.yml
new file mode 100644
index 00000000..dec17601
--- /dev/null
+++ b/ansible/roles/application/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+helm_repository_name: local
+helm_repository_url: http://127.0.0.1:8879
+# Override file generation for Helm application can be customized by any role
+# given by user and found by ansible from roles_path.
+# By default override file is generated by 'application-override' role that is
+# specific for offline installer (for onap) as it's generating server
+# certificate needed to simulate internet by offline installer.
+app_skip_helm_override: false
+app_helm_override_role: application-override
+app_helm_override_file: "{{ app_data_path }}/override.yaml"
diff --git a/ansible/roles/application-install/tasks/custom_role.yml b/ansible/roles/application/tasks/custom_role.yml
index b6f6f351..f0b9a84f 100644
--- a/ansible/roles/application-install/tasks/custom_role.yml
+++ b/ansible/roles/application/tasks/custom_role.yml
@@ -1,6 +1,6 @@
---
# Caller fills application_custom_role variable with actual role name.
-- name: "Execute custom role {{ application_custom_role }} {{ phase }} Helm install."
+- name: "Execute custom role {{ application_custom_role }} for Helm install."
include_role:
name: "{{ application_custom_role }}"
when:
diff --git a/ansible/roles/application-install/tasks/install.yml b/ansible/roles/application/tasks/install.yml
index 1cccf9ad..103ecc8b 100644
--- a/ansible/roles/application-install/tasks/install.yml
+++ b/ansible/roles/application/tasks/install.yml
@@ -6,8 +6,8 @@
--skip-refresh
changed_when: true # init is always changed type of action
-#A correct way to implement this would be using --wait option in helm init invocation.
-#However, it does not work due to https://github.com/helm/helm/issues/4031 (fixed in newer helm release)
+# A correct way to implement this would be using --wait option in helm init invocation.
+# However, it does not work due to https://github.com/helm/helm/issues/4031 (fixed in newer helm release)
- name: "Wait for helm upgrade to finish"
command: "{{ helm_bin_dir }}/helm version --tiller-connection-timeout 10"
register: result
@@ -42,7 +42,7 @@
- "'Error: no repositories to show' not in helm_repo_list.stderr"
- name: Helm Add Repo
- command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name }} {{ helm_repository_url }}"
+ command: "{{ helm_bin_dir }}/helm repo add {{ helm_repository_name | mandatory }} {{ helm_repository_url | mandatory }}"
when: "'local' not in helm_repo_list.stdout"
changed_when: true # when executed its a changed type of action
@@ -50,24 +50,14 @@
make:
chdir: "{{ app_helm_charts_infra_directory }}"
target: "{{ item }}"
- with_items: "{{ app_helm_build_targets }}"
+ loop: "{{ app_helm_build_targets }}"
environment:
PATH: "{{ helm_bin_dir }}:{{ ansible_env.PATH }}"
-- name: Register root certificate
- slurp:
- src: "{{ playbook_dir }}/certs/rootCA.crt"
- register: root_cert
- delegate_to: localhost
-
-# WA: this is required because deploy plugin dont process params properly
-- name: Create override file with global.cacert
- copy:
- dest: "{{ app_data_path }}/override.yaml"
- content: |
- global:
- cacert: |
- {{ root_cert['content'] | b64decode | indent( width=4, indentfirst=False) }}
+- name: Generate Helm application override file with custom role
+ include_role:
+ name: "{{ app_helm_override_role }}"
+ when: not app_skip_helm_override
- name: Check for deploy plugin presence
stat:
@@ -81,7 +71,7 @@
{{ app_helm_release_name }}
{{ helm_repository_name }}/{{ app_helm_chart_name }}
--namespace {{ app_kubernetes_namespace }}
- -f {{ app_data_path }}/override.yaml
+ {{ '' if app_skip_helm_override else '-f ' + app_helm_override_file }}
changed_when: true # when executed its a changed type of action
register: helm_install
failed_when: helm_install.stderr
diff --git a/ansible/roles/application/tasks/main.yml b/ansible/roles/application/tasks/main.yml
new file mode 100644
index 00000000..3018e95f
--- /dev/null
+++ b/ansible/roles/application/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+- name: Check if application Helm charts exist and install is even needed
+ block:
+ - name: "Does {{ app_helm_charts_install_directory }} directory exist and contain Helm Charts"
+ find:
+ paths: "{{ app_helm_charts_install_directory }}"
+ recurse: true
+ delegate_to: localhost
+ register: charts_files
+ - name: Set install active fact
+ set_fact:
+ install_needed: "{{ true if charts_files.matched | int > 0 else false }}"
+
+- name: Install app with Helm charts
+ block:
+ - include_tasks: transfer-helm-charts.yml
+ - include_tasks: pre-install.yml
+ - include_tasks: install.yml
+ - include_tasks: post-install.yml
+ when: install_needed
+
+- debug:
+ msg: "NOTE, nothing done as application Helm charts does not exist!"
+ when: not install_needed
diff --git a/ansible/roles/application-install/tasks/post-install.yml b/ansible/roles/application/tasks/post-install.yml
index 10594233..5464cb46 100644
--- a/ansible/roles/application-install/tasks/post-install.yml
+++ b/ansible/roles/application/tasks/post-install.yml
@@ -2,4 +2,4 @@
- name: "Execute custome role {{ application_post_install_role }} if defined."
include_tasks: custom_role.yml
vars:
- application_custom_role: "{{ application_post_install_role }}"
+ application_custom_role: "{{ application_post_install_role | default('') }}"
diff --git a/ansible/roles/application/tasks/pre-install.yml b/ansible/roles/application/tasks/pre-install.yml
new file mode 100644
index 00000000..74f1548f
--- /dev/null
+++ b/ansible/roles/application/tasks/pre-install.yml
@@ -0,0 +1,5 @@
+---
+- name: "Execute custom role {{ application_pre_install_role }} if defined."
+ include_tasks: custom_role.yml
+ vars:
+ application_custom_role: "{{ application_pre_install_role | default('') }}"
diff --git a/ansible/roles/application-install/tasks/pre-install.yml b/ansible/roles/application/tasks/transfer-helm-charts.yml
index bf6619b0..0cd7c02f 100644
--- a/ansible/roles/application-install/tasks/pre-install.yml
+++ b/ansible/roles/application/tasks/transfer-helm-charts.yml
@@ -38,14 +38,7 @@
copy:
src: "{{ item.path }}"
dest: "{{ helm_home_dir.stdout }}/plugins"
- directory_mode: yes
+ directory_mode: true
mode: 0755
with_items: "{{ list_of_plugins.files }}"
- become: true
when: app_helm_plugins_directory is defined and app_helm_plugins_directory is not none
-
-- name: "Execute custom role {{ application_pre_install_role }} if defined."
- include_tasks: custom_role.yml
- vars:
- application_custom_role: "{{ application_pre_install_role }}"
-
diff --git a/ansible/roles/chrony/defaults/main.yml b/ansible/roles/chrony/defaults/main.yml
new file mode 100644
index 00000000..af433dac
--- /dev/null
+++ b/ansible/roles/chrony/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+timesync: {}
+chrony:
+ servers: "{{ timesync.servers | default([hostvars[groups.infrastructure[0]].cluster_ip]) }}" # chronyd's NTP servers
+ slewclock: "{{ timesync.slewclock | default(false) }}" # chronyd's makestep property
+ timezone: "{{ timesync.timezone | default('Universal') }}" # Timezone name according to tz database
+ makestep: '1 -1'
+  maxjitter: 10 # Max allowed jitter if using infra as time source as it may be unstable due to pretending to be a stratum 1 time source
+ initstepslew: 30
+ conf:
+ RedHat:
+ config_file: /etc/chrony.conf
+ driftfile: /var/lib/chrony/drift
+ Debian:
+ config_file: /etc/chrony/chrony.conf
+ driftfile: /var/lib/chrony/chrony.drift
diff --git a/ansible/roles/chrony/handlers/main.yml b/ansible/roles/chrony/handlers/main.yml
new file mode 100644
index 00000000..80ab9fa9
--- /dev/null
+++ b/ansible/roles/chrony/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart chronyd
+ systemd:
+ name: chronyd
+ state: restarted
diff --git a/ansible/roles/chrony/tasks/main.yml b/ansible/roles/chrony/tasks/main.yml
new file mode 100644
index 00000000..69a11587
--- /dev/null
+++ b/ansible/roles/chrony/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+- name: Check if server mode
+ set_fact:
+ chrony_mode: 'server'
+ when: "'infrastructure' in group_names and timesync.servers is not defined"
+
+- name: Check if client mode
+ set_fact:
+ chrony_mode: 'client'
+ when: "timesync.servers is defined or 'infrastructure' not in group_names"
+
+- name: "Upload chronyd {{ chrony_mode }} configuration"
+ template:
+ src: "chrony.conf.j2"
+ dest: "{{ chrony['conf'][ansible_os_family]['config_file'] }}"
+ notify: Restart chronyd
+
+- name: Ensure chronyd is enabled/running
+ systemd:
+ name: chronyd
+ state: started
+ enabled: true
+
+- name: Setup timezone
+ timezone:
+ name: "{{ chrony.timezone }}"
diff --git a/ansible/roles/chrony/templates/chrony.conf.j2 b/ansible/roles/chrony/templates/chrony.conf.j2
new file mode 100644
index 00000000..3bfb4e40
--- /dev/null
+++ b/ansible/roles/chrony/templates/chrony.conf.j2
@@ -0,0 +1,22 @@
+{% if chrony_mode == 'server' %}
+local stratum 1
+allow
+{% elif chrony_mode == 'client' %}
+{% for tserver in chrony.servers %}
+server {{ tserver }} iburst
+{% endfor %}
+{% if chrony.slewclock == false %}
+{# Step the time by default #}
+makestep {{ chrony.makestep }}
+{% else %}
+{# Slew the clock but step at boot time if time error larger than 30 seconds #}
+initstepslew {{ chrony.initstepslew }}{% for tserver in chrony.servers %} {{ tserver }}{% endfor %}
+
+{% endif %}
+{% if timesync.servers is not defined %}
+maxjitter {{ chrony.maxjitter }}
+{% endif %}
+{% endif %}
+driftfile {{ chrony['conf'][ansible_os_family]['driftfile'] }}
+rtcsync
+logdir /var/log/chrony
diff --git a/ansible/roles/docker/handlers/docker_restart.yml b/ansible/roles/docker/handlers/docker_restart.yml
new file mode 100644
index 00000000..8feb9a95
--- /dev/null
+++ b/ansible/roles/docker/handlers/docker_restart.yml
@@ -0,0 +1,18 @@
+---
+- name: Stopping docker systemd service
+ systemd:
+ name: docker
+ state: stopped
+ enabled: true
+
+# WA for docker bug, see OOM-1735
+- name: Ensure docker.sock cleaned properly
+ file:
+ state: absent
+ path: /var/run/docker.sock
+
+- name: Starting docker systemd service
+ systemd:
+ name: docker
+ state: started
+ enabled: true
diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml
index 5df47e8d..de4a52ac 100644
--- a/ansible/roles/docker/handlers/main.yml
+++ b/ansible/roles/docker/handlers/main.yml
@@ -1,6 +1,3 @@
---
- name: Restart Docker
- systemd:
- name: docker
- state: restarted
- enabled: yes
+ import_tasks: docker_restart.yml
diff --git a/ansible/roles/kubectl/defaults/main.yml b/ansible/roles/kubectl/defaults/main.yml
new file mode 100644
index 00000000..78c15c75
--- /dev/null
+++ b/ansible/roles/kubectl/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+kubectl_bin_dir: /usr/local/bin
+kube_directory: ~/.kube
+# Defaulting to rancher setup
+kube_server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id | mandatory }}/kubernetes:6443" \ No newline at end of file
diff --git a/ansible/roles/kubectl/tasks/main.yml b/ansible/roles/kubectl/tasks/main.yml
index 7c77c3c5..9ecb5c44 100644
--- a/ansible/roles/kubectl/tasks/main.yml
+++ b/ansible/roles/kubectl/tasks/main.yml
@@ -5,3 +5,13 @@
dest: "{{ kubectl_bin_dir }}/kubectl"
remote_src: true
mode: 0755
+
+- name: Ensure .kube directory exists
+ file:
+ path: "{{ kube_directory }}"
+ state: directory
+
+- name: Create kube config
+ template:
+ src: kube_config.j2
+ dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/rancher/templates/kube_config.j2 b/ansible/roles/kubectl/templates/kube_config.j2
index 87f332e6..586c59d4 100644
--- a/ansible/roles/rancher/templates/kube_config.j2
+++ b/ansible/roles/kubectl/templates/kube_config.j2
@@ -4,7 +4,7 @@ clusters:
- cluster:
api-version: v1
insecure-skip-tls-verify: true
- server: "https://{{ ansible_host }}:8080/r/projects/{{ k8s_env_id }}/kubernetes:6443"
+ server: "{{ kube_server }}"
name: "{{ app_name }}"
contexts:
- context:
diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2
index ff9d2a9c..9860a168 100644
--- a/ansible/roles/nginx/templates/nginx.conf.j2
+++ b/ansible/roles/nginx/templates/nginx.conf.j2
@@ -5,6 +5,7 @@ events {
}
http {
+ server_names_hash_bucket_size 64;
error_log /var/log/nginx/error.log debug;
access_log /var/log/nginx/access.log;
diff --git a/ansible/roles/package-repository-check/tasks/RedHat.yml b/ansible/roles/package-repository-check/tasks/RedHat.yml
new file mode 100644
index 00000000..ed496f99
--- /dev/null
+++ b/ansible/roles/package-repository-check/tasks/RedHat.yml
@@ -0,0 +1,20 @@
+---
+- name: verify
+ block:
+ # Clean cache prior to refreshing
+ - name: Clean yum cache
+ command: yum clean all
+ args:
+ warn: false
+ # Refresh cache to ensure repo is reachable
+ - name: Update yum cache
+ yum:
+ update_cache: yes
+ state: latest
+ tags:
+ - skip_ansible_lint # Prevent '[403] Package installs should not use latest' ansible lint task rule
+ rescue:
+ - name: Fail if yum cache updating failed
+ fail:
+ msg: "Couldn't refresh yum cache, repositories not configured properly. Check ansible logs for details."
+ become: true
diff --git a/ansible/roles/package-repository-check/tasks/main.yml b/ansible/roles/package-repository-check/tasks/main.yml
new file mode 100644
index 00000000..ac63eceb
--- /dev/null
+++ b/ansible/roles/package-repository-check/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# Purpose of this role is to check whether configured repositories are working.
+#
+# Successful prior invocation of 'package-repository' role on 'infrastructure' hosts
+# is a prerequisite for playing this one on the 'infrastructure' group.
+#
+# Successful prior invocation of 'package-repository' and 'nginx' roles on infrastructure hosts
+# is a prerequisite for playing this one on the 'kubernetes' group.
+
+# Set of tasks designed to fail fast if configured repos are not functioning properly
+- include_tasks: "{{ ansible_os_family }}.yml"
+ when: ansible_os_family == 'RedHat'
diff --git a/ansible/roles/rancher/defaults/main.yml b/ansible/roles/rancher/defaults/main.yml
new file mode 100644
index 00000000..8edc5180
--- /dev/null
+++ b/ansible/roles/rancher/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+rancher_server_url: "http://{{ hostvars[groups.infrastructure.0].ansible_host }}:8080"
+rancher_remove_other_env: true
+rancher_redeploy_k8s_env: true
diff --git a/ansible/roles/rancher/tasks/main.yml b/ansible/roles/rancher/tasks/main.yml
index 1370a39f..045363d0 100644
--- a/ansible/roles/rancher/tasks/main.yml
+++ b/ansible/roles/rancher/tasks/main.yml
@@ -1,2 +1,2 @@
---
-- include_tasks: "rancher_{{ rancher_role }}.yml"
+- include_tasks: "rancher_{{ mode }}.yml"
diff --git a/ansible/roles/rancher/tasks/rancher_agent.yml b/ansible/roles/rancher/tasks/rancher_agent.yml
index 4c9cb8dd..091503c7 100644
--- a/ansible/roles/rancher/tasks/rancher_agent.yml
+++ b/ansible/roles/rancher/tasks/rancher_agent.yml
@@ -7,7 +7,7 @@
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
- "/var/lib/rancher:/var/lib/rancher"
- auto_remove: yes
- privileged: yes
+ auto_remove: true
+ privileged: true
vars:
server_hostvars: "{{ hostvars[groups.infrastructure.0] }}"
diff --git a/ansible/roles/rancher/tasks/rancher_health.yml b/ansible/roles/rancher/tasks/rancher_health.yml
new file mode 100644
index 00000000..b0323739
--- /dev/null
+++ b/ansible/roles/rancher/tasks/rancher_health.yml
@@ -0,0 +1,8 @@
+---
+- name: Check cluster health
+ uri:
+ url: "{{ rancher_server_url }}/v2-beta/projects/{{ k8s_env_id }}"
+ register: env_info
+ retries: 30
+ delay: 15
+ until: "env_info.json.healthState == 'healthy'"
diff --git a/ansible/roles/rancher/tasks/rancher_server.yml b/ansible/roles/rancher/tasks/rancher_server.yml
index 64b35e4c..ebee6cc7 100644
--- a/ansible/roles/rancher/tasks/rancher_server.yml
+++ b/ansible/roles/rancher/tasks/rancher_server.yml
@@ -2,7 +2,7 @@
# DO NOT ADD SPACE AROUND ';'
- name: Start rancher/server:{{ rancher_server_version }}
docker_container:
- name: rancher_server
+ name: rancher-server
image: rancher/server:{{ rancher_server_version }}
command: ["sh", "-c", "/usr/sbin/update-ca-certificates;/usr/bin/entry /usr/bin/s6-svscan /service"]
ports: 8080:8080
@@ -19,7 +19,7 @@
delay: 30
until: not response.failed
-- name: Create kubernetes environment
+- name: Create rancher kubernetes environment
rancher_k8s_environment:
name: "{{ app_name }}"
descr: "Kubernetes environment for {{ app_name }}"
@@ -39,13 +39,3 @@
key_private: "{{ env.data.apikey.private }}"
rancher_agent_image: "{{ env.data.registration_tokens.image }}"
rancher_agent_reg_url: "{{ env.data.registration_tokens.reg_url }}"
-
-- name: Ensure .kube directory exists
- file:
- path: "{{ kube_directory }}"
- state: directory
-
-- name: Create kube config
- template:
- src: kube_config.j2
- dest: "{{ kube_directory }}/config"
diff --git a/ansible/roles/resource-data/tasks/main.yml b/ansible/roles/resource-data/tasks/main.yml
index 41046d81..023a160e 100644
--- a/ansible/roles/resource-data/tasks/main.yml
+++ b/ansible/roles/resource-data/tasks/main.yml
@@ -3,23 +3,34 @@
block:
- name: Check if source dir and files are present
stat:
- path: "{{ item.source }}"
+ path: "{{ item.source_dir }}/{{ item.source_filename }}"
get_checksum: false
loop:
- - { source: "{{ resources_dir }}/{{ resources_filename | default('thisdoesnotexists', true) }}",
- target: "{{ app_data_path }}/{{ resources_filename | default('thisdoesnotexists', true) }}" }
- - { source: "{{ resources_dir }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}",
- target: "{{ aux_data_path }}/{{ aux_resources_filename | default('thisdoesnotexists', true) }}" }
+ - { source_dir: "{{ resources_dir | default('', true) }}",
+ source_filename: "{{ resources_filename | default('', true) }}",
+ target_dir: "{{ app_data_path | default('', true) }}",
+ target_filename: "{{ resources_filename | default('', true) }}" }
+
+ - { source_dir: "{{ resources_dir | default('', true) }}",
+ source_filename: "{{ aux_resources_filename | default('', true) }}",
+ target_dir: "{{ aux_data_path | default('', true) }}",
+ target_filename: "{{ aux_resources_filename | default('', true) }}" }
register: source_path
+ when:
+ - item.source_dir | length > 0
+ - item.source_filename | length > 0
+ - item.target_dir | length > 0
+ - item.target_filename | length > 0
- name: Create initial resources list of dicts
set_fact:
to_be_uploaded_resources_list: "{{
to_be_uploaded_resources_list | default([]) + [
- {'file': item.item.target | basename,
- 'destination_dir': item.item.target | dirname } ] }}"
+ {'file': item.item.target_filename,
+ 'destination_dir': item.item.target_dir } ] }}"
loop: "{{ source_path.results }}"
- when: item.stat.exists
+ when: item.stat is defined
+ failed_when: not item.stat.exists
when: inventory_hostname in groups.resources
- name: "Upload resource files {{ hostvars[groups.resources.0].to_be_uploaded_resources_list }} to infrastructure"
diff --git a/ansible/test/images/docker/build-all.sh b/ansible/test/images/docker/build-all.sh
new file mode 100755
index 00000000..dd5db093
--- /dev/null
+++ b/ansible/test/images/docker/build-all.sh
@@ -0,0 +1,29 @@
+#! /usr/bin/env bash
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2019 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+BUILD_SCRIPT=${1:-build.sh}
+
+# Run all build scripts in direct subdirectories
+for buildfile in $(find -mindepth 2 -maxdepth 2 -name ${BUILD_SCRIPT});
+do
+ pushd $(dirname ${buildfile})
+ . ${BUILD_SCRIPT}
+ popd
+done
diff --git a/ansible/test/images/docker/centos7/Dockerfile b/ansible/test/images/docker/centos7/Dockerfile
new file mode 100644
index 00000000..8c024ab9
--- /dev/null
+++ b/ansible/test/images/docker/centos7/Dockerfile
@@ -0,0 +1,29 @@
+# https://github.com/chrismeyersfsu/provision_docker/tree/master/files
+FROM centos:centos7
+ENV container docker
+
+RUN yum -y update; yum clean all
+
+RUN systemctl mask dev-mqueue.mount dev-hugepages.mount \
+ systemd-remount-fs.service sys-kernel-config.mount \
+ sys-kernel-debug.mount sys-fs-fuse-connections.mount \
+ display-manager.service graphical.target systemd-logind.service
+
+RUN yum -y install openssh-server sudo openssh-clients \
+ epel-release python-docker-py iproute
+RUN sed -i 's/#PermitRootLogin no/PermitRootLogin yes/' /etc/ssh/sshd_config
+RUN ssh-keygen -q -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa && \
+ ssh-keygen -q -f /etc/ssh/ssh_host_ecdsa_key -N '' -t ecdsa && \
+ ssh-keygen -q -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519
+RUN echo 'root:docker.io' | chpasswd
+RUN systemctl enable sshd.service
+
+# firewalld needs this .. and I needs my firewalld
+ADD dbus.service /etc/systemd/system/dbus.service
+RUN systemctl enable dbus.service
+
+VOLUME ["/run"]
+
+EXPOSE 22
+
+CMD ["/usr/sbin/init"]
diff --git a/ansible/test/images/docker/centos7/build.sh b/ansible/test/images/docker/centos7/build.sh
new file mode 100755
index 00000000..fe0aea66
--- /dev/null
+++ b/ansible/test/images/docker/centos7/build.sh
@@ -0,0 +1,22 @@
+#! /usr/bin/env bash
+
+# COPYRIGHT NOTICE STARTS HERE
+
+# Copyright 2018 © Samsung Electronics Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# COPYRIGHT NOTICE ENDS HERE
+
+set -e
+docker build . -t molecule-centos7:latest
diff --git a/ansible/test/images/docker/centos7/dbus.service b/ansible/test/images/docker/centos7/dbus.service
new file mode 100644
index 00000000..69dbb153
--- /dev/null
+++ b/ansible/test/images/docker/centos7/dbus.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=D-Bus System Message Bus
+Requires=dbus.socket
+After=syslog.target
+
+[Service]
+PIDFile=/var/run/messagebus.pid
+ExecStartPre=/bin/mkdir -p /var/run/dbus
+ExecStartPre=/bin/chmod g+w /var/run/ /var/run/dbus/
+ExecStart=/bin/dbus-daemon --system --fork
+ExecReload=/bin/dbus-send --print-reply --system --type=method_call --dest=org.freedesktop.DBus / org.freedesktop.DBus.ReloadConfig
+ExecStopPost=/bin/rm -f /var/run/messagebus.pid
+#OOMScoreAdjust=-900
+User=dbus
+Group=root
+PermissionsStartOnly=true
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index e91c7bd7..6ed9c328 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -122,7 +122,7 @@ Change the current directory to the ``'ansible'``::
You can see multiple files and directories inside - this is the *offline-installer*. It is implemented as a set of ansible playbooks.
-If you created the ``'sw'`` package according to the *Build Guide* then you should had have the ``'application'`` directory populated with at least the following files:
+If you created the ``'sw'`` package according to the *Build Guide* then you should have had the ``'application'`` directory populated with at least the following files:
- ``application_configuration.yml``
- ``hosts.yml``
@@ -250,6 +250,7 @@ Here, we will be interested in the following variables:
- ``app_data_path``
- ``aux_data_path``
- ``app_name``
+- ``timesync``
``'resource_dir'``, ``'resources_filename'`` and ``'aux_resources_filename'`` must correspond to the file paths on the *resource-host* (variable ``'resource_host'``), which is in our case the *install-server*.
@@ -259,14 +260,61 @@ The ``'resource_dir'`` should be set to ``'/data'``, ``'resources_filename'`` to
**NOTE:** As we mentioned in `Installer packages`_ - the auxiliary package is not mandatory and we will not utilize it in here either.
-The last variable ``'app_name'`` should be short and descriptive. We will set it simply to: ``onap``.
+The ``'app_name'`` variable should be short and descriptive. We will set it simply to: ``onap``.
-It can look all together something like this::
+The ``'timesync'`` variable is optional and controls synchronisation of the system clock on hosts. It should be configured only if a custom NTP server is available and needed. Such a time authority should be on a host reachable from all installation nodes. If this setting is not provided then the default behavior is to setup NTP daemon on infra-node and sync all kube-nodes' time with it.
+
+If you wish to provide your own NTP servers configure their IPs as follows::
+
+ timesync:
+ servers:
+ - <ip address of NTP_1>
+ - <...>
+ - <ip address of NTP_N>
+
+Other time-adjustment-related variables are ``'timesync.slewclock'`` and ``'timesync.timezone'``.
+The first one can have a value of ``'true'`` or ``'false'`` (default). It controls whether (in case of big time difference compared to server) time should be adjusted gradually by slowing down or speeding up the clock as required (``'true'``) or in one step (``'false'``)::
+
+ timesync:
+ slewclock: true
+
+The second one controls the time zone setting on the host. Its value should be a time zone name according to tz database names with ``'Universal'`` being the default one::
+
+    timesync:
+      timezone: UTC
+
+``'timesync.servers'``, ``'timesync.slewclock'`` and ``'timesync.timezone'`` settings can be used independently.
+
+Final configuration can resemble the following::
resources_dir: /data
resources_filename: offline-onap-3.0.1-resources.tar
app_data_path: /opt/onap
app_name: onap
+ timesync:
+ servers:
+ - 192.168.0.1
+ - 192.168.0.2
+ slewclock: true
+ timezone: UTC
+
+.. _oooi_installguide_config_appconfig_overrides:
+
+Helm chart value overrides
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If there is a need to change onap settings such as managed openstack credentials, service ports, or even docker image versions used, you can do this by putting settings under the ``overrides`` key in ``application_configuration.yml``.
+These settings will override helm values originally stored in ``values.yaml`` files in helm chart directories.
+
+For example, the following lines could be appended to ``application_configuration.yml`` to set up managed openstack credentials for onap's so component::
+
+ overrides:
+ so:
+ config:
+ openStackUserName: "os_user"
+ openStackRegion: "region_name"
+ openStackKeyStoneUrl: "keystone_url"
+ openStackEncryptedPasswordHere: "encrypted_password"
.. _oooi_installguide_config_ssh:
diff --git a/docs/TestingGuide.rst b/docs/TestingGuide.rst
new file mode 100644
index 00000000..caf3d927
--- /dev/null
+++ b/docs/TestingGuide.rst
@@ -0,0 +1,365 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2019 Samsung Electronics Co., Ltd.
+
+OOM ONAP Offline Installer Testing Guide
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This testing guide describes how offline installer can be tested in local
+development environment (laptop) without the need for actual servers.
+
+Documentation refers to files/directories in ``ansible`` directory of this repository.
+
+Introduction
+============
+
+Offline installer uses Molecule_ for testing all roles.
+
+Molecule is a tool for Ansible role development and testing. In this project
+Molecule is used for integration type of testing for both roles and playbooks.
+Role code is tested against simulated host.
+
+Molecule is designed to test single Ansible_ role in isolation. Offline installer however
+has many small roles that are dependent on each other and also execution order for roles
+is meaningful. In that respect Molecule's design does not offer sufficient level
+of testing as it's lacking playbook level of scenario testing by default.
+Luckily Molecule is highly configurable and it is possible to achieve a higher level of
+testing scenarios for the offline installer.
+
+Testing with Molecule is divided into two levels of testing:
+ 1) role level testing (as per Molecule design)
+ 2) playbook level testing (offline installer own setup)
+
+Purpose
+=======
+
+The purpose of using a testing framework like Molecule is to make it possible for a developer to
+verify Ansible code changes locally on their own laptop without the need for big resources.
+
+Developer is also expected to do development of the Ansible code and the Molecule test
+code at the same time.
+Offline installer does not have unittest level of testing for the ansible code.
+
+Any commit made to ansible code base needs to first pass Molecule tests before
+it's merged.
+
+Test levels
+===========
+
+To cover both testing levels (role and playbook) with maximum benefit and minimum
+copy-pasting, the testing code should be written in reusable way.
+
+Reusable test code can be achieved by writing all prepare/cleanup and other
+helping code as a roles into main test directory.
+Also testinfra_ test code can be shared between different roles and between different scenarios
+of one role.
+
+Testing of role and one scenario (one execution run of molecule) is fully
+defined by **molecule.yml** file.
+
+molecule.yml file is always located in directory:
+
+ <tested-role>/molecule/<scenario>/molecule.yml
+
+i.e. one role can have multiple scenarios (different configuration, OS etc. whatever user wants)
+to execute tests for same role. Each scenario has own molecule.yml file and own testinfra
+tests.
+
+Molecule.yml file is the only file that cannot be re-used (except with symbolic links) but
+all other resources can be reused by referencing those in molecule.yml file or/and indirectly
+from resources molecule.yml is pointing to.
+
+**tested-role** is clear in case of normal role level testing, but in playbook level testing the
+tested-role is just an invented role name and directory with molecule directory inside but no
+actual ansible role code.
+
+Role level testing
+------------------
+
+The target is to test single role in isolation just like Molecule is designed.
+Role level testing is supposed to cover:
+
+- Syntax checking (Yamllint_, `Ansible lint`_, flake8_)
+- Ansible code testing
+- Idempotence testing
+- Verifying role results from target hosts (testinfra tests)
+
+Ansible code testing can/should also cover all different options how this role
+can be run (`scenario <https://molecule.readthedocs.io/en/latest/configuration.html#root-scenario>`_).
+Different molecule runs can be implemented as own scenarios (in addition to default scenario)
+or default scenario playbook can be extended to run role tests multiple times just adjusting
+configuration between.
+
+Example with nexus role
+::
+
+ ├── infrastructure.yml
+ ├── roles
+ │   ├── nexus
+ │   │   ├── defaults
+ │   │   ├── files
+ │   │   ├── molecule
+ │   │   │   └── default
+ │   │   │   ├── molecule.yml
+ │   │   │   ├── playbook.yml
+ │   │   │   ├── prepare.yml
+ │   │   │   └── tests
+ │   │   ├── tasks
+ │   │   └── vars
+
+Playbook level testing
+----------------------
+
+Playbook level testing is this project's (offline installer) own
+setup and way of using Molecule. The target is to raise testing level
+from single role testing up to single playbook testing.
+
+Playbook level testing can be used also to run multiple playbooks and/or
+playbooks multiple times with different configuration.
+
+The aim is to verify multiple roles working together i.e. higher level of
+integration testing.
+
+Practically the **tested-role** is just a wrapper directory to conform
+molecule required directory structure and provide a name for the test.
+Directory itself does not contain any ansible role code, but just
+molecule files configured to run multiple other roles.
+
+Playbook level test directories should be named consistently according to
+tested playbook and prefix string ``play`` and with optional description
+if there are multiple scenarios for single playbook:
+
+ play-<playbookname>[-<description>]
+
+E.g.
+
+- ``play-infrastructure``
+- ``play-resources``
+
+As role's are tested with own molecule tests in isolation, playbook level tests
+should focus on integration of the roles and should avoid repeating the same tests
+as done already for individual roles.
+
+Playbook level testing is supposed to cover:
+ - Ansible code testing
+
+Basically it's easier to highlight what is supposed to be **avoided** in playbook level
+testing for the reason not to repeat the same that is done already in role level testing.
+
+- Syntax checking is left out already by default as molecule does linting only for the
+ role code where molecule is run, and in this case tested-role is empty.
+
+- Idempotence can be tested, but should be disabled (by default) in molecule.yml because
+ it takes too much time and was tested already for individual roles.
+
+- Verifying target hosts with testinfra tests can be done but then something else
+ should be tested as in role based tests. And if those 2 would overlap it's better
+ to leave them out.
+
+Example with infrastructure playbook level test files
+::
+
+ ├── infrastructure.yml
+ └── test
+ ├── play-infrastructure
+ │   └── molecule
+ │   └── default
+ │   ├── molecule.yml
+ │   ├── playbook.yml
+ │   ├── prepare.yml
+ │   └── tests
+
+Test code reuse and naming
+===========================
+
+As both testing levels test the same Ansible roles, there is a need
+to share common code for both of them.
+
+Testinfra_ Python code should be shared when also playbook level
+tests verify target hosts. However sharing is not limited only for the 2 test levels
+but also between different roles.
+
+Individual roles have testinfra tests in the directory:
+
+ roles/<role>/molecule/<scenario>/tests
+
+and any commonly usable testinfra Python code should be placed to directory:
+
+ test/testinfra
+
+Ansible role testing uses several resources defined by provisioner section of
+molecule.yml
+https://molecule.readthedocs.io/en/latest/configuration.html#provisioner
+
+Most common resources that are written for role testing are:
+
+- playbook.yml (mandatory but can include specific code)
+- prepare.yml
+- cleanup.yml
+- create.yml
+- destroy.yml
+
+all of which can be just placed to scenario directory together with playbook.yml
+(without editing molecule.yml when in default directory) and all of which can
+include ansible code to do something e.g. prepare role for testing.
+
+Example molecule files:
+
+Role level tests for nexus role:
+ - roles/nexus/molecule/default/molecule.yml
+ - roles/nexus/molecule/default/playbook.yml
+ - roles/nexus/molecule/default/prepare.yml
+playbook level tests for infrastructure playbook:
+ - test/play-infrastructure/molecule/default/molecule.yml
+ - test/play-infrastructure/molecule/default/playbook.yml
+ - test/play-infrastructure/molecule/default/prepare.yml
+
+Sharing all test code should be done by writing them in the form of ansible
+roles and placing commonly usable roles into:
+
+ test/roles/<testrole>
+
+Test roles should be named consistently according to the action needed and
+the role it's for, together with an optional description:
+
+ <action>-<role>[-<description>]
+
+Examples of commonly used test roles
+::
+
+ ├── infrastructure.yml
+ └── test
+ ├── play-infrastructure
+ └── roles
+ ├── post-certificates
+ ├── prepare-common
+ ├── prepare-dns
+ ├── prepare-docker
+ ├── prepare-nexus
+ └── prepare-nginx
+
+Molecule platform images
+========================
+
+Molecule can build images of the tested hosts on the fly with default
+Dockerfile template (docker driver) or from a Dockerfile provided by user.
+In case of Vagrant driver used box image can be also fully customized by user.
+
+To speed up testing it's preferred to pre-build needed images to be usable in
+local docker repository in case of docker driver or Vagrant image cache in case
+of Vagrant driver.
+
+Used Dockerfiles/Box definitions are kept in following directory structure
+::
+
+ └── test
+ └── images
+ ├── docker
+ │   ├── build-all.sh
+ │   ├── centos7
+ │   │   ├── build.sh
+ │   │   ├── dbus.service
+ │   │   └── Dockerfile
+ │   └── ubuntu
+ │   ├── build.sh
+ │   └── Dockerfile
+ └── vagrant
+
+Build images
+------------
+
+Build all platforms images before running Molecule tests. Building can be done
+with the following single command:
+
+ test/images/docker/build-all.sh
+
+Install
+=======
+
+Molecule can be installed in multiple ways and in this guide 2 different ways are
+covered.
+
+- Install Molecule with pip in virtual environment
+- Use Molecule provided docker container to run Molecule
+
+Install with pip
+----------------
+
+This is OS dependent and some prerequisites need to be installed, but after
+the prerequisites are installed, installing Molecule can be done by calling the following
+script:
+
+ source test/bin/install-molecule.sh
+
+As for the required OS packages, see example for Ubuntu in the install-molecule.sh
+script's comments or from Molecule_ pages.
+
+Note that sourcing the script is not needed to get Molecule installed but it leaves
+you already into virtual environment and ready to run Molecule.
+
+To get out from virtual environment issue:
+
+ deactivate
+
+And next time to activate virtual environment again before running Molecule, issue:
+
+ source ~/molecule_venv/bin/activate
+
+And here the directory ``~/molecule_venv`` is just the default virtual environment
+path that install-molecule.sh script is using and can be overridden with
+``VENV_PATH`` environment variable.
+
+Use Molecule docker container
+-----------------------------
+
+Molecule provides docker container images via quay.io_ where Molecule, Ansible
+and all needed dependencies are built into the image.
+
+In this way of using Molecule, no installation is needed and only docker is the
+prerequisite for running Molecule.
+
+For using provided image to test offline-installer roles, following scripts are
+provided:
+
+Build container image:
+ ``test/molecule-docker/build.sh``
+
+This will build image named ``molecule-dev`` with strict version tag.
+
+Set molecule into the PATH:
+ ``source test/bin/set_molecule_paths.sh``
+
+That will add the actual Molecule run wrapper script test/bin/molecule.sh to the PATH,
+usable from everywhere, similarly to Molecule installed with pip in a virtual environment.
+
+Run Molecule wrapper script:
+ ``test/bin/molecule.sh``
+
+For running Molecule using the ``molecule-dev`` image and the exact version defined by
+the test/molecule-docker/build.sh script.
+
+Usage
+=====
+
+Basic usage of molecule tests. See more detailed instructions from Molecule_
+
+Run complete testing for a role or a playbook:
+
+1. cd roles/<role> or cd test/play-<playbook-name>
+2. molecule test
+
+Develop a role code and run testing during the coding:
+
+1. cd roles/<role>
+2. Edit ansible code and molecule test code when needed
+3. molecule converge
+4. Repeat steps 2 and 3 until code is ready and molecule tests are passing
+5. molecule test
+
+.. _Molecule: https://molecule.readthedocs.io
+.. _quay.io: https://quay.io/repository/ansible/molecule
+.. _Testinfra: https://testinfra.readthedocs.io
+.. _Flake8: http://flake8.pycqa.org
+.. _Yamllint: https://github.com/adrienverge/yamllint
+.. _Ansible Lint: https://github.com/ansible/ansible-lint
+.. _Ansible: https://www.ansible.com/
diff --git a/docs/index.rst b/docs/index.rst
index a43eedf5..5275cf46 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -8,4 +8,4 @@ OOM offline-installer
BuildGuide.rst
InstallGuide.rst
-
+ TestingGuide.rst