-rwxr-xr-x | ansible/docker/run_chroot.sh | 420
-rw-r--r-- | ansible/roles/docker/handlers/docker_restart.yml | 18
-rw-r--r-- | ansible/roles/docker/handlers/main.yml | 5
-rw-r--r-- | ansible/roles/nfs/.yamllint | 11
-rw-r--r-- | ansible/roles/nfs/handlers/main.yml | 3
-rw-r--r-- | ansible/roles/nfs/molecule/default/molecule.yml | 44
-rw-r--r-- | ansible/roles/nfs/molecule/default/playbook.yml | 5
-rw-r--r-- | ansible/roles/nfs/molecule/default/tests/test_default.py | 16
-rw-r--r-- | ansible/roles/nfs/molecule/default/tests/test_kubernetes-server.py | 15
-rw-r--r-- | ansible/roles/nfs/molecule/default/tests/test_nfs-server.py | 25
-rw-r--r-- | ansible/roles/nfs/tasks/main.yml | 15
-rw-r--r-- | ansible/roles/nfs/templates/exports.j2 | 4
-rwxr-xr-x | ansible/test/images/docker/build-all.sh | 29
-rw-r--r-- | ansible/test/images/docker/centos7/Dockerfile | 29
-rwxr-xr-x | ansible/test/images/docker/centos7/build.sh | 22
-rw-r--r-- | ansible/test/images/docker/centos7/dbus.service | 16
-rw-r--r-- | docs/TestingGuide.rst | 365
-rw-r--r-- | docs/index.rst | 2
18 files changed, 769 insertions(+), 275 deletions(-)
diff --git a/ansible/docker/run_chroot.sh b/ansible/docker/run_chroot.sh index 3359fdcd..8ae9c188 100755 --- a/ansible/docker/run_chroot.sh +++ b/ansible/docker/run_chroot.sh @@ -22,7 +22,6 @@ set -e CMD=$(basename "$0") -UMOUNT_TIMEOUT=120 # 2mins # @@ -106,31 +105,18 @@ EXAMPLE: " } -# arg: <directory> -is_mounted() -{ - mountpoint=$(echo "$1" | sed 's#//*#/#g') - - LANG=C mount | grep -q "^[^[:space:]]\+[[:space:]]\+on[[:space:]]\+${mountpoint}[[:space:]]\+type[[:space:]]\+" -} - # layers are right to left! First is on the right, top/last is on the left do_overlay_mount() { - if [ -d "$overlay" ] && is_mounted "$overlay" ; then - echo ERROR: "The overlay directory is already mounted: $overlay" >&2 - echo ERROR: "Fix the issue - cannot proceed" >&2 - exit 1 - fi - # prepare dirs - rm -rf "$overlay" "$upperdir" "$workdir" +mkdir -p $ovtempdir +mount -t tmpfs -o mode=0755 tmpfs $ovtempdir mkdir -p "$overlay" mkdir -p "$upperdir" mkdir -p "$workdir" # finally overlay mount - if ! mount -t overlay --make-rprivate \ + if ! mount -t overlay \ -o lowerdir="$lowerdir",upperdir="$upperdir",workdir="$workdir" \ overlay "$overlay" ; then @@ -147,40 +133,16 @@ do_overlay_mount() return 0 } -cleanup() -{ - case "$OVERLAY_MOUNT" in - yes) - echo INFO: "Umounting overlay..." >&2 - if ! umount_retry "$CHROOT_DIR" ; then - echo ERROR: "Cannot umount chroot: $CHROOT_DIR" >&2 - return 1 - fi - - ;; - no) - echo INFO: "No overlay to umount" >&2 - ;; - esac - - if ! is_mounted "$overlay" ; then - echo INFO: "Deleting of temp directories..." >&2 - rm -rf "$overlay" "$upperdir" "$workdir" - else - echo ERROR: "Overlay is still mounted: $CHROOT_DIR" >&2 - echo ERROR: "Cannot delete: $overlay" >&2 - echo ERROR: "Cannot delete: $upperdir" >&2 - echo ERROR: "Cannot delete: $workdir" >&2 - return 1 - fi -} - check_external_mounts() { - echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do + echo "$EXTERNAL_MOUNTS" | while read -r mountexpr ; do + #Skip empty lines, done with if for readability. + if [ -z $mountexpr ]; then + continue + fi mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}') external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}') - internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g') + internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}') case "$mount_type" in ro|rw) @@ -203,16 +165,13 @@ check_external_mounts() do_external_mounts() { echo INFO: "Bind mounting of external mounts..." >&2 - echo "$EXTERNAL_MOUNTS" | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do + echo "$EXTERNAL_MOUNTS" | while read -r mountexpr ; do + if [ -z $mountexpr ]; then + continue + fi mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}') external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}') - internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g') - - if is_mounted "${CHROOT_DIR}/${internal}" ; then - echo ERROR: "Mountpoint is already mounted: ${CHROOT_DIR}/${internal}" >&2 - echo ERROR: "Fix the issue - cannot proceed" >&2 - exit 1 - fi + internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}') # trying to follow the behaviour of docker if ! [ -e "$external" ] || [ -d "$external" ] ; then @@ -242,7 +201,9 @@ do_external_mounts() exit 1 fi - if ! mount --make-rprivate -o bind,${mount_type} "$external" "${CHROOT_DIR}/${internal}" ; then +#Note, this double mounting is needed to support older util-linux. + if ! 
mount -o bind "${external}" "${CHROOT_DIR}/${internal}" || + ! mount -o remount,bind,${mount_type} "${CHROOT_DIR}/${internal}" ; then echo ERROR: "Failed to mount: ${external} -> ${internal}" >&2 exit 1 else @@ -251,231 +212,166 @@ do_external_mounts() done } -# arg: <mountpoint> -umount_retry() -{ - mountpoint=$(echo "$1" | sed 's#//*#/#g') - timeout=${UMOUNT_TIMEOUT} - - umount "$mountpoint" 2>/dev/null - while is_mounted "$mountpoint" && [ $timeout -gt 0 ] ; do - umount "$mountpoint" 2>/dev/null - sleep 1 - timeout=$(( timeout - 1 )) - done - - if ! is_mounted "$mountpoint" ; then - return 0 - fi - - return 1 -} - -undo_external_mounts() -{ - echo INFO: "Umount external mount points..." >&2 - echo "$EXTERNAL_MOUNTS" | tac | sed '/^[[:space:]]*$/d' | while read -r mountexpr ; do - mount_type=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $1;}') - external=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $2;}') - internal=$(echo "$mountexpr" | awk 'BEGIN{FS=":"}{print $3;}' | sed -e 's#^/*##' -e 's#//*#/#g') - if umount_retry "${CHROOT_DIR}/${internal}" ; then - echo INFO: "Unmounted: ${CHROOT_DIR}/${internal}" >&2 - else - echo ERROR: "Failed to umount: ${CHROOT_DIR}/${internal}" >&2 - fi - done -} - -install_wrapper() -{ - cat > "$CHROOT_DIR"/usr/local/bin/fakeshell.sh <<EOF -#!/bin/sh - -PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin -export PATH - -gid_tty=\$(getent group | sed -n '/^tty:/p' | cut -d: -f 3) - -mount -t proc proc /proc -mount -t sysfs none /sys -mount -t tmpfs none /dev - -mkdir -p /dev/shm -mkdir -p /dev/pts -mount -t devpts -o gid=\${gid_tty},mode=620 none /dev/pts - -[ -e /dev/full ] || mknod -m 666 /dev/full c 1 7 -[ -e /dev/ptmx ] || mknod -m 666 /dev/ptmx c 5 2 -[ -e /dev/random ] || mknod -m 644 /dev/random c 1 8 -[ -e /dev/urandom ] || mknod -m 644 /dev/urandom c 1 9 -[ -e /dev/zero ] || mknod -m 666 /dev/zero c 1 5 -[ -e /dev/tty ] || mknod -m 666 /dev/tty c 5 0 -[ -e /dev/console ] || mknod -m 622 /dev/console c 5 1 -[ -e /dev/null ] || mknod -m 666 /dev/null c 1 3 - -chown root:tty /dev/console -chown root:tty /dev/ptmx -chown root:tty /dev/tty - -mkdir -p "\$1" || exit 1 -cd "\$1" || exit 1 -shift - -exec "\$@" - -EOF - chmod +x "$CHROOT_DIR"/usr/local/bin/fakeshell.sh -} - -on_exit() -{ - set +e - echo - - if [ -n "$OVERLAY_MOUNT" ] ; then - undo_external_mounts - fi - cleanup -} # -# parse arguments +# parse arguments out of namespace. 
# -state=nil -action=nil -EXTERNAL_MOUNTS='' -CHROOT_WORKDIR='' -CHROOT_METADIR='' -CHROOT_DIR='' -COMMAND='' -while [ -n "$1" ] ; do - case "$state" in - nil) - case "$1" in - ''|-h|--help|help) - help - exit 0 - ;; - --mount) - EXTERNAL_MOUNTS=$(printf "%s\n%s\n" "$EXTERNAL_MOUNTS" "${2}") - state=next - ;; - --workdir) - if [ -z "$CHROOT_WORKDIR" ] ; then - CHROOT_WORKDIR="$2" +if [ -z $IN_NAMESPACE ]; then + export state=nil + export action=nil + export EXTERNAL_MOUNTS='' + export CHROOT_WORKDIR='' + export CHROOT_METADIR='' + export CHROOT_DIR='' + export COMMAND='' + while [ -n "$1" ] ; do + case "$state" in + nil) + case "$1" in + ''|-h|--help|help) + help + exit 0 + ;; + --mount) + EXTERNAL_MOUNTS=$(printf "%s\n%s" "$EXTERNAL_MOUNTS" "${2}") state=next - else - echo ERROR: "Multiple working directory argument" >&2 + ;; + --workdir) + if [ -z "$CHROOT_WORKDIR" ] ; then + CHROOT_WORKDIR="$2" + state=next + else + echo ERROR: "Multiple working directory argument" >&2 + help >&2 + exit 1 + fi + ;; + execute) + action=execute + state=execute + ;; + *) + echo ERROR: "Bad usage" >&2 help >&2 exit 1 - fi - ;; - execute) - action=execute - state=execute - ;; - *) - echo ERROR: "Bad usage" >&2 - help >&2 - exit 1 - ;; - esac - ;; - next) - state=nil - ;; - execute) - CHROOT_METADIR="$1" - shift - break - ;; - esac - shift -done - - -case "$action" in - ''|nil) + ;; + esac + ;; + next) + state=nil + ;; + execute) + CHROOT_METADIR="$1" + shift + break + ;; + esac + shift + done + + + if [ $action = "nil" ]; then echo ERROR: "Nothing to do - missing command" >&2 help >&2 exit 1 - ;; - execute) - # firstly do sanity checking ... - - if [ -z "$CHROOT_METADIR" ] ; then - echo ERROR: "Missing argument" >&2 - help >&2 - exit 1 - fi - - # making sure that CHROOT_METADIR is absolute path - CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR") - - if ! [ -d "$CHROOT_METADIR"/chroot ] ; then - echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2 - exit 1 - fi - - # check external mounts if there are any - check_external_mounts - - # check workdir - if [ -n "$CHROOT_WORKDIR" ] ; then - CHROOT_WORKDIR=$(echo "$CHROOT_WORKDIR" | sed -e 's#^/*##' -e 's#//*#/#g') - fi - - # we must be root - if [ "$(id -u)" -ne 0 ] ; then - echo ERROR: "Need to be root and you are not: $(id -nu)" >&2 - exit 1 - fi + fi - if ! which unshare >/dev/null 2>/dev/null ; then - echo ERROR: "'unshare' system command is missing - ABORT" >&2 - echo INFO: "Try to install 'util-linux' package" >&2 - exit 1 - fi + # do sanity checking ... - # ... sanity checking done + if [ -z "$CHROOT_METADIR" ] ; then + echo ERROR: "Missing argument" >&2 + help >&2 + exit 1 + fi - # setup paths - lowerdir="$CHROOT_METADIR"/chroot - upperdir="$CHROOT_METADIR"/.overlay - workdir="$CHROOT_METADIR"/.workdir - overlay="$CHROOT_METADIR"/.merged + # making sure that CHROOT_METADIR is absolute path + CHROOT_METADIR=$(readlink -f "$CHROOT_METADIR") - # set trap - trap on_exit QUIT TERM EXIT + if ! 
[ -d "$CHROOT_METADIR"/chroot ] ; then + echo ERROR: "Filepath does not exist: ${CHROOT_METADIR}/chroot" >&2 + exit 1 + fi - # mount overlay - OVERLAY_MOUNT='' - if do_overlay_mount ; then - # overlay chroot - OVERLAY_MOUNT=yes - else - # non overlay mount - OVERLAY_MOUNT=no - fi + # check external mounts if there are any + check_external_mounts - # do the user-specific mounts - do_external_mounts + # we must be root + if [ "$(id -u)" -ne 0 ] ; then + echo ERROR: "Need to be root and you are not: $(id -nu)" >&2 + exit 1 + fi - # I need this wrapper to do some setup inside the chroot... - install_wrapper + if ! which unshare >/dev/null 2>/dev/null ; then + echo ERROR: "'unshare' system command is missing - ABORT" >&2 + echo INFO: "Try to install 'util-linux' package" >&2 + exit 1 + fi - # execute chroot - if [ -n "$1" ] ; then - : - else - set -- /bin/sh -l - fi - unshare -mfpi --propagation private \ - chroot "$CHROOT_DIR" /usr/local/bin/fakeshell.sh "${CHROOT_WORKDIR:-/}" "$@" - ;; -esac + # ... sanity checking done + + #Reexec ourselves in new pid and mount namespace (isolate!). + #Note: newly executed shell will be pid1 in a new namespace. Killing it will kill + #every other process in the whole process tree with sigkill. That will in turn + #destroy namespaces and undo all mounts done previously. + IN_NAMESPACE=1 exec unshare -mpf "$0" "$@" +fi + +#We are namespaced. +# setup paths +lowerdir="$CHROOT_METADIR"/chroot +ovtempdir="$CHROOT_METADIR"/tmp +upperdir="$ovtempdir"/.overlay +workdir="$ovtempdir"/.workdir +overlay="$CHROOT_METADIR"/.merged + +#In case we are using a realy old unshare, make the whole tree into private mounts manually. +mount --make-rprivate / +#New mounts are private always from now on. + +do_overlay_mount + +# do the user-specific mounts +do_external_mounts + +#And setup api filesystems. +mount -t proc proc "${CHROOT_DIR}/proc" +mount -t sysfs none "${CHROOT_DIR}/sys" +mount -t tmpfs none "${CHROOT_DIR}/dev" + +mkdir -p "${CHROOT_DIR}/dev/shm" +mkdir -p "${CHROOT_DIR}/dev/pts" +mount -t devpts none "${CHROOT_DIR}/dev/pts" + +mknod -m 666 "${CHROOT_DIR}/dev/full" c 1 7 +mknod -m 666 "${CHROOT_DIR}/dev/ptmx" c 5 2 +mknod -m 644 "${CHROOT_DIR}/dev/random" c 1 8 +mknod -m 644 "${CHROOT_DIR}/dev/urandom" c 1 9 +mknod -m 666 "${CHROOT_DIR}/dev/zero" c 1 5 +mknod -m 666 "${CHROOT_DIR}/dev/tty" c 5 0 +mknod -m 622 "${CHROOT_DIR}/dev/console" c 5 1 +mknod -m 666 "${CHROOT_DIR}/dev/null" c 1 3 +ln -s /proc/self/fd/0 "$CHROOT_DIR/dev/stdin" +ln -s /proc/self/fd/1 "$CHROOT_DIR/dev/stdout" +ln -s /proc/self/fd/2 "$CHROOT_DIR/dev/stderr" + +# execute chroot +if [ -z "$1" ] ; then + set -- /bin/sh -l +fi + +#The redirection is to save our stdin, because we use it to pipe commands and we +#may want interactivity. +exec chroot "${CHROOT_DIR}" /bin/sh /dev/stdin "${CHROOT_WORKDIR:-/}" "$@" 3<&0 << "EOF" +PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +export PATH +mkdir -p $1 +cd $1 +shift +#I intend to reset stdin back *and* close the copy. 
+exec "$@" <&3 3<&- +EOF exit 0 diff --git a/ansible/roles/docker/handlers/docker_restart.yml b/ansible/roles/docker/handlers/docker_restart.yml new file mode 100644 index 00000000..8feb9a95 --- /dev/null +++ b/ansible/roles/docker/handlers/docker_restart.yml @@ -0,0 +1,18 @@ +--- +- name: Stopping docker systemd service + systemd: + name: docker + state: stopped + enabled: true + +# WA for docker bug, see OOM-1735 +- name: Ensure docker.sock cleaned properly + file: + state: absent + path: /var/run/docker.sock + +- name: Starting docker systemd service + systemd: + name: docker + state: started + enabled: true diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml index 5df47e8d..de4a52ac 100644 --- a/ansible/roles/docker/handlers/main.yml +++ b/ansible/roles/docker/handlers/main.yml @@ -1,6 +1,3 @@ --- - name: Restart Docker - systemd: - name: docker - state: restarted - enabled: yes + import_tasks: docker_restart.yml diff --git a/ansible/roles/nfs/.yamllint b/ansible/roles/nfs/.yamllint new file mode 100644 index 00000000..ad0be760 --- /dev/null +++ b/ansible/roles/nfs/.yamllint @@ -0,0 +1,11 @@ +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable + truthy: disable diff --git a/ansible/roles/nfs/handlers/main.yml b/ansible/roles/nfs/handlers/main.yml new file mode 100644 index 00000000..4b8deaeb --- /dev/null +++ b/ansible/roles/nfs/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: reload nfs + command: exportfs -ra diff --git a/ansible/roles/nfs/molecule/default/molecule.yml b/ansible/roles/nfs/molecule/default/molecule.yml new file mode 100644 index 00000000..552f4bcc --- /dev/null +++ b/ansible/roles/nfs/molecule/default/molecule.yml @@ -0,0 +1,44 @@ +--- +dependency: + name: galaxy +driver: + name: docker +lint: + name: yamllint +platforms: + - name: kubernetes-node-1 + image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos7}:${PREBUILD_DISTRO_VERSION:-latest} + pre_build_image: true + privileged: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + groups: + - kubernetes + - nfs-server + networks: + - name: nfs-net + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + - ${HOME}/data:/dockerdata-nfs:rw + + - name: kubernetes-node-2 + image: molecule-${PREBUILD_PLATFORM_DISTRO:-centos7}:${PREBUILD_DISTRO_VERSION:-latest} + pre_build_image: true + privileged: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + groups: + - kubernetes + networks: + - name: nfs-net + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro +provisioner: + name: ansible + inventory: + links: + group_vars: ../../../../group_vars + lint: + name: ansible-lint +verifier: + name: testinfra + lint: + name: flake8 diff --git a/ansible/roles/nfs/molecule/default/playbook.yml b/ansible/roles/nfs/molecule/default/playbook.yml new file mode 100644 index 00000000..37aad5b9 --- /dev/null +++ b/ansible/roles/nfs/molecule/default/playbook.yml @@ -0,0 +1,5 @@ +--- +- name: Converge + hosts: all + roles: + - role: nfs diff --git a/ansible/roles/nfs/molecule/default/tests/test_default.py b/ansible/roles/nfs/molecule/default/tests/test_default.py new file mode 100644 index 00000000..48139898 --- /dev/null +++ b/ansible/roles/nfs/molecule/default/tests/test_default.py @@ -0,0 +1,16 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +@pytest.mark.parametrize('pkg', [ + 
'nfs-utils' +]) +def test_pkg(host, pkg): + package = host.package(pkg) + + assert package.is_installed diff --git a/ansible/roles/nfs/molecule/default/tests/test_kubernetes-server.py b/ansible/roles/nfs/molecule/default/tests/test_kubernetes-server.py new file mode 100644 index 00000000..b702a73d --- /dev/null +++ b/ansible/roles/nfs/molecule/default/tests/test_kubernetes-server.py @@ -0,0 +1,15 @@ +import os + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('kubernetes-node-2') + + +def test_nfs_mount(host): + mp = host.mount_point("/dockerdata-nfs") + assert mp.exists + assert mp.filesystem == "nfs" + assert mp.device == "kubernetes-node-1:/dockerdata-nfs" + assert host.file("/etc/fstab").\ + contains("kubernetes-node-1:/dockerdata-nfs /dockerdata-nfs nfs") diff --git a/ansible/roles/nfs/molecule/default/tests/test_nfs-server.py b/ansible/roles/nfs/molecule/default/tests/test_nfs-server.py new file mode 100644 index 00000000..078c653e --- /dev/null +++ b/ansible/roles/nfs/molecule/default/tests/test_nfs-server.py @@ -0,0 +1,25 @@ +import os +import pytest + +import testinfra.utils.ansible_runner + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('nfs-server') + + +@pytest.mark.parametrize('svc', [ + 'rpcbind', + 'nfs-server' +]) +def test_svc(host, svc): + service = host.service(svc) + + assert service.is_running + assert service.is_enabled + + +def test_exports(host): + f = host.file("/etc/exports") + assert f.exists + assert f.content_string == \ + """/dockerdata-nfs kubernetes-node-2(rw,sync,no_root_squash,no_subtree_check)""" # noqa: E501 diff --git a/ansible/roles/nfs/tasks/main.yml b/ansible/roles/nfs/tasks/main.yml index aca1771f..e7580b66 100644 --- a/ansible/roles/nfs/tasks/main.yml +++ b/ansible/roles/nfs/tasks/main.yml @@ -9,7 +9,7 @@ package: name: "{{ item }}" state: present - with_items: "{{ nfs_packages[ansible_os_family] }}" + loop: "{{ nfs_packages[ansible_os_family] }}" - name: Setup nfs server block: @@ -17,22 +17,25 @@ systemd: name: "{{ item }}" state: started - with_items: "{{ nfs_services[ansible_os_family] }}" + enabled: true + loop: "{{ nfs_services[ansible_os_family] }}" - name: Add hosts to exports template: src: exports.j2 dest: /etc/exports - - - name: Export nfs - command: exportfs -ar + notify: + - reload nfs when: - "'nfs-server' in group_names" +- name: Force notified handlers to run at this point + meta: flush_handlers + - name: Mount dockerdata-nfs mount: path: "{{ nfs_mount_path }}" - src: "{{ hostvars[groups['nfs-server'].0].ansible_host }}:{{ nfs_mount_path }}" + src: "{{ hostvars[groups['nfs-server'].0].ansible_host | default(hostvars[groups['nfs-server'].0].inventory_hostname) }}:{{ nfs_mount_path }}" fstype: nfs state: mounted when: diff --git a/ansible/roles/nfs/templates/exports.j2 b/ansible/roles/nfs/templates/exports.j2 index 1f6956c2..465c9d21 100644 --- a/ansible/roles/nfs/templates/exports.j2 +++ b/ansible/roles/nfs/templates/exports.j2 @@ -1,3 +1,3 @@ -{% for host in groups.kubernetes[1:] -%} - {{ nfs_mount_path }} {{ hostvars[host].ansible_host }}(rw,sync,no_root_squash,no_subtree_check) +{% for host in groups.kubernetes | difference(groups['nfs-server']) -%} + {{ nfs_mount_path }} {{ hostvars[host].ansible_host | default(hostvars[host].inventory_hostname) }}(rw,sync,no_root_squash,no_subtree_check) {% endfor %} diff --git a/ansible/test/images/docker/build-all.sh 
b/ansible/test/images/docker/build-all.sh new file mode 100755 index 00000000..dd5db093 --- /dev/null +++ b/ansible/test/images/docker/build-all.sh @@ -0,0 +1,29 @@ +#! /usr/bin/env bash + +# COPYRIGHT NOTICE STARTS HERE + +# Copyright 2019 © Samsung Electronics Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# COPYRIGHT NOTICE ENDS HERE + +BUILD_SCRIPT=${1:-build.sh} + +# Run all build scripts in direct subdirectories +for buildfile in $(find -mindepth 2 -maxdepth 2 -name ${BUILD_SCRIPT}); +do + pushd $(dirname ${buildfile}) + . ${BUILD_SCRIPT} + popd +done diff --git a/ansible/test/images/docker/centos7/Dockerfile b/ansible/test/images/docker/centos7/Dockerfile new file mode 100644 index 00000000..8c024ab9 --- /dev/null +++ b/ansible/test/images/docker/centos7/Dockerfile @@ -0,0 +1,29 @@ +# https://github.com/chrismeyersfsu/provision_docker/tree/master/files +FROM centos:centos7 +ENV container docker + +RUN yum -y update; yum clean all + +RUN systemctl mask dev-mqueue.mount dev-hugepages.mount \ + systemd-remount-fs.service sys-kernel-config.mount \ + sys-kernel-debug.mount sys-fs-fuse-connections.mount \ + display-manager.service graphical.target systemd-logind.service + +RUN yum -y install openssh-server sudo openssh-clients \ + epel-release python-docker-py iproute +RUN sed -i 's/#PermitRootLogin no/PermitRootLogin yes/' /etc/ssh/sshd_config +RUN ssh-keygen -q -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa && \ + ssh-keygen -q -f /etc/ssh/ssh_host_ecdsa_key -N '' -t ecdsa && \ + ssh-keygen -q -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519 +RUN echo 'root:docker.io' | chpasswd +RUN systemctl enable sshd.service + +# firewalld needs this .. and I needs my firewalld +ADD dbus.service /etc/systemd/system/dbus.service +RUN systemctl enable dbus.service + +VOLUME ["/run"] + +EXPOSE 22 + +CMD ["/usr/sbin/init"] diff --git a/ansible/test/images/docker/centos7/build.sh b/ansible/test/images/docker/centos7/build.sh new file mode 100755 index 00000000..fe0aea66 --- /dev/null +++ b/ansible/test/images/docker/centos7/build.sh @@ -0,0 +1,22 @@ +#! /usr/bin/env bash + +# COPYRIGHT NOTICE STARTS HERE + +# Copyright 2018 © Samsung Electronics Co., Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# COPYRIGHT NOTICE ENDS HERE + +set -e +docker build . 
-t molecule-centos7:latest
diff --git a/ansible/test/images/docker/centos7/dbus.service b/ansible/test/images/docker/centos7/dbus.service
new file mode 100644
index 00000000..69dbb153
--- /dev/null
+++ b/ansible/test/images/docker/centos7/dbus.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=D-Bus System Message Bus
+Requires=dbus.socket
+After=syslog.target
+
+[Service]
+PIDFile=/var/run/messagebus.pid
+ExecStartPre=/bin/mkdir -p /var/run/dbus
+ExecStartPre=/bin/chmod g+w /var/run/ /var/run/dbus/
+ExecStart=/bin/dbus-daemon --system --fork
+ExecReload=/bin/dbus-send --print-reply --system --type=method_call --dest=org.freedesktop.DBus / org.freedesktop.DBus.ReloadConfig
+ExecStopPost=/bin/rm -f /var/run/messagebus.pid
+#OOMScoreAdjust=-900
+User=dbus
+Group=root
+PermissionsStartOnly=true
diff --git a/docs/TestingGuide.rst b/docs/TestingGuide.rst
new file mode 100644
index 00000000..caf3d927
--- /dev/null
+++ b/docs/TestingGuide.rst
@@ -0,0 +1,365 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. Copyright 2019 Samsung Electronics Co., Ltd.
+
+OOM ONAP Offline Installer Testing Guide
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This testing guide describes how the offline installer can be tested in a local
+development environment (laptop) without the need for actual servers.
+
+The documentation refers to files/directories in the ``ansible`` directory of this repository.
+
+Introduction
+============
+
+The offline installer uses Molecule_ for testing all roles.
+
+Molecule is a tool for Ansible role development and testing. In this project
+Molecule is used for integration-type testing of both roles and playbooks.
+Role code is tested against a simulated host.
+
+Molecule is designed to test a single Ansible_ role in isolation. The offline installer,
+however, has many small roles that depend on each other, and the execution order of the
+roles is meaningful. In that respect Molecule's design does not offer a sufficient level
+of testing, as it lacks playbook-level scenario testing by default.
+Luckily Molecule is highly configurable, and it is possible to achieve a higher level of
+testing scenarios for the offline installer.
+
+Testing with Molecule is divided into two levels of testing:
+ 1) role level testing (as per Molecule design)
+ 2) playbook level testing (offline installer's own setup)
+
+Purpose
+=======
+
+The purpose of using a testing framework like Molecule is to make it possible for a developer
+to verify Ansible code changes locally on their own laptop without the need for big resources.
+
+Developers are also expected to write the Ansible code and the Molecule test
+code at the same time.
+The offline installer does not have unit-test-level testing for the Ansible code.
+
+Any commit made to the Ansible code base needs to pass the Molecule tests before
+it is merged.
+
+Test levels
+===========
+
+To cover both testing levels (role and playbook) with maximum benefit and minimum
+copy-pasting, the testing code should be written in a reusable way.
+
+Reusable test code can be achieved by writing all prepare/cleanup and other
+helping code as roles in the main test directory.
+testinfra_ test code can also be shared between different roles and between different
+scenarios of one role.
+
+Testing of a role and one scenario (one execution run of Molecule) is fully
+defined by the **molecule.yml** file.
+
+The molecule.yml file is always located in the directory:
+
+    <tested-role>/molecule/<scenario>/molecule.yml
+
+i.e. one role can have multiple scenarios (different configuration, OS, or whatever the
+user wants) to execute tests for the same role. Each scenario has its own molecule.yml
+file and its own testinfra tests.
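+
+For example, assuming a role defined an additional scenario named ``centos7``
+next to the ``default`` one, a single scenario could be selected with Molecule's
+``-s``/``--scenario-name`` option::
+
+    cd roles/<role>
+    # run only the centos7 scenario (the scenario name here is an example)
+    molecule test -s centos7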
+
+The molecule.yml file is the only file that cannot be re-used (except with symbolic links),
+but all other resources can be reused by referencing them in the molecule.yml file and/or
+indirectly from the resources molecule.yml points to.
+
+The **tested-role** is clear in case of normal role level testing, but in playbook level
+testing the tested-role is just an invented role name and a directory with a molecule
+directory inside, but no actual Ansible role code.
+
+Role level testing
+------------------
+
+The target is to test a single role in isolation, just as Molecule is designed to do.
+Role level testing is supposed to cover:
+
+- Syntax checking (Yamllint_, `Ansible lint`_, flake8_)
+- Ansible code testing
+- Idempotence testing
+- Verifying role results on the target hosts (testinfra tests)
+
+Ansible code testing can/should also cover all the different options with which the role
+can be run (`scenario <https://molecule.readthedocs.io/en/latest/configuration.html#root-scenario>`_).
+Different Molecule runs can be implemented as their own scenarios (in addition to the
+default scenario), or the default scenario playbook can be extended to run the role tests
+multiple times, just adjusting the configuration in between.
+
+Example with the nexus role
+::
+
+    ├── infrastructure.yml
+    ├── roles
+    │   ├── nexus
+    │   │   ├── defaults
+    │   │   ├── files
+    │   │   ├── molecule
+    │   │   │   └── default
+    │   │   │       ├── molecule.yml
+    │   │   │       ├── playbook.yml
+    │   │   │       ├── prepare.yml
+    │   │   │       └── tests
+    │   │   ├── tasks
+    │   │   └── vars
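+
+The role level checks listed above can also be exercised one by one during
+development. A possible sequence for the nexus role is sketched below; the exact
+subcommand set depends on the installed Molecule version::
+
+    cd roles/nexus
+    molecule lint         # Yamllint, Ansible Lint, flake8
+    molecule converge     # run the role against the test platform
+    molecule idempotence  # re-run and expect zero changes
+    molecule verify       # run the testinfra tests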
+
+Playbook level testing
+----------------------
+
+Playbook level testing is this project's (offline installer's) own
+setup and way of using Molecule. The target is to raise the testing level
+from single role testing up to single playbook testing.
+
+Playbook level testing can also be used to run multiple playbooks and/or
+playbooks multiple times with different configurations.
+
+The aim is to verify multiple roles working together, i.e. a higher level of
+integration testing.
+
+Practically, the **tested-role** is just a wrapper directory to conform to the
+directory structure Molecule requires and to provide a name for the test.
+The directory itself does not contain any Ansible role code, just
+Molecule files configured to run multiple other roles.
+
+Playbook level test directories should be named consistently after the
+tested playbook, with the prefix string ``play`` and an optional description
+if there are multiple scenarios for a single playbook:
+
+    play-<playbookname>[-<description>]
+
+E.g.
+
+- ``play-infrastructure``
+- ``play-resources``
+
+As roles are tested with their own Molecule tests in isolation, playbook level tests
+should focus on the integration of the roles and should avoid repeating the tests
+already done for the individual roles.
+
+Playbook level testing is supposed to cover:
+ - Ansible code testing
+
+Basically it is easier to highlight what is supposed to be **avoided** in playbook level
+testing, so as not to repeat what is already done in role level testing.
+
+- Syntax checking is left out by default, as Molecule lints only the
+  role code where Molecule is run, and in this case the tested-role is empty.
+
+- Idempotence can be tested, but should be disabled (by default) in molecule.yml because
+  it takes too much time and is already tested for the individual roles.
+
+- Verifying target hosts with testinfra tests can be done, but then something other than
+  what the role based tests already cover should be tested. If the two would overlap,
+  it is better to leave them out.
+
+Example with infrastructure playbook level test files
+::
+
+    ├── infrastructure.yml
+    └── test
+        ├── play-infrastructure
+        │   └── molecule
+        │       └── default
+        │           ├── molecule.yml
+        │           ├── playbook.yml
+        │           ├── prepare.yml
+        │           └── tests
+
+Test code reuse and naming
+==========================
+
+As both testing levels test the same Ansible roles, there is a need
+to share common code between them.
+
+Testinfra_ Python code should be shared when playbook level tests also verify
+target hosts. However, sharing is not limited to the two test levels;
+code can also be shared between different roles.
+
+An individual role has its testinfra tests in the directory:
+
+    roles/<role>/molecule/<scenario>/tests
+
+and any commonly usable testinfra Python code should be placed in the directory:
+
+    test/testinfra
+
+Ansible role testing uses several resources defined by the provisioner section of
+molecule.yml, see
+https://molecule.readthedocs.io/en/latest/configuration.html#provisioner
+
+The most common resources written for role testing are:
+
+- playbook.yml (mandatory but can include specific code)
+- prepare.yml
+- cleanup.yml
+- create.yml
+- destroy.yml
+
+all of which can simply be placed in the scenario directory together with playbook.yml
+(without editing molecule.yml when in the default directory), and all of which can
+include Ansible code to do something, e.g. prepare the role for testing.
+
+Example molecule files:
+
+Role level tests for the nexus role:
+ - roles/nexus/molecule/default/molecule.yml
+ - roles/nexus/molecule/default/playbook.yml
+ - roles/nexus/molecule/default/prepare.yml
+
+Playbook level tests for the infrastructure playbook:
+ - test/play-infrastructure/molecule/default/molecule.yml
+ - test/play-infrastructure/molecule/default/playbook.yml
+ - test/play-infrastructure/molecule/default/prepare.yml
+
+Shared test code should be written in the form of Ansible roles,
+with commonly usable roles placed into:
+
+    test/roles/<testrole>
+
+Test roles should be named consistently according to the action needed and the
+role they are for, together with an optional description:
+
+    <action>-<role>[-<description>]
+
+Examples of commonly used test roles
+::
+
+    ├── infrastructure.yml
+    └── test
+        ├── play-infrastructure
+        └── roles
+            ├── post-certificates
+            ├── prepare-common
+            ├── prepare-dns
+            ├── prepare-docker
+            ├── prepare-nexus
+            └── prepare-nginx
+
+Molecule platform images
+========================
+
+Molecule can build images of the tested hosts on the fly with the default
+Dockerfile template (docker driver) or from a Dockerfile provided by the user.
+In case of the Vagrant driver, the used box image can also be fully customized by the user.
+
+To speed up testing it is preferred to pre-build the needed images, so that they are
+available in the local docker repository (docker driver) or the Vagrant image cache
+(Vagrant driver).
+
+The used Dockerfiles/box definitions are kept in the following directory structure
+::
+
+    └── test
+        └── images
+            ├── docker
+            │   ├── build-all.sh
+            │   ├── centos7
+            │   │   ├── build.sh
+            │   │   ├── dbus.service
+            │   │   └── Dockerfile
+            │   └── ubuntu
+            │       ├── build.sh
+            │       └── Dockerfile
+            └── vagrant
+
+Build images
+------------
+
+Build all platform images before running Molecule tests. Building can be done
+with the following single command:
+
+    test/images/docker/build-all.sh
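+
+The platform images are referenced from molecule.yml through environment
+variables (see the nfs role scenario), so the platform under test can be
+switched at run time. Assuming an Ubuntu image has also been built and tagged,
+e.g. as ``molecule-ubuntu:18.04``, a run against it could look like::
+
+    PREBUILD_PLATFORM_DISTRO=ubuntu \
+    PREBUILD_DISTRO_VERSION=18.04 \
+    molecule test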
Building can be done +with the following single command: + + test/images/docker/build-all.sh + +Install +======= + +Molecule can be installed in multiple ways and in this guide 2 different ways is +covered. + +- Install Molecule with pip in virtual environment +- Use Molecule provided docker container to run Molecule + +Install with pip +---------------- + +This is a OS dependent and some prerequisites needs to be installed, but after +prerequisites are installed installing Molecule can be done by calling following +script: + + source test/bin/install-molecule.sh + +As for the required OS packages, see example for Ubuntu in the install-molecule.sh +script's comments or from Molecule_ pages. + +Note that sourcing the script is not needed to get Molecule installed but it leaves +you already into virtual environment and ready to run Molecule. + +To get out from virtual environment issue: + + deactivate + +And next time to activate virtual environment again before running Molecule, issue: + + source ~/molecule_venv/bin/activate + +And here the directory ``~/molecule_venv`` is just the default virtual environment +path that install-molecule.sh script is using and can be overridden with +``VENV_PATH`` environment variable. + +Use Molecule docker container +----------------------------- + +Molecule provides docker containers images via quay.io_ where Molecule, Ansible +and all needed dependencies are build to the image. + +In this way of using Molecule, no installation is needed and only docker is the +prerequisite for running Molecule. + +For using provided image to test offline-installer roles, following scripts are +provided: + +Build container image: + ``test/molecule-docker/build.sh`` + +This will build image named ``molecule-dev`` with strict version tag. + +Set molecule into the PATH: + ``source test/bin/set_molecule_paths.sh`` + +That will add the actual Molecule run wrapper script test/bin/molecule.sh to path +usable from everywhere similarly than molecule with pip and virtual environment. + +Run Molecule wrapper script: + ``test/bin/molecule.sh`` + +For running Molecule. Using ``molecule-dev`` image and the exact version defined by +test/docker/build.sh script. + +Usage +===== + +Basic usage of molecule tests. See more detailed instructions from Molecule_ + +Run complete testing for a role or a playbook: + +1. cd roles/<role> or cd test/play-<playbook-name> +2. molecule test + +Develop a role code and run testing during the coding: + +1. cd roles/<role> +2. Edit ansible code and molecule test code when needed +3. molecule converge +4. Repeat steps 2 and 3 until code is ready and molecule tests are passing +5. molecule test + +.. _Molecule: https://molecule.readthedocs.io +.. _quay.io: https://quay.io/repository/ansible/molecule +.. _Testinfra: https://testinfra.readthedocs.io +.. _Flake8: http://flake8.pycqa.org +.. _Yamllint: https://github.com/adrienverge/yamllint +.. _Ansible Lint: https://github.com/ansible/ansible-lint +.. _Ansible: https://www.ansible.com/ diff --git a/docs/index.rst b/docs/index.rst index a43eedf5..5275cf46 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,4 +8,4 @@ OOM offline-installer BuildGuide.rst InstallGuide.rst - + TestingGuide.rst |