summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.coafile11
-rw-r--r--.gitignore2
-rwxr-xr-xansible/docker/build_ansible_image.sh5
-rw-r--r--ansible/library/rancher_k8s_environment.py1
-rw-r--r--ansible/roles/kubectl/defaults/main.yml2
-rw-r--r--ansible/roles/kubectl/molecule/default/tests/test_default.py4
-rw-r--r--ansible/roles/kubectl/tasks/main.yml22
-rwxr-xr-xansible/test/bin/install-molecule.sh2
-rwxr-xr-xbuild/build_nexus_blob.sh129
-rwxr-xr-xbuild/create_repo.sh111
-rwxr-xr-xbuild/creating_data/docker-images-collector.sh168
-rw-r--r--build/data_lists/infra_bin_utils.list2
-rw-r--r--build/data_lists/onap_deb.list7
-rw-r--r--build/data_lists/onap_rpm.list6
-rwxr-xr-xbuild/docker-entrypoint.sh178
-rw-r--r--build/download/__init__.py1
-rwxr-xr-xbuild/download/clean_docker_images.py1
-rwxr-xr-xbuild/download/docker_downloader.py3
-rwxr-xr-xbuild/download/pypi_downloader.py2
-rwxr-xr-xbuild/download/rpm_downloader.py2
-rw-r--r--build/download/tox.ini7
-rwxr-xr-xbuild/package.py2
-rw-r--r--build/tox.ini7
-rw-r--r--docs/InstallGuide.rst2
-rw-r--r--tools/cicdansible/ansible.cfg1
-rw-r--r--tools/cicdansible/group_vars/all.yml4
-rw-r--r--tools/cicdansible/heat/installer.yaml5
-rw-r--r--tools/cicdansible/roles/configure_instances/templates/daemon.json.j218
-rw-r--r--tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml1
-rwxr-xr-xtools/helm-healer.sh190
-rwxr-xr-xtools/helm_deployment_status.py30
-rwxr-xr-xtools/remove_runtime_images.py1
-rw-r--r--tox.ini19
33 files changed, 651 insertions, 295 deletions
diff --git a/.coafile b/.coafile
new file mode 100644
index 00000000..3e55d459
--- /dev/null
+++ b/.coafile
@@ -0,0 +1,11 @@
+[py]
+bears = PyLintBear
+pylint_disable = all
+pylint_enable =
+ bad-indentation, trailing-whitespace, unused-wildcard-import, unused-import,
+ unnecessary-semicolon, undefined-variable,
+ syntax-error, unused-variable, using-constant-test,unused-argument,
+ len-as-condition, trailing-newlines, missing-final-newline, reimported,
+ too-many-function-args, singleton-comparison
+ignore =
+ .tox/**
diff --git a/.gitignore b/.gitignore
index ffef34ef..6076713f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,5 @@ doc
/package.info
/ansible/log/
/*.log
+.tox/
+__pycache__/
diff --git a/ansible/docker/build_ansible_image.sh b/ansible/docker/build_ansible_image.sh
index 49833be5..a0eb775a 100755
--- a/ansible/docker/build_ansible_image.sh
+++ b/ansible/docker/build_ansible_image.sh
@@ -24,6 +24,9 @@ set -e
ansible_version="$1"
image_name="${2:-ansible:latest}"
+# Override image name from env variable if set
+image_name="${ANSIBLE_CHROOT_IMAGE_NAME:-$image_name}"
+
script_path=$(readlink -f "$0")
script_dir=$(dirname "$script_path")
@@ -47,4 +50,4 @@ else
exit 1
fi
-exit 0 \ No newline at end of file
+exit 0
diff --git a/ansible/library/rancher_k8s_environment.py b/ansible/library/rancher_k8s_environment.py
index d3d8ac02..00bb7d49 100644
--- a/ansible/library/rancher_k8s_environment.py
+++ b/ansible/library/rancher_k8s_environment.py
@@ -338,4 +338,3 @@ def run_module():
if __name__ == '__main__':
run_module()
-
diff --git a/ansible/roles/kubectl/defaults/main.yml b/ansible/roles/kubectl/defaults/main.yml
index b922fb58..5c2a4c69 100644
--- a/ansible/roles/kubectl/defaults/main.yml
+++ b/ansible/roles/kubectl/defaults/main.yml
@@ -1,2 +1,4 @@
---
kubectl_bin_dir: /usr/local/bin
+completion_dir: /etc/bash_completion.d
+completion_package: bash-completion
diff --git a/ansible/roles/kubectl/molecule/default/tests/test_default.py b/ansible/roles/kubectl/molecule/default/tests/test_default.py
index 4f799b95..3f4c7c7d 100644
--- a/ansible/roles/kubectl/molecule/default/tests/test_default.py
+++ b/ansible/roles/kubectl/molecule/default/tests/test_default.py
@@ -9,3 +9,7 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
def test_kubectl(host):
assert host.file('/usr/local/bin/kubectl').exists
assert host.run('kubectl').rc != 127
+
+
+def test_kubectl_bash_completion(host):
+ assert host.file('/etc/bash_completion.d/kubectl').exists
diff --git a/ansible/roles/kubectl/tasks/main.yml b/ansible/roles/kubectl/tasks/main.yml
index 7c77c3c5..03b9f9b0 100644
--- a/ansible/roles/kubectl/tasks/main.yml
+++ b/ansible/roles/kubectl/tasks/main.yml
@@ -5,3 +5,25 @@
dest: "{{ kubectl_bin_dir }}/kubectl"
remote_src: true
mode: 0755
+
+- name: Install completion for the bash shell
+ package:
+ name: "{{ completion_package }}"
+ state: present
+
+- name: Generate shell autocompletion code for kubectl
+ command: kubectl completion bash
+ register: kubectl_completion
+ changed_when: false
+
+- name: Ensure bash completion dir exists
+ file:
+ path: "{{ completion_dir }}"
+ state: directory
+ mode: 0755
+
+- name: Install bash autocompletion code for kubectl
+ copy:
+ content: "{{ kubectl_completion.stdout }}"
+ dest: "{{ completion_dir }}/kubectl"
+ mode: 0644
diff --git a/ansible/test/bin/install-molecule.sh b/ansible/test/bin/install-molecule.sh
index 7857e0c8..ab6de436 100755
--- a/ansible/test/bin/install-molecule.sh
+++ b/ansible/test/bin/install-molecule.sh
@@ -41,5 +41,5 @@ source ${VENV_PATH}/bin/activate
# Install Molecule
if [ ! -z ${VIRTUAL_ENV} ]; then
echo "Activated virtual env in ${VIRTUAL_ENV}"
- pip -q install molecule==2.20 ansible==2.7.8 docker pyopenssl
+ pip -q install molecule==2.20 ansible==2.7.8 ansible-lint==4.2.0 docker pyopenssl
fi
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 688410ef..9c4b5e69 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -2,7 +2,7 @@
# COPYRIGHT NOTICE STARTS HERE
#
-# Copyright 2018-2019 © Samsung Electronics Co., Ltd.
+# Copyright 2018-2020 © Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,13 +35,13 @@ SCRIPT_LOG="/tmp/$(basename $0)_$(eval ${TIMESTAMP}).log"
# Log everything
exec &> >(tee -a "${SCRIPT_LOG}")
-# Nexus repository location
+# Nexus repository properties
NEXUS_DOMAIN="nexus"
-NEXUS_PORT="8081"
-NEXUS_DOCKER_PORT="8082"
-NPM_REGISTRY="http://${NEXUS_DOMAIN}:${NEXUS_PORT}/repository/npm-private/"
-PYPI_REGISTRY="http://${NEXUS_DOMAIN}:${NEXUS_PORT}/repository/pypi-private/"
-DOCKER_REGISTRY="${NEXUS_DOMAIN}:${NEXUS_DOCKER_PORT}"
+NEXUS_HOST="127.0.0.1"
+NEXUS_EXPOSED_PORT="8081"
+NEXUS_PORT=${NEXUS_EXPOSED_PORT}
+NEXUS_DOCKER_EXPOSED_PORT="8082"
+NEXUS_DOCKER_PORT=${NEXUS_DOCKER_EXPOSED_PORT}
DEFAULT_REGISTRY="docker.io"
# Nexus repository credentials
@@ -97,34 +97,13 @@ usage () {
-o | --output-directory use specific directory for the target blob
-p | --pypi use specific list of pypi packages to be pushed into Nexus
-rl | --resource-list-directory use specific directory with docker, pypi and npm lists
+ -c | --container-name use specific Nexus docker container name
+ -NP | --nexus-port use specific port for published Nexus service
+ -DP | --docker-port use specific port for published Nexus docker registry port
"
exit 1
}
-publish_ports () {
- for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true) ${NEXUS_PORT}; do
- if [[ ${REGISTRY} != *":"* ]]; then
- if [[ ${PUBLISHED_PORTS} != *"80:${NEXUS_DOCKER_PORT}"* ]]; then
- PUBLISHED_PORTS="${PUBLISHED_PORTS} -p 80:${NEXUS_DOCKER_PORT}"
- fi
- else
- REGISTRY_PORT="$(sed 's/^.*\:\([[:digit:]]*\)$/\1/' <<< ${REGISTRY})"
- if [[ ${PUBLISHED_PORTS} != *"${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"* ]]; then
- PUBLISHED_PORTS="${PUBLISHED_PORTS} -p ${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"
- fi
- fi
- done
-}
-
-simulated_hosts () {
- SIMUL_HOSTS=($(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$// ; s/:.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true ) ${NEXUS_DOMAIN})
- for HOST in "${SIMUL_HOSTS[@]}"; do
- if ! grep -wq ${HOST} /etc/hosts; then
- echo "127.0.0.1 ${HOST}" >> /etc/hosts
- fi
- done
-}
-
load_docker_images () {
for ARCHIVE in $(sed $'s/\r// ; /^#/d ; s/\:/\_/g ; s/\//\_/g ; s/$/\.tar/g' ${1} | awk '{ print $1 }'); do
docker load -i ${NXS_SRC_DOCKER_IMG_DIR}/${ARCHIVE}
@@ -177,12 +156,8 @@ push_pip () {
}
docker_login () {
- for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY}) ${DOCKER_REGISTRY}; do
- if ! grep -wqs ${REGISTRY} ~/.docker/config.json; then
- echo "Docker login to ${REGISTRY}"
- echo -n "${NEXUS_PASSWORD}" | docker login -u "${NEXUS_USERNAME}" --password-stdin ${REGISTRY} > /dev/null
- fi
- done
+ echo "Docker login to ${DOCKER_REGISTRY}"
+ echo -n "${NEXUS_PASSWORD}" | docker --config "${DOCKER_CONFIG_DIR}" login -u "${NEXUS_USERNAME}" --password-stdin ${DOCKER_REGISTRY} > /dev/null
}
push_docker () {
@@ -198,17 +173,28 @@ push_docker () {
fi
elif [[ -z $(sed -n '/\.[^/].*\//p' <<< ${IMAGE}) ]]; then
PUSH="${DOCKER_REGISTRY}/${IMAGE}"
- fi
- if [[ ! -z ${PUSH} ]]; then
- docker tag ${IMAGE} ${PUSH}
else
- PUSH="${IMAGE}"
+ # substitute all host names with $DOCKER_REGISTRY
+ repo_host=$(sed -e 's/\/.*$//' <<< ${IMAGE})
+ PUSH="$(sed -e 's/'"${repo_host}"'/'"${DOCKER_REGISTRY}"'/' <<< ${IMAGE})"
fi
- docker push ${PUSH}
+ docker tag ${IMAGE} ${PUSH}
+ docker --config "${DOCKER_CONFIG_DIR}" push ${PUSH}
+ # Remove created tag
+ docker rmi ${PUSH}
echo "${IMAGE} pushed as ${PUSH} to Nexus"
done
}
+validate_container_name () {
+ # Verify $1 is a valid hostname
+ if ! echo "${1}" | egrep -q "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$";
+ then
+ echo "ERROR: ${1} is not a valid name!"
+ exit 1;
+ fi
+}
+
while [ "${1}" != "" ]; do
case ${1} in
-d | --docker ) shift
@@ -224,6 +210,10 @@ while [ "${1}" != "" ]; do
shift
NXS_NPM_LISTS+=("$(realpath ${1})")
;;
+ -c | --container-name ) shift
+ validate_container_name "${1}"
+ NEXUS_DOMAIN="${1}"
+ ;;
-o | --output-directory ) shift
NEXUS_DATA_DIR="$(realpath ${1})"
;;
@@ -235,6 +225,12 @@ while [ "${1}" != "" ]; do
-rl | --resource-list-directory ) shift
LISTS_DIR="$(realpath ${1})"
;;
+ -NP | --nexus-port ) shift
+ NEXUS_PORT="${1}"
+ ;;
+ -DP | --docker-port ) shift
+ NEXUS_DOCKER_PORT="${1}"
+ ;;
-h | --help ) usage
;;
*) usage
@@ -256,6 +252,11 @@ if [ ${#FAILED_COMMANDS[*]} -gt 0 ]; then
exit 1
fi
+# Nexus repository locations
+NPM_REGISTRY="http://${NEXUS_HOST}:${NEXUS_PORT}/repository/npm-private/"
+PYPI_REGISTRY="http://${NEXUS_HOST}:${NEXUS_PORT}/repository/pypi-private/"
+DOCKER_REGISTRY="${NEXUS_HOST}:${NEXUS_DOCKER_PORT}"
+
# Setup directories with resources for docker, npm and pypi
NXS_SRC_DOCKER_IMG_DIR="${DATA_DIR}/offline_data/docker_images_for_nexus"
NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar"
@@ -276,25 +277,11 @@ if [ ${#NXS_DOCKER_IMG_LISTS[@]} -eq 0 ]; then
NXS_DOCKER_IMG_LISTS=("${NXS_DOCKER_IMG_LIST}" "${NXS_RKE_DOCKER_IMG_LIST}" "${NXS_K8S_DOCKER_IMG_LIST}")
fi
-# Backup /etc/hosts
-HOSTS_BACKUP="$(eval ${TIMESTAMP}_hosts.bk)"
-cp /etc/hosts /etc/${HOSTS_BACKUP}
-
-# Backup the current docker registry settings
-if [ -f ~/.docker/config.json ]; then
- DOCKER_CONF_BACKUP="$(eval ${TIMESTAMP}_config.json.bk)"
- mv ~/.docker/config.json ~/.docker/${DOCKER_CONF_BACKUP}
-fi
+# Create Docker client config dir
+DOCKER_CONFIG_DIR=$(mktemp -p /tmp -d .docker.XXXXXXXX)
# Setup default ports published to host as docker registry
-PUBLISHED_PORTS="-p ${NEXUS_PORT}:${NEXUS_PORT} -p ${NEXUS_DOCKER_PORT}:${NEXUS_DOCKER_PORT}"
-
-# Setup additional ports published to host based on simulated docker registries
-# Setup simulated domain names to be able to push all to private Nexus repository
-for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do
- publish_ports "${DOCKER_IMG_LIST}"
- simulated_hosts "${DOCKER_IMG_LIST}"
-done
+PUBLISHED_PORTS="-p ${NEXUS_PORT}:${NEXUS_EXPOSED_PORT} -p ${NEXUS_DOCKER_PORT}:${NEXUS_DOCKER_EXPOSED_PORT}"
# Nexus repository configuration setup
NEXUS_CONFIG_GROOVY='import org.sonatype.nexus.security.realm.RealmManager
@@ -379,15 +366,15 @@ INFO
# Start the Nexus
NEXUS_CONT_ID=$(docker run -d --rm -v ${NEXUS_DATA_DIR}:/nexus-data:rw --name ${NEXUS_DOMAIN} ${PUBLISHED_PORTS} ${NEXUS_IMAGE})
echo "Waiting for Nexus to fully start"
-until curl -su ${NEXUS_USERNAME}:${NEXUS_PASSWORD} http://${NEXUS_DOMAIN}:${NEXUS_PORT}/service/metrics/healthcheck | grep '"healthy":true' > /dev/null ; do
+until curl -su ${NEXUS_USERNAME}:${NEXUS_PASSWORD} http://${NEXUS_HOST}:${NEXUS_PORT}/service/metrics/healthcheck | grep '"healthy":true' > /dev/null ; do
printf "."
sleep 3
done
echo -e "\nNexus started"
# Configure the nexus repository
-curl -sX POST --header 'Content-Type: application/json' --data-binary "${NEXUS_CONFIG}" http://${NEXUS_USERNAME}:${NEXUS_PASSWORD}@${NEXUS_DOMAIN}:${NEXUS_PORT}/service/rest/v1/script
-curl -sX POST --header "Content-Type: text/plain" http://${NEXUS_USERNAME}:${NEXUS_PASSWORD}@${NEXUS_DOMAIN}:${NEXUS_PORT}/service/rest/v1/script/configure/run > /dev/null
+curl -sX POST --header 'Content-Type: application/json' --data-binary "${NEXUS_CONFIG}" http://${NEXUS_USERNAME}:${NEXUS_PASSWORD}@${NEXUS_HOST}:${NEXUS_PORT}/service/rest/v1/script
+curl -sX POST --header "Content-Type: text/plain" http://${NEXUS_USERNAME}:${NEXUS_PASSWORD}@${NEXUS_HOST}:${NEXUS_PORT}/service/rest/v1/script/configure/run > /dev/null
###########################
# Populate NPM repository #
@@ -420,12 +407,12 @@ fi
## Populate Docker repository #
###############################
-# Login to simulated docker registries
+# Login to docker registry simulated by Nexus container
# Push images to private nexus based on the lists
-# Images from default registry need to be tagged to private registry
-# and those without defined repository in tag uses default repository 'library'
+# All images need to be tagged to simulated registry
+# and those without defined repository in tag use default repository 'library'
+docker_login
for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do
- docker_login "${DOCKER_IMG_LIST}"
push_docker "${DOCKER_IMG_LIST}"
done
@@ -433,17 +420,13 @@ done
# Stop the Nexus and cleanup #
##############################
-echo "Stopping Nexus and returning backups"
+echo "Stopping Nexus"
# Stop the Nexus
docker stop ${NEXUS_CONT_ID} > /dev/null
-# Return backed up configuration files
-mv -f "/etc/${HOSTS_BACKUP}" /etc/hosts
-
-if [ -f ~/.docker/${DOCKER_CONF_BACKUP} ]; then
- mv -f ~/.docker/${DOCKER_CONF_BACKUP} ~/.docker/config.json
-fi
+# Drop temporary Docker client config dir
+rm -rf ${DOCKER_CONFIG_DIR}
echo "Nexus blob is built"
exit 0
diff --git a/build/create_repo.sh b/build/create_repo.sh
index fa53e688..f2bde32d 100755
--- a/build/create_repo.sh
+++ b/build/create_repo.sh
@@ -1,60 +1,65 @@
#!/usr/bin/env bash
-# Set type of distribution
+# Set distribution type
distro_type="$(cat /etc/*-release | grep -w "ID" | awk -F'=' '{ print $2 }' | tr -d '"')"
-# Path to folder with cloned offline-installer build directory with docker_entrypoint script
+# Path to cloned offline-installer build directory with docker_entrypoint script
volume_offline_directory="$(readlink -f $(dirname ${0}))"
-# Path for directory where repository will be created
+# Destination path for created repository
volume_repo_directory="$(pwd)"
# Path inside container with cloned offline-installer build directory
container_offline_volume="/mnt/offline/"
-# Path inside container where will be created repository
+# Target repository path inside container
container_repo_volume="/mnt/repo/"
-# Path inside container where will be stored additional packages lists
+# Additional packages lists files path within container
container_list_volume="/mnt/additional-lists/"
-# Show help for using this script
+# Use cache by default
+drop_cache=false
+
+# Show script usage
help () {
cat <<EOF
-Script for run docker container creating DEB or RPM repository
+Wrapper script running docker container for creating package repository
+
+Repository type is set with --target-platform option and the default is to use host OS platform type
-Type of repository is created based on user input or if input is empty type of host OS
+usage: create_repo.sh [OPTION]...
-usage: create_repo.sh [-d|--destination-repository output directory] [-c|--cloned-directory input directory]
- [-t|--target-platform centos target platform for repository]
- [-a|----additional-lists path to additional package list]
--h --help: Show this help
--d --destination-repository: set path where will be stored RPM packages. Default value is current directory
--c --cloned-directory: set path where is stored this script and docker-entrypoint script (offline-installer/build directory). Fill it just when you want to use different script/datalists
--t --target-platform: set target platform for repository (ubuntu/rhel/centos)
--a --additional-list: add additional packages list
- can be used multiple times for more additional lists
-If build folder from offline repository is not specified will be used default path of current folder.
+ -d | --destination-repository target path to store downloaded packages. Current directory by default
+ -c | --cloned-directory path to directory containing this and docker-entrypoint scripts (offline-installer/build directory)
+ Set it only when you want to use different script/datalists
+ -t | --target-platform target repository platform type (ubuntu/rhel/centos)
+ -a | --additional-list additional packages list; can be used multiple times for more additional lists
+ -n | --container-name-suffix add custom suffix to docker container name
+ -r | --drop-cache remove cached packages (use package cache by default)
+ -h | --help show this help
+
+If build folder from offline repository is not specified current one will be used by default.
EOF
}
-# Get type of distribution
+# Get distribution type
# Set Docker image name and version based on type of linux distribution
# Set expected directory for RPM/DEB packages
-set_enviroment () {
+set_environment () {
case "$1" in
ubuntu)
distro_type="ubuntu"
docker_image="ubuntu:18.04"
expected_dir="resources/pkg/deb"
- container_name=$1"_repo"
+ container_name="${1}_repo${container_name_suffix}"
;;
centos|rhel)
distro_type="rhel"
docker_image="centos:centos7.6.1810"
expected_dir="resources/pkg/rpm"
- container_name=$1"_repo"
+ container_name="${1}_repo${container_name_suffix}"
;;
*)
echo "Unknown type of linux distribution."
@@ -64,7 +69,6 @@ set_enviroment () {
}
# Getting input parametters
-POSITIONAL=()
if [[ $# -eq 0 ]] ; then
help # show help
exit 0
@@ -79,24 +83,36 @@ do
exit 0
;;
-c|--cloned-directory)
- # Directory parametter
- # Sets path where is cloned offline-installer build directory
+ # Directory parameter
+ # Set path to offline-installer build directory
volume_offline_directory="$2"
+ shift
;;
-d|--destination-repository)
- # Repository direcotry parametter
- # Sets path where will be repository created
+ # Repository directory parameter
+ # Set destination path for created repository
volume_repo_directory="$2"
+ shift
;;
-t|--target-platform)
# Repository type (rpm/deb)
- # Sets target platform for repository
+ # Set target platform for repository
target_input="$2"
+ shift
;;
-a|--additional-list)
- # Array with more packages lists
- # Add more packages lists to download
+ # Array of additional packages lists
additional_lists+=("$2")
+ shift
+ ;;
+ -n|--container-name-suffix)
+ # Set custom container name suffix
+ container_name_suffix="_${2}"
+ shift
+ ;;
+ -r|--drop-cache)
+ # Set flag to clean cache
+ drop_cache=true
;;
*)
# unknown option
@@ -104,40 +120,37 @@ do
exit 1
;;
esac
- shift;shift
+ shift
done
-# Check if user specified type of repository
-# This settings have higher priority, then type of distribution
+# Check if user specified repository type
+# This setting has higher priority than distribution type
if ! test -z "$target_input"
then
- set_enviroment "$target_input"
+ set_environment "$target_input"
else
- set_enviroment "$distro_type"
+ set_environment "$distro_type"
fi
-# Check if path contains expected path:
+# Check if path contains expected components:
# "resources/pkg/rpm" for Rhel/CentOS or
# "resources/pkg/deb" for Ubuntu/Debian
if ! [[ "/$volume_repo_directory/" = *"/$expected_dir/"* ]]; then
- # Create repo folder if it not exists
+ # Create repo folder if it doesn't exist
case "$distro_type" in
ubuntu)
volume_repo_directory="$volume_repo_directory"/resources/pkg/deb
;;
rhel)
- volume_repo_directory="$volume_repo_directory"/resources/pkg/rhel
+ volume_repo_directory="$volume_repo_directory"/resources/pkg/rpm
;;
esac
[ ! -d "$volume_repo_directory" ] && mkdir -p $volume_repo_directory
fi
-#Check if container "centos-repo" is running
-if [ ! "$(docker ps -q -f name=$container_name)" ]; then
- if [ "$(docker ps -aq -f status=exited -f name=$container_name)" ]; then
- # cleanup
- docker rm $container_name
- fi
+# Check if container is already running
+if [ ! $(docker ps -q -f name="^${container_name}$") ];
+then
# run repo container
# name of container $container_name
# docker entrypoint script from mounted volume
@@ -148,6 +161,10 @@ if [ ! "$(docker ps -q -f name=$container_name)" ]; then
param_array+=(--directory ${container_repo_volume})
param_array+=(--list ${container_offline_volume}data_lists/)
param_array+=(--packages-lists-path ${container_list_volume})
+ if ${drop_cache};
+ then
+ param_array+=(--drop-cache)
+ fi
[[ ! ${#additional_lists[@]} -eq 0 ]] && \
for array_list in "${additional_lists[@]}";
do
@@ -155,14 +172,12 @@ if [ ! "$(docker ps -q -f name=$container_name)" ]; then
mounted_lists+=(-v ${array_list}:${container_list_volume}${array_list##*/})
done
- docker run -d \
- --name $container_name \
+ docker run --name $container_name \
-v ${volume_offline_directory}:${container_offline_volume} \
-v ${volume_repo_directory}:${container_repo_volume} \
"${mounted_lists[@]}" \
--rm \
--entrypoint="${container_offline_volume}docker-entrypoint.sh" \
- -it ${docker_image} \
+ ${docker_image} \
"${param_array[@]}"
- docker logs $(docker ps --filter "name=${container_name}" --format '{{.ID}}' -a) -f
fi
diff --git a/build/creating_data/docker-images-collector.sh b/build/creating_data/docker-images-collector.sh
index 8789350c..d07aa64d 100755
--- a/build/creating_data/docker-images-collector.sh
+++ b/build/creating_data/docker-images-collector.sh
@@ -2,7 +2,7 @@
# COPYRIGHT NOTICE STARTS HERE
#
-# Copyright 2019 © Samsung Electronics Co., Ltd.
+# Copyright 2019-2020 © Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,8 +20,9 @@
### This script is preparing docker images list based on kubernetes project
-### NOTE: helm needs to be installed and working, it is required for correct processing
-### of helm charts in oom directory
+### NOTE: helm needs to be installed; it is required for correct processing
+### of helm charts in oom directory; chartmuseum and helm push plugin are also
+### required if using helm v3
# Fail fast settings
set -e
@@ -30,11 +31,17 @@ usage () {
echo " "
echo " This script is preparing docker images list based on kubernetes project"
echo " Usage:"
- echo " ./$(basename $0) <path to project> [<output list file>]"
+ echo " ./$(basename $0) [OPTION]... <path to project> [<output list file>]"
+ echo " "
+ echo " Options:"
+ echo " -h | --help Show script usage synopsis"
+ echo " -p | --helm-port Chart repository server port"
+ echo " -b | --helm-bin Path to Helm executable"
+ echo " -c | --chartmuseum-bin Path to Chartmuseum executable"
echo " "
echo " Example: ./$(basename $0) /root/oom/kubernetes/onap"
echo " "
- echo " Dependencies: helm, python-yaml, make"
+ echo " Dependencies: helm, python-yaml, make, chartmuseum (helm v3 only)"
echo " "
exit 1
}
@@ -60,21 +67,120 @@ create_list() {
else
>&2 echo -e \n" !!! ${1} sybsystem does not exist !!!"\n
fi
- helm template --set global.masterPassword=TemplatePassword -f ${PROJECT_DIR}/values.yaml "${SUBSYS_DIR}" | grep 'image:\ \|tag_version:\ \|h._image' |
+ ${HELM_BIN} template --set global.masterPassword=TemplatePassword,global.offlineDeploymentBuild=true -f ${PROJECT_DIR}/values.yaml "${SUBSYS_DIR}" | grep 'image:\ \|tag_version:\ \|h._image' |
sed -e 's/^.*\"h._image\"\ :\ //; s/^.*\"\(.*\)\".*$/\1/' \
-e 's/\x27\|,//g; s/^.*\(image\|tag_version\):\ //' | tr -d '\r'
}
-# Configuration
-if [ "${1}" == "-h" ] || [ "${1}" == "--help" ] || [ $# -lt 1 ]; then
+# Kill helm repository if already running
+kill_chart_repo() {
+ if [[ "${HELM_VERSION}" =~ "v3" ]];
+ then
+ cmd=${HELM3_CMD_PATTERN}
+ else
+ cmd=${HELM2_CMD_PATTERN}
+ fi
+ for pid in $(pgrep -f "${cmd}");
+ do
+ # Kill helm repository server process
+ kill $pid
+ done
+}
+
+validate_port() {
+ if [ -z $1 ];
+ then
+ echo "Error: No valid port number provided"
+ exit 1
+ fi
+ if ! [[ "$1" =~ ^[0-9]*$ ]];
+ then
+ echo "Error: "${1}" is not a valid port number"
+ exit 1
+ fi
+}
+
+validate_bin() {
+ if [ -z $1 ];
+ then
+ echo "Error: No path to executable provided"
+ exit 1
+ else
+ if ! [[ -x ${1} && -f ${1} ]];
+ then
+ echo "Error: ${1} is not an executable"
+ exit 1
+ fi
+ fi
+}
+
+check_chart_repo() {
+ sleep 2 # let the helm repository process settle
+ if [[ "${HELM_VERSION}" =~ "v3" ]];
+ then
+ cmd=${HELM3_CMD_PATTERN}
+ else
+ cmd=${HELM2_CMD_PATTERN}
+ fi
+ if [ $(pgrep -f "${cmd}" -c) -eq 0 ];
+ then
+ echo "Fatal: Helm chart repository server failed to start"
+ exit 1
+ fi
+}
+
+# Process input options
+if [ $# -lt 1 ]; then
usage
fi
+while [ $# -gt 0 ];
+do
+ case "${1}" in
+ -h | --help)
+ usage
+ ;;
+ -p | --helm-port)
+ PORT="${2}"
+ validate_port "${PORT}"
+ shift 2
+ ;;
+ -b | --helm-bin)
+ BIN="${2}"
+ validate_bin "${BIN}"
+ shift 2
+ ;;
+ -c | --chartmuseum-bin)
+ CHARTMUSEUM_BIN="${2}"
+ validate_bin "${CHARTMUSEUM_BIN}"
+ shift 2
+ ;;
+ -*)
+ echo "Unknown option ${1}"
+ usage
+ ;;
+ *)
+ # end of options
+ break
+ ;;
+ esac
+done
+
+# Configuration
PROJECT_DIR="${1}"
LIST="${2}"
LISTS_DIR="$(readlink -f $(dirname ${0}))/../data_lists"
-HELM_REPO="local http://127.0.0.1:8879"
+HELM_BIN=${BIN:-helm}
+CHARTMUSEUM_BIN=${CHARTMUSEUM_BIN:-chartmuseum}
+HELM_REPO_HOST="127.0.0.1"
+HELM_REPO_PORT="${PORT:-8879}"
+HELM_REPO="${HELM_REPO_HOST}:${HELM_REPO_PORT}"
+HELM_REPO_PATH="dist/packages" # based on PACKAGE_DIR defined in oom/kubernetes/Makefile
+HELM_VERSION=$(${HELM_BIN} version -c --template "{{.Version}}")
+DOCKER_CONTAINER="generate-certs-${HELM_REPO_PORT}" # oom-cert-service container name override
PROJECT="$(basename ${1})"
+HELM3_CMD_PATTERN="${CHARTMUSEUM_BIN} --storage local --storage-local-rootdir .*/chartmuseum -port ${HELM_REPO_PORT}"
+HELM2_CMD_PATTERN="${HELM_BIN} serve --address ${HELM_REPO}"
if [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
echo "Wrong path to project directory entered"
@@ -82,6 +188,11 @@ if [ ! -f "${PROJECT_DIR}/../Makefile" ]; then
elif [ -z "${LIST}" ]; then
mkdir -p ${LISTS_DIR}
LIST="${LISTS_DIR}/${PROJECT}_docker_images.list"
+else
+ # $2 is not empty - ensure LIST path exists
+ LIST_DIR="$(dirname ${LIST})"
+ mkdir -p "${LIST_DIR}"
+ MSG="${LIST_DIR} didn't exist, created\n"
fi
if [ -e "${LIST}" ]; then
@@ -89,23 +200,34 @@ if [ -e "${LIST}" ]; then
MSG="$(realpath ${LIST}) already existed\nCreated backup $(realpath ${LIST}).bk\n"
fi
-# Setup helm
-if ps -eaf | grep -v "grep" | grep "helm" > /dev/null; then
- echo "helm is already running"
-else
- helm init -c > /dev/null
- helm serve &
-fi
+HELM_HOME=$(mktemp -p /tmp -d .helm.XXXXXXXX)
+export HELM_HOME
-# Create helm repository
-if ! helm repo list 2>&1 | awk '{ print $1, $2 }' | grep -q "$HELM_REPO" > /dev/null; then
- helm repo add $HELM_REPO
+kill_chart_repo # make sure it's not already running
+if [[ "${HELM_VERSION}" =~ "v3" ]];
+then
+ # Setup helm v3
+ export HELM_CONFIG_HOME="${HELM_HOME}/.config"
+ export HELM_CACHE_HOME="${HELM_HOME}/.cache"
+ ${CHARTMUSEUM_BIN} --storage local --storage-local-rootdir "${HELM_HOME}/chartmuseum" -port ${HELM_REPO_PORT} &
+ sleep 2 # let the chartmuseum process settle
+ ${HELM_BIN} repo add local "http://${HELM_REPO}"
+else
+ # Setup helm v2
+ mkdir -p "${PROJECT_DIR}/../${HELM_REPO_PATH}"
+ ${HELM_BIN} init --skip-refresh -c --local-repo-url "http://${HELM_REPO}"
+ ${HELM_BIN} serve --address ${HELM_REPO} --repo-path "${PROJECT_DIR}/../${HELM_REPO_PATH}" &
+ ${HELM_BIN} repo remove stable 2>/dev/null || true
fi
+check_chart_repo
# Make all
pushd "${PROJECT_DIR}/.."
echo "Building project..."
-make all > /dev/null; make ${PROJECT} > /dev/null
+export SKIP_LINT=TRUE
+export DOCKER_CONTAINER
+export HELM_BIN
+make -e all > /dev/null; make -e ${PROJECT} > /dev/null
popd
# Create the list from all enabled subsystems
@@ -125,4 +247,10 @@ sed -i "1i# generated from ${COMMENT}" "${LIST}"
echo -e ${MSG}
echo -e 'The list has been created:\n '"${LIST}"
+
+# Kill helm
+kill_chart_repo
+# Remove temporary helm directory
+rm -rf ${HELM_HOME}
+
exit 0
diff --git a/build/data_lists/infra_bin_utils.list b/build/data_lists/infra_bin_utils.list
index a82d0560..36a9fe6b 100644
--- a/build/data_lists/infra_bin_utils.list
+++ b/build/data_lists/infra_bin_utils.list
@@ -1,3 +1,3 @@
https://storage.googleapis.com/kubernetes-release/release/v1.15.11/bin/linux/amd64/kubectl
-https://storage.googleapis.com/kubernetes-helm/helm-v2.16.6-linux-amd64.tar.gz
+https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz
https://github.com/rancher/rke/releases/download/v1.0.4/rke_linux-amd64
diff --git a/build/data_lists/onap_deb.list b/build/data_lists/onap_deb.list
index fcc6391d..c2ca342d 100644
--- a/build/data_lists/onap_deb.list
+++ b/build/data_lists/onap_deb.list
@@ -35,8 +35,8 @@ cpp
dpkg-dev
g++
g++-7
-libcc-0
-libcc-7-dev
+libcc1-0
+libgcc-7-dev
libgomp1
libitm1
libatomic1
@@ -47,11 +47,10 @@ libubsan0
libcilkrts5
libmpx2
libquadmath0
-libc6-dev
gcc
gcc-7
libc6-dev
-libc-dev
make
binutils
resolvconf
+bash-completion
diff --git a/build/data_lists/onap_rpm.list b/build/data_lists/onap_rpm.list
index 56eca600..a764c262 100644
--- a/build/data_lists/onap_rpm.list
+++ b/build/data_lists/onap_rpm.list
@@ -1,8 +1,9 @@
+audit-2.8.5-4.el7.x86_64
+bash-completion-2.1-8.el7.noarch
containerd.io-1.2.5-3.1.el7.x86_64
container-selinux-2.107-1.el7_6.noarch
docker-ce-18.09.5-3.el7.x86_64
docker-ce-cli-18.09.6-3.el7.x86_64
-gssproxy-0.7.0-26.el7.x86_64
keyutils-1.5.8-3.el7.x86_64
libbasicobjects-0.1.1-32.el7.x86_64
libcollection-0.7.0-32.el7.x86_64
@@ -12,9 +13,10 @@ libnfsidmap-0.25-19.el7.x86_64
libpath_utils-0.2.1-32.el7.x86_64
libref_array-0.1.5-32.el7.x86_64
libverto-libevent-0.2.5-4.el7.x86_64
-nfs-utils-1.3.0-0.65.el7.x86_64
+nfs-utils-1.3.0-0.68.el7.x86_64
python-docker-py-1.10.6-9.el7_6.noarch
python-docker-pycreds-0.3.0-9.el7_6.noarch
+python-chardet-2.2.1-3.el7.noarch
python-ipaddress-1.0.16-2.el7.noarch
python-jsonpointer-1.9-2.el7.noarch
python-websocket-client-0.56.0-3.git3c25814.el7.noarch
diff --git a/build/docker-entrypoint.sh b/build/docker-entrypoint.sh
index b3306e26..81dcd84c 100755
--- a/build/docker-entrypoint.sh
+++ b/build/docker-entrypoint.sh
@@ -1,6 +1,8 @@
#!/usr/bin/env bash
-# Set type of distribution where script is running
+set -eo pipefail
+
+# Set distribution family
distro_type=$(cat /etc/*-release | grep -w "ID" | awk -F'=' '{ print $2 }' | tr -d '"')
case "$distro_type" in
ubuntu)
@@ -15,37 +17,40 @@ case "$distro_type" in
;;
esac
-# Path where will be created repository (in container)
+# Target path for created repository
OFFLINE_REPO_DIR=""
-# Path where is stored onap_rpm.list and onap_deb.list file
+# Path to directory containing onap_rpm.list and onap_deb.list files
PCKG_LIST_DIR=""
-# Path where is stored additional packages lists
+# Path to additional packages lists
ADD_LIST_DIR=""
-# Show help for using this script
+# Use cache by default
+drop_cache=false
+
+# Show help
help () {
cat <<EOF
-Docker entrypoint script for creating RPM/DEB repository based on linux distribution where script is running
+Docker entrypoint script for creating RPM/DEB repository based on container platform type
+
+usage: create-repo.sh [OPTION]...
-usage: create-repo.sh [-d|--directory output directory] [-l|--list input rpm/deb list directory] [-a|--additional-lists list1.list]
--h --help: Show this help
--d --directory: set path for repo directory in container
--l --list: set path where rpm or deb list is stored in container
--a --additional-list: add name of additional packages list
- can be used multiple times for more additional lists
--p --packages-lists-path: set path for other additional packages lists
+ -d | --directory target repository path
+ -l | --list input rpm/deb list directory
+ -a | --additional-list additional packages list; can be used multiple times
+ -p | --packages-lists-path other additional packages lists
+ -r | --drop-cache remove cached packages (use package cache by default)
+ -h | --help show this help
Both paths have to be set with shared volume between
-container and host computer. Default path in container is: /tmp/
+container and the host. Default path in container is: /tmp/
Repository will be created at: /<path>/resources/pkg/rhel/
RMP/DEB list is stored at: ./data_list/
EOF
}
-# Getting input parametters
-POSITIONAL=()
+# Getting input parameters
if [[ $# -eq 0 ]] ; then
help # show help
exit 0
@@ -54,28 +59,36 @@ while [[ $# -gt 0 ]]
do
case "$1" in
-h|--help)
- # Help parametter
+ # Help parameter
help # show help
exit
;;
-d|--directory)
- # Directory parametter
- # Sets path where will be created reposity
+ # Directory parameter
+ # Set target repository path
OFFLINE_REPO_DIR="$2"
+ shift
;;
-l|--list)
- # List parametter
- # Sets path where is stored onap_rpm.list or onap_deb.list file
+ # List parameter
+ # Set path containing onap_rpm.list or onap_deb.list file
PCKG_LIST_DIR="$2"
+ shift
;;
-p|--packages-lists-path)
- # Path parametter
- # Sets path where is stored additional packages lists
+ # Path parameter
+ # Set path for additional packages lists
ADD_LIST_DIR="$2"
+ shift
;;
-a|--additional-list)
# Array of additional packages lists
ADDITIONAL_LISTS+=("$2")
+ shift
+ ;;
+ -r|--drop-cache)
+ # Set flag to clean cache
+ drop_cache=true
;;
*)
# unknown option
@@ -83,31 +96,34 @@ do
exit
;;
esac
- shift;shift
+ shift
done
-# Testing if directory parametter was used
-# If not variable is sets to default value:
-# /tmp/repo/resources/pkg/rpm
-# or
-# /tmp/repo/resources/pkg/deb
+# Testing if directory parameter was used
+# If not, the variable is set to /tmp/repo by default
if test -z "$OFFLINE_REPO_DIR"
then
OFFLINE_REPO_DIR="/tmp/repo/"
fi
-# Testing if list parametter was used
-# If not variable is sets to default value /tmp/offline/data-list
+# Testing if list parameter was used
+# If not, the variable is set to the default value /tmp/offline/data_list
if test -z "$PCKG_LIST_DIR"
then
- PCKG_LIST_DIR="/tmp/offline/data_list/"
+ PCKG_LIST_DIR="/tmp/offline/data_list"
fi
-# Testing if additional packages list parametter was used
-# If not variable is sets to default value /tmp/additional-lists
+# Testing if additional packages list parameter was used
+# If not, the variable is set to the default value /tmp/additional-lists
if test -z "$PCKG_LIST_DIR"
then
- PCKG_LIST_DIR="/tmp/additional-lists/"
+ PCKG_LIST_DIR="/tmp/additional-lists"
+fi
+
+# Clean target repo dir if --drop-cache set
+if ${drop_cache};
+then
+ rm -rf ${OFFLINE_REPO_DIR}/*
fi
case "$distro_type" in
@@ -132,22 +148,54 @@ case "$distro_type" in
# https://bugs.launchpad.net/ubuntu/+source/aptitude/+bug/1543280
chown _apt $OFFLINE_REPO_DIR
- # Download all packages from onap_deb.list via apt-get to repository folder
- for i in $(cat ${PCKG_LIST_DIR}onap_deb.list | awk '{print $1}');do apt-get download $i -y; done
- for i in $(cat ${PCKG_LIST_DIR}onap_deb.list | awk '{print $1}');
- do
- for depends in $(apt-cache depends $i | grep -E 'Depends' | cut -d ':' -f 2,3 | sed -e s/'<'/''/ -e s/'>'/''/);
- do apt-get download $depends -y;
- done;
- done
+ # Create tmp file for package list
+ list_file=$(mktemp)
+
+ # Enumerate packages that are already downloaded
+ for package in $(cat ${PCKG_LIST_DIR}/onap_deb.list);
+ do
+ # If package name contains explicit version info cut the version string off for further processing
+ p=$(echo $package |sed -r 's/=.*//')
+ # Add package to download list only if it's not already there
+ if [ $(ls ${p}_*.deb 2>/dev/null | wc -l) -eq 0 ];
+ then
+ echo ${package} >> ${list_file}
+ fi
+ done
- # Download all packages with dependecies from all additional packages lists via apt-get to repository folder
+ # Download all packages via apt-get to repository folder
+ for i in $(cat ${list_file});do echo apt-get download $i -y; done
+ for i in $(cat ${list_file});
+ do
+ for depends in $(apt-cache depends $i | grep -E 'Depends' | grep -v 'Depends:.*>$' | cut -d ':' -f 2,3 | sed -e s/'<'/''/ -e s/'>'/''/);
+ do
+echo apt-get download $depends -y;
+ done;
+ done
+
+ # Download all packages with dependencies from all additional packages lists via apt-get to repository folder
if ! [ ${#ADDITIONAL_LISTS[@]} -eq 0 ]; then
for list in ${ADDITIONAL_LISTS[@]}
do
- for i in $(cat ${ADD_LIST_DIR}$list | awk '{print $1}');do apt-get download $i -y; done
- for i in $(cat ${ADD_LIST_DIR}$list | awk '{print $1}');
- do
+
+ # Create tmp file for package list
+ list_file=$(mktemp)
+
+ # Enumerate packages that are already downloaded
+ for package in $(cat ${ADD_LIST_DIR}/${list});
+ do
+ # If package name contains explicit version info cut the version string off for further processing
+ p=$(echo $package |sed -r 's/=.*//')
+ # Add package to download list only if it's not already there
+ if [ $(ls ${p}_*.deb 2>/dev/null | wc -l) -eq 0 ];
+ then
+ echo ${package} >> ${list_file}
+ fi
+ done
+
+ for i in $(cat ${list_file});do apt-get download $i -y; done
+ for i in $(cat ${list_file});
+ do
for depends in $(apt-cache depends $i | grep -E 'Depends' | cut -d ':' -f 2,3 | sed -e s/'<'/''/ -e s/'>'/''/);
do apt-get download $depends -y;
done;
@@ -168,18 +216,46 @@ case "$distro_type" in
# Add official docker repository
yum-config-manager --add-repo=https://download.docker.com/linux/centos/7/x86_64/stable/
+ # Create tmp file for package list
+ list_file=$(mktemp)
+
+ # Enumerate packages that are already downloaded
+ for package in $(cat ${PCKG_LIST_DIR}/onap_rpm.list);
+ do
+ # Add package to download list only if it's not already there
+ if [ ! -f ${OFFLINE_REPO_DIR}/${package}.rpm ];
+ then
+ echo ${package} >> ${list_file}
+ fi
+ done
+
# Download all packages from onap_rpm.list via yumdownloader to repository folder
- for i in $(cat ${PCKG_LIST_DIR}onap_rpm.list | awk '{print $1}');do yumdownloader --resolve --downloadonly --destdir=${OFFLINE_REPO_DIR} $i -y; done
+ for i in $(cat ${list_file});do yumdownloader --resolve --downloadonly --destdir=${OFFLINE_REPO_DIR} $i -y; done
- # Download all packages from all additional packages lists via apt-get to repository folder
+ # Download all packages from all additional packages lists via yumdownloader to repository folder
if ! [ ${#ADDITIONAL_LISTS[@]} -eq 0 ]; then
for list in ${ADDITIONAL_LISTS[@]}
do
- for i in $(cat ${ADD_LIST_DIR}$list | awk '{print $1}');do yumdownloader --resolve --downloadonly --destdir=${OFFLINE_REPO_DIR} $i -y; done
+ # Create tmp file for additional package list
+ list_file=$(mktemp)
+ # Enumerate packages that are already downloaded
+ for package in $(cat ${ADD_LIST_DIR}/${list});
+ do
+ # Add package to download list only if it's not already there
+ if [ ! -f ${OFFLINE_REPO_DIR}/${package}.rpm ];
+ then
+ echo ${package} >> ${list_file}
+ fi
+ done
+
+ for i in $(cat ${list_file});
+ do
+ yumdownloader --resolve --downloadonly --destdir=${OFFLINE_REPO_DIR} $i -y
+ done
done
fi
- # In repository folder create repositor
+ # Create repository
createrepo $OFFLINE_REPO_DIR
;;
diff --git a/build/download/__init__.py b/build/download/__init__.py
index b1ef8d99..4f259ea7 100644
--- a/build/download/__init__.py
+++ b/build/download/__init__.py
@@ -17,4 +17,3 @@
# limitations under the License.
# COPYRIGHT NOTICE ENDS HERE
-
diff --git a/build/download/clean_docker_images.py b/build/download/clean_docker_images.py
index 186bfd60..42157490 100755
--- a/build/download/clean_docker_images.py
+++ b/build/download/clean_docker_images.py
@@ -68,4 +68,3 @@ def main():
if __name__ == '__main__':
main()
-
diff --git a/build/download/docker_downloader.py b/build/download/docker_downloader.py
index 27dde12f..847bc180 100755
--- a/build/download/docker_downloader.py
+++ b/build/download/docker_downloader.py
@@ -168,8 +168,7 @@ class DockerDownloader(ConcurrentDownloader):
:param image_name: name of the image from list
"""
dst = '{}/{}'.format(output_dir, self._image_filename(image_name))
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
+ os.makedirs(output_dir, exist_ok=True)
try:
with open(dst, 'wb') as f:
for chunk in image.save(named=self.image_registry_name(image_name)):
diff --git a/build/download/pypi_downloader.py b/build/download/pypi_downloader.py
index 10ac7b9f..aa2a46e0 100755
--- a/build/download/pypi_downloader.py
+++ b/build/download/pypi_downloader.py
@@ -47,7 +47,7 @@ class PyPiDownloader(CommandDownloader):
log.warning('Check mode for pypi is not implemented.')
return ''
- def _is_missing(self, item):
+ def _is_missing(self, item): # pylint: disable=W0613
"""
Check if item is missing
:param item: item to check
diff --git a/build/download/rpm_downloader.py b/build/download/rpm_downloader.py
index 415f9483..2b94e167 100755
--- a/build/download/rpm_downloader.py
+++ b/build/download/rpm_downloader.py
@@ -66,7 +66,7 @@ class RpmDownloader(CommandDownloader):
self._missing[dst].add(item)
return self._missing
- def _is_missing(self, item):
+ def _is_missing(self, item): # pylint: disable=W0613
"""
Check if item is missing
:param item: item to check
diff --git a/build/download/tox.ini b/build/download/tox.ini
new file mode 100644
index 00000000..e4616fff
--- /dev/null
+++ b/build/download/tox.ini
@@ -0,0 +1,7 @@
+[tox]
+envlist = download
+skipsdist = true
+
+[testenv:download]
+basepython = python3
+deps = -r{toxinidir}/requirements.txt
diff --git a/build/package.py b/build/package.py
index 9e37d4bf..c3d89d30 100755
--- a/build/package.py
+++ b/build/package.py
@@ -263,7 +263,7 @@ def build_offline_deliverables(build_version,
create_package(aux_content, aux_package_tar_path)
add_checksum_info(output_dir)
- shutil.rmtree(application_dir)
+ shutil.rmtree(application_dir, ignore_errors=True)
def run_cli():
diff --git a/build/tox.ini b/build/tox.ini
new file mode 100644
index 00000000..e14a29c3
--- /dev/null
+++ b/build/tox.ini
@@ -0,0 +1,7 @@
+[tox]
+envlist = package
+skipsdist = true
+
+[testenv:package]
+basepython = python3
+deps = -r{toxinidir}/requirements.txt
diff --git a/docs/InstallGuide.rst b/docs/InstallGuide.rst
index 4bd65fac..77aeff7f 100644
--- a/docs/InstallGuide.rst
+++ b/docs/InstallGuide.rst
@@ -64,7 +64,7 @@ KUBERNETES NODE OS NETWORK CPU RAM
SUM ``56 vCPUs`` ``176+ GB`` ``400 GB``
=========================================================== ============== ============ ===============
-Unfortunately, the offline installer supports only **RHEL 7.x** or **CentOS 7.6** distribution as of now. So, your VMs should be preinstalled with this operating system - the hypervisor and platform can be of your choosing.
+As of now, the offline installer supports only **RHEL 7.x** and **CentOS 7.6** distributions, with at least *@core* and *@base* package groups installed including *Mandatory* and *Default* package sets. So, your VMs should be preinstalled with this operating system - the hypervisor and platform can be of your choosing.
We will expect from now on that you installed four VMs and they are connected to the shared network. All VMs must be reachable from our *install-server* (below), which can be the hypervisor, *infra-node* or completely different machine. But in either of these cases the *install-server* must be able to connect over ssh to all of these nodes.
diff --git a/tools/cicdansible/ansible.cfg b/tools/cicdansible/ansible.cfg
index e74dda58..8ffd6ee2 100644
--- a/tools/cicdansible/ansible.cfg
+++ b/tools/cicdansible/ansible.cfg
@@ -4,6 +4,7 @@
stdout_callback=debug
#Default verbosity level, for logging all module outputs.
verbosity=1
+jinja2_extensions = jinja2.ext.do
[inventory]
#Fail when inventory parsing fails.
diff --git a/tools/cicdansible/group_vars/all.yml b/tools/cicdansible/group_vars/all.yml
index 3165e374..ef23eb98 100644
--- a/tools/cicdansible/group_vars/all.yml
+++ b/tools/cicdansible/group_vars/all.yml
@@ -37,11 +37,15 @@ subnet_range_start: "10.1.0.4"
subnet_range_end: "10.1.0.254"
#Ip address of router used as a gateway to external network.
router_addr: "10.1.0.1"
+#A list of dns resolvers for all instances
+dns_nameservers: []
#Cidr of external subnet to allow access to, 0.0.0.0/0 means allow internet access.
# For offline deployment it is recommended to set this to a cidr of intranet.
external_subnet_cidr: ""
#Address of cicd docker registry.
cicd_docker_registry: ""
+#Address of custom docker registry mirror
+docker_registry_mirror: ""
#Number of nodes to deploy.
num_nodes: "3"
#Stack name to deploy on heat.
diff --git a/tools/cicdansible/heat/installer.yaml b/tools/cicdansible/heat/installer.yaml
index 1f65f73f..b7b03d1a 100644
--- a/tools/cicdansible/heat/installer.yaml
+++ b/tools/cicdansible/heat/installer.yaml
@@ -58,6 +58,10 @@ parameters:
type: string
constraints:
- custom_constraint: ip_addr
+ dns_nameservers:
+ label: "dns resolvers"
+ description: "List of dns resolvers"
+ type: comma_delimited_list
public_network_name:
label: "name of the public network"
description: "Name of the public, internet facing network, also allowing access to company internal hosts"
@@ -140,6 +144,7 @@ resources:
allocation_pools:
- { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
gateway_ip: { get_param: router_addr }
+ dns_nameservers: { get_param: dns_nameservers }
ip_version: 4
#A port connected to the private network, taken by router.
routerport:
diff --git a/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2 b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
index 1c3ca9bb..01e5e512 100644
--- a/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
+++ b/tools/cicdansible/roles/configure_instances/templates/daemon.json.j2
@@ -1,3 +1,19 @@
+{% set insecure_registries = [] %}
+{% for registry in [cicd_docker_registry, docker_registry_mirror] %}
+{% if registry|length > 0 %}
+{% do insecure_registries.append(registry) %}
+{% endif %}
+{% endfor %}
{
-"insecure-registries": ["{{ cicd_docker_registry }}"]
+"insecure-registries": [
+{%- for registry in insecure_registries %}
+"{{ registry }}"{% if not loop.last %}, {% else %}]{% endif %}
+{% endfor %}
+{% if docker_registry_mirror|length > 0 %}
+,
+"registry-mirrors": ["http://{{ docker_registry_mirror }}"]
}
+{% else %}
+
+}
+{% endif %}
diff --git a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
index 25e7ac79..21dfadcf 100644
--- a/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
+++ b/tools/cicdansible/roles/setup_openstack_infrastructure/tasks/deploy/heat.yml
@@ -24,6 +24,7 @@
subnet_range_start: "{{ subnet_range_start }}"
subnet_range_end: "{{ subnet_range_end }}"
router_addr: "{{ router_addr }}"
+ dns_nameservers: "{{ dns_nameservers }}"
auth_key: "{{ auth_public_key }}"
image_name: "{{ image_name }}"
node_flavor_name: "{{ node_flavor_name }}"
diff --git a/tools/helm-healer.sh b/tools/helm-healer.sh
index a6e5b398..92ddbdb7 100755
--- a/tools/helm-healer.sh
+++ b/tools/helm-healer.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
@@ -191,6 +191,7 @@ helm_undeploy()
{
msg "Undeploy helm release name: ${1}"
helm undeploy ${1} --purge
+ sleep 15s
}
# arg: <job name>
@@ -212,41 +213,64 @@ delete_job()
done
}
-# arg: <resource> <release name>
-delete_resource()
+#arg: <component>
+get_resources_for_component()
{
- _resource="$1"
- _release="$2"
- msg "Delete ${_resource} for ${_release}..."
- {
- kubectl get ${_resource} -n ${NAMESPACE} \
- --ignore-not-found=true \
- --selector="release=${_release}" \
- --no-headers=true
-
- # this is due to missing "release" label in some pods
- # grep for the rescue...
- kubectl get ${_resource} -n ${NAMESPACE} \
- --no-headers=true | grep "^${_release}[-]"
- } | awk '{print $1}' | sort -u | while read -r _name _rest ; do
- echo "Deleting '${_name}'"
- kubectl delete ${_resource} -n ${NAMESPACE} \
- --cascade=true \
- --now=true \
- --wait=true \
- ${_name} \
- 2>&1 | grep -iv 'not[[:space:]]*found'
-
- # wait for resource to be deleted
- _output=start
- while [ -n "$_output" ] && sleep 1 ; do
- _output=$(kubectl get ${_resource} -n ${NAMESPACE} \
- --ignore-not-found=true \
- --no-headers=true \
- --field-selector="metadata.name=${_name}")
- done
+helm status $1 | awk -f <(cat - <<-'EOD'
+BEGIN {
+ work="no"
+ kind=""
+ a["dummy"]=""
+}
+
+$1 ~ ":" {
+ if ( $1 == "RESOURCES:" ) {
+ work="yes"
+} else {
+ work="no"
+}
+
+}
+
+$1 == "==>" {
+ split($2, a, "[/(]")
+ kind=a[2]
+}
+
+$1 != "NAME" && $1 != "==>" && work == "yes" && $1 !~ ":" && $1 != "" {
+ printf "%s/%s\n", kind, $1
+}
+
+EOD
+)
+}
+
+# arg: <resource>
+delete_resource()
+{
+ local _resource="$1"
+ local _kind="${_resource%/*}"
+ local _name="${_resource#*/}"
+
+
+ if kubectl get ${_resource} >/dev/null 2>&1; then
+ msg "${_resource} has not been removed with helm undeploy, manual removal is required. Proceeding"
+ kubectl delete ${_resource} -n ${NAMESPACE} \
+ --cascade=true \
+ --now=true \
+ --wait=true \
+ 2>&1 | grep -iv 'not[[:space:]]*found'
+
+ # wait for resource to be deleted
+ _output=start
+ while [ -n "$_output" ] && sleep 1 ; do
+ _output=$(kubectl get ${_kind} ${_name} -n ${NAMESPACE} \
+ --ignore-not-found=true \
+ --no-headers=true )
done
+ msg "Done"
+ fi
}
delete_namespace()
@@ -267,7 +291,18 @@ delete_namespace()
done
}
-# arg: [optional: subdir]
+delete_persistent_volume()
+{
+ _persistent_volume=$1
+ if kubectl get ${_persistent_volume} >/dev/null 2>&1; then
+ msg "${_persistent_volume} has not been removed with helm undeploy, manual removal is required. Proceeding"
+  #Very often k8s hangs in Terminating state for a PV due to a still active PVC. It is better to delete the PVC directly
+ _claim=$(kubectl get ${_persistent_volume} -o jsonpath='{ .spec.claimRef.name}')
+ delete_resource PersistentVolumeClaim/${_claim}
+ fi
+}
+
+# arg: [optional: directory]
delete_storage()
{
_node=$(kubectl get nodes \
@@ -280,15 +315,11 @@ delete_storage()
error "Could not list kubernetes nodes - SKIPPING DELETION"
else
if [ -n "$1" ] ; then
- msg "Delete directory '${VOLUME_STORAGE}/${1}' on $_node"
- ssh -T $_node <<EOF
-rm -rf "${VOLUME_STORAGE}/${1}"
-EOF
+ msg "Delete directory '${1}' on $_node"
+ ssh $_node "rm -rf '${1}'"
else
msg "Delete directories '${VOLUME_STORAGE}/*' on $_node"
- ssh -T $_node <<EOF
-find "${VOLUME_STORAGE}" -maxdepth 1 -mindepth 1 -exec rm -rf '{}' \;
-EOF
+ ssh $_node "find '${VOLUME_STORAGE}' -maxdepth 1 -mindepth 1 -exec rm -rf '{}' \;"
fi
fi
}
@@ -308,13 +339,7 @@ docker_cleanup()
for _node in $_nodes ; do
msg "Docker cleanup on $_node"
- {
- ssh -T $_node >/dev/null <<EOF
-if which docker >/dev/null ; then
- docker system prune --force --all --volumes
-fi
-EOF
- } &
+ ssh $_node "docker system prune --force --all --volumes" >/dev/null &
done
msg "We are waiting now for docker cleanup to finish on all nodes..."
@@ -338,28 +363,57 @@ is_helm_serve_running()
# arg: <release name>
undeploy_component()
{
- _chart=$(echo "$1" | sed 's/[^-]*-//')
- helm_undeploy ${1}
-
- # for all kubernetes resources: kubectl api-resources
- # TODO: does deleted secret per component break something?
- for x in jobs \
- deployments \
- services \
- replicasets \
- statefulsets \
- daemonsets \
- pods \
- pvc \
- pv \
- ;
- do
- delete_resource ${x} ${1}
+ local _component=$1
+
+  #Because Helm undeploy is not reliable: Gathering resources assigned to component to track and remove orphans later
+ _component_resources=($(get_resources_for_component ${_component}))
+
+ declare -a _persistent_volumes
+ declare -a _standard
+ declare -a _unknown_kinds
+
+ for resource in ${_component_resources[@]}; do
+ case $resource in
+ CronJob/* | Job/* | Secret/* | ConfigMap/* | Pod/* | Service/* | Deployment/* | StatefulSet/*)
+ _standard+=(${resource});;
+ #Ignoring PVC, they will be handled along with PV as 'helm' status does not return them for some components
+ PersistentVolumeClaim/*)
+ ;;
+ PersistentVolume/*)
+ _persistent_volumes+=(${resource});;
+ *)
+ _unknown_kinds+=(${resource})
+ esac
done
- if [ -n "$VOLUME_STORAGE" ] ; then
- msg "Persistent volume data deletion in directory: ${VOLUME_STORAGE}/${1}"
- delete_storage "$1"
+
+ #Gathering physical location of directories for persistent volumes to delete them after undeploy
+ declare -a _physical_locations
+ for volume in ${_persistent_volumes[@]}; do
+ _physical_locations+=($(kubectl get ${volume} -o jsonpath='{ .spec.hostPath.path}' ))
+ done
+
+ helm_undeploy ${_component}
+
+ #Manual items removal
+ for resource in ${_standard[@]}; do
+ delete_resource ${resource}
+ done
+
+ for volume in ${_persistent_volumes[@]}; do
+ delete_persistent_volume ${volume}
+ done
+
+ for subdir in ${_physical_locations[@]}; do
+ delete_storage ${subdir}
+ done
+
+ if [ "${#_unknown_kinds[@]}" -ne 0 ] ; then
+ for resource in ${_unknown_kinds[@]}; do
+ error "Untracked resource kind present: ${resource}, attempting to delete it..."
+ delete_resource ${resource}
+ done
+ return
fi
}
diff --git a/tools/helm_deployment_status.py b/tools/helm_deployment_status.py
index 8917e992..f92773db 100755
--- a/tools/helm_deployment_status.py
+++ b/tools/helm_deployment_status.py
@@ -111,20 +111,20 @@ def exec_healthcheck(hp_script, namespace, hp_mode):
return hc.returncode
def check_readiness(k8s, verbosity):
- k8s_controllers, not_ready_controllers = get_k8s_controllers(k8s)
-
- # check pods only when it is explicitly wanted (judging readiness by deployment status)
- if verbosity > 1:
- pods = k8s.get_resources('api/v1', 'pods')
- unready_pods = chain.from_iterable(
- get_names(not_ready_pods(
- pods_by_parent(pods, x)))
- for x in not_ready_controllers)
- else:
- unready_pods = []
+ k8s_controllers, not_ready_controllers = get_k8s_controllers(k8s)
+
+ # check pods only when it is explicitly wanted (judging readiness by deployment status)
+ if verbosity > 1:
+ pods = k8s.get_resources('api/v1', 'pods')
+ unready_pods = chain.from_iterable(
+ get_names(not_ready_pods(
+ pods_by_parent(pods, x)))
+ for x in not_ready_controllers)
+ else:
+ unready_pods = []
- print_status(verbosity, k8s_controllers, unready_pods)
- return not not_ready_controllers
+ print_status(verbosity, k8s_controllers, unready_pods)
+ return not not_ready_controllers
def check_in_loop(k8s, max_time, sleep_time, verbosity):
max_end_time = datetime.datetime.now() + datetime.timedelta(minutes=max_time)
@@ -224,7 +224,7 @@ class Kubernetes:
req = requests.get(url, verify=False)
else:
req = requests.get(url, verify=self.crt_tmp_file.name, cert=self.crt_tmp_file.name)
- except requests.exceptions.ConnectionError as err:
+ except requests.exceptions.ConnectionError:
sys.exit('Error: Could not connect to {}'.format(self.url))
if req.status_code == 200:
json = req.json()
@@ -264,7 +264,7 @@ class Kubernetes:
config['users'][0]['user']['client-certificate-data'])))
certs.update(dict(client_key=b64decode(
config['users'][0]['user']['client-key-data'])))
- except KeyError as err:
+ except KeyError:
print('Warning: could not get Kubernetes config for certificates. ' \
'Turning off SSL authentication.')
self.no_ssl_auth = True
diff --git a/tools/remove_runtime_images.py b/tools/remove_runtime_images.py
index 67d732bb..40f38eb1 100755
--- a/tools/remove_runtime_images.py
+++ b/tools/remove_runtime_images.py
@@ -76,4 +76,3 @@ def run_cli():
if __name__ == '__main__':
run_cli()
-
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..8237e045
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion = 3.2.0
+envlist = py
+skipsdist = true
+requires = pip >= 8
+
+[testenv]
+basepython = python3
+whitelist_externals =
+ git
+ bash
+deps =
+ coala-bears
+
+[testenv:py]
+commands_pre =
+ /bin/sh -c "git --no-pager diff HEAD HEAD^ --name-only '*.py' > /tmp/.coalist_py"
+commands =
+ /bin/bash -c "coala --non-interactive --disable-caching --no-autoapply-warn py --files $(</tmp/.coalist_py) \ "