Diffstat (limited to 'build')
-rwxr-xr-x  build/build_nexus_blob.sh            229
-rw-r--r--  build/data_lists/deb_packages.list    22
-rw-r--r--  build/download/requirements.txt        2
-rwxr-xr-x  build/fetch_and_patch_charts.sh       70
-rw-r--r--  build/package.conf                    80
-rwxr-xr-x  build/package.py                      67
-rwxr-xr-x  build/package.sh                     267
-rw-r--r--  build/requirements.txt                 2
8 files changed, 202 insertions, 537 deletions
diff --git a/build/build_nexus_blob.sh b/build/build_nexus_blob.sh
index 6ecab695..51ab05e7 100755
--- a/build/build_nexus_blob.sh
+++ b/build/build_nexus_blob.sh
@@ -62,31 +62,127 @@ LISTS_DIR="${LOCAL_PATH}/data_lists"
 COMMANDS=(jq docker expect npm twine)
 
 usage () {
-    echo "  Example usage: build_nexus_blob.sh --input-directory </path/to/downloaded/files/dir> --output-directory
-    </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list> [--load-docker-images]
-
-    -i  | --input-directory            directory containing file needed to create nexus blob. The structure of this directory must organized as described in build guide
-    -ld | --load-docker-images         load docker images from stored files in the input directory
-    -o  | --output-directory
-    -rl | --resource-list-directory    directory with files containing docker, pypi and npm lists
+    echo "
+    Usage: $(basename $0) [OPTION...] [FILE]...
+
+    This script prepares Nexus repositories data blobs for ONAP
+
+    Following dependencies are required: nodejs, jq, docker, twine, expect
+    By default, without any lists or dirs provided, the resources are expected as downloaded
+    during download process and default lists will be used to build the Nexus blob in the same
+    resources dir
+
+    Examples:
+        $(basename $0) --input-directory </path/to/downloaded/files/dir> -ld --output-directory
+        </path/to/output/dir> --resource-list-directory </path/to/dir/with/resource/list>
+        # Docker images, npms and pypi packages will be loaded from specified directory
+        # and the blob is created
+        $(basename $0) -d </path/to/docker/images/list> -d </path/to/another/docker/images/list>
+        -n </path/to/npm/list> -p </path/to/pip/list>
+        # Docker images, npms and pypi packages will be pushed to Nexus based on provided data
+        # lists (multiple lists can be provided)
+
+    -d  | --docker                     use specific list of docker images to be pushed into Nexus
+                                       (in case of -ld used, this list will be used for loading of
+                                       the images)
+    -h  | --help                       print this usage
+    -i  | --input-directory            use specific directory containing resources needed to
+                                       create nexus blob
+                                       The structure of this directory must be organized as described
+                                       in build guide
+    -ld | --load-docker-images         load docker images from resource directory
+    -n  | --npm                        list of npm packages to be pushed into Nexus
+    -o  | --output-directory           use specific directory for the target blob
+    -p  | --pypi                       use specific list of pypi packages to be pushed into Nexus
+    -rl | --resource-list-directory    use specific directory with docker, pypi and npm lists
     "
     exit 1
 }
 
+publish_ports () {
+    for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true) ${NEXUS_PORT}; do
+        if [[ ${REGISTRY} != *":"* ]]; then
+            if [[ ${PUBLISHED_PORTS} != *"80:${NEXUS_DOCKER_PORT}"* ]]; then
+                PUBLISHED_PORTS="${PUBLISHED_PORTS} -p 80:${NEXUS_DOCKER_PORT}"
+            fi
+        else
+            REGISTRY_PORT="$(sed 's/^.*\:\([[:digit:]]*\)$/\1/' <<< ${REGISTRY})"
+            if [[ ${PUBLISHED_PORTS} != *"${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"* ]]; then
+                PUBLISHED_PORTS="${PUBLISHED_PORTS} -p ${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"
+            fi
+        fi
+    done
+}
+
+simulated_hosts () {
+    SIMUL_HOSTS=($(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$// ; s/:.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true ) ${NEXUS_DOMAIN})
+    for HOST in "${SIMUL_HOSTS[@]}"; do
+        if ! grep -wq ${HOST} /etc/hosts; then
+            echo "127.0.0.1 ${HOST}" >> /etc/hosts
+        fi
+    done
+}
+
 load_docker_images () {
     for ARCHIVE in $(sed $'s/\r// ; /^#/d ; s/\:/\_/g ; s/\//\_/g ; s/$/\.tar/g' ${1} | awk '{ print $1 }'); do
         docker load -i ${NXS_SRC_DOCKER_IMG_DIR}/${ARCHIVE}
     done
 }
 
+push_npm () {
+    for ARCHIVE in $(sed $'s/\r// ; s/\\@/\-/g ; s/$/\.tgz/g' ${1}); do
+        npm publish --access public ${ARCHIVE} > /dev/null
+        echo "NPM ${ARCHIVE} pushed to Nexus"
+    done
+}
+
+push_pip () {
+    for PACKAGE in $(sed $'s/\r//; s/==/-/' ${NXS_PYPI_LIST}); do
+        twine upload -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" --repository-url ${PYPI_REGISTRY} ${PACKAGE}*
+        echo "PYPI ${PACKAGE} pushed to Nexus"
+    done
+}
+
+docker_login () {
+    for REGISTRY in $(sed -n '/\.[^/].*\//p' ${1} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY}) ${DOCKER_REGISTRY}; do
+        if ! grep -wqs ${REGISTRY} ~/.docker/config.json; then
+            echo "Docker login to ${REGISTRY}"
+            echo -n "${NEXUS_PASSWORD}" | docker login -u "${NEXUS_USERNAME}" --password-stdin ${REGISTRY} > /dev/null
+        fi
+    done
+}
+
+push_docker () {
+    for IMAGE in $(sed $'s/\r// ; /^#/d' ${1} | awk '{ print $1 }'); do
+        PUSH=""
+        if [[ ${IMAGE} != *"/"* ]]; then
+            PUSH="${DOCKER_REGISTRY}/library/${IMAGE}"
+        elif [[ ${IMAGE} == *"${DEFAULT_REGISTRY}"* ]]; then
+            if [[ ${IMAGE} == *"/"*"/"* ]]; then
+                PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'/' <<< ${IMAGE})"
+            else
+                PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'\/library/' <<< ${IMAGE})"
+            fi
+        elif [[ -z $(sed -n '/\.[^/].*\//p' <<< ${IMAGE}) ]]; then
+            PUSH="${DOCKER_REGISTRY}/${IMAGE}"
+        fi
+        if [[ ! -z ${PUSH} ]]; then
+            docker tag ${IMAGE} ${PUSH}
+        else
+            PUSH="${IMAGE}"
+        fi
+        docker push ${PUSH}
+        echo "${IMAGE} pushed as ${PUSH} to Nexus"
+    done
+}
+
 # Verify all dependencies are available in PATH
 FAILED_COMMANDS=()
-for cmd in ${COMMANDS[*]};
-do
+for cmd in ${COMMANDS[*]}; do
    command -v $cmd >/dev/null 2>&1 || FAILED_COMMANDS+=($cmd)
 done
-if [ ${#FAILED_COMMANDS[*]} -gt 0 ];
-then
+
+if [ ${#FAILED_COMMANDS[*]} -gt 0 ]; then
    echo "Following commands were not found in PATH and are required:"
    echo ${FAILED_COMMANDS[*]}
    echo "Aborting."
@@ -95,14 +191,23 @@ fi
 
 while [ "${1}" != "" ]; do
     case ${1} in
+        -d | --docker )                    shift
+                                           NXS_DOCKER_IMG_LISTS+=("${1}")
+                                           ;;
         -i | --input-directory )           shift
                                            DATA_DIR="${1}"
                                            ;;
         -ld | --load-docker-images )       DOCKER_LOAD="true"
                                            ;;
+        -n | --npm )                       shift
+                                           NXS_NPM_LISTS+=("${1}")
+                                           ;;
         -o | --output-directory )          shift
                                            NEXUS_DATA_DIR="${1}"
                                            ;;
+        -p | --pypi )                      shift
+                                           NXS_PYPI_LISTS+=("${1}")
+                                           ;;
         -rl | --resource-list-directory )  shift
                                            LISTS_DIR="${1}"
                                            ;;
@@ -119,34 +224,42 @@ NXS_SRC_NPM_DIR="${DATA_DIR}/offline_data/npm_tar"
 NXS_SRC_PYPI_DIR="${DATA_DIR}/offline_data/pypi"
 
 # Setup specific resources lists
+NXS_INFRA_LIST="${LISTS_DIR}/infra_docker_images.list"
 NXS_DOCKER_IMG_LIST="${LISTS_DIR}/onap_docker_images.list"
+NXS_RKE_DOCKER_IMG_LIST="${LISTS_DIR}/rke_docker_images.list"
 NXS_NPM_LIST="${LISTS_DIR}/onap_npm.list"
 NXS_PYPI_LIST="${LISTS_DIR}/onap_pip_packages.list"
 
 # Setup Nexus image used for build and install infra
-INFRA_LIST="${LISTS_DIR}/infra_docker_images.list"
-NEXUS_IMAGE="$(grep sonatype/nexus3 ${INFRA_LIST})"
+NEXUS_IMAGE="$(grep sonatype/nexus3 ${NXS_INFRA_LIST})"
 NEXUS_IMAGE_TAR="${DATA_DIR}/offline_data/docker_images_infra/$(sed 's/\//\_/ ; s/$/\.tar/ ; s/\:/\_/' <<< ${NEXUS_IMAGE})"
 
+# Set default lists if nothing specific defined by user
+if [ $((${#NXS_DOCKER_IMG_LISTS[@]} + ${#NXS_NPM_LISTS[@]} + ${#NXS_PYPI_LISTS[@]})) -eq 0 ]; then
+    NXS_DOCKER_IMG_LISTS=("${NXS_DOCKER_IMG_LIST}" "${NXS_RKE_DOCKER_IMG_LIST}")
+    NXS_NPM_LISTS[0]="${NXS_NPM_LIST}"
+    NXS_PYPI_LISTS[0]="${NXS_PYPI_LIST}"
+fi
+
+# Backup /etc/hosts
+HOSTS_BACKUP="$(eval ${TIMESTAMP}_hosts.bk)"
+cp /etc/hosts /etc/${HOSTS_BACKUP}
+
+# Backup the current docker registry settings
+if [ -f ~/.docker/config.json ]; then
+    DOCKER_CONF_BACKUP="$(eval ${TIMESTAMP}_config.json.bk)"
+    mv ~/.docker/config.json ~/.docker/${DOCKER_CONF_BACKUP}
+fi
+
 # Setup default ports published to host as docker registry
 PUBLISHED_PORTS="-p ${NEXUS_PORT}:${NEXUS_PORT} -p ${NEXUS_DOCKER_PORT}:${NEXUS_DOCKER_PORT}"
 
 # Setup additional ports published to host based on simulated docker registries
-for REGISTRY in $(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true); do
-    if [[ ${REGISTRY} != *":"* ]]; then
-        if [[ ${PUBLISHED_PORTS} != *"80:${NEXUS_DOCKER_PORT}"* ]]; then
-            PUBLISHED_PORTS="${PUBLISHED_PORTS} -p 80:${NEXUS_DOCKER_PORT}"
-        fi
-    else
-        REGISTRY_PORT="$(sed 's/^.*\:\([[:digit:]]*\)$/\1/' <<< ${REGISTRY})"
-        if [[ ${PUBLISHED_PORTS} != *"${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"* ]]; then
-            PUBLISHED_PORTS="${PUBLISHED_PORTS} -p ${REGISTRY_PORT}:${NEXUS_DOCKER_PORT}"
-        fi
-    fi
-done
-
 # Setup simulated domain names to be able to push all to private Nexus repository
-SIMUL_HOSTS="$(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$// ; s/:.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY} || true) ${NEXUS_DOMAIN}"
+for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do
+    publish_ports "${DOCKER_IMG_LIST}"
+    simulated_hosts "${DOCKER_IMG_LIST}"
+done
 
 # Nexus repository configuration setup
 NEXUS_CONFIG_GROOVY='import org.sonatype.nexus.security.realm.RealmManager
@@ -186,23 +299,6 @@ repositoryManager.update(conf)'
 NEXUS_CONFIG=$(echo "${NEXUS_CONFIG_GROOVY}" | jq -Rsc '{"name":"configure", "type":"groovy", "content":.}')
 
 #################################
-# Prepare the local environment #
-#################################
-
-# Add simulated domain names to /etc/hosts
-HOSTS_BACKUP="$(eval ${TIMESTAMP}_hosts.bk)"
-cp /etc/hosts /etc/${HOSTS_BACKUP}
-for DNS in ${SIMUL_HOSTS}; do
-    echo "127.0.0.1 ${DNS}" >> /etc/hosts
-done
-
-# Backup the current docker registry settings
-if [ -f ~/.docker/config.json ]; then
-    DOCKER_CONF_BACKUP="$(eval ${TIMESTAMP}_config.json.bk)"
-    mv ~/.docker/config.json ~/.docker/${DOCKER_CONF_BACKUP}
-fi
-
-#################################
 # Docker repository preparation #
 #################################
 
@@ -210,7 +306,9 @@ if [ "${DOCKER_LOAD}" == "true" ]; then
     # Load predefined Nexus image
     docker load -i ${NEXUS_IMAGE_TAR}
     # Load all necessary images
-    load_docker_images ${NXS_DOCKER_IMG_LIST}
+    for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do
+        load_docker_images "${DOCKER_IMG_LIST}"
+    done
 fi
 
 ################################
@@ -282,20 +380,19 @@ if [[ ! -z "${PATCHED_NPM}" ]] && ! zgrep -aq "${NPM_REGISTRY}" "${PATCHED_NPM}"
 fi
 
 # Push NPM packages to Nexus repository
-for ARCHIVE in $(sed $'s/\r// ; s/\\@/\-/g ; s/$/\.tgz/g' ${NXS_NPM_LIST});do
-    npm publish --access public ${ARCHIVE} > /dev/null
-    echo "NPM ${ARCHIVE} pushed to Nexus"
+for NPM_LIST in "${NXS_NPM_LISTS[@]}"; do
+    push_npm "${NPM_LIST}"
 done
 popd
+npm logout
 
###############################
## Populate PyPi repository  #
###############################
 
 pushd ${NXS_SRC_PYPI_DIR}
-for PACKAGE in $(sed $'s/\r//; s/==/-/' ${NXS_PYPI_LIST}); do
-    twine upload -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" --repository-url ${PYPI_REGISTRY} ${PACKAGE}*
-    echo "PYPI ${PACKAGE} pushed to Nexus"
+for PYPI_LIST in "${NXS_PYPI_LISTS[@]}"; do
+    push_pip "${PYPI_LIST}"
 done
 popd
 
@@ -304,34 +401,12 @@ popd
###############################
 
 # Login to simulated docker registries
-for REGISTRY in $(sed -n '/\.[^/].*\//p' ${NXS_DOCKER_IMG_LIST} | sed -e 's/\/.*$//' | sort -u | grep -v ${DEFAULT_REGISTRY}) ${DOCKER_REGISTRY}; do
-    echo "Docker login to ${REGISTRY}"
-    docker login -u "${NEXUS_USERNAME}" -p "${NEXUS_PASSWORD}" ${REGISTRY} > /dev/null
-done
-
 # Push images to private nexus based on the list
 # Images from default registry need to be tagged to private registry
 # and those without defined repository in tag uses default repository 'library'
-for IMAGE in $(sed $'s/\r// ; /^#/d' ${NXS_DOCKER_IMG_LIST} | awk '{ print $1 }'); do
-    PUSH=""
-    if [[ ${IMAGE} != *"/"* ]]; then
-        PUSH="${DOCKER_REGISTRY}/library/${IMAGE}"
-    elif [[ ${IMAGE} == *"${DEFAULT_REGISTRY}"* ]]; then
-        if [[ ${IMAGE} == *"/"*"/"* ]]; then
-            PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'/' <<< ${IMAGE})"
-        else
-            PUSH="$(sed 's/'"${DEFAULT_REGISTRY}"'/'"${DOCKER_REGISTRY}"'\/library/' <<< ${IMAGE})"
-        fi
-    elif [[ -z $(sed -n '/\.[^/].*\//p' <<< ${IMAGE}) ]]; then
-        PUSH="${DOCKER_REGISTRY}/${IMAGE}"
-    fi
-    if [[ ! -z ${PUSH} ]]; then
-        docker tag ${IMAGE} ${PUSH}
-    else
-        PUSH="${IMAGE}"
-    fi
-    docker push ${PUSH}
-    echo "${IMAGE} pushed as ${PUSH} to Nexus"
+for DOCKER_IMG_LIST in "${NXS_DOCKER_IMG_LISTS[@]}"; do
+    docker_login "${DOCKER_IMG_LIST}"
+    push_docker "${DOCKER_IMG_LIST}"
 done
 
##############################
@@ -344,7 +419,7 @@ echo "Stopping Nexus and returning backups"
 docker stop ${NEXUS_CONT_ID} > /dev/null
 
 # Return backed up configuration files
-mv -f /etc/${HOSTS_BACKUP} /etc/hosts
+mv -f "/etc/${HOSTS_BACKUP}" /etc/hosts
 
 if [ -f ~/.docker/${DOCKER_CONF_BACKUP} ]; then
     mv -f ~/.docker/${DOCKER_CONF_BACKUP} ~/.docker/config.json
diff --git a/build/data_lists/deb_packages.list b/build/data_lists/deb_packages.list
deleted file mode 100644
index ef9b0229..00000000
--- a/build/data_lists/deb_packages.list
+++ /dev/null
@@ -1,22 +0,0 @@
-archive.ubuntu.com/ubuntu/ubuntu/pool/universe/a/ansible/ansible_2.0.0.2-2_all.deb
-archive.ubuntu.com/ubuntu/ubuntu/pool/main/i/ieee-data/ieee-data_20150531.1_all.deb
-archive.debian.org/debian/pool/main/liby/libyaml/libyaml-0-2_0.1.6-3_amd64.deb
-security.ubuntu.com/ubuntu/pool/main/p/python-crypto/python-crypto_2.6.1-6ubuntu0.16.04.3_amd64.deb
-archive.ubuntu.com/ubuntu/pool/universe/p/python-ecdsa/python-ecdsa_0.13-2_all.deb
-archive.ubuntu.com/ubuntu/pool/main/p/python-httplib2/python-httplib2_0.9.1+dfsg-1_all.deb
-archive.ubuntu.com/ubuntu/pool/main/j/jinja2/python-jinja2_2.8-1_all.deb
-archive.ubuntu.com/ubuntu/pool/main/m/markupsafe/python-markupsafe_0.23-2build2_amd64.deb
-archive.ubuntu.com/ubuntu/pool/main/p/python-netaddr/python-netaddr_0.7.18-1_all.deb
-archive.ubuntu.com/ubuntu/pool/main/p/paramiko/python-paramiko_1.16.0-1ubuntu0.2_all.deb
-archive.ubuntu.com/ubuntu/pool/universe/libs/libselinux/python-selinux_2.2.2-1_amd64.deb
-archive.ubuntu.com/ubuntu/pool/main/s/six/python-six_1.10.0-3_all.deb
-archive.ubuntu.com/ubuntu/pool/main/p/pyyaml/python-yaml_3.11-3build1_amd64.deb
-security.ubuntu.com/ubuntu/pool/main/a/apt/apt-utils_1.2.29ubuntu0.1_amd64.deb
-security.ubuntu.com/ubuntu/pool/main/c/cron/cron_3.0pl1-128ubuntu2_amd64.deb
-archive.ubuntu.com/ubuntu/pool/main/g/gobject-introspection/libgirepository-1.0-1_1.46.0-3ubuntu1_amd64.deb
-archive.ubuntu.com/ubuntu/pool/main/d/dbus-glib/libdbus-glib-1-2_0.106-1_amd64.deb
-security.ubuntu.com/ubuntu/pool/main/a/apt/libapt-inst2.0_1.2.29ubuntu0.1_amd64.deb
-archive.ubuntu.com/ubuntu/pool/main/i/iso-codes/iso-codes_3.65-1_all.deb
-security.ubuntu.com/ubuntu/pool/main/d/dh-python/dh-python_2.20151103ubuntu1_all.deb
-security.ubuntu.com/ubuntu/pool/main/d/distro-info-data/distro-info-data_0.28ubuntu0.9_all.deb
-archive.ubuntu.com/ubuntu/pool/main/g/gobject-introspection/gir1.2-glib-2.0_1.46.0-3ubuntu1_amd64.deb
diff --git a/build/download/requirements.txt b/build/download/requirements.txt
index 3eee2a2f..681c0dd5 100644
--- a/build/download/requirements.txt
+++ b/build/download/requirements.txt
@@ -1,3 +1,3 @@
-docker==3.7.2
+docker>=3.7.2
 prettytable==0.7.2
 retrying==1.3.3
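With the per-list options added above, build_nexus_blob.sh no longer assumes a single hard-coded docker list. A minimal sketch of the new call style, using the default data list names from this repository (the paths are illustrative, any readable list files work):

    # Push two docker image lists plus the npm and pypi lists into the Nexus blob;
    # -ld first loads the image archives found under the input directory.
    ./build_nexus_blob.sh \
        -d ./data_lists/onap_docker_images.list \
        -d ./data_lists/rke_docker_images.list \
        -n ./data_lists/onap_npm.list \
        -p ./data_lists/onap_pip_packages.list \
        -i /data/offline-resources -ld

Run with no list options at all, the script now falls back to the onap_* and rke_docker_images.list defaults resolved against --resource-list-directory.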
diff --git a/build/fetch_and_patch_charts.sh b/build/fetch_and_patch_charts.sh
deleted file mode 100755
index 22d45e66..00000000
--- a/build/fetch_and_patch_charts.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#! /usr/bin/env bash
-
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-# This simple script should be used during build / packaging process
-# and it should be referenced in BuildGuide.
-# Patching of helm charts is the only way for OOM charts to be compatible
-# with this offline installer. This will become obsolete once native
-# solution is implemented in charts themselves and which is tracked
-# in OOM-1610
-
-# fail fast
-set -e
-
-# colours
-_R='\033[0;31;1m' #Red
-_G='\033[0;32;1m' #Green
-_Y='\033[0;33;1m' #Yellow
-C_='\033[0m' #Color off
-
-usage () {
-    echo "Usage:"
-    echo -e "./$(basename $0) <helm charts repo> <commit/tag/branch> <patchfile> <target_dir>\n"
-    echo "Example: ./$(basename $0) https://gerrit.onap.org/r/oom master /root/offline-installer/patches/onap.patch /root/offline-installer/ansible/application/helm_charts"
-}
-
-if [ "$#" -ne 4 ]; then
-    echo "This script should get exactly 4 arguments!"
-    echo -e "Wrong number of parameters provided\n"
-    usage
-    exit 1
-fi
-
-# main
-# git and patch tools are preconditions for this to work
-CURR=1
-TOTAL=5
-PATCH_FILE=$(realpath "${3}")
-
-echo -e "${_G}[Step $((CURR++))/${TOTAL} cloning repo with charts to be patched]${C_}"
-git clone --recurse-submodules "${1}" "${4}"
-
-echo -e "${_G}[Step $((CURR++))/${TOTAL} setting working dir to ${4}]${C_}"
-pushd "${4}"
-
-echo -e "${_G}[Step $((CURR++))/${TOTAL} git-checkout to correct base]${C_}"
-git checkout "${2}"
-
-echo -e "${_G}[Step $((CURR++))/${TOTAL} patching charts]${C_}"
-git apply "${PATCH_FILE}"
-
-echo -e "${_G}[Step $((CURR++))/${TOTAL} returning to original working directory]${C_}"
-popd
-
diff --git a/build/package.conf b/build/package.conf
deleted file mode 100644
index d74eac0a..00000000
--- a/build/package.conf
+++ /dev/null
@@ -1,80 +0,0 @@
-# For the packaging script it is expected that all artifacts are present on local file system.
-# Artifacts include:
-#   - installer source code (this git repository content)
-#   - all binary artifacts pre-downloaded from internet (docker images, rpm packages, npm packages, Maven artifacts etc.)
-# Script will create 3 packages:
-#   offline-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar
-#     - installer code (ansible dir in this git repo)
-#     - Files/dirs defined by APP_CONFIGURATION if any.
-#     - Directory content of HELM_CHARTS_DIR if defined.
-#   offline-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar
-#     - Directory content of APP_BINARY_RESOURCES_DIR if defined.
-#   offline-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar
-#     - Files defined by APP_AUX_BINARIES if any.
-
-###########################
-# Application Helm charts #
-###########################
-
-# Provide application installed to Kubernetes cluster. Helm chart is the supported format https://helm.sh/.
-# Directory provided here must contain all the Chart directories of the application (https://docs.helm.sh/developing_charts/#charts) and Makefile.
-# E.g. in case of ONAP oom repo it will be the content of kubernetes directory.
-# NOTE: Leaving this variable commented out will mean that no Helm application will be installed to
-# offline Kubernetes cluster. This may be sometimes wanted.
-#HELM_CHARTS_DIR=<oom-clone>/kubernetes
-HELM_CHARTS_DIR=/tmp/oom-clone/kubernetes
-
-#####################################
-# Application install configuration #
-#####################################
-
-# APP_CONFIGURATION array variable can be used to provide files/directories
-# into sw package available for the Ansible process to consume.
-# The main configuration file for your application is
-# "application_configuration.yml" (name of file can be anything) where user
-# shall provide values to control ansible installer process. Yml file is given
-# as command line parameter to ansible run.
-# See more from UserGuide documentation (LINK HERE) how to use installer.
-# Available configuration parameters user can configure are seen from group_vars files:
-#   ansible/group_vars/all.yml
-#   ansible/group_vars/infrastucture.yml
-#   ansible/group_vars/kubernetes.yml
-# Additionally user can optionally provide own ansible roles code to customize install process.
-# At the moment 2 custom ansible roles are supported pre and post install roles, which are
-# run by installer prior Helm install and after Kubernetes app has been installed.
-# In application_configuration.yml those role names are configured with variables:
-#   application_pre_install_role: my-pre-install-role
-#   application_post_install_role: my-post-install-role
-# And according to Ansible functionality, roles' code must be placed to directories
-# with the same name:
-#APP_CONFIGURATION=(
-#    <offline-installer-clone>/config/application_configuration.yml
-#    <offline-installer-clone>/patches/my-pre-install-role
-#    ~/myappfiles/my-post-install-role
-#)
-APP_CONFIGURATION=(
-    /tmp/offline-installer/config/application_configuration.yml
-    /tmp/offline-installer/patches/onap-patch-role
-)
-
-# APP_BINARY_RESOURCES_DIR is variable to directory containing directories and files for offline
-# installer infra and the the application run in that infra.
-# Currently mixed with infra and app binaries to same, maybe to be separated in the future.
-# Following directories and files are expected:
-#   downloads ... directory with exacutable binaries for offline infra usage (e.g. rancher, kubectl, jq, helm)
-#   git-repo ... directory with git repos for application needs to be simulated
-#   http ... directory with http simulation files (e.g. Maven artifacts are simulated here)
-#   offline_data ... directory with offline infra specific docker images
-#   pkg ... directory with rpm/deb packages mainly for offline infra
-#   nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application.
-#APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries
-APP_BINARY_RESOURCES_DIR=/tmp/onap-offline/resources
-
-# APP_AUX_BINARIES is array variable for additional application files.
-# Docker images supported currently in tar format.
-#APP_AUX_BINARIES=(
-#    ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar
-#    ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar
-#)
-APP_AUX_BINARIES=()
-
diff --git a/build/package.py b/build/package.py
index 8a1808b3..d30b40c1 100755
--- a/build/package.py
+++ b/build/package.py
@@ -1,4 +1,4 @@
-#! /usr/bin/env python
+#! /usr/bin/env python3
 # -*- coding: utf-8 -*-
 
 # COPYRIGHT NOTICE STARTS HERE
@@ -68,7 +68,7 @@ def prepare_application_repository(directory, url, refspec, patch_path):
 
     return repository
 
-def create_package_info_file(output_file, repository_list):
+def create_package_info_file(output_file, repository_list, tag):
     """
     Generates text file in json format containing basic information about the build
     :param output_file:
@@ -78,7 +78,8 @@ def create_package_info_file(output_file, repository_list):
     log.info('Generating package.info file')
     build_info = {
         'Build_info': {
-            'build_date': datetime.now().strftime('%Y-%m-%d_%H-%M')
+            'build_date': datetime.now().strftime('%Y-%m-%d_%H-%M'),
+            'Version': tag
         }
     }
     for repository in repository_list:
@@ -98,25 +99,36 @@ def create_package(tar_content, file_name):
     log.info('Creating package {}'.format(file_name))
     with tarfile.open(file_name, 'w') as output_tar_file:
         for src, dst in tar_content.items():
-            output_tar_file.add(src, dst)
+            if src != '':
+                output_tar_file.add(src, dst)
 
 
-def build_offline_deliverables(application_repository_url,
+def build_offline_deliverables(build_version,
+                               application_repository_url,
                                application_repository_reference,
                                application_patch_file,
+                               application_charts_dir,
+                               application_configuration,
+                               application_patch_role,
                                output_dir,
                                resources_directory,
+                               aux_directory,
                                skip_sw,
                                skip_resources,
                                skip_aux,
                                overwrite):
     """
     Prepares offline deliverables
+    :param build_version: Version for packages tagging
     :param application_repository_url: git repository hosting application helm charts
     :param application_repository_reference: git refspec for repository hosting application helm charts
     :param application_patch_file: git patch file to be applied over application repository
+    :param application_charts_dir: path to directory under application repository containing helm charts
+    :param application_configuration: path to application configuration file (helm override configuration)
+    :param application_patch_role: path to application patch role (executed just before helm deploy)
     :param output_dir: Destination directory for saving packages
     :param resources_directory: Path to resource directory
+    :param aux_directory: Path to aux binary directory
     :param skip_sw: skip sw package generation
     :param skip_resources: skip resources package generation
     :param skip_aux: skip aux package generation
@@ -128,6 +140,7 @@ def build_offline_deliverables(application_repository_url,
         if not overwrite:
             log.error('Output directory is not empty, use overwrite to force build')
             raise FileExistsError
+        shutil.rmtree(output_dir)
 
     # Git
     offline_repository_dir = os.path.join(script_location, '..')
@@ -141,23 +154,23 @@ def build_offline_deliverables(application_repository_url,
 
     # Package info
     info_file = os.path.join(output_dir, 'package.info')
-    create_package_info_file(info_file, [application_repository, offline_repository])
+    create_package_info_file(info_file, [application_repository, offline_repository], build_version)
 
     # packages layout as dictionaries. <file> : <file location under tar archive>
     sw_content = {
         os.path.join(offline_repository_dir, 'ansible'): 'ansible',
-        os.path.join(offline_repository_dir, 'config',
-                     'application_configuration.yml'): 'ansible/application/application_configuration.yml',
-        os.path.join(offline_repository_dir, 'patches', 'onap-patch-role'): 'ansible/application/onap-patch-role',
-        os.path.join(application_dir, 'kubernetes'): 'ansible/application/helm_charts',
-        info_file: 'packge.info'
+        application_configuration: 'ansible/application/application_configuration.yml',
+        application_patch_role: 'ansible/application/onap-patch-role',
+        os.path.join(application_dir, application_charts_dir): 'ansible/application/helm_charts',
+        info_file: 'package.info'
     }
     resources_content = {
         resources_directory: '',
-        info_file: 'packge.info'
+        info_file: 'package.info'
    }
     aux_content = {
-        info_file: 'packge.info'
+        aux_directory: '',
+        info_file: 'package.info'
     }
 
     if not skip_sw:
@@ -167,7 +180,7 @@ def build_offline_deliverables(application_repository_url,
                                          os.path.join(offline_repository_dir, 'ansible', 'docker',
                                                       'build_ansible_image.sh'))
         installer_build.check_returncode()
         os.chdir(script_location)
-        sw_package_tar_path = os.path.join(output_dir, 'sw_package.tar')
+        sw_package_tar_path = os.path.join(output_dir, 'sw_package' + build_version + '.tar')
         create_package(sw_content, sw_package_tar_path)
 
     if not skip_resources:
@@ -201,11 +214,11 @@ def build_offline_deliverables(application_repository_url,
         createrepo = subprocess.run(['createrepo', os.path.join(resources_directory, 'pkg', 'rhel')])
         createrepo.check_returncode()
 
-        resources_package_tar_path = os.path.join(output_dir, 'resources_package.tar')
+        resources_package_tar_path = os.path.join(output_dir, 'resources_package' + build_version + '.tar')
         create_package(resources_content, resources_package_tar_path)
 
     if not skip_aux:
-        aux_package_tar_path = os.path.join(output_dir, 'aux_package.tar')
+        aux_package_tar_path = os.path.join(output_dir, 'aux_package' + build_version + '.tar')
         create_package(aux_content, aux_package_tar_path)
 
     shutil.rmtree(application_dir)
@@ -216,16 +229,28 @@ def run_cli():
     Run as cli tool
     """
     parser = argparse.ArgumentParser(description='Create Package For Offline Installer')
+    parser.add_argument('--build-version',
+                        help='version of the build', default='custom')
     parser.add_argument('application_repository_url', metavar='application-repository-url',
                         help='git repository hosting application helm charts')
     parser.add_argument('--application-repository_reference', default='master',
                         help='git refspec for repository hosting application helm charts')
     parser.add_argument('--application-patch_file',
                         help='git patch file to be applied over application repository', default='')
+    parser.add_argument('--application-charts_dir',
+                        help='path to directory under application repository containing helm charts', default='kubernetes')
+    parser.add_argument('--application-configuration',
+                        help='path to application configuration file (helm override configuration)',
+                        default='')
+    parser.add_argument('--application-patch-role',
+                        help='path to application patch role file (ansible role) to be executed right before installation',
+                        default='')
     parser.add_argument('--output-dir', '-o', default=os.path.join(script_location, '..', '..'),
                         help='Destination directory for saving packages')
-    parser.add_argument('--resources-directory',
+    parser.add_argument('--resources-directory', default='',
                         help='Path to resource directory')
+    parser.add_argument('--aux-directory',
+                        help='Path to aux binary directory', default='')
     parser.add_argument('--skip-sw', action='store_true', default=False,
                         help='Set to skip sw package generation')
     parser.add_argument('--skip-resources', action='store_true', default=False,
@@ -243,11 +268,16 @@ def run_cli():
     else:
         logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
 
-    build_offline_deliverables(args.application_repository_url,
+    build_offline_deliverables(args.build_version,
+                               args.application_repository_url,
                                args.application_repository_reference,
                                args.application_patch_file,
+                               args.application_charts_dir,
+                               args.application_configuration,
+                               args.application_patch_role,
                                args.output_dir,
                                args.resources_directory,
+                               args.aux_directory,
                                args.skip_sw,
                                args.skip_resources,
                                args.skip_aux,
@@ -256,4 +286,3 @@ def run_cli():
 
 if __name__ == '__main__':
     run_cli()
-
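The package.py rework above replaces the deleted package.conf and package.sh pair with plain command-line options. A hypothetical invocation mirroring the defaults from the removed package.conf (all paths and the version string are sample values, not requirements):

    # Build versioned sw/resources/aux tarballs from an oom clone and
    # pre-downloaded resources; flag spellings follow the argparse definitions above.
    ./build/package.py https://gerrit.onap.org/r/oom \
        --application-repository_reference master \
        --application-patch_file /tmp/offline-installer/patches/onap.patch \
        --application-charts_dir kubernetes \
        --application-configuration /tmp/offline-installer/config/application_configuration.yml \
        --application-patch-role /tmp/offline-installer/patches/onap-patch-role \
        --resources-directory /tmp/onap-offline/resources \
        --build-version 1.0.1 \
        --output-dir /tmp/package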
diff --git a/build/package.sh b/build/package.sh
deleted file mode 100755
index a3c1ded2..00000000
--- a/build/package.sh
+++ /dev/null
@@ -1,267 +0,0 @@
-#! /usr/bin/env bash
-
-# COPYRIGHT NOTICE STARTS HERE
-#
-# Copyright 2018-2019 © Samsung Electronics Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# COPYRIGHT NOTICE ENDS HERE
-
-
-# Scope of this packaging script is to generate tarfiles for offline installation
-# Build of any additional artifacts is out of scope for this script
-set -e
-
-crash () {
-    local exit_code="$1"
-    local cause="$2"
-    echo "Packaging script finished prematurely"
-    echo "Cause: $2"
-    exit "${exit_code}"
-}
-
-crash_arguments () {
-    echo "Missing some mandatory arguments!"
-    usage
-    exit 1
-}
-
-usage () {
-    echo "Usage:"
-    echo "  ./$(basename $0) <project_name> <version> <packaging_target_dir> [--conf <file>] [--force]"
-    echo ""
-    echo "Options:"
-    echo "  --force   Remove packaging_target_dir if exists prior to script execution"
-    echo "  --conf    Custom configuration file path for script"
-    echo ""
-    echo "Example:"
-    echo "  ./$(basename $0) myproject 1.0.1 /tmp/package --conf ~/myproject.conf"
-    echo ""
-    echo "packaging_target_dir will be created if does not exist. All tars will be produced into it."
-}
-
-function create_tar {
-    local tar_dir="$1"
-    local tar_name="$2"
-
-    cd ${tar_dir}
-    touch ${tar_name} # Trick to avoid sporadic "tar: .: file changed as we read it" warning message
-    tar --exclude=${tar_name} -cf ../${tar_name} .
-    cd - &> /dev/null # Trick to avoid printing new dir on stdout
-
-    # Remove packaged folders
-    find ${tar_dir}/* -maxdepth 0 -type d -exec rm -rf '{}' \;
-    # Remove packaged files
-    find ${tar_dir}/* ! -name ${tar_name} -exec rm '{}' \;
-    echo "Tar file created to $(dirname ${tar_dir})/${tar_name}"
-}
-
-function create_pkg {
-    local pkg_type="$1"
-    echo "[Creating ${pkg_type} package]"
-    create_tar "${PKG_ROOT}" offline-${PROJECT_NAME}-${PROJECT_VERSION}-${pkg_type}.tar
-    rm -rf "${PKG_ROOT}"
-}
-
-function add_metadata {
-    local metafile="$1"
-    echo "Project name:    ${PROJECT_NAME}" >> "${metafile}"
-    echo "Project version: ${PROJECT_VERSION}" >> "${metafile}"
-    echo "Package date:    ${TIMESTAMP}" >> "${metafile}"
-}
-
-function add_additions {
-    local source="$1"
-    local target="$2"
-    if [ -d "${source}" ]; then
-        mkdir -p "${target}/$(basename $source)"
-        cp -r "${source}" "${target}"
-        echo "Adding directory ... $(basename $source)"
-    else
-        if [ -f "${source}" ]; then
-            cp "${source}" "${target}"
-            echo "Adding file ... $(basename $source)"
-        else
-            crash 4 "Invalid source specified for packaging: $1"
-        fi
-    fi
-}
-
-function build_sw_artifacts {
-    cd ${LOCAL_PATH}/../ansible/docker
-    ./build_ansible_image.sh
-    if [ $? -ne 0 ]; then
-        crash 5 "Building of ansible runner image failed."
-    fi
-    cd -
-}
-
-function create_sw_package {
-    PKG_ROOT="${PACKAGING_TARGET_DIR}/sw"
-
-    # Create directory structure of the sw package
-    mkdir -p "${PKG_ROOT}"
-    cp -r ${LOCAL_PATH}/../ansible "${PKG_ROOT}"
-
-    # Add application additional files/dirs into package based on package.conf
-    for item in "${APP_CONFIGURATION[@]}";do
-        # all SW package addons are expected within ./ansible/application folder
-        add_additions "${item}" "${PKG_ROOT}/${APPLICATION_FILES_IN_PACKAGE}"
-    done
-
-    # Application Helm charts
-    # To be consistent with resources and aux dir, create charts dir even if no charts provided.
-    mkdir -p ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE}
-    if [ ! -z "${HELM_CHARTS_DIR}" ];
-    then
-        echo "Add application Helm charts"
-        # Copy charts available for ansible playbook to use/move them to target server/dir
-        cp -r "${HELM_CHARTS_DIR}"/* ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE}
-    else
-        echo "No Helm charts defined, no application will be automatically installed by this package!"
-    fi
-
-    # Add metadata to the package
-    add_metadata "${PKG_ROOT}"/package.info
-
-    # Create sw tar package
-    create_pkg sw
-}
-
-function create_resource_package {
-    PKG_ROOT="${PACKAGING_TARGET_DIR}/resources"
-
-    # Create directory structure of the resource package
-    mkdir -p "${PKG_ROOT}"
-
-    # Add artifacts into resource package based on package.conf config
-    if [ ! -z ${APP_BINARY_RESOURCES_DIR} ]; then
-        cp -r ${APP_BINARY_RESOURCES_DIR}/* ${PKG_ROOT}
-    fi
-
-    # tar file with nexus_data is expected, we should find and untar it
-    # before resource.tar is created
-    for i in `ls -1 ${PKG_ROOT} | grep tar`; do
-        tar tvf "${PKG_ROOT}/${i}" | grep nexus_data &> /dev/null
-        if [ $? -eq 0 ]; then
-            echo "Debug: tar file with nexus blobs detected ${PKG_ROOT}/${i}. Start unarchive ..."
-            tar xf "${PKG_ROOT}/${i}" -C "${PKG_ROOT}" &> /dev/null
-            echo "Debug: unarchive finished. Removing original file"
-            rm -f "${PKG_ROOT}/${i}"
-        fi
-    done
-
-    create_pkg resources
-}
-
-function create_aux_package {
-    PKG_ROOT="${PACKAGING_TARGET_DIR}/aux"
-
-    # Create directory structure of the aux resource package
-    mkdir -p "${PKG_ROOT}"
-
-    # Add artifacts into resource packagee based on package.conf config
-    for item in "${APP_AUX_BINARIES[@]}";do
-        add_additions "${item}" "${PKG_ROOT}"
-    done
-
-    create_pkg aux-resources
-}
-
-#
-# =================== Main ===================
-#
-
-PROJECT_NAME="$1"
-PROJECT_VERSION="$2"
-PACKAGING_TARGET_DIR="$3"
-
-TIMESTAMP=$(date -u +%Y%m%dT%H%M%S)
-SCRIPT_DIR=$(dirname "${0}")
-LOCAL_PATH=$(readlink -f "$SCRIPT_DIR")
-
-# Relative location inside the package for application related files.
-# Application means Kubernetes application installed by Helm charts on ready cluster (e.g. onap).
-APPLICATION_FILES_IN_PACKAGE="ansible/application"
-
-# Relative location inside the package to place Helm charts to be available for
-# Ansible process to transfer them into machine (infra node) running Helm repository.
-# NOTE: This is quite hardcoded place to put them and agreement with Ansible code
-# is done in ansible/group_vars/all.yml with variable "app_helm_charts_install_directory"
-# whihc value must match to value of this variable (with exception of slash '/'
-# prepended so that ansible docker/chroot process can see the dir).
-# This variable can be of course changed in package.conf if really needed if
-# corresponding ansible variable "app_helm_charts_install_directory" value
-# adjusted accordingly.
-HELM_CHARTS_DIR_IN_PACKAGE="${APPLICATION_FILES_IN_PACKAGE}/helm_charts"
-
-if [ $# -eq 0 ]; then
-    crash_arguments
-fi
-
-CONF_FILE=""
-FORCE_REMOVE=0
-arg_ind=0
-for arg in "$@"; do
-    shift
-    ((arg_ind+=1))
-    if [[ ${arg} =~ ^[-]{1,2}[a-zA-Z-]+$ && ${arg_ind} -lt 4 ]]; then
-        echo "Non-positional parameters should follow mandatory arguments!"
-        usage
-        exit 1
-    fi
-    case "$arg" in
-        -c|--conf)
-            CONF_FILE="$1" ;;
-        --force)
-            FORCE_REMOVE=1 ;;
-        *)
-            set -- "$@" "$arg"
-            if [ "$#" -lt 3 ]; then
-                crash_arguments
-            fi ;;
-    esac
-done
-
-if [ -z ${CONF_FILE} ]; then
-    CONF_FILE=${LOCAL_PATH}/package.conf # Fall to default conf file
-fi
-
-if [ ! -f ${CONF_FILE} ]; then
-    crash 2 "Mandatory config file missing! Provide it with --conf option or ${LOCAL_PATH}/package.conf"
-fi
-
-source ${CONF_FILE}
-pushd ${LOCAL_PATH}
-
-# checking bash capability of parsing arrays
-whotest[0]='test' || (crash 3 "Arrays not supported in this version of bash.")
-
-# Prepare output directory for our packaging
-# Check target dir exists and is not empty
-if [ -d ${PACKAGING_TARGET_DIR} ] && [ "$(ls -A ${PACKAGING_TARGET_DIR})" ]; then
-    if [ ${FORCE_REMOVE} -eq 0 ]; then
-        crash 1 "Target directory not empty. Use --force to overwrite it."
-    else
-        rm -rf ${PACKAGING_TARGET_DIR}
-    fi
-fi
-
-# Create all tars
-build_sw_artifacts
-create_sw_package
-create_resource_package
-create_aux_package
-
-popd
diff --git a/build/requirements.txt b/build/requirements.txt
index 2c404aed..39544458 100644
--- a/build/requirements.txt
+++ b/build/requirements.txt
@@ -1,2 +1,2 @@
-docker==3.7.2
+docker>=3.7.2
 gitpython==2.1.11
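Both requirements files now accept any docker SDK release from 3.7.2 onward instead of an exact pin. A quick way to pick up the relaxed constraints for the packaging and download tooling (a virtualenv is assumed but optional):

    # Install dependencies for both build/package.py and the download scripts.
    pip3 install -r build/requirements.txt -r build/download/requirements.txt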