-rwxr-xr-x  ansible/group_vars/all.yml |  47
-rw-r--r--  build/package.conf         | 105
-rwxr-xr-x  build/package.sh           | 118
-rwxr-xr-x  doc/BuildGuide.rst         |  23
4 files changed, 138 insertions, 155 deletions
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index d2385f65..dbb05205 100755
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -5,15 +5,18 @@
# Resource host information
-# folder on resource host where tars with resources are present
+# Directory on resource host where tars with resources are present
resources_dir:
-# tarfile name within this folder with offline infrastructure sw
+# Tarfile name within the resources_dir directory with offline infrastructure binaries.
+# This is the content of APP_BINARY_RESOURCES_DIR (defined in package.conf), packaged by package.sh into a single tar file.
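+# e.g. resources_filename: offline-onap-1.0.0-resources.tar
+# (illustrative name only; package.sh produces offline-<project>-<version>-resources.tar)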
resources_filename:
+# Tarfile name within the resources_dir directory with auxiliary resources.
+# This is the content of APP_AUX_BINARIES (defined in package.conf), packaged by package.sh into a single tar file.
# the purpose of auxiliary resources is to provide user an interface
-# of how to distribute to infra node another big tar which might be
-# usefull later on in application playbooks, optional param
+# to distribute to the infra node a tar file with application-specific files.
+# Docker images in tar format are currently the only supported content of the aux_resources package.
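+# e.g. aux_resources_filename: offline-onap-1.0.0-aux-resources.tar
+# (illustrative name only; package.sh produces offline-<project>-<version>-aux-resources.tar)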
aux_resources_filename:
# resources can be exported via nfs
@@ -23,14 +26,17 @@ resources_on_nfs: no
# Infra node specific information
-# offline solution source data binaries will be decompressed in following dir on infra
+# Offline solution source data binaries (the resources_filename tar) will be
+# decompressed into this directory on the target infra server.
# e.g. app_data_path: /opt/onap
app_data_path:
-# additional data path for auxiliary data transfer
-# e.g. aux_data_path: /opt/onap/onap_me_docker_images
-aux_data_path:
-
+# Path for auxiliary data on the target infra server.
+# Data from the resource host defined by the aux_resources_filename variable is placed into this directory.
+# Currently docker images in tar format are supported (see the runtime_images parameter).
+# It could also be used for other kinds of application-specific data.
+# e.g. aux_data_path: /opt/onap/my_extra_pods_docker_images
+aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
##########################################
@@ -62,24 +68,24 @@ deploy_rpm_repository: yes
# e.g. app_name: ONAP
app_name:
-# as nexus blob is prepopulated during build time following block
-# of runtime_images code provides an alternative way how to insert
-# specified images into nexus during infrastructure playbook execution
-# images specified in there must be available inside aux_resources_filename
-# tar file
+# runtime_images provides a way to insert docker images
+# into nexus during infrastructure playbook execution (they are populated to nexus at runtime).
+# Images specified here must be available inside the aux_resources_filename
+# tar file, which is extracted by the installer into the aux_data_path directory on the infra server.
+# The source format of an image is a .tar file in the aux_data_path directory; all .tar
+# files in that directory are checked against the runtime_images definition.
# if runtime_images are not specified nothing is inserted on top of existing
-# prebuilt nexus blob in installation time
-# Component name must match with tar filename
+# prebuilt nexus blob at installation time.
+# The component name must match the tar filename!
# e.g.
# aaa-component-0.0.1.tar is expected in aux_data_path for aaa-component image
#runtime_images:
- # aaa-component-0.0.1:
+# aaa-component-0.0.1:
# registry: "nexus3.onap.org:10001"
# path: "/onap/components/aaa-component"
# tag: "latest"
runtime_images:
-
###############################
# Application specific params #
###############################
@@ -102,8 +108,9 @@ project_configuration:
# script!
app_helm_charts_install_directory: "/ansible/application/helm_charts"
-# to specify target dir where helm charts should be copied into on infra node
-# this should be directory with all charts and Makefile
+# Specify the target directory into which helm charts are copied on the infra node.
+# (The content of "app_helm_charts_install_directory" is copied by the installer into this directory.)
+# This must be a directory with all charts and a Makefile.
# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
diff --git a/build/package.conf b/build/package.conf
index 7a738f31..78da5eac 100644
--- a/build/package.conf
+++ b/build/package.conf
@@ -2,18 +2,20 @@
# Artifacts include:
# - installer source code (this git repository content)
# - all binary artifacts pre-downloaded from internet (docker images, rpm packages, npm packages, Maven artifacts etc.)
+# The script will create 3 packages:
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar
+# - installer code (ansible dir in this git repo)
+# - Files/dirs defined by APP_CONFIGURATION if any.
+# - Directory content of HELM_CHARTS_DIR if defined.
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar
+# - Directory content of APP_BINARY_RESOURCES_DIR if defined.
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar
+# - Files defined by APP_AUX_BINARIES if any.
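+#
+# For illustration only: assuming project name "onap" and version "1.0.0" are given
+# to package.sh, the created files would be:
+#   offline-onap-1.0.0-sw.tar
+#   offline-onap-1.0.0-resources.tar
+#   offline-onap-1.0.0-aux-resources.tar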
###########################
-# Project specific params #
+# Application Helm charts #
###########################
-# Final package name will be ${SOFTWARE_PACKAGE_BASENAME}-${PROJECT_NAME}-${PROJECT_VERSION}.tar
-SOFTWARE_PACKAGE_BASENAME="onap-offline"
-
-########################
-# Helm charts #
-########################
-
# Provide application installed to Kubernetes cluster. Helm chart is the supported format https://helm.sh/.
# Directory provided here must contain all the Chart directories of the application (https://docs.helm.sh/developing_charts/#charts) and Makefile.
# E.g. in case of ONAP oom repo it will be the content of kubernetes directory.
@@ -21,53 +23,50 @@ SOFTWARE_PACKAGE_BASENAME="onap-offline"
# offline Kubernetes cluster. This may be sometimes wanted.
#HELM_CHARTS_DIR=~/myclones/casablanca_oom/
-###################
-# Packages addons #
-###################
-
-# in there we define array of files/directories to be added into particular packages
-# SW_PACKAGE_ADDONS are offline installer specific entries which are supposed to be inserted
-# into ./ansible/application directory which is the only place where installer expects SW addons
-# if directory is specified, whole dir will be copied into ./ansible/application inc. subdirs
-# if file is specified it will be just copied into ./ansible/application folder.
-#
-SW_PACKAGE_ADDONS=(
- '/root/ansible/application/onap-me-patch-role'
- '/root/ansible/application/application_configuration.yml'
-)
-
-# following array contains directories and files from where offline installer can get required artifacts
-# following binaries and files are expected:
-# <path_to_downloads>/downloads ... path to directory with application binaries (e.g. rancher, kubectl, jq, helm)
-# <path_to_git-repo>/git-repo ... path to directory with git repos
-# <path_to_http>/http ... path to directory with http files
-# <path_to_offline_data>/offline_data ... path to directory with infra specific docker images
-# <path_to_pkg>/pkg ... path to directory with rpm/deb packages
-# <path_to_nexus_blob>/nexus_data.tar ... path to tar file with collected nexus blobs (output of different script)
-#
-# alternatively and if all above is available just ./resources folder with contain all of those might be used
-# <path_to_complete_resources_folder>/resources
-#
+#####################################
+# Application install configuration #
+#####################################
-EXTERNAL_BINARIES_PACKAGE_ADDONS=(
- '/root/resources'
-)
-#EXTERNAL_BINARIES_PACKAGE_ADDONS=(
-# '/root/resources/downloads'
-# '/root/resources/git-repo'
-# '/root/resources/http'
-# '/root/resources/offline_data'
-# '/root/resources/pkg'
-# '/root/resources/nexus_data.tar'
+# The APP_CONFIGURATION array variable can be used to add files/directories
+# into the sw package so they are available for the Ansible process to consume.
+# The main configuration for your application is a yml file
+# "application_configuration.yml" (the file name can be anything) in which the user
+# provides values to control the ansible installer process. The yml file is given
+# as a command line parameter to the ansible run.
+# See the UserGuide documentation (LINK HERE) for details on how to use the installer.
+# The configuration parameters available to the user are listed in the group_vars files:
+# ansible/group_vars/all.yml
+# ansible/group_vars/infrastucture.yml
+# ansible/group_vars/kubernetes.yml
+# Additionally, the user can optionally provide their own ansible role code to customize the install process.
+# At the moment 2 custom ansible roles are supported, a pre and a post install role, which are
+# run by the installer before the Helm install and after the Kubernetes app has been installed.
+# In application_configuration.yml those role names are configured with the variables:
+# application_pre_install_role: my-pre-install-role
+# application_post_install_role: my-post-install-role
+# As required by Ansible, the roles' code must be placed in directories
+# with the same names (see the illustrative layout after the example below).
+#APP_CONFIGURATION=(
+# <offline-installer-clone>/config/application_configuration.yml
+# <offline-installer-clone>/patches/onap-casablanca-patch-role
+# ~/myappfiles/my-post-install-role
#)
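+# As an illustration (hypothetical paths and names), such a custom role is expected
+# to follow the standard Ansible role layout, e.g.:
+#   ~/myappfiles/my-post-install-role/
+#       tasks/
+#           main.yml    # tasks run after the Kubernetes app has been installed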
-# this param should be set to true if additional application package is supposed to be created
-#
-PREPARE_AUX_PACKAGE="true"
+# APP_BINARY_RESOURCES_DIR is a variable pointing to a directory containing directories and files for the offline
+# installer infra and for the application run on that infra.
+# Infra and app binaries are currently mixed together; they may be separated in the future.
+# Following directories and files are expected:
+# downloads ... directory with executable binaries for offline infra usage (e.g. rancher, kubectl, jq, helm)
+# git-repo ... directory with git repos the application needs (served/simulated offline)
+# http ... directory with http simulation files (e.g. Maven artifacts are simulated here)
+# offline_data ... directory with offline infra specific docker images
+# pkg ... directory with rpm/deb packages mainly for offline infra
+# nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application.
+#APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries
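+# For illustration, ~/myappfiles/all_binaries (example path) would then contain:
+#   downloads/  git-repo/  http/  offline_data/  pkg/  nexus_data.tar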
-# tar files with additional application images supposed to be inserted into nexus during runtime are expected
-#
-#AUX_BINARIES_PACKAGE_ADDONS=(
-# '/root/resource_aux/aaa-component-0.0.1.tar'
-# '/root/resource_aux/xyz-component-0.0.1.tar'
+# APP_AUX_BINARIES is an array variable for additional application files.
+# Currently docker images in tar format are supported.
+#APP_AUX_BINARIES=(
+# ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar
+# ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar
#)
diff --git a/build/package.sh b/build/package.sh
index 89764ccf..63774e02 100755
--- a/build/package.sh
+++ b/build/package.sh
@@ -50,12 +50,18 @@ function create_tar {
find ${tar_dir}/* -maxdepth 0 -type d -exec rm -rf '{}' \;
# Remove packaged files
find ${tar_dir}/* ! -name ${tar_name} -exec rm '{}' \;
- echo "tar file ${tar_name} created in target dir"
+ echo "Tar file created to $(dirname ${tar_dir})/${tar_name}"
+}
+
+function create_pkg {
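+ # Package the prepared PKG_ROOT content into a tar named by package type,
+ # then remove the working directory.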
+ local pkg_type="$1"
+ echo "[Creating ${pkg_type} package]"
+ create_tar "${PKG_ROOT}" offline-${PROJECT_NAME}-${PROJECT_VERSION}-${pkg_type}.tar
+ rm -rf "${PKG_ROOT}"
}
function add_metadata {
local metafile="$1"
-
echo "Project name: ${PROJECT_NAME}" >> "${metafile}"
echo "Project version: ${PROJECT_VERSION}" >> "${metafile}"
echo "Package date: ${TIMESTAMP}" >> "${metafile}"
@@ -64,7 +70,6 @@ function add_metadata {
function add_additions {
local source="$1"
local target="$2"
-
if [ -d "${source}" ]; then
mkdir -p "${target}/$(basename $source)"
cp -r "${source}" "${target}"
@@ -80,7 +85,7 @@ function add_additions {
}
function build_sw_artifacts {
- cd ../ansible/docker
+ cd ${LOCAL_PATH}/../ansible/docker
./build_ansible_image.sh
if [ $? -ne 0 ]; then
crash 5 "Building of ansible runner image failed."
@@ -89,95 +94,75 @@ function build_sw_artifacts {
}
function create_sw_package {
- local pkg_root="${PACKAGING_TARGET_DIR}/sw"
-
- # Create tar package
- echo "[Creating software package]"
+ PKG_ROOT="${PACKAGING_TARGET_DIR}/sw"
# Create directory structure of the sw package
- mkdir -p "${pkg_root}"
- cp -r ansible "${pkg_root}"
+ mkdir -p "${PKG_ROOT}"
+ cp -r ${LOCAL_PATH}/../ansible "${PKG_ROOT}"
- # Add additional files/dirs into package based on package.conf
- for item in "${SW_PACKAGE_ADDONS[@]}";do
+ # Add application additional files/dirs into package based on package.conf
+ for item in "${APP_CONFIGURATION[@]}";do
# all SW package addons are expected within ./ansible/application folder
- add_additions "${item}" "${pkg_root}/ansible/application"
+ add_additions "${item}" "${PKG_ROOT}/${APPLICATION_FILES_IN_PACKAGE}"
done
- # Helm charts handling
+ # Application Helm charts
+ # To be consistent with resources and aux dir, create charts dir even if no charts provided.
+ mkdir -p ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE}
if [ ! -z "${HELM_CHARTS_DIR}" ];
then
- echo "Helm charts handling"
+ echo "Add application Helm charts"
# Copy charts available for ansible playbook to use/move them to target server/dir
- mkdir -p ${pkg_root}/${HELM_CHARTS_DIR_IN_PACKAGE}
- cp -r "${HELM_CHARTS_DIR}"/* ${pkg_root}/${HELM_CHARTS_DIR_IN_PACKAGE}
+ cp -r "${HELM_CHARTS_DIR}"/* ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE}
+ else
+ echo "No Helm charts defined, no application will be automatically installed by this package!"
fi
# Add metadata to the package
- add_metadata "${pkg_root}"/package.info
+ add_metadata "${PKG_ROOT}"/package.info
# Create sw tar package
- echo "Creating tar file ..."
- PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}"
- create_tar "${pkg_root}" ${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar
- rm -rf "${pkg_root}"
+ create_pkg sw
}
function create_resource_package {
- local pkg_root="${PACKAGING_TARGET_DIR}/resources"
-
- # Create resource tar package
- echo "[Creating resource package]"
+ PKG_ROOT="${PACKAGING_TARGET_DIR}/resources"
# Create directory structure of the resource package
- mkdir -p "${pkg_root}"
+ mkdir -p "${PKG_ROOT}"
- # Add artifacts into resource packagee based on package.conf config
- for item in "${EXTERNAL_BINARIES_PACKAGE_ADDONS[@]}";do
- if [ "$(basename $item)" == "resources" ]; then
- echo "Note: Packaging all resources at once"
- add_additions "${item}" "${PACKAGING_TARGET_DIR}"
- else
- add_additions "${item}" "${pkg_root}"
- fi
- done
+ # Add artifacts into resource package based on package.conf config
+ if [ ! -z ${APP_BINARY_RESOURCES_DIR} ]; then
+ cp -r ${APP_BINARY_RESOURCES_DIR}/* ${PKG_ROOT}
+ fi
# tar file with nexus_data is expected, we should find and untar it
# before resource.tar is created
- for i in `ls -1 ${pkg_root} | grep tar`; do
- tar tvf "${pkg_root}/${i}" | grep nexus_data &> /dev/null
- if [ $? -eq 0 ]; then
- echo "Debug: tar file with nexus blobs detected ${pkg_root}/${i}. Start unarchive ..."
- tar xf "${pkg_root}/${i}" -C "${pkg_root}" &> /dev/null
- echo "Debug: unarchive finished. Removing original file"
- rm -f "${pkg_root}/${i}"
- fi
+ for i in `ls -1 ${PKG_ROOT} | grep tar`; do
+ tar tvf "${PKG_ROOT}/${i}" | grep nexus_data &> /dev/null
+ if [ $? -eq 0 ]; then
+ echo "Debug: tar file with nexus blobs detected ${PKG_ROOT}/${i}. Start unarchive ..."
+ tar xf "${PKG_ROOT}/${i}" -C "${PKG_ROOT}" &> /dev/null
+ echo "Debug: unarchive finished. Removing original file"
+ rm -f "${PKG_ROOT}/${i}"
+ fi
done
- echo "Creating tar file ..."
- PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}"
- create_tar "${pkg_root}" "${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar"
- rm -rf "${pkg_root}"
+ create_pkg resources
}
function create_aux_package {
- local pkg_root="${PACKAGING_TARGET_DIR}/aux"
-
- # Create aux resource tar package
- echo "Creating aux resource package"
+ PKG_ROOT="${PACKAGING_TARGET_DIR}/aux"
# Create directory structure of the aux resource package
- mkdir -p "${pkg_root}"
+ mkdir -p "${PKG_ROOT}"
# Add artifacts into resource packagee based on package.conf config
- for item in "${AUX_BINARIES_PACKAGE_ADDONS[@]}";do
- add_additions "${item}" "${pkg_root}"
+ for item in "${APP_AUX_BINARIES[@]}";do
+ add_additions "${item}" "${PKG_ROOT}"
done
- echo "Creating tar file ..."
- PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}"
- create_tar "${pkg_root}" "${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar"
- rm -rf "${pkg_root}"
+ create_pkg aux-resources
}
#
@@ -192,6 +177,10 @@ TIMESTAMP=$(date -u +%Y%m%dT%H%M%S)
SCRIPT_DIR=$(dirname "${0}")
LOCAL_PATH=$(readlink -f "$SCRIPT_DIR")
+# Relative location inside the package for application related files.
+# Application here means the Kubernetes application installed by Helm charts on a ready cluster (e.g. onap).
+APPLICATION_FILES_IN_PACKAGE="ansible/application"
+
# Relative location inside the package to place Helm charts to be available for
# Ansible process to transfer them into machine (infra node) running Helm repository.
# NOTE: This is quite hardcoded place to put them and agreement with Ansible code
@@ -201,7 +190,7 @@ LOCAL_PATH=$(readlink -f "$SCRIPT_DIR")
# This variable can be of course changed in package.conf if really needed if
# corresponding ansible variable "app_helm_charts_install_directory" value
# adjusted accordingly.
-HELM_CHARTS_DIR_IN_PACKAGE="ansible/application/helm_charts"
+HELM_CHARTS_DIR_IN_PACKAGE="${APPLICATION_FILES_IN_PACKAGE}/helm_charts"
if [ "$#" -lt 3 ]; then
echo "Missing some mandatory parameter!"
@@ -241,13 +230,6 @@ rm -rf ${PACKAGING_TARGET_DIR}
build_sw_artifacts
create_sw_package
create_resource_package
-
-# This part will create aux package which consists of
-# artifacts which can be added into offline nexus during runtime
-if [ "${PREPARE_AUX_PACKAGE}" == "true" ]; then
- create_aux_package
-else
- echo "AUX package won't be created"
-fi
+create_aux_package
popd
diff --git a/doc/BuildGuide.rst b/doc/BuildGuide.rst
index 6e36b253..a06180e0 100755
--- a/doc/BuildGuide.rst
+++ b/doc/BuildGuide.rst
@@ -305,23 +305,19 @@ For the packagin itself it's necessary to prepare configuration. You can
use ./onap/install/onap-offline/build/package.conf as template or
directly modify it.
-There are some parameters needs to be set in configuration file and some
-are optional:
+There are some parameters that need to be set in the configuration file.
+---------------------------------------+------------------------------------------------------------------------------+
| Parameter | Description |
+=======================================+==============================================================================+
-| SOFTWARE\_PACKAGE\_BASENAME | defines package name prefix (e.g. onap-offline) |
+| HELM\_CHARTS\_DIR | directory with Helm charts for the application |
+---------------------------------------+------------------------------------------------------------------------------+
-| HELM\_CHARTS\_DIR | oom directory from oom git repostitory |
+| APP\_CONFIGURATION | application install configuration (application_configuration.yml) for |
+| | ansible installer and custom ansible role code directories if any |
+---------------------------------------+------------------------------------------------------------------------------+
-| SW\_PACKAGE\_ADDONS | specific entries which are inserted into ./ansible/application |
+| APP\_BINARY\_RESOURCES\_DIR | directory with all (binary) resources for offline infra and application |
+---------------------------------------+------------------------------------------------------------------------------+
-| EXTERNAL\_BINARIES\_PACKAGE\_ADDONS | other addons used as resources |
-+---------------------------------------+------------------------------------------------------------------------------+
-| PREPARE\_AUX\_PACKAGE | boolean condition if prepare AUX package [optional] |
-+---------------------------------------+------------------------------------------------------------------------------+
-| AUX\_BINARIES\_PACKAGE\_ADDONS | additional binaries such as docker images loaded during runtime [optional] |
+| APP\_AUX\_BINARIES | additional binaries such as docker images loaded during runtime [optional] |
+---------------------------------------+------------------------------------------------------------------------------+
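+
+For illustration, a minimal package.conf could look as follows (all paths are
+example values, based on the commented samples inside package.conf)::
+
+    HELM_CHARTS_DIR=~/myclones/casablanca_oom/
+    APP_CONFIGURATION=(
+        ~/myappfiles/application_configuration.yml
+        ~/myappfiles/my-post-install-role
+    )
+    APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries
+    APP_AUX_BINARIES=(
+        ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar
+    )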
Offline installer packages are created with prepopulated data via
@@ -336,9 +332,8 @@ E.g.
So in the target directory you should find tar files with
-<PACKAGE\_BASE\_NAME>-<PROJECT\_NAME>-<PROJECT\_VERSION>-sw.tar
+offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-sw.tar
-<PACKAGE\_BASE\_NAME>-<PROJECT\_NAME>-<PROJECT\_VERSION>-resources.tar
+offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-resources.tar
-Optionally:
-<PACKAGE\_BASE\_NAME>-<PROJECT\_NAME>-<PROJECT\_VERSION>-aux-resources.tar
+offline-<PROJECT\_NAME>-<PROJECT\_VERSION>-aux-resources.tar