From fe111116be4128a9fb90d175c38e0aa955e7e33f Mon Sep 17 00:00:00 2001 From: Samuli Silvius Date: Tue, 5 Feb 2019 09:45:24 +0200 Subject: Helm install optional and default values Make Helm applicaton installation optional by allowing user not to provide Helm charts. Then only empty Kubernetes cluster will be installed. Provide some reasonable default values for Helm charts configuration variables both in package script and ansible installer itself. User provided Helm charts configuration must be in sync with packaging and installer, provided some clarifying comments for that. Issue-ID: OOM-1629 Change-Id: Ica9fc76856cb50c9d636bea99a326736736c7a56 Signed-off-by: Samuli Silvius --- ansible/group_vars/all.yml | 30 ++++++++++++++---------- ansible/roles/application-install/tasks/main.yml | 17 ++++++++++++++ 2 files changed, 35 insertions(+), 12 deletions(-) (limited to 'ansible') diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index e70a837b..d2385f65 100755 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -84,22 +84,28 @@ runtime_images: # Application specific params # ############################### -# Project name to utilize same codebase -# e.g. project_configuration: onap-me +# Project name to utilize same codebase. Just helper variable inside ansible +# configuration files (like this file) to avoid writing own project name multiple +# times for paths, namespaces, Helm release, derived variables.. +# e.g. project_configuration: onap-casablanca project_configuration: -# App Helm charts dir. E.g. application/helm_charts/ where xxx is a charts folder name. 
-# Helm charts are expected to be inside SW package somewhere inside ./ansible/application -# those will be available for offline installer under /ansible/application/ -# for OOM project helm charts are usually within kubernetes sub-folder -# so the path for them can be: -# e.g app_helm_charts_install_directory: "/ansible/application/oom/kubernetes" -app_helm_charts_install_directory: +# App Helm charts directory location in installation package. +# The path is absolute path (even locates relative inside of this sw package +# installation folder) because it must be visible for ansible docker/chroot +# process to find directory and to transfer it into machine (infra node) running +# Helm repository. +# Content of the folder must be Helm chart directories of the app with Makefile. +# In case of ONAP OOM it would be /kubernetes folder content. +# NOTE: This default value should not be changed if not really needed and it +# must match with the variable "HELM_CHARTS_DIR_IN_PACKAGE" value in package.sh +# script! +app_helm_charts_install_directory: "/ansible/application/helm_charts" # to specify target dir where helm charts should be copied into on infra node # this should be directory with all charts and Makefile # e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" -app_helm_charts_infra_directory: +app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" # Main Helm chart to install # e.g. app_helm_chart_name: onap @@ -114,12 +120,12 @@ app_helm_build_targets: # Directory with helm plugins # It's an optional parameter used e.g. in OOM Casablanca -# app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/kubernetes/helm/plugins/" +# app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/" app_helm_plugins_directory: # Helm release name (visible in POD names) used by Helm # e.g. 
app_helm_release_name: "{{ project_configuration }}" -app_helm_release_name: +app_helm_release_name: "{{ project_configuration }}" # Kubernetes namespace where application is installed # e.g. app_kubernetes_namespace: onap diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml index 3306d9e4..89e7ef7e 100644 --- a/ansible/roles/application-install/tasks/main.yml +++ b/ansible/roles/application-install/tasks/main.yml @@ -2,4 +2,21 @@ - debug: msg: "phase is {{ phase }}" +- name: Check if install needed + block: + - name: "Does {{ app_helm_charts_install_directory }} exist and contain Helm Charts" + find: + paths: "{{ app_helm_charts_install_directory }}" + recurse: yes + delegate_to: localhost + register: charts_files + - name: Set install active fact + set_fact: + install_needed: "{{ yes if charts_files.matched | int > 0 else no }}" + when: phase == "pre-install" + - include_tasks: "{{ phase }}.yml" + when: install_needed + +- debug: + msg: "Install needed {{ install_needed }}" -- cgit 1.2.3-korg From 426e6c09fe0c8a0d0c35631231f023eaf8823775 Mon Sep 17 00:00:00 2001 From: Samuli Silvius Date: Wed, 6 Feb 2019 11:25:01 +0200 Subject: Clarify packaging variables documentation For the user package.sh/package.conf is a bit confusing and variables are not well named nor documented. Binary definition can be also simplified by providing just single directory to take binaries from. Corresponding variable documentation on installer side (ansible code) is also confusing and missing relation to packaging variables This commit aims on improving that documentation. 
Issue-ID: OOM-1633 Change-Id: I81261f51d393f434eac26c4a46624388671bd36b Signed-off-by: Samuli Silvius --- ansible/group_vars/all.yml | 47 ++++++++++-------- build/package.conf | 105 ++++++++++++++++++++-------------------- build/package.sh | 118 +++++++++++++++++++-------------------------- doc/BuildGuide.rst | 23 ++++----- 4 files changed, 138 insertions(+), 155 deletions(-) (limited to 'ansible') diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index d2385f65..dbb05205 100755 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -5,15 +5,18 @@ # Resource host information -# folder on resource host where tars with resources are present +# Directory on resource host where tars with resources are present resources_dir: -# tarfile name within this folder with offline infrastructure sw +# tarfile name within resources_dir directory with offline infrastructure binaries. +# Content of APP_BINARY_RESOURCES_DIR (defined in package.conf) packaged by package.sh to single tar file. resources_filename: +# tarfile name within resources_dir directory with auxiliary resources. +# Content of APP_AUX_BINARIES (defined in package.conf) packaged by package.sh to single tar file. # the purpose of auxiliary resources is to provide user an interface -# of how to distribute to infra node another big tar which might be -# usefull later on in application playbooks, optional param +# to distribute to infra node tar file with application specific files. +# Docker images in tar format are currently the only supported content of aux_resources package. aux_resources_filename: # resources can be exported via nfs @@ -23,14 +26,17 @@ resources_on_nfs: no # Infra node specific information -# offline solution source data binaries will be decompressed in following dir on infra +# Offline solution source data binaries (resources_filename tar) will be +# decompressed in this directory on target infra server. # e.g. 
app_data_path: /opt/onap app_data_path: -# additional data path for auxiliary data transfer -# e.g. aux_data_path: /opt/onap/onap_me_docker_images -aux_data_path: - +# Path for auxiliary data in target infra server. +# Data from resource host defined by aux_resources_filename variable is placed to this directory. +# Currently docker images in tar format are supported (see runtime_images parameter). +# Could be used for other kind of application specific data also. +# e.g. aux_data_path: /opt/onap/my_extra_pods_docker_images +aux_data_path: "{{ app_data_path }}/runtime_images_source_dir" ########################################## @@ -62,24 +68,24 @@ deploy_rpm_repository: yes # e.g. app_name: ONAP app_name: -# as nexus blob is prepopulated during build time following block -# of runtime_images code provides an alternative way how to insert -# specified images into nexus during infrastructure playbook execution -# images specified in there must be available inside aux_resources_filename -# tar file +# runtime_images provides an way to insert docker images +# into nexus during infrastructure playbook execution (populated to nexus at runtime). +# images specified must be available inside aux_resources_filename +# tar file that is extracted by installer into aux_data_path directory in infra server. +# Source format of an image is .tar file in aux_data_path directory and all .tar +# files in that dir are checked to match runtime_images definition. # if runtime_images are not specified nothing is inserted on top of existing -# prebuilt nexus blob in installation time -# Component name must match with tar filename +# prebuilt nexus blob in installation time. +# Component name must match with tar filename! # e.g. 
# aaa-component-0.0.1.tar is expected in aux_data_path for aaa-component image #runtime_images: - # aaa-component-0.0.1: +# aaa-component-0.0.1: # registry: "nexus3.onap.org:10001" # path: "/onap/components/aaa-component" # tag: "latest" runtime_images: - ############################### # Application specific params # ############################### @@ -102,8 +108,9 @@ project_configuration: # script! app_helm_charts_install_directory: "/ansible/application/helm_charts" -# to specify target dir where helm charts should be copied into on infra node -# this should be directory with all charts and Makefile +# Specify target dir where helm charts are copied into on infra node. +# (same as content of "app_helm_charts_install_directory" copied by installer to this dir.) +# This must be directory with all charts and Makefile. # e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" diff --git a/build/package.conf b/build/package.conf index 7a738f31..78da5eac 100644 --- a/build/package.conf +++ b/build/package.conf @@ -2,18 +2,20 @@ # Artifacts include: # - installer source code (this git repository content) # - all binary artifacts pre-downloaded from internet (docker images, rpm packages, npm packages, Maven artifacts etc.) +# Script will create 3 packages: +# offline-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar +# - installer code (ansible dir in this git repo) +# - Files/dirs defined by APP_CONFIGURATION if any. +# - Directory content of HELM_CHARTS_DIR if defined. +# offline-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar +# - Directory content of APP_BINARY_RESOURCES_DIR if defined. +# offline-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar +# - Files defined by APP_AUX_BINARIES if any. 
########################### -# Project specific params # +# Application Helm charts # ########################### -# Final package name will be ${SOFTWARE_PACKAGE_BASENAME}-${PROJECT_NAME}-${PROJECT_VERSION}.tar -SOFTWARE_PACKAGE_BASENAME="onap-offline" - -######################## -# Helm charts # -######################## - # Provide application installed to Kubernetes cluster. Helm chart is the supported format https://helm.sh/. # Directory provided here must contain all the Chart directories of the application (https://docs.helm.sh/developing_charts/#charts) and Makefile. # E.g. in case of ONAP oom repo it will be the content of kubernetes directory. @@ -21,53 +23,50 @@ SOFTWARE_PACKAGE_BASENAME="onap-offline" # offline Kubernetes cluster. This may be sometimes wanted. #HELM_CHARTS_DIR=~/myclones/casablanca_oom/ -################### -# Packages addons # -################### - -# in there we define array of files/directories to be added into particular packages -# SW_PACKAGE_ADDONS are offline installer specific entries which are supposed to be inserted -# into ./ansible/application directory which is the only place where installer expects SW addons -# if directory is specified, whole dir will be copied into ./ansible/application inc. subdirs -# if file is specified it will be just copied into ./ansible/application folder. -# -SW_PACKAGE_ADDONS=( - '/root/ansible/application/onap-me-patch-role' - '/root/ansible/application/application_configuration.yml' -) - -# following array contains directories and files from where offline installer can get required artifacts -# following binaries and files are expected: -# /downloads ... path to directory with application binaries (e.g. rancher, kubectl, jq, helm) -# /git-repo ... path to directory with git repos -# /http ... path to directory with http files -# /offline_data ... path to directory with infra specific docker images -# /pkg ... path to directory with rpm/deb packages -# /nexus_data.tar ... 
path to tar file with collected nexus blobs (output of different script) -# -# alternatively and if all above is available just ./resources folder with contain all of those might be used -# /resources -# +##################################### +# Application install configuration # +##################################### -EXTERNAL_BINARIES_PACKAGE_ADDONS=( - '/root/resources' -) -#EXTERNAL_BINARIES_PACKAGE_ADDONS=( -# '/root/resources/downloads' -# '/root/resources/git-repo' -# '/root/resources/http' -# '/root/resources/offline_data' -# '/root/resources/pkg' -# '/root/resources/nexus_data.tar' +# APP_CONFIGURATION array variable can be used to provide files/directories +# into sw package available for the Ansible process to consume. +# The main configuration for you application is a yml file +# "application_configuration.yml" (name of file can be anything) where user +# need to provide values to control ansible installer process. Yml file is given +# as command line parameter to ansible run. +# See more from UserGuide documentation (LINK HERE) how to use installer. +# Available configuration parameters user can configure is seen from group_vars files: +# ansible/group_vars/all.yml +# ansible/group_vars/infrastucture.yml +# ansible/group_vars/kubernetes.yml +# Additionally user can optionally provide own ansible roles code to customize install process. +# At the moment 2 custom ansible roles are supported pre and post install roles, which are +# run by installer prior Helm install and after Kubernetes app has been installed. +# In application_configuration.yml those role names are configured with variables: +# application_pre_install_role: my-pre-install-role +# application_post_install_role: my-post-install-role +# And according to Ansible functionality, roles' code must be placed to directories +# with the same name. 
+#APP_CONFIGURATION=( +# /config/application_configuration.yml +# /patches/onap-casablanca-patch-role +# ~/myappfiles/my-post-install-role #) -# this param should be set to true if additional application package is supposed to be created -# -PREPARE_AUX_PACKAGE="true" +# APP_BINARY_RESOURCES_DIR is variable to directory containing directories and files for offline +# installer infra and the the application run in that infra. +# Currently mixed with infra and app binaries to same, maybe to be separated in the future. +# Following directories and files are expected: +# downloads ... directory with exacutable binaries for offline infra usage (e.g. rancher, kubectl, jq, helm) +# git-repo ... directory with git repos for application needs to be simulated +# http ... directory with http simulation files (e.g. Maven artifacts are simulated here) +# offline_data ... directory with offline infra specific docker images +# pkg ... directory with rpm/deb packages mainly for offline infra +# nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application. +#APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries -# tar files with additional application images supposed to be inserted into nexus during runtime are expected -# -#AUX_BINARIES_PACKAGE_ADDONS=( -# '/root/resource_aux/aaa-component-0.0.1.tar' -# '/root/resource_aux/xyz-component-0.0.1.tar' +# APP_AUX_BINARIES is array variable for additional application files. +# Docker images supported currently in tar format. +#APP_AUX_BINARIES=( +# ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar +# ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar #) diff --git a/build/package.sh b/build/package.sh index 89764ccf..63774e02 100755 --- a/build/package.sh +++ b/build/package.sh @@ -50,12 +50,18 @@ function create_tar { find ${tar_dir}/* -maxdepth 0 -type d -exec rm -rf '{}' \; # Remove packaged files find ${tar_dir}/* ! 
-name ${tar_name} -exec rm '{}' \; - echo "tar file ${tar_name} created in target dir" + echo "Tar file created to $(dirname ${tar_dir})/${tar_name}" +} + +function create_pkg { + local pkg_type="$1" + echo "[Creating ${pkg_type} package]" + create_tar "${PKG_ROOT}" offline-${PROJECT_NAME}-${PROJECT_VERSION}-${pkg_type}.tar + rm -rf "${PKG_ROOT}" } function add_metadata { local metafile="$1" - echo "Project name: ${PROJECT_NAME}" >> "${metafile}" echo "Project version: ${PROJECT_VERSION}" >> "${metafile}" echo "Package date: ${TIMESTAMP}" >> "${metafile}" @@ -64,7 +70,6 @@ function add_metadata { function add_additions { local source="$1" local target="$2" - if [ -d "${source}" ]; then mkdir -p "${target}/$(basename $source)" cp -r "${source}" "${target}" @@ -80,7 +85,7 @@ function add_additions { } function build_sw_artifacts { - cd ../ansible/docker + cd ${LOCAL_PATH}/../ansible/docker ./build_ansible_image.sh if [ $? -ne 0 ]; then crash 5 "Building of ansible runner image failed." @@ -89,95 +94,75 @@ function build_sw_artifacts { } function create_sw_package { - local pkg_root="${PACKAGING_TARGET_DIR}/sw" - - # Create tar package - echo "[Creating software package]" + PKG_ROOT="${PACKAGING_TARGET_DIR}/sw" # Create directory structure of the sw package - mkdir -p "${pkg_root}" - cp -r ansible "${pkg_root}" + mkdir -p "${PKG_ROOT}" + cp -r ${LOCAL_PATH}/../ansible "${PKG_ROOT}" - # Add additional files/dirs into package based on package.conf - for item in "${SW_PACKAGE_ADDONS[@]}";do + # Add application additional files/dirs into package based on package.conf + for item in "${APP_CONFIGURATION[@]}";do # all SW package addons are expected within ./ansible/application folder - add_additions "${item}" "${pkg_root}/ansible/application" + add_additions "${item}" "${PKG_ROOT}/${APPLICATION_FILES_IN_PACKAGE}" done - # Helm charts handling + # Application Helm charts + # To be consistent with resources and aux dir, create charts dir even if no charts provided. 
+ mkdir -p ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE} if [ ! -z "${HELM_CHARTS_DIR}" ]; then - echo "Helm charts handling" + echo "Add application Helm charts" # Copy charts available for ansible playbook to use/move them to target server/dir - mkdir -p ${pkg_root}/${HELM_CHARTS_DIR_IN_PACKAGE} - cp -r "${HELM_CHARTS_DIR}"/* ${pkg_root}/${HELM_CHARTS_DIR_IN_PACKAGE} + cp -r "${HELM_CHARTS_DIR}"/* ${PKG_ROOT}/${HELM_CHARTS_DIR_IN_PACKAGE} + else + echo "No Helm charts defined, no application will be automatically installed by this package!" fi # Add metadata to the package - add_metadata "${pkg_root}"/package.info + add_metadata "${PKG_ROOT}"/package.info # Create sw tar package - echo "Creating tar file ..." - PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}" - create_tar "${pkg_root}" ${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar - rm -rf "${pkg_root}" + create_pkg sw } function create_resource_package { - local pkg_root="${PACKAGING_TARGET_DIR}/resources" - - # Create resource tar package - echo "[Creating resource package]" + PKG_ROOT="${PACKAGING_TARGET_DIR}/resources" # Create directory structure of the resource package - mkdir -p "${pkg_root}" + mkdir -p "${PKG_ROOT}" - # Add artifacts into resource packagee based on package.conf config - for item in "${EXTERNAL_BINARIES_PACKAGE_ADDONS[@]}";do - if [ "$(basename $item)" == "resources" ]; then - echo "Note: Packaging all resources at once" - add_additions "${item}" "${PACKAGING_TARGET_DIR}" - else - add_additions "${item}" "${pkg_root}" - fi - done + # Add artifacts into resource package based on package.conf config + if [ ! -z ${APP_BINARY_RESOURCES_DIR} ]; then + cp -r ${APP_BINARY_RESOURCES_DIR}/* ${PKG_ROOT} + fi # tar file with nexus_data is expected, we should find and untar it # before resource.tar is created - for i in `ls -1 ${pkg_root} | grep tar`; do - tar tvf "${pkg_root}/${i}" | grep nexus_data &> /dev/null - if [ $? 
-eq 0 ]; then - echo "Debug: tar file with nexus blobs detected ${pkg_root}/${i}. Start unarchive ..." - tar xf "${pkg_root}/${i}" -C "${pkg_root}" &> /dev/null - echo "Debug: unarchive finished. Removing original file" - rm -f "${pkg_root}/${i}" - fi + for i in `ls -1 ${PKG_ROOT} | grep tar`; do + tar tvf "${PKG_ROOT}/${i}" | grep nexus_data &> /dev/null + if [ $? -eq 0 ]; then + echo "Debug: tar file with nexus blobs detected ${PKG_ROOT}/${i}. Start unarchive ..." + tar xf "${PKG_ROOT}/${i}" -C "${PKG_ROOT}" &> /dev/null + echo "Debug: unarchive finished. Removing original file" + rm -f "${PKG_ROOT}/${i}" + fi done - echo "Creating tar file ..." - PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}" - create_tar "${pkg_root}" "${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar" - rm -rf "${pkg_root}" + create_pkg resources } function create_aux_package { - local pkg_root="${PACKAGING_TARGET_DIR}/aux" - - # Create aux resource tar package - echo "Creating aux resource package" + PKG_ROOT="${PACKAGING_TARGET_DIR}/aux" # Create directory structure of the aux resource package - mkdir -p "${pkg_root}" + mkdir -p "${PKG_ROOT}" # Add artifacts into resource packagee based on package.conf config - for item in "${AUX_BINARIES_PACKAGE_ADDONS[@]}";do - add_additions "${item}" "${pkg_root}" + for item in "${APP_AUX_BINARIES[@]}";do + add_additions "${item}" "${PKG_ROOT}" done - echo "Creating tar file ..." - PACKAGE_BASE_NAME="${SOFTWARE_PACKAGE_BASENAME}" - create_tar "${pkg_root}" "${PACKAGE_BASE_NAME}-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar" - rm -rf "${pkg_root}" + create_pkg aux-resources } # @@ -192,6 +177,10 @@ TIMESTAMP=$(date -u +%Y%m%dT%H%M%S) SCRIPT_DIR=$(dirname "${0}") LOCAL_PATH=$(readlink -f "$SCRIPT_DIR") +# Relative location inside the package for application related files. +# Application means Kubernetes application installed by Helm charts on ready cluster (e.g. onap). 
+APPLICATION_FILES_IN_PACKAGE="ansible/application" + # Relative location inside the package to place Helm charts to be available for # Ansible process to transfer them into machine (infra node) running Helm repository. # NOTE: This is quite hardcoded place to put them and agreement with Ansible code @@ -201,7 +190,7 @@ LOCAL_PATH=$(readlink -f "$SCRIPT_DIR") # This variable can be of course changed in package.conf if really needed if # corresponding ansible variable "app_helm_charts_install_directory" value # adjusted accordingly. -HELM_CHARTS_DIR_IN_PACKAGE="ansible/application/helm_charts" +HELM_CHARTS_DIR_IN_PACKAGE="${APPLICATION_FILES_IN_PACKAGE}/helm_charts" if [ "$#" -lt 3 ]; then echo "Missing some mandatory parameter!" @@ -241,13 +230,6 @@ rm -rf ${PACKAGING_TARGET_DIR} build_sw_artifacts create_sw_package create_resource_package - -# This part will create aux package which consists of -# artifacts which can be added into offline nexus during runtime -if [ "${PREPARE_AUX_PACKAGE}" == "true" ]; then - create_aux_package -else - echo "AUX package won't be created" -fi +create_aux_package popd diff --git a/doc/BuildGuide.rst b/doc/BuildGuide.rst index 6e36b253..a06180e0 100755 --- a/doc/BuildGuide.rst +++ b/doc/BuildGuide.rst @@ -305,23 +305,19 @@ For the packagin itself it's necessary to prepare configuration. You can use ./onap/install/onap-offline/build/package.conf as template or directly modify it. -There are some parameters needs to be set in configuration file and some -are optional: +There are some parameters needs to be set in configuration file. +---------------------------------------+------------------------------------------------------------------------------+ | Parameter | Description | +=======================================+==============================================================================+ -| SOFTWARE\_PACKAGE\_BASENAME | defines package name prefix (e.g. 
onap-offline) | +| HELM\_CHARTS\_DIR | directory with Helm charts for the application | +---------------------------------------+------------------------------------------------------------------------------+ -| HELM\_CHARTS\_DIR | oom directory from oom git repostitory | +| APP\_CONFIGURATION | application install configuration (application_configuration.yml) for | +| | ansible installer and custom ansible role code directories if any | +---------------------------------------+------------------------------------------------------------------------------+ -| SW\_PACKAGE\_ADDONS | specific entries which are inserted into ./ansible/application | +| APP\_BINARY\_RESOURCES\_DIR | directory with all (binary) resources for offline infra and application | +---------------------------------------+------------------------------------------------------------------------------+ -| EXTERNAL\_BINARIES\_PACKAGE\_ADDONS | other addons used as resources | -+---------------------------------------+------------------------------------------------------------------------------+ -| PREPARE\_AUX\_PACKAGE | boolean condition if prepare AUX package [optional] | -+---------------------------------------+------------------------------------------------------------------------------+ -| AUX\_BINARIES\_PACKAGE\_ADDONS | additional binaries such as docker images loaded during runtime [optional] | +| APP\_AUX\_BINARIES | additional binaries such as docker images loaded during runtime [optional] | +---------------------------------------+------------------------------------------------------------------------------+ Offline installer packages are created with prepopulated data via @@ -336,9 +332,8 @@ E.g. 
So in the target directory you should find tar files with ----sw.tar +offline---sw.tar ----resources.tar +offline---resources.tar -Optionally: ----aux-resources.tar +offline---aux-resources.tar -- cgit 1.2.3-korg From f3eee9e2131a59e2a0995c53c07001f24f9187a2 Mon Sep 17 00:00:00 2001 From: Samuli Silvius Date: Sun, 10 Feb 2019 13:24:03 +0200 Subject: More default values and simplify onap config Populate more default values for ansible all.yml configuration to make it simpler by default for user. Removed project_configuration variable as it caused just confusion. Replaced that mostly by using app_name variable. According to above changed simplified actual configuration config/application_configuration.yml used to install onap. Mostly removed repeation of comments. Also updated ansible/application/README.md that was not updated for a while. Issue-ID: OOM-1633 Change-Id: Idcc4c510b64bb61edd874d0e0616cdb41938f2d3 Signed-off-by: Samuli Silvius --- ansible/application/README.md | 29 ++++------- ansible/group_vars/all.yml | 20 +++----- build/package.conf | 10 +++- config/application_configuration.yml | 94 ++++-------------------------------- doc/BuildGuide.rst | 16 ++++-- 5 files changed, 47 insertions(+), 122 deletions(-) (limited to 'ansible') diff --git a/ansible/application/README.md b/ansible/application/README.md index 342240be..d260b3cb 100644 --- a/ansible/application/README.md +++ b/ansible/application/README.md @@ -1,8 +1,8 @@ # Application specific configuration This directory is **empty** on purpose in git. Content in this folder is -placed on installer packaging time and can be modified by user on target -server where installer package is installed. +populated packaging time (see package.sh/package.conf) and can be modified if needed +also on target server where package is installed. ## Application configuration @@ -20,34 +20,20 @@ Example: Application helm charts must be available on infra node before application playbook is executed. 
That folder on infra node is specified within `app_helm_charts_infra_directory` variable. -Helm charts folder name is configured on `application_configuration.yml` file -with `app_helm_charts_directory` variable - it is the path on remote infrastructure server. - -Example: -``` -app_helm_charts_directory: /opt/application/helm_charts -``` - -It is expected that helm charts are available from packaging script as a part of installer SW package. -Such source directory of helm charts is specified by `app_helm_charts_install_directory` variable - -Example: -``` -app_helm_charts_install_directory: ansible/application/helm_charts/kubernetes -``` +There is a good default value for this variable and if not changed, installer will handle +Helm charts transfer from packaging up to the target infra server. ## Application specific roles Installer supports optional custom pre and post install roles. Custom roles' code folders -need to be placed to this directory and name of those folders are configured in +are placed to this directory at packaging time and name of those folders are configured in application.yml with variable `application_pre_install_role` and `application_post_install_role`. Example: ``` -application_pre_install_role: "{{ project_configuration }}-patch-role" +application_pre_install_role: "{{ app_name }}-patch-role" ``` - ## Inventory hosts Ansible inventory file is least application specific but in practice example @@ -56,3 +42,6 @@ and at least ip addresses need to be changed according to target servers after installer installation and before starting installer execution. So it's better to place also hosts.yml to this application directory and edit it here. +That can be done either at packaging time same way as application_configuration.yml +or after package has been installed to server where ansible process are run just +before lauching any playbooks. 
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index dbb05205..cd8c7f58 100755 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -65,7 +65,7 @@ deploy_rpm_repository: yes # Offline solution is deploying app specific rpm repository and requires some name # also for k8s cluster -# e.g. app_name: ONAP +# e.g. app_name: onap app_name: # runtime_images provides an way to insert docker images @@ -90,12 +90,6 @@ runtime_images: # Application specific params # ############################### -# Project name to utilize same codebase. Just helper variable inside ansible -# configuration files (like this file) to avoid writing own project name multiple -# times for paths, namespaces, Helm release, derived variables.. -# e.g. project_configuration: onap-casablanca -project_configuration: - # App Helm charts directory location in installation package. # The path is absolute path (even locates relative inside of this sw package # installation folder) because it must be visible for ansible docker/chroot @@ -116,7 +110,7 @@ app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" # Main Helm chart to install # e.g. app_helm_chart_name: onap -app_helm_chart_name: +app_helm_chart_name: "{{ app_name }}" # Targets for helm charts repository build # e.g. for ONAP Casablanca @@ -128,19 +122,19 @@ app_helm_build_targets: # Directory with helm plugins # It's an optional parameter used e.g. in OOM Casablanca # app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/" -app_helm_plugins_directory: +app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/" # Helm release name (visible in POD names) used by Helm -# e.g. app_helm_release_name: "{{ project_configuration }}" -app_helm_release_name: "{{ project_configuration }}" +# e.g. app_helm_release_name: onap +app_helm_release_name: "{{ app_name }}" # Kubernetes namespace where application is installed # e.g. 
app_kubernetes_namespace: onap -app_kubernetes_namespace: +app_kubernetes_namespace: "{{ app_name }}" # Optional application custom Ansible roles name for pre and post install logic. # Location of additional custom roles is defined in ansible.cfg with roles_path. -# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role" +# e.g. application_pre_install_role: "{{ app_name }}-patch-role" application_pre_install_role: application_post_install_role: diff --git a/build/package.conf b/build/package.conf index 78da5eac..29ca3cb1 100644 --- a/build/package.conf +++ b/build/package.conf @@ -21,7 +21,8 @@ # E.g. in case of ONAP oom repo it will be the content of kubernetes directory. # NOTE: Leaving this variable commented out will mean that no Helm application will be installed to # offline Kubernetes cluster. This may be sometimes wanted. -#HELM_CHARTS_DIR=~/myclones/casablanca_oom/ +#HELM_CHARTS_DIR=/kubernetes +HELM_CHARTS_DIR=/tmp/oom-clone/kubernetes ##################################### # Application install configuration # @@ -51,6 +52,10 @@ # /patches/onap-casablanca-patch-role # ~/myappfiles/my-post-install-role #) +APP_CONFIGURATION=( + /tmp/offline-installer/config/application_configuration.yml + /tmp/offline-installer/patches/onap-casablanca-patch-role +) # APP_BINARY_RESOURCES_DIR is variable to directory containing directories and files for offline # installer infra and the the application run in that infra. @@ -63,6 +68,7 @@ # pkg ... directory with rpm/deb packages mainly for offline infra # nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application. #APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries +APP_BINARY_RESOURCES_DIR=/tmp/onap-offline/resources # APP_AUX_BINARIES is array variable for additional application files. # Docker images supported currently in tar format. 
@@ -70,3 +76,5 @@ # ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar # ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar #) +APP_AUX_BINARIES=() + diff --git a/config/application_configuration.yml b/config/application_configuration.yml index e5438dce..25737edb 100755 --- a/config/application_configuration.yml +++ b/config/application_configuration.yml @@ -1,95 +1,21 @@ --- -################################### -# Resources configuration entries # -################################### -# Resource host information - -# folder on resource host where tars with resources are present +# +# This is example configuration to install offline ONAP. +# See available variables to configure and used default values +# from ../ansible/group_vars/*.yml files. +# resources_dir: /data - -# tarfile name within this folder with offline infrastructure sw -resources_filename: onap-offline-onap-full-3.0.0.resources.tar - -# Infra node specific information - -# offline solution source data binaries will be decompressed in following dir on infra -# e.g. app_data_path: /opt/onap -app_data_path: /opt/onap - -########################################## -# Offline Infrastructure specific params # -########################################## - -# information from which rootCA is created -# e.g. -# organization_name: Samsung -# state_or_province_name: Poland -# country_name: PL -# locality_name: Krakow +app_name: onap +resources_filename: "offline-{{ app_name }}-3.0.0.resources.tar" +app_data_path: "/opt/{{ app_name }}" certificates: organization_name: Samsung state_or_province_name: Poland country_name: PL locality_name: Krakow - -# Offline solution is deploying app specific rpm repository and requires some name -# also for k8s cluster -# e.g. app_name: ONAP -app_name: ONAP - - -############################### -# Application specific params # -############################### - -# Project name to utilize same codebase -# e.g. 
project_configuration: onap-full -project_configuration: onap-full - -# App Helm charts dir. E.g. application/helm_charts/ where xxx is a charts folder name. -# Helm charts are expected to be inside SW package somewhere inside ./ansible/application -# those will be available for offline installer under /ansible/application/ -# for OOM project helm charts are usually within kubernetes sub-folder -# so the path for them can be: -# e.g app_helm_charts_install_directory: "/ansible/application/oom/kubernetes" -app_helm_charts_install_directory: "/ansible/application/helm_charts" - -# to specify target dir where helm charts should be copied into on infra node -# this should be directory with all charts and Makefile -# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" -app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts" - -# Main Helm chart to install -# e.g. app_helm_chart_name: onap -app_helm_chart_name: onap - -# Targets for helm charts repository build -# e.g. for ONAP Casablanca -# app_helm_build_targets: -# - all -# - onap app_helm_build_targets: - all - - onap - -# Directory with helm plugins -# It's an optional parameter used e.g. in OOM Casablanca -# app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/kubernetes/helm/plugins/" -app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/kubernetes/helm/plugins/" - -# Helm release name (visible in POD names) used by Helm -# e.g. app_helm_release_name: "{{ project_configuration }}" -app_helm_release_name: "{{ project_configuration }}" - -# Kubernetes namespace where application is installed -# e.g. app_kubernetes_namespace: onap -app_kubernetes_namespace: onap - -# Optional application custom Ansible roles name for pre and post install logic. -# Location of additional custom roles is defined in ansible.cfg with roles_path. -# e.g. 
application_pre_install_role: "{{ project_configuration }}-patch-role" -application_pre_install_role: onap-casablanca-patch-role -application_post_install_role: + - "{{ app_name }}" +application_pre_install_role: "{{ app_name }}-casablanca-patch-role" -# any other application specific params can be specified in this file diff --git a/doc/BuildGuide.rst b/doc/BuildGuide.rst index a06180e0..c9148f12 100755 --- a/doc/BuildGuide.rst +++ b/doc/BuildGuide.rst @@ -296,26 +296,34 @@ offline. Use the following command: For example: -``$ ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom 3.0.0-ONAP /root/offline-installer/patches/casablanca_3.0.0.patch /tmp/offline-installer/ansible/application/helm_charts`` +``$ ./build/fetch_and_patch_charts.sh https://gerrit.onap.org/r/oom 3.0.0-ONAP /tmp/offline-installer/patches/casablanca_3.0.0.patch /tmp/oom-clone`` Part 5. Creating offline installation package --------------------------------------------- For the packagin itself it's necessary to prepare configuration. You can -use ./onap/install/onap-offline/build/package.conf as template or +use ./build/package.conf as template or directly modify it. There are some parameters needs to be set in configuration file. +Example values below are set up according to steps done in this guide to package ONAP. 
+---------------------------------------+------------------------------------------------------------------------------+ | Parameter | Description | +=======================================+==============================================================================+ | HELM\_CHARTS\_DIR | directory with Helm charts for the application | +| | Example: /tmp/oom-clone/kubernetes | +---------------------------------------+------------------------------------------------------------------------------+ | APP\_CONFIGURATION | application install configuration (application_configuration.yml) for | -| | ansible installer and custom ansible role code directories if any | +| | ansible installer and custom ansible role code directories if any. | +| | Example: | +| | APP_CONFIGURATION=( | +| | /tmp/offline-installer/config/application_configuration.yml | +| | /tmp/offline-installer/patches/onap-casablanca-patch-role | +| | ) | +---------------------------------------+------------------------------------------------------------------------------+ | APP\_BINARY\_RESOURCES\_DIR | directory with all (binary) resources for offline infra and application | +| | Example: /tmp/onap-offline/resources | +---------------------------------------+------------------------------------------------------------------------------+ | APP\_AUX\_BINARIES | additional binaries such as docker images loaded during runtime [optional] | +---------------------------------------+------------------------------------------------------------------------------+ @@ -327,7 +335,7 @@ following command run from offline-installer directory E.g. -``$ ./build/package.sh onap 1.0.1 /tmp/package_onap_1.0.0"`` +``$ ./build/package.sh onap 1.0.1 /tmp/package"`` So in the target directory you should find tar files with -- cgit 1.2.3-korg