Diffstat (limited to 'build/package.conf')
-rw-r--r--  build/package.conf | 129
1 file changed, 69 insertions, 60 deletions
diff --git a/build/package.conf b/build/package.conf
index 1141798b..29ca3cb1 100644
--- a/build/package.conf
+++ b/build/package.conf
@@ -1,71 +1,80 @@
-# For the packaging script it is expected that all artifacts are present on local file system
-# (e.g. they can be mounted) Downloading stuff from internet is currently not supported.
-# Furthermore we don't want to replicate content of our static data_lists for download in there
-# and those are downloaded before this packaging script is supposed to be run.
-# Therefore we can limit number of artifacts to be added into packages just to couple of items.
+# The packaging script expects all artifacts to be present on the local file system.
+# Artifacts include:
+# - installer source code (the content of this git repository)
+# - all binary artifacts pre-downloaded from the internet (docker images, rpm packages, npm packages, Maven artifacts etc.)
+# The script will create 3 packages (see the example naming below):
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-sw.tar
+# - installer code (ansible dir in this git repo)
+# - Files/dirs defined by APP_CONFIGURATION if any.
+# - Directory content of HELM_CHARTS_DIR if defined.
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-resources.tar
+# - Directory content of APP_BINARY_RESOURCES_DIR if defined.
+# offline-${PROJECT_NAME}-${PROJECT_VERSION}-aux-resources.tar
+# - Files defined by APP_AUX_BINARIES if any.
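+#
+# Illustrative example (hypothetical values): with PROJECT_NAME=onap and
+# PROJECT_VERSION=1.0.0 the script would produce:
+#   offline-onap-1.0.0-sw.tar
+#   offline-onap-1.0.0-resources.tar
+#   offline-onap-1.0.0-aux-resources.tar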
###########################
-# Project specific params #
+# Application Helm charts #
###########################
-# Final package name will be ${SOFTWARE_PACKAGE_BASENAME}-${PROJECT_NAME}-${PROJECT_VERSION}.tar
-SOFTWARE_PACKAGE_BASENAME="onap-offline"
+# Provide the application to be installed into the Kubernetes cluster. Helm charts are the supported format (https://helm.sh/).
+# The directory provided here must contain all the chart directories of the application (https://docs.helm.sh/developing_charts/#charts) and a Makefile.
+# E.g. in case of the ONAP oom repo it is the content of the kubernetes directory (an illustrative layout is sketched below).
+# NOTE: Leaving this variable commented out means that no Helm application will be installed to the
+# offline Kubernetes cluster. This may sometimes be desired.
+#HELM_CHARTS_DIR=<oom-clone>/kubernetes
+HELM_CHARTS_DIR=/tmp/oom-clone/kubernetes
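+#
+# For illustration only, a HELM_CHARTS_DIR is assumed to look roughly like this
+# (chart names are hypothetical):
+#   <oom-clone>/kubernetes/
+#     Makefile
+#     my-component-chart/   (Helm chart directory: Chart.yaml, values.yaml, templates/, ...)
+#     my-other-chart/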
-########################
-# Helm charts handling #
-########################
+#####################################
+# Application install configuration #
+#####################################
-# directory with helm charts
-HELM_CHARTS_DIR="/root/oom"
-
-
-###################
-# Packages addons #
-###################
-
-# in there we define array of files/directories to be added into particular packages
-# SW_PACKAGE_ADDONS are offline installer specific entries which are supposed to be inserted
-# into ./ansible/application directory which is the only place where installer expects SW addons
-# if directory is specified, whole dir will be copied into ./ansible/application inc. subdirs
-# if file is specified it will be just copied into ./ansible/application folder.
-#
-SW_PACKAGE_ADDONS=(
- '/root/ansible/application/onap-me-patch-role'
- '/root/ansible/application/application_configuration.yml'
+# The APP_CONFIGURATION array variable can be used to provide files/directories
+# in the sw package for the Ansible process to consume.
+# The main configuration for your application is a yml file
+# "application_configuration.yml" (the file name can be anything) in which the user
+# provides values to control the Ansible installer process. The yml file is given
+# as a command line parameter to the ansible run.
+# See the UserGuide documentation (LINK HERE) for more on how to use the installer.
+# The configuration parameters available to the user are listed in the group_vars files:
+#   ansible/group_vars/all.yml
+#   ansible/group_vars/infrastructure.yml
+#   ansible/group_vars/kubernetes.yml
+# Additionally the user can optionally provide own ansible role code to customize the install process.
+# At the moment 2 custom ansible roles are supported: a pre-install and a post-install role, which are
+# run by the installer before the Helm install and after the Kubernetes app has been installed.
+# In application_configuration.yml those role names are configured with the following variables
+# (see also the sketch after the APP_CONFIGURATION example below):
+#   application_pre_install_role: my-pre-install-role
+#   application_post_install_role: my-post-install-role
+# According to Ansible conventions, the roles' code must be placed in directories
+# with the same names.
+#APP_CONFIGURATION=(
+# <offline-installer-clone>/config/application_configuration.yml
+# <offline-installer-clone>/patches/onap-casablanca-patch-role
+# ~/myappfiles/my-post-install-role
+#)
+APP_CONFIGURATION=(
+ /tmp/offline-installer/config/application_configuration.yml
+ /tmp/offline-installer/patches/onap-casablanca-patch-role
)
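+#
+# A minimal application_configuration.yml sketch, for illustration only (role names are
+# hypothetical; the full set of supported parameters is documented in the group_vars
+# files listed above):
+#   ---
+#   application_pre_install_role: my-pre-install-role
+#   application_post_install_role: my-post-install-role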
-# following array contains directories and files from where offline installer can get required artifacts
-# following binaries and files are expected:
-# <path_to_downloads>/downloads ... path to directory with application binaries (e.g. rancher, kubectl, jq, helm)
-# <path_to_git-repo>/git-repo ... path to directory with git repos
-# <path_to_http>/http ... path to directory with http files
-# <path_to_offline_data>/offline_data ... path to directory with infra specific docker images
-# <path_to_pkg>/pkg ... path to directory with rpm/deb packages
-# <path_to_nexus_blob>/nexus_data.tar ... path to tar file with collected nexus blobs (output of different script)
-#
-# alternatively and if all above is available just ./resources folder with contain all of those might be used
-# <path_to_complete_resources_folder>/resources
-#
+# APP_BINARY_RESOURCES_DIR points to a directory containing the directories and files for the offline
+# installer infra and for the application run in that infra.
+# Currently infra and app binaries are mixed in the same place; they may be separated in the future.
+# The following directories and files are expected (an illustrative layout is sketched below):
+#     downloads ... directory with executable binaries for offline infra usage (e.g. rancher, kubectl, jq, helm)
+#     git-repo ... directory with git repos that need to be simulated for the application
+#     http ... directory with http simulation files (e.g. Maven artifacts are simulated here)
+#     offline_data ... directory with offline infra specific docker images
+#     pkg ... directory with rpm/deb packages, mainly for the offline infra
+#     nexus_data.tar ... tar file with pre-generated nexus blobs containing e.g. docker images for the application
+#APP_BINARY_RESOURCES_DIR=~/myappfiles/all_binaries
+APP_BINARY_RESOURCES_DIR=/tmp/onap-offline/resources
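+#
+# For illustration, a populated APP_BINARY_RESOURCES_DIR could look like this
+# (example content only, not an exhaustive listing):
+#   all_binaries/
+#     downloads/        (rancher, kubectl, jq, helm binaries)
+#     git-repo/
+#     http/
+#     offline_data/
+#     pkg/
+#     nexus_data.tar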
-EXTERNAL_BINARIES_PACKAGE_ADDONS=(
- '/root/resources'
-)
-#EXTERNAL_BINARIES_PACKAGE_ADDONS=(
-# '/root/resources/downloads'
-# '/root/resources/git-repo'
-# '/root/resources/http'
-# '/root/resources/offline_data'
-# '/root/resources/pkg'
-# '/root/resources/nexus_data.tar'
+# APP_AUX_BINARIES is an array variable for additional application files.
+# Currently docker images in tar format are supported (see the "docker save" example below).
+#APP_AUX_BINARIES=(
+# ~/myappfiles/docker_images_populated_runtime/aaa-component-0.0.1.tar
+# ~/myappfiles/docker_images_populated_runtime/xyz-component-0.0.1.tar
#)
+APP_AUX_BINARIES=()
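+#
+# An image tar as expected above can be produced with "docker save", e.g.
+# (image name and tag are hypothetical):
+#   docker save -o aaa-component-0.0.1.tar aaa-component:0.0.1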
-# this param should be set to true if additional application package is supposed to be created
-#
-PREPARE_AUX_PACKAGE="true"
-
-# tar files with additional application images supposed to be inserted into nexus during runtime are expected
-#
-#AUX_BINARIES_PACKAGE_ADDONS=(
-# '/root/resource_aux/aaa-component-0.0.1.tar'
-# '/root/resource_aux/xyz-component-0.0.1.tar'
-#)