author     Michal Ptacek <m.ptacek@partner.samsung.com>   2019-02-13 15:34:14 +0000
committer  Gerrit Code Review <gerrit@onap.org>           2019-02-13 15:34:14 +0000
commit     8063d1a15de138bbeff98fc8b48c78154ada8717 (patch)
tree       a0d21b562a259c17e9caac20294df6abb3c5f367 /ansible
parent     59d5c325dcdd0f46be5ae15b683474578b13d98c (diff)
parent     f3eee9e2131a59e2a0995c53c07001f24f9187a2 (diff)
Merge changes Idcc4c510,I81261f51,Ica9fc768,I7b5d135a
* changes:
  More default values and simplify onap config
  Clarify packaging variables documentation
  Helm install optional and default values
  Helm charts dir commenting causes / dir copying
Diffstat (limited to 'ansible')
-rw-r--r--  ansible/application/README.md                     | 29
-rwxr-xr-x  ansible/group_vars/all.yml                        | 89
-rw-r--r--  ansible/roles/application-install/tasks/main.yml  | 17
3 files changed, 74 insertions, 61 deletions
diff --git a/ansible/application/README.md b/ansible/application/README.md
index 342240be..d260b3cb 100644
--- a/ansible/application/README.md
+++ b/ansible/application/README.md
@@ -1,8 +1,8 @@
# Application specific configuration
This directory is **empty** on purpose in git. Content in this folder is
-placed on installer packaging time and can be modified by user on target
-server where installer package is installed.
+populated at packaging time (see package.sh/package.conf) and can be modified
+if needed also on the target server where the package is installed.
## Application configuration
@@ -20,34 +20,20 @@ Example:
Application helm charts must be available on infra node before application playbook is executed.
That folder on infra node is specified within `app_helm_charts_infra_directory` variable.
-Helm charts folder name is configured on `application_configuration.yml` file
-with `app_helm_charts_directory` variable - it is the path on remote infrastructure server.
-
-Example:
-```
-app_helm_charts_directory: /opt/application/helm_charts
-```
-
-It is expected that helm charts are available from packaging script as a part of installer SW package.
-Such source directory of helm charts is specified by `app_helm_charts_install_directory` variable
-
-Example:
-```
-app_helm_charts_install_directory: ansible/application/helm_charts/kubernetes
-```
+This variable has a sensible default value; if it is left unchanged, the installer
+handles the Helm charts transfer from the package up to the target infra server.
## Application specific roles
Installer supports optional custom pre and post install roles. Custom roles' code folders
-need to be placed to this directory and name of those folders are configured in
+are placed into this directory at packaging time, and the names of those folders are configured in
application.yml with variable `application_pre_install_role` and `application_post_install_role`.
Example:
```
-application_pre_install_role: "{{ project_configuration }}-patch-role"
+application_pre_install_role: "{{ app_name }}-patch-role"
```
-
## Inventory hosts
Ansible inventory file is least application specific but in practice example
@@ -56,3 +42,6 @@ and at least ip addresses need to be changed according to target servers after
installer installation and before starting installer execution.
So it's better to place also hosts.yml to this application directory and edit it here.
+That can be done either at packaging time, in the same way as for application_configuration.yml,
+or after the package has been installed on the server where the ansible process is run, just
+before launching any playbooks.
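
For orientation, an inventory of the kind discussed above could look roughly as follows. This is a sketch only: the group names, host names and addresses are illustrative assumptions, not part of this change, and must be adapted to the target servers.
```
---
# Illustrative hosts.yml; replace the addresses with the real target servers.
all:
  children:
    infrastructure:            # assumed group for the infra node
      hosts:
        infrastructure-server:
          ansible_host: 10.8.8.9
    kubernetes:                # assumed group for the Kubernetes nodes
      hosts:
        kubernetes-node-1:
          ansible_host: 10.8.8.19
```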
diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml
index e70a837b..cd8c7f58 100755
--- a/ansible/group_vars/all.yml
+++ b/ansible/group_vars/all.yml
@@ -5,15 +5,18 @@
# Resource host information
-# folder on resource host where tars with resources are present
+# Directory on resource host where tars with resources are present
resources_dir:
-# tarfile name within this folder with offline infrastructure sw
+# Tarfile name (within the resources_dir directory) with the offline infrastructure binaries.
+# Content of APP_BINARY_RESOURCES_DIR (defined in package.conf), packaged by package.sh into a single tar file.
resources_filename:
+# Tarfile name (within the resources_dir directory) with auxiliary resources.
+# Content of APP_AUX_BINARIES (defined in package.conf), packaged by package.sh into a single tar file.
# the purpose of auxiliary resources is to provide user an interface
-# of how to distribute to infra node another big tar which might be
-# usefull later on in application playbooks, optional param
+# to distribute to the infra node a tar file with application-specific files.
+# Docker images in tar format are currently the only supported content of the aux_resources package.
aux_resources_filename:
# resources can be exported via nfs
@@ -23,14 +26,17 @@ resources_on_nfs: no
# Infra node specific information
-# offline solution source data binaries will be decompressed in following dir on infra
+# Offline solution source data binaries (resources_filename tar) will be
+# decompressed into this directory on the target infra server.
# e.g. app_data_path: /opt/onap
app_data_path:
-# additional data path for auxiliary data transfer
-# e.g. aux_data_path: /opt/onap/onap_me_docker_images
-aux_data_path:
-
+# Path for auxiliary data on the target infra server.
+# Data from the resource host, defined by the aux_resources_filename variable, is placed into this directory.
+# Currently docker images in tar format are supported (see the runtime_images parameter).
+# It could also be used for other kinds of application-specific data.
+# e.g. aux_data_path: /opt/onap/my_extra_pods_docker_images
+aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
##########################################
@@ -59,51 +65,52 @@ deploy_rpm_repository: yes
# Offline solution is deploying app specific rpm repository and requires some name
# also for k8s cluster
-# e.g. app_name: ONAP
+# e.g. app_name: onap
app_name:
-# as nexus blob is prepopulated during build time following block
-# of runtime_images code provides an alternative way how to insert
-# specified images into nexus during infrastructure playbook execution
-# images specified in there must be available inside aux_resources_filename
-# tar file
+# runtime_images provides a way to insert docker images
+# into nexus during infrastructure playbook execution (they are populated into nexus at runtime).
+# The images specified here must be available inside the aux_resources_filename
+# tar file, which the installer extracts into the aux_data_path directory on the infra server.
+# The source format of an image is a .tar file in the aux_data_path directory, and all .tar
+# files in that directory are checked against the runtime_images definition.
# if runtime_images are not specified nothing is inserted on top of existing
-# prebuilt nexus blob in installation time
-# Component name must match with tar filename
+# prebuilt nexus blob at installation time.
+# The component name must match the tar filename!
# e.g.
# aaa-component-0.0.1.tar is expected in aux_data_path for aaa-component image
#runtime_images:
- # aaa-component-0.0.1:
+# aaa-component-0.0.1:
# registry: "nexus3.onap.org:10001"
# path: "/onap/components/aaa-component"
# tag: "latest"
runtime_images:
-
###############################
# Application specific params #
###############################
-# Project name to utilize same codebase
-# e.g. project_configuration: onap-me
-project_configuration:
-
-# App Helm charts dir. E.g. application/helm_charts/<xxx> where xxx is a charts folder name.
-# Helm charts are expected to be inside SW package somewhere inside ./ansible/application
-# those will be available for offline installer under /ansible/application/<helm_charts_name>
-# for OOM project helm charts are usually within kubernetes sub-folder
-# so the path for them can be:
-# e.g app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
-app_helm_charts_install_directory:
-
-# to specify target dir where helm charts should be copied into on infra node
-# this should be directory with all charts and Makefile
+# App Helm charts directory location in installation package.
+# The path is an absolute path (even though it points inside this sw package
+# installation folder) because it must be visible to the ansible docker/chroot
+# process, so that it can find the directory and transfer it to the machine
+# (infra node) running the Helm repository.
+# Content of the folder must be Helm chart directories of the app with Makefile.
+# In case of ONAP OOM it would be <oom_repo>/kubernetes folder content.
+# NOTE: This default value should not be changed unless really needed, and it
+# must match the value of the "HELM_CHARTS_DIR_IN_PACKAGE" variable in the
+# package.sh script!
+app_helm_charts_install_directory: "/ansible/application/helm_charts"
+
+# Target directory into which the helm charts are copied on the infra node.
+# (The content of "app_helm_charts_install_directory" is copied by the installer into this directory.)
+# This must be a directory with all charts and the Makefile.
# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
-app_helm_charts_infra_directory:
+app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
# Main Helm chart to install
# e.g. app_helm_chart_name: onap
-app_helm_chart_name:
+app_helm_chart_name: "{{ app_name }}"
# Targets for helm charts repository build
# e.g. for ONAP Casablanca
@@ -114,20 +121,20 @@ app_helm_build_targets:
# Directory with helm plugins
# It's an optional parameter used e.g. in OOM Casablanca
-# app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/kubernetes/helm/plugins/"
-app_helm_plugins_directory:
+# app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/"
+app_helm_plugins_directory: "{{ app_helm_charts_install_directory}}/helm/plugins/"
# Helm release name (visible in POD names) used by Helm
-# e.g. app_helm_release_name: "{{ project_configuration }}"
-app_helm_release_name:
+# e.g. app_helm_release_name: onap
+app_helm_release_name: "{{ app_name }}"
# Kubernetes namespace where application is installed
# e.g. app_kubernetes_namespace: onap
-app_kubernetes_namespace:
+app_kubernetes_namespace: "{{ app_name }}"
# Optional application custom Ansible roles name for pre and post install logic.
# Location of additional custom roles is defined in ansible.cfg with roles_path.
-# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role"
+# e.g. application_pre_install_role: "{{ app_name }}-patch-role"
application_pre_install_role:
application_post_install_role:
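
With the defaults introduced above, only a few variables still have to be provided per deployment. A minimal application_configuration.yml could then look roughly like this; the variable names come from group_vars/all.yml above, while the values are illustrative assumptions only:
```
---
# Illustrative minimal overrides; everything else keeps the defaults
# from group_vars/all.yml shown above.
resources_dir: /data/onap-offline
resources_filename: resources_package.tar
app_data_path: /opt/onap
app_name: onap
```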
diff --git a/ansible/roles/application-install/tasks/main.yml b/ansible/roles/application-install/tasks/main.yml
index 3306d9e4..89e7ef7e 100644
--- a/ansible/roles/application-install/tasks/main.yml
+++ b/ansible/roles/application-install/tasks/main.yml
@@ -2,4 +2,21 @@
- debug:
    msg: "phase is {{ phase }}"
+- name: Check if install needed
+  block:
+  - name: "Does {{ app_helm_charts_install_directory }} exist and contain Helm Charts"
+    find:
+      paths: "{{ app_helm_charts_install_directory }}"
+      recurse: yes
+    delegate_to: localhost
+    register: charts_files
+  - name: Set install active fact
+    set_fact:
+      install_needed: "{{ charts_files.matched | int > 0 }}"
+  when: phase == "pre-install"
+
- include_tasks: "{{ phase }}.yml"
+ when: install_needed
+
+- debug:
+    msg: "Install needed {{ install_needed }}"
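
For context on how `phase` and the new `install_needed` guard interact, a playbook could drive this role roughly as below. The play name, host group and role wiring are illustrative assumptions; only the `phase` values follow from the tasks above.
```
---
# Illustrative only: the same role is applied once per phase on the infra node.
- name: Application install
  hosts: infrastructure          # assumed host group
  roles:
    - role: application-install
      vars:
        phase: pre-install       # sets the install_needed fact from the found charts
    - role: application-install
      vars:
        phase: install           # skipped when install_needed is false
    - role: application-install
      vars:
        phase: post-install
```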