---
###################################
# Resources configuration entries #
###################################
# Resource host information
# folder on the resource host where tars with resources are present
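# an illustrative example (path is not prescribed by the installer, adjust to your environment)
# e.g. resources_dir: /data/onap-offline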
resources_dir:
# name of the tarfile within this folder containing the offline infrastructure sw
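# an illustrative example (actual filename depends on how the package was built)
# e.g. resources_filename: resources_package.tar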
resources_filename:
# the purpose of auxiliary resources is to give the user a way
# to distribute another big tar to the infra node, which might be
# useful later on in application playbooks; optional param
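# an illustrative example (filename is not prescribed by the installer)
# e.g. aux_resources_filename: aux_package.tar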
aux_resources_filename:
# resources can be exported via nfs
# default is no - the client will use ssh
# if set to yes but nfs-utils is missing, it falls back to ssh
resources_on_nfs: no
# Infra node specific information
# offline solution source data binaries will be decompressed into the following dir on the infra node
# e.g. app_data_path: /opt/onap
app_data_path:
# additional data path for auxiliary data transfer
# e.g. aux_data_path: /opt/onap/onap_me_docker_images
aux_data_path:
##########################################
# Offline Infrastructure specific params #
##########################################
# information from which the rootCA is created
# e.g.
# organization_name: Samsung
# state_or_province_name: Poland
# country_name: PL
# locality_name: Krakow
certificates:
organization_name:
state_or_province_name:
country_name:
locality_name:
# Force k8s cluster redeployment if it already exists
# Default value is to allow redeployment
redeploy_k8s_env: yes
# Distribute offline rpm repository
# Default value is to distribute rpm
deploy_rpm_repository: yes
# The offline solution deploys an app specific rpm repository and requires a name,
# which is also used for the k8s cluster
# e.g. app_name: ONAP
app_name:
# As the nexus blob is prepopulated during build time, the following
# runtime_images block provides an alternative way to insert
# specified images into nexus during infrastructure playbook execution.
# Images specified in there must be available inside the aux_resources_filename
# tar file.
# If runtime_images is not specified, nothing is inserted on top of the existing
# prebuilt nexus blob at installation time.
# Component name must match the tar filename
# e.g.
# aaiadapter-0.0.1.tar is expected in aux_data_path for aaiadapter image
#runtime_images:
# aaiadapter-0.0.1:
# registry: "nexus3.onap.org:10001"
# path: "/onap/aaiadapter/aaiadapter"
# tag: "latest"
runtime_images:
###############################
# Application specific params #
###############################
# Project name to utilize the same codebase
# e.g. project_configuration: onap-me
project_configuration:
# App Helm charts dir. E.g. application/helm_charts/<xxx> where xxx is the charts folder name.
# Helm charts are expected to be inside the SW package, somewhere under ./ansible/application;
# they will be available to the offline installer under /ansible/application/<helm_charts_name>.
# For the OOM project, helm charts are usually within the kubernetes sub-folder,
# so the path for them can be:
# e.g app_helm_charts_install_directory: "/ansible/application/oom/kubernetes"
app_helm_charts_install_directory:
# target dir on the infra node into which the helm charts should be copied
# this should be a directory with all charts and the Makefile
# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
app_helm_charts_infra_directory:
# Main Helm chart to install
# e.g. app_helm_chart_name: onap
app_helm_chart_name:
# Helm release name (visible in POD names) used by Helm
# e.g. app_helm_release_name: "{{ project_configuration }}"
app_helm_release_name:
# Kubernetes namespace where application is installed
# e.g. app_kubernetes_namespace: onap
app_kubernetes_namespace:
# Optional custom Ansible role names for application pre and post install logic.
# The location of additional custom roles is defined in ansible.cfg via roles_path.
# e.g. application_pre_install_role: "{{ project_configuration }}-patch-role"
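# an illustrative example for the post install role (name is not prescribed by the installer)
# e.g. application_post_install_role: "{{ project_configuration }}-patch-role"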
application_pre_install_role:
application_post_install_role:
# any other application specific params can be specified in this file
# e.g.
# onap_values:
# openStackKeyStoneUrl: "http://1.2.3.4:5000"
# openStackServiceTenantName: "services"
# openStackDomain: "Default"
# openStackUserName: "admin"
# openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"