---
###################################
# Resources configuration entries #
###################################

# Resource host information

# Directory on the resource host where tarballs with resources are present
resources_dir:

# Name of the tarfile within the resources_dir directory that holds the offline infrastructure binaries.
resources_filename:

# Name of the tarfile within the resources_dir directory that holds auxiliary resources.
# The purpose of auxiliary resources is to give the user a way
# to distribute a tarfile with application-specific files to the infra node.
aux_resources_filename:

# Resources can be exported via NFS.
# The default is no - the client will use SSH.
# If set to yes but nfs-utils is missing, the installer falls back to SSH.
resources_on_nfs: no
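
# A filled-in example of the resource host entries above (all values are
# illustrative assumptions, not installer defaults):
# resources_dir: /data/onap-offline
# resources_filename: resources_package.tar
# aux_resources_filename: aux_package.tar
# resources_on_nfs: no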

# Infra node specific information

# Offline solution source data binaries (the resources_filename tar) will be
# decompressed in this directory on the target infra server.
# e.g. app_data_path: /opt/onap
app_data_path:

# Path for tarballs with images to be loaded on the infrastructure server
infra_images_path: "{{ app_data_path }}/offline_data/docker_images_infra"

# Path for auxiliary data on the target infra server.
# Data from the resource host, defined by the aux_resources_filename variable, is placed in this directory.
# Currently docker images in tar format are supported (see the runtime_images parameter).
# It can also be used for other kinds of application-specific data.
# e.g. aux_data_path: /opt/onap/my_extra_pods_docker_images
aux_data_path: "{{ app_data_path }}/runtime_images_source_dir"
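
# With app_data_path set to /opt/onap (the illustrative value above),
# the derived paths resolve to:
# infra_images_path: /opt/onap/offline_data/docker_images_infra
# aux_data_path: /opt/onap/runtime_images_source_dir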


##########################################
# Offline Infrastructure specific params #
##########################################

# Information from which the root CA certificate is created
# e.g.
# organization_name: Samsung
# state_or_province_name: Poland
# country_name: PL
# locality_name: Krakow
certificates:
  organization_name:
  state_or_province_name:
  country_name:
  locality_name:
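
# Filled in with the example values above, the block would look like:
# certificates:
#   organization_name: Samsung
#   state_or_province_name: Poland
#   country_name: PL
#   locality_name: Krakow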

# Force k8s cluster redeployment if the cluster already exists.
# The default is to allow redeployment.
redeploy_k8s_env: yes

# The offline solution deploys an app-specific RPM repository and requires a name
# for it, which is also used for the k8s cluster.
# e.g. app_name: onap
app_name:

# runtime_images provides a way to insert docker images
# into nexus during infrastructure playbook execution (populated to nexus at runtime).
# The images specified must be available inside the aux_resources_filename
# tarfile, which the installer extracts into the aux_data_path directory on the infra server.
# The source format of an image is a .tar file in the aux_data_path directory, and all .tar
# files in that directory are checked against the runtime_images definition.
# If runtime_images is not specified, nothing is inserted on top of the existing
# prebuilt nexus blob at installation time.
# The component name must match the tar filename!
# e.g. for the images below, aaa-component-0.0.1.tar and bbb-component-0.0.1.tar
# are expected in aux_data_path.
#runtime_images:
#  aaa-component-0.0.1:
#    registry: "nexus3.onap.org:10001"
#    path:     "/onap/components/aaa-component"
#    tag:      "latest"
#  bbb-component-0.0.1:
#    registry: "nexus3.onap.org:10001"
#    path:     "/onap/components/bbb-component"
#    tag:      "latest"
runtime_images: {}

###############################
# Application specific params #
###############################

# Location of the app Helm charts directory in the installation package
# (a local path for the ansible process).
# The path is relative to this sw package's installation folder and must be
# visible to the ansible docker/chroot process so that it can find the
# directory and transfer it to the machine (infra node) running the
# Helm repository.
# The content of the folder must be the app's Helm chart directories together with a Makefile.
# In the case of ONAP OOM it would be the content of the <oom_repo>/kubernetes folder.
app_helm_charts_install_directory: application/helm_charts

# Target directory on the infra node into which the helm charts are copied.
# (The installer copies the content of "app_helm_charts_install_directory" into this dir.)
# This must be a directory with all charts and a Makefile.
# e.g. app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"
app_helm_charts_infra_directory: "{{ app_data_path }}/helm_charts"

# Main Helm chart to install
# e.g. app_helm_chart_name: onap
app_helm_chart_name:

# Targets for helm charts repository build
# app_helm_build_targets:
#   - all
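# A multi-target example (the target names are illustrative and must exist
# in the Makefile shipped with the charts):
# app_helm_build_targets:
#   - common
#   - all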
app_helm_build_targets:

# Directory with helm plugins
# It's an optional parameter used e.g. in OOM Casablanca
# app_helm_plugins_directory: "{{ app_helm_charts_install_directory }}/helm/plugins/"
app_helm_plugins_directory: "{{ app_helm_charts_install_directory }}/helm/plugins/"

# Helm release name (visible in pod names)
# e.g. app_helm_release_name: onap
app_helm_release_name: "{{ app_name }}"

# Kubernetes namespace where application is installed
# e.g. app_kubernetes_namespace: onap
app_kubernetes_namespace: "{{ app_name }}"
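
# With app_name set to onap (illustrative), both values above derive to:
# app_helm_release_name: onap
# app_kubernetes_namespace: onap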

# Optional custom Ansible role names for application pre- and post-install logic.
# The location of additional custom roles is defined in ansible.cfg via roles_path.
# e.g. application_pre_install_role: "my-pre-install-role"
application_pre_install_role:
application_post_install_role:

# Any other application-specific params can be specified in this file.
# e.g.
# onap_values:
#  openStackKeyStoneUrl: "http://1.2.3.4:5000"
#  openStackServiceTenantName: "services"
#  openStackDomain: "Default"
#  openStackUserName: "admin"
#  openStackEncryptedPassword: "f7920677e15e2678b0f33736189e8965"

# Optional time synchronisation settings
# timesync:
#   servers:
#     - <ip address of NTP_1>
#     - <...>
#     - <ip address of NTP_N>
#   slewclock: false
#   timezone: <timezone name from tz database>
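
# A filled-in timesync example (all values are illustrative assumptions):
# timesync:
#   servers:
#     - 192.168.0.1
#     - 192.168.0.2
#   slewclock: false
#   timezone: Europe/Prague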