path: root/tools/cicdansible/heat/installer.yaml
# This is the environment Heat template, compatible with OpenStack Ocata.
heat_template_version: 2017-02-24
description: "Heat template for deploying the ONAP environment"
parameters:
  auth_key:
    label: "Auth public key"
    description: "The public key used to authenticate to instances"
    type: string
  node_flavor_name:
    label: "name of node flavor"
    description: "The name of the flavor used to create kubernetes nodes"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  infra_flavor_name:
    label: "name of infra flavor"
    description: "flavor used to create infra instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  installer_flavor_name:
    label: "name of installer flavor"
    description: "flavor used to create installer instance"
    type: string
    constraints:
      - custom_constraint: nova.flavor
        description: "need to specify a valid flavor"
  image_name:
    label: "image name"
    description: "name of the image from which to create all instances, should be rhel/centos 7.9 image"
    type: string
    constraints:
      - custom_constraint: glance.image
        description: "must specify a valid image name"
  availability_zone:
    label: "availability zone"
    description: "availability zone to use for scheduling instances"
    type: string
  subnet_cidr:
    label: "private subnet cidr"
    description: "Cidr of a private subnet instances will be connected to"
    type: string
    constraints:
      - custom_constraint: net_cidr
  subnet_range_start:
    label: "subnet dhcp allocation range start"
    description: "Start of range of dhcp allocatable ips on private subnet"
    type: string
    constraints:
      - custom_constraint: ip_addr
  subnet_range_end:
    label: "end of subnet dhcp allocation range"
    description: "End of private subnet's dhcp allocation range"
    type: string
    constraints:
      - custom_constraint: ip_addr
  router_addr:
    label: "ip address of router"
    description: "IP address of the router allowing access to other networks incl. company network"
    type: string
    constraints:
      - custom_constraint: ip_addr
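  # Example (hypothetical) values for the four parameters above: subnet_cidr 10.1.0.0/24,
  # subnet_range_start 10.1.0.10, subnet_range_end 10.1.0.200, router_addr 10.1.0.1.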
  dns_nameservers:
    label: "dns resolvers"
    description: "List of dns resolvers"
    type: comma_delimited_list
  public_network_name:
    label: "name of the public network"
    description: "Name of the public, internet facing network, also allowing access to company internal hosts"
    type: string
    constraints:
      - custom_constraint: neutron.network
        description: "Must specify a valid network name or id"
  external_subnet_cidr:
    label: "external subnet cidr"
    description: "The CIDR of the external subnet, that should be accessible from instances, even when internet access is cut. Putting 0.0.0.0/0 here means access to internet."
    type: string
    constraints:
      - custom_constraint: net_cidr
  installer_ip:
    label: "floating ip of the installer"
    description: "a pre-allocated floating ip that will be associated with the installer instance"
    type: string
  infra_ip:
    label: "floating ip of the infra"
    description: "a pre-allocated floating ip that will be associated with the infrastructure instance"
    type: string
  node_ip:
    label: "floating ip of the first node"
    description: "a pre-allocated floating ip that will be associated with the first kubernetes node and allow accessing onap"
    type: string
  num_nodes:
    label: "num nodes"
    description: "the number of kubernetes nodes to create, min 1"
    type: number
    constraints:
      - range: { min: 1 }
        description: "must be a positive number"
  use_volume_for_nfs:
    type: boolean
    label: "use volume for nfs storage"
    description: "Indicates whether a cinder volume should be used for nfs storage or not. If not checked, the nfs would be stored in the root disk"
  demo_network:
    label: "demo net id"
    type: string
    description: "specifies id of network used for demo usecases"
    default: ""
  docker_storage_size:
    label: "nodes' docker storage size"
    type: number
    description: "Size of the volume for the docker storage on nodes"
conditions:
  # Condition controlling whether a Cinder volume is used for NFS storage.
  use_volume_for_nfs: { get_param: use_volume_for_nfs }
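  # When this is false, the nfs_storage volume and its attachment below are not created,
  # and node0_volumes evaluates to an empty list.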
resources:
  # Security group used to secure access to instances.
  secgroup:
    type: OS::Neutron::SecurityGroup
    properties:
      rules:
        # Egress rule allowing access to external_subnet_cidr.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Ingress rule, also allowing inbound access from the external network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: external_subnet_cidr }
        # Allow outbound communication with the internal subnet.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow inbound communication from internal network.
        - direction: ingress
          ethertype: IPv4
          remote_ip_prefix: { get_param: subnet_cidr }
        # Allow outbound access to 169.254.0.0/16, mainly for metadata. We do not need inbound.
        - direction: egress
          ethertype: IPv4
          remote_ip_prefix: 169.254.0.0/16
  #A network that our test environment will be connected to.
  privnet:
    type: OS::Neutron::Net
  #Subnet that instances will live in.
  privsubnet:
    type: OS::Neutron::Subnet
    properties:
      network: { get_resource: privnet }
      cidr: { get_param: subnet_cidr }
      allocation_pools:
        - { start: { get_param: subnet_range_start }, end: { get_param: subnet_range_end } }
      gateway_ip: { get_param: router_addr }
      dns_nameservers: { get_param: dns_nameservers }
      ip_version: 4
  # A port connected to the private network, used by the router.
  routerport:
    type: OS::Neutron::Port
    properties:
      network: { get_resource: privnet }
      fixed_ips:
        - { subnet: { get_resource: privsubnet }, ip_address: { get_param: router_addr } }
      security_groups: [{ get_resource: secgroup }]
  # This is a router routing between our private network and the internet.
  # It has an external gateway to the public network.
  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_network_name }
  #This is a router interface connecting it to our private subnet's router port.
  routercon:
    type: OS::Neutron::RouterInterface
    properties:
      router: { get_resource: router }
      port: { get_resource: routerport }

  #Key used to authenticate to instances as root.
  key:
    type: OS::Nova::KeyPair
    properties:
      name: { get_param: "OS::stack_name" }
      public_key: { get_param: auth_key }
  # Handle that instances use to signal that they have started up.
  instance_wait_handle:
    type: OS::Heat::WaitConditionHandle
  #Monitor waiting for all instances to start.
  instance_wait:
    type: OS::Heat::WaitCondition
    properties:
      handle: { get_resource: instance_wait_handle }
      timeout: 1200
      count:
        yaql:
          data: { num_nodes: { get_param: num_nodes } }
          # This is the number of nodes + 2 (the infra and installer instances).
          expression: "$.data.num_nodes + 2"
  # Affinity policy - nodes spread onto as many physical machines as possible (aka "anti-affinity").
  anti_affinity_group:
    type: OS::Nova::ServerGroup
    properties:
      name: k8s nodes on separate computes
      policies:
        - anti-affinity
  # Resource group deploying n nodes from the node template, numbered starting from 0.
  nodes:
    type: OS::Heat::ResourceGroup
    properties:
      count: { get_param: num_nodes }
      resource_def:
        type: node.yaml
        properties:
          nodenum: "%index%"
          key_name: { get_resource: key }
          image_name: { get_param: image_name }
          network: { get_resource: privnet }
          subnet: { get_resource: privsubnet }
          flavor_name: { get_param: node_flavor_name }
          availability_zone: { get_param: availability_zone }
          notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
          security_group: { get_resource: secgroup }
          demo_network: { get_param: demo_network }
          docker_storage_size: { get_param: docker_storage_size }
          scheduler_hints:
            group: { get_resource: anti_affinity_group }
    depends_on: [routercon, instance_wait_handle]
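  # Note: the node.yaml subtemplate is expected to accept the properties listed above as
  # parameters and to expose at least the "port_id" and "volumes" attributes, which are
  # referenced further below.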
  # NFS storage volume for the first node.
  nfs_storage:
    type: OS::Cinder::Volume
    condition: use_volume_for_nfs
    properties:
      name: nfs_storage
      size: 50
  # Attachment of the NFS volume to the first node.
  nfs_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    condition: use_volume_for_nfs
    properties:
      instance_uuid: { get_attr: [nodes, "resource.0"] }
      volume_id: { get_resource: nfs_storage }
  # Floating IP association for the first node only.
  node_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: node_ip }
      port_id: { get_attr: ["nodes", "resource.0.port_id"] }
  # OpenStack volume used for storing resources.
  resources_storage:
    type: OS::Cinder::Volume
    properties:
      name: "resources_storage"
      size: 120
  # Infrastructure instance, created using the instance subtemplate.
  infra:
    type: "instance.yaml"
    properties:
      instance_name: infra
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      key_name: { get_resource: key }
      flavor_name: { get_param: infra_flavor_name }
      availability_zone: { get_param: availability_zone }
      image_name: { get_param: image_name }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
      scheduler_hints: {}
      demo_network: { get_param: demo_network }
    depends_on: [instance_wait_handle]
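  # Note: the instance.yaml subtemplate is expected to expose at least the "ip" and
  # "port_id" attributes, which are used by the floating IP associations and outputs below.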
  #Volume attachment for infra node.
  resources_storage_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: resources_storage }
      instance_uuid: { get_resource: infra }
  # Floating IP association for the infra instance.
  infra_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: infra_ip }
      port_id: { get_attr: ["infra", "port_id"] }
  # Small installer VM with access to the other instances, used to install ONAP.
  installer:
    type: "instance.yaml"
    properties:
      instance_name: installer
      image_name: { get_param: image_name }
      flavor_name: { get_param: installer_flavor_name }
      availability_zone: { get_param: availability_zone }
      key_name: { get_resource: key }
      network: { get_resource: privnet }
      subnet: { get_resource: privsubnet }
      notify_command: { get_attr: ["instance_wait_handle", "curl_cli"] }
      security_group: { get_resource: secgroup }
      scheduler_hints: {}
    depends_on: instance_wait_handle
  # Floating IP association for the installer.
  installer_fip_assoc:
    type: OS::Neutron::FloatingIPAssociation
    properties:
      floatingip_id: { get_param: installer_ip }
      port_id: { get_attr: [installer, port_id] }
  # Map of node volumes, taken from the nodes' "volumes" output attribute.
  node_volumes:
    type: OS::Heat::Value
    properties:
      type: json
      # We need a yaql transformation to be done on the volume map.
      value:
        yaql:
          data:
            # This is a map from node number to the value of the "volumes" attribute, which
            # contains a list of volumes written as [volume_id, mountpoint] pairs.
            volumes: { get_attr: [nodes, attributes, volumes] }
          # We need a yaql expression to transform node numbers into node names of the form "node0".
          # Nothing more complicated is needed.
          expression: "$.data.volumes?.items()?.toDict('node'+str($[0]), $[1])"
  # List of infra-specific volumes (not a map as above).
  infra_volumes:
    type: OS::Heat::Value
    properties:
      value:
        - [{ get_resource: resources_storage }, "/opt/onap"]
  # Contains the node0-specific volume list.
  node0_volumes:
    type: OS::Heat::Value
    properties:
      # Note that this evaluates to an empty list if the NFS volume is disabled.
      value:
        if:
          - use_volume_for_nfs
          - - [{ get_resource: nfs_storage }, "/dockerdata-nfs"]
          - []
#Output values
outputs:
  network_name:
    value: { get_attr: [privnet, name] }
    description: "Name of the private network"
  network_id:
    value: { get_resource: privnet }
    description: "ID of the private network"
  subnet_id:
    value: { get_resource: privsubnet }
    description: "ID of the private subnet"
  installer_ip:
    value: { get_attr: [installer, ip] }
    description: "Internal IP of the installer instance"
  infra_ip:
    value: { get_attr: [infra, ip] }
    description: "Internal IP of the infra instance"
  node_ips:
    value: { get_attr: [nodes, ip] }
    description: "Serialized json list of node internal ips starting at node0"
  volumes:
    description: "map of volumes per each instance"
    value:
      #Can do deep merging only with yaql.
      yaql:
        data:
          node_volumes: { get_attr: [node_volumes, value]}
          infra_volumes: { infra: { get_attr: [infra_volumes, value] }}
          node0_volumes: {node0: { get_attr: [node0_volumes, value] }}
        expression: "$.data.node_volumes?.mergeWith($.data.infra_volumes)?.mergeWith($.data.node0_volumes)"