aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--heat/vIMS/README24
-rw-r--r--heat/vIMS/base_clearwater.env35
-rw-r--r--heat/vIMS/base_clearwater.yaml421
-rw-r--r--heat/vIMS/bono.yaml234
-rw-r--r--heat/vIMS/dime.yaml225
-rw-r--r--heat/vIMS/dns.yaml204
-rw-r--r--heat/vIMS/ellis.yaml282
-rw-r--r--heat/vIMS/homer.yaml223
-rw-r--r--heat/vIMS/sprout.yaml252
-rw-r--r--heat/vIMS/vellum.yaml227
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/.helmignore28
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/Chart.yaml8
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml10
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml40
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml111
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml343
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml65
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml52
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml23
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml58
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml49
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml21
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml982
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/18-settings.yaml30
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml29
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml29
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml29
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml22
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml22
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml21
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml57
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml18
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml46
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml47
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml67
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml35
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml54
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/values-ingress.yaml74
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/values-knative.yaml72
-rwxr-xr-xvnfs/DAaaS/00-init/gloo/values.yaml56
-rwxr-xr-xvnfs/DAaaS/minio/values.yaml7
41 files changed, 2501 insertions, 2131 deletions
diff --git a/heat/vIMS/README b/heat/vIMS/README
deleted file mode 100644
index 37d05ce5..00000000
--- a/heat/vIMS/README
+++ /dev/null
@@ -1,24 +0,0 @@
-This heat template allows the deployement of the clearwater vIMS [1],
-a fully functional open source vIMS solution.
-
-The current heat template is derivated from the heat templates published
-by Metaswitch [2].
-Initial templates have been adapted to be VVP compliant (they are tested
-through VVP linting in onap-tests repository [3])
-
-These templates still require Ubuntu 14.04 as base image and would need some
-adaptations to be upgraded with more recent ubuntu base images to perform
-the userdata part at boot.
-
-They are integrated in Orange Openlab onap-tests CI chains and part of
-the non regression end to end tests used at ONAP gating for OOM [4] [5].
-
-It has been succesfully tested on ONAP Beijing, Casablanca and Master.
-
-Contacts: morgan.richomme AT orange.com
-
-[1]: https://www.projectclearwater.org/
-[2]: https://github.com/Metaswitch/clearwater-heat
-[3]: https://gitlab.com/Orange-OpenSource/lfn/onap/onap-tests
-[4]: https://gitlab.com/Orange-OpenSource/lfn/onap/xtesting-onap
-[5]: https://wiki.onap.org/display/DW/OOM+Gating
diff --git a/heat/vIMS/base_clearwater.env b/heat/vIMS/base_clearwater.env
deleted file mode 100644
index 07b5412a..00000000
--- a/heat/vIMS/base_clearwater.env
+++ /dev/null
@@ -1,35 +0,0 @@
-parameters:
-# Metadata required by ONAP
- vnf_name: vIMS
- vf_module_id: "654321"
- vnf_id: "123456"
-
-# Server parameters, naming required by ONAP
- bono_flavor_name: "onap.medium"
- bono_image_name: "ubuntu-14.04-daily"
- dime_flavor_name: "onap.medium"
- dime_image_name: "ubuntu-14.04-daily"
- dns_flavor_name: "onap.medium"
- dns_image_name: "ubuntu-14.04-daily"
- ellis_flavor_name: "onap.medium"
- ellis_image_name: "ubuntu-14.04-daily"
- homer_flavor_name: "onap.medium"
- homer_image_name: "ubuntu-14.04-daily"
- robot_flavor_name: "onap.medium"
- robot_image_name: "ubuntu-14.04-daily"
- sprout_flavor_name: "onap.medium"
- sprout_image_name: "ubuntu-14.04-daily"
- vellum_flavor_name: "onap.medium"
- vellum_image_name: "ubuntu-14.04-daily"
-
-# Network parameters, naming required by ONAP
- admin_plane_net_name: "admin"
-
-# Additional parameters
- clearwater_key_name: vims_demo
- clearwater_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY15cdBmIs2XOpe4EiFCsaY6bmUmK/GysMoLl4UG51JCfJwvwoWCoA+6mDIbymZxhxq9IGxilp/yTA6WQ9s/5pBag1cUMJmFuda9PjOkXl04jgqh5tR6I+GZ97AvCg93KAECis5ubSqw1xOCj4utfEUtPoF1OuzqM/lE5mY4N6VKXn+fT7pCD6cifBEs6JHhVNvs5OLLp/tO8Pa3kKYQOdyS0xc3rh+t2lrzvKUSWGZbX+dLiFiEpjsUL3tDqzkEMNUn4pdv69OJuzWHCxRWPfdrY9Wg0j3mJesP29EBht+w+EC9/kBKq+1VKdmsXUXAcjEvjovVL8l1BrX3BY0R8D imported-openssh-key
- repo_url: "http://repo.cw-ngv.com/stable"
- dnssec_key: "9FPdYTWhk5+LbhrqtTPQKw=="
- dn_range_length: "10000"
- dn_range_start: "2425550000"
- zone: "vimstest.onap.org"
diff --git a/heat/vIMS/base_clearwater.yaml b/heat/vIMS/base_clearwater.yaml
deleted file mode 100644
index f290a719..00000000
--- a/heat/vIMS/base_clearwater.yaml
+++ /dev/null
@@ -1,421 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2015-04-30
-
-description: >
- Base Project Clearwater Nazgul deployment on ONAP (Open Network Automation Platform)
-
-parameters:
-# Metadata required by ONAP
- vnf_name:
- type: string
- label: VNF name
- description: Unique name for this VNF instance
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
-
-# flavor parameters, naming required by ONAP
- bono_flavor_name:
- type: string
- description: VM flavor for bono VMs
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- sprout_flavor_name:
- type: string
- description: VM flavor for sprout VMs
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- vellum_flavor_name:
- type: string
- description: VM flavor for homestead VMs
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- homer_flavor_name:
- type: string
- description: VM flavor for homer VMs
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- dime_flavor_name:
- type: string
- description: VM flavor for dime VMs
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- ellis_flavor_name:
- type: string
- description: VM flavor for ellis VM
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- dns_flavor_name:
- type: string
- description: VM flavor for dns VM
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- robot_flavor_name:
- type: string
- description: VM flavor for robot_test VM
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
-
-# image parameters, naming required by ONAP
- bono_image_name:
- type: string
- description: Name of image for bono VMs
- sprout_image_name:
- type: string
- description: Name of image for sprout VMs
- vellum_image_name:
- type: string
- description: Name of image for homestead VMs
- homer_image_name:
- type: string
- description: Name of image for homer VMs
- dime_image_name:
- type: string
- description: Name of image for dime VMs
- ellis_image_name:
- type: string
- description: Name of image for ellis VM
- dns_image_name:
- type: string
- description: Name of image for dns VMs
- robot_image_name:
- type: string
- description: Name of image for robot_test VMs
-
-# overall clearwater parameters, naming required by ONAP
- clearwater_key_name:
- type: string
- label: openSSH Key name
- description: openSSH key name
- clearwater_pub_key:
- type: string
- label: Public key
- description: Public key to be installed on the compute instance
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dn_range_start:
- type: string
- description: First directory number in pool
- constraints:
- - allowed_pattern: "[0-9]+"
- description: Must be numeric
- dn_range_length:
- type: string
- description: Number of directory numbers to add to pool
- constraints:
- - allowed_pattern: "[0-9]+"
- description: Must be numeric
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
-# names parameters
- bono_name_0:
- type: string
- description: The VM name
- sprout_name_0:
- type: string
- description: The VM name
- vellum_name_0:
- type: string
- description: The VM name
- homer_name_0:
- type: string
- description: The VM name
- dime_name_0:
- type: string
- description: The VM name
- ellis_name_0:
- type: string
- description: The VM name
- dns_name_0:
- type: string
- description: The VM name
-
-
-# Network parameters, naming required by ONAP
- admin_plane_net_name:
- type: string
- label: external management network
- description: The external management network
-
-
-resources:
-
- clearwater_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- clearwater_instantiated_key_name:
- type: OS::Nova::KeyPair
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: key_
- base: { get_param: vnf_name }
- rand: { get_resource: clearwater_random_str }
- public_key: { get_param: clearwater_pub_key }
- save_private_key: false
-
- dns:
- type: dns.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- dns_flavor_name: { get_param: dns_flavor_name }
- dns_image_name: { get_param: dns_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- zone: { get_param: zone }
- dnssec_key: { get_param: dnssec_key }
- dns_name_0: { get_param: dns_name_0 }
-
-
- ellis:
- type: ellis.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- ellis_flavor_name: { get_param: ellis_flavor_name }
- ellis_image_name: { get_param: ellis_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dn_range_start: { get_param: dn_range_start }
- dn_range_length: { get_param: dn_range_length }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: "" #for ellis etcd_ip is empty
- ellis_name_0: { get_param: ellis_name_0 }
-
- bono:
- type: bono.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- bono_flavor_name: { get_param: bono_flavor_name }
- bono_image_name: { get_param: bono_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: { get_attr: [ ellis, ellis_ip ] }
- bono_name_0: { get_param: bono_name_0 }
-
- sprout:
- type: sprout.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- sprout_flavor_name: { get_param: sprout_flavor_name }
- sprout_image_name: { get_param: sprout_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: { get_attr: [ ellis, ellis_ip ] }
- sprout_name_0: { get_param: sprout_name_0 }
-
-
- homer:
- type: homer.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- homer_flavor_name: { get_param: homer_flavor_name }
- homer_image_name: { get_param: homer_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: { get_attr: [ ellis, ellis_ip ] }
- homer_name_0: { get_param: homer_name_0 }
-
- vellum:
- type: vellum.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- vellum_flavor_name: { get_param: vellum_flavor_name }
- vellum_image_name: { get_param: vellum_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: { get_attr: [ ellis, ellis_ip ] }
- vellum_name_0: { get_param: vellum_name_0 }
-
- dime:
- type: dime.yaml
- properties:
- vnf_id: { get_param: vnf_id }
- vf_module_id: { get_param: vf_module_id }
- vnf_name: { get_param: vnf_name }
- public_net_id: { get_param: admin_plane_net_name }
- dime_flavor_name: { get_param: dime_flavor_name }
- dime_image_name: { get_param: dime_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- repo_url: { get_param: repo_url }
- zone: { get_param: zone }
- dns_ip: { get_attr: [ dns, dns_ip ] }
- dnssec_key: { get_param: dnssec_key }
- etcd_ip: { get_attr: [ ellis, ellis_ip ] }
- dime_name_0: { get_param: dime_name_0 }
-
- robot_0_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: robot_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: clearwater_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 443, port_range_max: 443},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- robot_0_admin_plane_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: clearwater_random_str }
- network: { get_param: admin_plane_net_name }
- security_groups: [{ get_resource: robot_0_security_group }]
-
- robot_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: robot_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: clearwater_random_str }
- flavor: { get_param: robot_flavor_name }
- image: { get_param: robot_image_name }
- key_name: { get_resource: clearwater_instantiated_key_name }
- networks:
- - port: { get_resource: robot_0_admin_plane_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __zone__: { get_param: zone }
- __DNS_IP_ADDR__: { get_attr: [ dns, dns_ip ] }
-
- template: |
- #!/bin/bash
-
- ## activate debug
- set -x
-
- ## install 'clearwater-live-test' in ubuntu home directory
- ## without this all is installed in root dir
- mkdir /home/ubuntu -p
- cd /home/ubuntu
-
- sudo apt-get update
- sudo apt-get install build-essential git --yes
- curl -sSL https://rvm.io/mpapis.asc | gpg --import -
- curl -L https://get.rvm.io | bash -s stable
- source /etc/profile.d/rvm.sh
- rvm autolibs enable
- rvm install 1.9.3
- rvm use 1.9.3
-
- git config --global url."https://".insteadOf git://
- git config --global url."https://github.com/Metaswitch".insteadOf "git@github.com:Metaswitch"
- git clone -b stable https://github.com/Metaswitch/clearwater-live-test.git --recursive
- cd clearwater-live-test
- sudo apt-get install bundler --yes
- sudo bundle install
-
- ##update dns
- echo "nameserver __DNS_IP_ADDR__" >> /etc/resolvconf/resolv.conf.d/head
- resolvconf -u
-
- echo "To start live-test run: rake test[__zone__] SIGNUP_CODE=secret"
-
- #rake test[vimstest.onap.org] SIGNUP_CODE=secret PROXY=84.39.37.62 ELLIS=84.39.34.60
diff --git a/heat/vIMS/bono.yaml b/heat/vIMS/bono.yaml
deleted file mode 100644
index 25a974df..00000000
--- a/heat/vIMS/bono.yaml
+++ /dev/null
@@ -1,234 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- Clearwater Bono node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- bono_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- bono_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dns_ip:
- type: string
- description: IP address for DNS server on management network
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-resources:
-
- bono_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- bono_Sec_Grp:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: bono_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: bono_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 3478, port_range_max: 3478},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 3478, port_range_max: 3478},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 5060, port_range_max: 5060},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 5060, port_range_max: 5060},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 5062, port_range_max: 5062},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 32768, port_range_max: 65535},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 5058, port_range_max: 5058},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- bono_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: bono_admin_
- base: { get_param: vnf_name }
- rand: { get_resource: bono_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: bono_Sec_Grp }]
-
- bono_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: bono_
- base: { get_param: vnf_name }
- rand: { get_resource: bono_random_str }
- image: { get_param: bono_image_name }
- flavor: { get_param: bono_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: bono_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
-
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-bono.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- management_local_ip=$PUBLIC_ADDR
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=__index__.bono.__zone__
- etcd_cluster=$etcd_ip
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install bono restund --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add bono-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add __index__.bono.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add __zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add __zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.__zone__.
- update add __zone__. 30 NAPTR 0 0 "s" "SIP+D2U" "" _sip._udp.__zone__.
- update add _sip._tcp.__zone__. 30 SRV 0 0 5060 __index__.bono.__zone__.
- update add _sip._udp.__zone__. 30 SRV 0 0 5060 __index__.bono.__zone__.
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-
-outputs:
- bono_ip:
- description: IP address in public network
- value: { get_attr: [ bono_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/heat/vIMS/dime.yaml b/heat/vIMS/dime.yaml
deleted file mode 100644
index b86a60dc..00000000
--- a/heat/vIMS/dime.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- Clearwater dime node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- dime_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- dime_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dns_ip:
- type: string
- description: IP address for DNS server
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-resources:
-
- dime_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- dime_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: dime_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: dime_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 8888, port_range_max: 8888},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 8889, port_range_max: 8889},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 10888, port_range_max: 10888},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- dime_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: dime_admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: dime_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: dime_security_group }]
-
- dime_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: dime_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: dime_random_str }
- image: { get_param: dime_image_name }
- flavor: { get_param: dime_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: dime_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-dime.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- management_local_ip=$PUBLIC_ADDR
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=dime-__index__.__zone__
- etcd_cluster=$etcd_ip
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install dime clearwater-prov-tools --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add dime-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add dime.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add hs.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add ralf.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-outputs:
- dime_ip:
- description: IP address in public network
- value: { get_attr: [ dime_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/heat/vIMS/dns.yaml b/heat/vIMS/dns.yaml
deleted file mode 100644
index 0eb0704b..00000000
--- a/heat/vIMS/dns.yaml
+++ /dev/null
@@ -1,204 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- DNS server exposing dynamic DNS using DNSSEC
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- dns_name_0:
- type: string
- description: Name of server to use
- dns_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- dns_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- zone:
- type: string
- description: DNS zone
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
-
-resources:
-
- dns_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- dns_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: dns_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: dns_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 53, port_range_max: 53},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 53, port_range_max: 53},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- dns_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: base_rand
- params:
- base: dns_admin_port_0
- rand: { get_resource: dns_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: dns_security_group }]
-
- dns_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: dns_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: dns_random_str }
- image: { get_param: dns_image_name }
- flavor: { get_param: dns_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: dns_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __zone__: { get_param: zone }
- __dnssec_key__: { get_param: dnssec_key }
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-dns.log) 2>&1
- set -x
-
- # Install BIND.
- apt-get update
- DEBIAN_FRONTEND=noninteractive apt-get install bind9 --yes
-
- # Get the IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Update BIND configuration with the specified zone and key.
- cat >> /etc/bind/named.conf.local << EOF
- key __zone__. {properties
- algorithm "HMAC-MD5";
- secret "__dnssec_key__";
- };
-
- zone "__zone__" IN {
- type master;
- file "/var/lib/bind/db.__zone__";
- allow-update {
- key __zone__.;
- };
- };
- EOF
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Create basic zone configuration.
- cat > /var/lib/bind/db.__zone__ << EOF
- \$ORIGIN __zone__.
- \$TTL 1h
- @ IN SOA ns admin\@__zone__. ( $(date +%Y%m%d%H) 1d 2h 1w 30s )
- @ NS ns
- ns $(ip2rr $PUBLIC_ADDR)
- EOF
- chown root:bind /var/lib/bind/db.__zone__
-
- # Now that BIND configuration is correct, kick it to reload.
- service bind9 reload
-
-
-outputs:
- dns_ip:
- description: IP address of DNS server
- value: { get_attr: [ dns_server_0, networks, { get_param: public_net_id }, 0 ] }
- zone:
- description: DNS zone
- value: { get_param: zone }
- dnssec_key:
- description: DNSSEC private key (Base64-encoded)
- value: { get_param: dnssec_key }
diff --git a/heat/vIMS/ellis.yaml b/heat/vIMS/ellis.yaml
deleted file mode 100644
index 9010c0c8..00000000
--- a/heat/vIMS/ellis.yaml
+++ /dev/null
@@ -1,282 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- Clearwater Ellis node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- ellis_name_0:
- type: string
- description: Name of server to use
- ellis_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- ellis_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dn_range_start:
- type: string
- description: First directory number in pool
- constraints:
- - allowed_pattern: "[0-9]+"
- description: Must be numeric
- dn_range_length:
- type: string
- description: Number of directory numbers to add to pool
- constraints:
- - allowed_pattern: "[0-9]+"
- description: Must be numeric
- dns_ip:
- type: string
- description: IP address for DNS server
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-resources:
-
- ellis_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- ellis_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: ellis_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: ellis_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 80, port_range_max: 80},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 443, port_range_max: 443},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- ellis_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: ellis_admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: ellis_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: ellis_security_group }]
-
- ellis_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: ellis_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: ellis_random_str }
- image: { get_param: ellis_image_name }
- flavor: { get_param: ellis_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: ellis_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dn_range_start__: { get_param: dn_range_start }
- __dn_range_length__: { get_param: dn_range_length }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-ellis.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config. Add xdms_hostname here to use Homer's management
- # hostname instead of signaling. This will override shared_config. This works around
- # https://github.com/Metaswitch/ellis/issues/153.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=ellis-__index__.__zone__
- etcd_cluster=$etcd_ip
- xdms_hostname=homer-0.__zone__:7888
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install ellis --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Wait until etcd is up and running before uploading the shared_config
- /usr/share/clearwater/clearwater-etcd/scripts/wait_for_etcd
-
- # Configure and upload /etc/clearwater/shared_config.
- cat > /etc/clearwater/shared_config << EOF
- # Deployment definitions
- home_domain=__zone__
- sprout_hostname=sprout.__zone__
- sprout_registration_store=vellum.__zone__
- hs_hostname=hs.__zone__:8888
- hs_provisioning_hostname=hs.__zone__:8889
- sprout_impi_store=vellum.__zone__
- homestead_impu_store=vellum.__zone__
- ralf_hostname=ralf.__zone__:10888
- ralf_session_store=vellum.__zone__
- xdms_hostname=homer.__zone__:7888
- chronos_hostname=vellum.__zone__
- cassandra_hostname=vellum.__zone__
-
- # Email server configuration
- smtp_smarthost=localhost
- smtp_username=username
- smtp_password=password
- email_recovery_sender=clearwater@example.org
-
- # Keys
- signup_key=secret
- turn_workaround=secret
- ellis_api_key=secret
- ellis_cookie_key=secret
- EOF
- sudo /usr/share/clearwater/clearwater-config-manager/scripts/upload_shared_config
-
- # Allocate a pool of numbers to assign to users. Before we do this,
- # restart clearwater-infrastructure to make sure that
- # local_settings.py runs to pick up the configuration changes.
- service clearwater-infrastructure restart
- service ellis stop
- /usr/share/clearwater/ellis/env/bin/python /usr/share/clearwater/ellis/src/metaswitch/ellis/tools/create_numbers.py --start __dn_range_start__ --count __dn_range_length__
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add ellis-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add ellis.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-
-outputs:
- ellis_ip:
- description: IP address in public network
- value: { get_attr: [ ellis_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/heat/vIMS/homer.yaml b/heat/vIMS/homer.yaml
deleted file mode 100644
index c93a240d..00000000
--- a/heat/vIMS/homer.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- Clearwater Homer node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- homer_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- homer_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dns_ip:
- type: string
- description: IP address for DNS server on management network
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-resources:
-
- homer_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- homer_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: homer_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: homer_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 7888, port_range_max: 7888},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- homer_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: homer_admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: homer_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: homer_security_group }]
-
- homer_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: homer_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: homer_random_str }
- image: { get_param: homer_image_name }
- flavor: { get_param: homer_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: homer_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-homer.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- management_local_ip=$PUBLIC_ADDR
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=homer-__index__.__zone__
- etcd_cluster=$etcd_ip
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install homer --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add homer-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add homer.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-
-outputs:
- homer_ip:
- description: IP address in public network
- value: { get_attr: [ homer_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/heat/vIMS/sprout.yaml b/heat/vIMS/sprout.yaml
deleted file mode 100644
index 4a8518f7..00000000
--- a/heat/vIMS/sprout.yaml
+++ /dev/null
@@ -1,252 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2014-10-16
-
-description: >
- Clearwater Sprout node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- sprout_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- sprout_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dns_ip:
- type: string
- description: IP address for DNS server on network
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-resources:
-
- sprout_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- sprout_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: sprout_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: sprout_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 5054, port_range_max: 5054},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 5052, port_range_max: 5052},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 9888, port_range_max: 9888},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 443, port_range_max: 443},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- sprout_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: sprout_admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: sprout_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: sprout_security_group }]
-
- sprout_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: sprout_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: sprout_random_str }
- image: { get_param: sprout_image_name }
- flavor: { get_param: sprout_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: sprout_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-sprout.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- management_local_ip=$PUBLIC_ADDR
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=__index__.sprout.__zone__
- etcd_cluster=$etcd_ip
- EOF
-
- # Create /etc/chronos/chronos.conf.
- mkdir -p /etc/chronos
- cat > /etc/chronos/chronos.conf << EOF
- [http]
- bind-address = $PUBLIC_ADDR
- bind-port = 7253
- threads = 50
-
- [logging]
- folder = /var/log/chronos
- level = 2
-
- [alarms]
- enabled = true
-
- [exceptions]
- max_ttl = 600
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install sprout --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add sprout-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add __index__.sprout.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add sprout.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add scscf.sprout.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add icscf.sprout.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add sprout.__zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.sprout.__zone__.
- update add _sip._tcp.sprout.__zone__. 30 SRV 0 0 5054 __index__.sprout.__zone__.
- update add icscf.sprout.__zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.icscf.sprout.__zone__.
- update add _sip._tcp.icscf.sprout.__zone__. 30 SRV 0 0 5052 __index__.sprout.__zone__.
- update add scscf.sprout.__zone__. 30 NAPTR 0 0 "s" "SIP+D2T" "" _sip._tcp.scscf.sprout.__zone__.
- update add _sip._tcp.scscf.sprout.__zone__. 30 SRV 0 0 5054 __index__.sprout.__zone__.
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-outputs:
- sprout_ip:
- description: IP address in public network
- value: { get_attr: [ sprout_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/heat/vIMS/vellum.yaml b/heat/vIMS/vellum.yaml
deleted file mode 100644
index ab6329b8..00000000
--- a/heat/vIMS/vellum.yaml
+++ /dev/null
@@ -1,227 +0,0 @@
-# Project Clearwater - IMS in the Cloud
-# Copyright (C) 2015 Metaswitch Networks Ltd
-#
-# This program is free software: you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or (at your
-# option) any later version, along with the "Special Exception" for use of
-# the program along with SSL, set forth below. This program is distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;
-# without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-# A PARTICULAR PURPOSE. See the GNU General Public License for more
-# details. You should have received a copy of the GNU General Public
-# License along with this program. If not, see
-# <http://www.gnu.org/licenses/>.
-#
-# The author can be reached by email at clearwater@metaswitch.com or by
-# post at Metaswitch Networks Ltd, 100 Church St, Enfield EN2 6BQ, UK
-#
-# Special Exception
-# Metaswitch Networks Ltd grants you permission to copy, modify,
-# propagate, and distribute a work formed by combining OpenSSL with The
-# Software, or a work derivative of such a combination, even if such
-# copying, modification, propagation, or distribution would otherwise
-# violate the terms of the GPL. You must comply with the GPL in all
-# respects for all of the code used other than OpenSSL.
-# "OpenSSL" means OpenSSL toolkit software distributed by the OpenSSL
-# Project and licensed under the OpenSSL Licenses, or a work based on such
-# software and licensed under the OpenSSL Licenses.
-# "OpenSSL Licenses" means the OpenSSL License and Original SSLeay License
-# under which the OpenSSL Project distributes the OpenSSL toolkit software,
-# as those licenses appear in the file LICENSE-OPENSSL.
-
-heat_template_version: 2015-04-30
-
-description: >
- Clearwater Vellum node
-
-parameters:
- vnf_name:
- type: string
- label: VNF ID
- description: The VNF name provided by ONAP
- vnf_id:
- type: string
- label: VNF ID
- description: The VNF ID provided by ONAP
- vf_module_id:
- type: string
- label: VNF module ID
- description: The VNF module ID provided by ONAP
- public_net_id:
- type: string
- description: ID of public network
- constraints:
- - custom_constraint: neutron.network
- description: Must be a valid network ID
- vellum_flavor_name:
- type: string
- description: Flavor to use
- constraints:
- - custom_constraint: nova.flavor
- description: Must be a valid flavor name
- vellum_image_name:
- type: string
- description: Name of image to use
- key_name:
- type: string
- description: Name of keypair to assign
- constraints:
- - custom_constraint: nova.keypair
- description: Must be a valid keypair name
- repo_url:
- type: string
- description: URL for Clearwater repository
- zone:
- type: string
- description: DNS zone
- dns_ip:
- type: string
- description: IP address for DNS server on management network
- dnssec_key:
- type: string
- description: DNSSEC private key (Base64-encoded)
- constraints:
- - allowed_pattern: "[0-9A-Za-z+/=]+"
- description: Must be Base64-encoded
- etcd_ip:
- type: string
- description: IP address of an existing member of the etcd cluster
-
-
-resources:
-
- vellum_random_str:
- type: OS::Heat::RandomString
- properties:
- length: 4
-
- vellum_security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- description: security group
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: vellum_sg_
- base: { get_param: vnf_name }
- rand: { get_resource: vellum_random_str }
- rules: [
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 22, port_range_max: 22},
- {remote_ip_prefix: 0.0.0.0/0, protocol: udp, port_range_min: 161, port_range_max: 162},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 2380, port_range_max: 2380},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 4000, port_range_max: 4000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 7253, port_range_max: 7253},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 11211, port_range_max: 11211},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 7000, port_range_max: 7000},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 11311, port_range_max: 11311},
- {remote_ip_prefix: 0.0.0.0/0, protocol: tcp, port_range_min: 9160, port_range_max: 9160},
- {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}]
-
- vellum_admin_port_0:
- type: OS::Neutron::Port
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: vellum_admin_port_0_
- base: { get_param: vnf_name }
- rand: { get_resource: vellum_random_str }
- network: { get_param: public_net_id }
- security_groups: [{ get_resource: vellum_security_group }]
-
- vellum_server_0:
- type: OS::Nova::Server
- properties:
- name:
- str_replace:
- template: pre_base_rand
- params:
- pre: vellum_server_0_
- base: { get_param: vnf_name }
- rand: { get_resource: vellum_random_str }
- image: { get_param: vellum_image_name }
- flavor: { get_param: vellum_flavor_name }
- key_name: { get_param: key_name }
- networks:
- - port: { get_resource: vellum_admin_port_0 }
- metadata: {vnf_id: { get_param: vnf_id }, vf_module_id: { get_param: vf_module_id }, vnf_name: { get_param: vnf_name }}
- user_data_format: RAW
- user_data:
- str_replace:
- params:
- __repo_url__: { get_param: repo_url }
- __zone__: { get_param: zone }
- __dns_ip__: { get_param: dns_ip }
- __dnssec_key__: { get_param: dnssec_key }
- __etcd_ip__ : { get_param: etcd_ip }
- __index__ : 0
- template: |
- #!/bin/bash
-
- # Log all output to file.
- exec > >(tee -a /var/log/clearwater-heat-vellum.log) 2>&1
- set -x
-
- # Configure the APT software source.
- echo 'deb __repo_url__ binary/' > /etc/apt/sources.list.d/clearwater.list
- curl -L http://repo.cw-ngv.com/repo_key | apt-key add -
- apt-get update
-
- # Get the public IP address from eth0
- sudo apt-get install ipcalc
- ADDR=`ip addr show eth0 | awk '/inet /{print $2}'`
- PUBLIC_ADDR=`ipcalc -n -b $ADDR | awk '/Address:/{print $2}'`
-
- # Configure /etc/clearwater/local_config.
- mkdir -p /etc/clearwater
- etcd_ip=__etcd_ip__
- [ -n "$etcd_ip" ] || etcd_ip=$PUBLIC_ADDR
- cat > /etc/clearwater/local_config << EOF
- management_local_ip=$PUBLIC_ADDR
- local_ip=$PUBLIC_ADDR
- public_ip=$PUBLIC_ADDR
- public_hostname=vellum-__index__.__zone__
- etcd_cluster=$etcd_ip
- EOF
-
- # Now install the software.
- DEBIAN_FRONTEND=noninteractive apt-get install vellum --yes --force-yes
- DEBIAN_FRONTEND=noninteractive apt-get install clearwater-management --yes --force-yes
-
- # Function to give DNS record type and IP address for specified IP address
- ip2rr() {
- if echo $1 | grep -q -e '[^0-9.]' ; then
- echo AAAA $1
- else
- echo A $1
- fi
- }
-
- # Update DNS
- retries=0
- while ! { nsupdate -y "__zone__:__dnssec_key__" -v << EOF
- server __dns_ip__
- update add vellum-__index__.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- update add vellum.__zone__. 30 $(ip2rr $PUBLIC_ADDR)
- send
- EOF
- } && [ $retries -lt 10 ]
- do
- retries=$((retries + 1))
- echo 'nsupdate failed - retrying (retry '$retries')...'
- sleep 5
- done
-
- # Use the DNS server.
- echo 'nameserver __dns_ip__' > /etc/dnsmasq.resolv.conf
- echo 'RESOLV_CONF=/etc/dnsmasq.resolv.conf' >> /etc/default/dnsmasq
- service dnsmasq force-reload
-
-
-outputs:
- vellum_ip:
- description: IP address in public network
- value: { get_attr: [ vellum_server_0, networks, { get_param: public_net_id }, 0 ] }
diff --git a/vnfs/DAaaS/00-init/gloo/.helmignore b/vnfs/DAaaS/00-init/gloo/.helmignore
new file mode 100755
index 00000000..08c5989a
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/.helmignore
@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# template files
+*-template.yaml
+
+# generator files
+*.go
+generate/
diff --git a/vnfs/DAaaS/00-init/gloo/Chart.yaml b/vnfs/DAaaS/00-init/gloo/Chart.yaml
new file mode 100755
index 00000000..4f5e9315
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Gloo Helm chart for Kubernetes
+home: https://gloo.solo.io/
+icon: https://raw.githubusercontent.com/solo-io/gloo/master/docs/img/Gloo-01.png
+name: gloo
+sources:
+- https://github.com/solo-io/gloo
+version: 0.13.18
diff --git a/vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml b/vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml
new file mode 100755
index 00000000..92a37f9d
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/0-namespace.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.namespace.create -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ annotations:
+ "helm.sh/hook": pre-install
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml
new file mode 100755
index 00000000..7314b4e3
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/10-ingress-deployment.yaml
@@ -0,0 +1,40 @@
+{{- if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingress.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress
+ template:
+ metadata:
+ labels:
+ gloo: ingress
+ spec:
+ containers:
+ - image: "{{ .Values.ingress.deployment.image.repository }}:{{ .Values.ingress.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingress.deployment.image.pullPolicy }}
+ name: ingress
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.settings.integrations.knative.enabled }}
+ - name: "ENABLE_KNATIVE_INGRESS"
+ value: "true"
+{{- end }}
+
+{{- if not (.Values.ingress.enabled) }}
+ - name: "DISABLE_KUBE_INGRESS"
+ value: "true"
+{{- end }}
+
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml b/vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml
new file mode 100755
index 00000000..2c111170
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/100-gloo-crds.yaml
@@ -0,0 +1,111 @@
+{{- if .Values.crds.create }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: settings.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ gloo: settings
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Settings
+ listKind: SettingsList
+ plural: settings
+ shortNames:
+ - st
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: gateways.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: Gateway
+ listKind: GatewayList
+ plural: gateways
+ shortNames:
+ - gw
+ singular: gateway
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ shortNames:
+ - vs
+ singular: virtualservice
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: proxies.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Proxy
+ listKind: ProxyList
+ plural: proxies
+ shortNames:
+ - px
+ singular: proxy
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreams.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Upstream
+ listKind: UpstreamList
+ plural: upstreams
+ shortNames:
+ - us
+ singular: upstream
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreamgroups.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: UpstreamGroup
+ listKind: UpstreamGroupList
+ plural: upstreamgroups
+ shortNames:
+ - ug
+ singular: upstreamgroup
+ scope: Namespaced
+ version: v1
+---
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml b/vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
new file mode 100755
index 00000000..3c9987ef
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
@@ -0,0 +1,343 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+---
+# ↓ required as knative dependency on istio crds is hard-coded right now ↓
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.networking.istio.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ app: istio-pilot
+spec:
+ group: networking.istio.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ singular: virtualservice
+ categories:
+ - istio-io
+ - networking-istio-io
+ scope: Namespaced
+ version: v1alpha3
+
+# ↑ required as knative dependency on istio crds is hard-coded right now ↑
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: certificates.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Ready")].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: Certificate
+ plural: certificates
+ shortNames:
+ - kcert
+ singular: certificate
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: clusteringresses.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ClusterIngress
+ plural: clusteringresses
+ singular: clusteringress
+ scope: Cluster
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: configurations.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Configuration
+ plural: configurations
+ shortNames:
+ - config
+ - cfg
+ singular: configuration
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ name: images.caching.internal.knative.dev
+spec:
+ group: caching.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - caching
+ kind: Image
+ plural: images
+ shortNames:
+ - img
+ singular: image
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: podautoscalers.autoscaling.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: autoscaling.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - autoscaling
+ kind: PodAutoscaler
+ plural: podautoscalers
+ shortNames:
+ - kpa
+ singular: podautoscaler
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: revisions.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.serviceName
+ name: Service Name
+ type: string
+ - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration']
+ name: Generation
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Revision
+ plural: revisions
+ shortNames:
+ - rev
+ singular: revision
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: routes.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Route
+ plural: routes
+ shortNames:
+ - rt
+ singular: route
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: services.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Service
+ plural: services
+ shortNames:
+ - kservice
+ - ksvc
+ singular: service
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: serverlessservices.networking.internal.knative.dev
+spec:
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ServerlessService
+ plural: serverlessservices
+ shortNames:
+ - sks
+ singular: serverlessservice
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
new file mode 100755
index 00000000..5dc131e5
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
@@ -0,0 +1,65 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingressProxy.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: ingress-proxy
+{{- with .Values.ingressProxy.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: "{{ .Values.ingressProxy.deployment.image.repository }}:{{ .Values.ingressProxy.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingressProxy.deployment.image.pullPolicy }}
+ name: ingress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.ingressProxy.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.ingressProxy.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with .Values.ingressProxy.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if .Values.ingressProxy.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.ingressProxy.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: ingress-envoy-config
+ name: envoy-config
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
new file mode 100755
index 00000000..8938a477
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.ingress.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ingress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: gateway-proxy
+data:
+{{ if (empty .Values.ingressProxy.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: ingress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~ingress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml .Values.ingressProxy.configMap.data | indent 2}}{{- end}}
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml
new file mode 100755
index 00000000..583e8bcd
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/13-ingress-proxy-service.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.ingressProxy.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.ingressProxy.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: ingress-proxy
+ type: LoadBalancer
+
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
new file mode 100755
index 00000000..fb7874eb
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.settings.integrations.knative.proxy.replicas }}
+ selector:
+ matchLabels:
+ gloo: clusteringress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: clusteringress-proxy
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ .Values.settings.integrations.knative.proxy.image.repository }}:{{ .Values.settings.integrations.knative.proxy.image.tag }}
+ imagePullPolicy: {{ .Values.settings.integrations.knative.proxy.image.pullPolicy }}
+ name: clusteringress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ name: https
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ volumes:
+ - configMap:
+ name: clusteringress-envoy-config
+ name: envoy-config
+
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
new file mode 100755
index 00000000..85a6421f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: clusteringress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+data:
+ envoy.yaml: |
+ node:
+ cluster: clusteringress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~clusteringress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
new file mode 100755
index 00000000..7e25bee9
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: clusteringress-proxy
+ type: LoadBalancer
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml b/vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
new file mode 100755
index 00000000..a73cf1f2
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
@@ -0,0 +1,982 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app: gloo
+ istio-injection: enabled
+ serving.knative.dev/release: devel
+ name: knative-serving
+
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ serving.knative.dev/controller: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-admin
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/controller: "true"
+ serving.knative.dev/release: devel
+ name: knative-serving-core
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ - secrets
+ - configmaps
+ - endpoints
+ - services
+ - events
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ - deployments
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - deployments/scale
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations
+ - routes
+ - revisions
+ - services
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations/status
+ - routes/status
+ - revisions/status
+ - services/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling.internal.knative.dev
+ resources:
+ - podautoscalers
+ - podautoscalers/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - caching.internal.knative.dev
+ resources:
+ - images
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - clusteringresses/status
+ - serverlessservices
+ - serverlessservices/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - deletecollection
+ - patch
+ - watch
+ - apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-controller-admin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: knative-serving-admin
+subjects:
+ - kind: ServiceAccount
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: activator
+ serving.knative.dev/release: devel
+ name: activator-service
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ nodePort: null
+ port: 80
+ protocol: TCP
+ targetPort: 8080
+ - name: http2
+ port: 81
+ protocol: TCP
+ targetPort: 8081
+ - name: metrics
+ nodePort: null
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: activator
+ type: ClusterIP
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: controller
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ ports:
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: controller
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ role: webhook
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ role: webhook
+
+---
+apiVersion: caching.internal.knative.dev/v1alpha1
+kind: Image
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: queue-proxy
+ namespace: knative-serving
+spec:
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: activator
+ namespace: knative-serving
+spec:
+ selector:
+ matchLabels:
+ app: activator
+ role: activator
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: activator
+ role: activator
+ serving.knative.dev/release: devel
+ spec:
+ containers:
+ - args:
+ - -logtostderr=false
+ - -stderrthreshold=FATAL
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:60630ac88d8cb67debd1e2ab1ecd6ec3ff6cbab2336dda8e7ae1c01ebead76c0
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ name: activator
+ ports:
+ - containerPort: 8080
+ name: http1-port
+ - containerPort: 8081
+ name: h2c-port
+ - containerPort: 9090
+ name: metrics-port
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ resources:
+ limits:
+ cpu: 200m
+ memory: 600Mi
+ requests:
+ cpu: 20m
+ memory: 60Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: autoscaler
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: autoscaler
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: autoscaler
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: autoscaler
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:442f99e3a55653b19137b44c1d00f681b594d322cb39c1297820eb717e2134ba
+ name: autoscaler
+ ports:
+ - containerPort: 8080
+ name: websocket
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 300m
+ memory: 400Mi
+ requests:
+ cpu: 30m
+ memory: 40Mi
+ volumeMounts:
+ - mountPath: /etc/config-autoscaler
+ name: config-autoscaler
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-autoscaler
+ name: config-autoscaler
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # The Revision ContainerConcurrency field specifies the maximum number
+ # of requests the Container can handle at once. Container concurrency
+ # target percentage is how much of that maximum to use in a stable
+ # state. E.g. if a Revision specifies ContainerConcurrency of 10, then
+ # the Autoscaler will try to maintain 7 concurrent connections per pod
+ # on average. A value of 0.7 is chosen because the Autoscaler panics
+ # when concurrency exceeds 2x the desired set point. So we will panic
+ # before we reach the limit.
+ container-concurrency-target-percentage: "1.0"
+
+ # The container concurrency target default is what the Autoscaler will
+ # try to maintain when the Revision specifies unlimited concurrency.
+ # Even when specifying unlimited concurrency, the autoscaler will
+ # horizontally scale the application based on this target concurrency.
+ #
+ # A value of 100 is chosen because it's enough to allow vertical pod
+ # autoscaling to tune resource requests. E.g. maintaining 1 concurrent
+ # "hello world" request doesn't consume enough resources to allow VPA
+ # to achieve efficient resource usage (VPA CPU minimum is 300m).
+ container-concurrency-target-default: "100"
+
+ # When operating in a stable mode, the autoscaler operates on the
+ # average concurrency over the stable window.
+ stable-window: "60s"
+
+ # When observed average concurrency during the panic window reaches 2x
+ # the target concurrency, the autoscaler enters panic mode. When
+ # operating in panic mode, the autoscaler operates on the average
+ # concurrency over the panic window.
+ panic-window: "6s"
+
+ # Max scale up rate limits the rate at which the autoscaler will
+ # increase pod count. It is the maximum ratio of desired pods versus
+ # observed pods.
+ max-scale-up-rate: "10"
+
+ # Scale to zero feature flag
+ enable-scale-to-zero: "true"
+
+ # Tick interval is the time between autoscaling calculations.
+ tick-interval: "2s"
+
+ # Dynamic parameters (take effect when config map is updated):
+
+ # Scale to zero grace period is the time an inactive revision is left
+ # running before it is scaled to zero (min: 30s).
+ scale-to-zero-grace-period: "30s"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-autoscaler
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # List of repositories for which tag to digest resolving should be skipped
+ registriesSkippingTagResolving: "ko.local,dev.local"
+ queueSidecarImage: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # revision-timeout-seconds contains the default number of
+ # seconds to use for the revision's per-request timeout, if
+ # none is specified.
+ revision-timeout-seconds: "300" # 5 minutes
+
+ # revision-cpu-request contains the cpu allocation to assign
+ # to revisions by default.
+ revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU)
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-defaults
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Default value for domain.
+ # Although it will match all routes, it is the least-specific rule so it
+ # will only be used if no other domain matches.
+ example.com: |
+
+ # These are example settings of domain.
+ # example.org will be used for routes having app=nonprofit.
+ example.org: |
+ selector:
+ app: nonprofit
+
+ # Routes having domain suffix of 'svc.cluster.local' will not be exposed
+ # through Ingress. You can define your own label selector to assign that
+ # domain suffix to your Route here, or you can set the label
+ # "serving.knative.dev/visibility=cluster-local"
+ # to achieve the same effect. This shows how to make routes having
+ # the label app=secret only exposed to the local cluster.
+ svc.cluster.local: |
+ selector:
+ app: secret
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-domain
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Delay after revision creation before considering it for GC
+ stale-revision-create-delay: "24h"
+
+ # Duration since a route has been pointed at a revision before it should be GC'd
+    # This minus lastpinned-debounce must be longer than the controller resync period (10 hours)
+ stale-revision-timeout: "15h"
+
+ # Minimum number of generations of revisions to keep before considering for GC
+ stale-revision-minimum-generations: "1"
+
+ # To avoid constant updates, we allow an existing annotation to be stale by this
+ # amount before we update the timestamp
+ stale-revision-lastpinned-debounce: "5h"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-gc
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ networking.knative.dev/ingress-provider: istio
+ serving.knative.dev/release: devel
+ name: config-istio
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Common configuration for all Knative codebase
+ zap-logger-config: |
+ {
+ "level": "info",
+ "development": false,
+ "outputPaths": ["stdout"],
+ "errorOutputPaths": ["stderr"],
+ "encoding": "json",
+ "encoderConfig": {
+ "timeKey": "ts",
+ "levelKey": "level",
+ "nameKey": "logger",
+ "callerKey": "caller",
+ "messageKey": "msg",
+ "stacktraceKey": "stacktrace",
+ "lineEnding": "",
+ "levelEncoder": "",
+ "timeEncoder": "iso8601",
+ "durationEncoder": "",
+ "callerEncoder": ""
+ }
+ }
+
+ # Log level overrides
+ # For all components except the autoscaler and queue proxy,
+    # changes are picked up immediately.
+ # For autoscaler and queue proxy, changes require recreation of the pods.
+ loglevel.controller: "info"
+ loglevel.autoscaler: "info"
+ loglevel.queueproxy: "info"
+ loglevel.webhook: "info"
+ loglevel.activator: "info"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-logging
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-network
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # logging.enable-var-log-collection defaults to false.
+ # A fluentd sidecar will be set up to collect var log if
+ # this flag is true.
+ logging.enable-var-log-collection: false
+
+ # logging.fluentd-sidecar-image provides the fluentd sidecar image
+ # to inject as a sidecar to collect logs from /var/log.
+ # Must be presented if logging.enable-var-log-collection is true.
+ logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+
+ # logging.fluentd-sidecar-output-config provides the configuration
+ # for the fluentd sidecar, which will be placed into a configmap and
+ # mounted into the fluentd sidecar image.
+ logging.fluentd-sidecar-output-config: |
+ # Parse json log before sending to Elastic Search
+ <filter **>
+ @type parser
+ key_name log
+ <parse>
+ @type multi_format
+ <pattern>
+ format json
+ time_key fluentd-time # fluentd-time is reserved for structured logs
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+ </pattern>
+ <pattern>
+ format none
+ message_key log
+ </pattern>
+ </parse>
+ </filter>
+ # Send to Elastic Search
+ <match **>
+ @id elasticsearch
+ @type elasticsearch
+ @log_level info
+ include_tag_key true
+ # Elasticsearch service is in monitoring namespace.
+ host elasticsearch-logging.knative-monitoring
+ port 9200
+ logstash_format true
+ <buffer>
+ @type file
+ path /var/log/fluentd-buffers/kubernetes.system.buffer
+ flush_mode interval
+ retry_type exponential_backoff
+ flush_thread_count 2
+ flush_interval 5s
+ retry_forever
+ retry_max_interval 30
+ chunk_limit_size 2M
+ queue_limit_length 8
+ overflow_action block
+ </buffer>
+ </match>
+
+ # logging.revision-url-template provides a template to use for producing the
+ # logging URL that is injected into the status of each Revision.
+    # This value is what you might use with the Knative monitoring bundle, and provides
+ # access to Kibana after setting up kubectl proxy.
+ logging.revision-url-template: |
+ http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
+
+ # If non-empty, this enables queue proxy writing request logs to stdout.
+ # The value determines the shape of the request logs and it must be a valid go text/template.
+ # It is important to keep this as a single line. Multiple lines are parsed as separate entities
+ # by most collection agents and will split the request logs into multiple records.
+ #
+ # The following fields and functions are available to the template:
+ #
+ # Request: An http.Request (see https://golang.org/pkg/net/http/#Request)
+ # representing an HTTP request received by the server.
+ #
+ # Response:
+ # struct {
+ # Code int // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml)
+ # Size int // An int representing the size of the response.
+ # Latency float64 // A float64 representing the latency of the response in seconds.
+ # }
+ #
+ # Revision:
+ # struct {
+ # Name string // Knative revision name
+ # Namespace string // Knative revision namespace
+ # Service string // Knative service name
+ # Configuration string // Knative configuration name
+ # PodName string // Name of the pod hosting the revision
+ # PodIP string // IP of the pod hosting the revision
+ # }
+ #
+    logging.request-log-template: '{"httpRequest": {"requestMethod": "{{ "{{" }}.Request.Method{{ "}}" }}", "requestUrl": "{{ "{{" }}js .Request.RequestURI{{ "}}" }}", "requestSize": "{{ "{{" }}.Request.ContentLength{{ "}}" }}", "status": {{ "{{" }}.Response.Code{{ "}}" }}, "responseSize": "{{ "{{" }}.Response.Size{{ "}}" }}", "userAgent": "{{ "{{" }}js .Request.UserAgent{{ "}}" }}", "remoteIp": "{{ "{{" }}js .Request.RemoteAddr{{ "}}" }}", "serverIp": "{{ "{{" }}.Revision.PodIP{{ "}}" }}", "referer": "{{ "{{" }}js .Request.Referer{{ "}}" }}", "latency": "{{ "{{" }}.Response.Latency{{ "}}" }}s", "protocol": "{{ "{{" }}.Request.Proto{{ "}}" }}"}, "traceId": "{{ "{{" }}index .Request.Header "X-B3-Traceid"{{ "}}" }}"}'
+
+ # metrics.backend-destination field specifies the system metrics destination.
+ # It supports either prometheus (the default) or stackdriver.
+ # Note: Using stackdriver will incur additional charges
+ metrics.backend-destination: prometheus
+
+ # metrics.request-metrics-backend-destination specifies the request metrics
+ # destination. If non-empty, it enables queue proxy to send request metrics.
+ # Currently supported values: prometheus, stackdriver.
+ metrics.request-metrics-backend-destination: prometheus
+
+ # metrics.stackdriver-project-id field specifies the stackdriver project ID. This
+ # field is optional. When running on GCE, application default credentials will be
+ # used if this field is not provided.
+ metrics.stackdriver-project-id: "<your stackdriver project id>"
+
+ # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
+ # Stackdriver using "global" resource type and custom metric type if the
+ # metrics are not supported by "knative_revision" resource type. Setting this
+ # flag to "true" could cause extra Stackdriver charge.
+ # If metrics.backend-destination is not Stackdriver, this is ignored.
+ metrics.allow-stackdriver-custom-metrics: "false"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-observability
+ namespace: knative-serving
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: controller
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: controller
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:25af5f3adad8b65db3126e0d6e90aa36835c124c24d9d72ffbdd7ee739a7f571
+ name: controller
+ ports:
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 1000Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: webhook
+ role: webhook
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: webhook
+ role: webhook
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:d1ba3e2c0d739084ff508629db001619cea9cc8780685e85dd910363774eaef6
+ name: webhook
+ resources:
+ limits:
+ cpu: 200m
+ memory: 200Mi
+ requests:
+ cpu: 20m
+ memory: 20Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml b/vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml
new file mode 100755
index 00000000..a2eec087
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/18-settings.yaml
@@ -0,0 +1,30 @@
+{{ if .Values.settings.create }}
+
+apiVersion: gloo.solo.io/v1
+kind: Settings
+metadata:
+ name: default
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ "helm.sh/hook": pre-install
+spec:
+ bindAddr: 0.0.0.0:{{ .Values.gloo.deployment.xdsPort }}
+ discoveryNamespace: {{ .Values.settings.writeNamespace }}
+ kubernetesArtifactSource: {}
+ kubernetesConfigSource: {}
+ kubernetesSecretSource: {}
+ refreshRate: 60s
+
+{{- if .Values.settings.extensions }}
+ extensions:
+{{- toYaml .Values.settings.extensions | nindent 4 }}
+{{- end }}
+
+{{- with .Values.settings.watchNamespaces }}
+ watchNamespaces:
+ {{- range . }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml b/vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
new file mode 100755
index 00000000..35fb5eb0
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-gateway
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["gateway.solo.io"]
+ resources: ["virtualservices", "gateways"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml b/vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
new file mode 100755
index 00000000..15215b9f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-ingress
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["extensions", ""]
+ resources: ["ingresses"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml b/vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
new file mode 100755
index 00000000..1bd2b95d
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-knative
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["networking.internal.knative.dev"]
+ resources: ["clusteringresses"]
+ verbs: ["get", "list", "watch"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml b/vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
new file mode 100755
index 00000000..62198913
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-gateway-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-gateway
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml b/vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
new file mode 100755
index 00000000..7ef5cbae
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-ingress-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-ingress
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml b/vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
new file mode 100755
index 00000000..5f05de96
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-knative-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-knative
+ apiGroup: rbac.authorization.k8s.io
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml
new file mode 100755
index 00000000..b3d8423f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/3-gloo-deployment.yaml
@@ -0,0 +1,57 @@
+# Control-plane Deployment: the gloo pod that serves xDS configuration
+# on .Values.gloo.deployment.xdsPort (see 9-gateway-proxy-configmap.yaml,
+# whose Envoy bootstrap points ADS at this service).
+# extensions/v1beta1 was deprecated for Deployment and is removed in
+# Kubernetes >= 1.16; apps/v1 is a drop-in here because
+# spec.selector.matchLabels is already set.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gloo.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gloo
+ template:
+ metadata:
+ labels:
+ gloo: gloo
+ {{- if .Values.gloo.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gloo.deployment.image.repository }}:{{ .Values.gloo.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gloo.deployment.image.pullPolicy }}
+ name: gloo
+ resources:
+ requests:
+ cpu: 1
+ memory: 256Mi
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ ports:
+ - containerPort: {{ .Values.gloo.deployment.xdsPort }}
+ name: grpc
+ protocol: TCP
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gloo.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+ {{- if .Values.gloo.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.gloo.deployment.image.pullSecret }}{{end}}
+
diff --git a/vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml
new file mode 100755
index 00000000..ab49ea3f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/4-gloo-service.yaml
@@ -0,0 +1,18 @@
+# Service exposing gloo's xDS gRPC endpoint; the Envoy bootstrap in
+# 9-gateway-proxy-configmap.yaml targets
+# gloo.<namespace>.svc.cluster.local:<xdsPort>.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+{{ if .Values.gloo.deployment.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.gloo.deployment.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ - name: grpc
+ port: {{ .Values.gloo.deployment.xdsPort }}
+ protocol: TCP
+ selector:
+ gloo: gloo
diff --git a/vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml
new file mode 100755
index 00000000..1a44e922
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/5-discovery-deployment.yaml
@@ -0,0 +1,46 @@
+# Deployment for gloo's discovery component.
+# extensions/v1beta1 was deprecated for Deployment and is removed in
+# Kubernetes >= 1.16; apps/v1 is a drop-in here because
+# spec.selector.matchLabels is already set.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: discovery
+ name: discovery
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.discovery.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: discovery
+ template:
+ metadata:
+ labels:
+ gloo: discovery
+ {{- if .Values.discovery.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.discovery.deployment.image.repository }}:{{ .Values.discovery.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.discovery.deployment.image.pullPolicy }}
+ name: discovery
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.discovery.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+
diff --git a/vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml
new file mode 100755
index 00000000..0a32241e
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/6-gateway-deployment.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.gateway.enabled }}
+# Deployment for gloo's gateway component; rendered only when
+# gateway.enabled is true.
+# extensions/v1beta1 was deprecated for Deployment and is removed in
+# Kubernetes >= 1.16; apps/v1 is a drop-in here because
+# spec.selector.matchLabels is already set.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gateway
+ name: gateway
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gateway.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gateway
+ template:
+ metadata:
+ labels:
+ gloo: gateway
+ {{- if .Values.gateway.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gateway.deployment.image.repository }}:{{ .Values.gateway.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gateway.deployment.image.pullPolicy }}
+ name: gateway
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gateway.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml b/vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
new file mode 100755
index 00000000..bb54e8f3
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
@@ -0,0 +1,67 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# One Envoy proxy Deployment per entry in .Values.gatewayProxies; $key
+# names the proxy and $spec holds its deployment/service settings.
+# extensions/v1beta1 was deprecated for Deployment and is removed in
+# Kubernetes >= 1.16; apps/v1 is a drop-in here because
+# spec.selector.matchLabels is already set.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+spec:
+ replicas: {{ $spec.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: {{ $key }}
+ template:
+ metadata:
+ labels:
+ gloo: {{ $key }}
+{{- with $spec.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ $spec.deployment.image.repository }}:{{ $spec.deployment.image.tag }}
+ imagePullPolicy: {{ $spec.deployment.image.pullPolicy }}
+ name: gateway-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ $spec.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ $spec.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with $spec.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if $spec.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ $spec.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: {{ $key }}-envoy-config
+ name: envoy-config
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml b/vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml
new file mode 100755
index 00000000..f0b7d347
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/8-gateway-proxy-service.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# One Service per named gateway proxy; ports map the service's
+# http/https ports onto the deployment's container ports, and type,
+# clusterIP and loadBalancerIP come from $spec.service.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+ {{- with $spec.service.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+spec:
+ ports:
+ - port: {{ $spec.service.httpPort }}
+ targetPort: {{ $spec.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ $spec.service.httpsPort }}
+ targetPort: {{ $spec.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: {{ $key }}
+ type: {{ $spec.service.type }}
+ {{- if and (eq $spec.service.type "ClusterIP") $spec.service.clusterIP }}
+ clusterIP: {{ $spec.service.clusterIP }}
+ {{- end }}
+ {{- if and (eq $spec.service.type "LoadBalancer") $spec.service.loadBalancerIP }}
+ loadBalancerIP: {{ $spec.service.loadBalancerIP }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml b/vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
new file mode 100755
index 00000000..03c5a920
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# config_map
+# Envoy bootstrap for each gateway proxy. If $spec.configMap.data is
+# empty, a default bootstrap is rendered whose ADS/cluster entries point
+# at the gloo xDS service (gloo.<namespace>:<xdsPort>, see
+# 4-gloo-service.yaml); otherwise the user-supplied data is emitted via
+# toYaml. The {{ "{{" }} ... {{ "}}" }} pairs escape Helm so the
+# rendered file keeps literal {{.PodName}}/{{.PodNamespace}}
+# placeholders for substitution at pod start.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $key }}-envoy-config
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+data:
+{{ if (empty $spec.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: gateway
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~gateway-proxy"
+ static_resources:
+ clusters:
+ - name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo.{{ $.Release.Namespace }}.svc.cluster.local
+ port_value: {{ $.Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml $spec.configMap.data | indent 2}}{{- end}}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/00-init/gloo/values-ingress.yaml b/vnfs/DAaaS/00-init/gloo/values-ingress.yaml
new file mode 100755
index 00000000..98dd42ae
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/values-ingress.yaml
@@ -0,0 +1,74 @@
+# Values for the ingress-mode install of gloo: ingress + ingressProxy
+# are enabled, gateway and the knative integration are disabled.
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""  # unused in this mode: gateway.enabled is false below
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null  # null -> templates render the default Envoy bootstrap
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""  # unused in this mode: gateway.enabled is false
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"  # quoted string; templates interpolate it into numeric port fields
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+ingressProxy:
+ configMap: {}
+ deployment:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/00-init/gloo/values-knative.yaml b/vnfs/DAaaS/00-init/gloo/values-knative.yaml
new file mode 100755
index 00000000..c53ca1a9
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/values-knative.yaml
@@ -0,0 +1,72 @@
+# Values for the knative-mode install of gloo: the knative integration
+# (and its proxy) is enabled; gateway and ingress are disabled.
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""  # unused in this mode: gateway.enabled is false below
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null  # null -> templates render the default Envoy bootstrap
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""  # unused in this mode: gateway.enabled is false
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"  # quoted string; templates interpolate it into numeric port fields
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: true
+ proxy:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/00-init/gloo/values.yaml b/vnfs/DAaaS/00-init/gloo/values.yaml
new file mode 100755
index 00000000..daeab0c3
--- /dev/null
+++ b/vnfs/DAaaS/00-init/gloo/values.yaml
@@ -0,0 +1,56 @@
+# Default values (gateway mode): gateway + gatewayProxies enabled;
+# ingress and the knative integration disabled.
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null  # null -> templates render the default Envoy bootstrap
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"  # quoted string; templates interpolate it into numeric port fields
+ingress:
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/minio/values.yaml b/vnfs/DAaaS/minio/values.yaml
index 8ba16128..1b81a8cb 100755
--- a/vnfs/DAaaS/minio/values.yaml
+++ b/vnfs/DAaaS/minio/values.yaml
@@ -105,10 +105,9 @@ service:
ingress:
 enabled: true
- annotations: {}
- # kubernetes.io/ingress.class: nginx
- # kubernetes.io/tls-acme: "true"
- path: /
+ annotations:
+ kubernetes.io/ingress.class: gloo
+ # NOTE(review): path widened from "/" to "/.*" alongside the switch to
+ # the gloo ingress class — presumably gloo matches paths as regexes;
+ # confirm against the gloo ingress docs.
+ path: /.*
hosts:
- minio.modelrepo
tls: []