Diffstat (limited to 'kubernetes')
-rw-r--r--  kubernetes/aaf/charts/aaf-sms/values.yaml                                   |   4
-rw-r--r--  kubernetes/aai/charts/aai-babel/resources/config/tosca-mappings.json        |   2
-rw-r--r--  kubernetes/aai/charts/aai-champ/values.yaml                                 |   2
-rw-r--r--  kubernetes/aai/charts/aai-elasticsearch/resources/config/sg/sg_config.yml   |  18
-rw-r--r--  kubernetes/aai/charts/aai-resources/templates/deployment.yaml               | 252
-rw-r--r--  kubernetes/aai/charts/aai-traversal/templates/deployment.yaml               | 126
-rw-r--r--  kubernetes/clamp/charts/clamp-dash-es/values.yaml                           |   2
-rw-r--r--  kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf  |  17
-rwxr-xr-x  kubernetes/contrib/tools/rke/rke_setup.sh                                   | 355
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-bootstrap/resources/config/k8s-plugin.json  |   1
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml                       |   3
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml                |   2
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-config-binding-service/templates/service.yaml | 2
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml          |   4
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml              |   2
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml                     |   2
-rw-r--r--  kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml                  |   4
-rw-r--r--  kubernetes/nbi/values.yaml                                                  |   4
-rw-r--r--  kubernetes/pnda/charts/dcae-pnda-bootstrap/values.yaml                      |   1
-rwxr-xr-x  kubernetes/sdc/resources/config/environments/AUTO.json                      |   1
-rwxr-xr-x  kubernetes/sdnc/resources/config/bin/startODL.sh                            |  48
21 files changed, 813 insertions, 39 deletions
diff --git a/kubernetes/aaf/charts/aaf-sms/values.yaml b/kubernetes/aaf/charts/aaf-sms/values.yaml
index 8e7ea29300..28b46c24e1 100644
--- a/kubernetes/aaf/charts/aaf-sms/values.yaml
+++ b/kubernetes/aaf/charts/aaf-sms/values.yaml
@@ -67,11 +67,11 @@ readiness:
periodSeconds: 30
service:
- type: NodePort
+ type: ClusterIP
name: aaf-sms
portName: aaf-sms
internalPort: 10443
- nodePort: 43
+ externalPort: 10443
persistence:
enabled: true
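
With the change above, aaf-sms is no longer published on a node port; it is only reachable inside the cluster on 10443. A minimal post-deploy check, assuming the chart is installed into an "onap" namespace and the service keeps the name "aaf-sms" (both are assumptions about the target deployment):

  kubectl -n onap get svc aaf-sms -o jsonpath='{.spec.type} {.spec.ports[0].port}'
  # expected with the values above: ClusterIP 10443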
diff --git a/kubernetes/aai/charts/aai-babel/resources/config/tosca-mappings.json b/kubernetes/aai/charts/aai-babel/resources/config/tosca-mappings.json
index 5be609730d..a6fe82fb70 100644
--- a/kubernetes/aai/charts/aai-babel/resources/config/tosca-mappings.json
+++ b/kubernetes/aai/charts/aai-babel/resources/config/tosca-mappings.json
@@ -15,7 +15,7 @@
{
"type": "VF",
"name": "generic-vnf",
- "deleteFlag": false,
+ "deleteFlag": true,
"modelVersionId": "93a6166f-b3d5-4f06-b4ba-aed48d009ad9",
"modelInvariantId": "acc6edd8-a8d4-4b93-afaa-0994068be14c"
},
diff --git a/kubernetes/aai/charts/aai-champ/values.yaml b/kubernetes/aai/charts/aai-champ/values.yaml
index b35171ecd6..331786cf18 100644
--- a/kubernetes/aai/charts/aai-champ/values.yaml
+++ b/kubernetes/aai/charts/aai-champ/values.yaml
@@ -25,7 +25,7 @@ global:
#################################################################
# application image
-image: onap/champ:1.3.0
+image: onap/champ:1.4-STAGING-latest
flavor: small
diff --git a/kubernetes/aai/charts/aai-elasticsearch/resources/config/sg/sg_config.yml b/kubernetes/aai/charts/aai-elasticsearch/resources/config/sg/sg_config.yml
index 3fefe1690d..9172b71e8d 100644
--- a/kubernetes/aai/charts/aai-elasticsearch/resources/config/sg/sg_config.yml
+++ b/kubernetes/aai/charts/aai-elasticsearch/resources/config/sg/sg_config.yml
@@ -37,19 +37,19 @@
# HTTP
# basic (challenging)
# proxy (not challenging, needs xff)
-# kerberos (challenging) NOT FREE FOR COMMERCIAL
+# kerberos (challenging)
# clientcert (not challenging, needs https)
-# jwt (not challenging) NOT FREE FOR COMMERCIAL
+# jwt (not challenging)
# host (not challenging) #DEPRECATED, will be removed in a future version.
# host based authentication is configurable in sg_roles_mapping
# Authc
# internal
# noop
-# ldap NOT FREE FOR COMMERCIAL USE
+# ldap
# Authz
-# ldap NOT FREE FOR COMMERCIAL USE
+# ldap
# noop
searchguard:
@@ -59,7 +59,7 @@ searchguard:
# Set filtered_alias_mode to 'nowarn' to allow more than 2 filtered aliases per index silently
#filtered_alias_mode: warn
#kibana:
- # Kibana multitenancy - NOT FREE FOR COMMERCIAL USE
+ # Kibana multitenancy
# see https://github.com/floragunncom/search-guard-docs/blob/master/multitenancy.md
# To make this work you need to install https://github.com/floragunncom/search-guard-module-kibana-multitenancy/wiki
#multitenancy_enabled: true
@@ -85,7 +85,7 @@ searchguard:
transport_enabled: false
order: 6
http_authenticator:
- type: kerberos # NOT FREE FOR COMMERCIAL USE
+ type: kerberos
challenge: true
config:
# If true a lot of kerberos/security related debugging output will be logged to standard out
@@ -150,7 +150,7 @@ searchguard:
challenge: false
authentication_backend:
# LDAP authentication backend (authenticate users against a LDAP or Active Directory)
- type: ldap # NOT FREE FOR COMMERCIAL USE
+ type: ldap
config:
# enable ldaps
enable_ssl: false
@@ -176,7 +176,7 @@ searchguard:
transport_enabled: false
authorization_backend:
# LDAP authorization backend (gather roles from a LDAP or Active Directory, you have to configure the above LDAP authentication backend settings too)
- type: ldap # NOT FREE FOR COMMERCIAL USE
+ type: ldap
config:
# enable ldaps
enable_ssl: false
@@ -217,5 +217,5 @@ searchguard:
roles_from_another_ldap:
enabled: false
authorization_backend:
- type: ldap # NOT FREE FOR COMMERCIAL USE
+ type: ldap
#config goes here ...
diff --git a/kubernetes/aai/charts/aai-resources/templates/deployment.yaml b/kubernetes/aai/charts/aai-resources/templates/deployment.yaml
index 0a46c48717..9fe4c17cc9 100644
--- a/kubernetes/aai/charts/aai-resources/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-resources/templates/deployment.yaml
@@ -82,6 +82,28 @@ spec:
"path": "/aai/v14/cloud-infrastructure"
},
{
+ "serviceName": "_aai-cloudInfrastructure",
+ "version": "v15",
+ "url": "/aai/v15/cloud-infrastructure",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/cloud-infrastructure"
+ },
+ {
+ "serviceName": "_aai-cloudInfrastructure",
+ "version": "v16",
+ "url": "/aai/v16/cloud-infrastructure",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/cloud-infrastructure"
+ },
+ {
"serviceName": "_aai-business",
"version": "v11",
"url": "/aai/v11/business",
@@ -126,6 +148,28 @@ spec:
"path": "/aai/v14/business"
},
{
+ "serviceName": "_aai-business",
+ "version": "v15",
+ "url": "/aai/v15/business",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/business"
+ },
+ {
+ "serviceName": "_aai-business",
+ "version": "v16",
+ "url": "/aai/v16/business",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/business"
+ },
+ {
"serviceName": "_aai-actions",
"version": "v11",
"url": "/aai/v11/actions",
@@ -170,6 +214,28 @@ spec:
"path": "/aai/v14/actions"
},
{
+ "serviceName": "_aai-actions",
+ "version": "v15",
+ "url": "/aai/v15/actions",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/actions"
+ },
+ {
+ "serviceName": "_aai-actions",
+ "version": "v16",
+ "url": "/aai/v16/actions",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/actions"
+ },
+ {
"serviceName": "_aai-service-design-and-creation",
"version": "v11",
"url": "/aai/v11/service-design-and-creation",
@@ -213,6 +279,28 @@ spec:
"visualRange": "1",
"path": "/aai/v14/service-design-and-creation"
},
+ {
+ "serviceName": "_aai-service-design-and-creation",
+ "version": "v15",
+ "url": "/aai/v15/service-design-and-creation",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/service-design-and-creation"
+ },
+ {
+ "serviceName": "_aai-service-design-and-creation",
+ "version": "v16",
+ "url": "/aai/v16/service-design-and-creation",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/service-design-and-creation"
+ },
{
"serviceName": "_aai-network",
"version": "v11",
@@ -258,6 +346,28 @@ spec:
"path": "/aai/v14/network"
},
{
+ "serviceName": "_aai-network",
+ "version": "v15",
+ "url": "/aai/v15/network",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/network"
+ },
+ {
+ "serviceName": "_aai-network",
+ "version": "v16",
+ "url": "/aai/v16/network",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/network"
+ },
+ {
"serviceName": "_aai-externalSystem",
"version": "v11",
"url": "/aai/v11/external-system",
@@ -302,6 +412,28 @@ spec:
"path": "/aai/v14/external-system"
},
{
+ "serviceName": "_aai-externalSystem",
+ "version": "v15",
+ "url": "/aai/v15/external-system",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/external-system"
+ },
+ {
+ "serviceName": "_aai-externalSystem",
+ "version": "v16",
+ "url": "/aai/v16/external-system",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/external-system"
+ },
+ {
"serviceName": "aai-cloudInfrastructure",
"version": "v11",
"url": "/aai/v11/cloud-infrastructure",
@@ -342,6 +474,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-cloudInfrastructure",
+ "version": "v15",
+ "url": "/aai/v15/cloud-infrastructure",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-cloudInfrastructure",
+ "version": "v16",
+ "url": "/aai/v16/cloud-infrastructure",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-business",
"version": "v11",
"url": "/aai/v11/business",
@@ -382,6 +534,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-business",
+ "version": "v15",
+ "url": "/aai/v15/business",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-business",
+ "version": "v16",
+ "url": "/aai/v16/business",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-actions",
"version": "v11",
"url": "/aai/v11/actions",
@@ -422,6 +594,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-actions",
+ "version": "v15",
+ "url": "/aai/v15/actions",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-actions",
+ "version": "v16",
+ "url": "/aai/v16/actions",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-service-design-and-creation",
"version": "v11",
"url": "/aai/v11/service-design-and-creation",
@@ -462,6 +654,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-service-design-and-creation",
+ "version": "v15",
+ "url": "/aai/v15/service-design-and-creation",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-service-design-and-creation",
+ "version": "v16",
+ "url": "/aai/v16/service-design-and-creation",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-network",
"version": "v11",
"url": "/aai/v11/network",
@@ -502,6 +714,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-network",
+ "version": "v15",
+ "url": "/aai/v15/network",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-network",
+ "version": "v16",
+ "url": "/aai/v16/network",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-externalSystem",
"version": "v11",
"url": "/aai/v11/external-system",
@@ -540,6 +772,26 @@ spec:
"enable_ssl": true,
"lb_policy":"ip_hash",
"visualRange": "1"
+ },
+ {
+ "serviceName": "aai-externalSystem",
+ "version": "v15",
+ "url": "/aai/v15/external-system",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-externalSystem",
+ "version": "v16",
+ "url": "/aai/v16/external-system",
+ "protocol": "REST",
+ "port": "8447",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
}
]'
spec:
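
The blocks added above register the new v15 and v16 API paths with MSB through the msb.onap.org/service-info annotation on the aai-resources pods. A quick sanity check after deployment, assuming an "onap" namespace and an app=aai-resources label on the deployment (both assumptions):

  kubectl -n onap get deployment -l app=aai-resources -o yaml \
    | grep -o '/aai/v16/[a-z-]*' | sort -u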
diff --git a/kubernetes/aai/charts/aai-traversal/templates/deployment.yaml b/kubernetes/aai/charts/aai-traversal/templates/deployment.yaml
index 4f97f4a2c3..be4b863195 100644
--- a/kubernetes/aai/charts/aai-traversal/templates/deployment.yaml
+++ b/kubernetes/aai/charts/aai-traversal/templates/deployment.yaml
@@ -82,6 +82,28 @@ spec:
"path": "/aai/v14/search/generic-query"
},
{
+ "serviceName": "_aai-generic-query",
+ "version": "v15",
+ "url": "/aai/v15/search/generic-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/search/generic-query"
+ },
+ {
+ "serviceName": "_aai-generic-query",
+ "version": "v16",
+ "url": "/aai/v16/search/generic-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/search/generic-query"
+ },
+ {
"serviceName": "_aai-nodes-query",
"version": "v11",
"url": "/aai/v11/search/nodes-query",
@@ -126,6 +148,28 @@ spec:
"path": "/aai/v14/search/nodes-query"
},
{
+ "serviceName": "_aai-nodes-query",
+ "version": "v15",
+ "url": "/aai/v15/search/nodes-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/search/nodes-query"
+ },
+ {
+ "serviceName": "_aai-nodes-query",
+ "version": "v16",
+ "url": "/aai/v16/search/nodes-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/search/nodes-query"
+ },
+ {
"serviceName": "_aai-query",
"version": "v11",
"url": "/aai/v11/query",
@@ -170,6 +214,28 @@ spec:
"path": "/aai/v14/query"
},
{
+ "serviceName": "_aai-query",
+ "version": "v15",
+ "url": "/aai/v15/query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v15/query"
+ },
+ {
+ "serviceName": "_aai-query",
+ "version": "v16",
+ "url": "/aai/v16/query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1",
+ "path": "/aai/v16/query"
+ },
+ {
"serviceName": "_aai-named-query",
"url": "/aai/search",
"protocol": "REST",
@@ -220,6 +286,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-generic-query",
+ "version": "v15",
+ "url": "/aai/v15/search/generic-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-generic-query",
+ "version": "v16",
+ "url": "/aai/v16/search/generic-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-nodes-query",
"version": "v11",
"url": "/aai/v11/search/nodes-query",
@@ -260,6 +346,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-nodes-query",
+ "version": "v15",
+ "url": "/aai/v15/search/nodes-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-nodes-query",
+ "version": "v16",
+ "url": "/aai/v16/search/nodes-query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-query",
"version": "v11",
"url": "/aai/v11/query",
@@ -300,6 +406,26 @@ spec:
"visualRange": "1"
},
{
+ "serviceName": "aai-query",
+ "version": "v15",
+ "url": "/aai/v15/query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
+ "serviceName": "aai-query",
+ "version": "v16",
+ "url": "/aai/v16/query",
+ "protocol": "REST",
+ "port": "8446",
+ "enable_ssl": true,
+ "lb_policy":"ip_hash",
+ "visualRange": "1"
+ },
+ {
"serviceName": "aai-named-query",
"url": "/aai/search",
"protocol": "REST",
diff --git a/kubernetes/clamp/charts/clamp-dash-es/values.yaml b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
index 43920b3d65..f25e40bf2a 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
@@ -32,7 +32,7 @@ busyboxImage: library/busybox:latest
# application image
loggingRepository: docker.elastic.co
-image: elasticsearch/elasticsearch-oss:6.1.3
+image: elasticsearch/elasticsearch-oss:6.6.2
pullPolicy: Always
# flag to enable debugging - application support required
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
index 317b428c77..b4b5071ba5 100644
--- a/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/resources/config/pipeline.conf
@@ -57,13 +57,13 @@ filter {
drop { }
}
- if [http_request_failure] or [@metadata][code] != "200" {
+ if [http_request_failure] or [@metadata][code] != 200 {
mutate {
add_tag => [ "error" ]
}
}
- if "dmaap_source" in [tags] {
+ if "dmaap_source" in [@metadata][request][tags] {
#
# Dmaap provides a json list, whose items are Strings containing the event
# provided to Dmaap, which itself is an escaped json.
@@ -76,14 +76,7 @@ filter {
source => "[message]"
target => "message"
}
- ruby {
- code => "
- for ev in event.get('message', [])
- ev.set('@metadata', event.get('@metadata'))
- end
- "
- }
-
+
split {
field => "message"
}
@@ -194,7 +187,7 @@ filter {
}
- if "error" not in [tags] {
+ if "error" not in [@metadata][request][tags]{
#
# Creating data for a secondary index
#
@@ -203,7 +196,7 @@ filter {
add_tag => [ "event-cl-aggs" ]
}
- if "event-cl-aggs" in [tags] {
+ if "event-cl-aggs" in [@metadata][request][tags]{
#
# we only need a few fields for aggregations; remove all fields from clone except :
# vmName,vnfName,vnfType,requestID,closedLoopAlarmStart, closedLoopControlName,closedLoopAlarmEnd,abated,nbrDmaapevents,finalFailure
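
Because the rewritten conditionals now compare the numeric response code and read tags from [@metadata][request][tags], it is worth validating the pipeline syntax before rolling the change out. A sketch of a local check, with the Logstash install path and config location being illustrative only:

  /usr/share/logstash/bin/logstash \
    --path.config /usr/share/logstash/pipeline/pipeline.conf \
    --config.test_and_exit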
diff --git a/kubernetes/contrib/tools/rke/rke_setup.sh b/kubernetes/contrib/tools/rke/rke_setup.sh
new file mode 100755
index 0000000000..cc71522f12
--- /dev/null
+++ b/kubernetes/contrib/tools/rke/rke_setup.sh
@@ -0,0 +1,355 @@
+#!/bin/bash
+#############################################################################
+# Copyright © 2019 Bell.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#############################################################################
+#
+# This installation is for an RKE install of kubernetes
+# after this, run the standard OOM install
+# this installation can be run on any ubuntu 16.04 VM, RHEL 7.6 (root only), physical or cloud azure/aws host
+# https://wiki.onap.org/display/DW/OOM+RKE+Kubernetes+Deployment
+# source from https://jira.onap.org/browse/OOM-1598
+#
+# master/dublin
+# RKE 0.1.16 Kubernetes 1.11.6, kubectl 1.11.6, Helm 2.9.1, Docker 18.06
+# single node install, HA pending
+
+usage() {
+cat <<EOF
+Usage: $0 [PARAMs]
+example
+sudo ./rke_setup.sh -b dublin -s rke.onap.cloud -e onap -l amdocs -v true
+-u : Display usage
+-b [branch] : branch = master or dublin (required)
+-s [server] : server = IP or DNS name (required)
+-e [environment] : use the default (onap)
+-k [key] : ssh key name
+-l [username] : login username account (use ubuntu for example)
+EOF
+}
+
+install_onap() {
+ #constants
+ PORT=8880
+ if [ "$BRANCH" == "casablanca" ]; then
+ KUBERNETES_VERSION=
+ RKE_VERSION=0.1.15
+ KUBECTL_VERSION=1.11.3
+ HELM_VERSION=2.9.1
+ DOCKER_VERSION=17.03
+ else
+ KUBERNETES_VERSION=
+ RKE_VERSION=0.1.16
+ KUBECTL_VERSION=1.11.6
+ HELM_VERSION=2.9.1
+ DOCKER_VERSION=18.06
+ fi
+
+ # copy your private ssh key and cluster.yml file to the vm
+ # on your dev machine
+ #sudo cp ~/.ssh/onap_rsa .
+ #sudo chmod 777 onap_rsa
+ #scp onap_rsa ubuntu@192.168.241.132:~/
+ # on this vm
+ #sudo chmod 400 onap_rsa
+ #sudo cp onap_rsa ~/.ssh
+ # make sure the public key is set up correctly in
+ # sudo vi ~/.ssh/authorized_keys
+
+ echo "please supply your ssh key as provided by the -k keyname - it must be chmod 400 and chown user:user in ~/.ssh/"
+ echo "The RKE version specific cluster.yml is already integrated in this script for 0.1.15/0.1.16 - no need for the generation below..."
+ echo "rke config --name cluster.yml"
+ echo "specifically"
+ echo "address: $SERVER"
+ echo "user: $USERNAME"
+ echo "ssh_key_path: $SSHPATH_PREFIX/$SSHKEY"
+
+ RKETOOLS=
+ HYPERCUBE=
+ POD_INFRA_CONTAINER=
+ if [ "$RKE_VERSION" == "0.1.16" ]; then
+ RKETOOLS=0.1.15
+ HYPERCUBE=1.11.6-rancher1
+ POD_INFRA_CONTAINER=rancher/pause-amd64:3.1
+ else
+ # 0.1.15
+ RKETOOLS=0.1.14
+ HYPERCUBE=1.11.3-rancher1
+ POD_INFRA_CONTAINER=gcr.io.google_containers/pause-amd64:3.1
+ fi
+
+ cat > cluster.yml <<EOF
+# generated from rke_setup.sh
+nodes:
+- address: $SERVER
+ port: "22"
+ internal_address: ""
+ role:
+ - controlplane
+ - worker
+ - etcd
+ hostname_override: ""
+ user: $USERNAME
+ docker_socket: /var/run/docker.sock
+ ssh_key: ""
+ ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
+ labels: {}
+services:
+ etcd:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ external_urls: []
+ ca_cert: ""
+ cert: ""
+ key: ""
+ path: ""
+ snapshot: null
+ retention: ""
+ creation: ""
+ kube-api:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ service_cluster_ip_range: 10.43.0.0/16
+ service_node_port_range: ""
+ pod_security_policy: false
+ kube-controller:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ cluster_cidr: 10.42.0.0/16
+ service_cluster_ip_range: 10.43.0.0/16
+ scheduler:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+ kubelet:
+ image: ""
+ extra_args:
+ max-pods: 900
+ extra_binds: []
+ extra_env: []
+ cluster_domain: cluster.local
+ infra_container_image: ""
+ cluster_dns_server: 10.43.0.10
+ fail_swap_on: false
+ kubeproxy:
+ image: ""
+ extra_args: {}
+ extra_binds: []
+ extra_env: []
+network:
+ plugin: canal
+ options: {}
+authentication:
+ strategy: x509
+ options: {}
+ sans: []
+system_images:
+ etcd: rancher/coreos-etcd:v3.2.18
+ alpine: rancher/rke-tools:v$RKETOOLS
+ nginx_proxy: rancher/rke-tools:v$RKETOOLS
+ cert_downloader: rancher/rke-tools:v$RKETOOLS
+ kubernetes_services_sidecar: rancher/rke-tools:v$RKETOOLS
+ kubedns: rancher/k8s-dns-kube-dns-amd64:1.14.10
+ dnsmasq: rancher/k8s-dns-dnsmasq-nanny-amd64:1.14.10
+ kubedns_sidecar: rancher/k8s-dns-sidecar-amd64:1.14.10
+ kubedns_autoscaler: rancher/cluster-proportional-autoscaler-amd64:1.0.0
+ kubernetes: rancher/hyperkube:v$HYPERCUBE
+ flannel: rancher/coreos-flannel:v0.10.0
+ flannel_cni: rancher/coreos-flannel-cni:v0.3.0
+ calico_node: rancher/calico-node:v3.1.3
+ calico_cni: rancher/calico-cni:v3.1.3
+ calico_controllers: ""
+ calico_ctl: rancher/calico-ctl:v2.0.0
+ canal_node: rancher/calico-node:v3.1.3
+ canal_cni: rancher/calico-cni:v3.1.3
+ canal_flannel: rancher/coreos-flannel:v0.10.0
+ wave_node: weaveworks/weave-kube:2.1.2
+ weave_cni: weaveworks/weave-npc:2.1.2
+ pod_infra_container: $POD_INFRA_CONTAINER
+ ingress: rancher/nginx-ingress-controller:0.16.2-rancher1
+ ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.4
+ metrics_server: rancher/metrics-server-amd64:v0.2.1
+ssh_key_path: $SSHPATH_PREFIX/$SSHKEY
+ssh_agent_auth: false
+authorization:
+ mode: rbac
+ options: {}
+ignore_docker_version: false
+kubernetes_version: "$KUBERNETES_VERSION"
+private_registries: []
+ingress:
+ provider: ""
+ options: {}
+ node_selector: {}
+ extra_args: {}
+cluster_name: ""
+cloud_provider:
+ name: ""
+prefix_path: ""
+addon_job_timeout: 0
+bastion_host:
+ address: ""
+ port: ""
+ user: ""
+ ssh_key: ""
+ ssh_key_path: ""
+monitoring:
+ provider: ""
+ options: {}
+EOF
+
+
+
+ echo "Installing on ${SERVER} for ${BRANCH}: RKE: ${RKE_VERSION} Kubectl: ${KUBECTL_VERSION} Helm: ${HELM_VERSION} Docker: ${DOCKER_VERSION} username: ${USERNAME}"
+ sudo echo "127.0.0.1 ${SERVER}" >> /etc/hosts
+ echo "Install docker - If you must install as non-root - comment out the docker install below - run it separately, run the user mod, logout/login and continue this script"
+ curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
+ sudo usermod -aG docker $USERNAME
+
+ echo "Install RKE"
+ sudo wget https://github.com/rancher/rke/releases/download/v$RKE_VERSION/rke_linux-amd64
+ mv rke_linux-amd64 rke
+ sudo chmod +x rke
+ sudo mv ./rke /usr/local/bin/rke
+
+ echo "Install make - required for beijing+ - installed via yum groupinstall Development Tools in RHEL"
+ # ubuntu specific
+ sudo apt-get install make -y
+
+ sudo curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
+ sudo chmod +x ./kubectl
+ sudo mv ./kubectl /usr/local/bin/kubectl
+ sudo mkdir ~/.kube
+ wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
+ sudo tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
+ sudo mv linux-amd64/helm /usr/local/bin/helm
+
+ echo "Bringing RKE up - using supplied cluster.yml"
+ sudo rke up
+ echo "wait 2 extra min for the cluster"
+ sleep 60
+ echo "1 more min"
+ sleep 60
+ echo "copy the generated kube_config_cluster.yml to ~/.kube/config"
+ sudo cp kube_config_cluster.yml ~/.kube/config
+ # avoid using sudo for kubectl
+ sudo chmod 777 ~/.kube/config
+ echo "Verify all pods up on the kubernetes system - will return localhost:8080 until a host is added"
+ echo "kubectl get pods --all-namespaces"
+ kubectl get pods --all-namespaces
+ echo "install tiller/helm"
+ kubectl -n kube-system create serviceaccount tiller
+ kubectl create clusterrolebinding tiller --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
+ helm init --service-account tiller
+ kubectl -n kube-system rollout status deploy/tiller-deploy
+ echo "upgrade server side of helm in kubernetes"
+ if [ "$USERNAME" == "root" ]; then
+ helm version
+ else
+ sudo helm version
+ fi
+ echo "sleep 30"
+ sleep 30
+ if [ "$USERNAME" == "root" ]; then
+ helm init --upgrade
+ else
+ sudo helm init --upgrade
+ fi
+ echo "sleep 30"
+ sleep 30
+ echo "verify both versions are the same below"
+ if [ "$USERNAME" == "root" ]; then
+ helm version
+ else
+ sudo helm version
+ fi
+ echo "start helm server"
+ if [ "$USERNAME" == "root" ]; then
+ helm serve &
+ else
+ sudo helm serve &
+ fi
+ echo "sleep 30"
+ sleep 30
+ echo "add local helm repo"
+ if [ "$USERNAME" == "root" ]; then
+ helm repo add local http://127.0.0.1:8879
+ helm repo list
+ else
+ sudo helm repo add local http://127.0.0.1:8879
+ sudo helm repo list
+ fi
+ echo "To enable grafana dashboard - do this after running cd.sh which brings up onap - or you may get a 302xx port conflict"
+ echo "kubectl expose -n kube-system deployment monitoring-grafana --type=LoadBalancer --name monitoring-grafana-client"
+ echo "to get the nodeport for a specific VM running grafana"
+ echo "kubectl get services --all-namespaces | grep graf"
+ sudo docker version
+ helm version
+ kubectl version
+ kubectl get services --all-namespaces
+ kubectl get pods --all-namespaces
+ echo "finished!"
+}
+
+BRANCH=
+SERVER=
+ENVIRON=
+VALIDATE=false
+USERNAME=ubuntu
+SSHPATH_PREFIX=~/.ssh
+
+while getopts ":b:s:e:u:l:k:v" PARAM; do
+ case $PARAM in
+ u)
+ usage
+ exit 1
+ ;;
+ b)
+ BRANCH=${OPTARG}
+ ;;
+ e)
+ ENVIRON=${OPTARG}
+ ;;
+ s)
+ SERVER=${OPTARG}
+ ;;
+ l)
+ USERNAME=${OPTARG}
+ ;;
+ k)
+ SSHKEY=${OPTARG}
+ ;;
+ v)
+ VALIDATE=${OPTARG}
+ ;;
+ ?)
+ usage
+ exit
+ ;;
+ esac
+done
+
+if [[ -z $BRANCH ]]; then
+ usage
+ exit 1
+fi
+
+install_onap $BRANCH $SERVER $ENVIRON $USERNAME $SSHPATH_PREFIX $SSHKEY $VALIDATE
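
For reference, the new script is driven entirely by the flags documented in its usage() block; a typical invocation on a fresh Ubuntu 16.04 host looks like the sketch below (server name, login and key name are illustrative):

  # the key passed with -k must already exist as ~/.ssh/onap_rsa (chmod 400)
  sudo ./rke_setup.sh -b dublin -s rke.onap.cloud -e onap -l ubuntu -k onap_rsa
  # once the script finishes, inspect the cluster with
  kubectl get pods --all-namespaces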
diff --git a/kubernetes/dcaegen2/charts/dcae-bootstrap/resources/config/k8s-plugin.json b/kubernetes/dcaegen2/charts/dcae-bootstrap/resources/config/k8s-plugin.json
index 3f2168b6af..51d31a355b 100644
--- a/kubernetes/dcaegen2/charts/dcae-bootstrap/resources/config/k8s-plugin.json
+++ b/kubernetes/dcaegen2/charts/dcae-bootstrap/resources/config/k8s-plugin.json
@@ -19,6 +19,7 @@
{
"namespace" : "{{ if .Values.dcae_ns }}{{ .Values.dcae_ns}}{{ else }}{{include "common.namespace" . }}{{ end}}",
"consul_dns_name" : "{{ .Values.config.address.consul.host }}.{{ include "common.namespace" . }}",
+ "default_k8s_location" : "{{ .Values.default_k8s_location }}",
"image_pull_secrets" : ["{{ include "common.namespace" . }}-docker-registry-key"],
"filebeat":
{
diff --git a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
index 347e4a9620..8d525cb051 100644
--- a/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-bootstrap/values.yaml
@@ -81,7 +81,8 @@ postgres:
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.9
+image: onap/org.onap.dcaegen2.deployments.k8s-bootstrap-container:1.4.10-STAGING-latest
+default_k8s_location: central
# DCAE component images to be deployed via Cloudify Manager
# Use to override default setting in blueprints
diff --git a/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml b/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
index 5b541fb19d..3995bc6cba 100644
--- a/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-cloudify-manager/values.yaml
@@ -44,7 +44,7 @@ config:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.cm-container:1.5.1
+image: onap/org.onap.dcaegen2.deployments.cm-container:1.5.2-STAGING-latest
pullPolicy: Always
# probe configuration parameters
diff --git a/kubernetes/dcaegen2/charts/dcae-config-binding-service/templates/service.yaml b/kubernetes/dcaegen2/charts/dcae-config-binding-service/templates/service.yaml
index 088d381d50..794b896eef 100644
--- a/kubernetes/dcaegen2/charts/dcae-config-binding-service/templates/service.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-config-binding-service/templates/service.yaml
@@ -30,7 +30,7 @@ spec:
ports:
{{if eq .Values.service.type "NodePort" -}}
- port: {{ .Values.service.externalPort }}
- nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
+ nodePort: {{ .Values.global.nodePortPrefixExt| default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort }}
name: {{ .Values.service.name }}
{{- else -}}
- port: {{ .Values.service.externalPort }}
diff --git a/kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml b/kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml
index f6da1ef06c..31d6a8b2fb 100644
--- a/kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-config-binding-service/values.yaml
@@ -20,6 +20,7 @@
#################################################################
global:
nodePortPrefix: 302
+ nodePortPrefixExt: 304
readinessRepository: oomk8s
readinessImage: readiness-check:2.0.0
loggingRepository: docker.elastic.co
@@ -60,10 +61,11 @@ readiness:
path: /healthcheck
service:
- type: ClusterIP
+ type: NodePort
name: config-binding-service
externalPort: 10000
internalPort: 10000
+ nodePort: 15
# Resource Limit flavor -By Default using small
flavor: small
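
With nodePortPrefixExt set to 304 and service.nodePort set to 15, the template change above renders the exposed node port as the concatenation 30415. A minimal check once the chart is deployed, assuming an "onap" namespace and the service name "config-binding-service" from the values above:

  kubectl -n onap get svc config-binding-service \
    -o jsonpath='{.spec.type} {.spec.ports[0].nodePort}'
  # expected: NodePort 30415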
diff --git a/kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml b/kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml
index fbd3d9973e..6a00d36f2c 100644
--- a/kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-deployment-handler/values.yaml
@@ -27,7 +27,7 @@ global:
tlsRepository: nexus3.onap.org:10001
tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.1-STAGING-latest
consulLoaderRepository: nexus3.onap.org:10001
- consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
+ consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0-STAGING-latest
repositoryCred:
user: docker
password: docker
diff --git a/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml b/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
index 913cb714d6..97cd7d2482 100644
--- a/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-healthcheck/values.yaml
@@ -45,7 +45,7 @@ readiness:
periodSeconds: 10
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4
+image: onap/org.onap.dcaegen2.deployments.healthcheck-container:1.2.4-STAGING-latest
# Resource Limit flavor -By Default using small
flavor: small
diff --git a/kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml b/kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml
index 604f4ea6fa..c14614278c 100644
--- a/kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml
+++ b/kubernetes/dcaegen2/charts/dcae-policy-handler/values.yaml
@@ -27,7 +27,7 @@ global:
tlsRepository: nexus3.onap.org:10001
tlsImage: onap/org.onap.dcaegen2.deployments.tls-init-container:1.0.1-STAGING-latest
consulLoaderRepository: nexus3.onap.org:10001
- consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0
+ consulLoaderImage: onap/org.onap.dcaegen2.deployments.consul-loader-container:1.0.0-STAGING-latest
repositoryCred:
user: docker
password: docker
@@ -46,7 +46,7 @@ config:
#################################################################
# application image
repository: nexus3.onap.org:10001
-image: onap/org.onap.dcaegen2.platform.policy-handler:4.6.0
+image: onap/org.onap.dcaegen2.platform.policy-handler:4.6.0-STAGING-latest
pullPolicy: Always
# probe configuration parameters
diff --git a/kubernetes/nbi/values.yaml b/kubernetes/nbi/values.yaml
index ecc01fafa4..9f50620ab6 100644
--- a/kubernetes/nbi/values.yaml
+++ b/kubernetes/nbi/values.yaml
@@ -85,7 +85,7 @@ affinity: {}
# probe configuration parameters
liveness:
httpGet:
- path: /nbi/api/v3/status
+ path: /nbi/api/v4/status
port: 8080
initialDelaySeconds: 180
periodSeconds: 30
@@ -95,7 +95,7 @@ liveness:
readiness:
httpGet:
- path: /nbi/api/v3/status
+ path: /nbi/api/v4/status
port: 8080
initialDelaySeconds: 185
periodSeconds: 30
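
The probes above now target the v4 status resource. The same endpoint can be exercised by hand from inside the cluster, assuming the NBI service exposes container port 8080 under the DNS name nbi.onap (both assumptions):

  kubectl -n onap run nbi-check --rm -it --restart=Never --image=busybox -- \
    wget -qO- http://nbi.onap:8080/nbi/api/v4/status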
diff --git a/kubernetes/pnda/charts/dcae-pnda-bootstrap/values.yaml b/kubernetes/pnda/charts/dcae-pnda-bootstrap/values.yaml
index e22b8d26e7..d01c1548c2 100644
--- a/kubernetes/pnda/charts/dcae-pnda-bootstrap/values.yaml
+++ b/kubernetes/pnda/charts/dcae-pnda-bootstrap/values.yaml
@@ -38,7 +38,6 @@ pnda:
osUser: centos
nameserver: 8.8.8.8
ntp: pool.ntp.org
- mirrorNodePort: 88
apps:
fsType: local
networkCidr: 10.0.0.0/16
diff --git a/kubernetes/sdc/resources/config/environments/AUTO.json b/kubernetes/sdc/resources/config/environments/AUTO.json
index bb5df2bf90..be9db4cd52 100755
--- a/kubernetes/sdc/resources/config/environments/AUTO.json
+++ b/kubernetes/sdc/resources/config/environments/AUTO.json
@@ -80,6 +80,7 @@
},
"cassandra": {
+ "cassandra_port": 9042,
"concurrent_reads": "32",
"num_tokens": "256",
"data_dir": "/var/lib/cassandra/data",
diff --git a/kubernetes/sdnc/resources/config/bin/startODL.sh b/kubernetes/sdnc/resources/config/bin/startODL.sh
index 6718aaf128..2513fc9dd4 100755
--- a/kubernetes/sdnc/resources/config/bin/startODL.sh
+++ b/kubernetes/sdnc/resources/config/bin/startODL.sh
@@ -20,17 +20,52 @@
# ============LICENSE_END=========================================================
###
+# Append features to karaf boot feature configuration
+# $1 additional feature to be added
+# $2 repositories to be added (optional)
+function addToFeatureBoot() {
+ CFG=$ODL_HOME/etc/org.apache.karaf.features.cfg
+ ORIG=$CFG.orig
+ if [ -n "$2" ] ; then
+ echo "Add repository: $2"
+ mv $CFG $ORIG
+ cat $ORIG | sed -e "\|featuresRepositories|s|$|,$2|" > $CFG
+ fi
+ echo "Add boot feature: $1"
+ mv $CFG $ORIG
+ cat $ORIG | sed -e "\|featuresBoot *=|s|$|,$1|" > $CFG
+}
+
+# Replace a feature in the karaf boot feature configuration
+# $1 search pattern
+# $2 replacement
+function replaceFeatureBoot() {
+ CFG=$ODL_HOME/etc/org.apache.karaf.features.cfg
+ ORIG=$CFG.orig
+ echo "Replace boot feature $1 with: $2"
+ sed -i "/featuresBoot/ s/$1/$2/g" $CFG
+}
+
+function install_sdnrwt_features() {
+ addToFeatureBoot "$SDNRWT_BOOTFEATURES" $SDNRWT_REPOSITORY
+}
+
function enable_odl_cluster(){
if [ -z $SDNC_REPLICAS ]; then
echo "SDNC_REPLICAS is not configured in Env field"
exit
fi
+ #Be sure to remove feature odl-netconf-connector-all from list
+ replaceFeatureBoot "odl-netconf-connector-all,"
+
echo "Installing Opendaylight cluster features"
- mv $ODL_HOME/etc/org.apache.karaf.features.cfg $ODL_HOME/etc/org.apache.karaf.features.cfg.orig
- cat $ODL_HOME/etc/org.apache.karaf.features.cfg.orig | sed -e "\|featuresBoot=config|s|$|,odl-mdsal-clustering,odl-jolokia|" > $ODL_HOME/etc/org.apache.karaf.features.cfg
+ replaceFeatureBoot odl-netconf-topology odl-netconf-clustered-topology
+ replaceFeatureBoot odl-mdsal-all odl-mdsal-all,odl-mdsal-clustering
+ addToFeatureBoot odl-jolokia
#${ODL_HOME}/bin/client feature:install odl-mdsal-clustering
#${ODL_HOME}/bin/client feature:install odl-jolokia
+
echo "Update cluster information statically"
hm=$(hostname)
@@ -88,6 +123,13 @@ MYSQL_HOST=${MYSQL_HOST:-{{.Release.Name}}-{{.Values.mysql.nameOverride}}-0.{{.V
ENABLE_ODL_CLUSTER=${ENABLE_ODL_CLUSTER:-false}
GEO_ENABLED=${GEO_ENABLED:-false}
DBINIT_DIR=${DBINIT_DIR:-/opt/opendaylight/current/daexim}
+SDNRWT=${SDNRWT:-false}
+SDNRWT_BOOTFEATURES=${SDNRWT_BOOTFEATURES:-sdnr-wt-feature-aggregator}
+
+echo "Settings:"
+echo " ENABLE_ODL_CLUSTER=$ENABLE_ODL_CLUSTER"
+echo " SDNC_REPLICAS=$SDNC_REPLICAS"
+echo " SDNRWT=$SDNRWT"
#
# Wait for database to init properly
@@ -129,6 +171,8 @@ then
if $ENABLE_ODL_CLUSTER ; then enable_odl_cluster ; fi
+ if $SDNRWT ; then install_sdnrwt_features ; fi
+
echo "Installed at `date`" > ${SDNC_HOME}/.installed
fi
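
The helper functions added above drive everything through sed edits of org.apache.karaf.features.cfg. A self-contained sketch of the same substitutions against a throw-away copy of that file, useful for verifying the expressions outside the container (paths and feature names are illustrative):

  CFG=/tmp/org.apache.karaf.features.cfg
  printf 'featuresRepositories = repo-a\nfeaturesBoot = config,standard,odl-netconf-topology,odl-mdsal-all\n' > $CFG
  # equivalent of addToFeatureBoot "odl-jolokia"
  sed -i -e "\|featuresBoot *=|s|$|,odl-jolokia|" $CFG
  # equivalent of replaceFeatureBoot odl-netconf-topology odl-netconf-clustered-topology
  sed -i "/featuresBoot/ s/odl-netconf-topology/odl-netconf-clustered-topology/g" $CFG
  cat $CFG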