-rw-r--r--  .gitignore | 5
-rw-r--r--  .idea/vcs.xml | 6
-rw-r--r--  TOSCA/Helm/helmdelete.sh | 23
-rw-r--r--  TOSCA/Helm/helminstall.sh | 30
-rw-r--r--  TOSCA/Helm/onap-blueprint.yaml | 291
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/LICENSE | 17
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/README.md | 73
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml | 76
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml | 216
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml | 307
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj | 66
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/create.py | 93
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py | 175
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py | 153
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py | 88
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh | 29
-rw-r--r--  TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py | 43
-rw-r--r--  kubernetes/README_HELM | 2
-rw-r--r--  kubernetes/aai/resources/config/log/model-loader/logback.xml (renamed from kubernetes/aai/resources/model-loader/conf/logback.xml) | 0
-rw-r--r--  kubernetes/aai/resources/config/log/resources/logback.xml (renamed from kubernetes/aai/resources/resources/conf/logback.xml) | 0
-rw-r--r--  kubernetes/aai/resources/config/log/search-data-service/logback.xml (renamed from kubernetes/aai/resources/search-data-service/conf/logback.xml) | 0
-rw-r--r--  kubernetes/aai/resources/config/log/sparky-be/logback.xml (renamed from kubernetes/aai/resources/sparky-be/conf/logback.xml) | 0
-rw-r--r--  kubernetes/aai/resources/config/log/traversal/logback.xml (renamed from kubernetes/aai/resources/traversal/conf/logback.xml) | 0
-rw-r--r--  kubernetes/aai/templates/aai-deployment.yaml | 1
-rw-r--r--  kubernetes/aai/templates/aai-resources-deployment.yaml | 11
-rw-r--r--  kubernetes/aai/templates/aai-resources-traversal-configmap.yaml | 16
-rw-r--r--  kubernetes/aai/templates/aai-traversal-deployment.yaml | 11
-rw-r--r--  kubernetes/aai/templates/data-router-deployment.yaml | 1
-rw-r--r--  kubernetes/aai/templates/elasticsearch-deployment.yaml | 1
-rw-r--r--  kubernetes/aai/templates/hbase-deployment.yaml | 1
-rw-r--r--  kubernetes/aai/templates/modelloader-deployment-configmap.yaml | 8
-rw-r--r--  kubernetes/aai/templates/modelloader-deployment.yaml | 11
-rw-r--r--  kubernetes/aai/templates/search-data-service-configmap.yaml | 8
-rw-r--r--  kubernetes/aai/templates/search-data-service-deployment.yaml | 11
-rw-r--r--  kubernetes/aai/templates/sparky-be-deployment-configmap.yaml | 8
-rw-r--r--  kubernetes/aai/templates/sparky-be-deployment.yaml | 11
-rw-r--r--  kubernetes/aai/values.yaml | 10
-rw-r--r--  kubernetes/appc/templates/appc-deployment.yaml | 1
-rw-r--r--  kubernetes/appc/templates/db-deployment.yaml | 3
-rw-r--r--  kubernetes/appc/templates/dgbuilder-deployment.yaml | 1
-rw-r--r--  kubernetes/appc/values.yaml | 5
-rw-r--r--  kubernetes/message-router/templates/message-router-dmaap.yaml | 1
-rw-r--r--  kubernetes/message-router/templates/message-router-kafka.yaml | 1
-rw-r--r--  kubernetes/message-router/templates/message-router-zookeeper.yaml | 3
-rw-r--r--  kubernetes/message-router/values.yaml | 3
-rw-r--r--  kubernetes/msb/templates/msb-discovery-deployment.yaml | 17
-rw-r--r--  kubernetes/msb/templates/msb-eag-deployment.yaml | 17
-rw-r--r--  kubernetes/msb/templates/msb-iag-deployment.yaml | 17
-rw-r--r--  kubernetes/msb/values.yaml | 1
-rw-r--r--  kubernetes/mso/templates/db-deployment.yaml | 1
-rw-r--r--  kubernetes/mso/templates/mso-deployment.yaml | 2
-rw-r--r--  kubernetes/mso/values.yaml | 2
-rw-r--r--  kubernetes/policy/templates/dep-brmsgw.yaml | 2
-rw-r--r--  kubernetes/policy/templates/dep-drools.yaml | 2
-rw-r--r--  kubernetes/policy/templates/dep-maria.yaml | 3
-rw-r--r--  kubernetes/policy/templates/dep-nexus.yaml | 1
-rw-r--r--  kubernetes/policy/templates/dep-pap.yaml | 1
-rw-r--r--  kubernetes/policy/templates/dep-pdp.yaml | 2
-rw-r--r--  kubernetes/policy/values.yaml | 9
-rwxr-xr-x  kubernetes/portal/templates/portal-apps-deployment.yaml | 1
-rwxr-xr-x  kubernetes/portal/templates/portal-mariadb-deployment.yaml | 1
-rw-r--r--  kubernetes/portal/templates/portal-vnc-dep.yaml | 1
-rw-r--r--  kubernetes/portal/templates/portal-widgets-deployment.yaml | 1
-rw-r--r--  kubernetes/portal/values.yaml | 4
-rw-r--r--  kubernetes/readiness/docker/init/ready.py | 84
-rw-r--r--  kubernetes/sdnc/templates/dgbuilder-deployment.yaml | 1
-rw-r--r--  kubernetes/sdnc/templates/dmaap-deployment.yaml | 1
-rw-r--r--  kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml | 2
-rw-r--r--  kubernetes/sdnc/templates/ueb-deployment.yaml | 1
-rw-r--r--  kubernetes/sdnc/templates/web-deployment.yaml | 1
-rw-r--r--  kubernetes/sdnc/values.yaml | 5
-rwxr-xr-x  kubernetes/vid/resources/config/lf_config/vid-my.cnf (renamed from kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-my.cnf) | 0
-rwxr-xr-x  kubernetes/vid/resources/config/lf_config/vid-pre-init.sql (renamed from kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-pre-init.sql) | 0
-rw-r--r--  kubernetes/vid/resources/config/log/filebeat/filebeat.yml | 41
-rw-r--r--  kubernetes/vid/resources/config/log/vid/logback.xml (renamed from kubernetes/config/docker/init/src/config/log/vid/logback.xml) | 0
-rw-r--r--  kubernetes/vid/templates/vid-lfconfig-configmap.yaml | 9
-rw-r--r--  kubernetes/vid/templates/vid-log-configmap.yaml | 17
-rw-r--r--  kubernetes/vid/templates/vid-mariadb-deployment.yaml | 24
-rw-r--r--  kubernetes/vid/templates/vid-pv-pvc.yaml | 4
-rw-r--r--  kubernetes/vid/templates/vid-server-deployment.yaml | 13
-rw-r--r--  kubernetes/vid/values.yaml | 5
81 files changed, 1996 insertions, 117 deletions
diff --git a/.gitignore b/.gitignore
index b671448e83..028a8ee5c5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,2 @@
kubernetes/config/onap-parameters.yaml
-.idea/vcs.xml
-.idea/modules.xml
-.idea/oom.iml
-.idea/workspace.xml
+.idea/*
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 35eb1ddfbb..0000000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
- <component name="VcsDirectoryMappings">
- <mapping directory="" vcs="Git" />
- </component>
-</project> \ No newline at end of file
diff --git a/TOSCA/Helm/helmdelete.sh b/TOSCA/Helm/helmdelete.sh
new file mode 100644
index 0000000000..d3f342afc1
--- /dev/null
+++ b/TOSCA/Helm/helmdelete.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
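+# Usage: helmdelete.sh <namespace-prefix> <component>
+# (invoked by the onap-blueprint.yaml stop lifecycle with args such as "onap mso")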
+helm delete $1-$2 --purge
+kubectl delete namespace $1-$2
+kubectl delete clusterrolebinding $1-$2-admin-binding
+
+
diff --git a/TOSCA/Helm/helminstall.sh b/TOSCA/Helm/helminstall.sh
new file mode 100644
index 0000000000..96f38dae76
--- /dev/null
+++ b/TOSCA/Helm/helminstall.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
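+# Usage: helminstall.sh <namespace-prefix> <component>
+# (invoked by the onap-blueprint.yaml start lifecycle with args such as "onap mso")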
+printf "%s" "$*"
+printf `pwd`
+printf "%s" "---------------"
+kubectl create namespace $1-$2
+kubectl create clusterrolebinding $1-$2-admin-binding --clusterrole=cluster-admin --serviceaccount=$1-$2:default
+# assign default auth token
+if [[ -z $ONAP_DEFAULT_AUTH_TOKEN ]]; then
+ DEFAULT_SECRET=`kubectl get secrets -n $1-$2 | grep default-token | awk '{ print $1}'`
+ ONAP_DEFAULT_AUTH_TOKEN=`kubectl get secrets $DEFAULT_SECRET -n $1-$2 -o yaml | grep 'token:' | awk '{ print $2}' | base64 -d`
+fi
+kubectl --namespace $1-$2 create secret docker-registry $1-docker-registry-key --docker-server=nexus3.onap.org:10001 --docker-username=docker --docker-password=docker --docker-email=@
+helm install ../$2/ --name $1-$2 --namespace $1 --set nsPrefix=$1,nodePortPrefix=302,kubeMasterAuthToken=$ONAP_DEFAULT_AUTH_TOKEN \ No newline at end of file
diff --git a/TOSCA/Helm/onap-blueprint.yaml b/TOSCA/Helm/onap-blueprint.yaml
new file mode 100644
index 0000000000..092f7432c5
--- /dev/null
+++ b/TOSCA/Helm/onap-blueprint.yaml
@@ -0,0 +1,291 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+imports:
+ - http://www.getcloudify.org/spec/cloudify/3.4.2/types.yaml
+ - http://www.getcloudify.org/spec/fabric-plugin/1.5/plugin.yaml
+
+inputs:
+  # Agent installation method for the master VM; "none" means no agent (the fabric plugin runs the scripts instead).
+  install_method:
+    default: none # Set to "remote" to install a Cloudify agent over SSH.
+ ip:
+ user:
+ default: centos
+ namespace_perfix:
+ default: onap
+
+node_types:
+ cloudify.nodes.Helm:
+ derived_from: cloudify.nodes.SoftwareComponent
+ properties:
+ cwd:
+ default: /home/centos/oom/kubernetes/oneclick
+ args:
+ default:
+ - onap
+ - mso
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ # This is for fabric: # Run Anywhere.
+ start:
+ implementation: fabric.fabric_plugin.tasks.run_script
+ inputs:
+ script_path:
+ default: helminstall.sh # Relative to the blueprint file.
+ process:
+ default: &process_vars
+ cwd: { get_property: [ SELF, cwd ] }
+ args: { get_property: [ SELF, args ] }
+ fabric_env:
+ default: &fabric_env
+ user: { get_property: [ k8s_master, agent_config, user ] }
+ host_string: { get_property: [ k8s_master, ip ] }
+ key: { get_property: [ k8s_master, agent_config, key ] }
+ stop:
+ implementation: fabric.fabric_plugin.tasks.run_script
+ inputs:
+ script_path:
+ default: helmdelete.sh
+ process:
+ default: *process_vars
+ fabric_env:
+ default: *fabric_env
+
+node_templates:
+
+ k8s_master:
+ type: cloudify.nodes.Compute
+ properties:
+ ip: { get_input: ip }
+ agent_config:
+ install_method: { get_input: install_method } # False for Fabric.
+ key: { get_secret: agent_key_private }
+ user: { get_input: user }
+
+ consul:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - consul
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ msb:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - msb
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+
+ mso:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - mso
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ appc:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - appc
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ message-router:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - message-router
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ sdnc:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - sdnc
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ vid:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - vid
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ robot:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - robot
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ policy:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - policy
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ portal:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - portal
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ aai:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - aai
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ sdc:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - sdc
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ log:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - log
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ cli:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - cli
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ multicloud:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - multicloud
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ clamp:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - clamp
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ vnfsdk:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - vnfsdk
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ uui:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - uui
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ aaf:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - aaf
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ vfc:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - vfc
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
+
+ kube2msb:
+ type: cloudify.nodes.Helm
+ properties:
+ args:
+ - { get_input: namespace_perfix }
+ - kube2msb
+ relationships:
+ - type: cloudify.relationships.connected_to
+ target: k8s_master
diff --git a/TOSCA/kubernetes-cluster-TOSCA/LICENSE b/TOSCA/kubernetes-cluster-TOSCA/LICENSE
new file mode 100644
index 0000000000..696f3d0fb7
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/LICENSE
@@ -0,0 +1,17 @@
+ ============LICENSE_START==========================================
+ ===================================================================
+ Copyright © 2018 AT&T
+ All rights reserved.
+ ===================================================================
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ ============LICENSE_END============================================ \ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/README.md b/TOSCA/kubernetes-cluster-TOSCA/README.md
new file mode 100644
index 0000000000..8bc097f69a
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/README.md
@@ -0,0 +1,73 @@
+[![Build Status](https://circleci.com/gh/cloudify-examples/simple-kubernetes-blueprint.svg?style=shield&circle-token=:circle-token)](https://circleci.com/gh/cloudify-examples/simple-kubernetes-blueprint)
+
+
+## Kubernetes Cluster Example
+
+This blueprint creates an example Kubernetes cluster, intended for demonstration purposes only. The underlying Kubernetes configuration method used is [Kubeadm](https://kubernetes.io/docs/admin/kubeadm/), which is not considered production-ready.
+
+Regardless of your infrastructure choice, this blueprint installs and configures the following on each VM:
+- The Kubernetes Yum repository
+- Docker, version 1.12.6-28.git1398f24.el7.centos
+- kubelet, version 1.8.6-0
+- kubeadm, version 1.8.6-0
+- kubernetes-cni, version 0.5.1-1
+- Weave
+
+
+## Prerequisites
+
+You will need a *Cloudify Manager* running in either AWS, Azure, or Openstack. The Cloudify Manager should be set up using the [Cloudify environment setup](https://github.com/cloudify-examples/cloudify-environment-setup) - that's how we test this blueprint. The following are therefore assumed:
+* You have uploaded all of the required plugins to your manager in order to use this blueprint. (See the imports section of the blueprint.yaml file to check that you are using the correct plugins and their respective versions.)
+* You have created all of the required secrets on your manager in order to use this blueprint. (See the Secrets section below.)
+* A CentOS 7.x image. If you are running in AWS or Openstack, your image must support [Cloud-init](https://cloudinit.readthedocs.io/en/latest/).
+
+
+#### Secrets
+
+* Common Secrets:
+ * agent_key_private
+ * agent_key_public
+
+* Openstack Secrets:
+ * external_network_name: This is the network on your Openstack that represents the internet gateway network.
+ * public_network_name: An openstack network. (Inbound is expected, outbound is required.)
+ * public_subnet_name: A subnet on the public network.
+ * private_network_name: An openstack network. (Inbound is not expected, outbound is required.)
+ * private_subnet_name: A subnet on the network. (Inbound is not expected, outbound is required.)
+ * router_name: This is a router that is attached to your Subnets designated in the secrets public_subnet_name and private_subnet_name.
+ * region: Your Keystone V2 region.
+ * keystone_url: Your Keystone V2 auth URL.
+ * keystone_tenant_name: Your Keystone V2 tenant name.
+ * keystone_password: Your Keystone V2 password.
+  * keystone_username: Your Keystone V2 username.
+
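+These secrets are read by the blueprint via `get_secret`. If any are missing on your manager, one way to create them is with the Cloudify CLI, as in this minimal sketch (it assumes a `cfy` CLI where `-s`/`--secret-string` supplies the value; substitute your own values):
+
+```shell
+cfy secrets create keystone_username -s <username>
+cfy secrets create keystone_password -s <password>
+cfy secrets create keystone_tenant_name -s <tenant-name>
+cfy secrets create keystone_url -s <keystone-v2-auth-url>
+cfy secrets create region -s <region>
+# ...and likewise for the network, subnet, router, and agent key secrets listed above.
+```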
+
+### Step 1: Install the Kubernetes cluster
+
+#### For Openstack run:
+
+Please follow the instructions on the wiki:
+https://wiki.onap.org/display/DW/ONAP+on+Kubernetes+on+Cloudify#ONAPonKubernetesonCloudify-OpenStack
+
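+As a rough sketch of what the wiki covers, a typical install with the Cloudify CLI looks like this (the blueprint ID `kubernetes-cluster` is only illustrative):
+
+```shell
+# run from the TOSCA/kubernetes-cluster-TOSCA directory
+cfy install openstack-blueprint.yaml -b kubernetes-cluster
+```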
+
+### Step 2: Verify that the demo installed and started.
+
+Once the workflow execution is complete, verify that these secrets were created:
+
+
+```shell
+(Incubator)UNICORN:Projects trammell$ cfy secrets list
+Listing all secrets...
+
+Secrets:
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+| key | created_at | updated_at | permission | tenant_name | created_by |
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+| kubernetes-admin_client_certificate_data | 2017-08-09 14:58:06.421 | 2017-08-09 14:58:06.421 | | default_tenant | admin |
+| kubernetes-admin_client_key_data | 2017-08-09 14:58:06.513 | 2017-08-09 14:58:06.513 | | default_tenant | admin |
+| kubernetes_certificate_authority_data | 2017-08-09 14:58:06.327 | 2017-08-09 14:58:06.327 | | default_tenant | admin |
+| kubernetes_master_ip | 2017-08-09 14:56:12.359 | 2017-08-09 14:56:12.359 | | default_tenant | admin |
+| kubernetes_master_port | 2017-08-09 14:56:12.452 | 2017-08-09 14:56:12.452 | | default_tenant | admin |
++------------------------------------------+--------------------------+--------------------------+------------+----------------+------------+
+```
+
diff --git a/TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml b/TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml
new file mode 100644
index 0000000000..137681631e
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/imports/cloud-config.yaml
@@ -0,0 +1,76 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This is the cloud-init config. It installs the required packages and does some basic configuration on every VM.
+
+node_templates:
+
+ cloudify_host_cloud_config:
+ type: cloudify.nodes.CloudInit.CloudConfig
+ properties:
+ resource_config:
+ groups:
+ - docker
+ users:
+ - name: { get_input: agent_user }
+ primary-group: wheel
+ groups: docker
+ shell: /bin/bash
+ sudo: ['ALL=(ALL) NOPASSWD:ALL']
+ ssh-authorized-keys:
+ - { get_secret: agent_key_public }
+ write_files:
+ - path: /etc/yum.repos.d/kubernetes.repo
+ owner: root:root
+ permissions: '0444'
+ content: |
+ # installed by cloud-init
+ [kubernetes]
+ name=Kubernetes
+ baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
+ enabled=1
+ gpgcheck=1
+ repo_gpgcheck=1
+ gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+ - path: /etc/sysctl.d/k8s.conf
+ owner: root:root
+ permissions: '0444'
+ content: |
+ # installed by cloud-init
+ net.bridge.bridge-nf-call-ip6tables = 1
+ net.bridge.bridge-nf-call-iptables = 1
+
+ packages:
+ - [docker, 1.12.6]
+ - [kubelet, 1.8.6-0]
+ - [kubeadm, 1.8.6-0]
+ - [kubectl, 1.8.6-0]
+ - [kubernetes-cni, 0.5.1-1]
+ - [nfs-utils]
+ runcmd:
+ - [ setenforce, 0 ]
+ - [ sysctl , '--system' ]
+ - [ systemctl, enable, docker ]
+ - [ systemctl, start, docker ]
+ - [ systemctl, enable, kubelet ]
+ - [ systemctl, start, kubelet ]
+ - [ mkdir, '-p', /tmp/data ]
+ - [ chcon, '-Rt', svirt_sandbox_file_t, /tmp/data ]
+ - [ mkdir, '-p', /dockerdata-nfs ]
+ - [ chmod, 777, /dockerdata-nfs ] \ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml b/TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml
new file mode 100644
index 0000000000..4467fc475d
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/imports/kubernetes.yaml
@@ -0,0 +1,216 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+inputs:
+
+ labels:
+ default: {}
+
+node_types:
+
+ cloudify.nodes.Kubernetes:
+ derived_from: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/create.py
+
+ cloudify.nodes.Kubernetes.Master:
+ derived_from: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/create.py
+ configure:
+ implementation: scripts/kubernetes_master/configure.py
+ start:
+ implementation: scripts/kubernetes_master/start.py
+
+ cloudify.nodes.Kubernetes.Node:
+ derived_from: cloudify.nodes.Root
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ implementation: scripts/create.py
+ configure:
+ implementation: scripts/kubernetes_node/configure.py
+ start:
+ implementation: fabric.fabric_plugin.tasks.run_task
+ inputs:
+ tasks_file:
+ default: scripts/tasks.py
+ task_name:
+ default: label_node
+ task_properties:
+ default:
+ hostname: { get_attribute: [ SELF, hostname ] }
+ labels: { get_input: labels }
+ fabric_env:
+ default:
+ host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+ user: { get_input: agent_user }
+ key: { get_secret: agent_key_private }
+ stop:
+ implementation: fabric.fabric_plugin.tasks.run_task
+ inputs:
+ tasks_file:
+ default: scripts/tasks.py
+ task_name:
+ default: stop_node
+ task_properties:
+ default:
+ hostname: { get_attribute: [ SELF, hostname ] }
+ fabric_env:
+ default:
+ host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+ user: { get_input: agent_user }
+ key: { get_secret: agent_key_private }
+ delete:
+ implementation: fabric.fabric_plugin.tasks.run_task
+ inputs:
+ tasks_file:
+ default: scripts/tasks.py
+ task_name:
+ default: delete_node
+ task_properties:
+ default:
+ hostname: { get_attribute: [ SELF, hostname ] }
+ fabric_env:
+ default:
+ host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+ user: { get_input: agent_user }
+ key: { get_secret: agent_key_private }
+
+node_templates:
+
+ kubernetes_master:
+ type: cloudify.nodes.Kubernetes.Master
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: kubernetes_master_host
+
+ kubernetes_node:
+ type: cloudify.nodes.Kubernetes.Node
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: kubernetes_node_host
+ - type: cloudify.relationships.depends_on
+ target: kubernetes_master
+
+outputs:
+
+ kubernetes_cluster_bootstrap_token:
+ value: { get_attribute: [ kubernetes_master, bootstrap_token ] }
+
+ kubernetes_cluster_master_ip:
+ value: { get_attribute: [ kubernetes_master, master_ip ] }
+
+ kubernetes-admin_client_certificate_data:
+ value: { get_attribute: [ kubernetes_master, kubernetes-admin_client_certificate_data ] }
+
+ kubernetes-admin_client_key_data:
+ value: { get_attribute: [ kubernetes_master, kubernetes-admin_client_key_data ] }
+
+ kubernetes_certificate_authority_data:
+ value: { get_attribute: [ kubernetes_master, kubernetes_certificate_authority_data ] }
+
+policy_types:
+ scale_policy_type:
+ source: policies/scale.clj
+ properties:
+ policy_operates_on_group:
+ default: true
+ service_selector:
+ description: regular expression that selects the metric to be measured
+ default: ".*"
+ moving_window_size:
+ description: the moving window for individual sources in secs
+ default: 10
+ scale_threshold:
+        description: the value to trigger scaling over aggregated moving values
+ scale_limit:
+ description: scaling limit
+ default: 10
+ scale_direction:
+ description: scale up ('<') or scale down ('>')
+ default: '<'
+ cooldown_time:
+ description: the time to wait before evaluating again after a scale
+ default: 60
+
+groups: {}
+
+# scale_up_group:
+# members: [kubernetes_node_host]
+# policies:
+# auto_scale_up:
+# type: scale_policy_type
+# properties:
+# policy_operates_on_group: true
+# scale_limit: 6
+# scale_direction: '<'
+# scale_threshold: 30
+# service_selector: .*kubernetes_node_host.*cpu.total.user
+# cooldown_time: 60
+# triggers:
+# execute_scale_workflow:
+# type: cloudify.policies.triggers.execute_workflow
+# parameters:
+# workflow: scale
+# workflow_parameters:
+# delta: 1
+# scalable_entity_name: kubernetes_node_host
+
+# scale_down_group:
+# members: [kubernetes_node_host]
+# policies:
+# auto_scale_down:
+# type: scale_policy_type
+# properties:
+# policy_operates_on_group: true
+# scale_limit: 6
+# scale_direction: '<'
+# scale_threshold: 30
+# service_selector: .*kubernetes_node_host.*cpu.total.user
+# cooldown_time: 60
+# triggers:
+# execute_scale_workflow:
+# type: cloudify.policies.triggers.execute_workflow
+# parameters:
+# workflow: scale
+# workflow_parameters:
+# delta: 1
+# scalable_entity_name: kubernetes_node_host
+
+# heal_group:
+# members: [kubernetes_node_host]
+# policies:
+# simple_autoheal_policy:
+# type: cloudify.policies.types.host_failure
+# properties:
+# service:
+# - .*kubernetes_node_host.*.cpu.total.system
+# - .*kubernetes_node_host.*.process.hyperkube.cpu.percent
+# interval_between_workflows: 60
+# triggers:
+# auto_heal_trigger:
+# type: cloudify.policies.triggers.execute_workflow
+# parameters:
+# workflow: heal
+# workflow_parameters:
+# node_instance_id: { 'get_property': [ SELF, node_id ] }
+# diagnose_value: { 'get_property': [ SELF, diagnose ] }
diff --git a/TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml b/TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml
new file mode 100644
index 0000000000..5c348e957d
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/openstack-blueprint.yaml
@@ -0,0 +1,307 @@
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+tosca_definitions_version: cloudify_dsl_1_3
+
+description: >
+ This blueprint creates a Kubernetes Cluster.
+ It is based on this documentation: https://kubernetes.io/docs/getting-started-guides/kubeadm/
+
+imports:
+ - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml
+ - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-openstack-plugin/2.2.0/plugin.yaml
+ - https://raw.githubusercontent.com/cloudify-incubator/cloudify-utilities-plugin/1.2.5/plugin.yaml
+ - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-fabric-plugin/1.5/plugin.yaml
+ - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-diamond-plugin/1.3.5/plugin.yaml
+ - imports/cloud-config.yaml
+ - imports/kubernetes.yaml
+
+inputs:
+
+ image:
+ description: Image to be used when launching agent VMs
+ default: { get_secret: centos_core_image }
+
+ flavor:
+ description: Flavor of the agent VMs
+ default: { get_secret: large_image_flavor }
+
+ agent_user:
+ description: >
+ User for connecting to agent VMs
+ default: centos
+
+dsl_definitions:
+
+ openstack_config: &openstack_config
+ username: { get_secret: keystone_username }
+ password: { get_secret: keystone_password }
+ tenant_name: { get_secret: keystone_tenant_name }
+ auth_url: { get_secret: keystone_url }
+ region: { get_secret: region }
+
+node_templates:
+
+ nfs_server:
+ type: cloudify.nodes.SoftwareComponent
+ properties:
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ start:
+ implementation: fabric.fabric_plugin.tasks.run_script
+ inputs:
+ script_path: scripts/nfs.sh
+ use_sudo: true
+ process:
+ args:
+ fabric_env:
+ host_string: { get_attribute: [ kubernetes_master_host, ip ] }
+ user: { get_input: agent_user }
+ key: { get_secret: agent_key_private }
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: kubernetes_master_host
+
+ kubernetes_master_host:
+ type: cloudify.openstack.nodes.Server
+ properties:
+ openstack_config: *openstack_config
+ agent_config:
+ user: { get_input: agent_user }
+ install_method: remote
+ port: 22
+ key: { get_secret: agent_key_private }
+ server:
+ key_name: ''
+ image: ''
+ flavor: ''
+ management_network_name: { get_property: [ public_network, resource_id ] }
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ inputs:
+ args:
+ image: { get_input: image }
+ flavor: { get_input: flavor }
+ userdata: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
+ relationships:
+ - target: kubernetes_master_port
+ type: cloudify.openstack.server_connected_to_port
+ - type: cloudify.relationships.depends_on
+ target: cloudify_host_cloud_config
+
+ kubernetes_node_host:
+ type: cloudify.openstack.nodes.Server
+ properties:
+ openstack_config: *openstack_config
+ agent_config:
+ user: { get_input: agent_user }
+ install_method: remote
+ port: 22
+ key: { get_secret: agent_key_private }
+ server:
+ key_name: ''
+ image: ''
+ flavor: ''
+ management_network_name: { get_property: [ private_network, resource_id ] }
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: k8s_node_scaling_tier
+ - target: kubernetes_node_port
+ type: cloudify.openstack.server_connected_to_port
+ interfaces:
+ cloudify.interfaces.lifecycle:
+ create:
+ inputs:
+ args:
+ image: { get_input: image }
+ flavor: { get_input: flavor }
+ userdata: { get_attribute: [ cloudify_host_cloud_config, cloud_config ] }
+ cloudify.interfaces.monitoring_agent:
+ install:
+ implementation: diamond.diamond_agent.tasks.install
+ inputs:
+ diamond_config:
+ interval: 1
+ start: diamond.diamond_agent.tasks.start
+ stop: diamond.diamond_agent.tasks.stop
+ uninstall: diamond.diamond_agent.tasks.uninstall
+ cloudify.interfaces.monitoring:
+ start:
+ implementation: diamond.diamond_agent.tasks.add_collectors
+ inputs:
+ collectors_config:
+ CPUCollector: {}
+ MemoryCollector: {}
+ LoadAverageCollector: {}
+ DiskUsageCollector:
+ config:
+ devices: x?vd[a-z]+[0-9]*$
+ NetworkCollector: {}
+ ProcessResourcesCollector:
+ config:
+ enabled: true
+ unit: B
+ measure_collector_time: true
+ cpu_interval: 0.5
+ process:
+ hyperkube:
+ name: hyperkube
+
+ kubernetes_security_group:
+ type: cloudify.openstack.nodes.SecurityGroup
+ properties:
+ openstack_config: *openstack_config
+ security_group:
+ name: kubernetes_security_group
+ description: kubernetes master security group
+ rules:
+ - remote_ip_prefix: 0.0.0.0/0
+ port_range_min: 1
+ port_range_max: 65535
+ protocol: tcp
+ direction: ingress
+ ethertype: IPv4
+ - remote_ip_prefix: 0.0.0.0/0
+ port_range_min: 1
+ port_range_max: 65535
+ protocol: tcp
+ direction: egress
+ ethertype: IPv4
+ - remote_ip_prefix: 0.0.0.0/0
+ port_range_min: 1
+ port_range_max: 65535
+ protocol: udp
+ direction: ingress
+ ethertype: IPv4
+ - remote_ip_prefix: 0.0.0.0/0
+ port_range_min: 1
+ port_range_max: 65535
+ protocol: udp
+ direction: egress
+ ethertype: IPv4
+
+ kubernetes_master_port:
+ type: cloudify.openstack.nodes.Port
+ properties:
+ openstack_config: *openstack_config
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: public_network
+ - type: cloudify.relationships.depends_on
+ target: public_subnet
+ - type: cloudify.openstack.port_connected_to_security_group
+ target: kubernetes_security_group
+ - type: cloudify.openstack.port_connected_to_floating_ip
+ target: kubernetes_master_ip
+
+ kubernetes_node_port:
+ type: cloudify.openstack.nodes.Port
+ properties:
+ openstack_config: *openstack_config
+ relationships:
+ - type: cloudify.relationships.contained_in
+ target: k8s_node_scaling_tier
+ - type: cloudify.relationships.connected_to
+ target: private_network
+ - type: cloudify.relationships.depends_on
+ target: private_subnet
+ - type: cloudify.openstack.port_connected_to_security_group
+ target: kubernetes_security_group
+
+ private_subnet:
+ type: cloudify.openstack.nodes.Subnet
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: private_subnet_name }
+ relationships:
+ - target: private_network
+ type: cloudify.relationships.contained_in
+
+ private_network:
+ type: cloudify.openstack.nodes.Network
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: private_network_name }
+
+ public_subnet:
+ type: cloudify.openstack.nodes.Subnet
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: public_subnet_name }
+ relationships:
+ - target: public_network
+ type: cloudify.relationships.contained_in
+ - target: router
+ type: cloudify.openstack.subnet_connected_to_router
+
+ public_network:
+ type: cloudify.openstack.nodes.Network
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: public_network_name }
+
+ router:
+ type: cloudify.openstack.nodes.Router
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: router_name }
+ relationships:
+ - target: external_network
+ type: cloudify.relationships.connected_to
+
+ external_network:
+ type: cloudify.openstack.nodes.Network
+ properties:
+ openstack_config: *openstack_config
+ use_external_resource: true
+ resource_id: { get_secret: external_network_name }
+
+ k8s_node_scaling_tier:
+ type: cloudify.nodes.Root
+
+ kubernetes_master_ip:
+ type: cloudify.openstack.nodes.FloatingIP
+ properties:
+ openstack_config: *openstack_config
+ floatingip:
+ floating_network_name: { get_property: [ external_network, resource_id ] }
+
+groups:
+
+ k8s_node_group:
+ members:
+ - kubernetes_node_host
+ - kubernetes_node_port
+
+policies:
+
+ kubernetes_node_vms_scaling_policy:
+ type: cloudify.policies.scaling
+ properties:
+ default_instances: 6
+ targets: [k8s_node_group]
+
+outputs:
+
+ kubernetes_master_public_ip:
+ value: { get_attribute: [ kubernetes_master_ip, floating_ip_address ] }
diff --git a/TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj b/TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj
new file mode 100644
index 0000000000..369239a3b8
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/policies/scale.clj
@@ -0,0 +1,66 @@
+;;;; ============LICENSE_START==========================================
+;;;; ===================================================================
+;;;; Copyright © 2017 AT&T
+;;;;
+;;;; Licensed under the Apache License, Version 2.0 (the "License");
+;;;; you may not use this file except in compliance with the License.
+;;;; You may obtain a copy of the License at
+;;;;
+;;;; http://www.apache.org/licenses/LICENSE-2.0
+;;;;
+;;;; Unless required by applicable law or agreed to in writing, software
+;;;; distributed under the License is distributed on an "AS IS" BASIS,
+;;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+;;;; See the License for the specific language governing permissions and
+;;;; limitations under the License.
+;;;;============LICENSE_END============================================
+
+(where (service #"{{service_selector}}")
+ #(info "got event: " %)
+
+ (where (not (expired? event))
+ (moving-time-window {{moving_window_size}}
+ (fn [events]
+ (let [
+ hostmap (atom {})
+ hostcnt (atom {})
+ ]
+ (do
+ (doseq [m events]
+ (if (nil? (@hostmap (m :host)))
+ (do
+ (swap! hostmap assoc (m :host) (m :metric))
+ (swap! hostcnt assoc (m :host) 1)
+ )
+ (do
+ (swap! hostmap assoc (m :host) (+ (m :metric) (@hostmap (m :host))))
+ (swap! hostcnt assoc (m :host) (inc (@hostcnt (m :host))))
+ )
+ )
+ )
+ (doseq [entry @hostmap]
+ (swap! hostmap assoc (key entry) (/ (val entry) (@hostcnt (key entry))))
+ )
+
+ (let
+ [ hostcnt (count @hostmap)
+ conns (/ (apply + (map (fn [a] (val a)) @hostmap)) hostcnt)
+ cooling (not (nil? (riemann.index/lookup index "scaling" "suspended")))]
+
+ (do
+ (info "cooling=" cooling " scale_direction={{scale_direction}} hostcnt=" hostcnt " scale_threshold={{scale_threshold}} conns=" conns)
+ (if (and (not cooling) ({{scale_direction}} hostcnt {{scale_limit}}) ({{scale_direction}} {{scale_threshold}} conns))
+ (do
+ (info "=== SCALE ===" "{{scale_direction}}")
+ (process-policy-triggers {})
+ (riemann.index/update index {:host "scaling" :service "suspended" :time (unix-time) :description "cooldown flag" :metric 0 :ttl {{cooldown_time}} :state "ok"})
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+ )
+)
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py
new file mode 100644
index 0000000000..4bb3710f06
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/create.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This task will be triggered after the VM is created. It will check whether Docker is up and running.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+
+
+def check_command(command):
+
+ try:
+        process = subprocess.Popen(
+            command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        )
+ except OSError:
+ return False
+
+ output, error = process.communicate()
+
+ ctx.logger.debug('command: {0} '.format(command))
+ ctx.logger.debug('output: {0} '.format(output))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Running `{0}` returns error.'.format(command))
+ return False
+
+ return True
+
+
+def execute_command(_command):
+
+ ctx.logger.debug('_command {0}.'.format(_command))
+
+ subprocess_args = {
+ 'args': _command.split(),
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE
+ }
+
+ ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+ process = subprocess.Popen(**subprocess_args)
+ output, error = process.communicate()
+
+ ctx.logger.debug('command: {0} '.format(_command))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Running `{0}` returns error.'.format(_command))
+ return False
+
+ return output
+
+
+if __name__ == '__main__':
+
+ # Check if Docker PS works
+ docker = check_command('docker ps')
+ if not docker:
+ raise OperationRetry(
+ 'Docker is not present on the system.')
+ ctx.logger.info('Docker is present on the system.')
+
+ # Next check if Cloud Init is running.
+ finished = False
+ ps = execute_command('ps -ef')
+ for line in ps.split('\n'):
+ if '/usr/bin/python /usr/bin/cloud-init modules' in line:
+ raise OperationRetry(
+ 'You provided a Cloud-init Cloud Config to configure instances. '
+ 'Waiting for Cloud-init to complete.')
+ ctx.logger.info('Cloud-init finished.')
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py
new file mode 100644
index 0000000000..7d5dffcc57
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/configure.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script will be executed on the Kubernetes master host. It initializes the master and installs a pod network (Weave).
+
+import pwd
+import grp
+import os
+import re
+import getpass
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import OperationRetry
+from cloudify_rest_client.exceptions import CloudifyClientError
+
+JOIN_COMMAND_REGEX = '^kubeadm join[\sA-Za-z0-9\.\:\-\_]*'
+BOOTSTRAP_TOKEN_REGEX = '[a-z0-9]{6}.[a-z0-9]{16}'
+IP_PORT_REGEX = '[0-9]+(?:\.[0-9]+){3}:[0-9]+'
+NOT_SHA_REGEX='^(?!.*sha256)'
+JCRE_COMPILED = re.compile(JOIN_COMMAND_REGEX)
+BTRE_COMPILED = re.compile(BOOTSTRAP_TOKEN_REGEX)
+IPRE_COMPILED = re.compile(IP_PORT_REGEX)
+SHA_COMPILED=re.compile(NOT_SHA_REGEX)
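+# These regexes pick the bootstrap token and <master-ip>:<port> out of the
+# "kubeadm join ..." line that "kubeadm init" prints, which looks roughly like:
+#   kubeadm join --token <token> <master-ip>:<port> --discovery-token-ca-cert-hash sha256:<hash>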
+
+def execute_command(_command):
+
+ ctx.logger.debug('_command {0}.'.format(_command))
+
+ subprocess_args = {
+ 'args': _command.split(),
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE
+ }
+
+ ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+ process = subprocess.Popen(**subprocess_args)
+ output, error = process.communicate()
+
+ ctx.logger.debug('command: {0} '.format(_command))
+ ctx.logger.debug('output: {0} '.format(output))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Running `{0}` returns error.'.format(_command))
+ return False
+
+ return output
+
+
+def cleanup_and_retry():
+ reset_cluster_command = 'sudo kubeadm reset'
+ output = execute_command(reset_cluster_command)
+    ctx.logger.info('reset_cluster_command: {0} output: {1}'.format(reset_cluster_command, output))
+ raise OperationRetry('Restarting kubernetes because of a problem.')
+
+
+def configure_admin_conf():
+ # Add the kubeadmin config to environment
+ agent_user = getpass.getuser()
+ uid = pwd.getpwnam(agent_user).pw_uid
+ gid = grp.getgrnam('docker').gr_gid
+ admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+
+ execute_command('sudo cp {0} {1}'.format('/etc/kubernetes/admin.conf', admin_file_dest))
+ execute_command('sudo chown {0}:{1} {2}'.format(uid, gid, admin_file_dest))
+
+ with open(os.path.join(os.path.expanduser('~'), '.bashrc'), 'a') as outfile:
+ outfile.write('export KUBECONFIG=$HOME/admin.conf')
+ os.environ['KUBECONFIG'] = admin_file_dest
+
+
+def setup_secrets(_split_master_port, _bootstrap_token):
+    master_ip = _split_master_port[0]
+    master_port = _split_master_port[1]
+ ctx.instance.runtime_properties['master_ip'] = _split_master_port[0]
+ ctx.instance.runtime_properties['master_port'] = _split_master_port[1]
+ ctx.instance.runtime_properties['bootstrap_token'] = _bootstrap_token
+ from cloudify import manager
+ cfy_client = manager.get_rest_client()
+
+ _secret_key = 'kubernetes_master_ip'
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=master_ip)
+ else:
+ cfy_client.secrets.update(key=_secret_key, value=master_ip)
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+ _secret_key = 'kubernetes_master_port'
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=master_port)
+ else:
+ cfy_client.secrets.update(key=_secret_key, value=master_port)
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+ _secret_key = 'bootstrap_token'
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=_bootstrap_token)
+ else:
+ cfy_client.secrets.update(key=_secret_key, value=_bootstrap_token)
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+
+
+if __name__ == '__main__':
+
+ ctx.instance.runtime_properties['KUBERNETES_MASTER'] = True
+ cniCommand1=subprocess.Popen(["sudo", "sysctl", 'net.bridge.bridge-nf-call-iptables=1'], stdout=subprocess.PIPE)
+ # Start Kubernetes Master
+ ctx.logger.info('Attempting to start Kubernetes master.')
+ start_master_command = 'sudo kubeadm init'
+ start_output = execute_command(start_master_command)
+ ctx.logger.debug('start_master_command output: {0}'.format(start_output))
+ # Check if start succeeded.
+ if start_output is False or not isinstance(start_output, basestring):
+ ctx.logger.error('Kubernetes master failed to start.')
+ cleanup_and_retry()
+ ctx.logger.info('Kubernetes master started successfully.')
+
+ # Slice and dice the start_master_command start_output.
+ ctx.logger.info('Attempting to retrieve Kubernetes cluster information.')
+ split_start_output = \
+ [line.strip() for line in start_output.split('\n') if line.strip()]
+ del line
+
+ ctx.logger.debug(
+ 'Kubernetes master start output, split and stripped: {0}'.format(
+ split_start_output))
+ split_join_command = ''
+ for li in split_start_output:
+ ctx.logger.debug('li in split_start_output: {0}'.format(li))
+ if re.match(JCRE_COMPILED, li):
+ split_join_command = re.split('\s', li)
+ del li
+ ctx.logger.info('split_join_command: {0}'.format(split_join_command))
+
+ if not split_join_command:
+ ctx.logger.error('No join command in split_start_output: {0}'.format(split_join_command))
+ cleanup_and_retry()
+
+ for li in split_join_command:
+ ctx.logger.info('Sorting bits and pieces: li: {0}'.format(li))
+ if (re.match(BTRE_COMPILED, li) and re.match(SHA_COMPILED, li)):
+ bootstrap_token = li
+ elif re.match(IPRE_COMPILED, li):
+ split_master_port = li.split(':')
+ setup_secrets(split_master_port, bootstrap_token)
+ configure_admin_conf()
+
+ weaveCommand1=subprocess.Popen(["kubectl", "version"], stdout=subprocess.PIPE)
+ weaveCommand2=subprocess.Popen(["base64"],stdin=weaveCommand1.stdout, stdout=subprocess.PIPE)
+ kubever = weaveCommand2.communicate()[0]
+ kubever = kubever.replace('\n', '').replace('\r', '')
+ ctx.logger.info("kubever :"+kubever)
+ weaveURL=('https://cloud.weave.works/k8s/net?k8s-version={0}'.format(kubever))
+ ctx.logger.info("weaveURL:" + weaveURL)
+ weaveCommand4=subprocess.Popen(["kubectl","apply","-f",weaveURL],stdout=subprocess.PIPE)
+ weaveResult= weaveCommand4.communicate()[0]
+ ctx.logger.info("weaveResult :"+weaveResult)
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py
new file mode 100644
index 0000000000..bbc166b134
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_master/start.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script will be executed on the master host. It checks whether kube-dns is running and stores the cluster credentials as secrets in Cloudify.
+
+import os
+import subprocess
+import pip
+try:
+ import yaml
+except ImportError:
+ pip.main(['install', 'pyyaml'])
+ import yaml
+
+from cloudify import ctx
+from cloudify.exceptions import RecoverableError
+from cloudify import manager
+
+
+def execute_command(_command):
+
+ ctx.logger.debug('_command {0}.'.format(_command))
+
+ subprocess_args = {
+ 'args': _command.split(),
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE
+ }
+
+ ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+ process = subprocess.Popen(**subprocess_args)
+ output, error = process.communicate()
+
+ ctx.logger.debug('command: {0} '.format(_command))
+ ctx.logger.debug('output: {0} '.format(output))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Running `{0}` returns error.'.format(_command))
+ return False
+
+ return output
+
+
+def check_kubedns_status(_get_pods):
+
+ ctx.logger.debug('get_pods: {0} '.format(_get_pods))
+
+ for pod_line in _get_pods.split('\n'):
+ ctx.logger.debug('pod_line: {0} '.format(pod_line))
+ try:
+ _namespace, _name, _ready, _status, _restarts, _age = pod_line.split()
+ except ValueError:
+ pass
+ else:
+ if 'kube-dns' in _name and 'Running' not in _status:
+ return False
+ elif 'kube-dns' in _name and 'Running' in _status:
+ return True
+ return False
+
+
+if __name__ == '__main__':
+
+ cfy_client = manager.get_rest_client()
+
+ # Checking if the Kubernetes DNS service is running (last step).
+ admin_file_dest = os.path.join(os.path.expanduser('~'), 'admin.conf')
+ os.environ['KUBECONFIG'] = admin_file_dest
+ get_pods = execute_command('kubectl get pods --all-namespaces')
+ if not check_kubedns_status(get_pods):
+ raise RecoverableError('kube-dns not Running')
+
+    # Store the Kubernetes master configuration.
+ kubernetes_master_config = {}
+ with open(admin_file_dest, 'r') as outfile:
+ try:
+ kubernetes_master_config = yaml.load(outfile)
+ except yaml.YAMLError as e:
+            raise RecoverableError(
+ 'Unable to read Kubernetes Admin file: {0}: {1}'.format(
+ admin_file_dest, str(e)))
+ ctx.instance.runtime_properties['configuration_file_content'] = \
+ kubernetes_master_config
+
+ clusters = kubernetes_master_config.get('clusters')
+ _clusters = {}
+ for cluster in clusters:
+ __name = cluster.get('name')
+ _cluster = cluster.get('cluster', {})
+ _secret_key = '%s_certificate_authority_data' % __name
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+ else:
+ cfy_client.secrets.update(key=_secret_key, value=_cluster.get('certificate-authority-data'))
+ ctx.instance.runtime_properties['%s_certificate_authority_data' % __name] = _cluster.get('certificate-authority-data')
+ _clusters[__name] = _cluster
+ del __name
+
+ contexts = kubernetes_master_config.get('contexts')
+ _contexts = {}
+ for context in contexts:
+ __name = context.get('name')
+ _context = context.get('context', {})
+ _contexts[__name] = _context
+ del __name
+
+ users = kubernetes_master_config.get('users')
+ _users = {}
+ for user in users:
+ __name = user.get('name')
+ _user = user.get('user', {})
+ _secret_key = '%s_client_certificate_data' % __name
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=_user.get('client-certificate-data'))
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+ else:
+ cfy_client.secrets.update(key=_secret_key, value=_user.get('client-certificate-data'))
+ _secret_key = '%s_client_key_data' % __name
+ if cfy_client and not len(cfy_client.secrets.list(key=_secret_key)) == 1:
+ cfy_client.secrets.create(key=_secret_key, value=_user.get('client-key-data'))
+ ctx.logger.info('Set secret: {0}.'.format(_secret_key))
+ elif cfy_client:
+ cfy_client.secrets.update(key=_secret_key, value=_user.get('client-key-data'))
+ ctx.instance.runtime_properties['%s_client_certificate_data' % __name] = _user.get('client-certificate-data')
+ ctx.instance.runtime_properties['%s_client_key_data' % __name] = _user.get('client-key-data')
+ _users[__name] = _user
+ del __name
+
+ ctx.instance.runtime_properties['kubernetes'] = {
+ 'clusters': _clusters,
+ 'contexts': _contexts,
+ 'users': _users
+ }
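The kubeconfig material stored above (cluster CA data plus the client certificate and key per user) can be consumed from any other Cloudify lifecycle script through the same REST client. A minimal sketch, assuming the Cloudify 4.x secrets API and the kubeadm default 'kubernetes' / 'kubernetes-admin' names (illustrative assumptions, not guaranteed by this script):

from cloudify import ctx, manager

cfy_client = manager.get_rest_client()

# Secret keys follow the '<name>_...' pattern created above; the prefixes are assumed.
ca_data = cfy_client.secrets.get('kubernetes_certificate_authority_data').value
cert_data = cfy_client.secrets.get('kubernetes-admin_client_certificate_data').value
key_data = cfy_client.secrets.get('kubernetes-admin_client_key_data').value
ctx.logger.info('Fetched kubeconfig material from the secrets store.')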
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py
new file mode 100644
index 0000000000..69faaa80d1
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/kubernetes_node/configure.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# After the K8s master is up and running, this script is triggered on each worker node. It joins the node to the cluster and mounts the NFS directory.
+
+import subprocess
+from cloudify import ctx
+from cloudify.exceptions import NonRecoverableError
+
+START_COMMAND = 'sudo kubeadm join --token {0} {1}:{2}'
+
+
+def execute_command(_command):
+
+ ctx.logger.debug('_command {0}.'.format(_command))
+
+ subprocess_args = {
+ 'args': _command.split(),
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE
+ }
+
+ ctx.logger.debug('subprocess_args {0}.'.format(subprocess_args))
+
+ process = subprocess.Popen(**subprocess_args)
+ output, error = process.communicate()
+
+ ctx.logger.debug('command: {0} '.format(_command))
+ ctx.logger.debug('output: {0} '.format(output))
+ ctx.logger.debug('error: {0} '.format(error))
+ ctx.logger.debug('process.returncode: {0} '.format(process.returncode))
+
+ if process.returncode:
+ ctx.logger.error('Running `{0}` returned an error.'.format(_command))
+ return False
+
+ return output
+
+
+if __name__ == '__main__':
+
+ hostname = execute_command('hostname')
+ ctx.instance.runtime_properties['hostname'] = hostname.rstrip('\n')
+
+ # Get the master cluster info.
+ masters = \
+ [x for x in ctx.instance.relationships if
+ x.target.instance.runtime_properties.get(
+ 'KUBERNETES_MASTER', False)]
+ if len(masters) != 1:
+ raise NonRecoverableError(
+ 'Currently, a Kubernetes node must have a '
+ 'dependency on one Kubernetes master.')
+ master = masters[0]
+ bootstrap_token = \
+ master.target.instance.runtime_properties['bootstrap_token']
+ master_ip = \
+ master.target.instance.runtime_properties['master_ip']
+ master_port = \
+ master.target.instance.runtime_properties['master_port']
+
+ # Allow bridged traffic to traverse iptables, as required by kubeadm.
+ execute_command('sudo sysctl net.bridge.bridge-nf-call-iptables=1')
+
+ # Join the cluster.
+ join_command = START_COMMAND.format(bootstrap_token, master_ip, master_port)
+ execute_command(join_command)
+
+ # Mount the NFS share exported by the master.
+ mount_command = \
+ 'sudo mount -t nfs -o proto=tcp,port=2049 {0}:/dockerdata-nfs /dockerdata-nfs'.format(master_ip)
+ execute_command(mount_command) \ No newline at end of file
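Both configure scripts only log failures from execute_command and return False, so a failed 'kubeadm join' or NFS mount would not abort the install workflow. A minimal sketch of a stricter variant that could be swapped in (an illustration under the same Cloudify context, not part of this change):

import subprocess

from cloudify import ctx
from cloudify.exceptions import NonRecoverableError


def execute_command_strict(command):
    # Run the command and fail the operation on a non-zero exit code.
    process = subprocess.Popen(command.split(),
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    output, error = process.communicate()
    ctx.logger.debug('command: {0} output: {1}'.format(command, output))
    if process.returncode:
        raise NonRecoverableError(
            'Command `{0}` failed: {1}'.format(command, error))
    return output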
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh b/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh
new file mode 100644
index 0000000000..2d59acd99d
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/nfs.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# This script sets up the NFS server on the k8s master.
+
+mkdir -p /dockerdata-nfs
+chmod 777 /dockerdata-nfs
+yum -y install nfs-utils
+systemctl enable nfs-server.service
+systemctl start nfs-server.service
+echo "/dockerdata-nfs *(rw,no_root_squash,no_subtree_check)" |sudo tee --append /etc/exports
+echo "/home/centos/dockerdata-nfs /dockerdata-nfs none bind 0 0" |sudo tee --append /etc/fstab
+exportfs -a \ No newline at end of file
diff --git a/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py b/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py
new file mode 100644
index 0000000000..7680fac957
--- /dev/null
+++ b/TOSCA/kubernetes-cluster-TOSCA/scripts/tasks.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# ============LICENSE_START==========================================
+# ===================================================================
+# Copyright © 2017 AT&T
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#============LICENSE_END============================================
+
+# Fabric tasks for labeling, draining and deleting Kubernetes worker nodes.
+
+from fabric.api import run
+
+
+def label_node(labels, hostname):
+ if labels:
+ label_list = []
+ for key, value in labels.items():
+ label_pair_string = '%s=%s' % (key, value)
+ label_list.append(label_pair_string)
+ label_string = ' '.join(label_list)
+ command = 'kubectl label nodes %s %s' % (hostname, label_string)
+ run(command)
+
+
+def stop_node(hostname):
+ command = 'kubectl drain %s' % (hostname)
+ run(command)
+
+
+def delete_node(hostname):
+ command = 'kubectl delete no %s' % (hostname)
+ run(command)
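These are plain Fabric 1.x tasks, so they can also be exercised directly against the master over SSH. A minimal usage sketch; the host, user, key path and label values are illustrative assumptions only:

from fabric.api import env, execute

import tasks

env.user = 'centos'                    # assumed image default user
env.key_filename = '/path/to/key.pem'  # hypothetical key path

# Label a worker node, then drain and remove it from the cluster.
execute(tasks.label_node, {'disktype': 'nfs'}, 'worker-node-1', hosts=['10.0.0.10'])
execute(tasks.stop_node, 'worker-node-1', hosts=['10.0.0.10'])
execute(tasks.delete_node, 'worker-node-1', hosts=['10.0.0.10'])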
diff --git a/kubernetes/README_HELM b/kubernetes/README_HELM
index c1d5fdf432..0f65120397 100644
--- a/kubernetes/README_HELM
+++ b/kubernetes/README_HELM
@@ -3,7 +3,7 @@ Prerequisites:
- Helm
In order to use Helm with Rancher, check the tiller version installed
-by runing "helm version" on the ranchr CLI
+by running "helm version" on the rancher CLI
and install the appropriate Helm.
Notice both tiller and helm are installed,
but you will need to install on your VM.
diff --git a/kubernetes/aai/resources/model-loader/conf/logback.xml b/kubernetes/aai/resources/config/log/model-loader/logback.xml
index d512d3b91c..d512d3b91c 100644
--- a/kubernetes/aai/resources/model-loader/conf/logback.xml
+++ b/kubernetes/aai/resources/config/log/model-loader/logback.xml
diff --git a/kubernetes/aai/resources/resources/conf/logback.xml b/kubernetes/aai/resources/config/log/resources/logback.xml
index 6cfc293f46..6cfc293f46 100644
--- a/kubernetes/aai/resources/resources/conf/logback.xml
+++ b/kubernetes/aai/resources/config/log/resources/logback.xml
diff --git a/kubernetes/aai/resources/search-data-service/conf/logback.xml b/kubernetes/aai/resources/config/log/search-data-service/logback.xml
index 14ed4142fe..14ed4142fe 100644
--- a/kubernetes/aai/resources/search-data-service/conf/logback.xml
+++ b/kubernetes/aai/resources/config/log/search-data-service/logback.xml
diff --git a/kubernetes/aai/resources/sparky-be/conf/logback.xml b/kubernetes/aai/resources/config/log/sparky-be/logback.xml
index c1b99c183f..c1b99c183f 100644
--- a/kubernetes/aai/resources/sparky-be/conf/logback.xml
+++ b/kubernetes/aai/resources/config/log/sparky-be/logback.xml
diff --git a/kubernetes/aai/resources/traversal/conf/logback.xml b/kubernetes/aai/resources/config/log/traversal/logback.xml
index fb8d8a53bf..fb8d8a53bf 100644
--- a/kubernetes/aai/resources/traversal/conf/logback.xml
+++ b/kubernetes/aai/resources/config/log/traversal/logback.xml
diff --git a/kubernetes/aai/templates/aai-deployment.yaml b/kubernetes/aai/templates/aai-deployment.yaml
index e0e422b43a..4e8d82f42f 100644
--- a/kubernetes/aai/templates/aai-deployment.yaml
+++ b/kubernetes/aai/templates/aai-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: aai-service
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.aaiServiceReplicas }}
selector:
matchLabels:
app: aai-service
diff --git a/kubernetes/aai/templates/aai-resources-deployment.yaml b/kubernetes/aai/templates/aai-resources-deployment.yaml
index 5ca0c165df..dd7d7cc9d9 100644
--- a/kubernetes/aai/templates/aai-resources-deployment.yaml
+++ b/kubernetes/aai/templates/aai-resources-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: aai-resources
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.aaiResourceReplicas }}
selector:
matchLabels:
app: aai-resources
@@ -97,16 +98,8 @@ spec:
emptyDir: {}
- name: aai-resources-log-conf
configMap:
- name: aai-resources-configmap
+ name: aai-resources-log-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: aai-resources-configmap
- namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/resources/conf/logback.xml").AsConfig | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml b/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml
index 772c3a79c8..ebd69132a2 100644
--- a/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml
+++ b/kubernetes/aai/templates/aai-resources-traversal-configmap.yaml
@@ -14,4 +14,20 @@ metadata:
namespace: {{ .Values.nsPrefix }}-aai
data:
{{ tpl (.Files.Glob "resources/config/aai-data/environments/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-resources-log-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/resources/logback.xml").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-traversal-log-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/traversal/logback.xml").AsConfig . | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/aai-traversal-deployment.yaml b/kubernetes/aai/templates/aai-traversal-deployment.yaml
index fda0055d21..38e7fb0847 100644
--- a/kubernetes/aai/templates/aai-traversal-deployment.yaml
+++ b/kubernetes/aai/templates/aai-traversal-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: aai-traversal
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.aaiTraversalReplicas }}
selector:
matchLabels:
app: aai-traversal
@@ -99,16 +100,8 @@ spec:
emptyDir: {}
- name: aai-traversal-log-conf
configMap:
- name: aai-traversal-configmap
+ name: aai-traversal-log-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: aai-traversal-configmap
- namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/traversal/conf/logback.xml").AsConfig | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/data-router-deployment.yaml b/kubernetes/aai/templates/data-router-deployment.yaml
index 06dbc9118e..5a7f9e927f 100644
--- a/kubernetes/aai/templates/data-router-deployment.yaml
+++ b/kubernetes/aai/templates/data-router-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: data-router
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.dataRouterReplicas }}
selector:
matchLabels:
app: data-router
diff --git a/kubernetes/aai/templates/elasticsearch-deployment.yaml b/kubernetes/aai/templates/elasticsearch-deployment.yaml
index baa1cb8523..d3535d07c6 100644
--- a/kubernetes/aai/templates/elasticsearch-deployment.yaml
+++ b/kubernetes/aai/templates/elasticsearch-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: elasticsearch
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.elasticsearchReplicas }}
selector:
matchLabels:
app: elasticsearch
diff --git a/kubernetes/aai/templates/hbase-deployment.yaml b/kubernetes/aai/templates/hbase-deployment.yaml
index 3c1949c35a..5b2a98cc89 100644
--- a/kubernetes/aai/templates/hbase-deployment.yaml
+++ b/kubernetes/aai/templates/hbase-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: hbase
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.hbaseReplicas }}
selector:
matchLabels:
app: hbase
diff --git a/kubernetes/aai/templates/modelloader-deployment-configmap.yaml b/kubernetes/aai/templates/modelloader-deployment-configmap.yaml
index 7f37bd605b..e5a59a124d 100644
--- a/kubernetes/aai/templates/modelloader-deployment-configmap.yaml
+++ b/kubernetes/aai/templates/modelloader-deployment-configmap.yaml
@@ -15,4 +15,12 @@ metadata:
type: Opaque
data:
{{ tpl (.Files.Glob "resources/config/model-loader/appconfig/auth/*").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-model-loader-log-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/model-loader/logback.xml").AsConfig . | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/modelloader-deployment.yaml b/kubernetes/aai/templates/modelloader-deployment.yaml
index c81305817c..c5a788c287 100644
--- a/kubernetes/aai/templates/modelloader-deployment.yaml
+++ b/kubernetes/aai/templates/modelloader-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: model-loader-service
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.modelLoaderReplicas }}
selector:
matchLabels:
app: model-loader-service
@@ -68,16 +69,8 @@ spec:
emptyDir: {}
- name: aai-model-loader-log-conf
configMap:
- name: aai-model-loader-configmap
+ name: aai-model-loader-log-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: aai-model-loader-configmap
- namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/model-loader/conf/logback.xml").AsConfig | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/search-data-service-configmap.yaml b/kubernetes/aai/templates/search-data-service-configmap.yaml
index c392e412aa..8f707d4ca3 100644
--- a/kubernetes/aai/templates/search-data-service-configmap.yaml
+++ b/kubernetes/aai/templates/search-data-service-configmap.yaml
@@ -23,4 +23,12 @@ metadata:
namespace: {{ .Values.nsPrefix }}-aai
data:
{{ tpl (.Files.Glob "resources/config/search-data-service/appconfig/auth/search_policy.json").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-search-data-service-log-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/search-data-service/logback.xml").AsConfig . | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/search-data-service-deployment.yaml b/kubernetes/aai/templates/search-data-service-deployment.yaml
index 392a754459..7202e0d39c 100644
--- a/kubernetes/aai/templates/search-data-service-deployment.yaml
+++ b/kubernetes/aai/templates/search-data-service-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: search-data-service
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.searchDataServiceReplicas }}
selector:
matchLabels:
app: search-data-service
@@ -89,16 +90,8 @@ spec:
emptyDir: {}
- name: aai-search-data-service-log-conf
configMap:
- name: aai-search-data-service-configmap
+ name: aai-search-data-service-log-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: aai-search-data-service-configmap
- namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/search-data-service/conf/logback.xml").AsConfig | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml b/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml
index 55dea653cd..590608b566 100644
--- a/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml
+++ b/kubernetes/aai/templates/sparky-be-deployment-configmap.yaml
@@ -31,4 +31,12 @@ metadata:
type: Opaque
data:
{{ tpl (.Files.Glob "resources/config/sparky-be/appconfig/auth/*").AsSecrets . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: aai-sparky-be-log-configmap
+ namespace: {{ .Values.nsPrefix }}-aai
+data:
+{{ tpl (.Files.Glob "resources/config/log/sparky-be/logback.xml").AsConfig . | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/templates/sparky-be-deployment.yaml b/kubernetes/aai/templates/sparky-be-deployment.yaml
index dfeab6a394..e5339283ab 100644
--- a/kubernetes/aai/templates/sparky-be-deployment.yaml
+++ b/kubernetes/aai/templates/sparky-be-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: sparky-be
namespace: "{{ .Values.nsPrefix }}-aai"
spec:
+ replicas: {{ .Values.sparkyReplicas }}
selector:
matchLabels:
app: sparky-be
@@ -101,16 +102,8 @@ spec:
emptyDir: {}
- name: aai-sparky-be-log-conf
configMap:
- name: aai-sparky-be-configmap
+ name: aai-sparky-be-log-configmap
restartPolicy: Always
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: aai-sparky-be-configmap
- namespace: {{ .Values.nsPrefix }}-aai
-data:
-{{ (.Files.Glob "resources/sparky-be/conf/logback.xml").AsConfig | indent 2 }}
#{{ end }}
diff --git a/kubernetes/aai/values.yaml b/kubernetes/aai/values.yaml
index 6912c7374a..b05859737e 100644
--- a/kubernetes/aai/values.yaml
+++ b/kubernetes/aai/values.yaml
@@ -5,7 +5,15 @@ nodePortPrefix: 302
# POLICY hotfix - Note this must be temporary
# See https://jira.onap.org/browse/POLICY-510
aaiServiceClusterIp: 10.43.255.254
-
+aaiServiceReplicas: 1
+aaiResourceReplicas: 1
+aaiTraversalReplicas: 1
+dataRouterReplicas: 1
+elasticsearchReplicas: 1
+hbaseReplicas: 1
+modelLoaderReplicas: 1
+searchDataServiceReplicas: 1
+sparkyReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
aaiProxy: aaionap/haproxy
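With the replica counts promoted to values.yaml, the number of pods per A&AI component can be tuned at deploy time, for example with "helm install ... --set aaiResourceReplicas=2" or an override values file, without editing the templates; the same pattern is applied to the other charts below.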
diff --git a/kubernetes/appc/templates/appc-deployment.yaml b/kubernetes/appc/templates/appc-deployment.yaml
index 8caf286043..35e947093c 100644
--- a/kubernetes/appc/templates/appc-deployment.yaml
+++ b/kubernetes/appc/templates/appc-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: appc
namespace: "{{ .Values.nsPrefix }}-appc"
spec:
+ replicas: {{ .Values.appcReplicas }}
selector:
matchLabels:
app: appc
diff --git a/kubernetes/appc/templates/db-deployment.yaml b/kubernetes/appc/templates/db-deployment.yaml
index 607a0df81f..0b29393cb5 100644
--- a/kubernetes/appc/templates/db-deployment.yaml
+++ b/kubernetes/appc/templates/db-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: appc-dbhost
namespace: "{{ .Values.nsPrefix }}-appc"
spec:
+ replicas: {{ .Values.dbReplicas }}
selector:
matchLabels:
app: appc-dbhost
@@ -45,4 +46,4 @@ spec:
claimName: appc-db
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/appc/templates/dgbuilder-deployment.yaml b/kubernetes/appc/templates/dgbuilder-deployment.yaml
index ed69c05f38..3bbc6715aa 100644
--- a/kubernetes/appc/templates/dgbuilder-deployment.yaml
+++ b/kubernetes/appc/templates/dgbuilder-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: appc-dgbuilder
namespace: "{{ .Values.nsPrefix }}-appc"
spec:
+ replicas: {{ .Values.dgbuilderReplicas }}
selector:
matchLabels:
app: appc-dgbuilder
diff --git a/kubernetes/appc/values.yaml b/kubernetes/appc/values.yaml
index 882267c41d..0bf4da9c09 100644
--- a/kubernetes/appc/values.yaml
+++ b/kubernetes/appc/values.yaml
@@ -1,9 +1,12 @@
nsPrefix: onap
pullPolicy: Always
nodePortPrefix: 302
+appcReplicas: 1
+dbReplicas: 1
+dgbuilderReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
appc: nexus3.onap.org:10001/openecomp/appc-image:v1.2.0
mysqlServer: mysql/mysql-server:5.6
dgbuilderSdnc: nexus3.onap.org:10001/onap/ccsdk-dgbuilder-image:v0.1.0
- filebeat: docker.elastic.co/beats/filebeat:5.5.0 \ No newline at end of file
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/message-router/templates/message-router-dmaap.yaml b/kubernetes/message-router/templates/message-router-dmaap.yaml
index 1d9777e760..cecf69b86f 100644
--- a/kubernetes/message-router/templates/message-router-dmaap.yaml
+++ b/kubernetes/message-router/templates/message-router-dmaap.yaml
@@ -5,6 +5,7 @@ metadata:
name: dmaap
namespace: "{{ .Values.nsPrefix }}-message-router"
spec:
+ replicas: {{ .Values.dmaapReplicas }}
selector:
matchLabels:
app: dmaap
diff --git a/kubernetes/message-router/templates/message-router-kafka.yaml b/kubernetes/message-router/templates/message-router-kafka.yaml
index 509b15fc53..94e01b31c0 100644
--- a/kubernetes/message-router/templates/message-router-kafka.yaml
+++ b/kubernetes/message-router/templates/message-router-kafka.yaml
@@ -5,6 +5,7 @@ metadata:
name: global-kafka
namespace: "{{ .Values.nsPrefix }}-message-router"
spec:
+ replicas: {{ .Values.kafkaReplicas }}
selector:
matchLabels:
app: global-kafka
diff --git a/kubernetes/message-router/templates/message-router-zookeeper.yaml b/kubernetes/message-router/templates/message-router-zookeeper.yaml
index 47cdb51f2f..5a5bc9f355 100644
--- a/kubernetes/message-router/templates/message-router-zookeeper.yaml
+++ b/kubernetes/message-router/templates/message-router-zookeeper.yaml
@@ -5,6 +5,7 @@ metadata:
name: zookeeper
namespace: "{{ .Values.nsPrefix }}-message-router"
spec:
+ replicas: {{ .Values.zookeeperReplicas }}
selector:
matchLabels:
app: zookeeper
@@ -41,4 +42,4 @@ spec:
claimName: message-router-zookeeper
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/message-router/values.yaml b/kubernetes/message-router/values.yaml
index 92067294f8..6782b71746 100644
--- a/kubernetes/message-router/values.yaml
+++ b/kubernetes/message-router/values.yaml
@@ -1,6 +1,9 @@
nsPrefix: onap
pullPolicy: Always
nodePortPrefix: 302
+dmaapReplicas: 1
+kafkaReplicas: 1
+zookeeperReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
dmaap: attos/dmaap:latest
diff --git a/kubernetes/msb/templates/msb-discovery-deployment.yaml b/kubernetes/msb/templates/msb-discovery-deployment.yaml
index 5f8db61ac5..5c01febd1e 100644
--- a/kubernetes/msb/templates/msb-discovery-deployment.yaml
+++ b/kubernetes/msb/templates/msb-discovery-deployment.yaml
@@ -16,6 +16,21 @@ spec:
name: msb-discovery
spec:
hostname: msb-discovery
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - msb-consul
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: msb-discovery-readiness
containers:
- args:
image: {{ .Values.image.discovery }}
@@ -34,4 +49,4 @@ spec:
imagePullPolicy: {{ .Values.pullPolicy }}
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/msb/templates/msb-eag-deployment.yaml b/kubernetes/msb/templates/msb-eag-deployment.yaml
index 714f19b1a5..10cbbe115d 100644
--- a/kubernetes/msb/templates/msb-eag-deployment.yaml
+++ b/kubernetes/msb/templates/msb-eag-deployment.yaml
@@ -16,6 +16,21 @@ spec:
name: msb-eag
spec:
hostname: msb-eag
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - msb-discovery
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: msb-eag-readiness
containers:
- args:
image: {{ .Values.image.apigateway }}
@@ -38,4 +53,4 @@ spec:
imagePullPolicy: {{ .Values.pullPolicy}}
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/msb/templates/msb-iag-deployment.yaml b/kubernetes/msb/templates/msb-iag-deployment.yaml
index ca4d5a033f..18dbc67f86 100644
--- a/kubernetes/msb/templates/msb-iag-deployment.yaml
+++ b/kubernetes/msb/templates/msb-iag-deployment.yaml
@@ -16,6 +16,21 @@ spec:
name: msb-iag
spec:
hostname: msb-iag
+ initContainers:
+ - command:
+ - /root/ready.py
+ args:
+ - --container-name
+ - msb-discovery
+ env:
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ apiVersion: v1
+ fieldPath: metadata.namespace
+ image: {{ .Values.image.readiness }}
+ imagePullPolicy: {{ .Values.pullPolicy }}
+ name: msb-iag-readiness
containers:
- args:
image: {{ .Values.image.apigateway }}
@@ -38,4 +53,4 @@ spec:
imagePullPolicy: "{{ .Values.pullPolicy}}"
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/msb/values.yaml b/kubernetes/msb/values.yaml
index 7c1b16b8dc..9bbdb929db 100644
--- a/kubernetes/msb/values.yaml
+++ b/kubernetes/msb/values.yaml
@@ -1,6 +1,7 @@
nsPrefix: onap
pullPolicy: IfNotPresent
image:
+ readiness: oomk8s/readiness-check:1.0.0
consul: consul:0.9.3
discovery: nexus3.onap.org:10001/onap/msb/msb_discovery:1.0.0
apigateway: nexus3.onap.org:10001/onap/msb/msb_apigateway:1.0.0
diff --git a/kubernetes/mso/templates/db-deployment.yaml b/kubernetes/mso/templates/db-deployment.yaml
index e6f09c565c..cc4c656a04 100644
--- a/kubernetes/mso/templates/db-deployment.yaml
+++ b/kubernetes/mso/templates/db-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: mariadb
namespace: "{{ .Values.nsPrefix }}-mso"
spec:
+ replicas: {{ .Values.dbReplicas }}
selector:
matchLabels:
app: mariadb
diff --git a/kubernetes/mso/templates/mso-deployment.yaml b/kubernetes/mso/templates/mso-deployment.yaml
index 580facdaa6..b4146401c4 100644
--- a/kubernetes/mso/templates/mso-deployment.yaml
+++ b/kubernetes/mso/templates/mso-deployment.yaml
@@ -5,7 +5,7 @@ metadata:
name: mso
namespace: "{{ .Values.nsPrefix }}-mso"
spec:
- replicas: 1
+ replicas: {{ .Values.msoReplicas }}
selector:
matchLabels:
app: mso
diff --git a/kubernetes/mso/values.yaml b/kubernetes/mso/values.yaml
index 73b9f31446..77d3d60edf 100644
--- a/kubernetes/mso/values.yaml
+++ b/kubernetes/mso/values.yaml
@@ -7,6 +7,8 @@ openStackKeyStoneUrl: "http://1.2.3.4:5000"
dmaapTopic: "AUTO"
openStackServiceTenantName: "service"
openStackEncryptedPasswordHere: "c124921a3a0efbe579782cde8227681e"
+msoReplicas: 1
+dbReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
mso: nexus3.onap.org:10001/openecomp/mso:v1.1.1
diff --git a/kubernetes/policy/templates/dep-brmsgw.yaml b/kubernetes/policy/templates/dep-brmsgw.yaml
index 667ccc6251..6c0533ce15 100644
--- a/kubernetes/policy/templates/dep-brmsgw.yaml
+++ b/kubernetes/policy/templates/dep-brmsgw.yaml
@@ -5,7 +5,7 @@ metadata:
name: brmsgw
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
- replicas: 1
+ replicas: {{ .Values.brmsgwReplicas }}
selector:
matchLabels:
app: brmsgw
diff --git a/kubernetes/policy/templates/dep-drools.yaml b/kubernetes/policy/templates/dep-drools.yaml
index afa044abe9..1dc16cf374 100644
--- a/kubernetes/policy/templates/dep-drools.yaml
+++ b/kubernetes/policy/templates/dep-drools.yaml
@@ -5,7 +5,7 @@ metadata:
name: drools
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
- replicas: 1
+ replicas: {{ .Values.droolsReplicas }}
selector:
matchLabels:
app: drools
diff --git a/kubernetes/policy/templates/dep-maria.yaml b/kubernetes/policy/templates/dep-maria.yaml
index c921e8c890..e42230f994 100644
--- a/kubernetes/policy/templates/dep-maria.yaml
+++ b/kubernetes/policy/templates/dep-maria.yaml
@@ -5,6 +5,7 @@ metadata:
name: mariadb
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
+ replicas: {{ .Values.mariadbReplicas }}
selector:
matchLabels:
app: mariadb
@@ -35,4 +36,4 @@ spec:
path: /etc/localtime
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/policy/templates/dep-nexus.yaml b/kubernetes/policy/templates/dep-nexus.yaml
index 4adb05f630..2db40b2763 100644
--- a/kubernetes/policy/templates/dep-nexus.yaml
+++ b/kubernetes/policy/templates/dep-nexus.yaml
@@ -5,6 +5,7 @@ metadata:
name: nexus
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
+ replicas: {{ .Values.nexusReplicas }}
selector:
matchLabels:
app: nexus
diff --git a/kubernetes/policy/templates/dep-pap.yaml b/kubernetes/policy/templates/dep-pap.yaml
index d3d5ff3738..68ccb5a746 100644
--- a/kubernetes/policy/templates/dep-pap.yaml
+++ b/kubernetes/policy/templates/dep-pap.yaml
@@ -5,6 +5,7 @@ metadata:
name: pap
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
+ replicas: {{ .Values.papReplicas }}
selector:
matchLabels:
app: pap
diff --git a/kubernetes/policy/templates/dep-pdp.yaml b/kubernetes/policy/templates/dep-pdp.yaml
index b62e280895..f867fa54e2 100644
--- a/kubernetes/policy/templates/dep-pdp.yaml
+++ b/kubernetes/policy/templates/dep-pdp.yaml
@@ -5,7 +5,7 @@ metadata:
name: pdp
namespace: "{{ .Values.nsPrefix }}-policy"
spec:
- replicas: 1
+ replicas: {{ .Values.pdpReplicas }}
selector:
matchLabels:
app: pdp
diff --git a/kubernetes/policy/values.yaml b/kubernetes/policy/values.yaml
index f52dc445e6..79b59cf58e 100644
--- a/kubernetes/policy/values.yaml
+++ b/kubernetes/policy/values.yaml
@@ -5,7 +5,12 @@ nodePortPrefix: 302
# POLICY hotfix - Note this must be temporary
# See https://jira.onap.org/browse/POLICY-510
aaiServiceClusterIp: 10.43.255.254
-
+brmsgwReplicas: 1
+droolsReplicas: 1
+mariadbReplicas: 1
+nexusReplicas: 1
+papReplicas: 1
+pdpReplicas: 1
image:
readiness: oomk8s/readiness-check
readinessVersion: 1.0.0
@@ -18,4 +23,4 @@ image:
policyNexus: nexus3.onap.org:10001/onap/policy/policy-nexus
policyNexusVersion: v1.1.1
ubuntu: ubuntu:16.04
- filebeat: docker.elastic.co/beats/filebeat:5.5.0 \ No newline at end of file
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0
diff --git a/kubernetes/portal/templates/portal-apps-deployment.yaml b/kubernetes/portal/templates/portal-apps-deployment.yaml
index b45cec4946..c6a38bd4cf 100755
--- a/kubernetes/portal/templates/portal-apps-deployment.yaml
+++ b/kubernetes/portal/templates/portal-apps-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: portalapps
namespace: "{{ .Values.nsPrefix }}-portal"
spec:
+ replicas: {{ .Values.portalAppsReplicas }}
selector:
matchLabels:
app: portalapps
diff --git a/kubernetes/portal/templates/portal-mariadb-deployment.yaml b/kubernetes/portal/templates/portal-mariadb-deployment.yaml
index fafcad2772..9db512a120 100755
--- a/kubernetes/portal/templates/portal-mariadb-deployment.yaml
+++ b/kubernetes/portal/templates/portal-mariadb-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: portaldb
namespace: "{{ .Values.nsPrefix }}-portal"
spec:
+ replicas: {{ .Values.portalDbReplicas }}
selector:
matchLabels:
app: portaldb
diff --git a/kubernetes/portal/templates/portal-vnc-dep.yaml b/kubernetes/portal/templates/portal-vnc-dep.yaml
index d5f433816e..3a80572238 100644
--- a/kubernetes/portal/templates/portal-vnc-dep.yaml
+++ b/kubernetes/portal/templates/portal-vnc-dep.yaml
@@ -22,6 +22,7 @@ metadata:
name: vnc-portal
namespace: "{{ .Values.nsPrefix }}-portal"
spec:
+ replicas: {{ .Values.vncPortalReplicas }}
selector:
matchLabels:
app: vnc-portal
diff --git a/kubernetes/portal/templates/portal-widgets-deployment.yaml b/kubernetes/portal/templates/portal-widgets-deployment.yaml
index 0dd92027df..6f27ef6e80 100644
--- a/kubernetes/portal/templates/portal-widgets-deployment.yaml
+++ b/kubernetes/portal/templates/portal-widgets-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: portalwidgets
namespace: "{{ .Values.nsPrefix }}-portal"
spec:
+ replicas: {{ .Values.portalWidgetsReplicas }}
selector:
matchLabels:
app: portalwidgets
diff --git a/kubernetes/portal/values.yaml b/kubernetes/portal/values.yaml
index 34f5cc444b..f34e6a08fd 100644
--- a/kubernetes/portal/values.yaml
+++ b/kubernetes/portal/values.yaml
@@ -1,6 +1,10 @@
nsPrefix: onap
pullPolicy: Always
nodePortPrefix: 302
+portalAppsReplicas: 1
+portalDbReplicas: 1
+vncPortalReplicas: 1
+portalWidgetsReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
portalapps: nexus3.onap.org:10001/onap/portal-apps:v1.3.0
diff --git a/kubernetes/readiness/docker/init/ready.py b/kubernetes/readiness/docker/init/ready.py
index c5b55eef18..6d2edef81c 100644
--- a/kubernetes/readiness/docker/init/ready.py
+++ b/kubernetes/readiness/docker/init/ready.py
@@ -1,8 +1,13 @@
#!/usr/bin/python
-from kubernetes import client, config
-import time, argparse, logging, sys, os
+import getopt
+import logging
+import os
+import sys
+import time
-#extract env variables.
+from kubernetes import client
+
+# extract env variables.
namespace = os.environ['NAMESPACE']
cert = os.environ['CERT']
host = os.environ['KUBERNETES_SERVICE_HOST']
@@ -11,12 +16,7 @@ token_path = os.environ['TOKEN']
with open(token_path, 'r') as token_file:
token = token_file.read().replace('\n', '')
-client.configuration.host = "https://" + host
-client.configuration.ssl_ca_cert = cert
-client.configuration.api_key['authorization'] = token
-client.configuration.api_key_prefix['authorization'] = 'Bearer'
-
-#setup logging
+# setup logging
log = logging.getLogger(__name__)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
@@ -24,25 +24,29 @@ handler.setLevel(logging.INFO)
log.addHandler(handler)
log.setLevel(logging.INFO)
+configuration = client.Configuration()
+configuration.host = "https://" + host
+configuration.ssl_ca_cert = cert
+configuration.api_key['authorization'] = token
+configuration.api_key_prefix['authorization'] = 'Bearer'
+coreV1Api = client.CoreV1Api(client.ApiClient(configuration))
def is_ready(container_name):
- log.info( "Checking if " + container_name + " is ready")
- # config.load_kube_config() # for local testing
- # namespace='onap-sdc' # for local testing
- v1 = client.CoreV1Api()
-
ready = False
-
+ log.info("Checking if " + container_name + " is ready")
try:
- response = v1.list_namespaced_pod(namespace=namespace, watch=False)
+ response = coreV1Api.list_namespaced_pod(namespace=namespace, watch=False)
for i in response.items:
+ # container_statuses can be None, which is non-iterable.
+ if i.status.container_statuses is None:
+ continue
for s in i.status.container_statuses:
if s.name == container_name:
ready = s.ready
if not ready:
- log.info( container_name + " is not ready.")
+ log.info(container_name + " is not ready.")
else:
- log.info( container_name + " is ready!")
+ log.info(container_name + " is ready!")
else:
continue
return ready
@@ -50,27 +54,49 @@ def is_ready(container_name):
log.error("Exception when calling list_namespaced_pod: %s\n" % e)
-def main(args):
+DEF_TIMEOUT = 10
+DESCRIPTION = "Kubernetes container readiness check utility"
+USAGE = "Usage: ready.py [-t <timeout>] -c <container_name> [-c <container_name> ...]\n" \
+ "where\n" \
+ "<timeout> - wait for container readiness timeout in min, default is " + str(DEF_TIMEOUT) + "\n" \
+ "<container_name> - name of the container to wait for\n"
+
+def main(argv):
# args are a list of container names
- for container_name in args:
- # 5 min, TODO: make configurable
- timeout = time.time() + 60 * 10
+ container_names = []
+ timeout = DEF_TIMEOUT
+ try:
+ opts, args = getopt.getopt(argv, "hc:t:", ["container-name=", "timeout=", "help"])
+ for opt, arg in opts:
+ if opt in ("-h", "--help"):
+ print("%s\n\n%s" % (DESCRIPTION, USAGE))
+ sys.exit()
+ elif opt in ("-c", "--container-name"):
+ container_names.append(arg)
+ elif opt in ("-t", "--timeout"):
+ timeout = float(arg)
+ except (getopt.GetoptError, ValueError) as e:
+ print("Error parsing input parameters: %s\n" % e)
+ print(USAGE)
+ sys.exit(2)
+ if not container_names:
+ print("Missing required input parameter(s)\n")
+ print(USAGE)
+ sys.exit(2)
+
+ for container_name in container_names:
+ timeout = time.time() + timeout * 60
while True:
ready = is_ready(container_name)
if ready is True:
break
elif time.time() > timeout:
- log.warning( "timed out waiting for '" + container_name + "' to be ready")
+ log.warning("timed out waiting for '" + container_name + "' to be ready")
exit(1)
else:
time.sleep(5)
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='Process some names.')
- parser.add_argument('--container-name', action='append', required=True, help='A container name')
- args = parser.parse_args()
- arg_dict = vars(args)
+ main(sys.argv[1:])
- for arg in arg_dict.itervalues():
- main(arg)
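A minimal sketch of driving the reworked entry point; the container names and timeout are illustrative, and the module still expects the in-cluster NAMESPACE, CERT, KUBERNETES_SERVICE_HOST and TOKEN environment variables to be set:

import ready

# Equivalent to: ready.py -c msb-consul -c msb-discovery -t 15
ready.main(['-c', 'msb-consul', '-c', 'msb-discovery', '-t', '15'])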
diff --git a/kubernetes/sdnc/templates/dgbuilder-deployment.yaml b/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
index e0f8b298e9..b9ec155c3f 100644
--- a/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
+++ b/kubernetes/sdnc/templates/dgbuilder-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: sdnc-dgbuilder
namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
+ replicas: {{ .Values.dgbuilderReplicas }}
selector:
matchLabels:
app: sdnc-dgbuilder
diff --git a/kubernetes/sdnc/templates/dmaap-deployment.yaml b/kubernetes/sdnc/templates/dmaap-deployment.yaml
index b9ba346156..eb6f354abd 100644
--- a/kubernetes/sdnc/templates/dmaap-deployment.yaml
+++ b/kubernetes/sdnc/templates/dmaap-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: dmaap-listener
namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
+ replicas: {{ .Values.dmaapReplicas }}
selector:
matchLabels:
app: dmaap-listener
diff --git a/kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml b/kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml
index 9d3feeff9c..a283d0f626 100644
--- a/kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml
+++ b/kubernetes/sdnc/templates/nfs-provisoner-deployment.yaml
@@ -5,7 +5,7 @@ metadata:
name: nfs-provisioner
namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
- replicas: 1
+ replicas: {{ .Values.nfsReplicas }}
strategy:
type: Recreate
template:
diff --git a/kubernetes/sdnc/templates/ueb-deployment.yaml b/kubernetes/sdnc/templates/ueb-deployment.yaml
index a7e5e748bd..c109acdaef 100644
--- a/kubernetes/sdnc/templates/ueb-deployment.yaml
+++ b/kubernetes/sdnc/templates/ueb-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: ueb-listener
namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
+ replicas: {{ .Values.uebReplicas }}
selector:
matchLabels:
app: ueb-listener
diff --git a/kubernetes/sdnc/templates/web-deployment.yaml b/kubernetes/sdnc/templates/web-deployment.yaml
index 7e8b59c1eb..73d8e0a6cb 100644
--- a/kubernetes/sdnc/templates/web-deployment.yaml
+++ b/kubernetes/sdnc/templates/web-deployment.yaml
@@ -5,6 +5,7 @@ metadata:
name: sdnc-portal
namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
+ replicas: {{ .Values.portalReplicas }}
selector:
matchLabels:
app: sdnc-portal
diff --git a/kubernetes/sdnc/values.yaml b/kubernetes/sdnc/values.yaml
index 395970957a..e91dc2a4cf 100644
--- a/kubernetes/sdnc/values.yaml
+++ b/kubernetes/sdnc/values.yaml
@@ -14,5 +14,10 @@ image:
enableODLCluster: false
numberOfODLReplicas: 1
numberOfDbReplicas: 1
+dgbuilderReplicas: 1
+dmaapReplicas: 1
+nfsReplicas: 1
+uebReplicas: 1
+portalReplicas: 1
disableSdncSdncDgbuilder: false
disableSdncSdncPortal: false
diff --git a/kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-my.cnf b/kubernetes/vid/resources/config/lf_config/vid-my.cnf
index 472bf4698e..472bf4698e 100755
--- a/kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-my.cnf
+++ b/kubernetes/vid/resources/config/lf_config/vid-my.cnf
diff --git a/kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-pre-init.sql b/kubernetes/vid/resources/config/lf_config/vid-pre-init.sql
index 57b797d03c..57b797d03c 100755
--- a/kubernetes/config/docker/init/src/config/vid/vid/lf_config/vid-pre-init.sql
+++ b/kubernetes/vid/resources/config/lf_config/vid-pre-init.sql
diff --git a/kubernetes/vid/resources/config/log/filebeat/filebeat.yml b/kubernetes/vid/resources/config/log/filebeat/filebeat.yml
new file mode 100644
index 0000000000..f316b866af
--- /dev/null
+++ b/kubernetes/vid/resources/config/log/filebeat/filebeat.yml
@@ -0,0 +1,41 @@
+filebeat.prospectors:
+#The input type is mandatory; in our case it is 'log'.
+- input_type: log
+ #This is the canonical path as mentioned in logback.xml; *.* means all files in the directory are monitored.
+ paths:
+ - /var/log/onap/*/*/*/*.log
+ - /var/log/onap/*/*/*.log
+ - /var/log/onap/*/*.log
+ #Files older than this are ignored. In our case it is 48 hours, i.e. 2 days. It is a helper flag for clean_inactive.
+ ignore_older: 48h
+ # Remove the registry entry for a file that has been inactive longer than the specified time. In our case it is 96 hours, i.e. 4 days. This helps keep the registry records within limits.
+ clean_inactive: 96h
+
+
+# Name of the registry file. If a relative path is used, it is considered relative to the
+# data path. Otherwise, the fully qualified file name is used.
+#filebeat.registry_file: ${path.data}/registry
+
+
+output.logstash:
+ #List of Logstash server IP addresses with port numbers.
+ #In our case this will be the loadbalancer IP address.
+ #For the property below to work, the loadbalancer or Logstash must expose port 5044 to listen for Filebeat events, or the port in this property should be changed accordingly.
+ hosts: ["logstash.onap-log:5044"]
+ #If enabled, Filebeat automatically load balances among the available Logstash hosts.
+ loadbalance: true
+
+ #The list of root certificates for server verifications.
+ #If certificate_authorities is empty or not set, the trusted
+ #certificate authorities of the host system are used.
+ #ssl.certificate_authorities: $ssl.certificate_authorities
+
+ #The path to the certificate for SSL client authentication. If the certificate is not specified,
+ #client authentication is not available.
+ #ssl.certificate: $ssl.certificate
+
+ #The client certificate key used for client authentication.
+ #ssl.key: $ssl.key
+
+ #The passphrase used to decrypt an encrypted key stored in the configured key file
+ #ssl.key_passphrase: $ssl.key_passphrase
diff --git a/kubernetes/config/docker/init/src/config/log/vid/logback.xml b/kubernetes/vid/resources/config/log/vid/logback.xml
index 49db781b3e..49db781b3e 100644
--- a/kubernetes/config/docker/init/src/config/log/vid/logback.xml
+++ b/kubernetes/vid/resources/config/log/vid/logback.xml
diff --git a/kubernetes/vid/templates/vid-lfconfig-configmap.yaml b/kubernetes/vid/templates/vid-lfconfig-configmap.yaml
new file mode 100644
index 0000000000..1cc3f218b9
--- /dev/null
+++ b/kubernetes/vid/templates/vid-lfconfig-configmap.yaml
@@ -0,0 +1,9 @@
+#{{ if not .Values.disableVidVidMariadb }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vid-lfconfig-configmap
+ namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/lf_config/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/vid/templates/vid-log-configmap.yaml b/kubernetes/vid/templates/vid-log-configmap.yaml
new file mode 100644
index 0000000000..00481d259e
--- /dev/null
+++ b/kubernetes/vid/templates/vid-log-configmap.yaml
@@ -0,0 +1,17 @@
+#{{ if not .Values.disableVidVidServer }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vid-log-configmap
+ namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/log/vid/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vid-filebeat-configmap
+ namespace: {{ .Values.nsPrefix }}-vid
+data:
+{{ tpl (.Files.Glob "resources/config/log/filebeat/*").AsConfig . | indent 2 }}
+#{{ end }}
diff --git a/kubernetes/vid/templates/vid-mariadb-deployment.yaml b/kubernetes/vid/templates/vid-mariadb-deployment.yaml
index 55ef5da872..bf23c3f8b1 100644
--- a/kubernetes/vid/templates/vid-mariadb-deployment.yaml
+++ b/kubernetes/vid/templates/vid-mariadb-deployment.yaml
@@ -7,6 +7,7 @@ metadata:
name: vid-mariadb
namespace: "{{ .Values.nsPrefix }}-vid"
spec:
+ replicas: {{ .Values.vidMariaDbReplicas }}
selector:
matchLabels:
app: vid-mariadb
@@ -36,9 +37,11 @@ spec:
- mountPath: /var/lib/mysql
name: vid-mariadb-data
- mountPath: /docker-entrypoint-initdb.d/vid-pre-init.sql
- name: vid-pre-init
+ name: vid-lfconfig
+ subPath: vid-pre-init.sql
- mountPath: /etc/mysql/my.cnf
- name: my-cnf
+ name: vid-lfconfig
+ subPath: my.cnf
ports:
- containerPort: 3306
readinessProbe:
@@ -53,12 +56,15 @@ spec:
- name: vid-mariadb-data
persistentVolumeClaim:
claimName: vid-db
- - name: vid-pre-init
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/vid/lf_config/vid-pre-init.sql
- - name: my-cnf
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/vid/lf_config/vid-my.cnf
+ - name: vid-lfconfig
+ configMap:
+ name: vid-lfconfig-configmap
+ defaultMode: 0755
+ items:
+ - key: vid-my.cnf
+ path: my.cnf
+ - key: vid-pre-init.sql
+ path: vid-pre-init.sql
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/vid/templates/vid-pv-pvc.yaml b/kubernetes/vid/templates/vid-pv-pvc.yaml
index f513a87354..3315d093e3 100644
--- a/kubernetes/vid/templates/vid-pv-pvc.yaml
+++ b/kubernetes/vid/templates/vid-pv-pvc.yaml
@@ -13,7 +13,7 @@ spec:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/vid/mariadb/data
+ path: {{ .Values.dataRootDir }}/{{ .Values.nsPrefix }}/vid/mariadb/data
---
kind: PersistentVolumeClaim
apiVersion: v1
@@ -29,4 +29,4 @@ spec:
selector:
matchLabels:
name: "{{ .Values.nsPrefix }}-vid-db"
-#{{ end }} \ No newline at end of file
+#{{ end }}
diff --git a/kubernetes/vid/templates/vid-server-deployment.yaml b/kubernetes/vid/templates/vid-server-deployment.yaml
index 832fdeb93c..7f9cb653b2 100644
--- a/kubernetes/vid/templates/vid-server-deployment.yaml
+++ b/kubernetes/vid/templates/vid-server-deployment.yaml
@@ -7,6 +7,7 @@ metadata:
name: vid-server
namespace: "{{ .Values.nsPrefix }}-vid"
spec:
+ replicas: {{ .Values.vidServerReplicas }}
selector:
matchLabels:
app: vid-server
@@ -73,7 +74,7 @@ spec:
value: Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U
- name: VID_MYSQL_MAXCONNECTIONS
value: "5"
- image: {{ .Values.image.vid }}
+ image: {{ .Values.image.vid }}
imagePullPolicy: {{ .Values.pullPolicy }}
name: vid-server
lifecycle:
@@ -90,6 +91,7 @@ spec:
name: vid-logs
- mountPath: /tmp/logback.xml
name: vid-logback
+ subPath: logback.xml
readinessProbe:
tcpSocket:
port: 8080
@@ -101,6 +103,7 @@ spec:
volumeMounts:
- mountPath: /usr/share/filebeat/filebeat.yml
name: filebeat-conf
+ subPath: filebeat.yml
- mountPath: /var/log/onap
name: vid-logs
- mountPath: /usr/share/filebeat/data
@@ -110,15 +113,15 @@ spec:
hostPath:
path: /etc/localtime
- name: filebeat-conf
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/filebeat/logback/filebeat.yml
+ configMap:
+ name: vid-filebeat-configmap
- name: vid-logs
emptyDir: {}
- name: vid-data-filebeat
emptyDir: {}
- name: vid-logback
- hostPath:
- path: /dockerdata-nfs/{{ .Values.nsPrefix }}/log/vid/logback.xml
+ configMap:
+ name: vid-log-configmap
imagePullSecrets:
- name: "{{ .Values.nsPrefix }}-docker-registry-key"
#{{ end }}
diff --git a/kubernetes/vid/values.yaml b/kubernetes/vid/values.yaml
index 9b1748f507..64635889cf 100644
--- a/kubernetes/vid/values.yaml
+++ b/kubernetes/vid/values.yaml
@@ -1,8 +1,11 @@
nsPrefix: onap
pullPolicy: Always
nodePortPrefix: 302
+dataRootDir: /dockerdata-nfs
+vidMariaDbReplicas: 1
+vidServerReplicas: 1
image:
readiness: oomk8s/readiness-check:1.0.0
mariadb: nexus3.onap.org:10001/library/mariadb:10
vid: nexus3.onap.org:10001/openecomp/vid:v1.1.1
- filebeat: docker.elastic.co/beats/filebeat:5.5.0 \ No newline at end of file
+ filebeat: docker.elastic.co/beats/filebeat:5.5.0