aboutsummaryrefslogtreecommitdiffstats
path: root/vnfs/DAaaS/deploy/00-init
diff options
context:
space:
mode:
authorDileep Ranganathan <dileep.ranganathan@intel.com>2019-05-30 12:38:37 -0700
committerDileep Ranganathan <dileep.ranganathan@intel.com>2019-05-30 21:11:52 +0000
commit3d5a3e06530c1250d48f7d838c619f3bfbcd019d (patch)
tree349e370c43ce7318b3f7eb7736345de6872cbef2 /vnfs/DAaaS/deploy/00-init
parent31802660dfe74a8671ae29789f0018f0f887ea1a (diff)
Refactor Distributed Analytics project structure
Modified the project structure to improve maintainability and to add future CI and integration test support. Change-Id: Id30bfb1f83f23785a6b5f99e81f42f752d59c0f8 Issue-ID: ONAPARC-280 Signed-off-by: Dileep Ranganathan <dileep.ranganathan@intel.com>
Diffstat (limited to 'vnfs/DAaaS/deploy/00-init')
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/.helmignore28
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/Chart.yaml8
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml10
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml40
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml111
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml343
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml65
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml52
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml23
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml58
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml49
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml21
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml982
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml30
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml29
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml22
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml21
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml57
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml18
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml46
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml47
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml67
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml35
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml54
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml74
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml72
-rwxr-xr-xvnfs/DAaaS/deploy/00-init/gloo/values.yaml56
-rw-r--r--vnfs/DAaaS/deploy/00-init/istio/README.md31
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml7
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt5
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl16
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml180
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml165
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml38
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml22
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml108
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml35
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml177
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml35
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml19
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml8
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml28
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml62
-rw-r--r--vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml75
47 files changed, 3509 insertions, 0 deletions
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/.helmignore b/vnfs/DAaaS/deploy/00-init/gloo/.helmignore
new file mode 100755
index 00000000..08c5989a
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/.helmignore
@@ -0,0 +1,28 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+
+# template files
+*-template.yaml
+
+# generator files
+*.go
+generate/
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml b/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml
new file mode 100755
index 00000000..4f5e9315
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/Chart.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+description: Gloo Helm chart for Kubernetes
+home: https://gloo.solo.io/
+icon: https://raw.githubusercontent.com/solo-io/gloo/master/docs/img/Gloo-01.png
+name: gloo
+sources:
+- https://github.com/solo-io/gloo
+version: 0.13.18
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml
new file mode 100755
index 00000000..92a37f9d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/0-namespace.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.namespace.create -}}
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ annotations:
+ "helm.sh/hook": pre-install
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml
new file mode 100755
index 00000000..7314b4e3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/10-ingress-deployment.yaml
@@ -0,0 +1,40 @@
+{{- if or (.Values.ingress.enabled) (.Values.settings.integrations.knative.enabled) }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress
+ name: ingress
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingress.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress
+ template:
+ metadata:
+ labels:
+ gloo: ingress
+ spec:
+ containers:
+ - image: "{{ .Values.ingress.deployment.image.repository }}:{{ .Values.ingress.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingress.deployment.image.pullPolicy }}
+ name: ingress
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.settings.integrations.knative.enabled }}
+ - name: "ENABLE_KNATIVE_INGRESS"
+ value: "true"
+{{- end }}
+
+{{- if not (.Values.ingress.enabled) }}
+ - name: "DISABLE_KUBE_INGRESS"
+ value: "true"
+{{- end }}
+
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml
new file mode 100755
index 00000000..2c111170
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/100-gloo-crds.yaml
@@ -0,0 +1,111 @@
+{{- if .Values.crds.create }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: settings.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ gloo: settings
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Settings
+ listKind: SettingsList
+ plural: settings
+ shortNames:
+ - st
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: gateways.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: Gateway
+ listKind: GatewayList
+ plural: gateways
+ shortNames:
+ - gw
+ singular: gateway
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.gateway.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gateway.solo.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ shortNames:
+ - vs
+ singular: virtualservice
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: proxies.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Proxy
+ listKind: ProxyList
+ plural: proxies
+ shortNames:
+ - px
+ singular: proxy
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreams.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: Upstream
+ listKind: UpstreamList
+ plural: upstreams
+ shortNames:
+ - us
+ singular: upstream
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: upstreamgroups.gloo.solo.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: gloo.solo.io
+ names:
+ kind: UpstreamGroup
+ listKind: UpstreamGroupList
+ plural: upstreamgroups
+ shortNames:
+ - ug
+ singular: upstreamgroup
+ scope: Namespaced
+ version: v1
+---
+{{- end}} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
new file mode 100755
index 00000000..3c9987ef
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/101-knative-crds-0.5.1.yaml
@@ -0,0 +1,343 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+---
+# ↓ required as knative dependency on istio crds is hard-coded right now ↓
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: virtualservices.networking.istio.io
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ app: istio-pilot
+spec:
+ group: networking.istio.io
+ names:
+ kind: VirtualService
+ listKind: VirtualServiceList
+ plural: virtualservices
+ singular: virtualservice
+ categories:
+ - istio-io
+ - networking-istio-io
+ scope: Namespaced
+ version: v1alpha3
+
+# ↑ required as knative dependency on istio crds is hard-coded right now ↑
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: certificates.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=="Ready")].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: Certificate
+ plural: certificates
+ shortNames:
+ - kcert
+ singular: certificate
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: clusteringresses.networking.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ClusterIngress
+ plural: clusteringresses
+ singular: clusteringress
+ scope: Cluster
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: configurations.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Configuration
+ plural: configurations
+ shortNames:
+ - config
+ - cfg
+ singular: configuration
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ name: images.caching.internal.knative.dev
+spec:
+ group: caching.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - caching
+ kind: Image
+ plural: images
+ shortNames:
+ - img
+ singular: image
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: podautoscalers.autoscaling.internal.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: autoscaling.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - autoscaling
+ kind: PodAutoscaler
+ plural: podautoscalers
+ shortNames:
+ - kpa
+ singular: podautoscaler
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: revisions.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.serviceName
+ name: Service Name
+ type: string
+ - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration']
+ name: Generation
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Revision
+ plural: revisions
+ shortNames:
+ - rev
+ singular: revision
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: routes.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Route
+ plural: routes
+ shortNames:
+ - rt
+ singular: route
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: services.serving.knative.dev
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.domain
+ name: Domain
+ type: string
+ - JSONPath: .status.latestCreatedRevisionName
+ name: LatestCreated
+ type: string
+ - JSONPath: .status.latestReadyRevisionName
+ name: LatestReady
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].status
+ name: Ready
+ type: string
+ - JSONPath: .status.conditions[?(@.type=='Ready')].reason
+ name: Reason
+ type: string
+ group: serving.knative.dev
+ names:
+ categories:
+ - all
+ - knative
+ - serving
+ kind: Service
+ plural: services
+ shortNames:
+ - kservice
+ - ksvc
+ singular: service
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ "helm.sh/hook": crd-install
+ labels:
+ knative.dev/crd-install: "true"
+ serving.knative.dev/release: devel
+ name: serverlessservices.networking.internal.knative.dev
+spec:
+ group: networking.internal.knative.dev
+ names:
+ categories:
+ - all
+ - knative-internal
+ - networking
+ kind: ServerlessService
+ plural: serverlessservices
+ shortNames:
+ - sks
+ singular: serverlessservice
+ scope: Namespaced
+ subresources:
+ status: {}
+ version: v1alpha1
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
new file mode 100755
index 00000000..5dc131e5
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/11-ingress-proxy-deployment.yaml
@@ -0,0 +1,65 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.ingressProxy.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: ingress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: ingress-proxy
+{{- with .Values.ingressProxy.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: "{{ .Values.ingressProxy.deployment.image.repository }}:{{ .Values.ingressProxy.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.ingressProxy.deployment.image.pullPolicy }}
+ name: ingress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.ingressProxy.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.ingressProxy.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with .Values.ingressProxy.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if .Values.ingressProxy.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.ingressProxy.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: ingress-envoy-config
+ name: envoy-config
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
new file mode 100755
index 00000000..8938a477
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/12-ingress-proxy-configmap.yaml
@@ -0,0 +1,52 @@
+{{- if .Values.ingress.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ingress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: gateway-proxy
+data:
+{{ if (empty .Values.ingressProxy.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: ingress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~ingress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml .Values.ingressProxy.configMap.data | indent 2}}{{- end}}
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml
new file mode 100755
index 00000000..583e8bcd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/13-ingress-proxy-service.yaml
@@ -0,0 +1,23 @@
+{{- if .Values.ingress.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: ingress-proxy
+ name: ingress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.ingressProxy.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.ingressProxy.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: ingress-proxy
+ type: LoadBalancer
+
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
new file mode 100755
index 00000000..fb7874eb
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/14-clusteringress-proxy-deployment.yaml
@@ -0,0 +1,58 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.settings.integrations.knative.proxy.replicas }}
+ selector:
+ matchLabels:
+ gloo: clusteringress-proxy
+ template:
+ metadata:
+ labels:
+ gloo: clusteringress-proxy
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ .Values.settings.integrations.knative.proxy.image.repository }}:{{ .Values.settings.integrations.knative.proxy.image.tag }}
+ imagePullPolicy: {{ .Values.settings.integrations.knative.proxy.image.pullPolicy }}
+ name: clusteringress-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ name: https
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ volumes:
+ - configMap:
+ name: clusteringress-envoy-config
+ name: envoy-config
+
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
new file mode 100755
index 00000000..85a6421f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/15-clusteringress-proxy-configmap.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+# configmap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: clusteringress-envoy-config
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+data:
+ envoy.yaml: |
+ node:
+ cluster: clusteringress
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~clusteringress-proxy"
+ static_resources:
+ clusters:
+ - name: xds_cluster
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: xds_cluster
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo
+ port_value: {{ .Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: xds_cluster}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
new file mode 100755
index 00000000..7e25bee9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/16-clusteringress-proxy-service.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: clusteringress-proxy
+ name: clusteringress-proxy
+ namespace: {{ .Release.Namespace }}
+spec:
+ ports:
+ - port: {{ .Values.settings.integrations.knative.proxy.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ .Values.settings.integrations.knative.proxy.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: clusteringress-proxy
+ type: LoadBalancer
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
new file mode 100755
index 00000000..a73cf1f2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/17-knative-no-istio-0.5.1.yaml
@@ -0,0 +1,982 @@
+{{- if .Values.settings.integrations.knative.enabled }}
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app: gloo
+ istio-injection: enabled
+ serving.knative.dev/release: devel
+ name: knative-serving
+
+---
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ serving.knative.dev/controller: "true"
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-admin
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ serving.knative.dev/controller: "true"
+ serving.knative.dev/release: devel
+ name: knative-serving-core
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - namespaces
+ - secrets
+ - configmaps
+ - endpoints
+ - services
+ - events
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ - deployments
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ - deployments/scale
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations
+ - routes
+ - revisions
+ - services
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - serving.knative.dev
+ resources:
+ - configurations/status
+ - routes/status
+ - revisions/status
+ - services/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling.internal.knative.dev
+ resources:
+ - podautoscalers
+ - podautoscalers/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - caching.internal.knative.dev
+ resources:
+ - images
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+ - apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - clusteringresses/status
+ - serverlessservices
+ - serverlessservices/status
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - deletecollection
+ - patch
+ - watch
+ - apiGroups:
+ - networking.istio.io
+ resources:
+ - virtualservices
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - patch
+ - watch
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: knative-serving-controller-admin
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: knative-serving-admin
+subjects:
+ - kind: ServiceAccount
+ name: controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: activator
+ serving.knative.dev/release: devel
+ name: activator-service
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ nodePort: null
+ port: 80
+ protocol: TCP
+ targetPort: 8080
+ - name: http2
+ port: 81
+ protocol: TCP
+ targetPort: 8081
+ - name: metrics
+ nodePort: null
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: activator
+ type: ClusterIP
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: controller
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ ports:
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: controller
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ role: webhook
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ ports:
+ - port: 443
+ targetPort: 443
+ selector:
+ role: webhook
+
+---
+apiVersion: caching.internal.knative.dev/v1alpha1
+kind: Image
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: queue-proxy
+ namespace: knative-serving
+spec:
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: activator
+ namespace: knative-serving
+spec:
+ selector:
+ matchLabels:
+ app: activator
+ role: activator
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: activator
+ role: activator
+ serving.knative.dev/release: devel
+ spec:
+ containers:
+ - args:
+ - -logtostderr=false
+ - -stderrthreshold=FATAL
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/activator@sha256:60630ac88d8cb67debd1e2ab1ecd6ec3ff6cbab2336dda8e7ae1c01ebead76c0
+ livenessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ name: activator
+ ports:
+ - containerPort: 8080
+ name: http1-port
+ - containerPort: 8081
+ name: h2c-port
+ - containerPort: 9090
+ name: metrics-port
+ readinessProbe:
+ httpGet:
+ path: /healthz
+ port: 8080
+ resources:
+ limits:
+ cpu: 200m
+ memory: 600Mi
+ requests:
+ cpu: 20m
+ memory: 60Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: autoscaler
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ ports:
+ - name: http
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: metrics
+ port: 9090
+ protocol: TCP
+ targetPort: 9090
+ selector:
+ app: autoscaler
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: autoscaler
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: autoscaler
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "true"
+ labels:
+ app: autoscaler
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/autoscaler@sha256:442f99e3a55653b19137b44c1d00f681b594d322cb39c1297820eb717e2134ba
+ name: autoscaler
+ ports:
+ - containerPort: 8080
+ name: websocket
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 300m
+ memory: 400Mi
+ requests:
+ cpu: 30m
+ memory: 40Mi
+ volumeMounts:
+ - mountPath: /etc/config-autoscaler
+ name: config-autoscaler
+ - mountPath: /etc/config-logging
+ name: config-logging
+ - mountPath: /etc/config-observability
+ name: config-observability
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-autoscaler
+ name: config-autoscaler
+ - configMap:
+ name: config-logging
+ name: config-logging
+ - configMap:
+ name: config-observability
+ name: config-observability
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # The Revision ContainerConcurrency field specifies the maximum number
+ # of requests the Container can handle at once. Container concurrency
+ # target percentage is how much of that maximum to use in a stable
+ # state. E.g. if a Revision specifies ContainerConcurrency of 10, then
+ # the Autoscaler will try to maintain 7 concurrent connections per pod
+ # on average. A value of 0.7 is chosen because the Autoscaler panics
+ # when concurrency exceeds 2x the desired set point. So we will panic
+ # before we reach the limit.
+ container-concurrency-target-percentage: "1.0"
+
+ # The container concurrency target default is what the Autoscaler will
+ # try to maintain when the Revision specifies unlimited concurrency.
+ # Even when specifying unlimited concurrency, the autoscaler will
+ # horizontally scale the application based on this target concurrency.
+ #
+ # A value of 100 is chosen because it's enough to allow vertical pod
+ # autoscaling to tune resource requests. E.g. maintaining 1 concurrent
+ # "hello world" request doesn't consume enough resources to allow VPA
+ # to achieve efficient resource usage (VPA CPU minimum is 300m).
+ container-concurrency-target-default: "100"
+
+ # When operating in a stable mode, the autoscaler operates on the
+ # average concurrency over the stable window.
+ stable-window: "60s"
+
+ # When observed average concurrency during the panic window reaches 2x
+ # the target concurrency, the autoscaler enters panic mode. When
+ # operating in panic mode, the autoscaler operates on the average
+ # concurrency over the panic window.
+ panic-window: "6s"
+
+ # Max scale up rate limits the rate at which the autoscaler will
+ # increase pod count. It is the maximum ratio of desired pods versus
+ # observed pods.
+ max-scale-up-rate: "10"
+
+ # Scale to zero feature flag
+ enable-scale-to-zero: "true"
+
+ # Tick interval is the time between autoscaling calculations.
+ tick-interval: "2s"
+
+ # Dynamic parameters (take effect when config map is updated):
+
+ # Scale to zero grace period is the time an inactive revision is left
+ # running before it is scaled to zero (min: 30s).
+ scale-to-zero-grace-period: "30s"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-autoscaler
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # List of repositories for which tag to digest resolving should be skipped
+ registriesSkippingTagResolving: "ko.local,dev.local"
+ queueSidecarImage: gcr.io/knative-releases/github.com/knative/serving/cmd/queue@sha256:b5c759e4ea6f36ae4498c1ec794653920345b9ad7492731fb1d6087e3b95dc43
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-controller
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # revision-timeout-seconds contains the default number of
+ # seconds to use for the revision's per-request timeout, if
+ # none is specified.
+ revision-timeout-seconds: "300" # 5 minutes
+
+ # revision-cpu-request contains the cpu allocation to assign
+ # to revisions by default.
+ revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU)
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-defaults
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Default value for domain.
+ # Although it will match all routes, it is the least-specific rule so it
+ # will only be used if no other domain matches.
+ example.com: |
+
+ # These are example settings of domain.
+ # example.org will be used for routes having app=nonprofit.
+ example.org: |
+ selector:
+ app: nonprofit
+
+ # Routes having domain suffix of 'svc.cluster.local' will not be exposed
+ # through Ingress. You can define your own label selector to assign that
+ # domain suffix to your Route here, or you can set the label
+ # "serving.knative.dev/visibility=cluster-local"
+ # to achieve the same effect. This shows how to make routes having
+ # the label app=secret only exposed to the local cluster.
+ svc.cluster.local: |
+ selector:
+ app: secret
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-domain
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Delay after revision creation before considering it for GC
+ stale-revision-create-delay: "24h"
+
+ # Duration since a route has been pointed at a revision before it should be GC'd
+  # This minus lastpinned-debounce must be longer than the controller resync period (10 hours)
+ stale-revision-timeout: "15h"
+
+ # Minimum number of generations of revisions to keep before considering for GC
+ stale-revision-minimum-generations: "1"
+
+ # To avoid constant updates, we allow an existing annotation to be stale by this
+ # amount before we update the timestamp
+ stale-revision-lastpinned-debounce: "5h"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-gc
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ networking.knative.dev/ingress-provider: istio
+ serving.knative.dev/release: devel
+ name: config-istio
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # Common configuration for all Knative codebase
+ zap-logger-config: |
+ {
+ "level": "info",
+ "development": false,
+ "outputPaths": ["stdout"],
+ "errorOutputPaths": ["stderr"],
+ "encoding": "json",
+ "encoderConfig": {
+ "timeKey": "ts",
+ "levelKey": "level",
+ "nameKey": "logger",
+ "callerKey": "caller",
+ "messageKey": "msg",
+ "stacktraceKey": "stacktrace",
+ "lineEnding": "",
+ "levelEncoder": "",
+ "timeEncoder": "iso8601",
+ "durationEncoder": "",
+ "callerEncoder": ""
+ }
+ }
+
+ # Log level overrides
+ # For all components except the autoscaler and queue proxy,
+    # changes are picked up immediately.
+ # For autoscaler and queue proxy, changes require recreation of the pods.
+ loglevel.controller: "info"
+ loglevel.autoscaler: "info"
+ loglevel.queueproxy: "info"
+ loglevel.webhook: "info"
+ loglevel.activator: "info"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-logging
+ namespace: knative-serving
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-network
+ namespace: knative-serving
+
+---
+apiVersion: v1
+data:
+ _example: |
+ ################################
+ # #
+ # EXAMPLE CONFIGURATION #
+ # #
+ ################################
+
+ # This block is not actually functional configuration,
+ # but serves to illustrate the available configuration
+ # options and document them in a way that is accessible
+ # to users that `kubectl edit` this config map.
+ #
+ # These sample configuration options may be copied out of
+ # this block and unindented to actually change the configuration.
+
+ # logging.enable-var-log-collection defaults to false.
+ # A fluentd sidecar will be set up to collect var log if
+ # this flag is true.
+ logging.enable-var-log-collection: false
+
+ # logging.fluentd-sidecar-image provides the fluentd sidecar image
+ # to inject as a sidecar to collect logs from /var/log.
+ # Must be presented if logging.enable-var-log-collection is true.
+ logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4
+
+ # logging.fluentd-sidecar-output-config provides the configuration
+ # for the fluentd sidecar, which will be placed into a configmap and
+ # mounted into the fluentd sidecar image.
+ logging.fluentd-sidecar-output-config: |
+ # Parse json log before sending to Elastic Search
+ <filter **>
+ @type parser
+ key_name log
+ <parse>
+ @type multi_format
+ <pattern>
+ format json
+ time_key fluentd-time # fluentd-time is reserved for structured logs
+ time_format %Y-%m-%dT%H:%M:%S.%NZ
+ </pattern>
+ <pattern>
+ format none
+ message_key log
+ </pattern>
+ </parse>
+ </filter>
+ # Send to Elastic Search
+ <match **>
+ @id elasticsearch
+ @type elasticsearch
+ @log_level info
+ include_tag_key true
+ # Elasticsearch service is in monitoring namespace.
+ host elasticsearch-logging.knative-monitoring
+ port 9200
+ logstash_format true
+ <buffer>
+ @type file
+ path /var/log/fluentd-buffers/kubernetes.system.buffer
+ flush_mode interval
+ retry_type exponential_backoff
+ flush_thread_count 2
+ flush_interval 5s
+ retry_forever
+ retry_max_interval 30
+ chunk_limit_size 2M
+ queue_limit_length 8
+ overflow_action block
+ </buffer>
+ </match>
+
+ # logging.revision-url-template provides a template to use for producing the
+ # logging URL that is injected into the status of each Revision.
+    # This value is what you might use with the Knative monitoring bundle, and provides
+ # access to Kibana after setting up kubectl proxy.
+ logging.revision-url-template: |
+ http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
+
+ # If non-empty, this enables queue proxy writing request logs to stdout.
+ # The value determines the shape of the request logs and it must be a valid go text/template.
+ # It is important to keep this as a single line. Multiple lines are parsed as separate entities
+ # by most collection agents and will split the request logs into multiple records.
+ #
+ # The following fields and functions are available to the template:
+ #
+ # Request: An http.Request (see https://golang.org/pkg/net/http/#Request)
+ # representing an HTTP request received by the server.
+ #
+ # Response:
+ # struct {
+ # Code int // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml)
+ # Size int // An int representing the size of the response.
+ # Latency float64 // A float64 representing the latency of the response in seconds.
+ # }
+ #
+ # Revision:
+ # struct {
+ # Name string // Knative revision name
+ # Namespace string // Knative revision namespace
+ # Service string // Knative service name
+ # Configuration string // Knative configuration name
+ # PodName string // Name of the pod hosting the revision
+ # PodIP string // IP of the pod hosting the revision
+ # }
+ #
+ logging.request-log-template: '{"httpRequest": {"requestMethod": "{{ "{{" }}.Request.Method{{ "{{" }}", "requestUrl": "{{ "{{" }}js .Request.RequestURI{{ "{{" }}", "requestSize": "{{ "{{" }}.Request.ContentLength{{ "{{" }}", "status": {{ "{{" }}.Response.Code{{ "{{" }}, "responseSize": "{{ "{{" }}.Response.Size{{ "{{" }}", "userAgent": "{{ "{{" }}js .Request.UserAgent{{ "{{" }}", "remoteIp": "{{ "{{" }}js .Request.RemoteAddr{{ "{{" }}", "serverIp": "{{ "{{" }}.Revision.PodIP{{ "{{" }}", "referer": "{{ "{{" }}js .Request.Referer{{ "{{" }}", "latency": "{{ "{{" }}.Response.Latency{{ "{{" }}s", "protocol": "{{ "{{" }}.Request.Proto{{ "{{" }}"}, "traceId": "{{ "{{" }}index .Request.Header "X-B3-Traceid"{{ "{{" }}"}'
+
+ # metrics.backend-destination field specifies the system metrics destination.
+ # It supports either prometheus (the default) or stackdriver.
+ # Note: Using stackdriver will incur additional charges
+ metrics.backend-destination: prometheus
+
+ # metrics.request-metrics-backend-destination specifies the request metrics
+ # destination. If non-empty, it enables queue proxy to send request metrics.
+ # Currently supported values: prometheus, stackdriver.
+ metrics.request-metrics-backend-destination: prometheus
+
+ # metrics.stackdriver-project-id field specifies the stackdriver project ID. This
+ # field is optional. When running on GCE, application default credentials will be
+ # used if this field is not provided.
+ metrics.stackdriver-project-id: "<your stackdriver project id>"
+
+ # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
+ # Stackdriver using "global" resource type and custom metric type if the
+ # metrics are not supported by "knative_revision" resource type. Setting this
+ # flag to "true" could cause extra Stackdriver charge.
+ # If metrics.backend-destination is not Stackdriver, this is ignored.
+ metrics.allow-stackdriver-custom-metrics: "false"
+kind: ConfigMap
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: config-observability
+ namespace: knative-serving
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: controller
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: controller
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: controller
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:25af5f3adad8b65db3126e0d6e90aa36835c124c24d9d72ffbdd7ee739a7f571
+ name: controller
+ ports:
+ - containerPort: 9090
+ name: metrics
+ resources:
+ limits:
+ cpu: 1000m
+ memory: 1000Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ serving.knative.dev/release: devel
+ name: webhook
+ namespace: knative-serving
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: webhook
+ role: webhook
+ template:
+ metadata:
+ annotations:
+ sidecar.istio.io/inject: "false"
+ labels:
+ app: webhook
+ role: webhook
+ spec:
+ containers:
+ - env:
+ - name: SYSTEM_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: CONFIG_LOGGING_NAME
+ value: config-logging
+ image: gcr.io/knative-releases/github.com/knative/serving/cmd/webhook@sha256:d1ba3e2c0d739084ff508629db001619cea9cc8780685e85dd910363774eaef6
+ name: webhook
+ resources:
+ limits:
+ cpu: 200m
+ memory: 200Mi
+ requests:
+ cpu: 20m
+ memory: 20Mi
+ volumeMounts:
+ - mountPath: /etc/config-logging
+ name: config-logging
+ serviceAccountName: controller
+ volumes:
+ - configMap:
+ name: config-logging
+ name: config-logging
+
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml
new file mode 100755
index 00000000..a2eec087
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/18-settings.yaml
@@ -0,0 +1,30 @@
+{{ if .Values.settings.create }}
+
+apiVersion: gloo.solo.io/v1
+kind: Settings
+metadata:
+ name: default
+ namespace: {{ .Release.Namespace }}
+ annotations:
+ "helm.sh/hook": pre-install
+spec:
+ bindAddr: 0.0.0.0:{{ .Values.gloo.deployment.xdsPort }}
+ discoveryNamespace: {{ .Values.settings.writeNamespace }}
+ kubernetesArtifactSource: {}
+ kubernetesConfigSource: {}
+ kubernetesSecretSource: {}
+ refreshRate: 60s
+
+{{- if .Values.settings.extensions }}
+ extensions:
+{{- toYaml .Values.settings.extensions | nindent 4 }}
+{{- end }}
+
+{{- with .Values.settings.watchNamespaces }}
+ watchNamespaces:
+ {{- range . }}
+ - {{ . }}
+ {{- end }}
+{{- end }}
+
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
new file mode 100755
index 00000000..35fb5eb0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/20-namespace-clusterrole-gateway.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-gateway
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["gateway.solo.io"]
+ resources: ["virtualservices", "gateways"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
new file mode 100755
index 00000000..15215b9f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/21-namespace-clusterrole-ingress.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-ingress
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["extensions", ""]
+ resources: ["ingresses"]
+ verbs: ["*"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
new file mode 100755
index 00000000..1bd2b95d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/22-namespace-clusterrole-knative.yaml
@@ -0,0 +1,29 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-knative
+ labels:
+ app: gloo
+ gloo: rbac
+rules:
+- apiGroups: [""]
+ resources: ["pods", "services", "secrets", "endpoints", "configmaps"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["namespaces"]
+ verbs: ["get", "list", "watch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "create"]
+- apiGroups: ["gloo.solo.io"]
+ resources: ["settings", "upstreams","upstreamgroups", "proxies","virtualservices"]
+ verbs: ["*"]
+- apiGroups: ["networking.internal.knative.dev"]
+ resources: ["clusteringresses"]
+ verbs: ["get", "list", "watch"]
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
new file mode 100755
index 00000000..62198913
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/23-namespace-clusterrolebinding-gateway.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.gateway.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-gateway-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-gateway
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
new file mode 100755
index 00000000..7ef5cbae
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/24-namespace-clusterrolebinding-ingress.yaml
@@ -0,0 +1,22 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.ingress.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-ingress-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-ingress
+ apiGroup: rbac.authorization.k8s.io
+
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
new file mode 100755
index 00000000..5f05de96
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/25-namespace-clusterrolebinding-knative.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+
+{{- if .Values.settings.integrations.knative.enabled }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: gloo-role-binding-knative-{{ .Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: rbac
+subjects:
+- kind: ServiceAccount
+ name: default
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: gloo-role-knative
+ apiGroup: rbac.authorization.k8s.io
+{{- end -}}
+
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml
new file mode 100755
index 00000000..b3d8423f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/3-gloo-deployment.yaml
@@ -0,0 +1,57 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gloo.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gloo
+ template:
+ metadata:
+ labels:
+ gloo: gloo
+ {{- if .Values.gloo.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gloo.deployment.image.repository }}:{{ .Values.gloo.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gloo.deployment.image.pullPolicy }}
+ name: gloo
+ resources:
+ requests:
+ cpu: 1
+ memory: 256Mi
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ ports:
+ - containerPort: {{ .Values.gloo.deployment.xdsPort }}
+ name: grpc
+ protocol: TCP
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gloo.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+ {{- if .Values.gloo.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ .Values.gloo.deployment.image.pullSecret }}{{end}}
+
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml
new file mode 100755
index 00000000..ab49ea3f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/4-gloo-service.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: gloo
+ name: gloo
+ namespace: {{ .Release.Namespace }}
+spec:
+{{ if .Values.gloo.deployment.externalTrafficPolicy }}
+ externalTrafficPolicy: {{ .Values.gloo.deployment.externalTrafficPolicy }}
+{{- end }}
+ ports:
+ - name: grpc
+ port: {{ .Values.gloo.deployment.xdsPort }}
+ protocol: TCP
+ selector:
+ gloo: gloo
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml
new file mode 100755
index 00000000..1a44e922
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/5-discovery-deployment.yaml
@@ -0,0 +1,46 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: discovery
+ name: discovery
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.discovery.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: discovery
+ template:
+ metadata:
+ labels:
+ gloo: discovery
+ {{- if .Values.discovery.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.discovery.deployment.image.repository }}:{{ .Values.discovery.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.discovery.deployment.image.pullPolicy }}
+ name: discovery
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.discovery.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml
new file mode 100755
index 00000000..0a32241e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/6-gateway-deployment.yaml
@@ -0,0 +1,47 @@
+{{- if .Values.gateway.enabled }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: gateway
+ name: gateway
+ namespace: {{ .Release.Namespace }}
+spec:
+ replicas: {{ .Values.gateway.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: gateway
+ template:
+ metadata:
+ labels:
+ gloo: gateway
+ {{- if .Values.gateway.deployment.stats }}
+ annotations:
+ prometheus.io/path: /metrics
+ prometheus.io/port: "9091"
+ prometheus.io/scrape: "true"
+ {{- end}}
+ spec:
+ containers:
+ - image: "{{ .Values.gateway.deployment.image.repository }}:{{ .Values.gateway.deployment.image.tag }}"
+ imagePullPolicy: {{ .Values.gateway.deployment.image.pullPolicy }}
+ name: gateway
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ runAsNonRoot: true
+ runAsUser: 10101
+ capabilities:
+ drop:
+ - ALL
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ {{- if .Values.gateway.deployment.stats }}
+ - name: START_STATS_SERVER
+ value: "true"
+ {{- end}}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
new file mode 100755
index 00000000..bb54e8f3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/7-gateway-proxy-deployment.yaml
@@ -0,0 +1,67 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+spec:
+ replicas: {{ $spec.deployment.replicas }}
+ selector:
+ matchLabels:
+ gloo: {{ $key }}
+ template:
+ metadata:
+ labels:
+ gloo: {{ $key }}
+{{- with $spec.deployment.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+ spec:
+ containers:
+ - args: ["--disable-hot-restart"]
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ image: {{ $spec.deployment.image.repository }}:{{ $spec.deployment.image.tag }}
+ imagePullPolicy: {{ $spec.deployment.image.pullPolicy }}
+ name: gateway-proxy
+ securityContext:
+ readOnlyRootFilesystem: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ add:
+ - NET_BIND_SERVICE
+ ports:
+ - containerPort: {{ $spec.deployment.httpPort }}
+ name: http
+ protocol: TCP
+ - containerPort: {{ $spec.deployment.httpsPort }}
+ name: https
+ protocol: TCP
+{{- with $spec.deployment.extraPorts }}
+{{toYaml . | indent 8}}{{- end }}
+ volumeMounts:
+ - mountPath: /etc/envoy
+ name: envoy-config
+ {{- if $spec.deployment.image.pullSecret }}
+ imagePullSecrets:
+ - name: {{ $spec.deployment.image.pullSecret }}{{end}}
+ volumes:
+ - configMap:
+ name: {{ $key }}-envoy-config
+ name: envoy-config
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml
new file mode 100755
index 00000000..f0b7d347
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/8-gateway-proxy-service.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+ name: {{ $key }}
+ namespace: {{ $.Release.Namespace }}
+ {{- with $spec.service.extraAnnotations }}
+ annotations:
+{{toYaml . | indent 8}}{{- end }}
+spec:
+ ports:
+ - port: {{ $spec.service.httpPort }}
+ targetPort: {{ $spec.deployment.httpPort }}
+ protocol: TCP
+ name: http
+ - port: {{ $spec.service.httpsPort }}
+ targetPort: {{ $spec.deployment.httpsPort }}
+ protocol: TCP
+ name: https
+ selector:
+ gloo: {{ $key }}
+ type: {{ $spec.service.type }}
+ {{- if and (eq $spec.service.type "ClusterIP") $spec.service.clusterIP }}
+ clusterIP: {{ $spec.service.clusterIP }}
+ {{- end }}
+ {{- if and (eq $spec.service.type "LoadBalancer") $spec.service.loadBalancerIP }}
+ loadBalancerIP: {{ $spec.service.loadBalancerIP }}
+ {{- end }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml b/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
new file mode 100755
index 00000000..03c5a920
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/templates/9-gateway-proxy-configmap.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.gateway.enabled }}
+{{- range $key, $spec := .Values.gatewayProxies }}
+---
+# config_map
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ $key }}-envoy-config
+ namespace: {{ $.Release.Namespace }}
+ labels:
+ app: gloo
+ gloo: {{ $key }}
+data:
+{{ if (empty $spec.configMap.data) }}
+ envoy.yaml: |
+ node:
+ cluster: gateway
+ id: "{{ "{{" }}.PodName{{ "}}" }}.{{ "{{" }}.PodNamespace{{ "}}" }}"
+ metadata:
+ # this line must match !
+ role: "{{ "{{" }}.PodNamespace{{ "}}" }}~gateway-proxy"
+ static_resources:
+ clusters:
+ - name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ connect_timeout: 5.000s
+ load_assignment:
+ cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: gloo.{{ $.Release.Namespace }}.svc.cluster.local
+ port_value: {{ $.Values.gloo.deployment.xdsPort }}
+ http2_protocol_options: {}
+ type: STRICT_DNS
+ dynamic_resources:
+ ads_config:
+ api_type: GRPC
+ grpc_services:
+ - envoy_grpc: {cluster_name: gloo.{{ $.Release.Namespace }}.svc.cluster.local:{{ $.Values.gloo.deployment.xdsPort }}}
+ cds_config:
+ ads: {}
+ lds_config:
+ ads: {}
+ admin:
+ access_log_path: /dev/null
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 19000
+{{- else}}{{ toYaml $spec.configMap.data | indent 2}}{{- end}}
+{{- end }}
+{{- end }} \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml
new file mode 100755
index 00000000..98dd42ae
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values-ingress.yaml
@@ -0,0 +1,74 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+ingressProxy:
+ configMap: {}
+ deployment:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml
new file mode 100755
index 00000000..c53ca1a9
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values-knative.yaml
@@ -0,0 +1,72 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: ""
+ replicas: 1
+ stats: false
+ enabled: false
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: ""
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/ingress
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: true
+ proxy:
+ httpPort: "80"
+ httpsPort: "443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/gloo/values.yaml b/vnfs/DAaaS/deploy/00-init/gloo/values.yaml
new file mode 100755
index 00000000..daeab0c3
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/gloo/values.yaml
@@ -0,0 +1,56 @@
+crds:
+ create: true
+discovery:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/discovery
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+gateway:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gateway
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ enabled: true
+gatewayProxies:
+ gateway-proxy:
+ configMap:
+ data: null
+ deployment:
+ httpPort: "8080"
+ httpsPort: "8443"
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo-envoy-wrapper
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ service:
+ httpPort: "80"
+ httpsPort: "443"
+ type: LoadBalancer
+gloo:
+ deployment:
+ image:
+ pullPolicy: Always
+ repository: quay.io/solo-io/gloo
+ tag: 0.13.18
+ replicas: 1
+ stats: false
+ xdsPort: "9977"
+ingress:
+ enabled: false
+namespace:
+ create: false
+rbac:
+ create: true
+settings:
+ integrations:
+ knative:
+ enabled: false
+ writeNamespace: gloo-system
diff --git a/vnfs/DAaaS/deploy/00-init/istio/README.md b/vnfs/DAaaS/deploy/00-init/istio/README.md
new file mode 100644
index 00000000..d19bcce0
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/istio/README.md
@@ -0,0 +1,31 @@
+Istio Installation
+
+1. Download the Source code
+curl -L https://git.io/getLatestIstio | ISTIO_VERSION=1.1.7 sh -
+
+2. Add the Istio helm chart repository: "helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.7/charts/"
+
+   NOTE: Make sure the helm client and the helm server (tiller) are installed.
+
+   Create a namespace istio-system where all the Istio components are installed: "kubectl create namespace istio-system"
+
+3. Install all the Istio Custom Resource Definitions (CRDs) using kubectl apply
+
+
+   "helm template install/kubernetes/helm/istio-init --name istio-init --namespace istio-system | kubectl apply -f -".
+
+4. Verify that all 53 Istio CRDs were committed to the Kubernetes api-server using the following command:
+
+   "kubectl get crds | grep 'istio.io\|certmanager.k8s.io' | wc -l"
+
+5. Install istio with the sds as the configuration profile.
+
+   "helm template install/kubernetes/helm/istio --name istio --namespace istio-system --values install/kubernetes/helm/istio/values-istio-sds-auth.yaml | kubectl apply -f -"
+
+6. Verify the Installation
+
+   "kubectl get svc -n istio-system" && "kubectl get pods -n istio-system"
+
+   References:
+1. https://istio.io/docs/setup/kubernetes/install/helm/
+2. https://istio.io/docs/tasks/security/auth-sds/
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml
new file mode 100644
index 00000000..21e90098
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: File, Block, and Object Storage Services for your Cloud-Native Environment
+name: rook-ceph
+version: 0.0.1
+icon: https://rook.io/images/logos/rook/rook-logo-color-on-transparent.png
+sources:
+ - https://github.com/rook/rook
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt
new file mode 100644
index 00000000..0509b574
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The Rook Operator has been installed. Check its status by running:
+ kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
+
+Visit https://rook.io/docs/rook/master for instructions on how
+to create & configure Rook clusters
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl
new file mode 100644
index 00000000..f0d83d2e
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml
new file mode 100644
index 00000000..1cd33e8c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/cluster.yml
@@ -0,0 +1,180 @@
+#################################################################################
+# This example first defines some necessary namespace and RBAC security objects.
+# The actual Ceph Cluster CRD example can be found at the bottom of this example.
+#################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-cluster-mgmt
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph-system
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+#################################################################################
+# The Ceph Cluster CRD example
+#################################################################################
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+ name: rook-ceph
+ namespace: rook-ceph
+spec:
+ cephVersion:
+ # For the latest ceph images, see https://hub.docker.com/r/ceph/ceph/tags
+ image: ceph/ceph:v13.2.2-20181023
+ dataDirHostPath: /var/lib/rook
+ dashboard:
+ enabled: true
+ mon:
+ count: 3
+ allowMultiplePerNode: true
+ storage:
+ useAllNodes: true
+ useAllDevices: false
+ config:
+ databaseSizeMB: "1024"
+ journalSizeMB: "1024" \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml
new file mode 100644
index 00000000..58a24d47
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrole.yaml
@@ -0,0 +1,165 @@
+{{- if .Values.rbacEnable }}
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-cluster-mgmt
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ - pods
+ - pods/log
+ - services
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ # Pod access is needed for fencing
+ - pods
+ # Node access is needed for determining nodes where mons should run
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ # PVs and PVCs are managed by the Rook provisioner
+ - persistentvolumes
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+- apiGroups:
+ - rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+{{- if and .Values.agent .Values.agent.mountSecurityMode (ne .Values.agent.mountSecurityMode "Any") }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-agent-mount
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - get
+{{- end }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-system-psp-user
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+rules:
+- apiGroups:
+ - extensions
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - 00-rook-ceph-operator
+ verbs:
+ - use
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..845eb6d7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/clusterrolebinding.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.rbacEnable }}
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-ceph-system-psp-users
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-system-psp-user
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml
new file mode 100644
index 00000000..ee521152
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/dashboard-external-http.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: rook-ceph-mgr-dashboard-external-http
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ ports:
+ - name: dashboard
+ port: 7000
+ protocol: TCP
+ targetPort: 7000
+ selector:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ sessionAffinity: None
+ type: NodePort
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml
new file mode 100644
index 00000000..13c6a763
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/deployment.yaml
@@ -0,0 +1,108 @@
+apiVersion: apps/v1  # apps/v1beta1 Deployments are deprecated and removed in k8s 1.16; spec.selector is already set below
+kind: Deployment
+metadata:
+ name: rook-ceph-operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-operator
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-operator
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- if .Values.annotations }}
+ annotations:
+{{ toYaml .Values.annotations | indent 8 }}
+{{- end }}
+ spec:
+ containers:
+ - name: rook-ceph-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args: ["ceph", "operator"]
+ env:
+{{- if not .Values.rbacEnable }}
+ - name: RBAC_ENABLED
+ value: "false"
+{{- end }}
+{{- if .Values.agent }}
+{{- if .Values.agent.toleration }}
+ - name: AGENT_TOLERATION
+ value: {{ .Values.agent.toleration }}
+{{- end }}
+{{- if .Values.agent.tolerationKey }}
+ - name: AGENT_TOLERATION_KEY
+ value: {{ .Values.agent.tolerationKey }}
+{{- end }}
+{{- if .Values.agent.mountSecurityMode }}
+ - name: AGENT_MOUNT_SECURITY_MODE
+ value: {{ .Values.agent.mountSecurityMode }}
+{{- end }}
+{{- if .Values.agent.flexVolumeDirPath }}
+ - name: FLEXVOLUME_DIR_PATH
+ value: {{ .Values.agent.flexVolumeDirPath }}
+{{- end }}
+{{- if .Values.agent.libModulesDirPath }}
+ - name: LIB_MODULES_DIR_PATH
+ value: {{ .Values.agent.libModulesDirPath }}
+{{- end }}
+{{- if .Values.agent.mounts }}
+ - name: AGENT_MOUNTS
+ value: {{ .Values.agent.mounts }}
+{{- end }}
+{{- end }}
+{{- if .Values.discover }}
+{{- if .Values.discover.toleration }}
+        - name: DISCOVER_TOLERATION
+          value: {{ .Values.discover.toleration }}
+{{- end }}
+{{- if .Values.discover.tolerationKey }}
+        - name: DISCOVER_TOLERATION_KEY
+          value: {{ .Values.discover.tolerationKey }}
+{{- end }}
+{{- end }}
+ - name: ROOK_LOG_LEVEL
+ value: {{ .Values.logLevel }}
+ - name: ROOK_ENABLE_SELINUX_RELABELING
+ value: {{ .Values.enableSelinuxRelabeling | quote }}
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.mon }}
+{{- if .Values.mon.healthCheckInterval }}
+ - name: ROOK_MON_HEALTHCHECK_INTERVAL
+ value: {{ .Values.mon.healthCheckInterval }}
+{{- end }}
+{{- if .Values.mon.monOutTimeout }}
+ - name: ROOK_MON_OUT_TIMEOUT
+ value: {{ .Values.mon.monOutTimeout }}
+{{- end }}
+{{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+{{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+{{- if .Values.rbacEnable }}
+ serviceAccountName: rook-ceph-system
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml
new file mode 100644
index 00000000..412b2437
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/psp.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.pspEnable }}
+# PSP for rook-ceph-operator
+
+# Most of the teams follow the kubernetes docs and have these PSPs.
+# * privileged (for kube-system namespace)
+# * restricted (for all logged in users)
+#
+# If we name it as `rook-ceph-operator`, it comes next to `restricted` PSP alphabetically,
+# and applies `restricted` capabilities to `rook-system`. Thats reason this is named with `00-rook-ceph-operator`,
+# so it stays somewhere close to top and `rook-system` gets the intended PSP.
+#
+# More info on PSP ordering : https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
+
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: 00-rook-ceph-operator
+spec:
+ fsGroup:
+ rule: RunAsAny
+ privileged: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - '*'
+ allowedCapabilities:
+ - '*'
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml
new file mode 100644
index 00000000..e296663f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/resources.yaml
@@ -0,0 +1,177 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephclusters.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephCluster
+ listKind: CephClusterList
+ plural: cephclusters
+ singular: cephcluster
+ scope: Namespaced
+ version: v1
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ cephVersion:
+ properties:
+ allowUnsupported:
+ type: boolean
+ image:
+ type: string
+ name:
+ pattern: ^(luminous|mimic|nautilus)$
+ type: string
+ dashboard:
+ properties:
+ enabled:
+ type: boolean
+ urlPrefix:
+ type: string
+ port:
+ type: integer
+ minimum: 0
+ maximum: 65535
+ dataDirHostPath:
+ pattern: ^/(\S+)
+ type: string
+ mon:
+ properties:
+ allowMultiplePerNode:
+ type: boolean
+ count:
+ maximum: 9
+ minimum: 1
+ type: integer
+ required:
+ - count
+ network:
+ properties:
+ hostNetwork:
+ type: boolean
+ storage:
+ properties:
+ nodes:
+ items: {}
+ type: array
+ useAllDevices: {}
+ useAllNodes:
+ type: boolean
+ required:
+ - mon
+ additionalPrinterColumns:
+ - name: DataDirHostPath
+ type: string
+ description: Directory used on the K8s nodes
+ JSONPath: .spec.dataDirHostPath
+ - name: MonCount
+ type: string
+ description: Number of MONs
+ JSONPath: .spec.mon.count
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ - name: State
+ type: string
+ description: Current State
+ JSONPath: .status.state
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephfilesystems.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystem
+ listKind: CephFilesystemList
+ plural: cephfilesystems
+ singular: cephfilesystem
+ scope: Namespaced
+ version: v1
+ additionalPrinterColumns:
+ - name: MdsCount
+ type: string
+ description: Number of MDSs
+ JSONPath: .spec.metadataServer.activeCount
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstores.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStore
+ listKind: CephObjectStoreList
+ plural: cephobjectstores
+ singular: cephobjectstore
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstoreusers.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStoreUser
+ listKind: CephObjectStoreUserList
+ plural: cephobjectstoreusers
+ singular: cephobjectstoreuser
+ shortNames:
+ - rcou
+ - objectuser
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephblockpools.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBlockPool
+ listKind: CephBlockPoolList
+ plural: cephblockpools
+ singular: cephblockpool
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: volumes.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: rook.io
+ names:
+ kind: Volume
+ listKind: VolumeList
+ plural: volumes
+ singular: volume
+ shortNames:
+ - rv
+ scope: Namespaced
+ version: v1alpha2
+---
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml
new file mode 100644
index 00000000..45122d32
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/role.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.rbacEnable }}
+# The role for the operator to manage resources in the system namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml
new file mode 100644
index 00000000..3ef5897f
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/rolebinding.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.rbacEnable }}
+# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+ labels:
+ operator: rook
+ storage-backend: ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7b42de17
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/serviceaccount.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml
new file mode 100644
index 00000000..38ddf5d7
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/storageclass.yml
@@ -0,0 +1,28 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+ name: replicapool
+ namespace: rook-ceph
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+spec:
+ failureDomain: host
+ replicated:
+ size: 1
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: rook-ceph-block
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+provisioner: ceph.rook.io/block
+parameters:
+ blockPool: replicapool
+ # The value of "clusterNamespace" MUST be the same as the one in which your rook cluster exist
+ clusterNamespace: rook-ceph
+ # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+ fstype: xfs
+# Optional, default reclaimPolicy is "Delete". Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/ \ No newline at end of file
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml
new file mode 100644
index 00000000..98bc3c98
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/templates/tool-box.yml
@@ -0,0 +1,62 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rook-ceph-tools
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-tools
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-tools
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-tools
+ spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: rook-ceph-tools
+ image: rook/ceph:v0.9.1
+ command: ["/tini"]
+ args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ROOK_ADMIN_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: rook-ceph-mon
+ key: admin-secret
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /dev
+ name: dev
+ - mountPath: /sys/bus
+ name: sysbus
+ - mountPath: /lib/modules
+ name: libmodules
+ - name: mon-endpoint-volume
+ mountPath: /etc/rook
+ # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+ hostNetwork: true
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sysbus
+ hostPath:
+ path: /sys/bus
+ - name: libmodules
+ hostPath:
+ path: /lib/modules
+ - name: mon-endpoint-volume
+ configMap:
+ name: rook-ceph-mon-endpoints
+ items:
+ - key: data
+ path: mon-endpoints
diff --git a/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml b/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml
new file mode 100644
index 00000000..7b4d07bd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/00-init/rook-ceph/values.yaml
@@ -0,0 +1,75 @@
+# Default values for rook-ceph-operator
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+ prefix: rook
+ repository: rook/ceph
+ tag: v0.9.1
+ pullPolicy: IfNotPresent
+
+hyperkube:
+ repository: k8s.gcr.io/hyperkube
+ tag: v1.7.12
+ pullPolicy: IfNotPresent
+
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+nodeSelector:
+# Constrain rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+# disktype: ssd
+
+# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
+tolerations: []
+
+mon:
+ healthCheckInterval: "45s"
+ monOutTimeout: "300s"
+
+## Annotations to be added to pod
+annotations: {}
+
+## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
+logLevel: INFO
+
+## If true, create & use RBAC resources
+##
+rbacEnable: false
+
+## If true, create & use PSP resources
+##
+pspEnable: true
+
+## Rook Agent configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
+## libModulesDirPath: The path where the Rook agent can find kernel modules
+# agent:
+# toleration: NoSchedule
+# tolerationKey: key
+# mountSecurityMode: Any
+## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
+# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
+# libModulesDirPath: /lib/modules
+# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
+agent:
+ flexVolumeDirPath: /var/lib/kubelet/volumeplugins
+## Rook Discover configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+# discover:
+# toleration: NoSchedule
+# tolerationKey: key
+
+# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+# Disable it here if you have similar issues.
+# For more details see https://github.com/rook/rook/issues/2417
+enableSelinuxRelabeling: true