author     Dileep Ranganathan <dileep.ranganathan@intel.com>    2019-04-10 15:29:23 -0700
committer  Dileep Ranganathan <dileep.ranganathan@intel.com>    2019-04-10 15:33:20 -0700
commit     e339330753f696f21c13f1ef70087a474a85308d (patch)
tree       a4a5f2cf99644808300d0a2def9e00e5c60b5047 /vnfs
parent     6fca0bf437c0917096a0105292514633c0ec3db3 (diff)
Init package for Distributed Analytics
Init package consists of the Rook/Ceph and Istio Helm charts.

Change-Id: Ifcf9b838231937035d55d4b78f7e3c387af5fe92
Issue-ID: ONAPARC-366
Signed-off-by: Dileep Ranganathan <dileep.ranganathan@intel.com>
Diffstat (limited to 'vnfs')
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/Chart.yaml                                7
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/NOTES.txt                       5
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/_helpers.tpl                   16
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/cluster.yml                   180
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/clusterrole.yaml              165
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/clusterrolebinding.yaml        38
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/dashboard-external-http.yaml   22
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/deployment.yaml               108
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/psp.yaml                       35
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/resources.yaml                177
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/role.yaml                      35
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/rolebinding.yaml               19
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/serviceaccount.yaml             8
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/storageclass.yml               28
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/templates/tool-box.yml                   62
-rw-r--r--  vnfs/DAaaS/00-init/rook-ceph/values.yaml                              75
16 files changed, 980 insertions, 0 deletions
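
The chart introduced by this change can be installed straight from a checkout of the repository. A minimal sketch, assuming a Helm 2 client (current at the time of this change); the release name and namespace are illustrative and not part of the patch:

  helm install --name rook-ceph-init --namespace rook-ceph-system \
    ./vnfs/DAaaS/00-init/rook-ceph

The NOTES.txt added below prints the matching status check once the release is deployed.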
diff --git a/vnfs/DAaaS/00-init/rook-ceph/Chart.yaml b/vnfs/DAaaS/00-init/rook-ceph/Chart.yaml
new file mode 100644
index 00000000..21e90098
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/Chart.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+description: File, Block, and Object Storage Services for your Cloud-Native Environment
+name: rook-ceph
+version: 0.0.1
+icon: https://rook.io/images/logos/rook/rook-logo-color-on-transparent.png
+sources:
+ - https://github.com/rook/rook
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/NOTES.txt b/vnfs/DAaaS/00-init/rook-ceph/templates/NOTES.txt
new file mode 100644
index 00000000..0509b574
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/NOTES.txt
@@ -0,0 +1,5 @@
+The Rook Operator has been installed. Check its status by running:
+ kubectl --namespace {{ .Release.Namespace }} get pods -l "app=rook-ceph-operator"
+
+Visit https://rook.io/docs/rook/master for instructions on how
+to create & configure Rook clusters
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/_helpers.tpl b/vnfs/DAaaS/00-init/rook-ceph/templates/_helpers.tpl
new file mode 100644
index 00000000..f0d83d2e
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/_helpers.tpl
@@ -0,0 +1,16 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "fullname" -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/cluster.yml b/vnfs/DAaaS/00-init/rook-ceph/templates/cluster.yml
new file mode 100644
index 00000000..1cd33e8c
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/cluster.yml
@@ -0,0 +1,180 @@
+#################################################################################
+# This example first defines the necessary namespace and RBAC security objects.
+# The actual Ceph Cluster CRD can be found at the bottom of this file.
+#################################################################################
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+rules:
+- apiGroups: [""]
+ resources: ["configmaps"]
+ verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-cluster-mgmt
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: rook-ceph-system
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-osd
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-osd
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-system
+ namespace: rook-ceph-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ namespace: rook-ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-mgr
+ namespace: rook-ceph
+---
+#################################################################################
+# The Ceph Cluster CRD example
+#################################################################################
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+ name: rook-ceph
+ namespace: rook-ceph
+spec:
+ cephVersion:
+ # For the latest ceph images, see https://hub.docker.com/r/ceph/ceph/tags
+ image: ceph/ceph:v13.2.2-20181023
+ dataDirHostPath: /var/lib/rook
+ dashboard:
+ enabled: true
+ mon:
+ count: 3
+ allowMultiplePerNode: true
+ storage:
+ useAllNodes: true
+ useAllDevices: false
+ config:
+ databaseSizeMB: "1024"
+ journalSizeMB: "1024"
\ No newline at end of file
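
Once the operator has picked up the CephCluster defined above, the rollout can be followed with ordinary kubectl queries; the namespace matches the rook-ceph namespace hard-coded in this file:

  kubectl -n rook-ceph get cephcluster rook-ceph
  kubectl -n rook-ceph get pods -w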
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrole.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrole.yaml
new file mode 100644
index 00000000..58a24d47
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrole.yaml
@@ -0,0 +1,165 @@
+{{- if .Values.rbacEnable }}
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-cluster-mgmt
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ - pods
+ - pods/log
+ - services
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ # Pod access is needed for fencing
+ - pods
+ # Node access is needed for determining nodes where mons should run
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ # PVs and PVCs are managed by the Rook provisioner
+ - persistentvolumes
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ceph.rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+- apiGroups:
+ - rook.io
+ resources:
+ - "*"
+ verbs:
+ - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-mgr-cluster
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - nodes
+ - nodes/proxy
+ verbs:
+ - get
+ - list
+ - watch
+{{- if and .Values.agent .Values.agent.mountSecurityMode (ne .Values.agent.mountSecurityMode "Any") }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-agent-mount
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+{{- end }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: rook-ceph-system-psp-user
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+rules:
+- apiGroups:
+ - extensions
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - 00-rook-ceph-operator
+ verbs:
+ - use
+{{- end }}
+{{- end }}
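
These ClusterRoles only render when rbacEnable is set, which the values.yaml below defaults to false. To inspect the rendered RBAC objects without installing anything, a Helm 2 client can render the chart locally (chart path relative to the repository root):

  helm template ./vnfs/DAaaS/00-init/rook-ceph --set rbacEnable=true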
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrolebinding.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..845eb6d7
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/clusterrolebinding.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.rbacEnable }}
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-global
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- if .Values.pspEnable }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: rook-ceph-system-psp-users
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: rook-ceph-system-psp-user
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/dashboard-external-http.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/dashboard-external-http.yaml
new file mode 100644
index 00000000..ee521152
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/dashboard-external-http.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: rook-ceph-mgr-dashboard-external-http
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ ports:
+ - name: dashboard
+ port: 7000
+ protocol: TCP
+ targetPort: 7000
+ selector:
+ app: rook-ceph-mgr
+ rook_cluster: rook-ceph
+ sessionAffinity: None
+ type: NodePort
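
Because the service above is of type NodePort, the Ceph dashboard becomes reachable on every node once the mgr is up; the assigned port can be read back as follows (the URL form is illustrative):

  kubectl -n rook-ceph get service rook-ceph-mgr-dashboard-external-http
  # then browse to http://<any-node-ip>:<assigned-node-port>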
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/deployment.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/deployment.yaml
new file mode 100644
index 00000000..13c6a763
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/deployment.yaml
@@ -0,0 +1,108 @@
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ name: rook-ceph-operator
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-operator
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-operator
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
+{{- if .Values.annotations }}
+ annotations:
+{{ toYaml .Values.annotations | indent 8 }}
+{{- end }}
+ spec:
+ containers:
+ - name: rook-ceph-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ args: ["ceph", "operator"]
+ env:
+{{- if not .Values.rbacEnable }}
+ - name: RBAC_ENABLED
+ value: "false"
+{{- end }}
+{{- if .Values.agent }}
+{{- if .Values.agent.toleration }}
+ - name: AGENT_TOLERATION
+ value: {{ .Values.agent.toleration }}
+{{- end }}
+{{- if .Values.agent.tolerationKey }}
+ - name: AGENT_TOLERATION_KEY
+ value: {{ .Values.agent.tolerationKey }}
+{{- end }}
+{{- if .Values.agent.mountSecurityMode }}
+ - name: AGENT_MOUNT_SECURITY_MODE
+ value: {{ .Values.agent.mountSecurityMode }}
+{{- end }}
+{{- if .Values.agent.flexVolumeDirPath }}
+ - name: FLEXVOLUME_DIR_PATH
+ value: {{ .Values.agent.flexVolumeDirPath }}
+{{- end }}
+{{- if .Values.agent.libModulesDirPath }}
+ - name: LIB_MODULES_DIR_PATH
+ value: {{ .Values.agent.libModulesDirPath }}
+{{- end }}
+{{- if .Values.agent.mounts }}
+ - name: AGENT_MOUNTS
+ value: {{ .Values.agent.mounts }}
+{{- end }}
+{{- end }}
+{{- if .Values.discover }}
+{{- if .Values.discover.toleration }}
+ - name: DISCOVER_TOLERATION
+ value: {{ .Values.discover.toleration }}
+{{- end }}
+{{- if .Values.discover.tolerationKey }}
+ - name: DISCOVER_TOLERATION_KEY
+ value: {{ .Values.discover.tolerationKey }}
+{{- end }}
+{{- end }}
+ - name: ROOK_LOG_LEVEL
+ value: {{ .Values.logLevel }}
+ - name: ROOK_ENABLE_SELINUX_RELABELING
+ value: {{ .Values.enableSelinuxRelabeling | quote }}
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+{{- if .Values.mon }}
+{{- if .Values.mon.healthCheckInterval }}
+ - name: ROOK_MON_HEALTHCHECK_INTERVAL
+ value: {{ .Values.mon.healthCheckInterval }}
+{{- end }}
+{{- if .Values.mon.monOutTimeout }}
+ - name: ROOK_MON_OUT_TIMEOUT
+ value: {{ .Values.mon.monOutTimeout }}
+{{- end }}
+{{- end }}
+ resources:
+{{ toYaml .Values.resources | indent 10 }}
+{{- if .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml .Values.nodeSelector | indent 8 }}
+{{- end }}
+{{- if .Values.tolerations }}
+ tolerations:
+{{ toYaml .Values.tolerations | indent 8 }}
+{{- end }}
+{{- if .Values.rbacEnable }}
+ serviceAccountName: rook-ceph-system
+{{- end }}
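
A quick way to confirm that the rendered environment (log level, agent and mon settings) actually reached the operator is to read its logs; the namespace here assumes the release was installed into rook-ceph-system:

  kubectl -n rook-ceph-system logs deploy/rook-ceph-operator | head -n 40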
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/psp.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/psp.yaml
new file mode 100644
index 00000000..412b2437
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/psp.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.pspEnable }}
+# PSP for rook-ceph-operator
+
+# Most teams follow the Kubernetes docs and have these PSPs:
+# * privileged (for kube-system namespace)
+# * restricted (for all logged in users)
+#
+# If we named it `rook-ceph-operator`, it would sort right after the `restricted` PSP alphabetically,
+# and the `restricted` capabilities would be applied to `rook-system`. That is why this policy is named
+# `00-rook-ceph-operator`: it stays close to the top of the list and `rook-system` gets the intended PSP.
+#
+# More info on PSP ordering : https://kubernetes.io/docs/concepts/policy/pod-security-policy/#policy-order
+
+apiVersion: extensions/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: 00-rook-ceph-operator
+spec:
+ fsGroup:
+ rule: RunAsAny
+ privileged: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - '*'
+ allowedCapabilities:
+ - '*'
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/resources.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/resources.yaml
new file mode 100644
index 00000000..e296663f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/resources.yaml
@@ -0,0 +1,177 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephclusters.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephCluster
+ listKind: CephClusterList
+ plural: cephclusters
+ singular: cephcluster
+ scope: Namespaced
+ version: v1
+ validation:
+ openAPIV3Schema:
+ properties:
+ spec:
+ properties:
+ cephVersion:
+ properties:
+ allowUnsupported:
+ type: boolean
+ image:
+ type: string
+ name:
+ pattern: ^(luminous|mimic|nautilus)$
+ type: string
+ dashboard:
+ properties:
+ enabled:
+ type: boolean
+ urlPrefix:
+ type: string
+ port:
+ type: integer
+ minimum: 0
+ maximum: 65535
+ dataDirHostPath:
+ pattern: ^/(\S+)
+ type: string
+ mon:
+ properties:
+ allowMultiplePerNode:
+ type: boolean
+ count:
+ maximum: 9
+ minimum: 1
+ type: integer
+ required:
+ - count
+ network:
+ properties:
+ hostNetwork:
+ type: boolean
+ storage:
+ properties:
+ nodes:
+ items: {}
+ type: array
+ useAllDevices: {}
+ useAllNodes:
+ type: boolean
+ required:
+ - mon
+ additionalPrinterColumns:
+ - name: DataDirHostPath
+ type: string
+ description: Directory used on the K8s nodes
+ JSONPath: .spec.dataDirHostPath
+ - name: MonCount
+ type: string
+ description: Number of MONs
+ JSONPath: .spec.mon.count
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+ - name: State
+ type: string
+ description: Current State
+ JSONPath: .status.state
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephfilesystems.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephFilesystem
+ listKind: CephFilesystemList
+ plural: cephfilesystems
+ singular: cephfilesystem
+ scope: Namespaced
+ version: v1
+ additionalPrinterColumns:
+ - name: MdsCount
+ type: string
+ description: Number of MDSs
+ JSONPath: .spec.metadataServer.activeCount
+ - name: Age
+ type: date
+ JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstores.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStore
+ listKind: CephObjectStoreList
+ plural: cephobjectstores
+ singular: cephobjectstore
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephobjectstoreusers.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephObjectStoreUser
+ listKind: CephObjectStoreUserList
+ plural: cephobjectstoreusers
+ singular: cephobjectstoreuser
+ shortNames:
+ - rcou
+ - objectuser
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: cephblockpools.ceph.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: ceph.rook.io
+ names:
+ kind: CephBlockPool
+ listKind: CephBlockPoolList
+ plural: cephblockpools
+ singular: cephblockpool
+ scope: Namespaced
+ version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: volumes.rook.io
+ annotations:
+ "helm.sh/hook": crd-install
+spec:
+ group: rook.io
+ names:
+ kind: Volume
+ listKind: VolumeList
+ plural: volumes
+ singular: volume
+ shortNames:
+ - rv
+ scope: Namespaced
+ version: v1alpha2
+---
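
The CRDs above carry the crd-install hook, so Helm 2 registers them before the rest of the chart is evaluated. After installation they can be verified with:

  kubectl get crd | grep -E 'ceph\.rook\.io|rook\.io'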
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/role.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/role.yaml
new file mode 100644
index 00000000..45122d32
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/role.yaml
@@ -0,0 +1,35 @@
+{{- if .Values.rbacEnable }}
+# The role for the operator to manage resources in the system namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/rolebinding.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/rolebinding.yaml
new file mode 100644
index 00000000..3ef5897f
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/rolebinding.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.rbacEnable }}
+# Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+ labels:
+ operator: rook
+ storage-backend: ceph
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+ name: rook-ceph-system
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/serviceaccount.yaml b/vnfs/DAaaS/00-init/rook-ceph/templates/serviceaccount.yaml
new file mode 100644
index 00000000..7b42de17
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/serviceaccount.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: rook-ceph-system
+ labels:
+ operator: rook
+ storage-backend: ceph
+ chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/storageclass.yml b/vnfs/DAaaS/00-init/rook-ceph/templates/storageclass.yml
new file mode 100644
index 00000000..38ddf5d7
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/storageclass.yml
@@ -0,0 +1,28 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+ name: replicapool
+ namespace: rook-ceph
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+spec:
+ failureDomain: host
+ replicated:
+ size: 1
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: rook-ceph-block
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ "helm.sh/hook": post-install
+provisioner: ceph.rook.io/block
+parameters:
+ blockPool: replicapool
+ # The value of "clusterNamespace" MUST be the same as the one in which your Rook cluster exists
+ clusterNamespace: rook-ceph
+ # Specify the filesystem type of the volume. If not specified, it will use `ext4`.
+ fstype: xfs
+# Optional, default reclaimPolicy is "Delete". Other options are: "Retain", "Recycle" as documented in https://kubernetes.io/docs/concepts/storage/storage-classes/
\ No newline at end of file
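
With the pool and StorageClass above in place (and the class marked as default), a claim can exercise the provisioner. A minimal sketch, with an illustrative claim name:

  kubectl apply -f - <<EOF
  apiVersion: v1
  kind: PersistentVolumeClaim
  metadata:
    name: rook-ceph-block-test
  spec:
    storageClassName: rook-ceph-block
    accessModes:
      - ReadWriteOnce
    resources:
      requests:
        storage: 1Gi
  EOF
  kubectl get pvc rook-ceph-block-test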
diff --git a/vnfs/DAaaS/00-init/rook-ceph/templates/tool-box.yml b/vnfs/DAaaS/00-init/rook-ceph/templates/tool-box.yml
new file mode 100644
index 00000000..98bc3c98
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/templates/tool-box.yml
@@ -0,0 +1,62 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rook-ceph-tools
+ namespace: rook-ceph
+ labels:
+ app: rook-ceph-tools
+ annotations:
+ "helm.sh/hook": "post-install"
+ "helm.sh/hook-weight": "10"
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rook-ceph-tools
+ template:
+ metadata:
+ labels:
+ app: rook-ceph-tools
+ spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ containers:
+ - name: rook-ceph-tools
+ image: rook/ceph:v0.9.1
+ command: ["/tini"]
+ args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ROOK_ADMIN_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: rook-ceph-mon
+ key: admin-secret
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: /dev
+ name: dev
+ - mountPath: /sys/bus
+ name: sysbus
+ - mountPath: /lib/modules
+ name: libmodules
+ - name: mon-endpoint-volume
+ mountPath: /etc/rook
+ # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+ hostNetwork: true
+ volumes:
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: sysbus
+ hostPath:
+ path: /sys/bus
+ - name: libmodules
+ hostPath:
+ path: /lib/modules
+ - name: mon-endpoint-volume
+ configMap:
+ name: rook-ceph-mon-endpoints
+ items:
+ - key: data
+ path: mon-endpoints
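
The toolbox deployment above provides a pod with the ceph CLI preconfigured against the cluster; cluster health can then be checked along these lines:

  TOOLS_POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-tools \
    -o jsonpath='{.items[0].metadata.name}')
  kubectl -n rook-ceph exec -it "$TOOLS_POD" -- ceph status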
diff --git a/vnfs/DAaaS/00-init/rook-ceph/values.yaml b/vnfs/DAaaS/00-init/rook-ceph/values.yaml
new file mode 100644
index 00000000..7b4d07bd
--- /dev/null
+++ b/vnfs/DAaaS/00-init/rook-ceph/values.yaml
@@ -0,0 +1,75 @@
+# Default values for rook-ceph-operator
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+ prefix: rook
+ repository: rook/ceph
+ tag: v0.9.1
+ pullPolicy: IfNotPresent
+
+hyperkube:
+ repository: k8s.gcr.io/hyperkube
+ tag: v1.7.12
+ pullPolicy: IfNotPresent
+
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+nodeSelector:
+# Constrain the rook-ceph-operator Deployment to nodes with the label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+# disktype: ssd
+
+# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
+tolerations: []
+
+mon:
+ healthCheckInterval: "45s"
+ monOutTimeout: "300s"
+
+## Annotations to be added to pod
+annotations: {}
+
+## LogLevel can be set to: TRACE, DEBUG, INFO, NOTICE, WARNING, ERROR or CRITICAL
+logLevel: INFO
+
+## If true, create & use RBAC resources
+##
+rbacEnable: false
+
+## If true, create & use PSP resources
+##
+pspEnable: true
+
+## Rook Agent configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+## flexVolumeDirPath: The path where the Rook agent discovers the flex volume plugins
+## libModulesDirPath: The path where the Rook agent can find kernel modules
+# agent:
+# toleration: NoSchedule
+# tolerationKey: key
+# mountSecurityMode: Any
+## For information on FlexVolume path, please refer to https://rook.io/docs/rook/master/flexvolume.html
+# flexVolumeDirPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
+# libModulesDirPath: /lib/modules
+# mounts: mount1=/host/path:/container/path,/host/path2:/container/path2
+agent:
+ flexVolumeDirPath: /var/lib/kubelet/volumeplugins
+## Rook Discover configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+# discover:
+# toleration: NoSchedule
+# tolerationKey: key
+
+# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
+# Disable it here if you have similar issues.
+# For more details see https://github.com/rook/rook/issues/2417
+enableSelinuxRelabeling: true
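
Any of the defaults above can be overridden at install time; for example, enabling RBAC and raising the operator log level (release name and namespace remain illustrative):

  helm install --name rook-ceph-init --namespace rook-ceph-system \
    --set rbacEnable=true \
    --set logLevel=DEBUG \
    ./vnfs/DAaaS/00-init/rook-ceph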