summary | refs | log | tree | commit | diff | stats
path: root/vnfs/DAaaS/deploy/operator/charts/sparkoperator
diff options
context:
space:
mode:
author: Dileep Ranganathan <dileep.ranganathan@intel.com> 2019-05-30 12:38:37 -0700
committer: Dileep Ranganathan <dileep.ranganathan@intel.com> 2019-05-30 21:11:52 +0000
commit: 3d5a3e06530c1250d48f7d838c619f3bfbcd019d (patch)
tree: 349e370c43ce7318b3f7eb7736345de6872cbef2 /vnfs/DAaaS/deploy/operator/charts/sparkoperator
parent: 31802660dfe74a8671ae29789f0018f0f887ea1a (diff)
Refactor Distributed Analytics project structure
Modified the project structure to improve maintainability and to add future CI and integration test support. Change-Id: Id30bfb1f83f23785a6b5f99e81f42f752d59c0f8 Issue-ID: ONAPARC-280 Signed-off-by: Dileep Ranganathan <dileep.ranganathan@intel.com>
Diffstat (limited to 'vnfs/DAaaS/deploy/operator/charts/sparkoperator')
-rw-r--r--  vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore                                   |  1
-rw-r--r--  vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml                                    |  5
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md                                     | 42
-rw-r--r--  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl                        | 48
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml     | 79
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml           | 55
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml | 11
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml                    | 44
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml          | 12
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml           | 32
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml              | 24
-rwxr-xr-x  vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml               | 19
-rw-r--r--  vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml                                  | 28
13 files changed, 400 insertions, 0 deletions
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore
new file mode 100644
index 00000000..b7f6f9f1
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/.helmignore
@@ -0,0 +1 @@
+OWNERS
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml
new file mode 100644
index 00000000..86d0c3ab
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: sparkoperator
+version: 0.1.0
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md
new file mode 100755
index 00000000..ba0f05bc
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/README.md
@@ -0,0 +1,42 @@
+### Helm Chart for Spark Operator
+
+This is the Helm chart for the [Spark-on-Kubernetes Operator](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator).
+
+#### Prerequisites
+
+The Operator requires Kubernetes version 1.8 and above because it relies on garbage collection of custom resources. If customization of driver and executor pods (through mounting custom ConfigMaps and volumes) is desired, then the [Mutating Admission Webhook](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/quick-start-guide.md#using-the-mutating-admission-webhook) needs to be enabled and it only became beta in Kubernetes 1.9.
+
+#### Installing the chart
+
+The chart can be installed by running:
+
+```bash
+$ helm repo add incubator http://storage.googleapis.com/kubernetes-charts-incubator
+$ helm install incubator/sparkoperator --namespace spark-operator
+```
+
+Note that you need to use the `--namespace` flag during `helm install` to specify in which namespace you want to install the operator. The namespace can be existing or not. When it's not available, Helm would take care of creating the namespace. Note that this namespace has no relation to the namespace where you would like to deploy Spark jobs (i.e. the setting `sparkJobNamespace` shown in the table below). They can be the same namespace or different ones.
+
+#### Configuration
+
+The following table lists the configurable parameters of the Spark operator chart and their default values.
+
+| Parameter | Description | Default |
+| ------------------------- | ------------------------------------------------------------ | -------------------------------------- |
+| `operatorImageName` | The name of the operator image | `gcr.io/spark-operator/spark-operator` |
+| `operatorVersion` | The version of the operator to install | `v2.4.0-v1beta1-latest` |
+| `imagePullPolicy` | Docker image pull policy | `IfNotPresent` |
+| `sparkJobNamespace` | K8s namespace where Spark jobs are to be deployed | `default` |
+| `enableWebhook` | Whether to enable mutating admission webhook | false |
+| `enableMetrics`           | Whether to expose metrics to be scraped by Prometheus        | true                                  |
+| `controllerThreads` | Number of worker threads used by the SparkApplication controller | 10 |
+| `ingressUrlFormat` | Ingress URL format | "" |
+| `installCrds` | Whether to install CRDs | true |
+| `metricsPort` | Port for the metrics endpoint | 10254 |
+| `metricsEndpoint` | Metrics endpoint | "/metrics" |
+| `metricsPrefix` | Prefix for the metrics | "" |
+| `resyncInterval` | Informer resync interval in seconds | 30 |
+| `webhookPort` | Service port of the webhook server | 8080 |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
+
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl
new file mode 100644
index 00000000..741b500d
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/_helpers.tpl
@@ -0,0 +1,48 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sparkoperator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+ {{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "sparkoperator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+ {{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sparkoperator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+ {{/*
+Create the name of the service account to use
+*/}}
+{{- define "sparkoperator.serviceAccountName" -}}
+{{- if .Values.serviceAccounts.sparkoperator.create -}}
+ {{ default (include "sparkoperator.fullname" .) .Values.serviceAccounts.sparkoperator.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccounts.sparkoperator.name }}
+{{- end -}}
+{{- end -}}
+{{- define "spark.serviceAccountName" -}}
+{{- if .Values.serviceAccounts.spark.create -}}
+ {{ $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" }}
+ {{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccounts.spark.name }}
+{{- end -}}
+{{- end -}}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml
new file mode 100755
index 00000000..fdfc51a2
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-deployment.yaml
@@ -0,0 +1,79 @@
+# If the admission webhook is enabled, then a post-install step is required
+# to generate and install the secret in the operator namespace.
+
+# In the post-install hook, the token corresponding to the operator service account
+# is used to authenticate with the Kubernetes API server to install the secret bundle.
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ {{- if .Values.enableMetrics }}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metricsPort }}"
+ prometheus.io/path: {{ .Values.metricsEndpoint }}
+ {{- end }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+ initializers:
+ pending: []
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ {{- if .Values.enableWebhook }}
+ volumes:
+ - name: webhook-certs
+ secret:
+ secretName: spark-webhook-certs
+ {{- end }}
+ containers:
+ - name: sparkoperator
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ {{- if .Values.enableWebhook }}
+ volumeMounts:
+ - name: webhook-certs
+ mountPath: /etc/webhook-certs
+ {{- end }}
+ {{- if .Values.enableMetrics }}
+ ports:
+ - containerPort: {{ .Values.metricsPort }}
+ {{ end }}
+ args:
+ - -v=2
+ - -namespace={{ .Values.sparkJobNamespace }}
+ - -ingress-url-format={{ .Values.ingressUrlFormat }}
+ - -install-crds={{ .Values.installCrds }}
+ - -controller-threads={{ .Values.controllerThreads }}
+ - -resync-interval={{ .Values.resyncInterval }}
+ - -logtostderr
+ {{- if .Values.enableMetrics }}
+ - -enable-metrics=true
+ - -metrics-labels=app_type
+ - -metrics-port={{ .Values.metricsPort }}
+ - -metrics-endpoint={{ .Values.metricsEndpoint }}
+ - -metrics-prefix={{ .Values.metricsPrefix }}
+ {{- end }}
+ {{- if .Values.enableWebhook }}
+ - -enable-webhook=true
+ - -webhook-svc-namespace={{ .Release.Namespace }}
+ - -webhook-port={{ .Values.webhookPort }}
+ - -webhook-svc-name={{ .Release.Name }}-webhook
+ - -webhook-config-name={{ include "sparkoperator.fullname" . }}-webhook-config
+ {{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml
new file mode 100755
index 00000000..bd5fd3fe
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-rbac.yaml
@@ -0,0 +1,55 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-cr
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: [""]
+ resources: ["services", "configmaps", "secrets"]
+ verbs: ["create", "get", "delete"]
+- apiGroups: ["extensions"]
+ resources: ["ingresses"]
+ verbs: ["create", "get", "delete"]
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: ["events"]
+ verbs: ["create", "update", "patch"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations"]
+ verbs: ["create", "get", "update", "delete"]
+- apiGroups: ["sparkoperator.k8s.io"]
+ resources: ["sparkapplications", "scheduledsparkapplications"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-crb
+ namespace: {{ .Release.Namespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+subjects:
+ - kind: ServiceAccount
+ name: {{ include "sparkoperator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ kind: ClusterRole
+ name: {{ include "sparkoperator.fullname" . }}-cr
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml
new file mode 100755
index 00000000..5216f8dd
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-operator-serviceaccount.yaml
@@ -0,0 +1,11 @@
+{{- if .Values.serviceAccounts.sparkoperator.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "sparkoperator.serviceAccountName" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml
new file mode 100755
index 00000000..fa066053
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-rbac.yaml
@@ -0,0 +1,44 @@
+{{- if and (.Values.rbac.create) (ne .Values.sparkJobNamespace "") }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ namespace: {{ .Values.sparkJobNamespace }}
+ name: spark-role
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+rules:
+- apiGroups:
+ - "" # "" indicates the core API group
+ resources:
+ - "pods"
+ verbs:
+ - "*"
+- apiGroups:
+ - "" # "" indicates the core API group
+ resources:
+ - "services"
+ verbs:
+ - "*"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: spark-role-binding
+ namespace: {{ .Values.sparkJobNamespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "spark.serviceAccountName" . }}
+ namespace: {{ .Values.sparkJobNamespace }}
+roleRef:
+ kind: Role
+ name: spark-role
+ apiGroup: rbac.authorization.k8s.io
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml
new file mode 100755
index 00000000..bb0e55ea
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/spark-serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccounts.spark.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "spark.serviceAccountName" . }}
+ namespace: {{ .Values.sparkJobNamespace }}
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml
new file mode 100755
index 00000000..d6d9df7c
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-cleanup-job.yaml
@@ -0,0 +1,32 @@
+{{ if .Values.enableWebhook }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-cleanup
+ annotations:
+ "helm.sh/hook": pre-delete, pre-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ template:
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ restartPolicy: OnFailure
+ containers:
+ - name: main
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "curl -ik \
+ -X DELETE \
+ -H \"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" \
+ -H \"Accept: application/json\" \
+ -H \"Content-Type: application/json\" \
+ https://kubernetes.default.svc/api/v1/namespaces/{{ .Release.Namespace }}/secrets/spark-webhook-certs"
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml
new file mode 100755
index 00000000..a42c3097
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-init-job.yaml
@@ -0,0 +1,24 @@
+{{ if .Values.enableWebhook }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: {{ include "sparkoperator.fullname" . }}-init
+ annotations:
+ "helm.sh/hook": post-install, post-upgrade
+ "helm.sh/hook-delete-policy": hook-succeeded
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ template:
+ spec:
+ serviceAccountName: {{ include "sparkoperator.serviceAccountName" . }}
+ restartPolicy: OnFailure
+ containers:
+ - name: main
+ image: {{ .Values.operatorImageName }}:{{ .Values.operatorVersion }}
+ imagePullPolicy: {{ .Values.imagePullPolicy }}
+ command: ["/usr/bin/gencerts.sh", "-n", "{{ .Release.Namespace }}", "-s", "{{ .Release.Name }}-webhook", "-p"]
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml
new file mode 100755
index 00000000..42c5bc62
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/templates/webhook-service.yaml
@@ -0,0 +1,19 @@
+{{ if .Values.enableWebhook }}
+kind: Service
+apiVersion: v1
+metadata:
+ name: {{ .Release.Name }}-webhook
+ labels:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ helm.sh/chart: {{ include "sparkoperator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: 443
+ targetPort: 8080
+ name: webhook
+ selector:
+ app.kubernetes.io/name: {{ include "sparkoperator.name" . }}
+ app.kubernetes.io/version: {{ .Values.operatorVersion }}
+{{ end }}
diff --git a/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml
new file mode 100644
index 00000000..bfb03eab
--- /dev/null
+++ b/vnfs/DAaaS/deploy/operator/charts/sparkoperator/values.yaml
@@ -0,0 +1,28 @@
+operatorImageName: gcr.io/spark-operator/spark-operator
+operatorVersion: v2.4.0-v1beta1-latest
+imagePullPolicy: IfNotPresent
+
+rbac:
+ create: true
+
+serviceAccounts:
+ spark:
+ create: true
+ name:
+ sparkoperator:
+ create: true
+ name:
+
+sparkJobNamespace: ""
+
+enableWebhook: false
+enableMetrics: true
+
+controllerThreads: 10
+ingressUrlFormat: ""
+installCrds: true
+metricsPort: 10254
+metricsEndpoint: "/metrics"
+metricsPrefix: ""
+resyncInterval: 30
+webhookPort: 8080