Diffstat (limited to 'vnfs')
-rw-r--r--  vnfs/DAaaS/inference-core/.helmignore                                     22
-rw-r--r--  vnfs/DAaaS/inference-core/Chart.yaml                                       5
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/.helmignore                   22
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/Chart.yaml                     5
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/NOTES.txt           20
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/_helpers.tpl        41
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/deployment.yaml    138
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/ingress.yaml        55
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/secrets.yaml        31
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/service.yaml        39
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/templates/serviceaccount.yaml 25
-rw-r--r--  vnfs/DAaaS/inference-core/charts/tf-serving/values.yaml                   71
-rw-r--r--  vnfs/DAaaS/inference-core/values.yaml                                     30
13 files changed, 504 insertions(+), 0 deletions(-)
diff --git a/vnfs/DAaaS/inference-core/.helmignore b/vnfs/DAaaS/inference-core/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/inference-core/Chart.yaml b/vnfs/DAaaS/inference-core/Chart.yaml
new file mode 100644
index 00000000..836b1a84
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for inference framework components
+name: inference-core
+version: 0.1.0
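
Usage sketch: because tf-serving ships as a bundled subchart under charts/, per-install overrides for it nest under the subchart name in a values file supplied at install time. The file name and the resnet model below are hypothetical placeholders, not part of the chart.

# overrides.yaml (hypothetical) -- values for the bundled subchart nest under its name
tf-serving:
  modelName: "resnet"
  modelBasePath: "s3://models/resnet"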
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/.helmignore b/vnfs/DAaaS/inference-core/charts/tf-serving/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/Chart.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/Chart.yaml
new file mode 100644
index 00000000..2db1a483
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/Chart.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: Helm chart for the TensorFlow Serving model server
+name: tf-serving
+version: 0.1.0
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/NOTES.txt b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/NOTES.txt
new file mode 100644
index 00000000..2dcf639e
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/NOTES.txt
@@ -0,0 +1,20 @@
+1. Get the TensorFlow Serving URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range $.Values.ingress.paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host }}{{ . }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "tf-serving.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch its status by running 'kubectl get svc -w {{ include "tf-serving.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "tf-serving.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.service.tensorflowServingPort }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export SVC_NAME=$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app={{ include "tf-serving.name" . }},modelName={{ .Values.modelName }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl port-forward svc/$SVC_NAME 8500
+{{- end }}
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/_helpers.tpl b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/_helpers.tpl
new file mode 100644
index 00000000..ce3ce917
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/_helpers.tpl
@@ -0,0 +1,41 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "tf-serving.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "tf-serving.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s-%s" .Release.Name $name .Values.modelName | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "tf-serving.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/* Create the name of tf-serving service account to use */}}
+{{- define "tf-serving.serviceAccountName" -}}
+{{- if and .Values.global.rbac .Values.serviceAccount.create -}}
+    {{ default (include "tf-serving.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
\ No newline at end of file
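
Illustrative rendering of the helpers above, assuming a release named r1, the default chart name, and the default modelName of mnist (values are examples only):

# tf-serving.fullname  ->  r1-tf-serving-mnist   (release-chart-model, truncated to 63 chars)
# tf-serving.chart     ->  tf-serving-0.1.0
metadata:
  name: r1-tf-serving-mnist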
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/deployment.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/deployment.yaml
new file mode 100644
index 00000000..0a909e9f
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/deployment.yaml
@@ -0,0 +1,138 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: {{ include "tf-serving.fullname" . }}
+  labels:
+    app: {{ include "tf-serving.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service | quote }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      app: {{ include "tf-serving.name" . }}
+      release: {{ .Release.Name }}
+      modelName: {{ .Values.modelName }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "tf-serving.name" . }}
+        release: {{ .Release.Name }}
+        modelName: {{ .Values.modelName }}
+    spec:
+      serviceAccountName: {{ template "tf-serving.serviceAccountName" . }}
+      containers:
+        - name: tf-serving
+          image: "{{ .Values.image.tensorflowServing.repository }}:{{ .Values.image.tensorflowServing.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command:
+            - "/usr/bin/tensorflow_model_server"
+          args:
+            - "--port={{ .Values.service.tensorflowServingPort }}"
+            - "--model_name={{ .Values.modelName }}"
+            - "--model_base_path={{ .Values.modelBasePath }}"
+          ports:
+            - name: tf-serving
+              containerPort: {{ .Values.service.tensorflowServingPort }}
+              protocol: TCP
+          env:
+            - name: AWS_ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+                  key: accesskey
+            - name: AWS_SECRET_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+                  key: secretkey
+            {{- range $key, $val := .Values.minio.environment }}
+            - name: {{ $key }}
+              value: {{ $val | quote }}
+            {{- end }}
+          readinessProbe:
+            tcpSocket:
+              port: tf-serving
+            initialDelaySeconds: 15
+            timeoutSeconds: 1
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - mountPath: /models
+              name: models
+        - name: tensorboard
+          image: "{{ .Values.image.tensorboard.repository }}:{{ .Values.image.tensorboard.tag }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          command:
+            - tensorboard
+          args:
+            - --logdir
+            - {{ .Values.modelBasePath }}
+            - --host
+            - 0.0.0.0
+          ports:
+            - name: tensorboard
+              containerPort: {{ .Values.service.tensorboardPort }}
+              protocol: TCP
+          env:
+            - name: AWS_ACCESS_KEY_ID
+              valueFrom:
+                secretKeyRef:
+                  name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+                  key: accesskey
+            - name: AWS_SECRET_ACCESS_KEY
+              valueFrom:
+                secretKeyRef:
+                  name: {{ if .Values.minio.existingSecret }}{{ .Values.minio.existingSecret }}{{ else }}{{ template "tf-serving.fullname" . }}{{ end }}
+                  key: secretkey
+            {{- range $key, $val := .Values.minio.environment }}
+            - name: {{ $key }}
+              value: {{ $val | quote }}
+            {{- end }}
+          livenessProbe:
+            httpGet:
+              path: /
+              port: {{ .Values.service.tensorboardPort }}
+          readinessProbe:
+            httpGet:
+              path: /
+              port: {{ .Values.service.tensorboardPort }}
+          volumeMounts:
+            - mountPath: /output/training_logs
+              name: training-logs-volume
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      volumes:
+        - name: models
+          emptyDir: {}
+        - name: training-logs-volume
+          emptyDir: {}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
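
For reference, the range over .Values.minio.environment emits one quoted env entry per key; with the defaults in values.yaml the rendered S3 entries come out roughly as in the sketch below (trimmed, illustrative only):

- name: S3_ENDPOINT
  value: "minio.minio.svc.cluster.local:9000"
- name: S3_USE_HTTPS
  value: "0"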
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/ingress.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/ingress.yaml
new file mode 100644
index 00000000..b02fc8bb
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/ingress.yaml
@@ -0,0 +1,55 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "tf-serving.fullname" . -}}
+{{- $ingressPaths := .Values.ingress.paths -}}
+apiVersion: extensions/v1beta1
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    app: {{ include "tf-serving.name" . }}
+    release: {{ .Release.Name }}
+    modelName: {{ .Values.modelName }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+{{- if .Values.ingress.tls }}
+  tls:
+  {{- range .Values.ingress.tls }}
+    - hosts:
+      {{- range .hosts }}
+        - {{ . | quote }}
+      {{- end }}
+      secretName: {{ .secretName }}
+  {{- end }}
+{{- end }}
+  rules:
+  {{- range .Values.ingress.hosts }}
+    - host: {{ . | quote }}
+      http:
+        paths:
+        {{- range $ingressPaths }}
+          - path: {{ . }}
+            backend:
+              serviceName: {{ $fullName }}
+              servicePort: tf-serving
+        {{- end }}
+  {{- end }}
+{{- end }}
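
The Ingress is disabled by default; a minimal sketch of values that would enable it, with a placeholder host:

ingress:
  enabled: true
  hosts:
    - tf-serving.example.com   # placeholder host
  paths:
    - /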
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/secrets.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/secrets.yaml
new file mode 100644
index 00000000..f4b8fe89
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/secrets.yaml
@@ -0,0 +1,31 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+{{- if not .Values.minio.existingSecret }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "tf-serving.fullname" . }}
+  labels:
+    app: {{ include "tf-serving.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service | quote }}
+type: Opaque
+data:
+  accesskey: {{ .Values.minio.accessKey | b64enc }}
+  secretkey: {{ .Values.minio.secretKey | b64enc }}
+{{- end }}
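
If credentials should not be templated into the release, minio.existingSecret can instead point at a pre-created Secret carrying the same accesskey/secretkey keys. A hypothetical example; the name and values are placeholders:

apiVersion: v1
kind: Secret
metadata:
  name: my-minio-creds          # referenced via minio.existingSecret
type: Opaque
stringData:                     # the API server base64-encodes these on write
  accesskey: demo-access-key
  secretkey: demo-secret-key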
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/service.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/service.yaml
new file mode 100644
index 00000000..3ddcca66
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/service.yaml
@@ -0,0 +1,39 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "tf-serving.fullname" . }}
+  labels:
+    app: {{ include "tf-serving.name" . }}
+    release: {{ .Release.Name }}
+    modelName: {{ .Values.modelName }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.tensorflowServingPort }}
+      targetPort: tf-serving
+      protocol: TCP
+      name: tf-serving
+    - port: {{ .Values.service.tensorboardPort }}
+      targetPort: tensorboard
+      protocol: TCP
+      name: tensorboard
+  selector:
+    app: {{ include "tf-serving.name" . }}
+    release: {{ .Release.Name }}
+    modelName: {{ .Values.modelName }}
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/templates/serviceaccount.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/serviceaccount.yaml
new file mode 100644
index 00000000..af4987d8
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/templates/serviceaccount.yaml
@@ -0,0 +1,25 @@
+{{/*
+# Copyright 2019 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+*/}}
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "tf-serving.fullname" . }}
+  labels:
+    app: {{ include "tf-serving.name" . }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+    release: {{ .Release.Name }}
+    heritage: {{ .Release.Service | quote }}
diff --git a/vnfs/DAaaS/inference-core/charts/tf-serving/values.yaml b/vnfs/DAaaS/inference-core/charts/tf-serving/values.yaml
new file mode 100644
index 00000000..ebf88683
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/charts/tf-serving/values.yaml
@@ -0,0 +1,71 @@
+# Default values for tf-serving.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+  tensorflowServing:
+    repository: tensorflow/serving
+    tag: latest
+  tensorboard:
+    repository: tensorflow/tensorflow
+    tag: latest
+  pullPolicy: IfNotPresent
+
+nameOverride: ""
+fullnameOverride: ""
+
+## Model information for tf-serving
+modelName: "mnist"
+modelBasePath: "s3://models/mnist"
+
+## Model repository information (Minio)
+minio:
+  existingSecret: ""
+  accessKey: "onapdaas"
+  secretKey: "onapsecretdaas"
+  environment:
+    AWS_REGION: ""
+    S3_REGION: ""
+    S3_ENDPOINT: "minio.minio.svc.cluster.local:9000"
+    AWS_ENDPOINT_URL: "http://minio.minio.svc.cluster.local:9000"
+    S3_USE_HTTPS: 0
+    S3_VERIFY_SSL: 0
+
+
+## Service account for tf-serving to use.
+serviceAccount:
+  create: true
+  name: ""
+
+service:
+  type: ClusterIP
+  tensorflowServingPort: 8500
+  tensorboardPort: 6006
+
+ingress:
+  enabled: false
+  annotations: {}
+  paths: []
+  hosts:
+    - chart-example.local
+  tls: []
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
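
Tying into the Secret sketch above, a hypothetical override of these defaults that drops the inline credentials and points at another MinIO endpoint (all names are placeholders):

minio:
  existingSecret: "my-minio-creds"
  environment:
    S3_ENDPOINT: "minio.example.svc.cluster.local:9000"
    AWS_ENDPOINT_URL: "http://minio.example.svc.cluster.local:9000"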
diff --git a/vnfs/DAaaS/inference-core/values.yaml b/vnfs/DAaaS/inference-core/values.yaml
new file mode 100644
index 00000000..10714b88
--- /dev/null
+++ b/vnfs/DAaaS/inference-core/values.yaml
@@ -0,0 +1,30 @@
+# Copyright © 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#################################################################
+# Global configuration defaults.
+#################################################################
+global:
+  nodePortPrefix: 310
+  rbac: true
+  repository: nexus3.onap.org:10001
+  readinessRepository: oomk8s
+  readinessImage: readiness-check:2.0.0
+  loggingRepository: docker.elastic.co
+  loggingImage: beats/filebeat:5.5.0
+
+#################################################################
+# k8s Operator Day-0 configuration defaults.
+#################################################################
+
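
Helm shares the global block with subcharts, so the tf-serving.serviceAccountName helper reads global.rbac from here; a minimal sketch of disabling RBAC so the pods fall back to the default ServiceAccount:

global:
  rbac: false   # tf-serving.serviceAccountName then resolves to "default"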