-rw-r--r--  kud/deployment_infra/helm/cpu-manager/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/Chart.yaml | 25
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl | 63
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml | 59
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml | 91
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml | 162
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml | 12
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml | 156
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/values.yaml | 63
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/Chart.yaml | 24
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/network.yaml | 117
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml | 89
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml | 157
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl | 62
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml | 54
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml | 16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml | 16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml | 168
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml | 55
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml | 102
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml | 107
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml | 37
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/service.yaml | 16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml | 12
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/values.yaml | 177
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/Chart.yaml | 25
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl | 52
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml | 8
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml | 60
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml | 70
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/values.yaml | 49
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/Chart.yaml | 27
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml | 73
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml | 109
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml | 131
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml | 153
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml | 89
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl | 63
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml | 54
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml | 30
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml | 70
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml | 89
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml | 107
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml | 44
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml | 17
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/values.yaml | 100
-rw-r--r--  kud/deployment_infra/helm/sriov-network/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/sriov-network/Chart.yaml | 24
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl | 34
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml | 40
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml | 52
-rw-r--r--  kud/deployment_infra/helm/sriov-network/values.yaml | 144
-rw-r--r--  kud/deployment_infra/installers/Dockerfile.iavf-driver-installer | 20
-rw-r--r--  kud/deployment_infra/installers/Dockerfile.qat-driver-installer | 21
-rw-r--r--  kud/deployment_infra/installers/Makefile | 10
-rw-r--r--  kud/deployment_infra/installers/_common.sh | 41
-rw-r--r--  kud/deployment_infra/installers/_qat-driver-installer.sh | 514
-rwxr-xr-x  kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh | 134
-rwxr-xr-x  kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh | 148
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml | 10
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml | 10
-rw-r--r--  kud/tests/_common.sh | 4
-rwxr-xr-x  kud/tests/ovn4nfv.sh | 4
-rwxr-xr-x  kud/tests/qat.sh | 2
-rw-r--r--  kud/tests/sriov-network.sh | 102
-rwxr-xr-x  kud/tests/sriov.sh | 2
-rw-r--r--  src/k8splugin/plugins/generic/plugin.go | 2
-rw-r--r--  src/k8splugin/plugins/namespace/plugin.go | 2
-rw-r--r--  src/k8splugin/plugins/service/plugin.go | 2
72 files changed, 4688 insertions, 9 deletions
diff --git a/kud/deployment_infra/helm/cpu-manager/.helmignore b/kud/deployment_infra/helm/cpu-manager/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/cpu-manager/Chart.yaml b/kud/deployment_infra/helm/cpu-manager/Chart.yaml
new file mode 100644
index 00000000..5635f21f
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/Chart.yaml
@@ -0,0 +1,25 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v1.4.1-no-taint
+description: |
+ CPU Manager for Kubernetes provides basic core affinity for
+ NFV-style workloads.
+name: cpu-manager
+sources:
+ - https://github.com/integratedcloudnative/CPU-Manager-for-Kubernetes
+home: https://github.com/integratedcloudnative/CPU-Manager-for-Kubernetes
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl b/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl
new file mode 100644
index 00000000..a0f94dc0
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cpu-manager.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cpu-manager.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cpu-manager.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cpu-manager.labels" -}}
+helm.sh/chart: {{ include "cpu-manager.chart" . }}
+{{ include "cpu-manager.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "cpu-manager.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "cpu-manager.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cpu-manager.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default ( printf "%s-serviceaccount" (include "cpu-manager.fullname" .) ) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
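
For reference, the "cpu-manager.labels" helper above produces a fixed label set per release; a sketch of its output for a hypothetical release named "cmk" (the release name is illustrative, the rest follows from Chart.yaml):

    # Rendered {{ include "cpu-manager.labels" . }} for a release named "cmk" (illustrative)
    helm.sh/chart: cpu-manager-0.1.0
    app.kubernetes.io/name: cpu-manager
    app.kubernetes.io/instance: cmk
    app.kubernetes.io/version: "v1.4.1-no-taint"
    app.kubernetes.io/managed-by: Helm

With serviceAccount.name left unset, "cpu-manager.serviceAccountName" would resolve to "cmk-cpu-manager-serviceaccount" for the same release.
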
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml b/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml
new file mode 100644
index 00000000..003a5b61
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml
@@ -0,0 +1,59 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-custom-resource-definition-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["intel.com"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions", "customresourcedefinitions.extensions"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-daemonset-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["extensions", "apps"]
+ resources: ["daemonsets", "daemonsets.extensions", "daemonsets.apps"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-version-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+ - nonResourceURLs: ["*"]
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-installer
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["", "apps", "extensions", "admissionregistration.k8s.io"]
+ resources: ["secrets", "configmaps", "deployments", "services", "mutatingwebhookconfigurations"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-node-lister
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["*"]
+{{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..2d08c820
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml
@@ -0,0 +1,91 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-daemonset
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-daemonset-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-node
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:node
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-crd
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-custom-resource-definition-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-version
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-version-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-webhook-installer
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-webhook-installer
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-node-lister
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-node-lister
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml b/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml
new file mode 100644
index 00000000..8b545133
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml
@@ -0,0 +1,162 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-reconcile-nodereport-ds
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-reconcile-nodereport-ds
+spec:
+ selector:
+ matchLabels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 6 }}
+ app: cmk-reconcile-nodereport-ds
+ template:
+ metadata:
+ labels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 8 }}
+ app: cmk-reconcile-nodereport-ds
+ annotations:
+ {{- toYaml .Values.annotations | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ serviceAccountName: {{ include "cpu-manager.serviceAccountName" . }}
+ initContainers:
+ - args:
+ - "/cmk/cmk.py init --conf-dir=/etc/cmk --num-exclusive-cores=$NUM_EXCLUSIVE_CORES --num-shared-cores=$NUM_SHARED_CORES"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_PROC_FS
+ value: '/proc'
+ - name: NUM_EXCLUSIVE_CORES
+ value: {{ .Values.exclusiveNumCores | quote }}
+ - name: NUM_SHARED_CORES
+ value: {{ .Values.sharedNumCores | quote }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-init-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py discover --conf-dir=/etc/cmk {{ if .Values.untaintRequired }}--no-taint{{ end }}"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-discover-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py install --install-dir=/opt/bin"
+ command:
+ - "/bin/bash"
+ - "-c"
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-install-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/opt/bin"
+ name: cmk-install-dir
+ containers:
+ - args:
+ - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- reconcile --interval=$CMK_RECONCILE_SLEEP_TIME --publish"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_RECONCILE_SLEEP_TIME
+ value: {{ .Values.reconcileSleepTime | quote }}
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-reconcile
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/host/proc"
+ name: host-proc
+ readOnly: true
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- node-report --interval=$CMK_NODE_REPORT_SLEEP_TIME --publish"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_NODE_REPORT_SLEEP_TIME
+ value: {{ .Values.nodeReportSleepTime | quote }}
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-nodereport
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/host/proc"
+ name: host-proc
+ readOnly: true
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ volumes:
+ - hostPath:
+ path: "/proc"
+ name: host-proc
+ - hostPath:
+ path: {{ .Values.configDir | quote }}
+ name: cmk-conf-dir
+ - hostPath:
+ path: {{ .Values.installDir | quote }}
+ name: cmk-install-dir
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml b/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml
new file mode 100644
index 00000000..8f0b98e0
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml b/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml
new file mode 100644
index 00000000..62e9fdbf
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml
@@ -0,0 +1,156 @@
+{{- if .Values.webhook.enabled -}}
+{{- $altNames := list "cmk-webhook-service" ( printf "cmk-webhook-service.%s" .Release.Namespace ) ( printf "cmk-webhook-service.%s.svc" .Release.Namespace ) -}}
+{{- $cert := genSelfSignedCert ( printf "cmk-webhook-service.%s.svc" .Release.Namespace ) nil $altNames 36500 -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-certs
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+data:
+ cert.pem: {{ $cert.Cert | b64enc }}
+ key.pem: {{ $cert.Key | b64enc }}
+type: Opaque
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-configmap
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+data:
+ server.yaml: |
+ server:
+ binding-address: "0.0.0.0"
+ port: {{ .Values.webhook.service.port }}
+ cert: "/etc/ssl/cert.pem"
+ key: "/etc/ssl/key.pem"
+ mutations: "/etc/webhook/mutations.yaml"
+ mutations.yaml: |
+ mutations:
+ perPod:
+ metadata:
+ annotations:
+ cmk.intel.com/resources-injected: "true"
+ spec:
+ serviceAccount: {{ include "cpu-manager.serviceAccountName" . }}
+ tolerations:
+ - operator: Exists
+ volumes:
+ - name: cmk-host-proc
+ hostPath:
+ path: "/proc"
+ - name: cmk-config-dir
+ hostPath:
+ path: {{ .Values.configDir | quote }}
+ - name: cmk-install-dir
+ hostPath:
+ path: {{ .Values.installDir | quote }}
+ perContainer:
+ env:
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ volumeMounts:
+ - name: cmk-host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-config-dir
+ mountPath: /etc/cmk
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmk-webhook-service
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+spec:
+ ports:
+ - port: {{ .Values.webhook.service.port }}
+ targetPort: 443
+ selector:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+ name: {{ include "cpu-manager.fullname" . }}-webhook-deployment
+spec:
+ replicas: {{ .Values.webhook.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 6 }}
+ app: cmk-webhook-app
+ template:
+ metadata:
+ labels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 8 }}
+ app: cmk-webhook-app
+ annotations:
+ {{- toYaml .Values.webhook.annotations | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ tolerations:
+ - operator: "Exists"
+ containers:
+ - args:
+ - "/cmk/cmk.py webhook --conf-file /etc/webhook/server.yaml"
+ command:
+ - "/bin/bash"
+ - "-c"
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-webhook
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ volumeMounts:
+ - mountPath: /etc/webhook
+ name: cmk-webhook-configmap
+ - mountPath: /etc/ssl
+ name: cmk-webhook-certs
+ readOnly: True
+ volumes:
+ - name: cmk-webhook-configmap
+ configMap:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-configmap
+ - name: cmk-webhook-certs
+ secret:
+ secretName: {{ include "cpu-manager.fullname" . }}-webhook-certs
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+ name: {{ include "cpu-manager.fullname" . }}-webhook-config
+webhooks:
+- clientConfig:
+ caBundle: {{ $cert.Cert | b64enc }}
+ service:
+ name: cmk-webhook-service
+ namespace: {{ $.Release.Namespace }}
+ path: /mutate
+ failurePolicy: Ignore
+ name: cmk.intel.com
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pods
+{{- end }}
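
To make the mutations.yaml above concrete, this is roughly what an admitted pod looks like after the cmk.intel.com webhook applies the per-pod and per-container mutations (the pod name, container image, and service account value are hypothetical; the service account follows the release name, the host paths follow configDir/installDir):

    apiVersion: v1
    kind: Pod
    metadata:
      name: example-workload              # hypothetical pod
      annotations:
        cmk.intel.com/resources-injected: "true"
    spec:
      serviceAccount: cmk-cpu-manager-serviceaccount   # depends on the release name
      tolerations:
      - operator: Exists
      containers:
      - name: app                         # hypothetical container
        image: example/app:latest         # hypothetical image
        env:
        - name: CMK_PROC_FS
          value: "/host/proc"
        volumeMounts:
        - name: cmk-host-proc
          mountPath: /host/proc
          readOnly: true
        - name: cmk-config-dir
          mountPath: /etc/cmk
        - name: cmk-install-dir
          mountPath: /opt/bin
      volumes:
      - name: cmk-host-proc
        hostPath:
          path: /proc
      - name: cmk-config-dir
        hostPath:
          path: /etc/cmk                  # .Values.configDir
      - name: cmk-install-dir
        hostPath:
          path: /opt/bin                  # .Values.installDir
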
diff --git a/kud/deployment_infra/helm/cpu-manager/values.yaml b/kud/deployment_infra/helm/cpu-manager/values.yaml
new file mode 100644
index 00000000..29783441
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/values.yaml
@@ -0,0 +1,63 @@
+# sharedNumCores is the number of CPU cores to be assigned to the "shared" pool on each of the nodes
+sharedNumCores: 1
+# exclusiveNumCores is the number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+exclusiveNumCores: 2
+# untaintRequired removes the cmk=true:NoSchedule taint from each of the nodes
+untaintRequired: true
+# configDir is the CMK config dir in the host file system
+configDir: "/etc/cmk"
+# installDir is the CMK installation dir in the host file system
+installDir: "/opt/bin"
+# reconcileSleepTime is the sleep interval in seconds between consecutive CMK reconcile runs
+reconcileSleepTime: 60
+# nodeReportSleepTime is the sleep interval in seconds between consecutive CMK node report runs
+nodeReportSleepTime: 60
+
+image:
+ repository: integratedcloudnative/cmk
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+
+resources: {}
+
+annotations: {}
+
+affinity: {}
+
+tolerations: {}
+
+webhook:
+ # webhook.enabled runs the CMK mutating admission webhook server
+ enabled: true
+
+ service:
+ port: 443
+
+ replicaCount: 1
+
+ annotations: {}
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
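
The pool sizes, taint handling, and webhook settings above are the values most likely to be tuned per site; a minimal overrides file, with illustrative numbers, could be passed to helm install/upgrade via -f:

    # overrides.yaml (illustrative values)
    exclusiveNumCores: 4
    sharedNumCores: 2
    untaintRequired: false        # keep the cmk=true:NoSchedule taint in place
    reconcileSleepTime: 120
    webhook:
      replicaCount: 2
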
diff --git a/kud/deployment_infra/helm/ovn4nfv/.helmignore b/kud/deployment_infra/helm/ovn4nfv/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/ovn4nfv/Chart.yaml b/kud/deployment_infra/helm/ovn4nfv/Chart.yaml
new file mode 100644
index 00000000..5a4e69c1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v2.2.0
+description: |
+ OVN4NFV K8s Plugin - Network controller
+home: https://github.com/opnfv/ovn4nfv-k8s-plugin
+name: ovn4nfv
+sources:
+ - https://github.com/opnfv/ovn4nfv-k8s-plugin
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml
new file mode 100644
index 00000000..793261e0
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml
@@ -0,0 +1,117 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: Network
+ listKind: NetworkList
+ plural: networks
+ singular: network
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ cniType:
+ description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+ Important: Run "operator-sdk generate k8s" to regenerate code after
+ modifying this file Add custom validation using kubebuilder tags:
+ https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
+ type: string
+ dns:
+ properties:
+ domain:
+ type: string
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ type: string
+ type: array
+ search:
+ items:
+ type: string
+ type: array
+ type: object
+ ipv4Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ ipv6Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ routes:
+ items:
+ properties:
+ dst:
+ type: string
+ gw:
+ type: string
+ required:
+ - dst
+ type: object
+ type: array
+ required:
+ - cniType
+ - ipv4Subnets
+ type: object
+ status:
+ properties:
+ state:
+ description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
+ of cluster Important: Run "operator-sdk generate k8s" to regenerate
+ code after modifying this file Add custom validation using kubebuilder
+ tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
+ type: string
+ required:
+ - state
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
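
An illustrative Network custom resource that satisfies this schema (all field values are made up; only cniType and ipv4Subnets are required, and each subnet needs at least name and subnet):

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: Network
    metadata:
      name: example-net                   # illustrative
    spec:
      cniType: ovn4nfv
      ipv4Subnets:
      - name: subnet1
        subnet: 172.16.24.0/24
        gateway: 172.16.24.1/24
        excludeIps: 172.16.24.2..172.16.24.5
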
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml
new file mode 100644
index 00000000..77257c3b
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml
@@ -0,0 +1,89 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkchainings.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: NetworkChaining
+ listKind: NetworkChainingList
+ plural: networkchainings
+ singular: networkchaining
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: NetworkChaining is the Schema for the networkchainings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NetworkChainingSpec defines the desired state of NetworkChaining
+ properties:
+ chainType:
+ type: string
+ routingSpec:
+ properties:
+ leftNetwork:
+ items:
+ properties:
+ gatewayIp:
+ type: string
+ networkName:
+ type: string
+ required:
+ - gatewayIp
+ - networkName
+ type: object
+ type: array
+ namespace:
+ type: string
+ networkChain:
+ type: string
+ rightNetwork:
+ items:
+ properties:
+ gatewayIp:
+ type: string
+ networkName:
+ type: string
+ required:
+ - gatewayIp
+ - networkName
+ type: object
+ type: array
+ required:
+ - leftNetwork
+ - namespace
+ - networkChain
+ - rightNetwork
+ type: object
+ required:
+ - chainType
+ - routingSpec
+ type: object
+ status:
+ description: NetworkChainingStatus defines the observed state of NetworkChaining
+ properties:
+ state:
+ type: string
+ required:
+ - state
+ type: object
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
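
For the networkchainings schema, an illustrative resource (all values are made up; chainType plus routingSpec.namespace, networkChain, leftNetwork, and rightNetwork are the required fields):

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: NetworkChaining
    metadata:
      name: example-chain                 # illustrative
    spec:
      chainType: Routing
      routingSpec:
        namespace: default
        networkChain: app=slb,dyn-net1,app=ngfw,dyn-net2,app=sdwan
        leftNetwork:
        - networkName: left-pnetwork
          gatewayIp: 172.30.10.2
        rightNetwork:
        - networkName: right-pnetwork
          gatewayIp: 172.30.20.2
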
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml
new file mode 100644
index 00000000..fa058ff2
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml
@@ -0,0 +1,157 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: providernetworks.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: ProviderNetwork
+ listKind: ProviderNetworkList
+ plural: providernetworks
+ singular: providernetwork
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: ProviderNetwork is the Schema for the providernetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ProviderNetworkSpec defines the desired state of ProviderNetwork
+ properties:
+ cniType:
+ description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+ Important: Run "operator-sdk generate k8s" to regenerate code after
+ modifying this file Add custom validation using kubebuilder tags:
+ https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
+ type: string
+ direct:
+ properties:
+ directNodeSelector:
+ type: string
+ nodeLabelList:
+ items:
+ type: string
+ type: array
+ providerInterfaceName:
+ type: string
+ required:
+ - directNodeSelector
+ - providerInterfaceName
+ type: object
+ dns:
+ properties:
+ domain:
+ type: string
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ type: string
+ type: array
+ search:
+ items:
+ type: string
+ type: array
+ type: object
+ ipv4Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ ipv6Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ providerNetType:
+ type: string
+ routes:
+ items:
+ properties:
+ dst:
+ type: string
+ gw:
+ type: string
+ required:
+ - dst
+ type: object
+ type: array
+ vlan:
+ properties:
+ logicalInterfaceName:
+ type: string
+ nodeLabelList:
+ items:
+ type: string
+ type: array
+ providerInterfaceName:
+ type: string
+ vlanId:
+ type: string
+ vlanNodeSelector:
+ type: string
+ required:
+ - providerInterfaceName
+ - vlanId
+ - vlanNodeSelector
+ type: object
+ required:
+ - cniType
+ - ipv4Subnets
+ - providerNetType
+ type: object
+ status:
+ description: ProviderNetworkStatus defines the observed state of ProviderNetwork
+ properties:
+ state:
+ description: 'INSERT ADDITIONAL STATUS FIELD - define observed state
+ of cluster Important: Run "operator-sdk generate k8s" to regenerate
+ code after modifying this file Add custom validation using kubebuilder
+ tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html'
+ type: string
+ required:
+ - state
+ type: object
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
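
An illustrative VLAN-type ProviderNetwork against this schema (values are made up; cniType, ipv4Subnets, and providerNetType are required, and the vlan block needs providerInterfaceName, vlanId, and vlanNodeSelector):

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: ProviderNetwork
    metadata:
      name: example-pnet                  # illustrative
    spec:
      cniType: ovn4nfv
      providerNetType: VLAN
      ipv4Subnets:
      - name: subnet1
        subnet: 172.30.10.0/24
        gateway: 172.30.10.1/24
      vlan:
        vlanId: "100"
        providerInterfaceName: eth1
        logicalInterfaceName: eth1.100
        vlanNodeSelector: specific
        nodeLabelList:
        - kubernetes.io/hostname=worker-1   # illustrative node label
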
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl b/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl
new file mode 100644
index 00000000..4abd970e
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ovn4nfv.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ovn4nfv.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ovn4nfv.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "ovn4nfv.labels" -}}
+helm.sh/chart: {{ include "ovn4nfv.chart" . }}
+{{ include "ovn4nfv.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "ovn4nfv.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "ovn4nfv.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ovn4nfv.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "ovn4nfv.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml
new file mode 100644
index 00000000..f10b111f
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ - configmaps
+ - secrets
+ - nodes
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ - statefulsets
+ verbs:
+ - '*'
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - get
+ - create
+- apiGroups:
+ - apps
+ resourceNames:
+ - nfn-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - k8s.plugin.opnfv.org
+ resources:
+ - '*'
+ - providernetworks
+ verbs:
+ - '*'
+{{- end }}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..0891458a
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rbac.create }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "ovn4nfv.fullname" . }}
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:serviceaccounts
+{{- end }}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml
new file mode 100644
index 00000000..7e1beba1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.nfnOperator.config.name }}
+data:
+ {{ .Values.nfnOperator.config.data | nindent 2 }}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Values.cni.config.name }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+data:
+ ovn4nfv_k8s.conf: | {{ .Values.cni.config.ovn4nfv_k8s | nindent 4 }}
+ 00-network.conf: | {{ .Values.cni.config.network | nindent 4 }}
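
With the chart's default values (see values.yaml later in this change), the second ConfigMap above renders roughly as follows (labels omitted for brevity):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: ovn4nfv-cni-config
    data:
      ovn4nfv_k8s.conf: |
        [logging]
        loglevel=5
        logfile=/var/log/openvswitch/ovn4k8s.log

        [cni]
        conf-dir=/etc/cni/net.d
        plugin=ovn4nfvk8s-cni

        [kubernetes]
        kubeconfig=/etc/cni/net.d/ovn4nfv-k8s.d/ovn4nfv-k8s.kubeconfig
      00-network.conf: |
        {
          "name": "ovn4nfv-k8s-plugin",
          "type": "ovn4nfvk8s-cni",
          "cniVersion": "0.3.1"
        }
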
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml
new file mode 100644
index 00000000..bc8285f4
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml
@@ -0,0 +1,168 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-cni
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: cni
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: cni
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: cni
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.cni.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cni.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: ovn4nfv
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "cni"]
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "rm /host/etc/cni/net.d/00-network.conf"]
+ resources:
+ {{- toYaml .Values.cni.resources | nindent 10 }}
+ securityContext:
+ {{- toYaml .Values.cni.securityContext | nindent 10 }}
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: cniconf
+ mountPath: /host/etc/openvswitch
+ - name: ovn4nfv-cfg
+ mountPath: /tmp/ovn4nfv-conf
+ - name: ovn4nfv-cni-net-conf
+ mountPath: /tmp/ovn4nfv-cni
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: cniconf
+ hostPath:
+ path: /etc/openvswitch
+ - name: ovn4nfv-cfg
+ configMap:
+ name: {{ .Values.cni.config.name }}
+ items:
+ - key: ovn4nfv_k8s.conf
+ path: ovn4nfv_k8s.conf
+ - name: ovn4nfv-cni-net-conf
+ configMap:
+ name: {{ .Values.cni.config.name }}
+ items:
+ - key: 00-network.conf
+ path: 00-network.conf
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-nfn-agent
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-agent
+spec:
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: nfn-agent
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: nfn-agent
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+ {{- with .Values.nfnAgent.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnAgent.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: nfn-agent
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "agent"]
+ resources:
+ {{- toYaml .Values.nfnAgent.resources | nindent 10 }}
+ env:
+ - name: NFN_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.nfnAgent.securityContext | nindent 10 }}
+ volumeMounts:
+ - mountPath: /var/run/dbus/
+ name: host-var-run-dbus
+ readOnly: true
+ - mountPath: /run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/openvswitch
+ name: host-var-run-ovs
+ - mountPath: /var/run
+ name: host-var-run
+ - mountPath: /host/proc
+ name: host-proc
+ - mountPath: /host/sys
+ name: host-sys
+ - mountPath: /var/run/ovn4nfv-k8s-plugin
+ name: host-var-cniserver-socket-dir
+ volumes:
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-var-run-ovs
+ hostPath:
+ path: /var/run/openvswitch
+ - name: host-var-run-dbus
+ hostPath:
+ path: /var/run/dbus
+ - name: host-var-cniserver-socket-dir
+ hostPath:
+ path: /var/run/ovn4nfv-k8s-plugin
+ - name: host-var-run
+ hostPath:
+ path: /var/run
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-sys
+ hostPath:
+ path: /sys
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml
new file mode 100644
index 00000000..7613fef5
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-nfn-operator
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-operator
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: nfn-operator
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: nfn-operator
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.nfnOperator.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnOperator.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnOperator.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: nfn-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "operator"]
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.nfnOperator.config.name }}
+ ports:
+ - containerPort: 50000
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_NAME
+ value: "nfn-operator"
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml
new file mode 100644
index 00000000..2b71a9dd
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml
@@ -0,0 +1,102 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-ovn-controller
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-controller
+spec:
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: ovn-controller
+ updateStrategy:
+ type: OnDelete
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: ovn-controller
+ spec:
+ {{- with .Values.ovn.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+ {{- with .Values.ovnController.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.ovnController.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.ovnController.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: ovn-controller
+ image: "{{ .Values.ovn.image.repository }}:{{ .Values.ovn.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.ovn.image.pullPolicy }}
+ command: ["ovn4nfv-k8s", "start_ovn_controller"]
+ resources:
+ {{- toYaml .Values.ovnController.resources | nindent 12 }}
+ securityContext:
+ {{- toYaml .Values.ovnController.securityContext | nindent 12 }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: host-modules
+ readOnly: true
+ - mountPath: /var/run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/ovn
+ name: host-run-ovn
+ - mountPath: /sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /etc/openvswitch
+ name: host-config-openvswitch
+ - mountPath: /var/log/openvswitch
+ name: host-log-ovs
+ - mountPath: /var/log/ovn
+ name: host-log-ovn
+ readinessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_controller"]
+ periodSeconds: 5
+ livenessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_controller"]
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 5
+ volumes:
+ - name: host-modules
+ hostPath:
+ path: /lib/modules
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-run-ovn
+ hostPath:
+ path: /run/ovn
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-config-openvswitch
+ hostPath:
+ path: /etc/origin/openvswitch
+ - name: host-log-ovs
+ hostPath:
+ path: /var/log/openvswitch
+ - name: host-log-ovn
+ hostPath:
+ path: /var/log/ovn
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml
new file mode 100644
index 00000000..a9dd4288
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml
@@ -0,0 +1,107 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-ovn-control-plane
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxSurge: 0%
+ maxUnavailable: 100%
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: ovn-control-plane
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: ovn-control-plane
+ spec:
+ {{- with .Values.ovn.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.ovnControlPlane.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.ovnControlPlane.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.ovnControlPlane.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: ovn-control-plane
+ image: "{{ .Values.ovn.image.repository }}:{{ .Values.ovn.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.ovn.image.pullPolicy }}
+ command: ["ovn4nfv-k8s", "start_ovn_control_plane"]
+ resources:
+ {{- toYaml .Values.ovnControlPlane.resources | nindent 12 }}
+ securityContext:
+ {{- toYaml .Values.ovnControlPlane.securityContext | nindent 12 }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /var/run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/ovn
+ name: host-run-ovn
+ - mountPath: /sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /etc/openvswitch
+ name: host-config-openvswitch
+ - mountPath: /var/log/openvswitch
+ name: host-log-ovs
+ - mountPath: /var/log/ovn
+ name: host-log-ovn
+ readinessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_control_plane"]
+ periodSeconds: 3
+ livenessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_control_plane"]
+ initialDelaySeconds: 30
+ periodSeconds: 7
+ failureThreshold: 5
+ volumes:
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-run-ovn
+ hostPath:
+ path: /run/ovn
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-config-openvswitch
+ hostPath:
+ path: /etc/origin/openvswitch
+ - name: host-log-ovs
+ hostPath:
+ path: /var/log/openvswitch
+ - name: host-log-ovn
+ hostPath:
+ path: /var/log/ovn
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml
new file mode 100644
index 00000000..c6d96e49
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml
@@ -0,0 +1,37 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: ovn-nb-tcp
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ ports:
+ - name: ovn-nb-tcp
+ protocol: TCP
+ port: {{ .Values.ovnControlPlane.nbService.port }}
+ targetPort: 6641
+ type: {{ .Values.ovnControlPlane.nbService.type }}
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: ovn-control-plane
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: ovn-sb-tcp
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ ports:
+ - name: ovn-sb-tcp
+ protocol: TCP
+ port: {{ .Values.ovnControlPlane.sbService.port }}
+ targetPort: 6642
+ type: {{ .Values.ovnControlPlane.sbService.type }}
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: ovn-control-plane
+ sessionAffinity: None
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml
new file mode 100644
index 00000000..a9e5747d
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nfn-operator
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-operator
+spec:
+ type: {{ .Values.nfnOperator.service.type }}
+ ports:
+ - port: {{ .Values.nfnOperator.service.port }}
+ protocol: TCP
+ targetPort: 50000
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: nfn-operator
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml
new file mode 100644
index 00000000..853e2ca1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "ovn4nfv.serviceAccountName" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/ovn4nfv/values.yaml b/kud/deployment_infra/helm/ovn4nfv/values.yaml
new file mode 100644
index 00000000..660c4eba
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/values.yaml
@@ -0,0 +1,177 @@
+image:
+ repository: docker.io/integratedcloudnative/ovn4nfv-k8s-plugin
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+cni:
+ securityContext:
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ limits:
+ cpu: 100m
+ memory: 50Mi
+
+ config:
+ name: ovn4nfv-cni-config
+ ovn4nfv_k8s: |
+ [logging]
+ loglevel=5
+ logfile=/var/log/openvswitch/ovn4k8s.log
+
+ [cni]
+ conf-dir=/etc/cni/net.d
+ plugin=ovn4nfvk8s-cni
+
+ [kubernetes]
+ kubeconfig=/etc/cni/net.d/ovn4nfv-k8s.d/ovn4nfv-k8s.kubeconfig
+ network: |
+ {
+ "name": "ovn4nfv-k8s-plugin",
+ "type": "ovn4nfvk8s-cni",
+ "cniVersion": "0.3.1"
+ }
+
+nfnAgent:
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ limits:
+ cpu: 100m
+ memory: 50Mi
+
+nfnOperator:
+ nodeSelector: {}
+
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: role
+ operator: In
+ values:
+ - ovn-control-plane
+ topologyKey: "kubernetes.io/hostname"
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ config:
+ name: ovn-controller-network
+ data: |
+ OVN_SUBNET: 10.154.142.0/18
+ OVN_GATEWAYIP: 10.154.142.1/18
+
+ service:
+ type: NodePort
+ port: 50000
+
+ovn:
+ image:
+ repository: docker.io/integratedcloudnative/ovn-images
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+ imagePullSecrets: []
+
+ovnControlPlane:
+ securityContext:
+ capabilities:
+ add: ["SYS_NICE"]
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+ node-role.kubernetes.io/master: ""
+
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ role: ovn-control-plane
+ topologyKey: kubernetes.io/hostname
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 500m
+ memory: 300Mi
+
+ nbService:
+ type: ClusterIP
+ port: 6641
+
+ sbService:
+ type: ClusterIP
+ port: 6642
+
+ovnController:
+ securityContext:
+ runAsUser: 0
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+
+ affinity: {}
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 200m
+ memory: 300Mi
+ limits:
+ cpu: 1000m
+ memory: 800Mi
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
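For orientation (not part of the patch), a minimal site-specific override for this chart might look like the sketch below; the subnet, gateway, and port are illustrative placeholders, and every key mirrors a default defined in the values.yaml above. The controller network is carried as a literal string block, so an override has to keep the 'data: |' form.

    # example-ovn4nfv-overrides.yaml (illustrative values only)
    nfnOperator:
      config:
        name: ovn-controller-network
        # Keep the payload as a literal block, matching the chart default.
        data: |
          OVN_SUBNET: 10.200.0.0/16
          OVN_GATEWAYIP: 10.200.0.1/16
      service:
        type: NodePort
        port: 50000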
diff --git a/kud/deployment_infra/helm/qat-device-plugin/.helmignore b/kud/deployment_infra/helm/qat-device-plugin/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml b/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml
new file mode 100644
index 00000000..1697abb0
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml
@@ -0,0 +1,25 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 0.19.0-kerneldrv
+description: |
+ A device plugin that provides support for Intel QuickAssist
+ Technology (QAT) devices under Kubernetes.
+home: https://github.com/intel/intel-device-plugins-for-kubernetes
+name: qat-device-plugin
+sources:
+ - https://github.com/intel/intel-device-plugins-for-kubernetes
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl b/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl
new file mode 100644
index 00000000..77889d5d
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl
@@ -0,0 +1,52 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "qat-device-plugin.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "qat-device-plugin.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "qat-device-plugin.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "qat-device-plugin.labels" -}}
+helm.sh/chart: {{ include "qat-device-plugin.chart" . }}
+{{ include "qat-device-plugin.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "qat-device-plugin.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "qat-device-plugin.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml
new file mode 100644
index 00000000..24ffaa4a
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.config.name }}
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+data:
+ VERBOSITY: {{ .Values.config.logLevel | quote }}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml
new file mode 100644
index 00000000..c94ff330
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml
@@ -0,0 +1,60 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "qat-device-plugin.fullname" . }}
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ containers:
+ - name: intel-qat-kernel-plugin
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ env:
+ - name: VERBOSITY
+ valueFrom:
+ configMapKeyRef:
+ name: {{ .Values.config.name }}
+ key: VERBOSITY
+ args: ["-mode", "kernel", "-v", "$(VERBOSITY)"]
+ volumeMounts:
+ - name: devfs
+ mountPath: /dev
+ - name: etcdir
+ mountPath: /etc
+ readOnly: true
+ - name: kubeletsockets
+ mountPath: /var/lib/kubelet/device-plugins
+ volumes:
+ - name: etcdir
+ hostPath:
+ path: /etc
+ - name: kubeletsockets
+ hostPath:
+ path: /var/lib/kubelet/device-plugins
+ - name: devfs
+ hostPath:
+ path: /dev
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml
new file mode 100644
index 00000000..7ba04047
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "qat-device-plugin.fullname" . }}-qat-driver-installer
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+ role: qat-driver-installer
+spec:
+ selector:
+ matchLabels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 6 }}
+ role: qat-driver-installer
+ template:
+ metadata:
+ labels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 8 }}
+ role: qat-driver-installer
+ spec:
+ hostPID: true
+ {{- with .Values.qatDriver.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.qatDriver.podSecurityContext | nindent 8 }}
+ initContainers:
+ - image: "{{ .Values.qatDriver.image.repository }}:{{ .Values.qatDriver.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.qatDriver.image.pullPolicy }}
+ name: qat-driver-installer
+ securityContext:
+ {{- toYaml .Values.qatDriver.securityContext | nindent 12 }}
+ resources:
+ {{- toYaml .Values.qatDriver.resources | nindent 12 }}
+ volumeMounts:
+ - name: qat-install-dir
+ mountPath: "/usr/local/qat"
+ - name: root-dir
+ mountPath: "/root"
+ - name: lib-modules-dir
+ mountPath: "/root/lib/modules"
+ - name: run-systemd-dir
+ mountPath: "/root/run/systemd/system"
+ containers:
+ - image: "gcr.io/google-containers/pause:3.2"
+ name: pause
+ volumes:
+ - name: qat-install-dir
+ hostPath:
+ path: "/opt/qat"
+ - name: root-dir
+ hostPath:
+ path: "/"
+ - name: lib-modules-dir
+ hostPath:
+ path: "/lib/modules"
+ - name: run-systemd-dir
+ hostPath:
+ path: "/run/systemd/system"
+ {{- with .Values.qatDriver.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.qatDriver.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.qatDriver.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/values.yaml b/kud/deployment_infra/helm/qat-device-plugin/values.yaml
new file mode 100644
index 00000000..459c36b1
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/values.yaml
@@ -0,0 +1,49 @@
+config:
+ name: intel-qat-plugin-config
+
+ # logLevel sets the plugin's log level.
+ logLevel: 4
+
+imagePullSecrets: []
+
+image:
+ repository: integratedcloudnative/intel-qat-plugin
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+nodeSelector:
+ feature.node.kubernetes.io/iommu-enabled: "true"
+ feature.node.kubernetes.io/custom-qat: "true"
+
+securityContext:
+ readOnlyRootFilesystem: true
+ privileged: true
+
+resources: {}
+
+affinity: {}
+
+tolerations: []
+
+qatDriver:
+ image:
+ repository: integratedcloudnative/qat-driver-installer
+ pullPolicy: IfNotPresent
+ tag: latest
+ imagePullSecrets: []
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ privileged: true
+
+ nodeSelector:
+ feature.node.kubernetes.io/iommu-enabled: "true"
+ feature.node.kubernetes.io/custom-qat: "true"
+
+ resources: {}
+
+ affinity: {}
+
+ tolerations: []
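As a usage sketch (not part of the patch), the plugin's verbosity and scheduling can be tuned with an override file like the one below; the values are illustrative, the node label reuses an NFD label already present in the defaults, and the toleration follows the same pattern used by the ovn4nfv chart earlier in this change.

    # example-qat-overrides.yaml (illustrative values only)
    config:
      logLevel: 2   # becomes VERBOSITY in the ConfigMap and is passed to the plugin as -v
    nodeSelector:
      feature.node.kubernetes.io/custom-qat: "true"
    tolerations:
    - operator: Exists
      effect: NoSchedule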
diff --git a/kud/deployment_infra/helm/sriov-network-operator/.helmignore b/kud/deployment_infra/helm/sriov-network-operator/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml b/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml
new file mode 100644
index 00000000..ba056e07
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml
@@ -0,0 +1,27 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 4.8.0
+description: |
+ The SR-IOV Network Operator helps the user provision and configure
+ the SR-IOV CNI plugin and device plugin in a Kubernetes
+ cluster.
+name: sriov-network-operator
+sources:
+ - https://github.com/k8snetworkplumbingwg/sriov-network-operator
+ - https://downloadcenter.intel.com/download/24693/Intel-Network-Adapter-Linux-Virtual-Function-Driver-for-Intel-Ethernet-Controller-700-and-E810-Series
+home: https://github.com/k8snetworkplumbingwg/sriov-network-operator
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml
new file mode 100644
index 00000000..21e9e48b
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml
@@ -0,0 +1,73 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovibnetworks.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovIBNetwork
+ listKind: SriovIBNetworkList
+ plural: sriovibnetworks
+ singular: sriovibnetwork
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovIBNetwork is the Schema for the sriovibnetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovIBNetworkSpec defines the desired state of SriovIBNetwork
+ properties:
+ capabilities:
+ description: 'Capabilities to be configured for this network. Capabilities
+ supported: (infinibandGUID), e.g. ''{"infinibandGUID": true}'''
+ type: string
+ ipam:
+ description: IPAM configuration to be used for this network.
+ type: string
+ linkState:
+ description: VF link state (enable|disable|auto)
+ enum:
+ - auto
+ - enable
+ - disable
+ type: string
+ networkNamespace:
+ description: Namespace of the NetworkAttachmentDefinition custom resource
+ type: string
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ required:
+ - resourceName
+ type: object
+ status:
+ description: SriovIBNetworkStatus defines the observed state of SriovIBNetwork
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
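The patch ships only the CRD here; purely for orientation, a hypothetical SriovIBNetwork instance conforming to the schema above might look as follows. The resource name and IPAM subnet are assumptions, not values taken from this change.

    apiVersion: sriovnetwork.openshift.io/v1
    kind: SriovIBNetwork
    metadata:
      name: example-ib-network        # illustrative name
    spec:
      resourceName: intel_sriov_ib    # assumed device plugin resource name
      networkNamespace: default
      linkState: auto
      ipam: |
        {
          "type": "host-local",
          "subnet": "192.168.100.0/24"
        }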
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml
new file mode 100644
index 00000000..d69e5608
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml
@@ -0,0 +1,109 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworks.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetwork
+ listKind: SriovNetworkList
+ plural: sriovnetworks
+ singular: sriovnetwork
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetwork is the Schema for the sriovnetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkSpec defines the desired state of SriovNetwork
+ properties:
+ capabilities:
+ description: 'Capabilities to be configured for this network. Capabilities
+ supported: (mac|ips), e.g. ''{"mac": true}'''
+ type: string
+ ipam:
+ description: IPAM configuration to be used for this network.
+ type: string
+ linkState:
+ description: VF link state (enable|disable|auto)
+ enum:
+ - auto
+ - enable
+ - disable
+ type: string
+ maxTxRate:
+ description: Maximum tx rate, in Mbps, for the VF. Defaults to 0 (no
+ rate limiting)
+ minimum: 0
+ type: integer
+ metaPlugins:
+ description: MetaPluginsConfig configuration to be used in order to
+ chain metaplugins to the sriov interface returned by the operator.
+ type: string
+ minTxRate:
+ description: Minimum tx rate, in Mbps, for the VF. Defaults to 0 (no
+ rate limiting). min_tx_rate should be <= max_tx_rate.
+ minimum: 0
+ type: integer
+ networkNamespace:
+ description: Namespace of the NetworkAttachmentDefinition custom resource
+ type: string
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ spoofChk:
+ description: VF spoof check, (on|off)
+ enum:
+ - "on"
+ - "off"
+ type: string
+ trust:
+ description: VF trust mode (on|off)
+ enum:
+ - "on"
+ - "off"
+ type: string
+ vlan:
+ description: VLAN ID to assign for the VF. Defaults to 0.
+ maximum: 4096
+ minimum: 0
+ type: integer
+ vlanQoS:
+ description: VLAN QoS ID to assign for the VF. Defaults to 0.
+ maximum: 7
+ minimum: 0
+ type: integer
+ required:
+ - resourceName
+ type: object
+ status:
+ description: SriovNetworkStatus defines the observed state of SriovNetwork
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml
new file mode 100644
index 00000000..315ea262
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml
@@ -0,0 +1,131 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworknodepolicies.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetworkNodePolicy
+ listKind: SriovNetworkNodePolicyList
+ plural: sriovnetworknodepolicies
+ singular: sriovnetworknodepolicy
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetworkNodePolicy is the Schema for the sriovnetworknodepolicies
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkNodePolicySpec defines the desired state of SriovNetworkNodePolicy
+ properties:
+ deviceType:
+ description: The driver type for configured VFs. Allowed value "netdevice",
+ "vfio-pci". Defaults to netdevice.
+ enum:
+ - netdevice
+ - vfio-pci
+ type: string
+ eSwitchMode:
+ description: NIC Device Mode. Allowed value "legacy","switchdev".
+ enum:
+ - legacy
+ - switchdev
+ type: string
+ isRdma:
+ description: RDMA mode. Defaults to false.
+ type: boolean
+ linkType:
+ description: NIC Link Type. Allowed value "eth", "ETH", "ib", and
+ "IB".
+ enum:
+ - eth
+ - ETH
+ - ib
+ - IB
+ type: string
+ mtu:
+ description: MTU of VF
+ minimum: 1
+ type: integer
+ nicSelector:
+ description: NicSelector selects the NICs to be configured
+ properties:
+ deviceID:
+ description: The device hex code of SR-IoV device. Allowed value
+ "0d58", "1572", "158b", "1013", "1015", "1017", "101b".
+ type: string
+ netFilter:
+ description: Infrastructure Networking selection filter. Allowed
+ value "openstack/NetworkID:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ type: string
+ pfNames:
+ description: Name of SR-IoV PF.
+ items:
+ type: string
+ type: array
+ rootDevices:
+ description: PCI address of SR-IoV PF.
+ items:
+ type: string
+ type: array
+ vendor:
+ description: The vendor hex code of SR-IoV device. Allowed value
+ "8086", "15b3".
+ type: string
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector selects the nodes to be configured
+ type: object
+ numVfs:
+ description: Number of VFs for each PF
+ minimum: 0
+ type: integer
+ priority:
+ description: Priority of the policy, higher priority policies can
+ override lower ones.
+ maximum: 99
+ minimum: 0
+ type: integer
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ required:
+ - nicSelector
+ - nodeSelector
+ - numVfs
+ - resourceName
+ type: object
+ status:
+ description: SriovNetworkNodePolicyStatus defines the observed state of
+ SriovNetworkNodePolicy
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
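For orientation only, a SriovNetworkNodePolicy satisfying the required fields of the schema above could be written as below; the device and vendor codes are taken from the allowed values listed in the schema, while the node label, VF count, and resource name are illustrative assumptions.

    apiVersion: sriovnetwork.openshift.io/v1
    kind: SriovNetworkNodePolicy
    metadata:
      name: example-policy            # illustrative name
    spec:
      deviceType: netdevice
      nicSelector:
        deviceID: "1572"              # one of the device codes allowed by the schema
        vendor: "8086"
      nodeSelector:
        feature.node.kubernetes.io/network-sriov.capable: "true"
      numVfs: 4
      resourceName: intel_sriov_nic   # assumed resource name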
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml
new file mode 100644
index 00000000..bae83794
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml
@@ -0,0 +1,153 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworknodestates.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetworkNodeState
+ listKind: SriovNetworkNodeStateList
+ plural: sriovnetworknodestates
+ singular: sriovnetworknodestate
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetworkNodeState is the Schema for the sriovnetworknodestates
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkNodeStateSpec defines the desired state of SriovNetworkNodeState
+ properties:
+ dpConfigVersion:
+ type: string
+ interfaces:
+ items:
+ properties:
+ eSwitchMode:
+ type: string
+ linkType:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ numVfs:
+ type: integer
+ pciAddress:
+ type: string
+ vfGroups:
+ items:
+ properties:
+ deviceType:
+ type: string
+ policyName:
+ type: string
+ resourceName:
+ type: string
+ vfRange:
+ type: string
+ type: object
+ type: array
+ required:
+ - pciAddress
+ type: object
+ type: array
+ type: object
+ status:
+ description: SriovNetworkNodeStateStatus defines the observed state of
+ SriovNetworkNodeState
+ properties:
+ interfaces:
+ items:
+ properties:
+ Vfs:
+ items:
+ properties:
+ Vlan:
+ type: integer
+ assigned:
+ type: string
+ deviceID:
+ type: string
+ driver:
+ type: string
+ mac:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ pciAddress:
+ type: string
+ vendor:
+ type: string
+ vfID:
+ type: integer
+ required:
+ - pciAddress
+ - vfID
+ type: object
+ type: array
+ deviceID:
+ type: string
+ driver:
+ type: string
+ eSwitchMode:
+ type: string
+ linkSpeed:
+ type: string
+ linkType:
+ type: string
+ mac:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ netFilter:
+ type: string
+ numVfs:
+ type: integer
+ pciAddress:
+ type: string
+ totalvfs:
+ type: integer
+ vendor:
+ type: string
+ required:
+ - pciAddress
+ type: object
+ type: array
+ lastSyncError:
+ type: string
+ syncStatus:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml
new file mode 100644
index 00000000..b3e360c8
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml
@@ -0,0 +1,89 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovoperatorconfigs.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovOperatorConfig
+ listKind: SriovOperatorConfigList
+ plural: sriovoperatorconfigs
+ singular: sriovoperatorconfig
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovOperatorConfig is the Schema for the sriovoperatorconfigs
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovOperatorConfigSpec defines the desired state of SriovOperatorConfig
+ properties:
+ configDaemonNodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector selects the nodes to be configured
+ type: object
+ disableDrain:
+ description: Flag to disable nodes drain during debugging
+ type: boolean
+ enableInjector:
+ description: Flag to control whether the network resource injector
+ webhook shall be deployed
+ type: boolean
+ enableOperatorWebhook:
+ description: Flag to control whether the operator admission controller
+ webhook shall be deployed
+ type: boolean
+ enableOvsOffload:
+ description: Flag to enable OVS hardware offload. Set to 'true' to
+ provision switchdev-configuration.service and enable OpenvSwitch
+ hw-offload on nodes.
+ type: boolean
+ logLevel:
+ description: Flag to control the log verbose level of the operator.
+ Set to '0' to show only the basic logs. And set to '2' to show all
+ the available logs.
+ maximum: 2
+ minimum: 0
+ type: integer
+ type: object
+ status:
+ description: SriovOperatorConfigStatus defines the observed state of SriovOperatorConfig
+ properties:
+ injector:
+ description: Show the runtime status of the network resource injector
+ webhook
+ type: string
+ operatorWebhook:
+ description: Show the runtime status of the operator admission controller
+ webhook
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl b/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..2d2bd47f
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sriov-network-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "sriov-network-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sriov-network-operator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "sriov-network-operator.labels" -}}
+helm.sh/chart: {{ include "sriov-network-operator.chart" . }}
+{{ include "sriov-network-operator.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "sriov-network-operator.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "sriov-network-operator.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "sriov-network-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "sriov-network-operator.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml
new file mode 100644
index 00000000..1a37667e
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml
@@ -0,0 +1,54 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch", "patch", "update"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: [namespaces, serviceaccounts]
+ verbs: ["*"]
+- apiGroups: ["k8s.cni.cncf.io"]
+ resources: ["network-attachment-definitions"]
+ verbs: ["*"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+ resources: [clusterroles, clusterrolebindings]
+ verbs: ["*"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
+ verbs: ["*"]
+- apiGroups: ["sriovnetwork.openshift.io"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: ["machineconfiguration.openshift.io"]
+ resources: ["*"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch", "patch", "update"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: ["pods/eviction"]
+ verbs: ["create"]
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..acf15ee5
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml
@@ -0,0 +1,30 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "sriov-network-operator.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: sriov-network-config-daemon
+subjects:
+- kind: ServiceAccount
+ name: sriov-network-config-daemon
+ namespace: {{ .Release.Namespace }}
+
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml
new file mode 100644
index 00000000..b86ee383
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}-iavf-driver-installer
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+ role: iavf-driver-installer
+spec:
+ selector:
+ matchLabels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 6 }}
+ role: iavf-driver-installer
+ template:
+ metadata:
+ labels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 8 }}
+ role: iavf-driver-installer
+ spec:
+ hostPID: true
+ {{- with .Values.iavfDriver.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.iavfDriver.podSecurityContext | nindent 8 }}
+ initContainers:
+ - image: "{{ .Values.iavfDriver.image.repository }}:{{ .Values.iavfDriver.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.iavfDriver.image.pullPolicy }}
+ name: iavf-driver-installer
+ securityContext:
+ {{- toYaml .Values.iavfDriver.securityContext | nindent 12 }}
+ resources:
+ {{- toYaml .Values.iavfDriver.resources | nindent 12 }}
+ volumeMounts:
+ - name: iavf-install-dir
+ mountPath: "/usr/local/iavf"
+ - name: root-dir
+ mountPath: "/root"
+ - name: lib-modules-dir
+ mountPath: "/root/lib/modules"
+ - name: run-systemd-dir
+ mountPath: "/root/run/systemd/system"
+ containers:
+ - image: "gcr.io/google-containers/pause:3.2"
+ name: pause
+ volumes:
+ - name: iavf-install-dir
+ hostPath:
+ path: "/opt/iavf"
+ - name: root-dir
+ hostPath:
+ path: "/"
+ - name: lib-modules-dir
+ hostPath:
+ path: "/lib/modules"
+ - name: run-systemd-dir
+ hostPath:
+ path: "/run/systemd/system"
+ {{- with .Values.iavfDriver.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.iavfDriver.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.iavfDriver.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml
new file mode 100644
index 00000000..679ed269
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml
@@ -0,0 +1,89 @@
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovOperatorConfig
+metadata:
+ name: default
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+spec:
+ {{- with .Values.configDaemon.nodeSelector }}
+ configDaemonNodeSelector:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ enableInjector: {{ .Values.enableInjector }}
+ enableOperatorWebhook: {{ .Values.enableOperatorWebhook }}
+ logLevel: {{ .Values.logLevel }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "sriov-network-operator.serviceAccountName" . }}
+ containers:
+ - name: sriov-network-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - sriov-network-operator
+ env:
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SRIOV_CNI_IMAGE
+ value: "{{ .Values.cni.image.repository }}:{{ .Values.cni.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_INFINIBAND_CNI_IMAGE
+ value: "{{ .Values.infinibandCni.image.repository }}:{{ .Values.infinibandCni.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_DEVICE_PLUGIN_IMAGE
+ value: "{{ .Values.devicePlugin.image.repository }}:{{ .Values.devicePlugin.image.tag | default .Chart.AppVersion }}"
+ - name: NETWORK_RESOURCES_INJECTOR_IMAGE
+ value: "{{ .Values.resourcesInjector.image.repository }}:{{ .Values.resourcesInjector.image.tag | default .Chart.AppVersion }}"
+ - name: OPERATOR_NAME
+ value: "sriov-network-operator"
+ - name: SRIOV_NETWORK_CONFIG_DAEMON_IMAGE
+ value: "{{ .Values.configDaemon.image.repository }}:{{ .Values.configDaemon.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_NETWORK_WEBHOOK_IMAGE
+ value: "{{ .Values.webhook.image.repository }}:{{ .Values.webhook.image.tag | default .Chart.AppVersion }}"
+ - name: RESOURCE_PREFIX
+ value: "{{ .Values.resourcePrefix }}"
+ - name: ENABLE_ADMISSION_CONTROLLER
+ value: "false"
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: RELEASE_VERSION
+ value: "4.3.0"
+ - name: SRIOV_CNI_BIN_PATH
+ value: "/opt/cni/bin"
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
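With this chart's default values (defined in values.yaml later in this change), the SriovOperatorConfig template above renders to roughly the following object; chart labels are omitted for brevity.

    apiVersion: sriovnetwork.openshift.io/v1
    kind: SriovOperatorConfig
    metadata:
      name: default
    spec:
      configDaemonNodeSelector:
        beta.kubernetes.io/os: "linux"
      enableInjector: false
      enableOperatorWebhook: false
      logLevel: 2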
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml
new file mode 100644
index 00000000..96fae762
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml
@@ -0,0 +1,107 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ - configmaps
+ - secrets
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ - statefulsets
+ verbs:
+ - '*'
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - get
+ - create
+- apiGroups:
+ - apps
+ resourceNames:
+ - sriov-network-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - serviceaccounts
+ - roles
+ - rolebindings
+ verbs:
+ - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ verbs:
+ - '*'
+- apiGroups:
+ - sriovnetwork.openshift.io
+ resources:
+ - '*'
+ - sriovnetworknodestates
+ verbs:
+ - '*'
+- apiGroups:
+ - security.openshift.io
+ resourceNames:
+ - privileged
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - use
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: operator-webhook-sa
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml
new file mode 100644
index 00000000..1f8498af
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml
@@ -0,0 +1,44 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "sriov-network-operator.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: sriov-network-config-daemon
+subjects:
+- kind: ServiceAccount
+ name: sriov-network-config-daemon
+ namespace: {{ .Release.Namespace }}
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: operator-webhook-sa
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: operator-webhook-sa
+subjects:
+- kind: ServiceAccount
+ name: operator-webhook-sa
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml
new file mode 100644
index 00000000..eb0ec10c
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/values.yaml b/kud/deployment_infra/helm/sriov-network-operator/values.yaml
new file mode 100644
index 00000000..59f257e4
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/values.yaml
@@ -0,0 +1,100 @@
+# enableInjector controls whether the network resource injector
+# webhook shall be deployed
+enableInjector: false
+
+# enableOperatorWebhook controls whether the operator admission
+# controller webhook shall be deployed
+enableOperatorWebhook: false
+
+# logLevel controls the log verbosity of the operator. Set to '0'
+# to show only the basic logs, or to '2' to show all the
+# available logs.
+logLevel: 2
+
+# resourcePrefix is the device plugin resource prefix.
+resourcePrefix: "intel.com"
+
+image:
+ repository: integratedcloudnative/origin-sriov-network-operator
+ # This should be set to 'IfNotPresent' for released versions
+ pullPolicy: IfNotPresent
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+ # tag
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+nodeSelector:
+ node-role.kubernetes.io/master: ""
+
+affinity: {}
+
+tolerations:
+- effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+
+cni:
+ image:
+ repository: integratedcloudnative/origin-sriov-cni
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+infinibandCni:
+ image:
+ repository: integratedcloudnative/origin-sriov-infiniband-cni
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+devicePlugin:
+ image:
+ repository: integratedcloudnative/origin-sriov-network-device-plugin
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+resourcesInjector:
+ image:
+ repository: integratedcloudnative/origin-sriov-dp-admission-controller
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+configDaemon:
+ image:
+ repository: integratedcloudnative/sriov-network-config-daemon
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+
+webhook:
+ image:
+ repository: integratedcloudnative/origin-sriov-network-webhook
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion will be used
+
+iavfDriver:
+ image:
+ repository: integratedcloudnative/iavf-driver-installer
+ pullPolicy: IfNotPresent
+ tag: latest
+ imagePullSecrets: []
+
+ nodeSelector:
+ feature.node.kubernetes.io/custom-iavf: "true"
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ privileged: true
+
+ resources: {}
+
+ affinity: {}
+
+ tolerations: []
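A minimal override sketch for this chart (not part of the patch): it flips the two webhook toggles exposed at the top of values.yaml and quiets the operator logs. These values only feed the SriovOperatorConfig object rendered in operator.yaml above.

    # example-sriov-operator-overrides.yaml (illustrative values only)
    enableInjector: true
    enableOperatorWebhook: true
    logLevel: 0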
diff --git a/kud/deployment_infra/helm/sriov-network/.helmignore b/kud/deployment_infra/helm/sriov-network/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/sriov-network/Chart.yaml b/kud/deployment_infra/helm/sriov-network/Chart.yaml
new file mode 100644
index 00000000..8cf3a1d5
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 4.8.0
+description: |
+ SR-IOV CNI plugin and Device plugin configuration.
+name: sriov-network
+sources:
+ - https://github.com/k8snetworkplumbingwg/sriov-network-operator
+home: https://github.com/k8snetworkplumbingwg/sriov-network-operator
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl b/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl
new file mode 100644
index 00000000..08baf040
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl
@@ -0,0 +1,34 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sriov-network.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sriov-network.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "sriov-network.labels" -}}
+helm.sh/chart: {{ include "sriov-network.chart" . }}
+{{ include "sriov-network.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "sriov-network.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "sriov-network.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml b/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml
new file mode 100644
index 00000000..550f00dc
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml
@@ -0,0 +1,40 @@
+{{- range $network := .Values.networks }}
+---
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovNetwork
+metadata:
+ name: {{ $network.networkName }}
+ labels:
+ {{- include "sriov-network.labels" $ | nindent 4 }}
+spec:
+ {{- with $network.capabilities }}
+ capabilities: | {{ . | nindent 4 }}
+ {{- end }}
+ ipam: | {{ $network.ipam | nindent 4 }}
+ {{- if $network.linkState }}
+ linkState: {{ $network.linkState }}
+ {{- end }}
+ {{- if $network.maxTxRate }}
+ maxTxRate: {{ $network.maxTxRate }}
+ {{- end }}
+ {{- with $network.metaPlugins }}
+ metaPlugins: | {{ . | nindent 4 }}
+ {{- end }}
+ {{- if $network.minTxRate }}
+ minTxRate: {{ $network.minTxRate }}
+ {{- end }}
+ networkNamespace: {{ $network.networkNamespace }}
+ resourceName: {{ $network.resourceName }}
+ {{- if $network.spoofChk }}
+ spoofChk: {{ $network.spoofChk }}
+ {{- end }}
+ {{- if $network.trust }}
+ trust: {{ $network.trust }}
+ {{- end }}
+ {{- if $network.vlan }}
+ vlan: {{ $network.vlan }}
+ {{- end }}
+ {{- if $network.vlanQoS }}
+ vlanQoS: {{ $network.vlanQoS }}
+ {{- end }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml b/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml
new file mode 100644
index 00000000..382df562
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml
@@ -0,0 +1,52 @@
+{{- range $policy := .Values.policies }}
+---
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovNetworkNodePolicy
+metadata:
+ name: {{ $policy.policyName }}
+ labels:
+ {{- include "sriov-network.labels" $ | nindent 4 }}
+spec:
+ {{- if $policy.deviceType }}
+ deviceType: {{ $policy.deviceType }}
+ {{- end }}
+ {{- if $policy.eSwitchMode }}
+ eSwitchMode: {{ $policy.eSwitchMode }}
+ {{- end }}
+ nicSelector:
+ {{- if $policy.nicSelector.deviceID }}
+ deviceID: {{ $policy.nicSelector.deviceID | quote }}
+ {{- end }}
+ {{- if $policy.nicSelector.netFilter }}
+ netFilter: {{ $policy.nicSelector.netFilter | quote }}
+ {{- end }}
+ {{- with $policy.nicSelector.pfNames }}
+ pfNames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with $policy.nicSelector.rootDevices }}
+ rootDevices:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- if $policy.nicSelector.vendor }}
+ vendor: {{ $policy.nicSelector.vendor | quote }}
+ {{- end }}
+ {{- if $policy.isRdma }}
+ isRdma: {{ $policy.isRdma }}
+ {{- end }}
+ {{- if $policy.linkType }}
+ linkType: {{ $policy.linkType }}
+ {{- end }}
+ {{- if $policy.mtu }}
+ mtu: {{ $policy.mtu }}
+ {{- end }}
+ {{- with $policy.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ numVfs: {{ $policy.numVfs }}
+ {{- if $policy.priority }}
+ priority: {{ $policy.priority }}
+ {{- end }}
+ resourceName: {{ $policy.resourceName }}
+{{- end }}
\ No newline at end of file
diff --git a/kud/deployment_infra/helm/sriov-network/values.yaml b/kud/deployment_infra/helm/sriov-network/values.yaml
new file mode 100644
index 00000000..d9a38222
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/values.yaml
@@ -0,0 +1,144 @@
+nameOverride: ""
+
+policies:
+- # policyName is the name of the policy
+ policyName: "policy-xl710"
+
+ # nicSelector selects the NICs to be configured. At least one of
+ # vendor, deviceID, pfNames, or rootDevices must be defined.
+ nicSelector:
+ # deviceID is the device hex code of SR-IOV device.
+ deviceID: "1583"
+
+ # netFilter is the infrastructure networking selection
+ # filter. Allowed values are
+ # "openstack/NetworkID:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ #netFilter: ""
+
+ # pfNames is a list of the SR-IOV PF names.
+ #pfNames: []
+
+ # rootDevices is a list of the PCI addresses of SR-IOV PFs.
+ #rootDevices: []
+
+ # vendor is the vendor hex code of SR-IOV device. Allowed values are
+ # "8086", "15b3".
+ vendor: "8086"
+
+ # nodeSelector selects the nodes to be configured.
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_1583.present: "true"
+
+ # numVfs is the number of VFs for each PF.
+ numVfs: 8
+
+ # resourceName is the SR-IOV network device plugin endpoint resource name.
+ resourceName: "intel_sriov_nic"
+
+- policyName: "policy-82599es"
+ nicSelector:
+ deviceID: "10fb"
+ vendor: "8086"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_10fb.present: "true"
+ numVfs: 8
+ resourceName: "intel_sriov_nic"
+
+- policyName: "policy-i350"
+ nicSelector:
+ deviceID: "1521"
+ vendor: "8086"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_1521.present: "true"
+ numVfs: 2
+ resourceName: "intel_sriov_nic"
+
+networks:
+- # networkName is the name of both the SriovNetwork and the created
+ # NetworkAttachmentDefinition custom resource.
+ networkName: "sriov-intel"
+
+ # networkNamespace of the NetworkAttachmentDefinition custom resource.
+ networkNamespace: "default"
+
+ # ipam configuration to be used for the network.
+ ipam: |
+ {
+ "type": "host-local",
+ "subnet": "10.56.206.0/24",
+ "routes": [{
+ "dst": "0.0.0.0/0"
+ }],
+ "gateway": "10.56.206.1"
+ }
+
+  #
+  # Optional values are listed below. Note that deviceType, eSwitchMode,
+  # isRdma, linkType, mtu, and priority apply to the policy entries above;
+  # the remaining values apply to the network entries.
+  #
+
+ # deviceType is the driver type for configured VFs. Allowed values are
+ # "netdevice" and "vfio-pci".
+ #deviceType: "netdevice"
+
+ # eSwitchMode NIC Device Mode. Allowed values are "legacy" and
+ # "switchdev".
+ #eSwitchMode: "switchdev"
+
+ # isRdma defaults to false.
+ #isRdma: true
+
+ # linkType is the NIC link type. Allowed values are "eth", "ETH",
+ # "ib", and "IB".
+ #linkType: "eth"
+
+ # mtu of VF
+ #mtu: 9000
+
+ # priority of the policy, higher priority policies can override lower
+ # ones.
+ #priority: 99
+
+ # capabilities to be configured for this network. Capabilities
+ # supported: (mac|ips), e.g. '{"mac": true}'
+ #capabilities: |
+ # {
+ # "mac": true
+ # }
+
+ # linkState of VF (enable|disable|auto).
+ #linkState: "enable"
+
+ # maxTxRate, in Mbps, for the VF. Defaults to 0 (no rate limiting).
+ #maxTxRate: 0
+
+ # metaPlugins configuration to be used in order to chain metaplugins
+ # to the SR-IOV interface returned by the operator.
+ #metaPlugins: |
+ # {
+ # "type": "tuning",
+ # "sysctl": {
+ # "net.core.somaxconn": "500"
+ # }
+ # }
+
+ # minTxRate, in Mbps, for the VF. Defaults to 0 (no rate
+ # limiting). min_tx_rate should be <= max_tx_rate.
+ #minTxRate: 0
+
+  # spoofChk for the VF (on|off).
+ #spoofChk: "off"
+
+ # trust mode of VF (on|off)
+ #trust: "off"
+
+ # vlan ID to assign for the VF. Defaults to 0.
+ #vlan: 0
+
+ # vlanQoS ID to assign for the VF. Defaults to 0.
+ #vlanQoS: 0
+
+ # resourceName is the SR-IOV Network device plugin endpoint.
+ resourceName: "intel_sriov_nic"
diff --git a/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer b/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer
new file mode 100644
index 00000000..9bbfd372
--- /dev/null
+++ b/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer
@@ -0,0 +1,20 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+ apt-get install -y \
+ bc \
+ build-essential \
+ curl \
+ kmod \
+ libelf-dev \
+ libssl-dev \
+ libudev-dev \
+ pciutils \
+ pkg-config \
+ && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY _common.sh /
+COPY entrypoint-iavf-driver-installer.sh /entrypoint.sh
+
+CMD /entrypoint.sh
diff --git a/kud/deployment_infra/installers/Dockerfile.qat-driver-installer b/kud/deployment_infra/installers/Dockerfile.qat-driver-installer
new file mode 100644
index 00000000..7d885a59
--- /dev/null
+++ b/kud/deployment_infra/installers/Dockerfile.qat-driver-installer
@@ -0,0 +1,21 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+ apt-get install -y \
+ bc \
+ build-essential \
+ curl \
+ kmod \
+ libelf-dev \
+ libssl-dev \
+ libudev-dev \
+ pciutils \
+ pkg-config \
+ && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY _common.sh /
+COPY _qat-driver-installer.sh /
+COPY entrypoint-qat-driver-installer.sh /entrypoint.sh
+
+CMD /entrypoint.sh
diff --git a/kud/deployment_infra/installers/Makefile b/kud/deployment_infra/installers/Makefile
new file mode 100644
index 00000000..99aadbc0
--- /dev/null
+++ b/kud/deployment_infra/installers/Makefile
@@ -0,0 +1,10 @@
+REGISTRY?=integratedcloudnative
+TAG?=latest
+IMAGES=iavf-driver-installer qat-driver-installer
+
+.PHONY: all $(IMAGES)
+
+all: $(IMAGES)
+
+$(IMAGES):
+ docker build -t $(REGISTRY)/$@:$(TAG) -f Dockerfile.$@ .
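The default registry and tag can be overridden on the make command line when building release images; a minimal sketch, where registry.example.com/kud is a placeholder registry:

    # Build both installer images with a custom registry and tag, then push them
    make -C kud/deployment_infra/installers REGISTRY=registry.example.com/kud TAG=v1.0
    docker push registry.example.com/kud/iavf-driver-installer:v1.0
    docker push registry.example.com/kud/qat-driver-installer:v1.0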
diff --git a/kud/deployment_infra/installers/_common.sh b/kud/deployment_infra/installers/_common.sh
new file mode 100644
index 00000000..87badfc9
--- /dev/null
+++ b/kud/deployment_infra/installers/_common.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -u
+
+ROOT_MOUNT_DIR="${ROOT_MOUNT_DIR:-/root}"
+ROOT_OS_RELEASE="${ROOT_OS_RELEASE:-$ROOT_MOUNT_DIR/etc/os-release}"
+KERNEL_SRC_DIR=$(readlink -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/build")
+[[ "${KERNEL_SRC_DIR}" == "${ROOT_MOUNT_DIR}/*" ]] || KERNEL_SRC_DIR="${ROOT_MOUNT_DIR}${KERNEL_SRC_DIR}"
+KERNEL_MOD_SIGN_CMD="${KERNEL_MOD_SIGN_CMD:-}"
+
+RETCODE_SUCCESS=0
+RETCODE_ERROR=1
+
+_log() {
+ local -r prefix="$1"
+ shift
+ echo "[${prefix}$(date -u "+%Y-%m-%d %H:%M:%S %Z")] ""$*" >&2
+}
+
+info() {
+ _log "INFO " "$*"
+}
+
+warn() {
+ _log "WARNING " "$*"
+}
+
+error() {
+ _log "ERROR " "$*"
+}
+
+load_etc_os_release() {
+ if [[ ! -f "${ROOT_OS_RELEASE}" ]]; then
+ error "File ${ROOT_OS_RELEASE} not found, /etc/os-release from host must be mounted"
+ exit ${RETCODE_ERROR}
+ fi
+ . "${ROOT_OS_RELEASE}"
+ info "Running on ${NAME} kernel version $(uname -r)"
+}
diff --git a/kud/deployment_infra/installers/_qat-driver-installer.sh b/kud/deployment_infra/installers/_qat-driver-installer.sh
new file mode 100644
index 00000000..5ecc2f5a
--- /dev/null
+++ b/kud/deployment_infra/installers/_qat-driver-installer.sh
@@ -0,0 +1,514 @@
+#!/bin/bash
+#
+# The functions below are captured from the Makefile targets. They
+# cannot be run in a container as-is due to absolute paths, so they
+# are recreated here.
+#
+# Note also that the portions of qat-driver-install that deal with
+# rc.d are removed: they are intended to be handled by the deployed
+# DaemonSet. The rest is contained in _qat_service_start.
+#
+# The checks for loaded modules are moved to _qat_check_started.
+
+BIN_LIST="qat_c3xxx.bin qat_c3xxx_mmp.bin qat_c62x.bin \
+ qat_c62x_mmp.bin qat_mmp.bin qat_d15xx.bin qat_d15xx_mmp.bin \
+ qat_200xx.bin qat_200xx_mmp.bin qat_895xcc.bin qat_895xcc_mmp.bin"
+
+numDh895xDevicesP=$(lspci -n | grep -c "8086:0435") || true
+numDh895xDevicesV=$(lspci -n | grep -c "8086:0443") || true
+numC62xDevicesP=$(lspci -n | grep -c "8086:37c8") || true
+numC62xDevicesV=$(lspci -n | grep -c "8086:37c9") || true
+numD15xxDevicesP=$(lspci -n | grep -c "8086:6f54") || true
+numD15xxDevicesV=$(lspci -n | grep -c "8086:6f55") || true
+numC3xxxDevicesP=$(lspci -n | grep -c "8086:19e2") || true
+numC3xxxDevicesV=$(lspci -n | grep -c "8086:19e3") || true
+num200xxDevicesP=$(lspci -n | grep -c "8086:18ee") || true
+num200xxDevicesV=$(lspci -n | grep -c "8086:18ef") || true
+
+_qat_driver_install() {
+ info "Installing drivers"
+ if [[ -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ info "No driver signing required"
+ INSTALL_MOD_PATH=${ROOT_MOUNT_DIR} make KDIR="${KERNEL_SRC_DIR}" -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/qat" mod_sign_cmd=":" modules_install
+ else
+ info "Driver signing is required"
+ INSTALL_MOD_PATH=${ROOT_MOUNT_DIR} make KDIR="${KERNEL_SRC_DIR}" -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/qat" mod_sign_cmd="${KERNEL_MOD_SIGN_CMD}" modules_install
+ fi
+}
+
+_adf_ctl_install() {
+ info "Installing adf_ctl"
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/quickassist/utilities/adf_ctl/adf_ctl" "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+}
+
+_adf_ctl_uninstall() {
+ info "Uninstalling adf_ctl"
+ # rm ${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl
+ return 0
+}
+
+_rename_ssl_conf_section() {
+ info "Renaming SSL section in conf files"
+ restore_nullglob=$(shopt -p | grep nullglob)
+ shopt -s nullglob
+ for file in ${ROOT_MOUNT_DIR}/etc/dh895xcc_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/dh895xcc_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/c6xx_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/c6xx_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/d15xx_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/d15xx_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/c3xxx_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/c3xxx_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/200xx_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/200xx_dev${dev}.conf"
+ done
+
+ for file in ${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/c6xxvf_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/c6xxvf_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/d15xxvf_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/d15xxvf_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev${dev}.conf"
+ done
+ for file in ${ROOT_MOUNT_DIR}/etc/200xxvf_dev*.conf; do
+ dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+ sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/200xxvf_dev${dev}.conf"
+ done
+ $restore_nullglob
+}
+
+_qat_service_install() {
+ local -r QAT_DH895XCC_NUM_VFS=32
+ local -r QAT_DHC62X_NUM_VFS=16
+ local -r QAT_DHD15XX_NUM_VFS=16
+ local -r QAT_DHC3XXX_NUM_VFS=16
+ local -r QAT_DH200XX_NUM_VFS=16
+ local -r DEVICES="0435 0443 37c8 37c9 6f54 6f55 19e2 19e3 18ee 18ef"
+
+ info "Installing service"
+ pushd "${QAT_INSTALL_DIR_CONTAINER}/build" > /dev/null
+
+ if [[ ! -d ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup ]]; then
+ mkdir -p "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup"
+ fi
+ for bin in ${BIN_LIST}; do
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/${bin} ]]; then
+ mv "${ROOT_MOUNT_DIR}/lib/firmware/${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin}"
+ fi
+ if [[ -e ${bin} ]]; then
+ install -D -m 750 "${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ done
+ if [[ ! -d ${ROOT_MOUNT_DIR}/etc/qat_conf_backup ]]; then
+ mkdir "${ROOT_MOUNT_DIR}/etc/qat_conf_backup"
+ fi
+ mv "${ROOT_MOUNT_DIR}/etc/dh895xcc*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/c6xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/d15xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/c3xxx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/200xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ if [[ "${QAT_ENABLE_SRIOV}" != "guest" ]]; then
+ for ((dev=0; dev<numDh895xDevicesP; dev++)); do
+ install -D -m 640 dh895xcc_dev0.conf "${ROOT_MOUNT_DIR}/etc/dh895xcc_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC62xDevicesP; dev++)); do
+ install -D -m 640 c6xx_dev$((dev%3)).conf "${ROOT_MOUNT_DIR}/etc/c6xx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numD15xxDevicesP; dev++)); do
+ install -D -m 640 d15xx_dev$((dev%3)).conf "${ROOT_MOUNT_DIR}/etc/d15xx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC3xxxDevicesP; dev++)); do
+ install -D -m 640 c3xxx_dev0.conf "${ROOT_MOUNT_DIR}/etc/c3xxx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<num200xxDevicesP; dev++)); do
+ install -D -m 640 200xx_dev0.conf "${ROOT_MOUNT_DIR}/etc/200xx_dev${dev}.conf"
+ done
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ for ((dev=0; dev<numDh895xDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DH895XCC_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DH895XCC_NUM_VFS + vf_dev))
+ install -D -m 640 dh895xccvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numC62xDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHC62X_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHC62X_NUM_VFS + vf_dev))
+ install -D -m 640 c6xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c6xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numD15xxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHD15XX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHD15XX_NUM_VFS + vf_dev))
+ install -D -m 640 d15xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/d15xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numC3xxxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHC3XXX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHC3XXX_NUM_VFS + vf_dev))
+ install -D -m 640 c3xxxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<num200xxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DH200XX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DH200XX_NUM_VFS + vf_dev))
+ install -D -m 640 200xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/200xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ else
+ for ((dev=0; dev<numDh895xDevicesV; dev++)); do
+ install -D -m 640 dh895xccvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC62xDevicesV; dev++)); do
+ install -D -m 640 c6xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c6xxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numD15xxDevicesV; dev++)); do
+ install -D -m 640 d15xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/d15xxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC3xxxDevicesV; dev++)); do
+ install -D -m 640 c3xxxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<num200xxDevicesV; dev++)); do
+ install -D -m 640 200xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/200xxvf_dev${dev}.conf"
+ done
+ fi
+ _rename_ssl_conf_section
+ info "Creating startup and kill scripts"
+ install -D -m 750 qat_service "${ROOT_MOUNT_DIR}/etc/init.d/qat_service"
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ install -D -m 750 qat_service_vfs "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_vfs"
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" || "${QAT_ENABLE_SRIOV}" == "guest" ]]; then
+ echo "# Comment or remove next line to disable sriov" > "${ROOT_MOUNT_DIR}/etc/default/qat"
+ echo "SRIOV_ENABLE=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ else
+ echo "# Remove comment on next line to enable sriov" > "${ROOT_MOUNT_DIR}/etc/default/qat"
+ echo "#SRIOV_ENABLE=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ fi
+ echo "#LEGACY_LOADED=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ echo "blacklist qat_dh895xccvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ echo "blacklist qat_c3xxxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ echo "blacklist qat_200xxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ echo "blacklist qat_c62xvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ echo "blacklist qat_d15xxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ fi
+ echo "#ENABLE_KAPI=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ info "Copying libqat_s.so to ${ROOT_MOUNT_DIR}/usr/local/lib"
+ install -D -m 755 libqat_s.so "${ROOT_MOUNT_DIR}/usr/local/lib/libqat_s.so"
+ info "Copying libusdm_drv_s.so to ${ROOT_MOUNT_DIR}/usr/local/lib"
+ install -D -m 755 libusdm_drv_s.so "${ROOT_MOUNT_DIR}/usr/local/lib/libusdm_drv_s.so"
+ echo /usr/local/lib > "${ROOT_MOUNT_DIR}/etc/ld.so.conf.d/qat.conf"
+ ldconfig -r "${ROOT_MOUNT_DIR}"
+ info "Copying usdm module to system drivers"
+ if [[ ! -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ info "Need to sign driver usdm_drv.ko"
+ ${KERNEL_MOD_SIGN_CMD} usdm_drv.ko
+ info "Need to sign driver qat_api.ko"
+ ${KERNEL_MOD_SIGN_CMD} qat_api.ko
+ fi
+ install usdm_drv.ko "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers"
+ install qat_api.ko "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers"
+ if [[ ! $(chroot "${ROOT_MOUNT_DIR}" getent group qat) ]]; then
+ info "Creating qat group"
+ groupadd -R "${ROOT_MOUNT_DIR}" qat
+ else
+ info "Group qat already exists"
+ fi
+ info "Creating udev rules"
+ rm -f "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+ {
+ echo 'KERNEL=="qat_adf_ctl" MODE="0660" GROUP="qat"';
+ echo 'KERNEL=="qat_dev_processes" MODE="0660" GROUP="qat"';
+ echo 'KERNEL=="usdm_drv" MODE="0660" GROUP="qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/mkdir /dev/hugepages/qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/chgrp qat /dev/hugepages/qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/chmod 0770 /dev/hugepages/qat"';
+ echo 'ACTION=="remove", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/rmdir /dev/hugepages/qat"';
+ for dev in ${DEVICES}; do
+ echo 'KERNEL=="uio*", ATTRS{vendor}=="0x'"$(echo "8086" | tr -d \")"'", ATTRS{device}=="0x'"$(echo "${dev}" | tr -d \")"'" MODE="0660" GROUP="qat"';
+ done
+ } > "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+ info "Creating module.dep file for QAT released kernel object"
+ info "This will take a few moments"
+ depmod -a -b "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/depmod.d"
+
+ popd > /dev/null
+}
+
+_qat_service_start() {
+ if [[ $(lsmod | grep -c "usdm_drv") != "0" ]]; then
+ rmmod usdm_drv
+ fi
+ info "Starting QAT service"
+ info "... shutting down"
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service shutdown || true
+ sleep 3
+ info "... starting"
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service start
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" vfio-pci
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service_vfs start
+ fi
+ info "... started"
+}
+
+_qat_check_started() {
+ if [[ $(lsmod | grep -c "usdm_drv") == "0" ]]; then
+ error "usdm_drv module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_dh895xcc") == "0" ]]; then
+ error "qat_dh895xcc module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c62x") == "0" ]]; then
+ error "qat_c62x module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_d15xx") == "0" ]]; then
+ error "qat_d15xx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c3xxx") == "0" ]]; then
+ error "qat_c3xxx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_200xx") == "0" ]]; then
+ error "qat_200xx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "guest" ]]; then
+ if [[ ${numDh895xDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_dh895xccvf") == "0" ]]; then
+ error "qat_dh895xccvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC62xDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c62xvf") == "0" ]]; then
+ error "qat_c62xvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numD15xxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_d15xxvf") == "0" ]]; then
+ error "qat_d15xxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC3xxxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c3xxxvf") == "0" ]]; then
+ error "qat_c3xxxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${num200xxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_200xxvf") == "0" ]]; then
+ error "qat_200xxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ fi
+ if [[ $("${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl" status | grep -c "state: down") != "0" ]]; then
+ error "QAT driver not activated"
+ return "${RETCODE_ERROR}"
+ fi
+}
+
+_qat_service_shutdown() {
+ info "Stopping service"
+ if [[ $(lsmod | grep -c "qat") != "0" || -e ${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat/qat_common/intel_qat.ko ]]; then
+ if [[ $(lsmod | grep -c "usdm_drv") != "0" ]]; then
+ rmmod usdm_drv
+ fi
+ if [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream ]]; then
+ until chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service_upstream shutdown; do
+ sleep 1
+ done
+ elif [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service ]]; then
+ until chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service shutdown; do
+ sleep 1
+ done
+ fi
+ fi
+}
+
+_qat_service_uninstall() {
+ info "Uninstalling service"
+ if [[ $(lsmod | grep -c "qat") != "0" || -e ${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat/qat_common/intel_qat.ko ]]; then
+ info "Removing the QAT firmware"
+ for bin in ${BIN_LIST}; do
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/${bin} ]]; then
+ rm "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin} ]]; then
+ mv "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ done
+
+        if [[ -d ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup ]]; then
+            rm -rf "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup"
+        fi
+
+ if [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream ]]; then
+ rm "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream"
+ rm "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+ elif [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service ]]; then
+ rm "${ROOT_MOUNT_DIR}/etc/init.d/qat_service"
+ rm "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+ fi
+ rm -f "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_vfs"
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/lib/libqat_s.so"
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/lib/libusdm_drv_s.so"
+ rm -f "${ROOT_MOUNT_DIR}/etc/ld.so.conf.d/qat.conf"
+ ldconfig -r "${ROOT_MOUNT_DIR}"
+
+ info "Removing config files"
+ rm -f "${ROOT_MOUNT_DIR}/etc/dh895xcc*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/c6xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/d15xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/c3xxx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/200xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/dh895xcc*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/c6xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/d15xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/c3xxx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/200xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+
+ info "Removing drivers modules"
+ rm -rf "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat"
+ rm -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers/usdm_drv.ko"
+ rm -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers/qat_api.ko"
+ info "Creating module.dep file for QAT released kernel object"
+ depmod -a -b "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/depmod.d"
+
+ if [[ $(lsmod | grep -c "usdm_drv|intel_qat") != "0" ]]; then
+ if [[ $(modinfo intel_qat | grep -c "updates") == "0" ]]; then
+ info "In-tree driver loaded"
+ info "Acceleration uninstall complete"
+ else
+ error "Some modules not removed properly"
+ error "Acceleration uninstall failed"
+ fi
+ else
+ info "Acceleration uninstall complete"
+ fi
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ lsmod | grep qat_dh895xcc >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_dh895xcc >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ lsmod | grep qat_c62x >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c62x >/dev/null 2>&1 || true
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ lsmod | grep qat_d15xx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_d15xx >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ lsmod | grep qat_c3xxx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c3xxx >/dev/null 2>&1 || true
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ lsmod | grep qat_200xx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_200xx >/dev/null 2>&1 || true
+ fi
+ if [[ ${numDh895xDevicesV} != 0 ]]; then
+ lsmod | grep qat_dh895xccvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_dh895xccvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC62xDevicesV} != 0 ]]; then
+ lsmod | grep qat_c62xvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c62xvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numD15xxDevicesV} != 0 ]]; then
+ lsmod | grep qat_d15xxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_d15xxvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC3xxxDevicesV} != 0 ]]; then
+ lsmod | grep qat_c3xxxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c3xxxvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${num200xxDevicesV} != 0 ]]; then
+ lsmod | grep qat_200xxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_200xxvf >/dev/null 2>&1 || true
+ fi
+ else
+ info "Acceleration package not installed"
+ fi
+}
+
+_qat_sample_install() {
+ info "Installing samples"
+ if [[ -f ${QAT_INSTALL_DIR_CONTAINER}/quickassist/utilities/libusdm_drv/linux/build/linux_2.6/user_space/libusdm_drv.a ]]; then
+ ICP_ROOT="${QAT_INSTALL_DIR_CONTAINER}" make perf_user -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/build/linux_2.6/user_space/cpa_sample_code" "${QAT_INSTALL_DIR_CONTAINER}/build"
+ ICP_ROOT="${QAT_INSTALL_DIR_CONTAINER}" KERNEL_SOURCE_ROOT="${KERNEL_SRC_DIR}" make perf_kernel -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/build/linux_2.6/kernel_space/cpa_sample_code.ko" "${QAT_INSTALL_DIR_CONTAINER}/build"
+ else
+ error "No libusdm_drv library found - build the project (make all) before samples"
+ return "${RETCODE_ERROR}"
+ fi
+
+ if [[ ! -d ${ROOT_MOUNT_DIR}/lib/firmware ]]; then
+ mkdir "${ROOT_MOUNT_DIR}/lib/firmware"
+ fi
+
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary" "${ROOT_MOUNT_DIR}/lib/firmware"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary32" "${ROOT_MOUNT_DIR}/lib/firmware"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/canterbury" "${ROOT_MOUNT_DIR}/lib/firmware"
+ if [[ ! -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ if [[ -f ${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko ]]; then
+ echo "Need to sign sample code ${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko."
+ "${KERNEL_MOD_SIGN_CMD}" "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko"
+ fi
+ fi
+
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code" "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code"
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko" "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code.ko"
+ info "cpa_sample_code installed under ${ROOT_MOUNT_DIR}/usr/local/bin directory"
+}
+
+_qat_sample_uninstall() {
+ info "Uninstalling samples"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/calgary"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/calgary32"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/canterbury"
+
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code"
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code.ko"
+}
diff --git a/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh b/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh
new file mode 100755
index 00000000..1418d0df
--- /dev/null
+++ b/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+#set -x
+source _common.sh
+
+IAVF_DRIVER_VERSION="${IAVF_DRIVER_VERSION:-4.0.2}"
+IAVF_DRIVER_DOWNLOAD_URL_DEFAULT="https://downloadmirror.intel.com/24693/eng/iavf-${IAVF_DRIVER_VERSION}.tar.gz"
+IAVF_DRIVER_DOWNLOAD_URL="${IAVF_DRIVER_DOWNLOAD_URL:-$IAVF_DRIVER_DOWNLOAD_URL_DEFAULT}"
+IAVF_DRIVER_ARCHIVE="$(basename "${IAVF_DRIVER_DOWNLOAD_URL}")"
+IAVF_INSTALL_DIR_HOST="${IAVF_INSTALL_DIR_HOST:-/opt/iavf}"
+IAVF_INSTALL_DIR_CONTAINER="${IAVF_INSTALL_DIR_CONTAINER:-/usr/local/iavf}"
+CACHE_FILE="${IAVF_INSTALL_DIR_CONTAINER}/.cache"
+
+check_adapter() {
+ local -r nic_models="X710 XL710 X722"
+ if [[ $(lspci | grep -c "Ethernet .* \(${nic_models// /\\|}\)") != "0" ]]; then
+ info "Found adapter"
+ else
+ error "Missing adapter"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+download_iavf_src() {
+ info "Downloading IAVF source ... "
+ mkdir -p "${IAVF_INSTALL_DIR_CONTAINER}"
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}" > /dev/null
+ curl -L -sS "${IAVF_DRIVER_DOWNLOAD_URL}" -o "${IAVF_DRIVER_ARCHIVE}"
+ tar xf "${IAVF_DRIVER_ARCHIVE}" --strip-components=1
+ popd > /dev/null
+}
+
+build_iavf_src() {
+ info "Building IAVF source ... "
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}/src" > /dev/null
+ KSRC=${KERNEL_SRC_DIR} SYSTEM_MAP_FILE="${ROOT_MOUNT_DIR}/boot/System.map-$(uname -r)" INSTALL_MOD_PATH="${ROOT_MOUNT_DIR}" make install
+    # TODO Unable to update initramfs. You may need to do this manually.
+ popd > /dev/null
+}
+
+install_iavf() {
+ check_adapter
+ download_iavf_src
+ build_iavf_src
+}
+
+uninstall_iavf() {
+ if [[ $(lsmod | grep -c "iavf") != "0" ]]; then
+ rmmod iavf
+ fi
+ if [[ $(lsmod | grep -c "i40evf") != "0" ]]; then
+ rmmod i40evf
+ fi
+ if [[ -d "${IAVF_INSTALL_DIR_CONTAINER}/src" ]]; then
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}/src" > /dev/null
+ KSRC=${KERNEL_SRC_DIR} SYSTEM_MAP_FILE="${ROOT_MOUNT_DIR}/boot/System.map-$(uname -r)" INSTALL_MOD_PATH="${ROOT_MOUNT_DIR}" make uninstall
+ popd > /dev/null
+ fi
+ # This is a workaround for missing INSTALL_MOD_PATH prefix in the Makefile:
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/iavf.conf"
+}
+
+check_cached_version() {
+ info "Checking cached version"
+ if [[ ! -f "${CACHE_FILE}" ]]; then
+ info "Cache file ${CACHE_FILE} not found"
+ return "${RETCODE_ERROR}"
+ fi
+ # Source the cache file and check if the cached driver matches
+ # currently running kernel and driver versions.
+ . "${CACHE_FILE}"
+ if [[ "$(uname -r)" == "${CACHE_KERNEL_VERSION}" ]]; then
+ if [[ "${IAVF_DRIVER_VERSION}" == "${CACHE_IAVF_DRIVER_VERSION}" ]]; then
+ info "Found existing driver installation for kernel version $(uname -r) and driver version ${IAVF_DRIVER_VERSION}"
+ return "${RETCODE_SUCCESS}"
+ fi
+ fi
+ return "${RETCODE_ERROR}"
+}
+
+update_cached_version() {
+ cat >"${CACHE_FILE}"<<__EOF__
+CACHE_KERNEL_VERSION=$(uname -r)
+CACHE_IAVF_DRIVER_VERSION=${IAVF_DRIVER_VERSION}
+__EOF__
+
+ info "Updated cached version as:"
+ cat "${CACHE_FILE}"
+}
+
+upgrade_driver() {
+ uninstall_iavf
+ install_iavf
+}
+
+check_driver_started() {
+ if [[ $(lsmod | grep -c "iavf") == "0" ]]; then
+ return "${RETCODE_ERROR}"
+ fi
+ return 0
+}
+
+start_driver() {
+ modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" iavf
+ if ! check_driver_started; then
+ error "Driver not started"
+ fi
+}
+
+uninstall_driver() {
+ uninstall_iavf
+ rm -f "${CACHE_FILE}"
+}
+
+main() {
+ load_etc_os_release
+ local -r cmd="${1:-install}"
+ case $cmd in
+ install)
+ if ! check_cached_version; then
+ upgrade_driver
+ update_cached_version
+ fi
+ if ! check_driver_started; then
+ start_driver
+ fi
+ ;;
+ uninstall)
+ uninstall_driver
+ ;;
+ esac
+}
+
+main "$@"
diff --git a/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh b/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh
new file mode 100755
index 00000000..f9221309
--- /dev/null
+++ b/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+
+#set -x
+source _common.sh
+source _qat-driver-installer.sh
+
+# IMPORTANT: If the driver version is changed, review the QAT Makefile
+# against _qat-driver-installer.sh. The steps in that script come from
+# the Makefile and have been modified to run inside a container.
+QAT_DRIVER_VERSION="${QAT_DRIVER_VERSION:-1.7.l.4.12.0-00011}"
+QAT_DRIVER_DOWNLOAD_URL_DEFAULT="https://01.org/sites/default/files/downloads/qat${QAT_DRIVER_VERSION}.tar.gz"
+QAT_DRIVER_DOWNLOAD_URL="${QAT_DRIVER_DOWNLOAD_URL:-$QAT_DRIVER_DOWNLOAD_URL_DEFAULT}"
+QAT_DRIVER_ARCHIVE="$(basename "${QAT_DRIVER_DOWNLOAD_URL}")"
+QAT_INSTALL_DIR_HOST="${QAT_INSTALL_DIR_HOST:-/opt/qat}"
+QAT_INSTALL_DIR_CONTAINER="${QAT_INSTALL_DIR_CONTAINER:-/usr/local/qat}"
+QAT_ENABLE_SRIOV="${QAT_ENABLE_SRIOV:-host}"
+CACHE_FILE="${QAT_INSTALL_DIR_CONTAINER}/.cache"
+
+check_kernel_boot_parameter() {
+ if [[ $(grep -c intel_iommu=on /proc/cmdline) != "0" ]]; then
+ info "Found intel_iommu=on kernel boot parameter"
+ else
+ error "Missing intel_iommu=on kernel boot parameter"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+check_sriov_hardware_capabilities() {
+ if [[ $(lspci -vn -d 8086:0435 | grep -c SR-IOV) != "0" ]]; then
+ info "Found dh895xcc SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:37c8 | grep -c SR-IOV) != "0" ]]; then
+ info "Found c6xx SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:6f54 | grep -c SR-IOV) != "0" ]]; then
+ info "Found d15xx SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:19e2 | grep -c SR-IOV) != "0" ]]; then
+ info "Found c3xxx SR-IOV hardware capabilities"
+ else
+ error "Missing SR-IOV hardware capabilities"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+download_qat_src() {
+ info "Downloading QAT source ... "
+ mkdir -p "${QAT_INSTALL_DIR_CONTAINER}"
+ pushd "${QAT_INSTALL_DIR_CONTAINER}" > /dev/null
+ curl -L -sS "${QAT_DRIVER_DOWNLOAD_URL}" -o "${QAT_DRIVER_ARCHIVE}"
+ tar xf "${QAT_DRIVER_ARCHIVE}"
+ popd > /dev/null
+}
+
+build_qat_src() {
+ info "Building QAT source ... "
+ pushd "${QAT_INSTALL_DIR_CONTAINER}" > /dev/null
+ KERNEL_SOURCE_ROOT="${KERNEL_SRC_DIR}" ./configure --enable-icp-sriov="${QAT_ENABLE_SRIOV}"
+ make
+ popd > /dev/null
+}
+
+install_qat() {
+ check_kernel_boot_parameter
+ check_sriov_hardware_capabilities
+ download_qat_src
+ build_qat_src
+ _qat_driver_install
+ _adf_ctl_install
+ _qat_service_install
+}
+
+uninstall_qat() {
+ _adf_ctl_uninstall
+ _qat_service_shutdown
+ _qat_service_uninstall
+}
+
+check_cached_version() {
+ info "Checking cached version"
+ if [[ ! -f "${CACHE_FILE}" ]]; then
+ info "Cache file ${CACHE_FILE} not found"
+ return "${RETCODE_ERROR}"
+ fi
+ # Source the cache file and check if the cached driver matches
+ # currently running kernel and driver versions.
+ . "${CACHE_FILE}"
+ if [[ "$(uname -r)" == "${CACHE_KERNEL_VERSION}" ]]; then
+ if [[ "${QAT_DRIVER_VERSION}" == "${CACHE_QAT_DRIVER_VERSION}" ]]; then
+ info "Found existing driver installation for kernel version $(uname -r) and driver version ${QAT_DRIVER_VERSION}"
+ return "${RETCODE_SUCCESS}"
+ fi
+ fi
+ return "${RETCODE_ERROR}"
+}
+
+update_cached_version() {
+ cat >"${CACHE_FILE}"<<__EOF__
+CACHE_KERNEL_VERSION=$(uname -r)
+CACHE_QAT_DRIVER_VERSION=${QAT_DRIVER_VERSION}
+__EOF__
+
+ info "Updated cached version as:"
+ cat "${CACHE_FILE}"
+}
+
+upgrade_driver() {
+ uninstall_qat
+ install_qat
+}
+
+check_driver_started() {
+ _qat_check_started
+}
+
+start_driver() {
+ _qat_service_start
+ _qat_check_started
+}
+
+uninstall_driver() {
+ uninstall_qat
+ rm -f "${CACHE_FILE}"
+}
+
+main() {
+ load_etc_os_release
+ local -r cmd="${1:-install}"
+ case $cmd in
+ install)
+ if ! check_cached_version; then
+ upgrade_driver
+ update_cached_version
+ fi
+ if ! check_driver_started; then
+ start_driver
+ fi
+ ;;
+ uninstall)
+ uninstall_driver
+ ;;
+ install-sample)
+ _qat_sample_install
+ ;;
+ uninstall-sample)
+ _qat_sample_uninstall
+ ;;
+ esac
+}
+
+main "$@"
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 30e8bc42..7d0404a5 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -87,10 +87,20 @@ podsecuritypolicy_enabled: true
# allowedCapabilities:
# - '*'
# by
+# allowedCapabilities:
+# - NET_ADMIN
+# - SYS_ADMIN
+# - SYS_NICE
+# - SYS_PTRACE
# requiredDropCapabilities:
# - NET_RAW
podsecuritypolicy_restricted_spec:
privileged: true
+ allowedCapabilities:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_NICE
+ - SYS_PTRACE
allowPrivilegeEscalation: true
volumes:
- '*'
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index 8d4795be..7803f27a 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -84,10 +84,20 @@ podsecuritypolicy_enabled: true
# allowedCapabilities:
# - '*'
# by
+# allowedCapabilities:
+# - NET_ADMIN
+# - SYS_ADMIN
+# - SYS_NICE
+# - SYS_PTRACE
# requiredDropCapabilities:
# - NET_RAW
podsecuritypolicy_restricted_spec:
privileged: true
+ allowedCapabilities:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_NICE
+ - SYS_PTRACE
allowPrivilegeEscalation: true
volumes:
- '*'
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index b56972c8..ff975544 100644
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -1108,8 +1108,8 @@ spec:
app: ovn4nfv
annotations:
k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
- k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net0" , "defaultGateway": "false"},
- { "name": "ovn-priv-net", "interface": "net1" , "defaultGateway": "false"}]}'
+ k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net2" , "defaultGateway": "false"},
+ { "name": "ovn-priv-net", "interface": "net3" , "defaultGateway": "false"}]}'
spec:
containers:
- name: $ovn4nfv_deployment_name
diff --git a/kud/tests/ovn4nfv.sh b/kud/tests/ovn4nfv.sh
index cd2664ad..e25c2f09 100755
--- a/kud/tests/ovn4nfv.sh
+++ b/kud/tests/ovn4nfv.sh
@@ -34,8 +34,8 @@ echo "===== $deployment_pod details ====="
kubectl exec -it $deployment_pod -- ip a
ovn_nic=$(kubectl exec -it $deployment_pod -- ip a )
-if [[ $ovn_nic != *"net1"* ]]; then
- echo "The $deployment_pod pod doesn't contain the net1 nic"
+if [[ $ovn_nic != *"net3"* ]]; then
+ echo "The $deployment_pod pod doesn't contain the net3 nic"
exit 1
else
echo "Test Completed!"
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 8365f700..11fb6ca0 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,7 +10,7 @@
set -o pipefail
-qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."qat.intel.com/cy2_dc2">="1") | .metadata.name')
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."qat.intel.com/cy2_dc2"|tonumber)>=1) | .metadata.name')
if [ -z "$qat_capable_nodes" ]; then
echo "This test case cannot run. QAT device unavailable."
QAT_ENABLED=False
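The move to tonumber here (and in sriov.sh below) matters because jq compares JSON strings lexicographically: a capacity string such as "10" sorts before "2", so a node with plenty of resources could be filtered incorrectly. A minimal sketch of the difference:

    # Lexicographic comparison of strings: "10" < "2", so this prints false
    echo '{"cap":"10"}' | jq '.cap >= "2"'
    # Numeric comparison after tonumber behaves as intended and prints true
    echo '{"cap":"10"}' | jq '(.cap|tonumber) >= 2'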
diff --git a/kud/tests/sriov-network.sh b/kud/tests/sriov-network.sh
new file mode 100644
index 00000000..3191c2f3
--- /dev/null
+++ b/kud/tests/sriov-network.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o pipefail
+
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_nic"|tonumber)>=2) | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
+ echo "SRIOV test case cannot run on the cluster."
+ exit 0
+else
+ echo "SRIOV option avaiable in the cluster."
+fi
+
+pod_name=pod-case-01
+
+function create_pod_yaml_with_single_VF {
+
+cat << POD > $HOME/$pod_name-single.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '1'
+ limits:
+ intel.com/intel_sriov_nic: '1'
+POD
+}
+
+function create_pod_yaml_with_multiple_VF {
+
+cat << POD > $HOME/$pod_name-multiple.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel, sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '2'
+ limits:
+ intel.com/intel_sriov_nic: '2'
+POD
+}
+create_pod_yaml_with_single_VF
+create_pod_yaml_with_multiple_VF
+
+for podType in ${POD_TYPE:-single multiple}; do
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo "The allocated resource of the node is: " $allocated_node_resource
+
+ kubectl create -f $HOME/$pod_name-$podType.yaml --validate=false
+
+ for pod in $pod_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod-$podType : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Running" ]]; then
+ echo "Pod is up and running.."
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo " The current resource allocation after the pod creation is: " $allocated_node_resource
+ kubectl delete pod $pod_name --now
+ echo "Test complete."
+
+done
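The test can also be run by hand against a cluster that already advertises the intel.com/intel_sriov_nic resource; a minimal sketch, assuming kubectl is pointed at that cluster and the command is issued from the repository root:

    # Run only the single-VF case, or both cases in sequence
    POD_TYPE=single bash kud/tests/sriov-network.sh
    bash kud/tests/sriov-network.sh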
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index e617ea62..7aa97f0c 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,7 +10,7 @@
set -o pipefail
-sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."intel.com/intel_sriov_700">="2") | .metadata.name')
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2) | .metadata.name')
if [ -z "$sriov_capable_nodes" ]; then
echo "SRIOV test case cannot run on the cluster."
exit 0
diff --git a/src/k8splugin/plugins/generic/plugin.go b/src/k8splugin/plugins/generic/plugin.go
index 5f73ad22..f38fee78 100644
--- a/src/k8splugin/plugins/generic/plugin.go
+++ b/src/k8splugin/plugins/generic/plugin.go
@@ -212,7 +212,7 @@ func (g genericPlugin) Delete(resource helm.KubernetesResource, namespace string
}
gvr := mapping.Resource
- deletePolicy := metav1.DeletePropagationForeground
+ deletePolicy := metav1.DeletePropagationBackground
opts := metav1.DeleteOptions{
PropagationPolicy: &deletePolicy,
}
diff --git a/src/k8splugin/plugins/namespace/plugin.go b/src/k8splugin/plugins/namespace/plugin.go
index 59defa36..851a5568 100644
--- a/src/k8splugin/plugins/namespace/plugin.go
+++ b/src/k8splugin/plugins/namespace/plugin.go
@@ -65,7 +65,7 @@ func (p namespacePlugin) Get(resource helm.KubernetesResource, namespace string,
// Delete an existing namespace hosted in a specific Kubernetes cluster
func (p namespacePlugin) Delete(resource helm.KubernetesResource, namespace string, client plugin.KubernetesConnector) error {
- deletePolicy := metaV1.DeletePropagationForeground
+ deletePolicy := metaV1.DeletePropagationBackground
opts := metaV1.DeleteOptions{
PropagationPolicy: &deletePolicy,
}
diff --git a/src/k8splugin/plugins/service/plugin.go b/src/k8splugin/plugins/service/plugin.go
index 06f4b1d5..ba1decbb 100644
--- a/src/k8splugin/plugins/service/plugin.go
+++ b/src/k8splugin/plugins/service/plugin.go
@@ -111,7 +111,7 @@ func (p servicePlugin) Delete(resource helm.KubernetesResource, namespace string
namespace = "default"
}
- deletePolicy := metaV1.DeletePropagationForeground
+ deletePolicy := metaV1.DeletePropagationBackground
opts := metaV1.DeleteOptions{
PropagationPolicy: &deletePolicy,
}
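For reference, the difference between the two propagation policies can be observed with kubectl, which exposes them through --cascade; a minimal sketch, where the deployment name demo is hypothetical:

    # Foreground: the owner object remains (with a deletion timestamp) until its dependents are gone
    kubectl delete deployment demo --cascade=foreground

    # Background (now used by the plugins): the owner is removed immediately and
    # the garbage collector deletes its dependents asynchronously
    kubectl delete deployment demo --cascade=background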