Diffstat (limited to 'kud')
-rw-r--r--  kud/build/Dockerfile  8
-rw-r--r--  kud/deployment_infra/emco/.gitignore  2
-rw-r--r--  kud/deployment_infra/emco/Makefile  48
-rw-r--r--  kud/deployment_infra/emco/composite-app.yaml  110
-rw-r--r--  kud/deployment_infra/emco/examples/README.md  59
-rw-r--r--  kud/deployment_infra/emco/examples/prerequisites.yaml  113
-rw-r--r--  kud/deployment_infra/emco/examples/values-resources.yaml.example  19
-rw-r--r--  kud/deployment_infra/emco/examples/values.yaml.example  24
-rw-r--r--  kud/deployment_infra/helm/.gitignore  1
-rw-r--r--  kud/deployment_infra/helm/Makefile  51
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/Chart.yaml  25
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl  63
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml  59
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml  91
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml  162
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml  12
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml  156
-rw-r--r--  kud/deployment_infra/helm/cpu-manager/values.yaml  63
-rw-r--r--  kud/deployment_infra/helm/multus-cni/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/multus-cni/Chart.yaml  26
-rw-r--r--  kud/deployment_infra/helm/multus-cni/crds/net-attach-def.yaml  45
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/_helpers.tpl  62
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/clusterrole.yaml  31
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/clusterrolebinding.yaml  16
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/cni-conf.yaml  9
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/daemonset.yaml  83
-rw-r--r--  kud/deployment_infra/helm/multus-cni/templates/serviceaccount.yaml  12
-rw-r--r--  kud/deployment_infra/helm/multus-cni/values.yaml  126
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/Chart.yaml  29
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/_helpers.tpl  63
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/clusterrole.yaml  21
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/clusterrolebinding.yaml  16
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/master.yaml  86
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/nfd-worker-conf.yaml  9
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/service.yaml  16
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/serviceaccount.yaml  12
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/templates/worker.yaml  119
-rw-r--r--  kud/deployment_infra/helm/node-feature-discovery/values.yaml  225
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/Chart.yaml  24
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/network.yaml  117
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml  89
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml  157
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl  62
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml  54
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml  16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml  16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml  169
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml  55
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml  102
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml  107
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml  37
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/service.yaml  16
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml  12
-rw-r--r--  kud/deployment_infra/helm/ovn4nfv/values.yaml  177
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/Chart.yaml  25
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl  52
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml  8
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml  60
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml  70
-rw-r--r--  kud/deployment_infra/helm/qat-device-plugin/values.yaml  49
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/Chart.yaml  27
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml  73
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml  109
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml  131
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml  153
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml  89
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl  63
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml  54
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml  30
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml  70
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml  89
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml  107
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml  44
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml  17
-rw-r--r--  kud/deployment_infra/helm/sriov-network-operator/values.yaml  100
-rw-r--r--  kud/deployment_infra/helm/sriov-network/.helmignore  23
-rw-r--r--  kud/deployment_infra/helm/sriov-network/Chart.yaml  24
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl  34
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml  40
-rw-r--r--  kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml  52
-rw-r--r--  kud/deployment_infra/helm/sriov-network/values.yaml  144
-rw-r--r--  kud/deployment_infra/installers/Dockerfile.iavf-driver-installer  20
-rw-r--r--  kud/deployment_infra/installers/Dockerfile.qat-driver-installer  21
-rw-r--r--  kud/deployment_infra/installers/Makefile  10
-rw-r--r--  kud/deployment_infra/installers/_common.sh  41
-rw-r--r--  kud/deployment_infra/installers/_qat-driver-installer.sh  514
-rwxr-xr-x  kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh  134
-rwxr-xr-x  kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh  148
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco-reset.yml  50
-rw-r--r--  kud/deployment_infra/playbooks/configure-emco.yml  161
-rw-r--r--  kud/deployment_infra/playbooks/configure-kata-webhook-reset.yml  30
-rw-r--r--  kud/deployment_infra/playbooks/configure-kata-webhook.yml  69
-rw-r--r--  kud/deployment_infra/playbooks/configure-kata.yml  29
-rw-r--r--  kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch  13
-rw-r--r--  kud/deployment_infra/playbooks/emcoconfig.yaml.j2  21
-rw-r--r--  kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch  13
-rwxr-xr-x  kud/deployment_infra/playbooks/install_iavf_drivers.sh  2
-rw-r--r--  kud/deployment_infra/playbooks/kud-vars.yml  48
-rw-r--r--  kud/deployment_infra/playbooks/sriov_hardware_check.sh  10
-rw-r--r--  kud/deployment_infra/profiles/cpu-manager/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/cpu-manager/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/multus-cni/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/multus-cni/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/node-feature-discovery/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/node-feature-discovery/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/ovn4nfv/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/ovn4nfv/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/qat-device-plugin/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/qat-device-plugin/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/sriov-network-operator/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/sriov-network-operator/override_values.yaml  0
-rw-r--r--  kud/deployment_infra/profiles/sriov-network/manifest.yaml  4
-rw-r--r--  kud/deployment_infra/profiles/sriov-network/override_values.yaml  0
-rw-r--r--  kud/hosting_providers/containerized/README.md  23
-rw-r--r--  kud/hosting_providers/containerized/addons/README.md.tmpl  45
-rw-r--r--  kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl  19
-rw-r--r--  kud/hosting_providers/containerized/addons/values.yaml.tmpl  24
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh  149
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml  11
-rw-r--r--  kud/hosting_providers/vagrant/README.md  14
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh  95
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml  11
-rwxr-xr-x  kud/hosting_providers/vagrant/setup.sh  3
-rw-r--r--  kud/tests/_common.sh  4
-rwxr-xr-x  kud/tests/_functions.sh  5
-rwxr-xr-x  kud/tests/emco.sh  122
-rw-r--r--  kud/tests/kata-clh.yml  26
-rw-r--r--  kud/tests/kata-qemu.yml  26
-rwxr-xr-x  kud/tests/kata.sh  47
-rwxr-xr-x  kud/tests/multus.sh  4
-rwxr-xr-x  kud/tests/ovn4nfv.sh  4
-rwxr-xr-x  kud/tests/qat.sh  2
-rwxr-xr-x  kud/tests/sriov-network.sh  102
-rwxr-xr-x  kud/tests/sriov.sh  2
-rwxr-xr-x  kud/tests/topology-manager.sh  3
140 files changed, 7115 insertions(+), 145 deletions(-)
diff --git a/kud/build/Dockerfile b/kud/build/Dockerfile
index 38c63295..72c46f73 100644
--- a/kud/build/Dockerfile
+++ b/kud/build/Dockerfile
@@ -1,8 +1,16 @@
FROM ubuntu:18.04 as base
ARG KUD_ENABLE_TESTS=false
ARG KUD_PLUGIN_ENABLED=false
+ARG CONTAINER_RUNTIME=docker
+ARG KUD_DEBUG=""
+ARG ENABLE_KATA_WEBHOOK=false
+ARG KATA_WEBHOOK_RUNTIMECLASS=kata-clh
ENV KUD_ENABLE_TESTS=$KUD_ENABLE_TESTS
ENV KUD_PLUGIN_ENABLED=$KUD_PLUGIN_ENABLED
+ENV CONTAINER_RUNTIME=$CONTAINER_RUNTIME
+ENV KUD_DEBUG=$KUD_DEBUG
+ENV ENABLE_KATA_WEBHOOK=$ENABLE_KATA_WEBHOOK
+ENV KATA_WEBHOOK_RUNTIMECLASS=$KATA_WEBHOOK_RUNTIMECLASS
ADD . /usr/src/multicloud-k8s
USER root
SHELL ["/bin/bash", "-c"]
diff --git a/kud/deployment_infra/emco/.gitignore b/kud/deployment_infra/emco/.gitignore
new file mode 100644
index 00000000..c971bc78
--- /dev/null
+++ b/kud/deployment_infra/emco/.gitignore
@@ -0,0 +1,2 @@
+output
+examples/values.yaml
diff --git a/kud/deployment_infra/emco/Makefile b/kud/deployment_infra/emco/Makefile
new file mode 100644
index 00000000..de41bfc5
--- /dev/null
+++ b/kud/deployment_infra/emco/Makefile
@@ -0,0 +1,48 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+OUTPUT_DIR := $(ROOT_DIR)/output
+PACKAGE_DIR := $(OUTPUT_DIR)/packages
+
+ADDONS := multus-cni ovn4nfv node-feature-discovery sriov-network-operator sriov-network qat-device-plugin cpu-manager
+
+.PHONY: $(ADDONS)
+
+all: $(ADDONS)
+
+$(ADDONS):
+ @echo "\n[$@]"
+ @make chart-$@
+ @make profile-$@
+
+dep-%:
+ @if grep "^dependencies:" ../helm/$*/Chart.yaml; then helm dep up ../helm/$*; fi
+
+lint-%: dep-%
+ @helm lint ../helm/$*
+
+chart-%: lint-%
+ @mkdir -p $(PACKAGE_DIR)
+ @tar -czf $(PACKAGE_DIR)/$*.tar.gz -C ../helm $*
+
+profile-%:
+ @mkdir -p $(PACKAGE_DIR)
+ @tar -czf $(PACKAGE_DIR)/$*_profile.tar.gz -C ../profiles/$* .
+
+clean:
+ @rm -rf $(OUTPUT_DIR)
+
+%:
+ @:
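A rough usage sketch for this Makefile: each addon target lints its chart, then produces a chart tarball and a profile tarball under `output/packages`.

```sh
cd kud/deployment_infra/emco

make              # lint and package every addon listed in ADDONS
make multus-cni   # package a single addon, producing:
                  #   output/packages/multus-cni.tar.gz
                  #   output/packages/multus-cni_profile.tar.gz
make clean        # remove the output directory
```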
diff --git a/kud/deployment_infra/emco/composite-app.yaml b/kud/deployment_infra/emco/composite-app.yaml
new file mode 100644
index 00000000..869447ad
--- /dev/null
+++ b/kud/deployment_infra/emco/composite-app.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#creating composite app entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps
+metadata :
+ name: {{ .CompositeApp }}
+ description: "KUD addons"
+spec:
+ version: v1
+
+{{- range $index, $addon := .Apps }}
+---
+#adding app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/apps
+metadata :
+ name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}.tar.gz
+{{- end }}
+
+---
+#creating composite profile entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/composite-profiles
+metadata :
+ name: {{ .CompositeProfile }}
+
+{{- range $index, $addon := .Apps }}
+---
+#adding app profiles to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/composite-profiles/{{ $.CompositeProfile }}/profiles
+metadata :
+ name: {{ $addon }}-profile
+spec:
+ app-name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}_profile.tar.gz
+{{- end }}
+
+---
+#create deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups
+metadata :
+ name: {{ .DeploymentIntentGroup }}
+ description: "description"
+spec:
+ profile: {{ .CompositeProfile }}
+ version: r1
+ logical-cloud: {{ .LogicalCloud }}
+ override-values: []
+
+---
+#create intent in deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/intents
+metadata :
+ name: {{ .DeploymentIntent }}
+spec:
+ intent:
+ genericPlacementIntent: {{ .GenericPlacementIntent }}
+
+---
+#create the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/generic-placement-intents
+metadata :
+ name: {{ .GenericPlacementIntent }}
+spec:
+ logical-cloud: {{ .LogicalCloud }}
+
+{{- range $index, $addon := .Apps }}
+---
+#add the app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/deployment-intent-groups/{{ $.DeploymentIntentGroup }}/generic-placement-intents/{{ $.GenericPlacementIntent }}/app-intents
+metadata:
+ name: {{ $addon }}-placement-intent
+spec:
+ app-name: {{ $addon }}
+ intent:
+ allOf:
+ - provider-name: {{ $.ClusterProvider }}
+ cluster-label-name: {{ $.ClusterLabel }}
+{{- end }}
+
+---
+#Approve
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/approve
+
+---
+#Instantiate
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/instantiate
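For reference, one iteration of the first `range` block above renders to roughly the following, using the example values introduced later in this change (`examples/values.yaml.example`); the expanded packages path is abbreviated here:

```yaml
# Rendered app entry for the multus-cni addon (illustrative).
version: emco/v2
resourceContext:
  anchor: projects/proj1/composite-apps/addons/v1/apps
metadata :
  name: multus-cni
file:
  <PackagesPath>/multus-cni.tar.gz
```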
diff --git a/kud/deployment_infra/emco/examples/README.md b/kud/deployment_infra/emco/examples/README.md
new file mode 100644
index 00000000..b91cce10
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/README.md
@@ -0,0 +1,59 @@
+#### SPDX-License-Identifier: Apache-2.0
+#### Copyright (c) 2021 Intel Corporation
+
+# Installing KUD addons with emcoctl
+
+This folder contains KUD addons to deploy with EMCO. The example
+configuration assumes a single edge cluster to deploy to. EMCO must be
+installed on the cluster before deploying these addons, and emcoctl
+must be installed and configured for the edge cluster.
+
+1. Multus CNI
+2. OVN4NFV K8s Plugin
+3. Node Feature Discovery
+4. SR-IOV Network Operator
+5. SR-IOV Network
+6. QuickAssist Technology (QAT) Device Plugin
+7. CPU Manager for Kubernetes
+
+## Setup environment to deploy addons
+
+1. Export environment variables:
+ - KUBE_PATH: the location of the kubeconfig for the edge cluster, and
+ - HOST_IP: the IP address of the cluster where EMCO is installed.
+
+#### NOTE: For HOST_IP, it is assumed that NodePorts are used to access all EMCO services, both from outside the cluster and between the EMCO services.
+
+2. Generate the values files from the examples:
+
+ `$ envsubst < values.yaml.example > values.yaml`
+ `$ envsubst < values-resources.yaml.example > values-resources.yaml`
+
+## Create prerequisites to deploy addons
+
+Apply prerequisites.yaml. This creates the controllers, one project, one
+cluster, and the default logical cloud. This step only needs to be done
+once.
+
+ `$ emcoctl apply -f prerequisites.yaml -v values.yaml`
+
+## Deploying addons
+
+Apply composite-app.yaml. This deploys the addons listed in the `Apps`
+value.
+
+ `$ emcoctl apply -f ../output/composite-app.yaml -v values.yaml`
+ `$ emcoctl apply -f ../output/composite-app.yaml -v values-resources.yaml`
+
+## Cleanup
+
+1. Delete addons.
+
+ `$ emcoctl delete -f ../output/composite-app.yaml -v values-resources.yaml`
+ `$ emcoctl delete -f ../output/composite-app.yaml -v values.yaml`
+
+2. Cleanup prerequisites.
+
+ `$ emcoctl delete -f prerequisites.yaml -v values.yaml`
+
+#### NOTE: Known issue: deletion of the resources sometimes fails, because some resources cannot be deleted before the resources that reference them are gone. This is a timing issue; if it happens, run the delete again and it should succeed.
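Putting the steps together, an end-to-end run looks roughly like this (a sketch; the kubeconfig path and host address are illustrative):

```sh
export KUBE_PATH=/path/to/edge-cluster/kubeconfig
export HOST_IP=192.168.121.10   # illustrative EMCO host address

make -C ..   # build the addon packages into ../output/packages
envsubst < values.yaml.example > values.yaml
envsubst < values-resources.yaml.example > values-resources.yaml

emcoctl apply -f prerequisites.yaml -v values.yaml
emcoctl apply -f ../output/composite-app.yaml -v values.yaml
emcoctl apply -f ../output/composite-app.yaml -v values-resources.yaml
```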
diff --git a/kud/deployment_infra/emco/examples/prerequisites.yaml b/kud/deployment_infra/emco/examples/prerequisites.yaml
new file mode 100644
index 00000000..a44546e0
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/prerequisites.yaml
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#create project
+version: emco/v2
+resourceContext:
+ anchor: projects
+metadata :
+ name: {{ .ProjectName }}
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: rsync
+spec:
+ host: {{ .HostIP }}
+ port: {{ .RsyncPort }}
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: gac
+spec:
+ host: {{ .HostIP }}
+ port: {{ .GacPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: ovnaction
+spec:
+ host: {{ .HostIP }}
+ port: {{ .OvnPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: dtc
+spec:
+ host: {{ .HostIP }}
+ port: {{ .DtcPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating cluster provider
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers
+metadata :
+ name: {{ .ClusterProvider }}
+
+---
+#creating cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ .ClusterProvider }}/clusters
+metadata :
+ name: {{ .Cluster1 }}
+file:
+ {{ .KubeConfig }}
+
+---
+#Add label cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ .ClusterProvider }}/clusters/{{ .Cluster1 }}/labels
+label-name: {{ .ClusterLabel }}
+
+---
+#create default logical cloud with admin permissions
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds
+metadata:
+ name: {{ .LogicalCloud }}
+spec:
+ level: "0"
+
+---
+#add cluster reference to logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/cluster-references
+metadata:
+ name: lc-cl-1
+spec:
+ cluster-provider: {{ .ClusterProvider }}
+ cluster-name: {{ .Cluster1 }}
+ loadbalancer-ip: "0.0.0.0"
+
+---
+#instantiate logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/instantiate
+
diff --git a/kud/deployment_infra/emco/examples/values-resources.yaml.example b/kud/deployment_infra/emco/examples/values-resources.yaml.example
new file mode 100644
index 00000000..acfd903c
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/values-resources.yaml.example
@@ -0,0 +1,19 @@
+HostIP: $HOST_IP
+KubeConfig: $KUBE_PATH
+PackagesPath: $PWD/../output/packages
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+CompositeApp: addon-resources
+CompositeProfile: addon-resources-profile
+DeploymentIntentGroup: addon-resources-deployment-intent-group
+DeploymentIntent: addon-resources-deployment-intent
+GenericPlacementIntent: addon-resources-placement-intent
+Apps:
+- sriov-network
diff --git a/kud/deployment_infra/emco/examples/values.yaml.example b/kud/deployment_infra/emco/examples/values.yaml.example
new file mode 100644
index 00000000..37ddacf6
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/values.yaml.example
@@ -0,0 +1,24 @@
+HostIP: $HOST_IP
+KubeConfig: $KUBE_PATH
+PackagesPath: $PWD/../output/packages
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+CompositeApp: addons
+CompositeProfile: addons-profile
+DeploymentIntentGroup: addons-deployment-intent-group
+DeploymentIntent: addons-deployment-intent
+GenericPlacementIntent: addons-placement-intent
+Apps:
+- multus-cni
+- ovn4nfv
+- node-feature-discovery
+- sriov-network-operator
+- qat-device-plugin
+- cpu-manager
diff --git a/kud/deployment_infra/helm/.gitignore b/kud/deployment_infra/helm/.gitignore
new file mode 100644
index 00000000..1521c8b7
--- /dev/null
+++ b/kud/deployment_infra/helm/.gitignore
@@ -0,0 +1 @@
+dist
diff --git a/kud/deployment_infra/helm/Makefile b/kud/deployment_infra/helm/Makefile
new file mode 100644
index 00000000..0cc09007
--- /dev/null
+++ b/kud/deployment_infra/helm/Makefile
@@ -0,0 +1,51 @@
+# Copyright © 2017 Amdocs, Bell Canada
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+OUTPUT_DIR := $(ROOT_DIR)/dist
+PACKAGE_DIR := $(OUTPUT_DIR)/packages
+
+EXCLUDES := sdewan_controllers
+HELM_CHARTS := $(filter-out $(EXCLUDES), $(patsubst %/.,%,$(wildcard */.)))
+
+.PHONY: $(HELM_CHARTS)
+
+all: $(HELM_CHARTS)
+
+$(HELM_CHARTS):
+ @echo "\n[$@]"
+ @make package-$@
+
+make-%:
+ @if [ -f $*/Makefile ]; then make -C $*; fi
+
+dep-%: make-%
+ @if grep "^dependencies:" $*/Chart.yaml; then helm dep up $*; fi
+
+lint-%: dep-%
+ @if [ -f $*/Chart.yaml ]; then helm lint $*; fi
+
+package-%: lint-%
+ @mkdir -p $(PACKAGE_DIR)
+ @if [ -f $*/Chart.yaml ]; then helm package -d $(PACKAGE_DIR) $*; fi
+ @helm repo index $(PACKAGE_DIR)
+
+clean:
+ @rm -f */Chart.lock
+ @find . -type f -name '*.tgz' -delete
+ @rm -rf $(PACKAGE_DIR)/*
+ @rm -rf $(OUTPUT_DIR)
+
+%:
+ @:
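Usage mirrors the EMCO Makefile above, but this one produces versioned Helm packages plus a repository index. A quick sketch:

```sh
cd kud/deployment_infra/helm

make cpu-manager   # lint and package a single chart
ls dist/packages   # cpu-manager-0.1.0.tgz, index.yaml
make               # package every chart except EXCLUDES
make clean         # remove Chart.lock files, tarballs, and dist
```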
diff --git a/kud/deployment_infra/helm/cpu-manager/.helmignore b/kud/deployment_infra/helm/cpu-manager/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/cpu-manager/Chart.yaml b/kud/deployment_infra/helm/cpu-manager/Chart.yaml
new file mode 100644
index 00000000..5635f21f
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/Chart.yaml
@@ -0,0 +1,25 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v1.4.1-no-taint
+description: |
+ CPU Manager for Kubernetes provides basic core affinity for
+ NFV-style workloads.
+name: cpu-manager
+sources:
+ - https://github.com/integratedcloudnative/CPU-Manager-for-Kubernetes
+home: https://github.com/integratedcloudnative/CPU-Manager-for-Kubernetes
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl b/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl
new file mode 100644
index 00000000..a0f94dc0
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cpu-manager.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cpu-manager.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cpu-manager.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cpu-manager.labels" -}}
+helm.sh/chart: {{ include "cpu-manager.chart" . }}
+{{ include "cpu-manager.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "cpu-manager.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "cpu-manager.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cpu-manager.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default ( printf "%s-serviceaccount" (include "cpu-manager.fullname" .) ) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml b/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml
new file mode 100644
index 00000000..003a5b61
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/clusterrole.yaml
@@ -0,0 +1,59 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-custom-resource-definition-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["intel.com"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions", "customresourcedefinitions.extensions"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-daemonset-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["extensions", "apps"]
+ resources: ["daemonsets", "daemonsets.extensions", "daemonsets.apps"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-version-controller
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+ - nonResourceURLs: ["*"]
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-installer
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: ["", "apps", "extensions", "admissionregistration.k8s.io"]
+ resources: ["secrets", "configmaps", "deployments", "services", "mutatingwebhookconfigurations"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-node-lister
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["*"]
+{{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..2d08c820
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/clusterrolebinding.yaml
@@ -0,0 +1,91 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-daemonset
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-daemonset-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-node
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:node
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-crd
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-custom-resource-definition-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-version
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-version-controller
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-webhook-installer
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-webhook-installer
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-role-binding-node-lister
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "cpu-manager.fullname" . }}-node-lister
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml b/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml
new file mode 100644
index 00000000..8b545133
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/daemonset.yaml
@@ -0,0 +1,162 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-reconcile-nodereport-ds
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-reconcile-nodereport-ds
+spec:
+ selector:
+ matchLabels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 6 }}
+ app: cmk-reconcile-nodereport-ds
+ template:
+ metadata:
+ labels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 8 }}
+ app: cmk-reconcile-nodereport-ds
+ annotations:
+ {{- toYaml .Values.annotations | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ serviceAccountName: {{ include "cpu-manager.serviceAccountName" . }}
+ initContainers:
+ - args:
+ - "/cmk/cmk.py init --conf-dir=/etc/cmk --num-exclusive-cores=$NUM_EXCLUSIVE_CORES --num-shared-cores=$NUM_SHARED_CORES"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_PROC_FS
+ value: '/proc'
+ - name: NUM_EXCLUSIVE_CORES
+ value: {{ .Values.exclusiveNumCores | quote }}
+ - name: NUM_SHARED_CORES
+ value: {{ .Values.sharedNumCores | quote }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-init-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py discover --conf-dir=/etc/cmk {{ if .Values.untaintRequired }}--no-taint{{ end }}"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-discover-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py install --install-dir=/opt/bin"
+ command:
+ - "/bin/bash"
+ - "-c"
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-install-pod
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/opt/bin"
+ name: cmk-install-dir
+ containers:
+ - args:
+ - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- reconcile --interval=$CMK_RECONCILE_SLEEP_TIME --publish"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_RECONCILE_SLEEP_TIME
+ value: {{ .Values.reconcileSleepTime | quote }}
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-reconcile
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/host/proc"
+ name: host-proc
+ readOnly: true
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ - args:
+ - "/cmk/cmk.py isolate --pool=infra /cmk/cmk.py -- node-report --interval=$CMK_NODE_REPORT_SLEEP_TIME --publish"
+ command:
+ - "/bin/bash"
+ - "-c"
+ env:
+ - name: CMK_NODE_REPORT_SLEEP_TIME
+ value: {{ .Values.nodeReportSleepTime | quote }}
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-nodereport
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: "/host/proc"
+ name: host-proc
+ readOnly: true
+ - mountPath: "/etc/cmk"
+ name: cmk-conf-dir
+ volumes:
+ - hostPath:
+ path: "/proc"
+ name: host-proc
+ - hostPath:
+ path: {{ .Values.configDir | quote }}
+ name: cmk-conf-dir
+ - hostPath:
+ path: {{ .Values.installDir | quote }}
+ name: cmk-install-dir
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml b/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml
new file mode 100644
index 00000000..8f0b98e0
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "cpu-manager.serviceAccountName" . }}
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml b/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml
new file mode 100644
index 00000000..62e9fdbf
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/templates/webhook.yaml
@@ -0,0 +1,156 @@
+{{- if .Values.webhook.enabled -}}
+{{- $altNames := list "cmk-webhook-service" ( printf "cmk-webhook-service.%s" .Release.Namespace ) ( printf "cmk-webhook-service.%s.svc" .Release.Namespace ) -}}
+{{- $cert := genSelfSignedCert ( printf "cmk-webhook-service.%s.svc" .Release.Namespace ) nil $altNames 36500 -}}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-certs
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+data:
+ cert.pem: {{ $cert.Cert | b64enc }}
+ key.pem: {{ $cert.Key | b64enc }}
+type: Opaque
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-configmap
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+data:
+ server.yaml: |
+ server:
+ binding-address: "0.0.0.0"
+ port: {{ .Values.webhook.service.port }}
+ cert: "/etc/ssl/cert.pem"
+ key: "/etc/ssl/key.pem"
+ mutations: "/etc/webhook/mutations.yaml"
+ mutations.yaml: |
+ mutations:
+ perPod:
+ metadata:
+ annotations:
+ cmk.intel.com/resources-injected: "true"
+ spec:
+ serviceAccount: {{ include "cpu-manager.serviceAccountName" . }}
+ tolerations:
+ - operator: Exists
+ volumes:
+ - name: cmk-host-proc
+ hostPath:
+ path: "/proc"
+ - name: cmk-config-dir
+ hostPath:
+ path: {{ .Values.configDir | quote }}
+ - name: cmk-install-dir
+ hostPath:
+ path: {{ .Values.installDir | quote }}
+ perContainer:
+ env:
+ - name: CMK_PROC_FS
+ value: "/host/proc"
+ volumeMounts:
+ - name: cmk-host-proc
+ mountPath: /host/proc
+ readOnly: true
+ - name: cmk-config-dir
+ mountPath: /etc/cmk
+ - name: cmk-install-dir
+ mountPath: /opt/bin
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cmk-webhook-service
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+spec:
+ ports:
+ - port: {{ .Values.webhook.service.port }}
+ targetPort: 443
+ selector:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+ name: {{ include "cpu-manager.fullname" . }}-webhook-deployment
+spec:
+ replicas: {{ .Values.webhook.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 6 }}
+ app: cmk-webhook-app
+ template:
+ metadata:
+ labels:
+ {{- include "cpu-manager.selectorLabels" . | nindent 8 }}
+ app: cmk-webhook-app
+ annotations:
+ {{- toYaml .Values.webhook.annotations | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.podSecurityContext | nindent 8 }}
+ tolerations:
+ - operator: "Exists"
+ containers:
+ - args:
+ - "/cmk/cmk.py webhook --conf-file /etc/webhook/server.yaml"
+ command:
+ - "/bin/bash"
+ - "-c"
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ name: cmk-webhook
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ volumeMounts:
+ - mountPath: /etc/webhook
+ name: cmk-webhook-configmap
+ - mountPath: /etc/ssl
+ name: cmk-webhook-certs
+ readOnly: True
+ volumes:
+ - name: cmk-webhook-configmap
+ configMap:
+ name: {{ include "cpu-manager.fullname" . }}-webhook-configmap
+ - name: cmk-webhook-certs
+ secret:
+ secretName: {{ include "cpu-manager.fullname" . }}-webhook-certs
+---
+apiVersion: admissionregistration.k8s.io/v1beta1
+kind: MutatingWebhookConfiguration
+metadata:
+ labels:
+ {{- include "cpu-manager.labels" . | nindent 4 }}
+ app: cmk-webhook-app
+ name: {{ include "cpu-manager.fullname" . }}-webhook-config
+webhooks:
+- clientConfig:
+ caBundle: {{ $cert.Cert | b64enc }}
+ service:
+ name: cmk-webhook-service
+ namespace: {{ $.Release.Namespace }}
+ path: /mutate
+ failurePolicy: Ignore
+ name: cmk.intel.com
+ rules:
+ - apiGroups:
+ - ""
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ resources:
+ - pods
+{{- end }}
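Note that `genSelfSignedCert` runs at template-rendering time, so each install or upgrade generates a fresh certificate and matching `caBundle`. A quick way to check that the webhook pieces landed after installing the chart (object names and the `app` label follow the templates above):

```sh
kubectl get mutatingwebhookconfigurations | grep webhook-config
kubectl get svc,deployment -l app=cmk-webhook-app
kubectl get secret | grep webhook-certs
```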
diff --git a/kud/deployment_infra/helm/cpu-manager/values.yaml b/kud/deployment_infra/helm/cpu-manager/values.yaml
new file mode 100644
index 00000000..29783441
--- /dev/null
+++ b/kud/deployment_infra/helm/cpu-manager/values.yaml
@@ -0,0 +1,63 @@
+# sharedNumCores is the number of CPU cores to be assigned to the "shared" pool on each of the nodes
+sharedNumCores: 1
+# exclusiveNumCores is the number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
+exclusiveNumCores: 2
+# untaintRequired removes the cmk=true:NoSchedule taint from each of the nodes
+untaintRequired: true
+# configDir is the CMK config dir in the host file system
+configDir: "/etc/cmk"
+# installDir is the CMK installation dir in the host file system
+installDir: "/opt/bin"
+# reconcileSleepTime is the sleep interval in seconds between consecutive CMK reconcile runs
+reconcileSleepTime: 60
+# nodeReportSleepTime is the sleep interval in seconds between consecutive CMK node report runs
+nodeReportSleepTime: 60
+
+image:
+ repository: integratedcloudnative/cmk
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+podSecurityContext: {}
+ # fsGroup: 2000
+
+securityContext: {}
+
+resources: {}
+
+annotations: {}
+
+affinity: {}
+
+tolerations: {}
+
+webhook:
+ # webhook.enabled runs the CMK mutating admission webhook server
+ enabled: true
+
+ service:
+ port: 443
+
+ replicaCount: 1
+
+ annotations: {}
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
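Since the pool sizes and host paths above are ordinary values, they can be overridden per cluster at install time. A minimal sketch (the release name and core counts are illustrative):

```sh
helm install cmk ./cpu-manager \
  --set sharedNumCores=2 \
  --set exclusiveNumCores=4 \
  --set untaintRequired=false
```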
diff --git a/kud/deployment_infra/helm/multus-cni/.helmignore b/kud/deployment_infra/helm/multus-cni/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/multus-cni/Chart.yaml b/kud/deployment_infra/helm/multus-cni/Chart.yaml
new file mode 100644
index 00000000..84d2255c
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/Chart.yaml
@@ -0,0 +1,26 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v3.7
+description: |
+ Multus CNI is a container network interface (CNI) plugin for
+ Kubernetes that enables attaching multiple network interfaces to
+ pods.
+home: https://github.com/intel/multus-cni
+name: multus-cni
+sources:
+ - https://github.com/intel/multus-cni
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/multus-cni/crds/net-attach-def.yaml b/kud/deployment_infra/helm/multus-cni/crds/net-attach-def.yaml
new file mode 100644
index 00000000..85347bd3
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/crds/net-attach-def.yaml
@@ -0,0 +1,45 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ name: network-attachment-definitions.k8s.cni.cncf.io
+spec:
+ group: k8s.cni.cncf.io
+ scope: Namespaced
+ names:
+ plural: network-attachment-definitions
+ singular: network-attachment-definition
+ kind: NetworkAttachmentDefinition
+ shortNames:
+ - net-attach-def
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ openAPIV3Schema:
+ description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
+ Working Group to express the intent for attaching pods to one or more logical or physical
+ networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
+ type: object
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
+ representation of an object. Servers should convert recognized schemas to the
+ latest internal value, and may reject unrecognized values. More info:
+ https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
+ type: object
+ properties:
+ config:
+ description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
+ type: string
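With this CRD in place, a secondary network is declared as a NetworkAttachmentDefinition whose `config` field holds a CNI configuration, and pods request it via the `k8s.v1.cni.cncf.io/networks` annotation. A minimal sketch (the macvlan config, interface, and names are illustrative, not part of this chart):

```yaml
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-net          # illustrative name
spec:
  config: '{
      "cniVersion": "0.3.1",
      "type": "macvlan",
      "master": "eth0",
      "ipam": { "type": "host-local", "subnet": "10.10.0.0/16" }
    }'
```

A pod then attaches to it by adding the annotation `k8s.v1.cni.cncf.io/networks: macvlan-net`.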
diff --git a/kud/deployment_infra/helm/multus-cni/templates/_helpers.tpl b/kud/deployment_infra/helm/multus-cni/templates/_helpers.tpl
new file mode 100644
index 00000000..71aee739
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "multus.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "multus.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "multus.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "multus.labels" -}}
+helm.sh/chart: {{ include "multus.chart" . }}
+{{ include "multus.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "multus.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "multus.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "multus.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "multus.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/multus-cni/templates/clusterrole.yaml b/kud/deployment_infra/helm/multus-cni/templates/clusterrole.yaml
new file mode 100644
index 00000000..1a3a87e0
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/clusterrole.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.rbac.create }}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "multus.fullname" . }}
+ labels:
+ {{- include "multus.labels" . | nindent 4 }}
+rules:
+ - apiGroups: ["k8s.cni.cncf.io"]
+ resources:
+ - '*'
+ verbs:
+ - '*'
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ verbs:
+ - get
+ - update
+ - apiGroups:
+ - ""
+ - events.k8s.io
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+ - update
+{{- end }}
\ No newline at end of file
diff --git a/kud/deployment_infra/helm/multus-cni/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/multus-cni/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..4e626480
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rbac.create }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "multus.fullname" . }}
+ labels:
+ {{- include "multus.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "multus.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "multus.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
\ No newline at end of file
diff --git a/kud/deployment_infra/helm/multus-cni/templates/cni-conf.yaml b/kud/deployment_infra/helm/multus-cni/templates/cni-conf.yaml
new file mode 100644
index 00000000..b1212139
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/cni-conf.yaml
@@ -0,0 +1,9 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Values.config.name }}
+ labels:
+ {{- include "multus.labels" . | nindent 4 }}
+ tier: node
+data:
+ cni-conf.json: | {{ .Values.config.data | toPrettyJson | nindent 4}}
diff --git a/kud/deployment_infra/helm/multus-cni/templates/daemonset.yaml b/kud/deployment_infra/helm/multus-cni/templates/daemonset.yaml
new file mode 100644
index 00000000..551d6db6
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/daemonset.yaml
@@ -0,0 +1,83 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "multus.fullname" . }}-ds
+ labels:
+ {{- include "multus.labels" . | nindent 4 }}
+ tier: node
+spec:
+ selector:
+ matchLabels:
+ {{- include "multus.selectorLabels" . | nindent 6 }}
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ {{- include "multus.selectorLabels" . | nindent 8 }}
+ tier: node
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "multus.serviceAccountName" . }}
+ containers:
+ - name: kube-multus
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ {{- if .Values.config.enabled }}
+ - /bin/bash
+ - -cex
+ - |
+ #!/bin/bash
+ sed "s|__KUBERNETES_NODE_NAME__|${KUBERNETES_NODE_NAME}|g" /tmp/multus-conf/{{ .Values.config.path }}.template > /tmp/{{ .Values.config.path }}
+ /entrypoint.sh --multus-conf-file=/tmp/{{ .Values.config.path }}
+ {{- else }}
+ - /entrypoint.sh
+ - "--multus-conf-file=auto"
+ - "--cni-version=0.3.1"
+ {{- end }}
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "rm /host/etc/cni/net.d/*-multus.conf"]
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: multus-cfg
+ mountPath: /tmp/multus-conf
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: multus-cfg
+ configMap:
+ name: {{ .Values.config.name }}
+ items:
+ - key: cni-conf.json
+ path: {{ .Values.config.path }}.template
diff --git a/kud/deployment_infra/helm/multus-cni/templates/serviceaccount.yaml b/kud/deployment_infra/helm/multus-cni/templates/serviceaccount.yaml
new file mode 100644
index 00000000..144a098a
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "multus.serviceAccountName" . }}
+ labels:
+ {{- include "multus.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/multus-cni/values.yaml b/kud/deployment_infra/helm/multus-cni/values.yaml
new file mode 100644
index 00000000..e08f665e
--- /dev/null
+++ b/kud/deployment_infra/helm/multus-cni/values.yaml
@@ -0,0 +1,126 @@
+image:
+ repository: nfvpe/multus
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+securityContext:
+ privileged: true
+
+resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+
+nodeSelector:
+ kubernetes.io/arch: amd64
+
+tolerations:
+- operator: Exists
+ effect: NoSchedule
+
+# NOTE: If you'd prefer to manually apply a configuration file, you
+# may create one here. Additionally, ensure that the name
+# "{{ .Values.config.path }}" sorts alphabetically first in the
+# /etc/cni/net.d/ directory on each node; otherwise the kubelet will
+# not use it.
+#
+# __KUBERNETES_NODE_NAME__ below is replaced by spec.nodeName at
+# startup.
+config:
+ enabled: true
+ name: multus-cni-config
+ path: 00-multus.conf
+ # data:
+ # {
+ # "name": "multus-cni-network",
+ # "type": "multus",
+ # "capabilities": {
+ # "portMappings": true
+ # },
+ # "delegates": [
+ # {
+ # "cniVersion": "0.3.1",
+ # "name": "default-cni-network",
+ # "plugins": [
+ # {
+ # "name": "k8s-pod-network",
+ # "cniVersion": "0.3.1",
+ # "type": "calico",
+ # "log_level": "info",
+ # "datastore_type": "kubernetes",
+ # "nodename": "__KUBERNETES_NODE_NAME__",
+ # "mtu": 1440,
+ # "ipam": {
+ # "type": "calico-ipam"
+ # },
+ # "policy": {
+ # "type": "k8s"
+ # },
+ # "kubernetes": {
+ # "kubeconfig": "/etc/cni/net.d/calico-kubeconfig"
+ # }
+ # },
+ # {
+ # "type": "portmap",
+ # "snat": true,
+ # "capabilities": {"portMappings": true}
+ # }
+ # ]
+ # }
+ # ],
+ # "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig"
+ # }
+ data:
+ {
+ "cniVersion": "0.3.1",
+ "name": "multus-cni-network",
+ "type": "multus",
+ "capabilities": {
+ "portMappings": true
+ },
+ "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig",
+ "delegates": [
+ {
+ "name": "cbr0",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "flannel",
+ "delegate": {
+ "isDefaultGateway": true,
+ "hairpinMode": true
+ }
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ }
+ }
+ ]
+ }
+ ]
+ }
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
+ serviceAccountName:
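
A hedged usage sketch: the default flannel delegate above can be swapped at install time by overriding config.data, reusing the shape of the commented Calico example in this file; the override file name and all settings below are illustrative only:

    # values-calico.yaml (hypothetical override file)
    config:
      enabled: true
      name: multus-cni-config
      path: 00-multus.conf
      data:
        {
          "name": "multus-cni-network",
          "type": "multus",
          "capabilities": { "portMappings": true },
          "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig",
          "delegates": [
            {
              "cniVersion": "0.3.1",
              "name": "default-cni-network",
              "plugins": [
                {
                  "name": "k8s-pod-network",
                  "cniVersion": "0.3.1",
                  "type": "calico",
                  "datastore_type": "kubernetes",
                  "nodename": "__KUBERNETES_NODE_NAME__",
                  "ipam": { "type": "calico-ipam" },
                  "policy": { "type": "k8s" },
                  "kubernetes": { "kubeconfig": "/etc/cni/net.d/calico-kubeconfig" }
                }
              ]
            }
          ]
        }

Keeping the __KUBERNETES_NODE_NAME__ placeholder is important: the daemonset substitutes it per node at startup, as shown above.
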
diff --git a/kud/deployment_infra/helm/node-feature-discovery/.helmignore b/kud/deployment_infra/helm/node-feature-discovery/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/node-feature-discovery/Chart.yaml b/kud/deployment_infra/helm/node-feature-discovery/Chart.yaml
new file mode 100644
index 00000000..387794f4
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/Chart.yaml
@@ -0,0 +1,29 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v0.7.0
+description: |
+ Detects hardware features available on each node in a Kubernetes cluster, and advertises
+ those features using node labels.
+name: node-feature-discovery
+sources:
+ - https://github.com/kubernetes-sigs/node-feature-discovery
+home: https://github.com/kubernetes-sigs/node-feature-discovery
+keywords:
+ - feature-discovery
+ - feature-detection
+ - node-labels
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/_helpers.tpl b/kud/deployment_infra/helm/node-feature-discovery/templates/_helpers.tpl
new file mode 100644
index 00000000..73784a54
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "node-feature-discovery.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "node-feature-discovery.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "node-feature-discovery.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "node-feature-discovery.labels" -}}
+helm.sh/chart: {{ include "node-feature-discovery.chart" . }}
+{{ include "node-feature-discovery.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "node-feature-discovery.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "node-feature-discovery.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "node-feature-discovery.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "node-feature-discovery.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrole.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrole.yaml
new file mode 100644
index 00000000..a4da2303
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrole.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "node-feature-discovery.fullname" . }}
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+  # When using the command-line flag --resource-labels to create extended
+  # resources, you will need to uncomment "- nodes/status".
+ # - nodes/status
+ verbs:
+ - get
+ - patch
+ - update
+ - list
+{{- end }}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..4766d9a1
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "node-feature-discovery.fullname" . }}
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "node-feature-discovery.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "node-feature-discovery.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/master.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/master.yaml
new file mode 100644
index 00000000..7ea68ff9
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/master.yaml
@@ -0,0 +1,86 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "node-feature-discovery.fullname" . }}-master
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+ role: master
+spec:
+ replicas: {{ .Values.master.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }}
+ role: master
+ template:
+ metadata:
+ labels:
+ {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }}
+ role: master
+ annotations:
+ {{- toYaml .Values.master.annotations | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "node-feature-discovery.serviceAccountName" . }}
+ securityContext:
+ {{- toYaml .Values.master.podSecurityContext | nindent 8 }}
+ containers:
+ - name: master
+ securityContext:
+ {{- toYaml .Values.master.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - containerPort: 8080
+ name: grpc
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ command:
+ - "nfd-master"
+ resources:
+ {{- toYaml .Values.master.resources | nindent 12 }}
+ args:
+ {{- if .Values.master.instance | empty | not }}
+ - "--instance={{ .Values.master.instance }}"
+ {{- end }}
+## Enable TLS authentication
+## The example below assumes that the root certificate, named ca.crt, is stored
+## in a ConfigMap named nfd-ca-cert, and that the TLS authentication credentials
+## are stored in a TLS Secret named nfd-master-cert.
+## Additional hardening can be enabled by specifying --verify-node-name in
+## args, in which case every nfd-worker requires an individual node-specific
+## TLS certificate.
+# - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
+# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
+# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
+# volumeMounts:
+# - name: nfd-ca-cert
+# mountPath: "/etc/kubernetes/node-feature-discovery/trust"
+# readOnly: true
+# - name: nfd-master-cert
+# mountPath: "/etc/kubernetes/node-feature-discovery/certs"
+# readOnly: true
+# volumes:
+# - name: nfd-ca-cert
+# configMap:
+# name: nfd-ca-cert
+# - name: nfd-master-cert
+# secret:
+# secretName: nfd-master-cert
+ {{- with .Values.master.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.master.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.master.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
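
The optional master.instance flag rendered above is useful when more than one NFD deployment runs in the same cluster; a hedged values fragment (the instance name is hypothetical):

    master:
      instance: kud   # rendered as --instance=kud on the nfd-master command line
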
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/nfd-worker-conf.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/nfd-worker-conf.yaml
new file mode 100644
index 00000000..56763fe1
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/nfd-worker-conf.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.worker.configmapName }}
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+data:
+ nfd-worker.conf: |
+ {{- .Values.worker.config | nindent 4 }}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/service.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/service.yaml
new file mode 100644
index 00000000..65483625
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nfd-master
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+ role: master
+spec:
+ type: {{ .Values.master.service.type }}
+ ports:
+ - port: {{ .Values.master.service.port }}
+ targetPort: grpc
+ protocol: TCP
+ name: grpc
+ selector:
+ {{- include "node-feature-discovery.selectorLabels" . | nindent 4 }}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/serviceaccount.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/serviceaccount.yaml
new file mode 100644
index 00000000..e4b09bad
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "node-feature-discovery.serviceAccountName" . }}
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/node-feature-discovery/templates/worker.yaml b/kud/deployment_infra/helm/node-feature-discovery/templates/worker.yaml
new file mode 100644
index 00000000..998a0686
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/templates/worker.yaml
@@ -0,0 +1,119 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "node-feature-discovery.fullname" . }}-worker
+ labels:
+ {{- include "node-feature-discovery.labels" . | nindent 4 }}
+ role: worker
+spec:
+ selector:
+ matchLabels:
+ {{- include "node-feature-discovery.selectorLabels" . | nindent 6 }}
+ role: worker
+ template:
+ metadata:
+ labels:
+ {{- include "node-feature-discovery.selectorLabels" . | nindent 8 }}
+ role: worker
+ annotations:
+ {{- toYaml .Values.worker.annotations | nindent 8 }}
+ spec:
+ dnsPolicy: ClusterFirstWithHostNet
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.worker.podSecurityContext | nindent 8 }}
+ containers:
+ - name: worker
+ securityContext:
+ {{- toYaml .Values.worker.securityContext | nindent 12 }}
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ resources:
+ {{- toYaml .Values.worker.resources | nindent 12 }}
+ command:
+ - "nfd-worker"
+ args:
+ - "--sleep-interval={{ .Values.worker.sleepTime }}s"
+ - "--server=nfd-master:{{ .Values.master.service.port }}"
+## Enable TLS authentication (1/3)
+## The example below assumes that the root certificate, named ca.crt, is stored
+## in a ConfigMap named nfd-ca-cert, and that the TLS authentication credentials
+## are stored in a TLS Secret named nfd-worker-cert.
+# - "--ca-file=/etc/kubernetes/node-feature-discovery/trust/ca.crt"
+# - "--key-file=/etc/kubernetes/node-feature-discovery/certs/tls.key"
+# - "--cert-file=/etc/kubernetes/node-feature-discovery/certs/tls.crt"
+ volumeMounts:
+ - name: host-boot
+ mountPath: "/host-boot"
+ readOnly: true
+ - name: host-os-release
+ mountPath: "/host-etc/os-release"
+ readOnly: true
+ - name: host-sys
+ mountPath: "/host-sys"
+ readOnly: true
+ - name: source-d
+ mountPath: "/etc/kubernetes/node-feature-discovery/source.d/"
+ readOnly: true
+ - name: features-d
+ mountPath: "/etc/kubernetes/node-feature-discovery/features.d/"
+ readOnly: true
+ - name: nfd-worker-conf
+ mountPath: "/etc/kubernetes/node-feature-discovery"
+ readOnly: true
+## Enable TLS authentication (2/3)
+# - name: nfd-ca-cert
+# mountPath: "/etc/kubernetes/node-feature-discovery/trust"
+# readOnly: true
+# - name: nfd-worker-cert
+# mountPath: "/etc/kubernetes/node-feature-discovery/certs"
+# readOnly: true
+ volumes:
+ - name: host-boot
+ hostPath:
+ path: "/boot"
+ - name: host-os-release
+ hostPath:
+ path: "/etc/os-release"
+ - name: host-sys
+ hostPath:
+ path: "/sys"
+ - name: source-d
+ hostPath:
+ path: "/etc/kubernetes/node-feature-discovery/source.d/"
+ - name: features-d
+ hostPath:
+ path: "/etc/kubernetes/node-feature-discovery/features.d/"
+ - name: nfd-worker-conf
+ configMap:
+ name: {{ .Values.worker.configmapName }}
+ items:
+ - key: nfd-worker.conf
+ path: nfd-worker.conf
+## Enable TLS authentication (3/3)
+# - name: nfd-ca-cert
+# configMap:
+# name: nfd-ca-cert
+# - name: nfd-worker-cert
+# secret:
+# secretName: nfd-worker-cert
+ {{- with .Values.worker.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.worker.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.worker.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
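
Once nfd-master has labeled the nodes, workloads can target detected features with ordinary node affinity. A hedged sketch follows; the pod name and image are placeholders, and the label key (which follows NFD's feature.node.kubernetes.io/ prefix) depends on what the hardware actually exposes:

    apiVersion: v1
    kind: Pod
    metadata:
      name: avx2-workload        # hypothetical
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              # Schedule only onto nodes where NFD detected AVX2 support.
              - key: feature.node.kubernetes.io/cpu-cpuid.AVX2
                operator: In
                values: ["true"]
      containers:
      - name: app
        image: busybox           # placeholder image
        command: ["sleep", "infinity"]
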
diff --git a/kud/deployment_infra/helm/node-feature-discovery/values.yaml b/kud/deployment_infra/helm/node-feature-discovery/values.yaml
new file mode 100644
index 00000000..3b9091b1
--- /dev/null
+++ b/kud/deployment_infra/helm/node-feature-discovery/values.yaml
@@ -0,0 +1,225 @@
+image:
+ repository: k8s.gcr.io/nfd/node-feature-discovery
+  # This should be set to 'IfNotPresent' for released versions.
+ pullPolicy: IfNotPresent
+  # If tag is defined, the given image tag will be used; otherwise Chart.AppVersion is used.
+ # tag
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+master:
+ instance:
+ replicaCount: 1
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: [ "ALL" ]
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ # runAsUser: 1000
+
+ service:
+ type: ClusterIP
+ port: 8080
+
+ resources: {}
+  # We usually recommend not specifying default resources, leaving this as a
+  # conscious choice for the user. This also increases the chances that the chart
+  # runs on environments with few resources, such as Minikube. If you do want to
+  # specify resources, uncomment the following lines, adjust them as necessary,
+  # and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+
+ tolerations:
+ - key: "node-role.kubernetes.io/master"
+ operator: "Equal"
+ value: ""
+ effect: "NoSchedule"
+
+ annotations: {}
+
+ affinity:
+ nodeAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 1
+ preference:
+ matchExpressions:
+ - key: "node-role.kubernetes.io/master"
+ operator: In
+ values: [""]
+
+worker:
+ configmapName: nfd-worker-conf
+ config: |
+ sources:
+ custom:
+ - name: "iavf"
+ matchOn:
+ - pciId:
+ class: ["0200"]
+ vendor: ["8086"]
+ device: ["1574", "1580", "1583", "1584", "1587", "1588", "37ce", "37cf", "37d0", "37d1", "37d2", "37d3"]
+ - name: "qat"
+ matchOn:
+ - pciId:
+ class: ["0b40"]
+ vendor: ["8086"]
+ device: ["0435", "37c8", "6f54", "19e2"]
+ pci:
+ deviceClassWhitelist:
+ - "03"
+ - "12"
+ - "0200"
+ - "0b40"
+ deviceLabelFields:
+ - "class"
+ - "vendor"
+ - "device"
+ #sources:
+ # cpu:
+ # cpuid:
+ ## NOTE: whitelist has priority over blacklist
+ # attributeBlacklist:
+ # - "BMI1"
+ # - "BMI2"
+ # - "CLMUL"
+ # - "CMOV"
+ # - "CX16"
+ # - "ERMS"
+ # - "F16C"
+ # - "HTT"
+ # - "LZCNT"
+ # - "MMX"
+ # - "MMXEXT"
+ # - "NX"
+ # - "POPCNT"
+ # - "RDRAND"
+ # - "RDSEED"
+ # - "RDTSCP"
+ # - "SGX"
+ # - "SSE"
+ # - "SSE2"
+ # - "SSE3"
+ # - "SSE4.1"
+ # - "SSE4.2"
+ # - "SSSE3"
+ # attributeWhitelist:
+ # kernel:
+ # kconfigFile: "/path/to/kconfig"
+ # configOpts:
+ # - "NO_HZ"
+ # - "X86"
+ # - "DMI"
+ # pci:
+ # deviceClassWhitelist:
+ # - "0200"
+ # - "03"
+ # - "12"
+ # deviceLabelFields:
+ # - "class"
+ # - "vendor"
+ # - "device"
+ # - "subsystem_vendor"
+ # - "subsystem_device"
+ # usb:
+ # deviceClassWhitelist:
+ # - "0e"
+ # - "ef"
+ # - "fe"
+ # - "ff"
+ # deviceLabelFields:
+ # - "class"
+ # - "vendor"
+ # - "device"
+ # custom:
+ # - name: "my.kernel.feature"
+ # matchOn:
+ # - loadedKMod: ["example_kmod1", "example_kmod2"]
+ # - name: "my.pci.feature"
+ # matchOn:
+ # - pciId:
+ # class: ["0200"]
+ # vendor: ["15b3"]
+ # device: ["1014", "1017"]
+ # - pciId :
+ # vendor: ["8086"]
+ # device: ["1000", "1100"]
+ # - name: "my.usb.feature"
+ # matchOn:
+ # - usbId:
+ # class: ["ff"]
+ # vendor: ["03e7"]
+ # device: ["2485"]
+ # - usbId:
+ # class: ["fe"]
+ # vendor: ["1a6e"]
+ # device: ["089a"]
+ # - name: "my.combined.feature"
+ # matchOn:
+ # - pciId:
+ # vendor: ["15b3"]
+ # device: ["1014", "1017"]
+ # loadedKMod : ["vendor_kmod1", "vendor_kmod2"]
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop: [ "ALL" ]
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ # runAsUser: 1000
+
+ resources: {}
+  # We usually recommend not specifying default resources, leaving this as a
+  # conscious choice for the user. This also increases the chances that the chart
+  # runs on environments with few resources, such as Minikube. If you do want to
+  # specify resources, uncomment the following lines, adjust them as necessary,
+  # and remove the curly braces after 'resources:'.
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ nodeSelector: {}
+
+ tolerations: {}
+
+ annotations: {}
+
+ # sleepTime is the sleep interval in seconds between consecutive worker runs
+ sleepTime: 60
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
+ serviceAccountName:
+ ## Annotations for the Service Account
+ ##
+ serviceAccountAnnotations: {}
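
For orientation: with NFD's naming convention for the custom feature source, the two rules above (iavf, qat) should surface on nodes with matching PCI devices as boolean labels, roughly:

    labels:
      feature.node.kubernetes.io/custom-iavf: "true"
      feature.node.kubernetes.io/custom-qat: "true"

A pod that needs QAT can then use a one-line nodeSelector on the custom-qat label instead of matching PCI IDs itself.
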
diff --git a/kud/deployment_infra/helm/ovn4nfv/.helmignore b/kud/deployment_infra/helm/ovn4nfv/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/ovn4nfv/Chart.yaml b/kud/deployment_infra/helm/ovn4nfv/Chart.yaml
new file mode 100644
index 00000000..5a4e69c1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v2.2.0
+description: |
+ OVN4NFV K8s Plugin - Network controller
+home: https://github.com/opnfv/ovn4nfv-k8s-plugin
+name: ovn4nfv
+sources:
+ - https://github.com/opnfv/ovn4nfv-k8s-plugin
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml
new file mode 100644
index 00000000..793261e0
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/network.yaml
@@ -0,0 +1,117 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networks.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: Network
+ listKind: NetworkList
+ plural: networks
+ singular: network
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ cniType:
+            description: The CNI plugin type used to realize this network.
+ type: string
+ dns:
+ properties:
+ domain:
+ type: string
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ type: string
+ type: array
+ search:
+ items:
+ type: string
+ type: array
+ type: object
+ ipv4Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ ipv6Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ routes:
+ items:
+ properties:
+ dst:
+ type: string
+ gw:
+ type: string
+ required:
+ - dst
+ type: object
+ type: array
+ required:
+ - cniType
+ - ipv4Subnets
+ type: object
+ status:
+ properties:
+ state:
+              description: The observed state of the network.
+ type: string
+ required:
+ - state
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
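
For orientation, a minimal Network resource satisfying the schema's required fields (cniType, plus ipv4Subnets entries with name and subnet); every value below is illustrative:

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: Network
    metadata:
      name: example-net          # hypothetical
    spec:
      cniType: ovn4nfv
      ipv4Subnets:
      - name: subnet1
        subnet: 172.16.33.0/24   # placeholder addressing
        gateway: 172.16.33.1/24
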
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml
new file mode 100644
index 00000000..77257c3b
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/networkchaining.yaml
@@ -0,0 +1,89 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkchainings.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: NetworkChaining
+ listKind: NetworkChainingList
+ plural: networkchainings
+ singular: networkchaining
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: NetworkChaining is the Schema for the networkchainings API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: NetworkChainingSpec defines the desired state of NetworkChaining
+ properties:
+ chainType:
+ type: string
+ routingSpec:
+ properties:
+ leftNetwork:
+ items:
+ properties:
+ gatewayIp:
+ type: string
+ networkName:
+ type: string
+ required:
+ - gatewayIp
+ - networkName
+ type: object
+ type: array
+ namespace:
+ type: string
+ networkChain:
+ type: string
+ rightNetwork:
+ items:
+ properties:
+ gatewayIp:
+ type: string
+ networkName:
+ type: string
+ required:
+ - gatewayIp
+ - networkName
+ type: object
+ type: array
+ required:
+ - leftNetwork
+ - namespace
+ - networkChain
+ - rightNetwork
+ type: object
+ required:
+ - chainType
+ - routingSpec
+ type: object
+ status:
+ description: NetworkChainingStatus defines the observed state of NetworkChaining
+ properties:
+ state:
+ type: string
+ required:
+ - state
+ type: object
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
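
Likewise, a sketch of a NetworkChaining resource with the required routingSpec fields; the chain string syntax and all names are illustrative:

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: NetworkChaining
    metadata:
      name: example-chain        # hypothetical
    spec:
      chainType: Routing
      routingSpec:
        namespace: default
        # Alternating label selectors and network names (illustrative chain).
        networkChain: "app=slb,dyn-net1,app=ngfw,dyn-net2,app=sdwan"
        leftNetwork:
        - networkName: left-pnet
          gatewayIp: 172.30.10.2
        rightNetwork:
        - networkName: right-pnet
          gatewayIp: 172.30.20.2
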
diff --git a/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml b/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml
new file mode 100644
index 00000000..fa058ff2
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/crds/providernetwork.yaml
@@ -0,0 +1,157 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: providernetworks.k8s.plugin.opnfv.org
+spec:
+ group: k8s.plugin.opnfv.org
+ names:
+ kind: ProviderNetwork
+ listKind: ProviderNetworkList
+ plural: providernetworks
+ singular: providernetwork
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ description: ProviderNetwork is the Schema for the providernetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ProviderNetworkSpec defines the desired state of ProviderNetwork
+ properties:
+ cniType:
+            description: The CNI plugin type used to realize this provider network.
+ type: string
+ direct:
+ properties:
+ directNodeSelector:
+ type: string
+ nodeLabelList:
+ items:
+ type: string
+ type: array
+ providerInterfaceName:
+ type: string
+ required:
+ - directNodeSelector
+ - providerInterfaceName
+ type: object
+ dns:
+ properties:
+ domain:
+ type: string
+ nameservers:
+ items:
+ type: string
+ type: array
+ options:
+ items:
+ type: string
+ type: array
+ search:
+ items:
+ type: string
+ type: array
+ type: object
+ ipv4Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ ipv6Subnets:
+ items:
+ properties:
+ excludeIps:
+ type: string
+ gateway:
+ type: string
+ name:
+ type: string
+ subnet:
+ type: string
+ required:
+ - name
+ - subnet
+ type: object
+ type: array
+ providerNetType:
+ type: string
+ routes:
+ items:
+ properties:
+ dst:
+ type: string
+ gw:
+ type: string
+ required:
+ - dst
+ type: object
+ type: array
+ vlan:
+ properties:
+ logicalInterfaceName:
+ type: string
+ nodeLabelList:
+ items:
+ type: string
+ type: array
+ providerInterfaceName:
+ type: string
+ vlanId:
+ type: string
+ vlanNodeSelector:
+ type: string
+ required:
+ - providerInterfaceName
+ - vlanId
+ - vlanNodeSelector
+ type: object
+ required:
+ - cniType
+ - ipv4Subnets
+ - providerNetType
+ type: object
+ status:
+ description: ProviderNetworkStatus defines the observed state of ProviderNetwork
+ properties:
+ state:
+              description: The observed state of the provider network.
+ type: string
+ required:
+ - state
+ type: object
+ type: object
+ version: v1alpha1
+ versions:
+ - name: v1alpha1
+ served: true
+ storage: true
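
And a VLAN-flavored ProviderNetwork sketch covering the required fields (cniType, ipv4Subnets, providerNetType) plus the vlan block that type implies; the interface names, VLAN ID, and addresses are placeholders:

    apiVersion: k8s.plugin.opnfv.org/v1alpha1
    kind: ProviderNetwork
    metadata:
      name: example-pnet         # hypothetical
    spec:
      cniType: ovn4nfv
      providerNetType: VLAN
      ipv4Subnets:
      - name: subnet1
        subnet: 172.16.34.0/24
        gateway: 172.16.34.1/24
      vlan:
        vlanId: "100"
        providerInterfaceName: eth1      # physical NIC on the node
        logicalInterfaceName: eth1.100   # VLAN sub-interface to create
        vlanNodeSelector: all
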
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl b/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl
new file mode 100644
index 00000000..4abd970e
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "ovn4nfv.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "ovn4nfv.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "ovn4nfv.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "ovn4nfv.labels" -}}
+helm.sh/chart: {{ include "ovn4nfv.chart" . }}
+{{ include "ovn4nfv.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "ovn4nfv.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "ovn4nfv.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "ovn4nfv.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "ovn4nfv.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml
new file mode 100644
index 00000000..f10b111f
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrole.yaml
@@ -0,0 +1,54 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - pods/status
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ - configmaps
+ - secrets
+ - nodes
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ - statefulsets
+ verbs:
+ - '*'
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - get
+ - create
+- apiGroups:
+ - apps
+ resourceNames:
+ - nfn-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - k8s.plugin.opnfv.org
+ resources:
+ - '*'
+ - providernetworks
+ verbs:
+ - '*'
+{{- end }}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..0891458a
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/clusterrolebinding.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.rbac.create }}
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "ovn4nfv.fullname" . }}
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+ kind: Group
+ name: system:serviceaccounts
+{{- end }}
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml
new file mode 100644
index 00000000..7e1beba1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/configmap.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.nfnOperator.config.name }}
+data:
+ {{ .Values.nfnOperator.config.data | nindent 2 }}
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: {{ .Values.cni.config.name }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+data:
+ ovn4nfv_k8s.conf: | {{ .Values.cni.config.ovn4nfv_k8s | nindent 4 }}
+ 00-network.conf: | {{ .Values.cni.config.network | nindent 4 }}
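
Note the `key: | {{ ... | nindent 4 }}` idiom in the second ConfigMap: nindent prepends a newline before the indented value, so the template renders as an ordinary YAML literal block scalar. With the chart's default values it comes out roughly as below (only the first section of ovn4nfv_k8s.conf shown):

    data:
      ovn4nfv_k8s.conf: |
        [logging]
        loglevel=5
        logfile=/var/log/openvswitch/ovn4k8s.log
      00-network.conf: |
        {
          "name": "ovn4nfv-k8s-plugin",
          "type": "ovn4nfvk8s-cni",
          "cniVersion": "0.3.1"
        }
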
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml
new file mode 100644
index 00000000..aeecb797
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/daemonset.yaml
@@ -0,0 +1,169 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-cni
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: cni
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: cni
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: cni
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.cni.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.cni.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: ovn4nfv
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "cni"]
+ lifecycle:
+ preStop:
+ exec:
+ command: ["/bin/bash", "-c", "rm /host/etc/cni/net.d/00-network.conf"]
+ resources:
+ {{- toYaml .Values.cni.resources | nindent 10 }}
+ securityContext:
+ {{- toYaml .Values.cni.securityContext | nindent 10 }}
+ volumeMounts:
+ - name: cni
+ mountPath: /host/etc/cni/net.d
+ - name: cnibin
+ mountPath: /host/opt/cni/bin
+ - name: cniconf
+ mountPath: /host/etc/openvswitch
+ - name: ovn4nfv-cfg
+ mountPath: /tmp/ovn4nfv-conf
+ - name: ovn4nfv-cni-net-conf
+ mountPath: /tmp/ovn4nfv-cni
+ volumes:
+ - name: cni
+ hostPath:
+ path: /etc/cni/net.d
+ - name: cnibin
+ hostPath:
+ path: /opt/cni/bin
+ - name: cniconf
+ hostPath:
+ path: /etc/openvswitch
+ - name: ovn4nfv-cfg
+ configMap:
+ name: {{ .Values.cni.config.name }}
+ items:
+ - key: ovn4nfv_k8s.conf
+ path: ovn4nfv_k8s.conf
+ - name: ovn4nfv-cni-net-conf
+ configMap:
+ name: {{ .Values.cni.config.name }}
+ items:
+ - key: 00-network.conf
+ path: 00-network.conf
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-nfn-agent
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-agent
+spec:
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: nfn-agent
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: nfn-agent
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+ {{- with .Values.nfnAgent.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnAgent.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: nfn-agent
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "agent"]
+ resources:
+ {{- toYaml .Values.nfnAgent.resources | nindent 10 }}
+ env:
+ - name: NFN_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ securityContext:
+ {{- toYaml .Values.nfnAgent.securityContext | nindent 10 }}
+ volumeMounts:
+ - mountPath: /var/run/dbus/
+ name: host-var-run-dbus
+ readOnly: true
+ - mountPath: /run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/openvswitch
+ name: host-var-run-ovs
+ - mountPath: /var/run
+ name: host-var-run
+ mountPropagation: Bidirectional
+ - mountPath: /host/proc
+ name: host-proc
+ - mountPath: /host/sys
+ name: host-sys
+ - mountPath: /var/run/ovn4nfv-k8s-plugin
+ name: host-var-cniserver-socket-dir
+ volumes:
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-var-run-ovs
+ hostPath:
+ path: /var/run/openvswitch
+ - name: host-var-run-dbus
+ hostPath:
+ path: /var/run/dbus
+ - name: host-var-cniserver-socket-dir
+ hostPath:
+ path: /var/run/ovn4nfv-k8s-plugin
+ - name: host-var-run
+ hostPath:
+ path: /var/run
+ - name: host-proc
+ hostPath:
+ path: /proc
+ - name: host-sys
+ hostPath:
+ path: /sys
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml
new file mode 100644
index 00000000..7613fef5
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-nfn-operator
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-operator
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: nfn-operator
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: nfn-operator
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.nfnOperator.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnOperator.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nfnOperator.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ serviceAccountName: {{ include "ovn4nfv.serviceAccountName" . }}
+ containers:
+ - name: nfn-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["/usr/local/bin/entrypoint", "operator"]
+ envFrom:
+ - configMapRef:
+ name: {{ .Values.nfnOperator.config.name }}
+ ports:
+ - containerPort: 50000
+ protocol: TCP
+ env:
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: OPERATOR_NAME
+ value: "nfn-operator"
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml
new file mode 100644
index 00000000..2b71a9dd
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/daemonset.yaml
@@ -0,0 +1,102 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-ovn-controller
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-controller
+spec:
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: ovn-controller
+ updateStrategy:
+ type: OnDelete
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: ovn-controller
+ spec:
+ {{- with .Values.ovn.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ hostPID: true
+ {{- with .Values.ovnController.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.ovnController.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.ovnController.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: ovn-controller
+ image: "{{ .Values.ovn.image.repository }}:{{ .Values.ovn.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.ovn.image.pullPolicy }}
+ command: ["ovn4nfv-k8s", "start_ovn_controller"]
+ resources:
+ {{- toYaml .Values.ovnController.resources | nindent 12 }}
+ securityContext:
+ {{- toYaml .Values.ovnController.securityContext | nindent 12 }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: host-modules
+ readOnly: true
+ - mountPath: /var/run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/ovn
+ name: host-run-ovn
+ - mountPath: /sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /etc/openvswitch
+ name: host-config-openvswitch
+ - mountPath: /var/log/openvswitch
+ name: host-log-ovs
+ - mountPath: /var/log/ovn
+ name: host-log-ovn
+ readinessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_controller"]
+ periodSeconds: 5
+ livenessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_controller"]
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ failureThreshold: 5
+ volumes:
+ - name: host-modules
+ hostPath:
+ path: /lib/modules
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-run-ovn
+ hostPath:
+ path: /run/ovn
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-config-openvswitch
+ hostPath:
+ path: /etc/origin/openvswitch
+ - name: host-log-ovs
+ hostPath:
+ path: /var/log/openvswitch
+ - name: host-log-ovn
+ hostPath:
+ path: /var/log/ovn
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml
new file mode 100644
index 00000000..a9dd4288
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/deployment.yaml
@@ -0,0 +1,107 @@
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: {{ include "ovn4nfv.fullname" . }}-ovn-control-plane
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ replicas: 1
+ strategy:
+ rollingUpdate:
+ maxSurge: 0%
+ maxUnavailable: 100%
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 6 }}
+ role: ovn-control-plane
+ template:
+ metadata:
+ labels:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 8 }}
+ role: ovn-control-plane
+ spec:
+ {{- with .Values.ovn.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ hostNetwork: true
+ {{- with .Values.ovnControlPlane.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.ovnControlPlane.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ {{- with .Values.ovnControlPlane.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: ovn-control-plane
+ image: "{{ .Values.ovn.image.repository }}:{{ .Values.ovn.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.ovn.image.pullPolicy }}
+ command: ["ovn4nfv-k8s", "start_ovn_control_plane"]
+ resources:
+ {{- toYaml .Values.ovnControlPlane.resources | nindent 12 }}
+ securityContext:
+ {{- toYaml .Values.ovnControlPlane.securityContext | nindent 12 }}
+ env:
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /var/run/openvswitch
+ name: host-run-ovs
+ - mountPath: /var/run/ovn
+ name: host-run-ovn
+ - mountPath: /sys
+ name: host-sys
+ readOnly: true
+ - mountPath: /etc/openvswitch
+ name: host-config-openvswitch
+ - mountPath: /var/log/openvswitch
+ name: host-log-ovs
+ - mountPath: /var/log/ovn
+ name: host-log-ovn
+ readinessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_control_plane"]
+ periodSeconds: 3
+ livenessProbe:
+ exec:
+ command: ["ovn4nfv-k8s", "check_ovn_control_plane"]
+ initialDelaySeconds: 30
+ periodSeconds: 7
+ failureThreshold: 5
+ volumes:
+ - name: host-run-ovs
+ hostPath:
+ path: /run/openvswitch
+ - name: host-run-ovn
+ hostPath:
+ path: /run/ovn
+ - name: host-sys
+ hostPath:
+ path: /sys
+ - name: host-config-openvswitch
+ hostPath:
+ path: /etc/origin/openvswitch
+ - name: host-log-ovs
+ hostPath:
+ path: /var/log/openvswitch
+ - name: host-log-ovn
+ hostPath:
+ path: /var/log/ovn
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml
new file mode 100644
index 00000000..c6d96e49
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/ovn/service.yaml
@@ -0,0 +1,37 @@
+kind: Service
+apiVersion: v1
+metadata:
+ name: ovn-nb-tcp
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ ports:
+ - name: ovn-nb-tcp
+ protocol: TCP
+ port: {{ .Values.ovnControlPlane.nbService.port }}
+ targetPort: 6641
+ type: {{ .Values.ovnControlPlane.nbService.type }}
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: ovn-control-plane
+ sessionAffinity: None
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: ovn-sb-tcp
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: ovn-control-plane
+spec:
+ ports:
+ - name: ovn-sb-tcp
+ protocol: TCP
+ port: {{ .Values.ovnControlPlane.sbService.port }}
+ targetPort: 6642
+ type: {{ .Values.ovnControlPlane.sbService.type }}
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: ovn-control-plane
+ sessionAffinity: None
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml
new file mode 100644
index 00000000..a9e5747d
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: nfn-operator
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ role: nfn-operator
+spec:
+ type: {{ .Values.nfnOperator.service.type }}
+ ports:
+ - port: {{ .Values.nfnOperator.service.port }}
+ protocol: TCP
+ targetPort: 50000
+ selector:
+ {{- include "ovn4nfv.selectorLabels" . | nindent 4 }}
+ role: nfn-operator
diff --git a/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml b/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml
new file mode 100644
index 00000000..853e2ca1
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "ovn4nfv.serviceAccountName" . }}
+ labels:
+ {{- include "ovn4nfv.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/ovn4nfv/values.yaml b/kud/deployment_infra/helm/ovn4nfv/values.yaml
new file mode 100644
index 00000000..660c4eba
--- /dev/null
+++ b/kud/deployment_infra/helm/ovn4nfv/values.yaml
@@ -0,0 +1,177 @@
+image:
+ repository: docker.io/integratedcloudnative/ovn4nfv-k8s-plugin
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+cni:
+ securityContext:
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ limits:
+ cpu: 100m
+ memory: 50Mi
+
+ config:
+ name: ovn4nfv-cni-config
+ ovn4nfv_k8s: |
+ [logging]
+ loglevel=5
+ logfile=/var/log/openvswitch/ovn4k8s.log
+
+ [cni]
+ conf-dir=/etc/cni/net.d
+ plugin=ovn4nfvk8s-cni
+
+ [kubernetes]
+ kubeconfig=/etc/cni/net.d/ovn4nfv-k8s.d/ovn4nfv-k8s.kubeconfig
+ network: |
+ {
+ "name": "ovn4nfv-k8s-plugin",
+ "type": "ovn4nfvk8s-cni",
+ "cniVersion": "0.3.1"
+ }
+
+nfnAgent:
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 50Mi
+ limits:
+ cpu: 100m
+ memory: 50Mi
+
+nfnOperator:
+ nodeSelector: {}
+
+ affinity:
+ podAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: role
+ operator: In
+ values:
+ - ovn-control-plane
+ topologyKey: "kubernetes.io/hostname"
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ config:
+ name: ovn-controller-network
+ data: |
+ OVN_SUBNET: 10.154.142.0/18
+ OVN_GATEWAYIP: 10.154.142.1/18
+
+ service:
+ type: NodePort
+ port: 50000
+
+ovn:
+ image:
+ repository: docker.io/integratedcloudnative/ovn-images
+ pullPolicy: IfNotPresent
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+ imagePullSecrets: []
+
+ovnControlPlane:
+ securityContext:
+ capabilities:
+ add: ["SYS_NICE"]
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+ node-role.kubernetes.io/master: ""
+
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ role: ovn-control-plane
+ topologyKey: kubernetes.io/hostname
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 500m
+ memory: 300Mi
+
+ nbService:
+ type: ClusterIP
+ port: 6641
+
+ sbService:
+ type: ClusterIP
+ port: 6642
+
+ovnController:
+ securityContext:
+ runAsUser: 0
+ privileged: true
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+
+ affinity: {}
+
+ tolerations:
+ - operator: Exists
+ effect: NoSchedule
+
+ resources:
+ requests:
+ cpu: 200m
+ memory: 300Mi
+ limits:
+ cpu: 1000m
+ memory: 800Mi
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
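
As elsewhere, site-specific settings are plain Helm value overrides; for example, a hedged fragment retargeting the subnet that nfn-operator hands to the OVN controller (file name and addresses are illustrative). Helm merges value maps, so only the keys being changed need to appear:

    # values-site.yaml (hypothetical override file)
    nfnOperator:
      config:
        data: |
          OVN_SUBNET: 10.200.0.0/16
          OVN_GATEWAYIP: 10.200.0.1/16
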
diff --git a/kud/deployment_infra/helm/qat-device-plugin/.helmignore b/kud/deployment_infra/helm/qat-device-plugin/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml b/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml
new file mode 100644
index 00000000..1697abb0
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/Chart.yaml
@@ -0,0 +1,25 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 0.19.0-kerneldrv
+description: |
+ A device plugin that provides support for Intel QuickAssist
+ Technology (QAT) devices under Kubernetes.
+home: https://github.com/intel/intel-device-plugins-for-kubernetes
+name: qat-device-plugin
+sources:
+ - https://github.com/intel/intel-device-plugins-for-kubernetes
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl b/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl
new file mode 100644
index 00000000..77889d5d
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/_helpers.tpl
@@ -0,0 +1,52 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "qat-device-plugin.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "qat-device-plugin.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "qat-device-plugin.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "qat-device-plugin.labels" -}}
+helm.sh/chart: {{ include "qat-device-plugin.chart" . }}
+{{ include "qat-device-plugin.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "qat-device-plugin.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "qat-device-plugin.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml
new file mode 100644
index 00000000..24ffaa4a
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/config.yaml
@@ -0,0 +1,8 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ .Values.config.name }}
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+data:
+ VERBOSITY: {{ .Values.config.logLevel | quote }}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml
new file mode 100644
index 00000000..c94ff330
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/daemonset.yaml
@@ -0,0 +1,60 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "qat-device-plugin.fullname" . }}
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+spec:
+ selector:
+ matchLabels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ containers:
+ - name: intel-qat-kernel-plugin
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 10 }}
+ env:
+ - name: VERBOSITY
+ valueFrom:
+ configMapKeyRef:
+ name: {{ .Values.config.name }}
+ key: VERBOSITY
+ args: ["-mode", "kernel", "-v", "$(VERBOSITY)"]
+ volumeMounts:
+ - name: devfs
+ mountPath: /dev
+ - name: etcdir
+ mountPath: /etc
+ readOnly: true
+ - name: kubeletsockets
+ mountPath: /var/lib/kubelet/device-plugins
+ volumes:
+ - name: etcdir
+ hostPath:
+ path: /etc
+ - name: kubeletsockets
+ hostPath:
+ path: /var/lib/kubelet/device-plugins
+ - name: devfs
+ hostPath:
+ path: /dev
diff --git a/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml b/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml
new file mode 100644
index 00000000..7ba04047
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/templates/drivers/daemonset.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "qat-device-plugin.fullname" . }}-qat-driver-installer
+ labels:
+ {{- include "qat-device-plugin.labels" . | nindent 4 }}
+ role: qat-driver-installer
+spec:
+ selector:
+ matchLabels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 6 }}
+ role: qat-driver-installer
+ template:
+ metadata:
+ labels:
+ {{- include "qat-device-plugin.selectorLabels" . | nindent 8 }}
+ role: qat-driver-installer
+ spec:
+ hostPID: true
+ {{- with .Values.qatDriver.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.qatDriver.podSecurityContext | nindent 8 }}
+ initContainers:
+ - image: "{{ .Values.qatDriver.image.repository }}:{{ .Values.qatDriver.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.qatDriver.image.pullPolicy }}
+ name: qat-driver-installer
+ securityContext:
+ {{- toYaml .Values.qatDriver.securityContext | nindent 12 }}
+ resources:
+ {{- toYaml .Values.qatDriver.resources | nindent 12 }}
+ volumeMounts:
+ - name: qat-install-dir
+ mountPath: "/usr/local/qat"
+ - name: root-dir
+ mountPath: "/root"
+ - name: lib-modules-dir
+ mountPath: "/root/lib/modules"
+ - name: run-systemd-dir
+ mountPath: "/root/run/systemd/system"
+ containers:
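+ # The pause container keeps the pod alive after the init container has installed the driver.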
+ - image: "gcr.io/google-containers/pause:3.2"
+ name: pause
+ volumes:
+ - name: qat-install-dir
+ hostPath:
+ path: "/opt/qat"
+ - name: root-dir
+ hostPath:
+ path: "/"
+ - name: lib-modules-dir
+ hostPath:
+ path: "/lib/modules"
+ - name: run-systemd-dir
+ hostPath:
+ path: "/run/systemd/system"
+ {{- with .Values.qatDriver.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.qatDriver.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.qatDriver.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/qat-device-plugin/values.yaml b/kud/deployment_infra/helm/qat-device-plugin/values.yaml
new file mode 100644
index 00000000..459c36b1
--- /dev/null
+++ b/kud/deployment_infra/helm/qat-device-plugin/values.yaml
@@ -0,0 +1,49 @@
+config:
+ name: intel-qat-plugin-config
+
+ # logLevel sets the plugin's log level.
+ logLevel: 4
+
+imagePullSecrets: []
+
+image:
+ repository: integratedcloudnative/intel-qat-plugin
+ # Overrides the image tag whose default is the chart appVersion.
+ tag: ""
+
+nodeSelector:
+ feature.node.kubernetes.io/iommu-enabled: "true"
+ feature.node.kubernetes.io/custom-qat: "true"
+
+securityContext:
+ readOnlyRootFilesystem: true
+ privileged: true
+
+resources: {}
+
+affinity: {}
+
+tolerations: {}
+
+qatDriver:
+ image:
+ repository: integratedcloudnative/qat-driver-installer
+ pullPolicy: IfNotPresent
+ tag: latest
+ imagePullSecrets: []
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ privileged: true
+
+ nodeSelector:
+ feature.node.kubernetes.io/iommu-enabled: "true"
+ feature.node.kubernetes.io/custom-qat: "true"
+
+ resources: {}
+
+ affinity: {}
+
+ tolerations: {}
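
For reference, a minimal sketch of a values override for this chart, assuming only the keys defined above; the driver tag shown is hypothetical and should point at a published qat-driver-installer release:

config:
  logLevel: 2          # lower the plugin's log verbosity (the default above is 4)
qatDriver:
  image:
    tag: "v0.1.0"      # hypothetical tag, replace with a published release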
diff --git a/kud/deployment_infra/helm/sriov-network-operator/.helmignore b/kud/deployment_infra/helm/sriov-network-operator/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml b/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml
new file mode 100644
index 00000000..ba056e07
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/Chart.yaml
@@ -0,0 +1,27 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 4.8.0
+description: |
+ The SR-IOV Network Operator helps the user provision and configure
+ the SR-IOV CNI plugin and device plugin in a Kubernetes cluster.
+name: sriov-network-operator
+sources:
+ - https://github.com/k8snetworkplumbingwg/sriov-network-operator
+ - https://downloadcenter.intel.com/download/24693/Intel-Network-Adapter-Linux-Virtual-Function-Driver-for-Intel-Ethernet-Controller-700-and-E810-Series
+home: https://github.com/k8snetworkplumbingwg/sriov-network-operator
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml
new file mode 100644
index 00000000..21e9e48b
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovibnetwork.yaml
@@ -0,0 +1,73 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovibnetworks.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovIBNetwork
+ listKind: SriovIBNetworkList
+ plural: sriovibnetworks
+ singular: sriovibnetwork
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovIBNetwork is the Schema for the sriovibnetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovIBNetworkSpec defines the desired state of SriovIBNetwork
+ properties:
+ capabilities:
+ description: 'Capabilities to be configured for this network. Capabilities
+ supported: (infinibandGUID), e.g. ''{"infinibandGUID": true}'''
+ type: string
+ ipam:
+ description: IPAM configuration to be used for this network.
+ type: string
+ linkState:
+ description: VF link state (enable|disable|auto)
+ enum:
+ - auto
+ - enable
+ - disable
+ type: string
+ networkNamespace:
+ description: Namespace of the NetworkAttachmentDefinition custom resource
+ type: string
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ required:
+ - resourceName
+ type: object
+ status:
+ description: SriovIBNetworkStatus defines the observed state of SriovIBNetwork
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml
new file mode 100644
index 00000000..d69e5608
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetwork.yaml
@@ -0,0 +1,109 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworks.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetwork
+ listKind: SriovNetworkList
+ plural: sriovnetworks
+ singular: sriovnetwork
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetwork is the Schema for the sriovnetworks API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkSpec defines the desired state of SriovNetwork
+ properties:
+ capabilities:
+ description: 'Capabilities to be configured for this network. Capabilities
+ supported: (mac|ips), e.g. ''{"mac": true}'''
+ type: string
+ ipam:
+ description: IPAM configuration to be used for this network.
+ type: string
+ linkState:
+ description: VF link state (enable|disable|auto)
+ enum:
+ - auto
+ - enable
+ - disable
+ type: string
+ maxTxRate:
+ description: Maximum tx rate, in Mbps, for the VF. Defaults to 0 (no
+ rate limiting)
+ minimum: 0
+ type: integer
+ metaPlugins:
+ description: MetaPluginsConfig configuration to be used in order to
+ chain metaplugins to the sriov interface returned by the operator.
+ type: string
+ minTxRate:
+ description: Minimum tx rate, in Mbps, for the VF. Defaults to 0 (no
+ rate limiting). min_tx_rate should be <= max_tx_rate.
+ minimum: 0
+ type: integer
+ networkNamespace:
+ description: Namespace of the NetworkAttachmentDefinition custom resource
+ type: string
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ spoofChk:
+ description: VF spoof check, (on|off)
+ enum:
+ - "on"
+ - "off"
+ type: string
+ trust:
+ description: VF trust mode (on|off)
+ enum:
+ - "on"
+ - "off"
+ type: string
+ vlan:
+ description: VLAN ID to assign for the VF. Defaults to 0.
+ maximum: 4096
+ minimum: 0
+ type: integer
+ vlanQoS:
+ description: VLAN QoS ID to assign for the VF. Defaults to 0.
+ maximum: 7
+ minimum: 0
+ type: integer
+ required:
+ - resourceName
+ type: object
+ status:
+ description: SriovNetworkStatus defines the observed state of SriovNetwork
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
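
As a sketch of how this schema is instantiated, a minimal SriovNetwork custom resource could look like the following; the metadata name, namespace, and IPAM subnet are illustrative, and resourceName must match a resource advertised by the SR-IOV device plugin:

apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetwork
metadata:
  name: example-sriov-net      # illustrative name
spec:
  resourceName: intel_sriov_nic
  networkNamespace: default
  ipam: |
    {
      "type": "host-local",
      "subnet": "10.56.217.0/24"
    }
  vlan: 100                    # optional
  spoofChk: "on"               # optional, on|off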
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml
new file mode 100644
index 00000000..315ea262
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodepolicy.yaml
@@ -0,0 +1,131 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworknodepolicies.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetworkNodePolicy
+ listKind: SriovNetworkNodePolicyList
+ plural: sriovnetworknodepolicies
+ singular: sriovnetworknodepolicy
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetworkNodePolicy is the Schema for the sriovnetworknodepolicies
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkNodePolicySpec defines the desired state of SriovNetworkNodePolicy
+ properties:
+ deviceType:
+ description: The driver type for configured VFs. Allowed values "netdevice",
+ "vfio-pci". Defaults to netdevice.
+ enum:
+ - netdevice
+ - vfio-pci
+ type: string
+ eSwitchMode:
+ description: NIC device mode. Allowed values "legacy", "switchdev".
+ enum:
+ - legacy
+ - switchdev
+ type: string
+ isRdma:
+ description: RDMA mode. Defaults to false.
+ type: boolean
+ linkType:
+ description: NIC link type. Allowed values "eth", "ETH", "ib", and
+ "IB".
+ enum:
+ - eth
+ - ETH
+ - ib
+ - IB
+ type: string
+ mtu:
+ description: MTU of VF
+ minimum: 1
+ type: integer
+ nicSelector:
+ description: NicSelector selects the NICs to be configured
+ properties:
+ deviceID:
+ description: The device hex code of the SR-IOV device. Allowed values
+ "0d58", "1572", "158b", "1013", "1015", "1017", "101b".
+ type: string
+ netFilter:
+ description: Infrastructure Networking selection filter. Allowed
+ value "openstack/NetworkID:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ type: string
+ pfNames:
+ description: Name of the SR-IOV PF.
+ items:
+ type: string
+ type: array
+ rootDevices:
+ description: PCI address of the SR-IOV PF.
+ items:
+ type: string
+ type: array
+ vendor:
+ description: The vendor hex code of the SR-IOV device. Allowed values
+ "8086", "15b3".
+ type: string
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector selects the nodes to be configured
+ type: object
+ numVfs:
+ description: Number of VFs for each PF
+ minimum: 0
+ type: integer
+ priority:
+ description: Priority of the policy, higher priority policies can
+ override lower ones.
+ maximum: 99
+ minimum: 0
+ type: integer
+ resourceName:
+ description: SRIOV Network device plugin endpoint resource name
+ type: string
+ required:
+ - nicSelector
+ - nodeSelector
+ - numVfs
+ - resourceName
+ type: object
+ status:
+ description: SriovNetworkNodePolicyStatus defines the observed state of
+ SriovNetworkNodePolicy
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
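
A corresponding sketch of a SriovNetworkNodePolicy using the required fields plus a device selector; the vendor and device IDs mirror the chart defaults later in this change and are illustrative:

apiVersion: sriovnetwork.openshift.io/v1
kind: SriovNetworkNodePolicy
metadata:
  name: example-policy         # illustrative name
spec:
  resourceName: intel_sriov_nic
  numVfs: 4
  nicSelector:
    vendor: "8086"
    deviceID: "1583"
  nodeSelector:
    feature.node.kubernetes.io/network-sriov.capable: "true"
  deviceType: netdevice        # optional: netdevice or vfio-pci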
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml
new file mode 100644
index 00000000..bae83794
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovnetworknodestate.yaml
@@ -0,0 +1,153 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovnetworknodestates.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovNetworkNodeState
+ listKind: SriovNetworkNodeStateList
+ plural: sriovnetworknodestates
+ singular: sriovnetworknodestate
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovNetworkNodeState is the Schema for the sriovnetworknodestates
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovNetworkNodeStateSpec defines the desired state of SriovNetworkNodeState
+ properties:
+ dpConfigVersion:
+ type: string
+ interfaces:
+ items:
+ properties:
+ eSwitchMode:
+ type: string
+ linkType:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ numVfs:
+ type: integer
+ pciAddress:
+ type: string
+ vfGroups:
+ items:
+ properties:
+ deviceType:
+ type: string
+ policyName:
+ type: string
+ resourceName:
+ type: string
+ vfRange:
+ type: string
+ type: object
+ type: array
+ required:
+ - pciAddress
+ type: object
+ type: array
+ type: object
+ status:
+ description: SriovNetworkNodeStateStatus defines the observed state of
+ SriovNetworkNodeState
+ properties:
+ interfaces:
+ items:
+ properties:
+ Vfs:
+ items:
+ properties:
+ Vlan:
+ type: integer
+ assigned:
+ type: string
+ deviceID:
+ type: string
+ driver:
+ type: string
+ mac:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ pciAddress:
+ type: string
+ vendor:
+ type: string
+ vfID:
+ type: integer
+ required:
+ - pciAddress
+ - vfID
+ type: object
+ type: array
+ deviceID:
+ type: string
+ driver:
+ type: string
+ eSwitchMode:
+ type: string
+ linkSpeed:
+ type: string
+ linkType:
+ type: string
+ mac:
+ type: string
+ mtu:
+ type: integer
+ name:
+ type: string
+ netFilter:
+ type: string
+ numVfs:
+ type: integer
+ pciAddress:
+ type: string
+ totalvfs:
+ type: integer
+ vendor:
+ type: string
+ required:
+ - pciAddress
+ type: object
+ type: array
+ lastSyncError:
+ type: string
+ syncStatus:
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml
new file mode 100644
index 00000000..b3e360c8
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/crds/sriovoperatorconfig.yaml
@@ -0,0 +1,89 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.3.0
+ creationTimestamp: null
+ name: sriovoperatorconfigs.sriovnetwork.openshift.io
+spec:
+ group: sriovnetwork.openshift.io
+ names:
+ kind: SriovOperatorConfig
+ listKind: SriovOperatorConfigList
+ plural: sriovoperatorconfigs
+ singular: sriovoperatorconfig
+ scope: Namespaced
+ versions:
+ - name: v1
+ schema:
+ openAPIV3Schema:
+ description: SriovOperatorConfig is the Schema for the sriovoperatorconfigs
+ API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: SriovOperatorConfigSpec defines the desired state of SriovOperatorConfig
+ properties:
+ configDaemonNodeSelector:
+ additionalProperties:
+ type: string
+ description: NodeSelector selects the nodes to be configured
+ type: object
+ disableDrain:
+ description: Flag to disable draining nodes during debugging
+ type: boolean
+ enableInjector:
+ description: Flag to control whether the network resource injector
+ webhook shall be deployed
+ type: boolean
+ enableOperatorWebhook:
+ description: Flag to control whether the operator admission controller
+ webhook shall be deployed
+ type: boolean
+ enableOvsOffload:
+ description: Flag to enable OVS hardware offload. Set to 'true' to
+ provision switchdev-configuration.service and enable OpenvSwitch
+ hw-offload on nodes.
+ type: boolean
+ logLevel:
+ description: Flag to control the log verbosity level of the operator.
+ Set to '0' to show only basic logs, or '2' to show all available
+ logs.
+ maximum: 2
+ minimum: 0
+ type: integer
+ type: object
+ status:
+ description: SriovOperatorConfigStatus defines the observed state of SriovOperatorConfig
+ properties:
+ injector:
+ description: Show the runtime status of the network resource injector
+ webhook
+ type: string
+ operatorWebhook:
+ description: Show the runtime status of the operator admission controller
+ webhook
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl b/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..2d2bd47f
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sriov-network-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "sriov-network-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sriov-network-operator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "sriov-network-operator.labels" -}}
+helm.sh/chart: {{ include "sriov-network-operator.chart" . }}
+{{ include "sriov-network-operator.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "sriov-network-operator.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "sriov-network-operator.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "sriov-network-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "sriov-network-operator.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml
new file mode 100644
index 00000000..1a37667e
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrole.yaml
@@ -0,0 +1,54 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch", "patch", "update"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: [namespaces, serviceaccounts]
+ verbs: ["*"]
+- apiGroups: ["k8s.cni.cncf.io"]
+ resources: ["network-attachment-definitions"]
+ verbs: ["*"]
+- apiGroups: ["rbac.authorization.k8s.io"]
+ resources: [clusterroles, clusterrolebindings]
+ verbs: ["*"]
+- apiGroups: ["admissionregistration.k8s.io"]
+ resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
+ verbs: ["*"]
+- apiGroups: ["sriovnetwork.openshift.io"]
+ resources: ["*"]
+ verbs: ["*"]
+- apiGroups: ["machineconfiguration.openshift.io"]
+ resources: ["*"]
+ verbs: ["*"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups: [""]
+ resources: ["nodes"]
+ verbs: ["get", "list", "watch", "patch", "update"]
+- apiGroups: [""]
+ resources: ["pods"]
+ verbs: ["*"]
+- apiGroups: ["apps"]
+ resources: ["daemonsets"]
+ verbs: ["get"]
+- apiGroups: [""]
+ resources: ["pods/eviction"]
+ verbs: ["create"]
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..acf15ee5
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/clusterrolebinding.yaml
@@ -0,0 +1,30 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "sriov-network-operator.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: sriov-network-config-daemon
+subjects:
+- kind: ServiceAccount
+ name: sriov-network-config-daemon
+ namespace: {{ .Release.Namespace }}
+
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml
new file mode 100644
index 00000000..b86ee383
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/drivers/daemonset.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}-iavf-driver-installer
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+ role: iavf-driver-installer
+spec:
+ selector:
+ matchLabels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 6 }}
+ role: iavf-driver-installer
+ template:
+ metadata:
+ labels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 8 }}
+ role: iavf-driver-installer
+ spec:
+ hostPID: true
+ {{- with .Values.iavfDriver.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.iavfDriver.podSecurityContext | nindent 8 }}
+ initContainers:
+ - image: "{{ .Values.iavfDriver.image.repository }}:{{ .Values.iavfDriver.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.iavfDriver.image.pullPolicy }}
+ name: iavf-driver-installer
+ securityContext:
+ {{- toYaml .Values.iavfDriver.securityContext | nindent 12 }}
+ resources:
+ {{- toYaml .Values.iavfDriver.resources | nindent 12 }}
+ volumeMounts:
+ - name: iavf-install-dir
+ mountPath: "/usr/local/iavf"
+ - name: root-dir
+ mountPath: "/root"
+ - name: lib-modules-dir
+ mountPath: "/root/lib/modules"
+ - name: run-systemd-dir
+ mountPath: "/root/run/systemd/system"
+ containers:
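+ # The pause container keeps the pod alive after the init container has installed the driver.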
+ - image: "gcr.io/google-containers/pause:3.2"
+ name: pause
+ volumes:
+ - name: iavf-install-dir
+ hostPath:
+ path: "/opt/iavf"
+ - name: root-dir
+ hostPath:
+ path: "/"
+ - name: lib-modules-dir
+ hostPath:
+ path: "/lib/modules"
+ - name: run-systemd-dir
+ hostPath:
+ path: "/run/systemd/system"
+ {{- with .Values.iavfDriver.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.iavfDriver.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.iavfDriver.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml
new file mode 100644
index 00000000..679ed269
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/operator.yaml
@@ -0,0 +1,89 @@
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovOperatorConfig
+metadata:
+ name: default
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+spec:
+ {{- with .Values.configDaemon.nodeSelector }}
+ configDaemonNodeSelector:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ enableInjector: {{ .Values.enableInjector }}
+ enableOperatorWebhook: {{ .Values.enableOperatorWebhook }}
+ logLevel: {{ .Values.logLevel }}
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ labels:
+ {{- include "sriov-network-operator.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "sriov-network-operator.serviceAccountName" . }}
+ containers:
+ - name: sriov-network-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - sriov-network-operator
+ env:
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: SRIOV_CNI_IMAGE
+ value: "{{ .Values.cni.image.repository }}:{{ .Values.cni.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_INFINIBAND_CNI_IMAGE
+ value: "{{ .Values.infinibandCni.image.repository }}:{{ .Values.infinibandCni.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_DEVICE_PLUGIN_IMAGE
+ value: "{{ .Values.devicePlugin.image.repository }}:{{ .Values.devicePlugin.image.tag | default .Chart.AppVersion }}"
+ - name: NETWORK_RESOURCES_INJECTOR_IMAGE
+ value: "{{ .Values.resourcesInjector.image.repository }}:{{ .Values.resourcesInjector.image.tag | default .Chart.AppVersion }}"
+ - name: OPERATOR_NAME
+ value: "sriov-network-operator"
+ - name: SRIOV_NETWORK_CONFIG_DAEMON_IMAGE
+ value: "{{ .Values.configDaemon.image.repository }}:{{ .Values.configDaemon.image.tag | default .Chart.AppVersion }}"
+ - name: SRIOV_NETWORK_WEBHOOK_IMAGE
+ value: "{{ .Values.webhook.image.repository }}:{{ .Values.webhook.image.tag | default .Chart.AppVersion }}"
+ - name: RESOURCE_PREFIX
+ value: "{{ .Values.resourcePrefix }}"
+ - name: ENABLE_ADMISSION_CONTROLLER
+ value: "false"
+ - name: NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: RELEASE_VERSION
+ value: "4.3.0"
+ - name: SRIOV_CNI_BIN_PATH
+ value: "/opt/cni/bin"
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml
new file mode 100644
index 00000000..96fae762
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/role.yaml
@@ -0,0 +1,107 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ - endpoints
+ - persistentvolumeclaims
+ - events
+ - configmaps
+ - secrets
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ - replicasets
+ - statefulsets
+ verbs:
+ - '*'
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ verbs:
+ - get
+ - create
+- apiGroups:
+ - apps
+ resourceNames:
+ - sriov-network-operator
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - serviceaccounts
+ - roles
+ - rolebindings
+ verbs:
+ - '*'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ verbs:
+ - '*'
+- apiGroups:
+ - sriovnetwork.openshift.io
+ resources:
+ - '*'
+ - sriovnetworknodestates
+ verbs:
+ - '*'
+- apiGroups:
+ - security.openshift.io
+ resourceNames:
+ - privileged
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - use
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: operator-webhook-sa
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml
new file mode 100644
index 00000000..1f8498af
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/rolebinding.yaml
@@ -0,0 +1,44 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "sriov-network-operator.fullname" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: {{ include "sriov-network-operator.fullname" . }}
+subjects:
+- kind: ServiceAccount
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: sriov-network-config-daemon
+subjects:
+- kind: ServiceAccount
+ name: sriov-network-config-daemon
+ namespace: {{ .Release.Namespace }}
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: operator-webhook-sa
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: operator-webhook-sa
+subjects:
+- kind: ServiceAccount
+ name: operator-webhook-sa
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml b/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml
new file mode 100644
index 00000000..eb0ec10c
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/templates/serviceaccount.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "sriov-network-operator.serviceAccountName" . }}
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: sriov-network-config-daemon
+ labels:
+ {{- include "sriov-network-operator.labels" . | nindent 4 }}
diff --git a/kud/deployment_infra/helm/sriov-network-operator/values.yaml b/kud/deployment_infra/helm/sriov-network-operator/values.yaml
new file mode 100644
index 00000000..59f257e4
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network-operator/values.yaml
@@ -0,0 +1,100 @@
+# enableInjector controls whether the network resource injector
+# webhook shall be deployed
+enableInjector: false
+
+# enableOperatorWebhook controls whether the operator admission
+# controller webhook shall be deployed
+enableOperatorWebhook: false
+
+# logLevel controls the log verbosity level of the operator. Set to
+# '0' to show only basic logs, or '2' to show all available logs.
+logLevel: 2
+
+# resourcePrefix is the device plugin resource prefix.
+resourcePrefix: "intel.com"
+
+image:
+ repository: integratedcloudnative/origin-sriov-network-operator
+ # This should be set to 'IfNotPresent' for released versions
+ pullPolicy: IfNotPresent
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+ # tag
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+nameOverride: ""
+fullnameOverride: ""
+
+nodeSelector:
+ node-role.kubernetes.io/master: ""
+
+affinity: {}
+
+tolerations:
+- effect: NoSchedule
+ key: node-role.kubernetes.io/master
+ operator: Exists
+
+cni:
+ image:
+ repository: integratedcloudnative/origin-sriov-cni
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+infinibandCni:
+ image:
+ repository: integratedcloudnative/origin-sriov-infiniband-cni
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+devicePlugin:
+ image:
+ repository: integratedcloudnative/origin-sriov-network-device-plugin
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+resourcesInjector:
+ image:
+ repository: integratedcloudnative/origin-sriov-dp-admission-controller
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+configDaemon:
+ image:
+ repository: integratedcloudnative/sriov-network-config-daemon
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+ nodeSelector:
+ beta.kubernetes.io/os: "linux"
+
+webhook:
+ image:
+ repository: integratedcloudnative/origin-sriov-network-webhook
+ # tag, if defined, will be used as the image tag; otherwise Chart.AppVersion is used
+
+iavfDriver:
+ image:
+ repository: integratedcloudnative/iavf-driver-installer
+ pullPolicy: IfNotPresent
+ tag: latest
+ imagePullSecrets: []
+
+ nodeSelector:
+ feature.node.kubernetes.io/custom-iavf: "true"
+
+ podSecurityContext: {}
+ # fsGroup: 2000
+
+ securityContext:
+ privileged: true
+
+ resources: {}
+
+ affinity: {}
+
+ tolerations: {}
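
A hedged example of overriding these defaults at install time, assuming only the keys defined above; the IAVF driver tag is hypothetical:

enableInjector: true           # deploy the network resource injector webhook
logLevel: 0                    # basic logs only
iavfDriver:
  image:
    tag: "v0.1.0"              # hypothetical tag, pin to a published release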
diff --git a/kud/deployment_infra/helm/sriov-network/.helmignore b/kud/deployment_infra/helm/sriov-network/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/sriov-network/Chart.yaml b/kud/deployment_infra/helm/sriov-network/Chart.yaml
new file mode 100644
index 00000000..8cf3a1d5
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: 4.8.0
+description: |
+ SR-IOV CNI plugin and Device plugin configuration.
+name: sriov-network
+sources:
+ - https://github.com/k8snetworkplumbingwg/sriov-network-operator
+home: https://github.com/k8snetworkplumbingwg/sriov-network-operator
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl b/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl
new file mode 100644
index 00000000..08baf040
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/_helpers.tpl
@@ -0,0 +1,34 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sriov-network.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "sriov-network.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "sriov-network.labels" -}}
+helm.sh/chart: {{ include "sriov-network.chart" . }}
+{{ include "sriov-network.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "sriov-network.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "sriov-network.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml b/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml
new file mode 100644
index 00000000..550f00dc
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/sriovnetwork.yaml
@@ -0,0 +1,40 @@
+{{- range $network := .Values.networks }}
+---
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovNetwork
+metadata:
+ name: {{ $network.networkName }}
+ labels:
+ {{- include "sriov-network.labels" $ | nindent 4 }}
+spec:
+ {{- with $network.capabilities }}
+ capabilities: | {{ . | nindent 4 }}
+ {{- end }}
+ ipam: | {{ $network.ipam | nindent 4 }}
+ {{- if $network.linkState }}
+ linkState: {{ $network.linkState }}
+ {{- end }}
+ {{- if $network.maxTxRate }}
+ maxTxRate: {{ $network.maxTxRate }}
+ {{- end }}
+ {{- with $network.metaPlugins }}
+ metaPlugins: | {{ . | nindent 4 }}
+ {{- end }}
+ {{- if $network.minTxRate }}
+ minTxRate: {{ $network.minTxRate }}
+ {{- end }}
+ networkNamespace: {{ $network.networkNamespace }}
+ resourceName: {{ $network.resourceName }}
+ {{- if $network.spoofChk }}
+ spoofChk: {{ $network.spoofChk }}
+ {{- end }}
+ {{- if $network.trust }}
+ trust: {{ $network.trust }}
+ {{- end }}
+ {{- if $network.vlan }}
+ vlan: {{ $network.vlan }}
+ {{- end }}
+ {{- if $network.vlanQoS }}
+ vlanQoS: {{ $network.vlanQoS }}
+ {{- end }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml b/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml
new file mode 100644
index 00000000..382df562
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/templates/sriovnetworknodepolicy.yaml
@@ -0,0 +1,52 @@
+{{- range $policy := .Values.policies }}
+---
+apiVersion: sriovnetwork.openshift.io/v1
+kind: SriovNetworkNodePolicy
+metadata:
+ name: {{ $policy.policyName }}
+ labels:
+ {{- include "sriov-network.labels" $ | nindent 4 }}
+spec:
+ {{- if $policy.deviceType }}
+ deviceType: {{ $policy.deviceType }}
+ {{- end }}
+ {{- if $policy.eSwitchMode }}
+ eSwitchMode: {{ $policy.eSwitchMode }}
+ {{- end }}
+ nicSelector:
+ {{- if $policy.nicSelector.deviceID }}
+ deviceID: {{ $policy.nicSelector.deviceID | quote }}
+ {{- end }}
+ {{- if $policy.nicSelector.netFilter }}
+ netFilter: {{ $policy.nicSelector.netFilter | quote }}
+ {{- end }}
+ {{- with $policy.nicSelector.pfNames }}
+ pfNames:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with $policy.nicSelector.rootDevices }}
+ rootDevices:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- if $policy.nicSelector.vendor }}
+ vendor: {{ $policy.nicSelector.vendor | quote }}
+ {{- end }}
+ {{- if $policy.isRdma }}
+ isRdma: {{ $policy.isRdma }}
+ {{- end }}
+ {{- if $policy.linkType }}
+ linkType: {{ $policy.linkType }}
+ {{- end }}
+ {{- if $policy.mtu }}
+ mtu: {{ $policy.mtu }}
+ {{- end }}
+ {{- with $policy.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ numVfs: {{ $policy.numVfs }}
+ {{- if $policy.priority }}
+ priority: {{ $policy.priority }}
+ {{- end }}
+ resourceName: {{ $policy.resourceName }}
+{{- end }}
\ No newline at end of file
diff --git a/kud/deployment_infra/helm/sriov-network/values.yaml b/kud/deployment_infra/helm/sriov-network/values.yaml
new file mode 100644
index 00000000..d9a38222
--- /dev/null
+++ b/kud/deployment_infra/helm/sriov-network/values.yaml
@@ -0,0 +1,144 @@
+nameOverride: ""
+
+policies:
+- # policyName is the name of the policy
+ policyName: "policy-xl710"
+
+ # nicSelector selects the NICs to be configured. At least one of
+ # vendor, deviceID, pfNames, or rootDevices must be defined.
+ nicSelector:
+ # deviceID is the device hex code of SR-IOV device.
+ deviceID: "1583"
+
+ # netFilter is the infrastructure networking selection
+ # filter. Allowed values are
+ # "openstack/NetworkID:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ #netFilter: ""
+
+ # pfNames is a list of the SR-IOV PF names.
+ #pfNames: []
+
+ # rootDevices is a list of the PCI addresses of SR-IOV PFs.
+ #rootDevices: []
+
+ # vendor is the vendor hex code of SR-IOV device. Allowed values are
+ # "8086", "15b3".
+ vendor: "8086"
+
+ # nodeSelector selects the nodes to be configured.
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_1583.present: "true"
+
+ # numVfs is the number of VFs for each PF.
+ numVfs: 8
+
+ # resourceName is the SR-IOV network device plugin endpoint resource name.
+ resourceName: "intel_sriov_nic"
+
+- policyName: "policy-82599es"
+ nicSelector:
+ deviceID: "10fb"
+ vendor: "8086"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_10fb.present: "true"
+ numVfs: 8
+ resourceName: "intel_sriov_nic"
+
+- policyName: "policy-i350"
+ nicSelector:
+ deviceID: "1521"
+ vendor: "8086"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ feature.node.kubernetes.io/pci-0200_8086_1521.present: "true"
+ numVfs: 2
+ resourceName: "intel_sriov_nic"
+
+networks:
+- # networkName is the name of both the SriovNetwork and the created
+ # NetworkAttachmentDefinition custom resource.
+ networkName: "sriov-intel"
+
+ # networkNamespace of the NetworkAttachmentDefinition custom resource.
+ networkNamespace: "default"
+
+ # ipam configuration to be used for the network.
+ ipam: |
+ {
+ "type": "host-local",
+ "subnet": "10.56.206.0/24",
+ "routes": [{
+ "dst": "0.0.0.0/0"
+ }],
+ "gateway": "10.56.206.1"
+ }
+
+ #
+ # Optional values are listed below.
+ #
+
+ # deviceType is the driver type for configured VFs. Allowed values are
+ # "netdevice" and "vfio-pci".
+ #deviceType: "netdevice"
+
+ # eSwitchMode NIC Device Mode. Allowed values are "legacy" and
+ # "switchdev".
+ #eSwitchMode: "switchdev"
+
+ # isRdma defaults to false.
+ #isRdma: true
+
+ # linkType is the NIC link type. Allowed values are "eth", "ETH",
+ # "ib", and "IB".
+ #linkType: "eth"
+
+ # mtu of VF
+ #mtu: 9000
+
+ # priority of the policy, higher priority policies can override lower
+ # ones.
+ #priority: 99
+
+ # capabilities to be configured for this network. Capabilities
+ # supported: (mac|ips), e.g. '{"mac": true}'
+ #capabilities: |
+ # {
+ # "mac": true
+ # }
+
+ # linkState of VF (enable|disable|auto).
+ #linkState: "enable"
+
+ # maxTxRate, in Mbps, for the VF. Defaults to 0 (no rate limiting).
+ #maxTxRate: 0
+
+ # metaPlugins configuration to be used in order to chain metaplugins
+ # to the SR-IOV interface returned by the operator.
+ #metaPlugins: |
+ # {
+ # "type": "tuning",
+ # "sysctl": {
+ # "net.core.somaxconn": "500"
+ # }
+ # }
+
+ # minTxRate, in Mbps, for the VF. Defaults to 0 (no rate
+ # limiting). min_tx_rate should be <= max_tx_rate.
+ #minTxRate: 0
+
+ # spoofChk for VF, (on|off)
+ #spoofChk: "off"
+
+ # trust mode of VF (on|off)
+ #trust: "off"
+
+ # vlan ID to assign for the VF. Defaults to 0.
+ #vlan: 0
+
+ # vlanQoS ID to assign for the VF. Defaults to 0.
+ #vlanQoS: 0
+
+ # resourceName is the SR-IOV network device plugin endpoint resource name.
+ resourceName: "intel_sriov_nic"
diff --git a/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer b/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer
new file mode 100644
index 00000000..9bbfd372
--- /dev/null
+++ b/kud/deployment_infra/installers/Dockerfile.iavf-driver-installer
@@ -0,0 +1,20 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+ apt-get install -y \
+ bc \
+ build-essential \
+ curl \
+ kmod \
+ libelf-dev \
+ libssl-dev \
+ libudev-dev \
+ pciutils \
+ pkg-config \
+ && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY _common.sh /
+COPY entrypoint-iavf-driver-installer.sh /entrypoint.sh
+
+CMD /entrypoint.sh
diff --git a/kud/deployment_infra/installers/Dockerfile.qat-driver-installer b/kud/deployment_infra/installers/Dockerfile.qat-driver-installer
new file mode 100644
index 00000000..7d885a59
--- /dev/null
+++ b/kud/deployment_infra/installers/Dockerfile.qat-driver-installer
@@ -0,0 +1,21 @@
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+ apt-get install -y \
+ bc \
+ build-essential \
+ curl \
+ kmod \
+ libelf-dev \
+ libssl-dev \
+ libudev-dev \
+ pciutils \
+ pkg-config \
+ && \
+ rm -rf /var/lib/apt/lists/*
+
+COPY _common.sh /
+COPY _qat-driver-installer.sh /
+COPY entrypoint-qat-driver-installer.sh /entrypoint.sh
+
+CMD /entrypoint.sh
diff --git a/kud/deployment_infra/installers/Makefile b/kud/deployment_infra/installers/Makefile
new file mode 100644
index 00000000..99aadbc0
--- /dev/null
+++ b/kud/deployment_infra/installers/Makefile
@@ -0,0 +1,10 @@
+REGISTRY?=integratedcloudnative
+TAG?=latest
+IMAGES=iavf-driver-installer qat-driver-installer
+
+.PHONY: all $(IMAGES)
+
+all: $(IMAGES)
+
+$(IMAGES):
+ docker build -t $(REGISTRY)/$@:$(TAG) -f Dockerfile.$@ .
diff --git a/kud/deployment_infra/installers/_common.sh b/kud/deployment_infra/installers/_common.sh
new file mode 100644
index 00000000..87badfc9
--- /dev/null
+++ b/kud/deployment_infra/installers/_common.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+set -o errexit
+set -o pipefail
+set -u
+
+ROOT_MOUNT_DIR="${ROOT_MOUNT_DIR:-/root}"
+ROOT_OS_RELEASE="${ROOT_OS_RELEASE:-$ROOT_MOUNT_DIR/etc/os-release}"
+KERNEL_SRC_DIR=$(readlink -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/build")
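+# If the resolved path is not already under ROOT_MOUNT_DIR, prefix it so the host's kernel sources are used.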
+[[ "${KERNEL_SRC_DIR}" == "${ROOT_MOUNT_DIR}/*" ]] || KERNEL_SRC_DIR="${ROOT_MOUNT_DIR}${KERNEL_SRC_DIR}"
+KERNEL_MOD_SIGN_CMD="${KERNEL_MOD_SIGN_CMD:-}"
+
+RETCODE_SUCCESS=0
+RETCODE_ERROR=1
+
+_log() {
+ local -r prefix="$1"
+ shift
+ echo "[${prefix}$(date -u "+%Y-%m-%d %H:%M:%S %Z")] ""$*" >&2
+}
+
+info() {
+ _log "INFO " "$*"
+}
+
+warn() {
+ _log "WARNING " "$*"
+}
+
+error() {
+ _log "ERROR " "$*"
+}
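+
+# Example: info "Building driver" writes a line such as
+# "[INFO 2021-01-01 00:00:00 UTC] Building driver" to stderr
+# (the timestamp shown is illustrative).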
+
+load_etc_os_release() {
+ if [[ ! -f "${ROOT_OS_RELEASE}" ]]; then
+ error "File ${ROOT_OS_RELEASE} not found, /etc/os-release from host must be mounted"
+ exit ${RETCODE_ERROR}
+ fi
+ . "${ROOT_OS_RELEASE}"
+ info "Running on ${NAME} kernel version $(uname -r)"
+}
diff --git a/kud/deployment_infra/installers/_qat-driver-installer.sh b/kud/deployment_infra/installers/_qat-driver-installer.sh
new file mode 100644
index 00000000..5ecc2f5a
--- /dev/null
+++ b/kud/deployment_infra/installers/_qat-driver-installer.sh
@@ -0,0 +1,514 @@
+#!/bin/bash
+#
+# The functions below are captured from the Makefile targets. They
+# cannot be run in a container as-is due to absolute paths, so they
+# are recreated here.
+#
+# Note also that the portions of qat-driver-install that deal with
+# rc.d are removed: they are intended to be handled by the deployed
+# DaemonSet. The rest is contained in _qat_service_start.
+#
+# The checks for loaded modules are moved to _qat_check_started.
+
+BIN_LIST="qat_c3xxx.bin qat_c3xxx_mmp.bin qat_c62x.bin \
+ qat_c62x_mmp.bin qat_mmp.bin qat_d15xx.bin qat_d15xx_mmp.bin \
+ qat_200xx.bin qat_200xx_mmp.bin qat_895xcc.bin qat_895xcc_mmp.bin"
+
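+# Count the QAT endpoints on the PCI bus by vendor:device ID (P = physical
+# function, V = virtual function). The "|| true" keeps errexit/pipefail
+# from aborting the script when "grep -c" finds no match and exits 1.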
+numDh895xDevicesP=$(lspci -n | grep -c "8086:0435") || true
+numDh895xDevicesV=$(lspci -n | grep -c "8086:0443") || true
+numC62xDevicesP=$(lspci -n | grep -c "8086:37c8") || true
+numC62xDevicesV=$(lspci -n | grep -c "8086:37c9") || true
+numD15xxDevicesP=$(lspci -n | grep -c "8086:6f54") || true
+numD15xxDevicesV=$(lspci -n | grep -c "8086:6f55") || true
+numC3xxxDevicesP=$(lspci -n | grep -c "8086:19e2") || true
+numC3xxxDevicesV=$(lspci -n | grep -c "8086:19e3") || true
+num200xxDevicesP=$(lspci -n | grep -c "8086:18ee") || true
+num200xxDevicesV=$(lspci -n | grep -c "8086:18ef") || true
+
+_qat_driver_install() {
+ info "Installing drivers"
+ if [[ -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ info "No driver signing required"
+ INSTALL_MOD_PATH=${ROOT_MOUNT_DIR} make KDIR="${KERNEL_SRC_DIR}" -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/qat" mod_sign_cmd=":" modules_install
+ else
+ info "Driver signing is required"
+ INSTALL_MOD_PATH=${ROOT_MOUNT_DIR} make KDIR="${KERNEL_SRC_DIR}" -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/qat" mod_sign_cmd="${KERNEL_MOD_SIGN_CMD}" modules_install
+ fi
+}
+
+_adf_ctl_install() {
+ info "Installing adf_ctl"
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/quickassist/utilities/adf_ctl/adf_ctl" "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+}
+
+_adf_ctl_uninstall() {
+ info "Uninstalling adf_ctl"
+ # rm ${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl
+ return 0
+}
+
+_rename_ssl_conf_section() {
+    info "Renaming SSL section in conf files"
+    restore_nullglob=$(shopt -p | grep nullglob)
+    shopt -s nullglob
+    for prefix in dh895xcc c6xx d15xx c3xxx 200xx \
+        dh895xccvf c6xxvf d15xxvf c3xxxvf 200xxvf; do
+        for file in ${ROOT_MOUNT_DIR}/etc/${prefix}_dev*.conf; do
+            dev=$(echo "$file" | cut -d '_' -f 2 | tr -cd '[:digit:]')
+            sed -i "s/\[SSL\]/\[SSL${dev}\]/g" "${ROOT_MOUNT_DIR}/etc/${prefix}_dev${dev}.conf"
+        done
+    done
+    $restore_nullglob
+}
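+# For example, _rename_ssl_conf_section turns the "[SSL]" section header
+# in /etc/c6xx_dev2.conf into "[SSL2]".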
+
+_qat_service_install() {
+ local -r QAT_DH895XCC_NUM_VFS=32
+ local -r QAT_DHC62X_NUM_VFS=16
+ local -r QAT_DHD15XX_NUM_VFS=16
+ local -r QAT_DHC3XXX_NUM_VFS=16
+ local -r QAT_DH200XX_NUM_VFS=16
+ local -r DEVICES="0435 0443 37c8 37c9 6f54 6f55 19e2 19e3 18ee 18ef"
+
+ info "Installing service"
+ pushd "${QAT_INSTALL_DIR_CONTAINER}/build" > /dev/null
+
+ if [[ ! -d ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup ]]; then
+ mkdir -p "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup"
+ fi
+ for bin in ${BIN_LIST}; do
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/${bin} ]]; then
+ mv "${ROOT_MOUNT_DIR}/lib/firmware/${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin}"
+ fi
+ if [[ -e ${bin} ]]; then
+ install -D -m 750 "${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ done
+ if [[ ! -d ${ROOT_MOUNT_DIR}/etc/qat_conf_backup ]]; then
+ mkdir "${ROOT_MOUNT_DIR}/etc/qat_conf_backup"
+ fi
+ mv "${ROOT_MOUNT_DIR}/etc/dh895xcc*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/c6xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/d15xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/c3xxx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ mv "${ROOT_MOUNT_DIR}/etc/200xx*.conf" "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/" 2>/dev/null || true
+ if [[ "${QAT_ENABLE_SRIOV}" != "guest" ]]; then
+ for ((dev=0; dev<numDh895xDevicesP; dev++)); do
+ install -D -m 640 dh895xcc_dev0.conf "${ROOT_MOUNT_DIR}/etc/dh895xcc_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC62xDevicesP; dev++)); do
+ install -D -m 640 c6xx_dev$((dev%3)).conf "${ROOT_MOUNT_DIR}/etc/c6xx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numD15xxDevicesP; dev++)); do
+ install -D -m 640 d15xx_dev$((dev%3)).conf "${ROOT_MOUNT_DIR}/etc/d15xx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC3xxxDevicesP; dev++)); do
+ install -D -m 640 c3xxx_dev0.conf "${ROOT_MOUNT_DIR}/etc/c3xxx_dev${dev}.conf"
+ done
+ for ((dev=0; dev<num200xxDevicesP; dev++)); do
+ install -D -m 640 200xx_dev0.conf "${ROOT_MOUNT_DIR}/etc/200xx_dev${dev}.conf"
+ done
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ for ((dev=0; dev<numDh895xDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DH895XCC_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DH895XCC_NUM_VFS + vf_dev))
+ install -D -m 640 dh895xccvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numC62xDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHC62X_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHC62X_NUM_VFS + vf_dev))
+ install -D -m 640 c6xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c6xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numD15xxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHD15XX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHD15XX_NUM_VFS + vf_dev))
+ install -D -m 640 d15xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/d15xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<numC3xxxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DHC3XXX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DHC3XXX_NUM_VFS + vf_dev))
+ install -D -m 640 c3xxxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ for ((dev=0; dev<num200xxDevicesP; dev++)); do
+ for ((vf_dev=0; vf_dev<QAT_DH200XX_NUM_VFS; vf_dev++)); do
+ vf_dev_num=$((dev * QAT_DH200XX_NUM_VFS + vf_dev))
+ install -D -m 640 200xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/200xxvf_dev${vf_dev_num}.conf"
+ done
+ done
+ else
+ for ((dev=0; dev<numDh895xDevicesV; dev++)); do
+ install -D -m 640 dh895xccvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/dh895xccvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC62xDevicesV; dev++)); do
+ install -D -m 640 c6xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c6xxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numD15xxDevicesV; dev++)); do
+ install -D -m 640 d15xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/d15xxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<numC3xxxDevicesV; dev++)); do
+ install -D -m 640 c3xxxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/c3xxxvf_dev${dev}.conf"
+ done
+ for ((dev=0; dev<num200xxDevicesV; dev++)); do
+ install -D -m 640 200xxvf_dev0.conf.vm "${ROOT_MOUNT_DIR}/etc/200xxvf_dev${dev}.conf"
+ done
+ fi
+ _rename_ssl_conf_section
+ info "Creating startup and kill scripts"
+ install -D -m 750 qat_service "${ROOT_MOUNT_DIR}/etc/init.d/qat_service"
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ install -D -m 750 qat_service_vfs "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_vfs"
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" || "${QAT_ENABLE_SRIOV}" == "guest" ]]; then
+ echo "# Comment or remove next line to disable sriov" > "${ROOT_MOUNT_DIR}/etc/default/qat"
+ echo "SRIOV_ENABLE=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ else
+ echo "# Remove comment on next line to enable sriov" > "${ROOT_MOUNT_DIR}/etc/default/qat"
+ echo "#SRIOV_ENABLE=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ fi
+ echo "#LEGACY_LOADED=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ echo "blacklist qat_dh895xccvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ echo "blacklist qat_c3xxxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ echo "blacklist qat_200xxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ echo "blacklist qat_c62xvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ echo "blacklist qat_d15xxvf" >> "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+ fi
+ fi
+ echo "#ENABLE_KAPI=1" >> "${ROOT_MOUNT_DIR}/etc/default/qat"
+ info "Copying libqat_s.so to ${ROOT_MOUNT_DIR}/usr/local/lib"
+ install -D -m 755 libqat_s.so "${ROOT_MOUNT_DIR}/usr/local/lib/libqat_s.so"
+ info "Copying libusdm_drv_s.so to ${ROOT_MOUNT_DIR}/usr/local/lib"
+ install -D -m 755 libusdm_drv_s.so "${ROOT_MOUNT_DIR}/usr/local/lib/libusdm_drv_s.so"
+ echo /usr/local/lib > "${ROOT_MOUNT_DIR}/etc/ld.so.conf.d/qat.conf"
+ ldconfig -r "${ROOT_MOUNT_DIR}"
+ info "Copying usdm module to system drivers"
+ if [[ ! -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ info "Need to sign driver usdm_drv.ko"
+ ${KERNEL_MOD_SIGN_CMD} usdm_drv.ko
+ info "Need to sign driver qat_api.ko"
+ ${KERNEL_MOD_SIGN_CMD} qat_api.ko
+ fi
+ install usdm_drv.ko "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers"
+ install qat_api.ko "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers"
+ if [[ ! $(chroot "${ROOT_MOUNT_DIR}" getent group qat) ]]; then
+ info "Creating qat group"
+ groupadd -R "${ROOT_MOUNT_DIR}" qat
+ else
+ info "Group qat already exists"
+ fi
+ info "Creating udev rules"
+ rm -f "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+ {
+ echo 'KERNEL=="qat_adf_ctl" MODE="0660" GROUP="qat"';
+ echo 'KERNEL=="qat_dev_processes" MODE="0660" GROUP="qat"';
+ echo 'KERNEL=="usdm_drv" MODE="0660" GROUP="qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/mkdir /dev/hugepages/qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/chgrp qat /dev/hugepages/qat"';
+ echo 'ACTION=="add", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/chmod 0770 /dev/hugepages/qat"';
+ echo 'ACTION=="remove", DEVPATH=="/module/usdm_drv" SUBSYSTEM=="module" RUN+="/bin/rmdir /dev/hugepages/qat"';
+ for dev in ${DEVICES}; do
+ echo 'KERNEL=="uio*", ATTRS{vendor}=="0x'"$(echo "8086" | tr -d \")"'", ATTRS{device}=="0x'"$(echo "${dev}" | tr -d \")"'" MODE="0660" GROUP="qat"';
+ done
+ } > "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+ info "Creating module.dep file for QAT released kernel object"
+ info "This will take a few moments"
+ depmod -a -b "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/depmod.d"
+
+ popd > /dev/null
+}
+
+_qat_service_start() {
+ if [[ $(lsmod | grep -c "usdm_drv") != "0" ]]; then
+ rmmod usdm_drv
+ fi
+ info "Starting QAT service"
+ info "... shutting down"
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service shutdown || true
+ sleep 3
+ info "... starting"
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service start
+ if [[ "${QAT_ENABLE_SRIOV}" == "host" ]]; then
+ modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" vfio-pci
+ chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service_vfs start
+ fi
+ info "... started"
+}
+
+_qat_check_started() {
+ if [[ $(lsmod | grep -c "usdm_drv") == "0" ]]; then
+ error "usdm_drv module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_dh895xcc") == "0" ]]; then
+ error "qat_dh895xcc module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c62x") == "0" ]]; then
+ error "qat_c62x module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_d15xx") == "0" ]]; then
+ error "qat_d15xx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c3xxx") == "0" ]]; then
+ error "qat_c3xxx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_200xx") == "0" ]]; then
+ error "qat_200xx module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ "${QAT_ENABLE_SRIOV}" == "guest" ]]; then
+ if [[ ${numDh895xDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_dh895xccvf") == "0" ]]; then
+ error "qat_dh895xccvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC62xDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c62xvf") == "0" ]]; then
+ error "qat_c62xvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numD15xxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_d15xxvf") == "0" ]]; then
+ error "qat_d15xxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${numC3xxxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_c3xxxvf") == "0" ]]; then
+ error "qat_c3xxxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ if [[ ${num200xxDevicesV} != 0 ]]; then
+ if [[ $(lsmod | grep -c "qat_200xxvf") == "0" ]]; then
+ error "qat_200xxvf module not installed"
+ return "${RETCODE_ERROR}"
+ fi
+ fi
+ fi
+ if [[ $("${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl" status | grep -c "state: down") != "0" ]]; then
+ error "QAT driver not activated"
+ return "${RETCODE_ERROR}"
+ fi
+}
+
+_qat_service_shutdown() {
+ info "Stopping service"
+ if [[ $(lsmod | grep -c "qat") != "0" || -e ${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat/qat_common/intel_qat.ko ]]; then
+ if [[ $(lsmod | grep -c "usdm_drv") != "0" ]]; then
+ rmmod usdm_drv
+ fi
+ if [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream ]]; then
+ until chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service_upstream shutdown; do
+ sleep 1
+ done
+ elif [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service ]]; then
+ until chroot "${ROOT_MOUNT_DIR}" /etc/init.d/qat_service shutdown; do
+ sleep 1
+ done
+ fi
+ fi
+}
+
+_qat_service_uninstall() {
+ info "Uninstalling service"
+ if [[ $(lsmod | grep -c "qat") != "0" || -e ${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat/qat_common/intel_qat.ko ]]; then
+ info "Removing the QAT firmware"
+ for bin in ${BIN_LIST}; do
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/${bin} ]]; then
+ rm "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ if [[ -e ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin} ]]; then
+ mv "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup/${bin}" "${ROOT_MOUNT_DIR}/lib/firmware/${bin}"
+ fi
+ done
+
+        if [[ -d ${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup ]]; then
+            rmdir --ignore-fail-on-non-empty "${ROOT_MOUNT_DIR}/lib/firmware/qat_fw_backup"
+        fi
+
+ if [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream ]]; then
+ rm "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_upstream"
+ rm "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+ elif [[ -e ${ROOT_MOUNT_DIR}/etc/init.d/qat_service ]]; then
+ rm "${ROOT_MOUNT_DIR}/etc/init.d/qat_service"
+ rm "${ROOT_MOUNT_DIR}/usr/local/bin/adf_ctl"
+ fi
+ rm -f "${ROOT_MOUNT_DIR}/etc/init.d/qat_service_vfs"
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/blacklist-qat-vfs.conf"
+
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/lib/libqat_s.so"
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/lib/libusdm_drv_s.so"
+ rm -f "${ROOT_MOUNT_DIR}/etc/ld.so.conf.d/qat.conf"
+ ldconfig -r "${ROOT_MOUNT_DIR}"
+
+ info "Removing config files"
+ rm -f "${ROOT_MOUNT_DIR}/etc/dh895xcc*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/c6xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/d15xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/c3xxx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/200xx*.conf"
+ rm -f "${ROOT_MOUNT_DIR}/etc/udev/rules.d/00-qat.rules"
+
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/dh895xcc*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/c6xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/d15xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/c3xxx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+ mv -f "${ROOT_MOUNT_DIR}/etc/qat_conf_backup/200xx*.conf" "${ROOT_MOUNT_DIR}/etc/" 2>/dev/null || true
+
+ info "Removing drivers modules"
+ rm -rf "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/updates/drivers/crypto/qat"
+ rm -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers/usdm_drv.ko"
+ rm -f "${ROOT_MOUNT_DIR}/lib/modules/$(uname -r)/kernel/drivers/qat_api.ko"
+ info "Creating module.dep file for QAT released kernel object"
+ depmod -a -b "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/depmod.d"
+
+ if [[ $(lsmod | grep -c "usdm_drv|intel_qat") != "0" ]]; then
+ if [[ $(modinfo intel_qat | grep -c "updates") == "0" ]]; then
+ info "In-tree driver loaded"
+ info "Acceleration uninstall complete"
+ else
+ error "Some modules not removed properly"
+ error "Acceleration uninstall failed"
+ fi
+ else
+ info "Acceleration uninstall complete"
+ fi
+ if [[ ${numDh895xDevicesP} != 0 ]]; then
+ lsmod | grep qat_dh895xcc >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_dh895xcc >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC62xDevicesP} != 0 ]]; then
+ lsmod | grep qat_c62x >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c62x >/dev/null 2>&1 || true
+ fi
+ if [[ ${numD15xxDevicesP} != 0 ]]; then
+ lsmod | grep qat_d15xx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_d15xx >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC3xxxDevicesP} != 0 ]]; then
+ lsmod | grep qat_c3xxx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c3xxx >/dev/null 2>&1 || true
+ fi
+ if [[ ${num200xxDevicesP} != 0 ]]; then
+ lsmod | grep qat_200xx >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_200xx >/dev/null 2>&1 || true
+ fi
+ if [[ ${numDh895xDevicesV} != 0 ]]; then
+ lsmod | grep qat_dh895xccvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_dh895xccvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC62xDevicesV} != 0 ]]; then
+ lsmod | grep qat_c62xvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c62xvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numD15xxDevicesV} != 0 ]]; then
+ lsmod | grep qat_d15xxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_d15xxvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${numC3xxxDevicesV} != 0 ]]; then
+ lsmod | grep qat_c3xxxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_c3xxxvf >/dev/null 2>&1 || true
+ fi
+ if [[ ${num200xxDevicesV} != 0 ]]; then
+ lsmod | grep qat_200xxvf >/dev/null 2>&1 || modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" -b -q qat_200xxvf >/dev/null 2>&1 || true
+ fi
+ else
+ info "Acceleration package not installed"
+ fi
+}
+
+_qat_sample_install() {
+ info "Installing samples"
+ if [[ -f ${QAT_INSTALL_DIR_CONTAINER}/quickassist/utilities/libusdm_drv/linux/build/linux_2.6/user_space/libusdm_drv.a ]]; then
+ ICP_ROOT="${QAT_INSTALL_DIR_CONTAINER}" make perf_user -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/build/linux_2.6/user_space/cpa_sample_code" "${QAT_INSTALL_DIR_CONTAINER}/build"
+ ICP_ROOT="${QAT_INSTALL_DIR_CONTAINER}" KERNEL_SOURCE_ROOT="${KERNEL_SRC_DIR}" make perf_kernel -C "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/build/linux_2.6/kernel_space/cpa_sample_code.ko" "${QAT_INSTALL_DIR_CONTAINER}/build"
+ else
+ error "No libusdm_drv library found - build the project (make all) before samples"
+ return "${RETCODE_ERROR}"
+ fi
+
+ if [[ ! -d ${ROOT_MOUNT_DIR}/lib/firmware ]]; then
+ mkdir "${ROOT_MOUNT_DIR}/lib/firmware"
+ fi
+
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary" "${ROOT_MOUNT_DIR}/lib/firmware"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/calgary32" "${ROOT_MOUNT_DIR}/lib/firmware"
+ cp "${QAT_INSTALL_DIR_CONTAINER}/quickassist/lookaside/access_layer/src/sample_code/performance/compression/canterbury" "${ROOT_MOUNT_DIR}/lib/firmware"
+ if [[ ! -z "${KERNEL_MOD_SIGN_CMD}" ]]; then
+ if [[ -f ${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko ]]; then
+ echo "Need to sign sample code ${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko."
+ "${KERNEL_MOD_SIGN_CMD}" "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko"
+ fi
+ fi
+
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code" "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code"
+ install -D -m 750 "${QAT_INSTALL_DIR_CONTAINER}/build/cpa_sample_code.ko" "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code.ko"
+ info "cpa_sample_code installed under ${ROOT_MOUNT_DIR}/usr/local/bin directory"
+}
+
+_qat_sample_uninstall() {
+ info "Uninstalling samples"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/calgary"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/calgary32"
+ rm -f "${ROOT_MOUNT_DIR}/lib/firmware/canterbury"
+
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code"
+ rm -f "${ROOT_MOUNT_DIR}/usr/local/bin/cpa_sample_code.ko"
+}
diff --git a/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh b/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh
new file mode 100755
index 00000000..1418d0df
--- /dev/null
+++ b/kud/deployment_infra/installers/entrypoint-iavf-driver-installer.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+
+#set -x
+source _common.sh
+
+IAVF_DRIVER_VERSION="${IAVF_DRIVER_VERSION:-4.0.2}"
+IAVF_DRIVER_DOWNLOAD_URL_DEFAULT="https://downloadmirror.intel.com/24693/eng/iavf-${IAVF_DRIVER_VERSION}.tar.gz"
+IAVF_DRIVER_DOWNLOAD_URL="${IAVF_DRIVER_DOWNLOAD_URL:-$IAVF_DRIVER_DOWNLOAD_URL_DEFAULT}"
+IAVF_DRIVER_ARCHIVE="$(basename "${IAVF_DRIVER_DOWNLOAD_URL}")"
+IAVF_INSTALL_DIR_HOST="${IAVF_INSTALL_DIR_HOST:-/opt/iavf}"
+IAVF_INSTALL_DIR_CONTAINER="${IAVF_INSTALL_DIR_CONTAINER:-/usr/local/iavf}"
+CACHE_FILE="${IAVF_INSTALL_DIR_CONTAINER}/.cache"
+
+check_adapter() {
+ local -r nic_models="X710 XL710 X722"
+ if [[ $(lspci | grep -c "Ethernet .* \(${nic_models// /\\|}\)") != "0" ]]; then
+ info "Found adapter"
+ else
+ error "Missing adapter"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+download_iavf_src() {
+ info "Downloading IAVF source ... "
+ mkdir -p "${IAVF_INSTALL_DIR_CONTAINER}"
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}" > /dev/null
+ curl -L -sS "${IAVF_DRIVER_DOWNLOAD_URL}" -o "${IAVF_DRIVER_ARCHIVE}"
+ tar xf "${IAVF_DRIVER_ARCHIVE}" --strip-components=1
+ popd > /dev/null
+}
+
+build_iavf_src() {
+ info "Building IAVF source ... "
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}/src" > /dev/null
+ KSRC=${KERNEL_SRC_DIR} SYSTEM_MAP_FILE="${ROOT_MOUNT_DIR}/boot/System.map-$(uname -r)" INSTALL_MOD_PATH="${ROOT_MOUNT_DIR}" make install
+    # TODO: Unable to update initramfs; you may need to do this manually.
+ popd > /dev/null
+}
+
+install_iavf() {
+ check_adapter
+ download_iavf_src
+ build_iavf_src
+}
+
+uninstall_iavf() {
+ if [[ $(lsmod | grep -c "iavf") != "0" ]]; then
+ rmmod iavf
+ fi
+ if [[ $(lsmod | grep -c "i40evf") != "0" ]]; then
+ rmmod i40evf
+ fi
+ if [[ -d "${IAVF_INSTALL_DIR_CONTAINER}/src" ]]; then
+ pushd "${IAVF_INSTALL_DIR_CONTAINER}/src" > /dev/null
+ KSRC=${KERNEL_SRC_DIR} SYSTEM_MAP_FILE="${ROOT_MOUNT_DIR}/boot/System.map-$(uname -r)" INSTALL_MOD_PATH="${ROOT_MOUNT_DIR}" make uninstall
+ popd > /dev/null
+ fi
+ # This is a workaround for missing INSTALL_MOD_PATH prefix in the Makefile:
+ rm -f "${ROOT_MOUNT_DIR}/etc/modprobe.d/iavf.conf"
+}
+
+check_cached_version() {
+ info "Checking cached version"
+ if [[ ! -f "${CACHE_FILE}" ]]; then
+ info "Cache file ${CACHE_FILE} not found"
+ return "${RETCODE_ERROR}"
+ fi
+ # Source the cache file and check if the cached driver matches
+ # currently running kernel and driver versions.
+ . "${CACHE_FILE}"
+ if [[ "$(uname -r)" == "${CACHE_KERNEL_VERSION}" ]]; then
+ if [[ "${IAVF_DRIVER_VERSION}" == "${CACHE_IAVF_DRIVER_VERSION}" ]]; then
+ info "Found existing driver installation for kernel version $(uname -r) and driver version ${IAVF_DRIVER_VERSION}"
+ return "${RETCODE_SUCCESS}"
+ fi
+ fi
+ return "${RETCODE_ERROR}"
+}
+
+update_cached_version() {
+ cat >"${CACHE_FILE}"<<__EOF__
+CACHE_KERNEL_VERSION=$(uname -r)
+CACHE_IAVF_DRIVER_VERSION=${IAVF_DRIVER_VERSION}
+__EOF__
+
+ info "Updated cached version as:"
+ cat "${CACHE_FILE}"
+}
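+
+# The cache file is a sourceable shell fragment, for example:
+#   CACHE_KERNEL_VERSION=4.15.0-142-generic   # illustrative kernel version
+#   CACHE_IAVF_DRIVER_VERSION=4.0.2
+# check_cached_version sources it and compares both values against the
+# running system.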
+
+upgrade_driver() {
+ uninstall_iavf
+ install_iavf
+}
+
+check_driver_started() {
+ if [[ $(lsmod | grep -c "iavf") == "0" ]]; then
+ return "${RETCODE_ERROR}"
+ fi
+    return "${RETCODE_SUCCESS}"
+}
+
+start_driver() {
+ modprobe -d "${ROOT_MOUNT_DIR}" -C "${ROOT_MOUNT_DIR}/etc/modprobe.d" iavf
+ if ! check_driver_started; then
+ error "Driver not started"
+ fi
+}
+
+uninstall_driver() {
+ uninstall_iavf
+ rm -f "${CACHE_FILE}"
+}
+
+main() {
+ load_etc_os_release
+ local -r cmd="${1:-install}"
+ case $cmd in
+ install)
+ if ! check_cached_version; then
+ upgrade_driver
+ update_cached_version
+ fi
+ if ! check_driver_started; then
+ start_driver
+ fi
+ ;;
+ uninstall)
+ uninstall_driver
+ ;;
+ esac
+}
+
+main "$@"
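+
+# Usage examples: "/entrypoint.sh" (or "/entrypoint.sh install") installs
+# and starts the driver; "/entrypoint.sh uninstall" removes it and clears
+# the cache file.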
diff --git a/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh b/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh
new file mode 100755
index 00000000..f9221309
--- /dev/null
+++ b/kud/deployment_infra/installers/entrypoint-qat-driver-installer.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+
+#set -x
+source _common.sh
+source _qat-driver-installer.sh
+
+# IMPORTANT: If the driver version is changed, review the QAT Makefile
+# against _qat-driver-installer.sh. The steps there are from the Makefile
+# and have been modified to run inside a container.
+QAT_DRIVER_VERSION="${QAT_DRIVER_VERSION:-1.7.l.4.12.0-00011}"
+QAT_DRIVER_DOWNLOAD_URL_DEFAULT="https://01.org/sites/default/files/downloads/qat${QAT_DRIVER_VERSION}.tar.gz"
+QAT_DRIVER_DOWNLOAD_URL="${QAT_DRIVER_DOWNLOAD_URL:-$QAT_DRIVER_DOWNLOAD_URL_DEFAULT}"
+QAT_DRIVER_ARCHIVE="$(basename "${QAT_DRIVER_DOWNLOAD_URL}")"
+QAT_INSTALL_DIR_HOST="${QAT_INSTALL_DIR_HOST:-/opt/qat}"
+QAT_INSTALL_DIR_CONTAINER="${QAT_INSTALL_DIR_CONTAINER:-/usr/local/qat}"
+QAT_ENABLE_SRIOV="${QAT_ENABLE_SRIOV:-host}"
+CACHE_FILE="${QAT_INSTALL_DIR_CONTAINER}/.cache"
+
+check_kernel_boot_parameter() {
+ if [[ $(grep -c intel_iommu=on /proc/cmdline) != "0" ]]; then
+ info "Found intel_iommu=on kernel boot parameter"
+ else
+ error "Missing intel_iommu=on kernel boot parameter"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+check_sriov_hardware_capabilities() {
+ if [[ $(lspci -vn -d 8086:0435 | grep -c SR-IOV) != "0" ]]; then
+ info "Found dh895xcc SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:37c8 | grep -c SR-IOV) != "0" ]]; then
+ info "Found c6xx SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:6f54 | grep -c SR-IOV) != "0" ]]; then
+ info "Found d15xx SR-IOV hardware capabilities"
+ elif [[ $(lspci -vn -d 8086:19e2 | grep -c SR-IOV) != "0" ]]; then
+ info "Found c3xxx SR-IOV hardware capabilities"
+ else
+ error "Missing SR-IOV hardware capabilities"
+ exit "${RETCODE_ERROR}"
+ fi
+}
+
+download_qat_src() {
+ info "Downloading QAT source ... "
+ mkdir -p "${QAT_INSTALL_DIR_CONTAINER}"
+ pushd "${QAT_INSTALL_DIR_CONTAINER}" > /dev/null
+ curl -L -sS "${QAT_DRIVER_DOWNLOAD_URL}" -o "${QAT_DRIVER_ARCHIVE}"
+ tar xf "${QAT_DRIVER_ARCHIVE}"
+ popd > /dev/null
+}
+
+build_qat_src() {
+ info "Building QAT source ... "
+ pushd "${QAT_INSTALL_DIR_CONTAINER}" > /dev/null
+ KERNEL_SOURCE_ROOT="${KERNEL_SRC_DIR}" ./configure --enable-icp-sriov="${QAT_ENABLE_SRIOV}"
+ make
+ popd > /dev/null
+}
+
+install_qat() {
+ check_kernel_boot_parameter
+ check_sriov_hardware_capabilities
+ download_qat_src
+ build_qat_src
+ _qat_driver_install
+ _adf_ctl_install
+ _qat_service_install
+}
+
+uninstall_qat() {
+ _adf_ctl_uninstall
+ _qat_service_shutdown
+ _qat_service_uninstall
+}
+
+check_cached_version() {
+ info "Checking cached version"
+ if [[ ! -f "${CACHE_FILE}" ]]; then
+ info "Cache file ${CACHE_FILE} not found"
+ return "${RETCODE_ERROR}"
+ fi
+ # Source the cache file and check if the cached driver matches
+ # currently running kernel and driver versions.
+ . "${CACHE_FILE}"
+ if [[ "$(uname -r)" == "${CACHE_KERNEL_VERSION}" ]]; then
+ if [[ "${QAT_DRIVER_VERSION}" == "${CACHE_QAT_DRIVER_VERSION}" ]]; then
+ info "Found existing driver installation for kernel version $(uname -r) and driver version ${QAT_DRIVER_VERSION}"
+ return "${RETCODE_SUCCESS}"
+ fi
+ fi
+ return "${RETCODE_ERROR}"
+}
+
+update_cached_version() {
+ cat >"${CACHE_FILE}"<<__EOF__
+CACHE_KERNEL_VERSION=$(uname -r)
+CACHE_QAT_DRIVER_VERSION=${QAT_DRIVER_VERSION}
+__EOF__
+
+ info "Updated cached version as:"
+ cat "${CACHE_FILE}"
+}
+
+upgrade_driver() {
+ uninstall_qat
+ install_qat
+}
+
+check_driver_started() {
+ _qat_check_started
+}
+
+start_driver() {
+ _qat_service_start
+ _qat_check_started
+}
+
+uninstall_driver() {
+ uninstall_qat
+ rm -f "${CACHE_FILE}"
+}
+
+main() {
+ load_etc_os_release
+ local -r cmd="${1:-install}"
+ case $cmd in
+ install)
+ if ! check_cached_version; then
+ upgrade_driver
+ update_cached_version
+ fi
+ if ! check_driver_started; then
+ start_driver
+ fi
+ ;;
+ uninstall)
+ uninstall_driver
+ ;;
+ install-sample)
+ _qat_sample_install
+ ;;
+ uninstall-sample)
+ _qat_sample_uninstall
+ ;;
+ esac
+}
+
+main "$@"
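+
+# Usage examples: "/entrypoint.sh" installs and starts the QAT driver;
+# "uninstall", "install-sample", and "uninstall-sample" select the other
+# actions handled by main() above.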
diff --git a/kud/deployment_infra/playbooks/configure-emco-reset.yml b/kud/deployment_infra/playbooks/configure-emco-reset.yml
index 7cad36e4..d13bb9e7 100644
--- a/kud/deployment_infra/playbooks/configure-emco-reset.yml
+++ b/kud/deployment_infra/playbooks/configure-emco-reset.yml
@@ -8,40 +8,44 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: kube-master
+- hosts: localhost
+ become: yes
tasks:
- name: Load kud variables
include_vars:
file: kud-vars.yml
- - name: Change the emco directory and run helm delete
- command: /usr/local/bin/helm uninstall --namespace emco emco
- register: helm_delete
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
+ - name: Check if emco is installed
+ command: /usr/local/bin/helm -n emco list
+ register: helm_list
- - debug:
- var: helm_delete.stdout_lines
+ - name: Set helm_installed fact
+ set_fact:
+ helm_installed: "{{ helm_list.stdout | regex_findall('^\\S+', multiline=True) }}"
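+      # regex_findall('^\S+', multiline=True) keeps the first token of each
+      # line of the "helm list" output (the header plus the release names),
+      # so later tasks can test e.g. '"emco" in helm_installed'.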
- - name: Change the emco directory and delete the emco namespace
- command: /usr/local/bin/kubectl delete ns emco
- register: delete_emco_ns
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
+ - name: Uninstall monitor helm chart
+ command: /usr/local/bin/helm uninstall --namespace emco monitor
+ when: '"monitor" in helm_installed'
- - debug:
- var: delete_emco_ns.stdout_lines
+ - name: Uninstall emco helm charts
+ command: /usr/local/bin/helm uninstall --namespace emco emco
+ when: '"emco" in helm_installed'
- - name: Change the emco directory and make clean
+ - name: Change to the emco directory and delete the emco namespace
+ command: /usr/local/bin/kubectl delete ns emco --ignore-not-found=true
+
+ - name: Check if emco directory exists
+ stat:
+ path: "{{ emco_dir }}"
+ register: emco_dir_stat
+
+ - name: Change to the emco directory and make clean
command: /usr/bin/make clean
- register: make_clean
args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
-
- - debug:
- var: make_clean.stdout_lines
+ chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+ when: emco_dir_stat.stat.exists
- - name: clean multicloud-k8s path
+ - name: Clean emco directory
file:
state: absent
- path: /opt/multicloud
+ path: "{{ emco_dir }}"
diff --git a/kud/deployment_infra/playbooks/configure-emco.yml b/kud/deployment_infra/playbooks/configure-emco.yml
index 96b4a23d..82ce61ad 100644
--- a/kud/deployment_infra/playbooks/configure-emco.yml
+++ b/kud/deployment_infra/playbooks/configure-emco.yml
@@ -8,7 +8,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-- hosts: kube-master
+- hosts: localhost
+ become: yes
tasks:
- name: Load kud variables
include_vars:
@@ -16,43 +17,159 @@
- name: Getting emco code in /opt folder
git:
- repo: 'https://github.com/onap/multicloud-k8s.git'
- dest: /opt/multicloud
+ repo: "{{ emco_git_url }}"
+ version: "{{ emco_version }}"
+ dest: "{{ emco_dir }}"
+ depth: 1
+ force: yes
- - name: install make package for ubuntu systems
+ - name: Install make package for Ubuntu systems
apt: name=make state=present update_cache=yes
when: ansible_distribution == "Ubuntu"
- - name: install make package for centos systems
+ - name: Install make package for CentOS systems
yum: name=make state=present update_cache=yes
when: ansible_distribution == "CentOS"
- - name: Change the emco directory and run the command make all
- command: /usr/bin/make all
- register: make_all
- args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
-
- - debug:
- var: make_all.stdout_lines
-
- name: Create emco namespace
shell: "/usr/local/bin/kubectl create namespace emco"
ignore_errors: True
- name: Create pod security policy role bindings
- shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd"
+ shell: "/usr/local/bin/kubectl -n emco create rolebinding psp:default:privileged --clusterrole=psp:privileged --serviceaccount=emco:default --serviceaccount=emco:emco-fluentd --serviceaccount=emco:monitor"
ignore_errors: True
+ - name: Set artifacts_dir fact
+ set_fact:
+ artifacts_dir: "{{ hostvars[groups['kube-master'][0]]['inventory_dir'] }}/artifacts"
+
+ - name: Make emco helm charts
+ command: /usr/bin/make all
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+ when: "'emco' in emco_roles"
+
- name: Get cluster name
- shell: "kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep clusterName: | awk '{print $2}'"
register: cluster_name
+ when: "'emco' in emco_roles"
+
+ - name: Create helm override values
+ copy:
+ dest: "{{ emco_dir }}/deployments/helm/emcoOpenNESS/helm_value_overrides.yaml"
+ content: |
+ {{ emco_values | to_nice_yaml(indent=2) }}
+ when: "'emco' in emco_roles"
+
+ - name: Install emco helm charts
+ command: /usr/local/bin/helm install --wait --namespace emco -f helm_value_overrides.yaml --set emco-db.etcd.clusterDomain={{ cluster_name.stdout }} --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm/emcoOpenNESS"
+ when: "'emco' in emco_roles"
+
+ - name: Apply patch to emcoctl
+ patch:
+ src: emcoctl-openness-21.03.patch
+ basedir: "{{ emco_dir }}"
+ strip: 1
+ when: emco_version == "openness-21.03"
+
+ - name: Make emcoctl
+ command: /usr/bin/make
+ args:
+ chdir: "{{ emco_dir }}/src/tools/emcoctl"
+ when: "'emco' in emco_roles"
- - name: Change the emco directory and run the command helm install
- command: /usr/local/bin/helm install --namespace emco --set emco-tools.fluentd.clusterDomain={{ cluster_name.stdout }} emco dist/packages/emco-0.1.0.tgz
- register: helm_install
+ - name: Get emco host address
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | awk '/advertiseAddress:/ {print $2;exit}'"
+ register: emco_host
+ when: "'emco' in emco_roles"
+
+ - name: Write emcoctl config on ansible host
+ template:
+ src: emcoconfig.yaml.j2
+ dest: "{{ artifacts_dir }}/emcoconfig.yaml"
+ mode: 0640
+ become: no
+ run_once: yes
+ vars:
+ host: "{{ emco_host.stdout }}"
+ when:
+ - emcoconfig_localhost
+ - "'emco' in emco_roles"
+
+ - name: Copy emcoctl binary to ansible host
+ copy:
+ src: "{{ emco_dir }}/bin/emcoctl/emcoctl"
+ dest: "{{ artifacts_dir }}/emcoctl"
+ mode: 0755
+ become: no
+ run_once: yes
+ when:
+ - emcoctl_localhost
+ - "'emco' in emco_roles"
+
+ - name: Create helper script emcoctl.sh on ansible host
+ copy:
+ content: |
+ #!/bin/bash
+ ${BASH_SOURCE%/*}/emcoctl --config ${BASH_SOURCE%/*}/emcoconfig.yaml "$@"
+ dest: "{{ artifacts_dir }}/emcoctl.sh"
+ mode: 0755
+ become: no
+ run_once: yes
+ when:
+ - emcoctl_localhost
+ - emcoconfig_localhost
+ - "'emco' in emco_roles"
+
+ - name: Apply patch to monitor chart
+ patch:
+ src: emco-monitor-openness-21.03.patch
+ basedir: "{{ emco_dir }}"
+ strip: 1
+ when:
+ - emco_version == "openness-21.03"
+ - "'monitor' in emco_roles"
+
+ - name: Package monitor chart
+ command: /usr/local/bin/helm package monitor
+ args:
+ chdir: "{{ emco_dir }}/deployments/helm"
+ when: "'monitor' in emco_roles"
+
+ - name: Install monitor helm chart
+ command: /usr/local/bin/helm install --wait --namespace emco --set registryPrefix={{ emco_repository }} --set tag={{ emco_version }} monitor monitor-0.1.0.tgz
args:
- chdir: /opt/multicloud/deployments/helm/v2/emco
+ chdir: "{{ emco_dir }}/deployments/helm"
+ when: "'monitor' in emco_roles"
+
+- hosts: kube-master
+ become: yes
+ tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+
+ - name: Get emco host address
+ shell: "/usr/local/bin/kubectl -n kube-system get configmap/kubeadm-config -o yaml | grep advertiseAddress: | awk '{print $2}'"
+ register: emco_host
+ when: "'emco' in emco_roles"
+
+ - name: Install emcoctl config
+ template:
+ src: emcoconfig.yaml.j2
+ dest: "~/.emco.yaml"
+ mode: 0640
+ become: no
+ run_once: yes
+ vars:
+ host: "{{ emco_host.stdout }}"
+ when: "'emco' in emco_roles"
- - debug:
- var: helm_install.stdout_lines
+ - name: Install emcoctl
+ copy:
+ src: "{{ emco_dir }}/bin/emcoctl/emcoctl"
+ dest: "/usr/local/bin/emcoctl"
+ mode: 0755
+ when: "'emco' in emco_roles"
diff --git a/kud/deployment_infra/playbooks/configure-kata-webhook-reset.yml b/kud/deployment_infra/playbooks/configure-kata-webhook-reset.yml
new file mode 100644
index 00000000..4c25613a
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-kata-webhook-reset.yml
@@ -0,0 +1,30 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+- hosts: localhost
+ become: yes
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: "{{ playbook_dir }}/kud-vars.yml"
+ tasks:
+ - name: Remove Kata webhook
+ command: "/usr/local/bin/kubectl delete -f {{ kata_webhook_dest }}/deploy/webhook-{{ kata_webhook_runtimeclass }}.yaml"
+
+ - name: Remove Kata mutating webhook configuration
+ command: "/usr/local/bin/kubectl delete -f {{ kata_webhook_dest }}/deploy/webhook-registration.yaml"
+
+ - name: Remove Kata webhook certs
+ command: "/usr/local/bin/kubectl delete -f {{ kata_webhook_dest }}/deploy/webhook-certs.yaml" \ No newline at end of file
diff --git a/kud/deployment_infra/playbooks/configure-kata-webhook.yml b/kud/deployment_infra/playbooks/configure-kata-webhook.yml
new file mode 100644
index 00000000..cb11bdf7
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-kata-webhook.yml
@@ -0,0 +1,69 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+- hosts: localhost
+ become: yes
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: "{{ playbook_dir }}/kud-vars.yml"
+ tasks:
+    - name: Create Kata webhook folder
+ file:
+ state: directory
+ path: "{{ kata_webhook_dest }}/deploy"
+ ignore_errors: yes
+
+ - name: Download Kata webhook script
+ get_url:
+ url: "{{ kata_webhook_script_url }}"
+ dest: "{{ kata_webhook_dest }}"
+
+ - name: Download Kata webhook registration yaml
+ get_url:
+ url: "{{ kata_webhook_registration_url }}"
+ dest: "{{ kata_webhook_dest }}/deploy"
+
+ - name: Download Kata webhook deployment yaml
+ get_url:
+ url: "{{ kata_webhook_deployment_url }}"
+ dest: "{{ kata_webhook_dest }}/deploy"
+
+    - name: Make create-certs.sh executable
+ shell: "chmod +x create-certs.sh"
+ args:
+ chdir: "{{ kata_webhook_dest }}"
+ warn: False
+
+ - name: Modify webhook for {{ kata_webhook_runtimeclass }} Runtimeclass
+ shell: "sed 's/value: kata/value: {{ kata_webhook_runtimeclass }}/g' webhook.yaml | tee webhook-{{ kata_webhook_runtimeclass }}.yaml"
+ args:
+ chdir: "{{ kata_webhook_dest }}/deploy"
+ warn: False
+
+ - name: Create Kata webhook secret
+ command: "{{ kata_webhook_dest }}/create-certs.sh"
+ args:
+ chdir: "{{ kata_webhook_dest }}"
+ warn: False
+
+ - name: Apply Kata webhook certs
+ command: "/usr/local/bin/kubectl apply -f {{ kata_webhook_dest }}/deploy/webhook-certs.yaml"
+
+ - name: Apply Kata mutating webhook configuration
+ command: "/usr/local/bin/kubectl apply -f {{ kata_webhook_dest }}/deploy/webhook-registration.yaml"
+
+ - name: Apply Kata webhook
+ command: "/usr/local/bin/kubectl apply -f {{ kata_webhook_dest }}/deploy/webhook-{{ kata_webhook_runtimeclass }}.yaml" \ No newline at end of file
diff --git a/kud/deployment_infra/playbooks/configure-kata.yml b/kud/deployment_infra/playbooks/configure-kata.yml
new file mode 100644
index 00000000..664d3521
--- /dev/null
+++ b/kud/deployment_infra/playbooks/configure-kata.yml
@@ -0,0 +1,29 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+- hosts: kube-master
+ become: yes
+ pre_tasks:
+ - name: Load kud variables
+ include_vars:
+ file: kud-vars.yml
+ tasks:
+ - name: Apply Kata rbac roles
+ command: "/usr/local/bin/kubectl apply -f {{ kata_rbac_url }}"
+ - name: Apply Kata Deploy
+ command: "/usr/local/bin/kubectl apply -f {{ kata_deploy_url }}"
+ - name: Setup Kata runtime classes
+ command: "/usr/local/bin/kubectl apply -f {{ kata_runtimeclass_url }}"
+
diff --git a/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch b/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch
new file mode 100644
index 00000000..44c72b6c
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emco-monitor-openness-21.03.patch
@@ -0,0 +1,13 @@
+diff --git a/deployments/helm/monitor/templates/clusterrolebinding.yaml b/deployments/helm/monitor/templates/clusterrolebinding.yaml
+index 70305e50..6616787b 100644
+--- a/deployments/helm/monitor/templates/clusterrolebinding.yaml
++++ b/deployments/helm/monitor/templates/clusterrolebinding.yaml
+@@ -7,7 +7,7 @@ metadata:
+ subjects:
+ - kind: ServiceAccount
+ name: monitor
+- namespace: default
++ namespace: {{ .Release.Namespace }}
+ roleRef:
+ kind: ClusterRole
+ name: monitor
diff --git a/kud/deployment_infra/playbooks/emcoconfig.yaml.j2 b/kud/deployment_infra/playbooks/emcoconfig.yaml.j2
new file mode 100644
index 00000000..0131cd88
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emcoconfig.yaml.j2
@@ -0,0 +1,21 @@
+orchestrator:
+ host: {{ host }}
+ port: 30415
+clm:
+ host: {{ host }}
+ port: 30461
+ncm:
+ host: {{ host }}
+ port: 30431
+ovnaction:
+ host: {{ host }}
+ port: 30471
+dcm:
+ host: {{ host }}
+ port: 30477
+gac:
+ host: {{ host }}
+ port: 30491
+dtc:
+ host: {{ host }}
+ port: 30481
diff --git a/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch b/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch
new file mode 100644
index 00000000..a0b308d3
--- /dev/null
+++ b/kud/deployment_infra/playbooks/emcoctl-openness-21.03.patch
@@ -0,0 +1,13 @@
+diff --git a/src/tools/emcoctl/cmd/utils.go b/src/tools/emcoctl/cmd/utils.go
+index 9f0821e..3d16b92 100644
+--- a/src/tools/emcoctl/cmd/utils.go
++++ b/src/tools/emcoctl/cmd/utils.go
+@@ -106,7 +106,7 @@ func readResources() []Resources {
+ return []Resources{}
+ }
+ valDec := yaml.NewDecoder(v)
+- var mapDoc map[string]string
++ var mapDoc interface{}
+ if valDec.Decode(&mapDoc) != nil {
+ fmt.Println("Values file format incorrect:", "error", err, "filename", valuesFiles[0])
+ return []Resources{}
diff --git a/kud/deployment_infra/playbooks/install_iavf_drivers.sh b/kud/deployment_infra/playbooks/install_iavf_drivers.sh
index 7a54e9f2..dd01b062 100755
--- a/kud/deployment_infra/playbooks/install_iavf_drivers.sh
+++ b/kud/deployment_infra/playbooks/install_iavf_drivers.sh
@@ -3,7 +3,7 @@
# Based on:
# https://gerrit.akraino.org/r/#/c/icn/+/1359/1/deploy/kud-plugin-addons/device-plugins/sriov/driver/install_iavf_drivers.sh
-nic_models=(XL710 X722)
+nic_models=(X710 XL710 X722)
nic_drivers=(i40e)
device_checkers=(is_not_used is_driver_match is_model_match)
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index 51607020..24a9ef98 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -41,10 +41,8 @@ istio_version: 1.0.3
istio_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-linux.tar.gz"
# Intel CPU Manager for Kubernetes
-cmk_enabled: true
cmk_namespace: kube-system
cmk_use_all_hosts: false # 'true' will deploy CMK on the master nodes too
-cmk_untaint_nodes: [compute01, compute02]
cmk_shared_num_cores: 1 # number of CPU cores to be assigned to the "shared" pool on each of the nodes
cmk_exclusive_num_cores: 2 # number of CPU cores to be assigned to the "exclusive" pool on each of the nodes
cmk_git_url: "https://github.com/intel/CPU-Manager-for-Kubernetes.git"
@@ -53,10 +51,8 @@ cmk_dir: "/tmp/cmk"
registry_local_address: "localhost:5000"
cmk_pkgs: make,jq
cmk_untaint_required: true
-#cmk_shared_mode: packed # choose between: packed, spread, default: packed
-#cmk_exclusive_mode: packed # choose between: packed, spread, default: packed
-go_version: '1.12.5'
+go_version: '1.14.15'
kubespray_version: 2.14.1
# This matches the helm_version from kubespray defaults
helm_client_version: 3.2.4
@@ -88,3 +84,45 @@ cpu_manager:
checkpoint_file: "/var/lib/kubelet/cpu_manager_state"
topology_manager:
policy: "best-effort" # Options: none (disabled), best-effort (default), restricted, single-numa-node
+
+emco_git_url: "https://github.com/open-ness/EMCO.git"
+emco_repository: "integratedcloudnative/"
+emco_version: "openness-21.03"
+emco_dir: "/opt/emco"
+emco_values:
+ global:
+ repository: "{{ emco_repository }}"
+ pullPolicy: IfNotPresent
+ emco-services:
+ orchestrator:
+ imageTag: "{{ emco_version }}"
+ ncm:
+ imageTag: "{{ emco_version }}"
+ rsync:
+ imageTag: "{{ emco_version }}"
+ clm:
+ imageTag: "{{ emco_version }}"
+ ovnaction:
+ imageTag: "{{ emco_version }}"
+ dcm:
+ imageTag: "{{ emco_version }}"
+ dtc:
+ imageTag: "{{ emco_version }}"
+ gac:
+ imageTag: "{{ emco_version }}"
+emcoconfig_localhost: true
+emcoctl_localhost: true
+emco_roles:
+- emco
+- monitor
+
+kata_version: 2.1.0-rc0
+kata_rbac_url: "https://raw.githubusercontent.com/kata-containers/kata-containers/{{ kata_version }}/tools/packaging/kata-deploy/kata-rbac/base/kata-rbac.yaml"
+kata_deploy_url: "https://raw.githubusercontent.com/kata-containers/kata-containers/{{ kata_version }}/tools/packaging/kata-deploy/kata-deploy/base/kata-deploy.yaml"
+kata_runtimeclass_url: "https://raw.githubusercontent.com/kata-containers/kata-containers/{{ kata_version }}/tools/packaging/kata-deploy/runtimeclasses/kata-runtimeClasses.yaml"
+kata_webhook_dest: "{{ base_dest }}/kata_webhook"
+kata_webhook_version: 2.1.0-rc0
+kata_webhook_script_url: "https://raw.githubusercontent.com/kata-containers/tests/{{ kata_webhook_version }}/kata-webhook/create-certs.sh"
+kata_webhook_registration_url: "https://raw.githubusercontent.com/kata-containers/tests/{{ kata_webhook_version }}/kata-webhook/deploy/webhook-registration.yaml.tpl"
+kata_webhook_deployment_url: "https://raw.githubusercontent.com/kata-containers/tests/{{ kata_webhook_version }}/kata-webhook/deploy/webhook.yaml"
+kata_webhook_runtimeclass: "kata-clh"
diff --git a/kud/deployment_infra/playbooks/sriov_hardware_check.sh b/kud/deployment_infra/playbooks/sriov_hardware_check.sh
index 662c28c8..980fef67 100644
--- a/kud/deployment_infra/playbooks/sriov_hardware_check.sh
+++ b/kud/deployment_infra/playbooks/sriov_hardware_check.sh
@@ -12,14 +12,8 @@ set -o pipefail
source /etc/environment
-ethernet_adpator_version=$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )
-if [ -z "$ethernet_adpator_version" ]; then
- echo "False"
- exit 0
-fi
-SRIOV_ENABLED=${ethernet_adpator_version:-"false"}
-#checking for the right hardware version of NIC on the machine
-if [ "$ethernet_adpator_version" == "XL710" ]; then
+adaptors="X710 XL710 X722"
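+# ${adaptors// /\\|} replaces each space with "\|", producing the grep
+# alternation X710\|XL710\|X722 used inside the \( \) group below.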
+if [[ $(lspci | grep -c "Ethernet .* \(${adaptors// /\\|}\)") != "0" ]]; then
echo "True"
else
echo "False"
diff --git a/kud/deployment_infra/profiles/cpu-manager/manifest.yaml b/kud/deployment_infra/profiles/cpu-manager/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/cpu-manager/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/cpu-manager/override_values.yaml b/kud/deployment_infra/profiles/cpu-manager/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/cpu-manager/override_values.yaml
diff --git a/kud/deployment_infra/profiles/multus-cni/manifest.yaml b/kud/deployment_infra/profiles/multus-cni/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/multus-cni/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/multus-cni/override_values.yaml b/kud/deployment_infra/profiles/multus-cni/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/multus-cni/override_values.yaml
diff --git a/kud/deployment_infra/profiles/node-feature-discovery/manifest.yaml b/kud/deployment_infra/profiles/node-feature-discovery/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/node-feature-discovery/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/node-feature-discovery/override_values.yaml b/kud/deployment_infra/profiles/node-feature-discovery/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/node-feature-discovery/override_values.yaml
diff --git a/kud/deployment_infra/profiles/ovn4nfv/manifest.yaml b/kud/deployment_infra/profiles/ovn4nfv/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/ovn4nfv/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/ovn4nfv/override_values.yaml b/kud/deployment_infra/profiles/ovn4nfv/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/ovn4nfv/override_values.yaml
diff --git a/kud/deployment_infra/profiles/qat-device-plugin/manifest.yaml b/kud/deployment_infra/profiles/qat-device-plugin/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/qat-device-plugin/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/qat-device-plugin/override_values.yaml b/kud/deployment_infra/profiles/qat-device-plugin/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/qat-device-plugin/override_values.yaml
diff --git a/kud/deployment_infra/profiles/sriov-network-operator/manifest.yaml b/kud/deployment_infra/profiles/sriov-network-operator/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/sriov-network-operator/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/sriov-network-operator/override_values.yaml b/kud/deployment_infra/profiles/sriov-network-operator/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/sriov-network-operator/override_values.yaml
diff --git a/kud/deployment_infra/profiles/sriov-network/manifest.yaml b/kud/deployment_infra/profiles/sriov-network/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/sriov-network/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/sriov-network/override_values.yaml b/kud/deployment_infra/profiles/sriov-network/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/sriov-network/override_values.yaml
diff --git a/kud/hosting_providers/containerized/README.md b/kud/hosting_providers/containerized/README.md
index 2f9a9e52..bd5b08a8 100644
--- a/kud/hosting_providers/containerized/README.md
+++ b/kud/hosting_providers/containerized/README.md
@@ -21,9 +21,9 @@ KUD installation installer is divided into two regions with args - `--install-pk
* Container image is built using the `installer --install-pkg` arg and a Kubernetes job is used to install the cluster using `installer --cluster <cluster-name>`. The installer will invoke the kubespray cluster.yml, kud-addons and plugins ansible playbooks.
-Installer script finds the `hosts.init` for each cluster in `/opt/multi-cluster/<cluster-name>`
+The installer script finds the `hosts.ini` for each cluster in `/opt/multi-cluster/<cluster-name>`.
-Kubernetes jobs(a cluster per job) are used to install multiple clusters and logs of each cluster deployments are stored in the `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts are stored as follows `/opt/kud/multi-cluster/<cluster-name>/artifacts`
+Kubernetes jobs (one cluster per job) are used to install multiple clusters. Logs of each cluster deployment are stored in `/opt/kud/multi-cluster/<cluster-name>/logs` and artifacts in `/opt/kud/multi-cluster/<cluster-name>/artifacts`.
## Creating TestBed for Testing and Development
@@ -38,26 +38,31 @@ $ pushd multicloud-k8s/kud/hosting_providers/containerized/testing
$ vagrant up
$ popd
```
-Do following steps to keep note of
+Complete the following steps and take note of the results:
1. Get the IP address for the Vagrant machine - <VAGRANT_IP_ADDRESS>
2. Copy the host /root/.ssh/id_rsa.pub into the vagrant /root/.ssh/authorized_keys
3. From the host, make sure you can ssh into the vagrant machine without a password: `ssh root@<VAGRANT_IP_ADDRESS>` (see the sketch below)
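+
+One way to carry out steps 2 and 3 (a sketch; assumes `ssh-copy-id` is
+available on the host):
+
+```
+$ ssh-copy-id -i /root/.ssh/id_rsa.pub root@<VAGRANT_IP_ADDRESS>
+$ ssh root@<VAGRANT_IP_ADDRESS> -- hostname
+```
+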
## Quickstart Installation Guide
-Build the kud docker images as follows, add KUD_ENABLE_TESTS & KUD_PLUGIN_ENABLED for the testing only:
+Build the kud docker images as follows. Add `KUD_ENABLE_TESTS` & `KUD_PLUGIN_ENABLED`
+for testing only. Currently docker and containerd are the only supported CRI
+runtimes; select one with the `CONTAINER_RUNTIME` environment variable. To run
+secure containers using Kata Containers, the CRI runtime must be set to
+containerd.
```
$ git clone https://github.com/onap/multicloud-k8s.git && cd multicloud-k8s
-$ docker build --rm \
+$ docker build --rm \
--build-arg http_proxy=${http_proxy} \
--build-arg HTTP_PROXY=${HTTP_PROXY} \
--build-arg https_proxy=${https_proxy} \
--build-arg HTTPS_PROXY=${HTTPS_PROXY} \
--build-arg no_proxy=${no_proxy} \
--build-arg NO_PROXY=${NO_PROXY} \
- --build-arg KUD_ENABLE_TESTS=true \
- --build-arg KUD_PLUGIN_ENABLED=true \
+ --build-arg KUD_ENABLE_TESTS=true \
+ --build-arg KUD_PLUGIN_ENABLED=true \
+ --build-arg CONTAINER_RUNTIME=docker \
-t github.com/onap/multicloud-k8s:latest . -f kud/build/Dockerfile
```
Let's create a cluster-101 and cluster-102 hosts.ini as follows
@@ -66,7 +71,7 @@ Let's create a cluster-101 and cluster-102 hosts.ini as follows
$ mkdir -p /opt/kud/multi-cluster/{cluster-101,cluster-102}
```
-Create hosts.ini as follows in the direcotry cluster-101(c01 IP address 10.10.10.3) and cluster-102(c02 IP address 10.10.10.5). If user used Vagrant setup as mentioned in the above steps, replace the IP address with vagrant IP address
+Create the hosts.ini as follows in the directories cluster-101 (c01, IP address 10.10.10.3) and cluster-102 (c02, IP address 10.10.10.5). If you used the Vagrant setup from the steps above, replace the IP addresses with the vagrant IP address.
```
$ cat /opt/kud/multi-cluster/cluster-101/hosts.ini
@@ -97,7 +102,7 @@ kube-master
```
Do the same for the cluster-102 with c01 and IP address 10.10.10.5.
-Create the ssh secret for Baremetal or VM based on your deployment. and Launch the kubernetes job as follows
+Create the ssh secret for baremetal or VM, depending on your deployment, then launch the Kubernetes job as follows.
```
$ kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub
$ CLUSTER_NAME=cluster-101
diff --git a/kud/hosting_providers/containerized/addons/README.md.tmpl b/kud/hosting_providers/containerized/addons/README.md.tmpl
new file mode 100644
index 00000000..8ab16104
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/README.md.tmpl
@@ -0,0 +1,45 @@
+# Installing KUD addons with emcoctl
+
+1. Customize values.yaml and values-resources.yaml as needed
+
+To create a customized profile for a specific addon, edit the profile
+as needed, and then re-package it (for example, for cpu-manager):
+
+```
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/helm .
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager_profile.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/profile .
+```
+
+2. Create prerequisites to deploy addons
+
+Apply prerequisites.yaml. This step is optional. If there are
+existing resources in the cluster, it is sufficient to customize
+values.yaml with the values of those resources. The supplied
+prerequisites.yaml creates controllers, one project, one cluster, and
+one logical cloud.
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml\`
+
+3. Deploy addons
+
+Apply composite-app.yaml with each values file. This deploys the addons
+listed in the \`Apps\` value of the corresponding values file.
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml\`
+
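+For example, to deploy only a subset of the addons, trim the \`Apps\`
+list in values.yaml before applying composite-app.yaml (a sketch; field
+names are from the supplied values.yaml template):
+
+```
+Apps:
+- multus-cni
+- ovn4nfv
+```
+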
+# Uninstalling KUD addons with emcoctl
+
+1. Delete addons
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml\`
+
+2. Cleanup prerequisites
+
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml\`
+
+#### NOTE: Known issue: deletion of the resources sometimes fails, as
+some resources cannot be deleted before others. This can happen due to
+a timing issue. In that case, retry the deletion and it should succeed.
diff --git a/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl b/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
new file mode 100644
index 00000000..ed568238
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
@@ -0,0 +1,19 @@
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- sriov-network
+CompositeApp: addon-resources
+CompositeProfile: addon-resources-profile
+DeploymentIntentGroup: addon-resources-deployment-intent-group
+DeploymentIntent: addon-resources-deployment-intent
+GenericPlacementIntent: addon-resources-placement-intent
diff --git a/kud/hosting_providers/containerized/addons/values.yaml.tmpl b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
new file mode 100644
index 00000000..62936beb
--- /dev/null
+++ b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
@@ -0,0 +1,24 @@
+HostIP: ${HOST_IP}
+KubeConfig: ${KUBE_PATH}
+PackagesPath: ${PACKAGES_PATH}
+ProjectName: proj1
+RsyncPort: 30441
+GacPort: 30493
+OvnPort: 30473
+DtcPort: 30483
+ClusterProvider: provider1
+Cluster1: cluster1
+ClusterLabel: edge-cluster
+LogicalCloud: default
+Apps:
+- multus-cni
+- ovn4nfv
+- node-feature-discovery
+- sriov-network-operator
+- qat-device-plugin
+- cpu-manager
+CompositeApp: addons
+CompositeProfile: addons-profile
+DeploymentIntentGroup: addons-deployment-intent-group
+DeploymentIntent: addons-deployment-intent
+GenericPlacementIntent: addons-placement-intent
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index b2ec52af..427850ab 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -22,7 +22,7 @@ function install_prerequisites {
find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
apt-get update
apt-get install -y curl vim wget git \
- software-properties-common python-pip sudo
+ software-properties-common python-pip sudo gettext-base
add-apt-repository -y ppa:longsleep/golang-backports
apt-get update
apt-get install -y golang-go rsync
@@ -77,29 +77,44 @@ function install_kubespray {
fi
}
-# install_k8s() - Install Kubernetes using kubespray tool
+# install_k8s() - Install Kubernetes using kubespray tool including Kata
function install_k8s {
local cluster_name=$1
ansible-playbook $verbose -i \
$kud_inventory $kud_playbooks/preconfigure-kubespray.yml \
--become --become-user=root | \
tee $cluster_log/setup-kubernetes.log
- ansible-playbook $verbose -i \
- $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
- -e cluster_name=$cluster_name --become --become-user=root | \
- tee $cluster_log/setup-kubernetes.log
+ if [ "$container_runtime" == "docker" ]; then
+ echo "Docker will be used as the container runtime interface"
+ ansible-playbook $verbose -i \
+ $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
+ -e cluster_name=$cluster_name --become --become-user=root | \
+ tee $cluster_log/setup-kubernetes.log
+ elif [ "$container_runtime" == "containerd" ]; then
+ echo "Containerd will be used as the container runtime interface"
+ ansible-playbook $verbose -i \
+ $kud_inventory $dest_folder/kubespray-$version/cluster.yml \
+ -e $kud_kata_override_variables -e cluster_name=$cluster_name \
+ --become --become-user=root | \
+ tee $cluster_log/setup-kubernetes.log
+ #Install Kata Containers in containerd scenario
+ ansible-playbook $verbose -i \
+ $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-kata.yml | \
+ tee $cluster_log/setup-kata.log
+ else
+ echo "Only Docker or Containerd are supported container runtimes"
+ exit 1
+ fi
# Configure environment
+ # Requires kubeconfig_localhost and kubectl_localhost to be true
+ # in inventory/group_vars/k8s-cluster.yml
mkdir -p $HOME/.kube
cp $kud_inventory_folder/artifacts/admin.conf $HOME/.kube/config
- # Copy Kubespray kubectl to be usable in host running Ansible.
- # Requires kubectl_localhost: true in inventory/group_vars/k8s-cluster.yml
if !(which kubectl); then
cp $kud_inventory_folder/artifacts/kubectl /usr/local/bin/
fi
-
- cp -rf $kud_inventory_folder/artifacts \
- /opt/kud/multi-cluster/$cluster_name/
}
# install_addons() - Install Kubernetes AddOns
@@ -118,21 +133,37 @@ function install_addons {
$kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i \
- $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | \
- tee $cluster_log/setup-kud.log
- # The order of KUD_ADDONS is important: some plugins (sriov, qat)
- # require nfd to be enabled.
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml \
+ | tee $cluster_log/setup-kud.log
+
+ kud_addons="${KUD_ADDONS:-} ${plugins_name}"
+
+ for addon in ${kud_addons}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
ansible-playbook $verbose -i \
- $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | \
+ $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-${addon}.yml | \
tee $cluster_log/setup-${addon}.log
done
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
failed_kud_tests=""
- for addon in ${KUD_ADDONS:-virtlet ovn4nfv nfd sriov qat cmk $plugins_name}; do
+ # Run Kata test first if Kata was installed
+ if [ "$container_runtime" == "containerd" ]; then
+ #Install Kata webhook for test pods
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/setup-kata-webhook.log
+ kata_webhook_deployed=true
+ pushd $kud_tests
+ bash kata.sh || failed_kud_tests="${failed_kud_tests} kata"
+ popd
+ fi
+ #Run other plugin tests
+ for addon in ${kud_addons}; do
pushd $kud_tests
bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
case $addon in
@@ -144,21 +175,80 @@ function install_addons {
;;
"emco" )
echo "Test the emco plugin installation"
- for functional_test in plugin_fw_v2; do
- bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
- done
+ # TODO plugin_fw_v2 requires virtlet and a patched multus to succeed
+ # for functional_test in plugin_fw_v2; do
+ # bash ${functional_test}.sh --external || failed_kud_tests="${failed_kud_tests} ${functional_test}"
+ # done
;;
esac
popd
done
+ # Remove Kata webhook if user didn't want it permanently installed
+ if ! [ "$enable_kata_webhook" == "true" ] && [ "$kata_webhook_deployed" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook-reset.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/kata-webhook-reset.log
+ kata_webhook_deployed=false
+ fi
if [[ ! -z "$failed_kud_tests" ]]; then
echo "Test cases failed:${failed_kud_tests}"
return 1
fi
fi
+
+ # Check if Kata webhook should be installed and isn't already installed
+ if [ "$enable_kata_webhook" == "true" ] && ! [ "$kata_webhook_deployed" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $cluster_log/setup-kata-webhook.log
+ fi
+
echo "Add-ons deployment complete..."
}
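+# master_ip() - Get the master node IP from the kubeconfig server URL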
+function master_ip {
+ kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}'
+}
+
+# Copy installation artifacts to be usable in host running Ansible
+function install_host_artifacts {
+ local -r cluster_name=$1
+ local -r host_dir="/opt/kud/multi-cluster"
+ local -r host_addons_dir="${host_dir}/addons"
+ local -r host_artifacts_dir="${host_dir}/${cluster_name}/artifacts"
+
+ for addon in cpu-manager multus-cni node-feature-discovery ovn4nfv qat-device-plugin sriov-network sriov-network-operator; do
+ mkdir -p ${host_addons_dir}/${addon}/{helm,profile}
+ cp -r ${kud_infra_folder}/helm/${addon} ${host_addons_dir}/${addon}/helm
+ cp -r ${kud_infra_folder}/profiles/${addon}/* ${host_addons_dir}/${addon}/profile
+ tar -czf ${host_addons_dir}/${addon}.tar.gz -C ${host_addons_dir}/${addon}/helm .
+ tar -czf ${host_addons_dir}/${addon}_profile.tar.gz -C ${host_addons_dir}/${addon}/profile .
+ done
+
+ mkdir -p ${host_addons_dir}/tests
+ for test in _common _common_test _functions multus ovn4nfv nfd sriov-network qat cmk; do
+ cp ${kud_tests}/${test}.sh ${host_addons_dir}/tests
+ done
+
+ mkdir -p ${host_artifacts_dir}
+ cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}
+
+ mkdir -p ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/examples/prerequisites.yaml ${host_artifacts_dir}/addons
+ cp ${kud_infra_folder}/emco/composite-app.yaml ${host_artifacts_dir}/addons
+ for template in addons/*.tmpl; do
+ CLUSTER_NAME="${cluster_name}" \
+ HOST_IP="$(master_ip)" \
+ KUBE_PATH="${host_artifacts_dir}/admin.conf" \
+ PACKAGES_PATH="${host_addons_dir}" \
+ envsubst <${template} >${host_artifacts_dir}/${template%.tmpl}
+ done
+}
+
# _print_kubernetes_info() - Prints the login Kubernetes information
function _print_kubernetes_info {
if ! $(kubectl version &>/dev/null); then
@@ -172,11 +262,8 @@ function _print_kubernetes_info {
KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" \
kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
- awk -F ":" '{print $2}')
-
printf "Kubernetes Info\n===============\n" > $k8s_info_file
- echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Dashboard URL: https://$(master_ip):$node_port" >> $k8s_info_file
echo "Admin user: kube" >> $k8s_info_file
echo "Admin password: secret" >> $k8s_info_file
}
@@ -195,6 +282,15 @@ kud_playbooks=$kud_infra_folder/playbooks
kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
testing_enabled=${KUD_ENABLE_TESTS:-false}
+container_runtime=${CONTAINER_RUNTIME:-docker}
+enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
+kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-qemu}
+kata_webhook_deployed=false
+# For containerd the etcd_deployment_type: docker is the default and doesn't work.
+# You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
+# See https://github.com/kubernetes-sigs/kubespray/issues/5713
+kud_kata_override_variables="container_manager=containerd \
+ -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs"
mkdir -p /opt/csar
export CSAR_DIR=/opt/csar
@@ -219,6 +315,8 @@ function install_cluster {
fi
echo "installed the addons"
+ install_host_artifacts $1
+
_print_kubernetes_info
}
@@ -299,6 +397,7 @@ if [ "$1" == "--cluster" ]; then
exit 0
fi
+
echo "Error: Refer the installer usage"
usage
exit 1
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index 18a55035..7d0404a5 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -51,6 +51,7 @@ local_volume_provisioner_enabled: true
# Helm deployment
helm_enabled: true
+helm_stable_repo_url: "https://charts.helm.sh/stable"
# Kube-proxy proxyMode configuration.
# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
@@ -86,10 +87,20 @@ podsecuritypolicy_enabled: true
# allowedCapabilities:
# - '*'
# by
+# allowedCapabilities:
+# - NET_ADMIN
+# - SYS_ADMIN
+# - SYS_NICE
+# - SYS_PTRACE
# requiredDropCapabilities:
# - NET_RAW
podsecuritypolicy_restricted_spec:
privileged: true
+ allowedCapabilities:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_NICE
+ - SYS_PTRACE
allowPrivilegeEscalation: true
volumes:
- '*'
diff --git a/kud/hosting_providers/vagrant/README.md b/kud/hosting_providers/vagrant/README.md
index 3d0766b3..3a93a73e 100644
--- a/kud/hosting_providers/vagrant/README.md
+++ b/kud/hosting_providers/vagrant/README.md
@@ -39,6 +39,20 @@ the following instructions:
In-depth documentation and use cases of various Vagrant commands [Vagrant commands][3]
is available on the Vagrant site.
+### CRI Runtimes
+
+Currently both docker and containerd are supported CRI runtimes. If nothing is
+specified, docker is used by default. This can be changed by setting the
+`CONTAINER_RUNTIME` environment variable. To run secure containers using Kata
+Containers, the CRI runtime must be changed to containerd.
+
+```
+$ export CONTAINER_RUNTIME=containerd
+```
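+
+Once the cluster is up with containerd, the Kata runtime classes can be
+exercised with the Kata test under kud/tests (a sketch; assumes kubectl
+is configured for the cluster):
+
+```
+$ cd kud/tests && bash kata.sh
+```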
+
## License
Apache-2.0
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 2a15de33..c88dc9e6 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -36,6 +36,8 @@ function _install_go {
export PATH=$PATH:/usr/local/go/bin
sudo sed -i "s|^PATH=.*|PATH=\"$PATH\"|" /etc/environment
+ #allow golang to work with sudo
+ sudo sed -i 's|secure_path="\([^"]\+\)"|secure_path="\1:/usr/local/go/bin"|' /etc/sudoers
}
# _install_pip() - Install Python Package Manager
@@ -140,8 +142,31 @@ function install_k8s {
echo "https_proxy: \"$https_proxy\"" | tee --append $kud_inventory_folder/group_vars/all.yml
fi
export ANSIBLE_CONFIG=$dest_folder/kubespray-$version/ansible.cfg
- ansible-playbook $verbose -i $kud_inventory $kud_playbooks/preconfigure-kubespray.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
- ansible-playbook $verbose -i $kud_inventory $dest_folder/kubespray-$version/cluster.yml --become --become-user=root | sudo tee $log_folder/setup-kubernetes.log
+
+ ansible-playbook $verbose -i $kud_inventory \
+ $kud_playbooks/preconfigure-kubespray.yml --become --become-user=root \
+ | sudo tee $log_folder/setup-kubernetes.log
+ if [ "$container_runtime" == "docker" ]; then
+ /bin/echo -e "\n\e[1;42mDocker will be used as the container runtime interface\e[0m"
+ ansible-playbook $verbose -i $kud_inventory \
+ $dest_folder/kubespray-$version/cluster.yml --become \
+ --become-user=root | sudo tee $log_folder/setup-kubernetes.log
+ elif [ "$container_runtime" == "containerd" ]; then
+ /bin/echo -e "\n\e[1;42mContainerd will be used as the container runtime interface\e[0m"
+ # Because kud_kata_override_variables contains its own quoting,
+ # an eval is needed to properly execute the ansible command
+ ansible_kubespray_cmd="ansible-playbook $verbose -i $kud_inventory \
+ $dest_folder/kubespray-$version/cluster.yml \
+ -e ${kud_kata_override_variables} --become --become-user=root | \
+ sudo tee $log_folder/setup-kubernetes.log"
+ eval $ansible_kubespray_cmd
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-kata.yml --become --become-user=root | \
+ sudo tee $log_folder/setup-kata.log
+ else
+ echo "Only Docker or Containerd are supported container runtimes"
+ exit 1
+ fi
# Configure environment
mkdir -p $HOME/.kube
@@ -157,32 +182,72 @@ function install_addons {
_install_ansible
sudo ansible-galaxy install $verbose -r $kud_infra_folder/galaxy-requirements.yml --ignore-errors
ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-kud.yml | sudo tee $log_folder/setup-kud.log
+
# The order of KUD_ADDONS is important: some plugins (sriov, qat)
- # require nfd to be enabled.
- for addon in ${KUD_ADDONS:-topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
+ # require nfd to be enabled. Some addons are not currently supported with containerd
+ if [ "${container_runtime}" == "docker" ]; then
+ kud_addons=${KUD_ADDONS:-topology-manager virtlet ovn4nfv nfd sriov \
+ qat optane cmk}
+ elif [ "${container_runtime}" == "containerd" ]; then
+ kud_addons=${KUD_ADDONS:-ovn4nfv nfd}
+ fi
+
+ for addon in ${kud_addons}; do
echo "Deploying $addon using configure-$addon.yml playbook.."
- ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" $kud_playbooks/configure-${addon}.yml | sudo tee $log_folder/setup-${addon}.log
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ $kud_playbooks/configure-${addon}.yml | \
+ sudo tee $log_folder/setup-${addon}.log
done
+
echo "Run the test cases if testing_enabled is set to true."
if [[ "${testing_enabled}" == "true" ]]; then
failed_kud_tests=""
- for addon in ${KUD_ADDONS:-multus topology-manager virtlet ovn4nfv nfd sriov qat optane cmk}; do
+ # Run Kata test first if Kata was installed
+ if [ "${container_runtime}" == "containerd" ]; then
+ #Install Kata webhook for test pods
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $log_folder/setup-kata-webhook.log
+ kata_webhook_deployed=true
+ pushd $kud_tests
+ bash kata.sh || failed_kud_tests="${failed_kud_tests} kata"
+ popd
+ fi
+ # Run other plugin tests
+ for addon in ${kud_addons}; do
pushd $kud_tests
bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
popd
done
+ # Remove Kata webhook if user didn't want it permanently installed
+ if ! [ "${enable_kata_webhook}" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook-reset.yml \
+ --become --become-user=root | \
+ sudo tee $log_folder/kata-webhook-reset.log
+ fi
if [[ ! -z "$failed_kud_tests" ]]; then
echo "Test cases failed:${failed_kud_tests}"
return 1
fi
fi
+ # Check if Kata webhook should be installed and isn't already installed
+ if [ "$enable_kata_webhook" == "true" ] && ! [ "$kata_webhook_deployed" == "true" ]; then
+ ansible-playbook $verbose -i $kud_inventory -e "base_dest=$HOME" \
+ -e "kata_webhook_runtimeclass=$kata_webhook_runtimeclass" \
+ $kud_playbooks/configure-kata-webhook.yml \
+ --become --become-user=root | \
+ sudo tee $log_folder/setup-kata-webhook.log
+ fi
echo "Add-ons deployment complete..."
}
# install_plugin() - Install ONAP Multicloud Kubernetes plugin
function install_plugin {
echo "Installing multicloud/k8s plugin"
- _install_go
_install_docker
sudo -E pip install --no-cache-dir docker-compose
@@ -212,10 +277,10 @@ function _print_kubernetes_info {
KUBE_EDITOR="sed -i \"s|type\: ClusterIP|type\: NodePort|g\"" kubectl -n kube-system edit service kubernetes-dashboard
KUBE_EDITOR="sed -i \"s|nodePort\: .*|nodePort\: $node_port|g\"" kubectl -n kube-system edit service kubernetes-dashboard
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F ":" '{print $2}')
+ master_ip=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | awk -F '[:/]' '{print $4}')
printf "Kubernetes Info\n===============\n" > $k8s_info_file
- echo "Dashboard URL: https:$master_ip:$node_port" >> $k8s_info_file
+ echo "Dashboard URL: https://$master_ip:$node_port" >> $k8s_info_file
echo "Admin user: kube" >> $k8s_info_file
echo "Admin password: secret" >> $k8s_info_file
}
@@ -247,6 +312,17 @@ kud_playbooks=$kud_infra_folder/playbooks
kud_tests=$kud_folder/../../tests
k8s_info_file=$kud_folder/k8s_info.log
testing_enabled=${KUD_ENABLE_TESTS:-false}
+container_runtime=${CONTAINER_RUNTIME:-docker}
+enable_kata_webhook=${ENABLE_KATA_WEBHOOK:-false}
+kata_webhook_runtimeclass=${KATA_WEBHOOK_RUNTIMECLASS:-kata-clh}
+kata_webhook_deployed=false
+# For containerd the etcd_deployment_type: docker is the default and doesn't work.
+# You have to use either etcd_kubeadm_enabled: true or etcd_deployment_type: host
+# See https://github.com/kubernetes-sigs/kubespray/issues/5713
+kud_kata_override_variables="container_manager=containerd \
+ -e etcd_deployment_type=host -e kubelet_cgroup_driver=cgroupfs \
+ -e \"{'download_localhost': false}\" -e \"{'download_run_once': false}\""
+
sudo mkdir -p $log_folder
sudo mkdir -p /opt/csar
sudo chown -R $USER /opt/csar
@@ -260,6 +336,7 @@ echo "Removing ppa for jonathonf/python-3.6"
sudo ls /etc/apt/sources.list.d/ || true
sudo find /etc/apt/sources.list.d -maxdepth 1 -name '*jonathonf*' -delete || true
sudo apt-get update
+_install_go
install_k8s
_set_environment_file
install_addons
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index 5b06b788..7803f27a 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -52,6 +52,7 @@ local_volume_provisioner_enabled: true
# Helm deployment
helm_enabled: true
+helm_stable_repo_url: "https://charts.helm.sh/stable"
# Kube-proxy proxyMode configuration.
# NOTE: Ipvs is based on netfilter hook function, but uses hash table as the underlying data structure and
@@ -83,10 +84,20 @@ podsecuritypolicy_enabled: true
# allowedCapabilities:
# - '*'
# by
+# allowedCapabilities:
+# - NET_ADMIN
+# - SYS_ADMIN
+# - SYS_NICE
+# - SYS_PTRACE
# requiredDropCapabilities:
# - NET_RAW
podsecuritypolicy_restricted_spec:
privileged: true
+ allowedCapabilities:
+ - NET_ADMIN
+ - SYS_ADMIN
+ - SYS_NICE
+ - SYS_PTRACE
allowPrivilegeEscalation: true
volumes:
- '*'
diff --git a/kud/hosting_providers/vagrant/setup.sh b/kud/hosting_providers/vagrant/setup.sh
index db6a732c..7251872a 100755
--- a/kud/hosting_providers/vagrant/setup.sh
+++ b/kud/hosting_providers/vagrant/setup.sh
@@ -11,7 +11,7 @@
set -o nounset
set -o pipefail
-vagrant_version=2.2.4
+vagrant_version=2.2.14
if ! vagrant version &>/dev/null; then
enable_vagrant_install=true
else
@@ -94,7 +94,6 @@ case ${ID,,} in
;;
ubuntu|debian)
- libvirt_group="libvirtd"
INSTALLER_CMD="sudo -H -E apt-get -y -q=3 install"
packages+=(python-dev)
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index b56972c8..ff975544 100644
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -1108,8 +1108,8 @@ spec:
app: ovn4nfv
annotations:
k8s.v1.cni.cncf.io/networks: '[{ "name": "$ovn_multus_network_name"}]'
- k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net0" , "defaultGateway": "false"},
- { "name": "ovn-priv-net", "interface": "net1" , "defaultGateway": "false"}]}'
+ k8s.plugin.opnfv.org/nfn-network: '{ "type": "ovn4nfv", "interface": [{ "name": "ovn-port-net", "interface": "net2" , "defaultGateway": "false"},
+ { "name": "ovn-priv-net", "interface": "net3" , "defaultGateway": "false"}]}'
spec:
containers:
- name: $ovn4nfv_deployment_name
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 720470eb..7a3e97ab 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -25,6 +25,11 @@ function print_msg {
echo -e "${RED} $msg ---------------------------------------${NC}"
}
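+# ssh_cluster() - Run a command on the Kubernetes master node over ssh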
+function ssh_cluster {
+ master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
+ ssh -o StrictHostKeyChecking=no ${master_ip} -- "$@"
+}
+
function get_ovn_central_address {
#Reuse OVN_CENTRAL_ADDRESS if available (bypassable by --force flag)
if [[ "${1:-}" != "--force" ]] && [[ -n "${OVN_CENTRAL_ADDRESS:-}" ]]; then
diff --git a/kud/tests/emco.sh b/kud/tests/emco.sh
index 2b8eab1e..7cc3ca33 100755
--- a/kud/tests/emco.sh
+++ b/kud/tests/emco.sh
@@ -1,19 +1,7 @@
#!/bin/bash
-# Copyright 2020 Intel Corporation, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
set -o errexit
set -o nounset
@@ -35,6 +23,7 @@ rsync_service_port=30441
rsync_service_host="$master_ip"
base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
base_url_clm=${base_url_clm:-"http://$master_ip:30461/v2"}
+base_url_dcm=${base_url_dcm:-"http://$master_ip:30477/v2"}
CSAR_DIR="/opt/csar"
csar_id="cb009bfe-bbee-11e8-9766-525400435678"
@@ -94,6 +83,41 @@ labeldata="$(cat<<EOF
EOF
)"
+admin_logical_cloud_name="lcadmin"
+admin_logical_cloud_data="$(cat << EOF
+{
+ "metadata" : {
+ "name": "${admin_logical_cloud_name}",
+ "description": "logical cloud description",
+ "userData1":"<user data>",
+ "userData2":"<user data>"
+ },
+ "spec" : {
+ "level": "0"
+ }
+}
+EOF
+)"
+
+lc_cluster_1_name="lc1-c1"
+cluster_1_data="$(cat << EOF
+{
+ "metadata" : {
+ "name": "${lc_cluster_1_name}",
+ "description": "logical cloud cluster 1 description",
+ "userData1":"<user data>",
+ "userData2":"<user data>"
+ },
+
+ "spec" : {
+ "cluster-provider": "${clusterprovidername}",
+ "cluster-name": "${clustername}",
+ "loadbalancer-ip" : "0.0.0.0"
+ }
+}
+EOF
+)"
# add the rsync controller entry
rsynccontrollername="rsync"
@@ -316,7 +340,7 @@ deployment_intent_group_data="$(cat <<EOF
"profile":"${collection_composite_profile_name}",
"version":"${release}",
"override-values":[],
- "logical-cloud":"unused_logical_cloud"
+ "logical-cloud":"${admin_logical_cloud_name}"
}
}
EOF
@@ -352,6 +376,8 @@ function createOrchestratorData {
print_msg "creating project entry"
call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
+ createLogicalCloudData
+
print_msg "creating collection composite app entry"
call_api -d "${compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
@@ -403,27 +429,30 @@ function deleteOrchestratorData {
print_msg "Begin deleteOrchestratorData"
- delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
+ delete_resource_nox "${base_url_orchestrator}/controllers/${rsynccontrollername}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${prometheus_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${collectd_placement_intent_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${prometheus_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}/profiles/${collectd_profile_name}"
delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/composite-profiles/${collection_composite_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${prometheus_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/apps/${collectd_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+ delete_resource_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}"
+
+ deleteLogicalCloud
+
delete_resource_nox "${base_url_orchestrator}/projects/${projectname}"
print_msg "deleteOrchestratorData done"
@@ -443,12 +472,28 @@ function createClmData {
function deleteClmData {
print_msg "begin deleteClmData"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
+ delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}/labels/${labelname}"
delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${clustername}"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+ delete_resource_nox "${base_url_clm}/cluster-providers/${clusterprovidername}"
print_msg "deleteClmData done"
}
+function createLogicalCloudData {
+ print_msg "creating logical cloud"
+ call_api -d "${admin_logical_cloud_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds"
+ call_api -d "${cluster_1_data}" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references"
+}
+
+function getLogicalCloudData {
+ call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+ call_api_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+}
+
+function deleteLogicalCloud {
+ delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/cluster-references/${lc_cluster_1_name}"
+ delete_resource_nox "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}"
+}
+
function createData {
createClmData
createOrchestratorData
@@ -460,13 +505,25 @@ function deleteData {
}
function instantiate {
+ call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/instantiate"
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
- call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
+ # instantiate may fail due to the logical cloud not yet instantiated, so retry
+ try=0
+ until call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"; do
+ if [[ $try -lt 10 ]]; then
+ sleep 1s
+ else
+ return 1
+ fi
+ try=$((try + 1))
+ done
+ return 0
}
-
function terminateOrchData {
call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${collection_compositeapp_name}/${compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
+ call_api -d "{ }" "${base_url_dcm}/projects/${projectname}/logical-clouds/${admin_logical_cloud_name}/terminate"
}
function status {
@@ -479,13 +536,13 @@ function waitFor {
# Setup
-function setup {
+function setupEmcoTest {
install_deps
populate_CSAR_composite_app_helm "$csar_id"
}
function start {
- setup
+ setupEmcoTest
deleteData
print_msg "Before creating, deleting the data success"
createData
@@ -516,6 +573,7 @@ function usage {
if [[ "$#" -gt 0 ]] ; then
case "$1" in
+ "setup" ) setupEmcoTest ;;
"start" ) start ;;
"stop" ) stop ;;
"create" ) createData ;;
diff --git a/kud/tests/kata-clh.yml b/kud/tests/kata-clh.yml
new file mode 100644
index 00000000..6498213f
--- /dev/null
+++ b/kud/tests/kata-clh.yml
@@ -0,0 +1,26 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+kind: Pod
+apiVersion: v1
+metadata:
+ name: kata-clh
+spec:
+ runtimeClassName: kata-clh
+ containers:
+ - name: busybox
+ image: busybox
+ imagePullPolicy: Always
+ command: [ "sleep", "100000" ] \ No newline at end of file
diff --git a/kud/tests/kata-qemu.yml b/kud/tests/kata-qemu.yml
new file mode 100644
index 00000000..d95748e2
--- /dev/null
+++ b/kud/tests/kata-qemu.yml
@@ -0,0 +1,26 @@
+---
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+kind: Pod
+apiVersion: v1
+metadata:
+ name: kata-qemu
+spec:
+ runtimeClassName: kata-qemu
+ containers:
+ - name: busybox
+ image: busybox
+ imagePullPolicy: Always
+ command: [ "sleep", "100000" ] \ No newline at end of file
diff --git a/kud/tests/kata.sh b/kud/tests/kata.sh
new file mode 100755
index 00000000..f55d8cd3
--- /dev/null
+++ b/kud/tests/kata.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+#source _common_test.sh
+#source _common.sh
+#source _functions.sh
+
+kata_pods="kata-qemu kata-clh"
+
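+# wait_for_pod() - Poll until the named pod is Running; exit on an Err* phase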
+function wait_for_pod {
+ status_phase=""
+ while [[ "$status_phase" != "Running" ]]; do
+ new_phase="$(kubectl get pods -o wide | grep ^$1 | awk '{print $3}')"
+ if [[ "$new_phase" != "$status_phase" ]]; then
+ status_phase="$new_phase"
+ fi
+ if [[ "$new_phase" == "Err"* ]]; then
+ exit 1
+ fi
+ sleep 2
+ done
+}
+
+for pod in ${kata_pods}; do
+ echo "Deploying ${pod} pod"
+ kubectl apply -f ${pod}.yml
+ wait_for_pod ${pod}
+ echo "Pod ${pod} deployed successfully"
+ kubectl delete -f ${pod}.yml
+done
diff --git a/kud/tests/multus.sh b/kud/tests/multus.sh
index ad3a3909..4f94791f 100755
--- a/kud/tests/multus.sh
+++ b/kud/tests/multus.sh
@@ -41,7 +41,7 @@ NET
function generate_CRD_for_macvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}
@@ -67,7 +67,7 @@ NET
function generate_CRD_for_ipvlan_cni {
local csar_id=$1
- local master_name=`route | grep 'default' | awk '{print $8}' |head -n 1`
+ local master_name=$(ssh_cluster route | grep 'default' | awk '{print $8}' |head -n 1)
_checks_args $csar_id
pushd ${CSAR_DIR}/${csar_id}
diff --git a/kud/tests/ovn4nfv.sh b/kud/tests/ovn4nfv.sh
index cd2664ad..e25c2f09 100755
--- a/kud/tests/ovn4nfv.sh
+++ b/kud/tests/ovn4nfv.sh
@@ -34,8 +34,8 @@ echo "===== $deployment_pod details ====="
kubectl exec -it $deployment_pod -- ip a
ovn_nic=$(kubectl exec -it $deployment_pod -- ip a )
-if [[ $ovn_nic != *"net1"* ]]; then
- echo "The $deployment_pod pod doesn't contain the net1 nic"
+if [[ $ovn_nic != *"net3"* ]]; then
+ echo "The $deployment_pod pod doesn't contain the net3 nic"
exit 1
else
echo "Test Completed!"
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 8365f700..11fb6ca0 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,7 +10,7 @@
set -o pipefail
-qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."qat.intel.com/cy2_dc2">="1") | .metadata.name')
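+# Use tonumber so the capacity is compared numerically; the old string comparison was lexicographic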
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."qat.intel.com/cy2_dc2"|tonumber)>=1) | .metadata.name')
if [ -z "$qat_capable_nodes" ]; then
echo "This test case cannot run. QAT device unavailable."
QAT_ENABLED=False
diff --git a/kud/tests/sriov-network.sh b/kud/tests/sriov-network.sh
new file mode 100755
index 00000000..3191c2f3
--- /dev/null
+++ b/kud/tests/sriov-network.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o pipefail
+
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_nic"|tonumber)>=2) | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
+ echo "SRIOV test case cannot run on the cluster."
+ exit 0
+else
+ echo "SRIOV option is available in the cluster."
+fi
+
+pod_name=pod-case-01
+
+function create_pod_yaml_with_single_VF {
+
+cat << POD > $HOME/$pod_name-single.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '1'
+ limits:
+ intel.com/intel_sriov_nic: '1'
+POD
+}
+
+function create_pod_yaml_with_multiple_VF {
+
+cat << POD > $HOME/$pod_name-multiple.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: pod-case-01
+ annotations:
+ k8s.v1.cni.cncf.io/networks: sriov-intel, sriov-intel
+spec:
+ containers:
+ - name: test-pod
+ image: docker.io/centos/tools:latest
+ command:
+ - /sbin/init
+ resources:
+ requests:
+ intel.com/intel_sriov_nic: '2'
+ limits:
+ intel.com/intel_sriov_nic: '2'
+POD
+}
+create_pod_yaml_with_single_VF
+create_pod_yaml_with_multiple_VF
+
+for podType in ${POD_TYPE:-single multiple}; do
+
+ kubectl delete pod $pod_name --ignore-not-found=true --now --wait
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo "The allocated resource of the node is: " $allocated_node_resource
+
+ kubectl create -f $HOME/$pod_name-$podType.yaml --validate=false
+
+ for pod in $pod_name; do
+ status_phase=""
+ while [[ $status_phase != "Running" ]]; do
+ new_phase=$(kubectl get pods $pod | awk 'NR==2{print $3}')
+ if [[ $new_phase != $status_phase ]]; then
+ echo "$(date +%H:%M:%S) - $pod-$podType : $new_phase"
+ status_phase=$new_phase
+ fi
+ if [[ $new_phase == "Running" ]]; then
+ echo "Pod is up and running.."
+ fi
+ if [[ $new_phase == "Err"* ]]; then
+ exit 1
+ fi
+ done
+ done
+ allocated_node_resource=$(kubectl describe node | grep "intel.com/intel_sriov_nic" | tail -n1 |awk '{print $(NF)}')
+
+ echo " The current resource allocation after the pod creation is: " $allocated_node_resource
+ kubectl delete pod $pod_name --now
+ echo "Test complete."
+
+done
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index e617ea62..7aa97f0c 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,7 +10,7 @@
set -o pipefail
-sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.capacity."intel.com/intel_sriov_700">="2") | .metadata.name')
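+# As in qat.sh, compare the capacity numerically rather than as a string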
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2) | .metadata.name')
if [ -z "$sriov_capable_nodes" ]; then
echo "SRIOV test case cannot run on the cluster."
exit 0
diff --git a/kud/tests/topology-manager.sh b/kud/tests/topology-manager.sh
index 7d434386..5c9f900d 100755
--- a/kud/tests/topology-manager.sh
+++ b/kud/tests/topology-manager.sh
@@ -15,7 +15,8 @@ set -o pipefail
source _common.sh
source _functions.sh
-if [ -z "$( lspci | grep "Ethernet Controller XL710" | head -n 1 | cut -d " " -f 8 )" ]; then
+adaptors="X710 XL710 X722"
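+# ${adaptors// /\\|} replaces spaces with \| to form the grep pattern X710\|XL710\|X722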
+if [[ $(lspci | grep -c "Ethernet .* \(${adaptors// /\\|}\)") == "0" ]]; then
echo "Ethernet adaptor version is not set. Topology manager test case cannot run on this machine"
exit 0
else