-rw-r--r--  .gitignore | 2
-rw-r--r--  kud/demo/composite-firewall/sink/templates/_helpers.tpl | 11
-rw-r--r--  kud/demo/composite-firewall/sink/templates/deployment.yaml | 1
-rw-r--r--  kud/demo/composite-firewall/sink/templates/rolebinding.yaml | 14
-rw-r--r--  kud/demo/composite-firewall/sink/templates/serviceaccount.yaml | 10
-rw-r--r--  kud/demo/composite-firewall/sink/values.yaml | 17
-rw-r--r--  kud/deployment_infra/emco/Makefile | 13
-rw-r--r--  kud/deployment_infra/emco/composite-app.yaml | 110
-rw-r--r--  kud/deployment_infra/emco/examples/00-controllers.yaml | 52
-rw-r--r--  kud/deployment_infra/emco/examples/01-cluster.yaml | 29
-rw-r--r--  kud/deployment_infra/emco/examples/02-project.yaml | 40
-rw-r--r--  kud/deployment_infra/emco/examples/03-addons-app.yaml | 110
-rw-r--r--  kud/deployment_infra/emco/examples/04-addon-resources-app.yaml | 110
-rw-r--r--  kud/deployment_infra/emco/examples/README.md | 27
-rw-r--r--  kud/deployment_infra/emco/examples/prerequisites.yaml | 113
-rw-r--r--  kud/deployment_infra/emco/examples/values-resources.yaml.example | 19
-rw-r--r--  kud/deployment_infra/emco/examples/values.yaml.example | 36
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/Chart.yaml | 26
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/crds/cdi.yaml | 1892
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/_helpers.tpl | 63
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/clusterrole.yaml | 203
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/clusterrolebinding.yaml | 15
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/configmap.yaml | 7
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/deployment.yaml | 70
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/role.yaml | 47
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/rolebinding.yaml | 15
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/templates/serviceaccount.yaml | 12
-rw-r--r--  kud/deployment_infra/helm/cdi-operator/values.yaml | 57
-rw-r--r--  kud/deployment_infra/helm/cdi/Chart.yaml | 26
-rw-r--r--  kud/deployment_infra/helm/cdi/templates/_helpers.tpl | 43
-rw-r--r--  kud/deployment_infra/helm/cdi/templates/cdi.yaml | 29
-rw-r--r--  kud/deployment_infra/helm/cdi/values.yaml | 34
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/Chart.yaml | 24
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/crds/kubevirt.yaml | 2285
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/_helpers.tpl | 70
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/clusterrole.yaml | 668
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/clusterrolebinding.yaml | 15
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/deployment.yaml | 86
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/priorityclass.yaml | 7
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/role.yaml | 30
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/rolebinding.yaml | 15
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/templates/serviceaccount.yaml | 11
-rw-r--r--  kud/deployment_infra/helm/kubevirt-operator/values.yaml | 49
-rw-r--r--  kud/deployment_infra/helm/kubevirt/Chart.yaml | 24
-rw-r--r--  kud/deployment_infra/helm/kubevirt/templates/_helpers.tpl | 43
-rw-r--r--  kud/deployment_infra/helm/kubevirt/templates/kubevirt.yaml | 55
-rw-r--r--  kud/deployment_infra/helm/kubevirt/values.yaml | 61
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/.helmignore | 23
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/Chart.yaml | 21
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/templates/_helpers.tpl | 79
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/templates/cm.yaml | 80
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/templates/deployment.yaml | 94
-rw-r--r--  kud/deployment_infra/helm/sdewan_cnf/values.yaml | 54
-rw-r--r--  kud/deployment_infra/profiles/cdi-operator/manifest.yaml | 4
-rw-r--r--  kud/deployment_infra/profiles/cdi-operator/override_values.yaml | 0
-rw-r--r--  kud/deployment_infra/profiles/cdi/manifest.yaml | 4
-rw-r--r--  kud/deployment_infra/profiles/cdi/override_values.yaml | 0
-rw-r--r--  kud/deployment_infra/profiles/kubevirt-operator/manifest.yaml | 4
-rw-r--r--  kud/deployment_infra/profiles/kubevirt-operator/override_values.yaml | 0
-rw-r--r--  kud/deployment_infra/profiles/kubevirt/manifest.yaml | 4
-rw-r--r--  kud/deployment_infra/profiles/kubevirt/override_values.yaml | 0
-rw-r--r--  kud/hosting_providers/containerized/addons/README.md.tmpl | 34
-rw-r--r--  kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl | 19
-rw-r--r--  kud/hosting_providers/containerized/addons/values.yaml.tmpl | 36
-rwxr-xr-x  kud/hosting_providers/containerized/installer.sh | 7
-rw-r--r--  kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml | 2
-rwxr-xr-x  kud/hosting_providers/vagrant/installer.sh | 38
-rw-r--r--  kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml | 2
-rwxr-xr-x  kud/tests/_functions.sh | 2
-rwxr-xr-x  kud/tests/plugin_fw_v2.sh | 1045
-rw-r--r--  kud/tests/plugin_fw_v2.yaml | 411
-rwxr-xr-x  kud/tests/qat.sh | 2
-rwxr-xr-x  kud/tests/sriov-network.sh | 2
-rwxr-xr-x  kud/tests/sriov.sh | 2
-rwxr-xr-x  kud/tests/topology-manager.sh | 4
77 files changed, 7400 insertions, 1315 deletions
diff --git a/.gitignore b/.gitignore
index 0abc24af..d59bc24e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,8 @@ src/k8splugin/csar/mock_plugins/*.so
src/k8splugin/plugins/**/*.so
# Tests
+kud/tests/plugin_fw_v2_config.yaml
+kud/tests/plugin_fw_v2_values.yaml
*.test
*.out
diff --git a/kud/demo/composite-firewall/sink/templates/_helpers.tpl b/kud/demo/composite-firewall/sink/templates/_helpers.tpl
index 7d82d08d..f60b7ce6 100644
--- a/kud/demo/composite-firewall/sink/templates/_helpers.tpl
+++ b/kud/demo/composite-firewall/sink/templates/_helpers.tpl
@@ -30,3 +30,14 @@ Create chart name and version as used by the chart label.
{{- define "sink.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "sink.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "sink.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
diff --git a/kud/demo/composite-firewall/sink/templates/deployment.yaml b/kud/demo/composite-firewall/sink/templates/deployment.yaml
index f1f56b28..e65a64fb 100644
--- a/kud/demo/composite-firewall/sink/templates/deployment.yaml
+++ b/kud/demo/composite-firewall/sink/templates/deployment.yaml
@@ -18,6 +18,7 @@ spec:
app: {{ include "sink.name" . }}
release: {{ .Release.Name }}
spec:
+ serviceAccountName: {{ include "sink.serviceAccountName" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.sinkrepo }}:{{ .Values.image.sinktag }}"
diff --git a/kud/demo/composite-firewall/sink/templates/rolebinding.yaml b/kud/demo/composite-firewall/sink/templates/rolebinding.yaml
new file mode 100644
index 00000000..14c5b758
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/rolebinding.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ include "sink.fullname" . }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: ServiceAccount
+ name: {{ include "sink.serviceAccountName" . }}
+ namespace: {{ $.Release.Namespace }}
+{{- end }}
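
The RoleBinding above grants the sink service account the pre-existing psp:privileged ClusterRole; it does not create that ClusterRole itself. A quick sanity check on the target cluster (an illustration only, assuming kubectl access and that PodSecurityPolicy support is enabled there):

    $ kubectl get clusterrole psp:privileged
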
diff --git a/kud/demo/composite-firewall/sink/templates/serviceaccount.yaml b/kud/demo/composite-firewall/sink/templates/serviceaccount.yaml
new file mode 100644
index 00000000..2dcd900c
--- /dev/null
+++ b/kud/demo/composite-firewall/sink/templates/serviceaccount.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "sink.serviceAccountName" . }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+{{- end -}}
diff --git a/kud/demo/composite-firewall/sink/values.yaml b/kud/demo/composite-firewall/sink/values.yaml
index 245c9dea..b7ba1913 100644
--- a/kud/demo/composite-firewall/sink/values.yaml
+++ b/kud/demo/composite-firewall/sink/values.yaml
@@ -59,3 +59,20 @@ global:
demoArtifactsVersion: 1.6.0
dcaeCollectorIp: 10.0.4.1
dcaeCollectorPort: 8081
+
+###
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name:
+
+## RBAC parameters
+## https://kubernetes.io/docs/reference/access-authn-authz/rbac/
+##
+rbac:
+ create: true
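
With the values above, the sink chart renders a ServiceAccount, binds it to psp:privileged, and injects it into the Deployment via the sink.serviceAccountName helper. A minimal way to inspect the rendered output locally (a sketch, assuming Helm 3 and the repository root as working directory):

    $ helm template sink kud/demo/composite-firewall/sink | grep -A 4 'kind: ServiceAccount'
    $ helm template sink kud/demo/composite-firewall/sink \
        --set serviceAccount.create=false --set serviceAccount.name=my-sa \
        | grep 'serviceAccountName:'

The second command should show the Deployment referencing the explicitly supplied account name (my-sa is a made-up example) instead of the generated fullname, matching the fallback logic in _helpers.tpl.
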
diff --git a/kud/deployment_infra/emco/Makefile b/kud/deployment_infra/emco/Makefile
index de41bfc5..e06b5f6b 100644
--- a/kud/deployment_infra/emco/Makefile
+++ b/kud/deployment_infra/emco/Makefile
@@ -16,7 +16,18 @@ ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
OUTPUT_DIR := $(ROOT_DIR)/output
PACKAGE_DIR := $(OUTPUT_DIR)/packages
-ADDONS := multus-cni ovn4nfv node-feature-discovery sriov-network-operator sriov-network qat-device-plugin cpu-manager
+ADDONS := \
+ cdi \
+ cdi-operator \
+ cpu-manager \
+ kubevirt \
+ kubevirt-operator \
+ multus-cni \
+ node-feature-discovery \
+ ovn4nfv \
+ sriov-network \
+ sriov-network-operator \
+ qat-device-plugin
.PHONY: $(ADDONS)
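
With cdi, cdi-operator, kubevirt, and kubevirt-operator added to ADDONS, the addon packages referenced by PackagesPath in the example values files need to be rebuilt. A minimal sketch (assuming the Makefile's default target packages every entry in ADDONS into output/packages):

    $ make -C kud/deployment_infra/emco
    $ ls kud/deployment_infra/emco/output/packages/
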
diff --git a/kud/deployment_infra/emco/composite-app.yaml b/kud/deployment_infra/emco/composite-app.yaml
deleted file mode 100644
index 869447ad..00000000
--- a/kud/deployment_infra/emco/composite-app.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (c) 2020 Intel Corporation
-
----
-#creating composite app entry
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps
-metadata :
- name: {{ .CompositeApp }}
- description: "KUD addons"
-spec:
- version: v1
-
-{{- range $index, $addon := .Apps }}
----
-#adding app to the composite app
-version: emco/v2
-resourceContext:
- anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/apps
-metadata :
- name: {{ $addon }}
-file:
- {{ $.PackagesPath }}/{{ $addon }}.tar.gz
-{{- end }}
-
----
-#creating composite profile entry
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/composite-profiles
-metadata :
- name: {{ .CompositeProfile }}
-
-{{- range $index, $addon := .Apps }}
----
-#adding app profiles to the composite profile
-version: emco/v2
-resourceContext:
- anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/composite-profiles/{{ $.CompositeProfile }}/profiles
-metadata :
- name: {{ $addon }}-profile
-spec:
- app-name: {{ $addon }}
-file:
- {{ $.PackagesPath }}/{{ $addon }}_profile.tar.gz
-{{- end }}
-
----
-#create deployment intent group
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups
-metadata :
- name: {{ .DeploymentIntentGroup }}
- description: "description"
-spec:
- profile: {{ .CompositeProfile }}
- version: r1
- logical-cloud: {{ .LogicalCloud }}
- override-values: []
-
----
-#create intent in deployment intent group
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/intents
-metadata :
- name: {{ .DeploymentIntent }}
-spec:
- intent:
- genericPlacementIntent: {{ .GenericPlacementIntent }}
-
----
-#create the generic placement intent
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/generic-placement-intents
-metadata :
- name: {{ .GenericPlacementIntent }}
-spec:
- logical-cloud: {{ .LogicalCloud }}
-
-{{- range $index, $addon := .Apps }}
----
-#add the app placement intent to the generic placement intent
-version: emco/v2
-resourceContext:
- anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.CompositeApp }}/v1/deployment-intent-groups/{{ $.DeploymentIntentGroup }}/generic-placement-intents/{{ $.GenericPlacementIntent }}/app-intents
-metadata:
- name: {{ $addon }}-placement-intent
-spec:
- app-name: {{ $addon }}
- intent:
- allOf:
- - provider-name: {{ $.ClusterProvider }}
- cluster-label-name: {{ $.ClusterLabel }}
-{{- end }}
-
----
-#Approve
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/approve
-
----
-#Instantiate
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/composite-apps/{{ .CompositeApp }}/v1/deployment-intent-groups/{{ .DeploymentIntentGroup }}/instantiate
diff --git a/kud/deployment_infra/emco/examples/00-controllers.yaml b/kud/deployment_infra/emco/examples/00-controllers.yaml
new file mode 100644
index 00000000..c023ab87
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/00-controllers.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: rsync
+spec:
+ host: {{ .HostIP }}
+ port: {{ .RsyncPort }}
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: gac
+spec:
+ host: {{ .HostIP }}
+ port: {{ .GacPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: ovnaction
+spec:
+ host: {{ .HostIP }}
+ port: {{ .OvnPort }}
+ type: "action"
+ priority: 1
+
+---
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: dtc
+spec:
+ host: {{ .HostIP }}
+ port: {{ .DtcPort }}
+ type: "action"
+ priority: 1
diff --git a/kud/deployment_infra/emco/examples/01-cluster.yaml b/kud/deployment_infra/emco/examples/01-cluster.yaml
new file mode 100644
index 00000000..6f7ce4ba
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/01-cluster.yaml
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#creating cluster provider
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers
+metadata :
+ name: {{ .ClusterProvider }}
+
+{{- range $index, $cluster := .Clusters }}
+---
+#creating cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ $.ClusterProvider }}/clusters
+metadata :
+ name: {{ $cluster.Name }}
+file:
+ {{ $cluster.KubeConfig }}
+
+---
+#Add label cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{ $.ClusterProvider }}/clusters/{{ $cluster.Name }}/labels
+label-name: {{ $.ClustersLabel }}
+{{- end }}
\ No newline at end of file
diff --git a/kud/deployment_infra/emco/examples/02-project.yaml b/kud/deployment_infra/emco/examples/02-project.yaml
new file mode 100644
index 00000000..98ecfdb4
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/02-project.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#create project
+version: emco/v2
+resourceContext:
+ anchor: projects
+metadata :
+ name: {{ .ProjectName }}
+
+---
+#create default logical cloud with admin permissions
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds
+metadata:
+ name: {{ .LogicalCloud }}
+spec:
+ level: "0"
+
+{{- range $index, $cluster := .Clusters }}
+---
+#add cluster reference to logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/logical-clouds/{{ $.LogicalCloud }}/cluster-references
+metadata:
+ name: {{ $cluster.Name }}
+spec:
+ cluster-provider: {{ $.ClusterProvider }}
+ cluster-name: {{ $cluster.Name }}
+ loadbalancer-ip: "0.0.0.0"
+{{- end }}
+
+---
+#instantiate logical cloud
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/instantiate
diff --git a/kud/deployment_infra/emco/examples/03-addons-app.yaml b/kud/deployment_infra/emco/examples/03-addons-app.yaml
new file mode 100644
index 00000000..0fd15e0f
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/03-addons-app.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#creating composite app entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps
+metadata :
+ name: {{ .AddonsApp }}
+ description: "KUD addons"
+spec:
+ version: v1
+
+{{- range $index, $addon := .Addons }}
+---
+#adding app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonsApp }}/v1/apps
+metadata :
+ name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}.tar.gz
+{{- end }}
+
+---
+#creating composite profile entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/composite-profiles
+metadata :
+ name: {{ .AddonsProfile }}
+
+{{- range $index, $addon := .Addons }}
+---
+#adding app profiles to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonsApp }}/v1/composite-profiles/{{ $.AddonsProfile }}/profiles
+metadata :
+ name: {{ $addon }}-profile
+spec:
+ app-name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}_profile.tar.gz
+{{- end }}
+
+---
+#create deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/deployment-intent-groups
+metadata :
+ name: {{ .AddonsDeploymentIntentGroup }}
+ description: "description"
+spec:
+ profile: {{ .AddonsProfile }}
+ version: r1
+ logical-cloud: {{ .LogicalCloud }}
+ override-values: []
+
+---
+#create intent in deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/deployment-intent-groups/{{ .AddonsDeploymentIntentGroup }}/intents
+metadata :
+ name: {{ .AddonsDeploymentIntent }}
+spec:
+ intent:
+ genericPlacementIntent: {{ .AddonsPlacementIntent }}
+
+---
+#create the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/deployment-intent-groups/{{ .AddonsDeploymentIntentGroup }}/generic-placement-intents
+metadata :
+ name: {{ .AddonsPlacementIntent }}
+spec:
+ logical-cloud: {{ .LogicalCloud }}
+
+{{- range $index, $addon := .Addons }}
+---
+#add the app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonsApp }}/v1/deployment-intent-groups/{{ $.AddonsDeploymentIntentGroup }}/generic-placement-intents/{{ $.AddonsPlacementIntent }}/app-intents
+metadata:
+ name: {{ $addon }}-placement-intent
+spec:
+ app-name: {{ $addon }}
+ intent:
+ allOf:
+ - provider-name: {{ $.ClusterProvider }}
+ cluster-label-name: {{ $.ClustersLabel }}
+{{- end }}
+
+---
+#Approve
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/deployment-intent-groups/{{ .AddonsDeploymentIntentGroup }}/approve
+
+---
+#Instantiate
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonsApp }}/v1/deployment-intent-groups/{{ .AddonsDeploymentIntentGroup }}/instantiate
diff --git a/kud/deployment_infra/emco/examples/04-addon-resources-app.yaml b/kud/deployment_infra/emco/examples/04-addon-resources-app.yaml
new file mode 100644
index 00000000..92fd9539
--- /dev/null
+++ b/kud/deployment_infra/emco/examples/04-addon-resources-app.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+#creating composite app entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps
+metadata :
+ name: {{ .AddonResourcesApp }}
+ description: "KUD addons"
+spec:
+ version: v1
+
+{{- range $index, $addon := .AddonResources }}
+---
+#adding app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonResourcesApp }}/v1/apps
+metadata :
+ name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}.tar.gz
+{{- end }}
+
+---
+#creating composite profile entry
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/composite-profiles
+metadata :
+ name: {{ .AddonResourcesProfile }}
+
+{{- range $index, $addon := .AddonResources }}
+---
+#adding app profiles to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonResourcesApp }}/v1/composite-profiles/{{ $.AddonResourcesProfile }}/profiles
+metadata :
+ name: {{ $addon }}-profile
+spec:
+ app-name: {{ $addon }}
+file:
+ {{ $.PackagesPath }}/{{ $addon }}_profile.tar.gz
+{{- end }}
+
+---
+#create deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/deployment-intent-groups
+metadata :
+ name: {{ .AddonResourcesDeploymentIntentGroup }}
+ description: "description"
+spec:
+ profile: {{ .AddonResourcesProfile }}
+ version: r1
+ logical-cloud: {{ .LogicalCloud }}
+ override-values: []
+
+---
+#create intent in deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/deployment-intent-groups/{{ .AddonResourcesDeploymentIntentGroup }}/intents
+metadata :
+ name: {{ .AddonResourcesDeploymentIntent }}
+spec:
+ intent:
+ genericPlacementIntent: {{ .AddonResourcesPlacementIntent }}
+
+---
+#create the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/deployment-intent-groups/{{ .AddonResourcesDeploymentIntentGroup }}/generic-placement-intents
+metadata :
+ name: {{ .AddonResourcesPlacementIntent }}
+spec:
+ logical-cloud: {{ .LogicalCloud }}
+
+{{- range $index, $addon := .AddonResources }}
+---
+#add the app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ $.ProjectName }}/composite-apps/{{ $.AddonResourcesApp }}/v1/deployment-intent-groups/{{ $.AddonResourcesDeploymentIntentGroup }}/generic-placement-intents/{{ $.AddonResourcesPlacementIntent }}/app-intents
+metadata:
+ name: {{ $addon }}-placement-intent
+spec:
+ app-name: {{ $addon }}
+ intent:
+ allOf:
+ - provider-name: {{ $.ClusterProvider }}
+ cluster-label-name: {{ $.ClustersLabel }}
+{{- end }}
+
+---
+#Approve
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/deployment-intent-groups/{{ .AddonResourcesDeploymentIntentGroup }}/approve
+
+---
+#Instantiate
+version: emco/v2
+resourceContext:
+ anchor: projects/{{ .ProjectName }}/composite-apps/{{ .AddonResourcesApp }}/v1/deployment-intent-groups/{{ .AddonResourcesDeploymentIntentGroup }}/instantiate
diff --git a/kud/deployment_infra/emco/examples/README.md b/kud/deployment_infra/emco/examples/README.md
index b91cce10..203b83fd 100644
--- a/kud/deployment_infra/emco/examples/README.md
+++ b/kud/deployment_infra/emco/examples/README.md
@@ -27,33 +27,36 @@ needs to be installed and configured for the edge cluster.
2. Customize values.yaml.
`$ envsubst < values.yaml.example > values.yaml`
- `$ envsubst < values-resources.yaml.example > values-resources.yaml`
## Create prerequisites to deploy addons
-Apply prerequisites.yaml. This creates controllers, one project, one
-cluster, and default logical cloud. This step is required to be done
-only once.
+Apply the prerequisites. This creates the controllers, one or more
+clusters, one project, and one default logical cloud. This step is
+required to be done only once.
- `$ emcoctl apply -f prerequisites.yaml -v values.yaml`
+ `$ emcoctl apply -f 00-controllers.yaml -v values.yaml`
+ `$ emcoctl apply -f 01-cluster.yaml -v values.yaml`
+ `$ emcoctl apply -f 02-project.yaml -v values.yaml`
## Deploying addons
-Apply composite-app.yaml. This deploys the addons listed in the `Apps`
-value.
+This deploys the applications listed in the `Addons` and
+`AddonResources` values.
- `$ emcoctl apply -f ../output/composite-app.yaml -v values.yaml`
- `$ emcoctl apply -f ../output/composite-app.yaml -v values-resources.yaml`
+ `$ emcoctl apply -f 03-addons-app.yaml -v values.yaml`
+ `$ emcoctl apply -f 04-addon-resources-app.yaml -v values.yaml`
## Cleanup
1. Delete addons.
- `$ emcoctl delete -f ../output/composite-app.yaml -v values-resources.yaml`
- `$ emcoctl delete -f ../output/composite-app.yaml -v values.yaml`
+ `$ emcoctl delete -f 04-addon-resources-app.yaml -v values.yaml`
+ `$ emcoctl delete -f 03-addons-app.yaml -v values.yaml`
2. Cleanup prerequisites.
- `$ emcoctl delete -f prerequisites.yaml -v values.yaml`
+ `$ emcoctl delete -f 02-project.yaml -v values.yaml`
+ `$ emcoctl delete -f 01-cluster.yaml -v values.yaml`
+ `$ emcoctl delete -f 00-controllers.yaml -v values.yaml`
#### NOTE: Known issue: Deletion of the resources fails sometimes as some resources can't be deleted before others are deleted. This can happen due to timing issue. In that case try deleting again and the deletion should succeed.
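
Put together, the updated workflow from this README is (commands repeated from the sections above, run from the examples directory after values.yaml has been generated):

    $ envsubst < values.yaml.example > values.yaml
    $ emcoctl apply -f 00-controllers.yaml -v values.yaml
    $ emcoctl apply -f 01-cluster.yaml -v values.yaml
    $ emcoctl apply -f 02-project.yaml -v values.yaml
    $ emcoctl apply -f 03-addons-app.yaml -v values.yaml
    $ emcoctl apply -f 04-addon-resources-app.yaml -v values.yaml
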
diff --git a/kud/deployment_infra/emco/examples/prerequisites.yaml b/kud/deployment_infra/emco/examples/prerequisites.yaml
deleted file mode 100644
index a44546e0..00000000
--- a/kud/deployment_infra/emco/examples/prerequisites.yaml
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# Copyright (c) 2020 Intel Corporation
-
----
-#create project
-version: emco/v2
-resourceContext:
- anchor: projects
-metadata :
- name: {{ .ProjectName }}
----
-#creating controller entries
-version: emco/v2
-resourceContext:
- anchor: controllers
-metadata :
- name: rsync
-spec:
- host: {{ .HostIP }}
- port: {{ .RsyncPort }}
-
----
-#creating controller entries
-version: emco/v2
-resourceContext:
- anchor: controllers
-metadata :
- name: gac
-spec:
- host: {{ .HostIP }}
- port: {{ .GacPort }}
- type: "action"
- priority: 1
-
----
-#creating controller entries
-version: emco/v2
-resourceContext:
- anchor: controllers
-metadata :
- name: ovnaction
-spec:
- host: {{ .HostIP }}
- port: {{ .OvnPort }}
- type: "action"
- priority: 1
-
----
-#creating controller entries
-version: emco/v2
-resourceContext:
- anchor: controllers
-metadata :
- name: dtc
-spec:
- host: {{ .HostIP }}
- port: {{ .DtcPort }}
- type: "action"
- priority: 1
-
----
-#creating cluster provider
-version: emco/v2
-resourceContext:
- anchor: cluster-providers
-metadata :
- name: {{ .ClusterProvider }}
-
----
-#creating cluster
-version: emco/v2
-resourceContext:
- anchor: cluster-providers/{{ .ClusterProvider }}/clusters
-metadata :
- name: {{ .Cluster1 }}
-file:
- {{ .KubeConfig }}
-
----
-#Add label cluster
-version: emco/v2
-resourceContext:
- anchor: cluster-providers/{{ .ClusterProvider }}/clusters/{{ .Cluster1 }}/labels
-label-name: {{ .ClusterLabel }}
-
----
-#create default logical cloud with admin permissions
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/logical-clouds
-metadata:
- name: {{ .LogicalCloud }}
-spec:
- level: "0"
-
----
-#add cluster reference to logical cloud
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/cluster-references
-metadata:
- name: lc-cl-1
-spec:
- cluster-provider: {{ .ClusterProvider }}
- cluster-name: {{ .Cluster1 }}
- loadbalancer-ip: "0.0.0.0"
-
----
-#instantiate logical cloud
-version: emco/v2
-resourceContext:
- anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/instantiate
-
diff --git a/kud/deployment_infra/emco/examples/values-resources.yaml.example b/kud/deployment_infra/emco/examples/values-resources.yaml.example
deleted file mode 100644
index acfd903c..00000000
--- a/kud/deployment_infra/emco/examples/values-resources.yaml.example
+++ /dev/null
@@ -1,19 +0,0 @@
-HostIP: $HOST_IP
-KubeConfig: $KUBE_PATH
-PackagesPath: $PWD/../output/packages
-ProjectName: proj1
-RsyncPort: 30441
-GacPort: 30493
-OvnPort: 30473
-DtcPort: 30483
-ClusterProvider: provider1
-Cluster1: cluster1
-ClusterLabel: edge-cluster
-LogicalCloud: default
-CompositeApp: addon-resources
-CompositeProfile: addon-resources-profile
-DeploymentIntentGroup: addon-resources-deployment-intent-group
-DeploymentIntent: addon-resources-deployment-intent
-GenericPlacementIntent: addon-resources-placement-intent
-Apps:
-- sriov-network
diff --git a/kud/deployment_infra/emco/examples/values.yaml.example b/kud/deployment_infra/emco/examples/values.yaml.example
index 37ddacf6..4f5e45ed 100644
--- a/kud/deployment_infra/emco/examples/values.yaml.example
+++ b/kud/deployment_infra/emco/examples/values.yaml.example
@@ -1,24 +1,36 @@
HostIP: $HOST_IP
-KubeConfig: $KUBE_PATH
-PackagesPath: $PWD/../output/packages
-ProjectName: proj1
RsyncPort: 30441
GacPort: 30493
OvnPort: 30473
DtcPort: 30483
-ClusterProvider: provider1
-Cluster1: cluster1
-ClusterLabel: edge-cluster
+
+ClusterProvider: kud
+ClustersLabel: kud-cluster
+Clusters:
+- KubeConfig: $KUBE_PATH
+ Name: cluster
+
+ProjectName: kud
LogicalCloud: default
-CompositeApp: addons
-CompositeProfile: addons-profile
-DeploymentIntentGroup: addons-deployment-intent-group
-DeploymentIntent: addons-deployment-intent
-GenericPlacementIntent: addons-placement-intent
-Apps:
+
+PackagesPath: $PWD/../output/packages
+AddonsApp: addons
+AddonsProfile: addons-profile
+AddonsDeploymentIntentGroup: addons-deployment-intent-group
+AddonsDeploymentIntent: addons-deployment-intent
+AddonsPlacementIntent: addons-placement-intent
+Addons:
- multus-cni
- ovn4nfv
- node-feature-discovery
- sriov-network-operator
- qat-device-plugin
- cpu-manager
+
+AddonResourcesApp: addon-resources
+AddonResourcesProfile: addon-resources-profile
+AddonResourcesDeploymentIntentGroup: addon-resources-deployment-intent-group
+AddonResourcesDeploymentIntent: addon-resources-deployment-intent
+AddonResourcesPlacementIntent: addon-resources-placement-intent
+AddonResources:
+- sriov-network
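
The $HOST_IP and $KUBE_PATH placeholders above are filled in by the envsubst step described in the README. For illustration only (both values below are hypothetical, not defaults):

    $ export HOST_IP=192.0.2.10             # address where the EMCO controller node ports are reachable
    $ export KUBE_PATH=$HOME/.kube/config   # kubeconfig for the target edge cluster
    $ envsubst < values.yaml.example > values.yaml
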
diff --git a/kud/deployment_infra/helm/cdi-operator/.helmignore b/kud/deployment_infra/helm/cdi-operator/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/cdi-operator/Chart.yaml b/kud/deployment_infra/helm/cdi-operator/Chart.yaml
new file mode 100644
index 00000000..920f21b8
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/Chart.yaml
@@ -0,0 +1,26 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v1.34.1
+description: |
+ Containerized-Data-Importer (CDI) is a persistent storage management
+ add-on for Kubernetes. Its primary goal is to provide a declarative
+ way to build Virtual Machine Disks on PVCs for Kubevirt VMs.
+name: cdi-operator
+sources:
+ - https://github.com/kubevirt/containerized-data-importer
+home: https://github.com/kubevirt/containerized-data-importer
+type: application
+version: 0.1.0
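
Before packaging the new chart as an addon, it can be checked in isolation (a sketch, assuming Helm 3 is available):

    $ helm lint kud/deployment_infra/helm/cdi-operator
    $ helm template cdi-operator kud/deployment_infra/helm/cdi-operator > /tmp/cdi-operator-rendered.yaml
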
diff --git a/kud/deployment_infra/helm/cdi-operator/crds/cdi.yaml b/kud/deployment_infra/helm/cdi-operator/crds/cdi.yaml
new file mode 100644
index 00000000..1341b52e
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/crds/cdi.yaml
@@ -0,0 +1,1892 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ labels:
+ operator.cdi.kubevirt.io: ""
+ name: cdis.cdi.kubevirt.io
+spec:
+ conversion:
+ strategy: None
+ group: cdi.kubevirt.io
+ names:
+ kind: CDI
+ listKind: CDIList
+ plural: cdis
+ shortNames:
+ - cdi
+ - cdis
+ singular: cdi
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: CDI is the CDI Operator CRD
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CDISpec defines our specification for the CDI installation
+ properties:
+ certConfig:
+ description: certificate configuration
+ properties:
+ ca:
+ description: CA configuration CA certs are kept in the CA bundle as long as they are valid
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's `notAfter` time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ server:
+ description: Server configuration Certs are rotated and discarded
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's `notAfter` time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ type: object
+ cloneStrategyOverride:
+ description: 'Clone strategy override: should we use a host-assisted copy even if snapshots are available?'
+ enum:
+ - copy
+ - snapshot
+ type: string
+ config:
+ description: CDIConfig at CDI level
+ properties:
+ featureGates:
+ description: FeatureGates are a list of specific enabled feature gates
+ items:
+ type: string
+ type: array
+ filesystemOverhead:
+ description: FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1, if not defined it is 0.055 (5.5% overhead)
+ properties:
+ global:
+ description: Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)
+ pattern: ^(0(?:\.\d{1,3})?|1)$
+ type: string
+ storageClass:
+ additionalProperties:
+ description: 'Percent is a string that can only be a value between [0,1) (Note: we actually rely on reconcile to reject invalid values)'
+ pattern: ^(0(?:\.\d{1,3})?|1)$
+ type: string
+ description: StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value
+ type: object
+ type: object
+ importProxy:
+ description: ImportProxy contains importer pod proxy configuration.
+ properties:
+ HTTPProxy:
+ description: HTTPProxy is the URL http://<username>:<pswd>@<ip>:<port> of the import proxy for HTTP requests. Empty means unset and will not result in the import pod env var.
+ type: string
+ HTTPSProxy:
+ description: HTTPSProxy is the URL https://<username>:<pswd>@<ip>:<port> of the import proxy for HTTPS requests. Empty means unset and will not result in the import pod env var.
+ type: string
+ noProxy:
+ description: NoProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in the import pod env var.
+ type: string
+ trustedCAProxy:
+ description: "TrustedCAProxy is the name of a ConfigMap in the cdi namespace that contains a user-provided trusted certificate authority (CA) bundle. The TrustedCAProxy field is consumed by the import controller that is resposible for coping it to a config map named trusted-ca-proxy-bundle-cm in the cdi namespace. Here is an example of the ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca-proxy-bundle-cm namespace: cdi data: ca.pem: | -----BEGIN CERTIFICATE----- \t ... <base64 encoded cert> ... \t -----END CERTIFICATE-----"
+ type: string
+ type: object
+ insecureRegistries:
+ description: InsecureRegistries is a list of TLS disabled registries
+ items:
+ type: string
+ type: array
+ podResourceRequirements:
+ description: ResourceRequirements describes the compute resource requirements.
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
+ preallocation:
+ description: Preallocation controls whether storage for DataVolumes should be allocated in advance.
+ type: boolean
+ scratchSpaceStorageClass:
+ description: 'Override the storage class to used for scratch space during transfer operations. The scratch space storage class is determined in the following order: 1. value of scratchSpaceStorageClass, if that doesn''t exist, use the default storage class, if there is no default storage class, use the storage class of the DataVolume, if no storage class specified, use no storage class for scratch space'
+ type: string
+ uploadProxyURLOverride:
+ description: Override the URL used when uploading to a DataVolume
+ type: string
+ type: object
+ imagePullPolicy:
+ description: PullPolicy describes a policy for if/when to pull a container image
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ type: string
+ infra:
+ description: Rules on which nodes CDI infrastructure pods will be scheduled
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ uninstallStrategy:
+ description: CDIUninstallStrategy defines the state to leave CDI on uninstall
+ enum:
+ - RemoveWorkloads
+ - BlockUninstallIfWorkloadsExist
+ type: string
+ workload:
+ description: Restrict on which nodes CDI workload pods will be scheduled
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement, expanding the types of constraints that can be expressed with nodeSelector. affinity is applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ status:
+ description: CDIStatus defines the status of the installation
+ properties:
+ conditions:
+ description: A list of current conditions of the resource
+ items:
+ description: Condition represents the state of the operator's reconciliation functionality.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation functionality.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedVersion:
+ description: The observed version of the resource
+ type: string
+ operatorVersion:
+ description: The version of the resource as defined by the operator
+ type: string
+ phase:
+ description: Phase is the current phase of the deployment
+ type: string
+ targetVersion:
+ description: The desired version of the resource
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources: {}
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: CDI is the CDI Operator CRD
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: CDISpec defines our specification for the CDI installation
+ properties:
+ certConfig:
+ description: certificate configuration
+ properties:
+ ca:
+ description: CA configuration. CA certs are kept in the CA bundle as long as they are valid
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's `notAfter` time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ server:
+ description: Server configuration. Certs are rotated and discarded
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's `notAfter` time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ type: object
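+ # Illustrative sketch only (not part of the upstream schema): a CDI
+ # custom resource could set certConfig as below, assuming the duration
+ # fields accept Go-style duration strings:
+ #   spec:
+ #     certConfig:
+ #       ca:
+ #         duration: "48h"
+ #         renewBefore: "24h"
+ #       server:
+ #         duration: "24h"
+ #         renewBefore: "12h"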
+ cloneStrategyOverride:
+ description: 'Clone strategy override: should we use a host-assisted copy even if snapshots are available?'
+ enum:
+ - copy
+ - snapshot
+ type: string
+ config:
+ description: CDIConfig at CDI level
+ properties:
+ featureGates:
+ description: FeatureGates are a list of specific enabled feature gates
+ items:
+ type: string
+ type: array
+ filesystemOverhead:
+ description: FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1; if not defined, it is 0.055 (5.5% overhead)
+ properties:
+ global:
+ description: Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)
+ pattern: ^(0(?:\.\d{1,3})?|1)$
+ type: string
+ storageClass:
+ additionalProperties:
+ description: 'Percent is a string that can only be a value between [0,1) (Note: we actually rely on reconcile to reject invalid values)'
+ pattern: ^(0(?:\.\d{1,3})?|1)$
+ type: string
+ description: StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value
+ type: object
+ type: object
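+ # Illustrative sketch only: overhead values are strings matching the
+ # pattern above (between "0" and "1"); the storage class name here is
+ # hypothetical:
+ #   spec:
+ #     config:
+ #       filesystemOverhead:
+ #         global: "0.055"
+ #         storageClass:
+ #           my-storage-class: "0.1"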
+ importProxy:
+ description: ImportProxy contains importer pod proxy configuration.
+ properties:
+ HTTPProxy:
+ description: HTTPProxy is the URL http://<username>:<pswd>@<ip>:<port> of the import proxy for HTTP requests. Empty means unset and will not result in the import pod env var.
+ type: string
+ HTTPSProxy:
+ description: HTTPSProxy is the URL https://<username>:<pswd>@<ip>:<port> of the import proxy for HTTPS requests. Empty means unset and will not result in the import pod env var.
+ type: string
+ noProxy:
+ description: NoProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in the import pod env var.
+ type: string
+ trustedCAProxy:
+ description: "TrustedCAProxy is the name of a ConfigMap in the cdi namespace that contains a user-provided trusted certificate authority (CA) bundle. The TrustedCAProxy field is consumed by the import controller that is resposible for coping it to a config map named trusted-ca-proxy-bundle-cm in the cdi namespace. Here is an example of the ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: trusted-ca-proxy-bundle-cm namespace: cdi data: ca.pem: | -----BEGIN CERTIFICATE----- \t ... <base64 encoded cert> ... \t -----END CERTIFICATE-----"
+ type: string
+ type: object
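+ # Illustrative sketch only: the proxy URLs, noProxy list and ConfigMap
+ # name below are placeholders, not defaults:
+ #   spec:
+ #     config:
+ #       importProxy:
+ #         HTTPProxy: "http://user:password@proxy.example.com:3128"
+ #         HTTPSProxy: "https://user:password@proxy.example.com:3128"
+ #         noProxy: "localhost,127.0.0.1,.cluster.local"
+ #         trustedCAProxy: "my-ca-bundle"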
+ insecureRegistries:
+ description: InsecureRegistries is a list of TLS disabled registries
+ items:
+ type: string
+ type: array
+ podResourceRequirements:
+ description: ResourceRequirements describes the compute resource requirements.
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+ type: object
+ type: object
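+ # Illustrative sketch only: worker pod resources use ordinary Kubernetes
+ # quantity strings matching the pattern above:
+ #   spec:
+ #     config:
+ #       podResourceRequirements:
+ #         requests:
+ #           cpu: "100m"
+ #           memory: "60M"
+ #         limits:
+ #           cpu: "750m"
+ #           memory: "600M"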
+ preallocation:
+ description: Preallocation controls whether storage for DataVolumes should be allocated in advance.
+ type: boolean
+ scratchSpaceStorageClass:
+ description: 'Override the storage class to be used for scratch space during transfer operations. The scratch space storage class is determined in the following order: the value of scratchSpaceStorageClass if set; otherwise the default storage class; otherwise the storage class of the DataVolume; otherwise no storage class is used for scratch space'
+ type: string
+ uploadProxyURLOverride:
+ description: Override the URL used when uploading to a DataVolume
+ type: string
+ type: object
+ imagePullPolicy:
+ description: PullPolicy describes a policy for if/when to pull a container image
+ enum:
+ - Always
+ - IfNotPresent
+ - Never
+ type: string
+ infra:
+ description: Rules on which nodes CDI infrastructure pods will be scheduled
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement, expanding the types of constraints that can be expressed with nodeSelector. affinity is applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
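+ # Illustrative sketch only: pinning CDI infrastructure pods to particular
+ # nodes; the taint key is hypothetical, the node label is the well-known
+ # kubernetes.io/arch label:
+ #   spec:
+ #     infra:
+ #       nodeSelector:
+ #         kubernetes.io/arch: amd64
+ #       tolerations:
+ #         - key: "cdi-infra"
+ #           operator: "Exists"
+ #           effect: "NoSchedule"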
+ uninstallStrategy:
+ description: CDIUninstallStrategy defines the state to leave CDI on uninstall
+ enum:
+ - RemoveWorkloads
+ - BlockUninstallIfWorkloadsExist
+ type: string
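+ # Illustrative sketch only: a minimal CDI custom resource against this
+ # v1beta1 schema, assuming the usual cdi.kubevirt.io API group;
+ # imagePullPolicy and uninstallStrategy take the enum values listed above:
+ #   apiVersion: cdi.kubevirt.io/v1beta1
+ #   kind: CDI
+ #   metadata:
+ #     name: cdi
+ #   spec:
+ #     imagePullPolicy: IfNotPresent
+ #     uninstallStrategy: BlockUninstallIfWorkloadsExist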
+ workload:
+ description: Restrict on which nodes CDI workload pods will be scheduled
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement, expanding the types of constraints that can be expressed with nodeSelector. affinity is applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ status:
+ description: CDIStatus defines the status of the installation
+ properties:
+ conditions:
+ description: A list of current conditions of the resource
+ items:
+ description: Condition represents the state of the operator's reconciliation functionality.
+ properties:
+ lastHeartbeatTime:
+ format: date-time
+ type: string
+ lastTransitionTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ description: ConditionType is the state of the operator's reconciliation functionality.
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ observedVersion:
+ description: The observed version of the resource
+ type: string
+ operatorVersion:
+ description: The version of the resource as defined by the operator
+ type: string
+ phase:
+ description: Phase is the current phase of the deployment
+ type: string
+ targetVersion:
+ description: The desired version of the resource
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources: {}
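For reference, a minimal CDI custom resource exercising the uninstallStrategy and workload affinity fields described by the schema above could look like the sketch below; the node label and affinity constraint are illustrative assumptions only, not defaults taken from this patch.

apiVersion: cdi.kubevirt.io/v1beta1
kind: CDI
metadata:
  name: cdi
spec:
  # Refuse to uninstall CDI while CDI workloads still exist (one of the two allowed enum values above).
  uninstallStrategy: BlockUninstallIfWorkloadsExist
  workload:
    # Example constraint: schedule CDI workload pods only onto Linux nodes.
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: kubernetes.io/os
              operator: In
              values:
              - linux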
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/_helpers.tpl b/kud/deployment_infra/helm/cdi-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..ebb48a4d
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/_helpers.tpl
@@ -0,0 +1,63 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cdi-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cdi-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cdi-operator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cdi-operator.labels" -}}
+helm.sh/chart: {{ include "cdi-operator.chart" . }}
+{{ include "cdi-operator.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "cdi-operator.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "cdi-operator.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cdi-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "cdi-operator.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
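As a rough illustration of how these helpers resolve, assuming a hypothetical release name that is not part of this change:

# Hypothetical rendering for `helm install kud ./cdi-operator`:
#   include "cdi-operator.name"               => cdi-operator
#   include "cdi-operator.fullname"           => kud-cdi-operator   (release name does not contain the chart name)
#   include "cdi-operator.serviceAccountName" => cdi-operator       (values.yaml below sets serviceAccount.name)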
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/clusterrole.yaml b/kud/deployment_infra/helm/cdi-operator/templates/clusterrole.yaml
new file mode 100644
index 00000000..3f813e58
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/clusterrole.yaml
@@ -0,0 +1,203 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cdi-operator-cluster
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ operator.cdi.kubevirt.io: ""
+rules:
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - clusterrolebindings
+ - clusterroles
+ verbs:
+ - '*'
+- apiGroups:
+ - security.openshift.io
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - '*'
+- apiGroups:
+ - cdi.kubevirt.io
+ - upload.cdi.kubevirt.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ - mutatingwebhookconfigurations
+ verbs:
+ - '*'
+- apiGroups:
+ - apiregistration.k8s.io
+ resources:
+ - apiservices
+ verbs:
+ - '*'
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - datavolumes
+ verbs:
+ - list
+ - get
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - cdis
+ verbs:
+ - get
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - cdis/finalizers
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ - persistentvolumeclaims
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims/finalizers
+ - pods/finalizers
+ - volumesnapshots/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+- apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+- apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - config.openshift.io
+ resources:
+ - proxies
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/cdi-operator/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..9f6b1c37
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/clusterrolebinding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cdi-operator
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ operator.cdi.kubevirt.io: ""
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cdi-operator-cluster
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cdi-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/configmap.yaml b/kud/deployment_infra/helm/cdi-operator/templates/configmap.yaml
new file mode 100644
index 00000000..b04c3c9a
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: cdi-operator-leader-election-helper
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ operator.cdi.kubevirt.io: ""
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/deployment.yaml b/kud/deployment_infra/helm/cdi-operator/templates/deployment.yaml
new file mode 100644
index 00000000..3010d6e7
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/deployment.yaml
@@ -0,0 +1,70 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "cdi-operator.fullname" . }}
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ operator.cdi.kubevirt.io: ""
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ {{- include "cdi-operator.selectorLabels" . | nindent 6 }}
+ operator.cdi.kubevirt.io: ""
+ strategy: {}
+ template:
+ metadata:
+ labels:
+ {{- include "cdi-operator.selectorLabels" . | nindent 8 }}
+ operator.cdi.kubevirt.io: ""
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "cdi-operator.serviceAccountName" . }}
+ containers:
+ - name: cdi-operator
+ image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: DEPLOY_CLUSTER_RESOURCES
+ value: "true"
+ - name: OPERATOR_VERSION
+ value: {{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: CONTROLLER_IMAGE
+ value: {{ .Values.controllerImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: IMPORTER_IMAGE
+ value: {{ .Values.importerImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: CLONER_IMAGE
+ value: {{ .Values.clonerImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: APISERVER_IMAGE
+ value: {{ .Values.apiserverImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: UPLOAD_SERVER_IMAGE
+ value: {{ .Values.uploadServerImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: UPLOAD_PROXY_IMAGE
+ value: {{ .Values.uploadProxyImage.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: VERBOSITY
+ value: "1"
+ - name: PULL_POLICY
+ value: {{ .Values.image.pullPolicy }}
+ ports:
+ - containerPort: 60000
+ name: metrics
+ protocol: TCP
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 8 }}
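Every *_IMAGE environment variable above reuses the single .Values.image.tag (falling back to Chart.AppVersion), so the operator and all CDI component images stay on one tag. A usage sketch, where the release name and tag value are illustrative assumptions:

# Pin the operator and all CDI component images to one tag (tag value is an example only):
helm install cdi-operator ./kud/deployment_infra/helm/cdi-operator \
  --set image.tag=v1.34.1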
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/role.yaml b/kud/deployment_infra/helm/cdi-operator/templates/role.yaml
new file mode 100644
index 00000000..6dc2cafb
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/role.yaml
@@ -0,0 +1,47 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: cdi-operator
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ cdi.kubevirt.io: ""
+rules:
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ - roles
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ - configmaps
+ - events
+ - secrets
+ - services
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - deployments/finalizers
+ verbs:
+ - '*'
+- apiGroups:
+ - route.openshift.io
+ resources:
+ - routes
+ - routes/custom-host
+ verbs:
+ - '*'
+- apiGroups:
+ - config.openshift.io
+ resources:
+ - proxies
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/rolebinding.yaml b/kud/deployment_infra/helm/cdi-operator/templates/rolebinding.yaml
new file mode 100644
index 00000000..51122838
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/rolebinding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: cdi-operator
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ cdi.kubevirt.io: ""
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: cdi-operator
+subjects:
+- kind: ServiceAccount
+ name: {{ include "cdi-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/cdi-operator/templates/serviceaccount.yaml b/kud/deployment_infra/helm/cdi-operator/templates/serviceaccount.yaml
new file mode 100644
index 00000000..bb328dd6
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "cdi-operator.serviceAccountName" . }}
+ labels:
+ {{- include "cdi-operator.labels" . | nindent 4 }}
+ operator.cdi.kubevirt.io: ""
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+
diff --git a/kud/deployment_infra/helm/cdi-operator/values.yaml b/kud/deployment_infra/helm/cdi-operator/values.yaml
new file mode 100644
index 00000000..2e1813d9
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi-operator/values.yaml
@@ -0,0 +1,57 @@
+image:
+ repository: quay.io/kubevirt/cdi-operator
+ # This should be set to 'IfNotPresent' for released versions
+ pullPolicy: IfNotPresent
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+ # tag
+imagePullSecrets: []
+
+controllerImage:
+ repository: quay.io/kubevirt/cdi-controller
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+importerImage:
+ repository: quay.io/kubevirt/cdi-importer
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+clonerImage:
+ repository: quay.io/kubevirt/cdi-cloner
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+apiserverImage:
+ repository: quay.io/kubevirt/cdi-apiserver
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+uploadServerImage:
+ repository: quay.io/kubevirt/cdi-uploadserver
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+uploadProxyImage:
+ repository: quay.io/kubevirt/cdi-uploadproxy
+ # tag, if defined, is used as the image tag; otherwise Chart.AppVersion is used
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: cdi-operator
+
+nameOverride: ""
+fullnameOverride: ""
+
+resources: {}
+
+securityContext:
+ runAsNonRoot: true
+
+nodeSelector:
+ kubernetes.io/os: linux
+
+affinity: {}
+
+tolerations:
+- key: CriticalAddonsOnly
+ operator: Exists
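The scheduling defaults above (a Linux-only nodeSelector plus a CriticalAddonsOnly toleration) can be overridden per deployment. The values file below is only a hypothetical sketch of such an override; none of these values are shipped by the chart:

# my-values.yaml (hypothetical override, passed via `helm install -f my-values.yaml ...`)
nodeSelector:
  kubernetes.io/os: linux
  node-role.kubernetes.io/control-plane: ""
tolerations:
- key: node-role.kubernetes.io/control-plane
  operator: Exists
  effect: NoSchedule
resources:
  requests:
    cpu: 10m
    memory: 150Mi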
diff --git a/kud/deployment_infra/helm/cdi/Chart.yaml b/kud/deployment_infra/helm/cdi/Chart.yaml
new file mode 100644
index 00000000..cca673de
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi/Chart.yaml
@@ -0,0 +1,26 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v1.34.1
+description: |
+ Containerized-Data-Importer (CDI) is a persistent storage management
+ add-on for Kubernetes. Its primary goal is to provide a declarative
+ way to build Virtual Machine Disks on PVCs for KubeVirt VMs.
+name: cdi
+sources:
+ - https://github.com/kubevirt/containerized-data-importer
+home: https://github.com/kubevirt/containerized-data-importer
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/cdi/templates/_helpers.tpl b/kud/deployment_infra/helm/cdi/templates/_helpers.tpl
new file mode 100644
index 00000000..080f7b56
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cdi.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cdi.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cdi.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "cdi.labels" -}}
+helm.sh/chart: {{ include "cdi.chart" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/cdi/templates/cdi.yaml b/kud/deployment_infra/helm/cdi/templates/cdi.yaml
new file mode 100644
index 00000000..25fee9d5
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi/templates/cdi.yaml
@@ -0,0 +1,29 @@
+apiVersion: cdi.kubevirt.io/v1beta1
+kind: CDI
+metadata:
+ name: {{ include "cdi.fullname" . }}
+ labels:
+ {{- include "cdi.labels" . | nindent 4 }}
+spec:
+ {{- with .Values.certConfig }}
+ certConfig:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.cloneStrategyOverride }}
+ cloneStrategyOverride: {{ . }}
+ {{- end }}
+ {{- with .Values.config }}
+ config:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.imagePullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.infra }}
+ infra:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.workload }}
+ workload:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/cdi/values.yaml b/kud/deployment_infra/helm/cdi/values.yaml
new file mode 100644
index 00000000..31bc3247
--- /dev/null
+++ b/kud/deployment_infra/helm/cdi/values.yaml
@@ -0,0 +1,34 @@
+nameOverride: ""
+fullnameOverride: ""
+
+# certConfig is the certificate configuration.
+#certConfig: {}
+
+# cloneStrategyOverride: should we use a host-assisted copy even if
+# snapshots are available?
+#cloneStrategyOverride: ""
+
+# config is the CDIConfig at CDI level.
+#config: {}
+
+# imagePullPolicy describes a policy for if/when to pull a container
+# image.
+imagePullPolicy: IfNotPresent
+
+# infra contains the rules on which nodes CDI infrastructure pods will
+# be scheduled.
+infra:
+ nodeSelector:
+ kubernetes.io/os: linux
+ tolerations:
+ - key: CriticalAddonsOnly
+ operator: Exists
+
+# uninstallStrategy defines the state to leave CDI on uninstall.
+#uninstallStrategy: ""
+
+# workload restricts on which nodes CDI workload pods will be
+# scheduled.
+workload:
+ nodeSelector:
+ kubernetes.io/os: linux
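With these defaults, the cdi.yaml template above renders roughly the following CDI resource; this is a sketch in which the release name kud is an assumption and the chart labels are omitted:

apiVersion: cdi.kubevirt.io/v1beta1
kind: CDI
metadata:
  name: kud-cdi
spec:
  imagePullPolicy: IfNotPresent
  infra:
    nodeSelector:
      kubernetes.io/os: linux
    tolerations:
    - key: CriticalAddonsOnly
      operator: Exists
  workload:
    nodeSelector:
      kubernetes.io/os: linux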
diff --git a/kud/deployment_infra/helm/kubevirt-operator/.helmignore b/kud/deployment_infra/helm/kubevirt-operator/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/kubevirt-operator/Chart.yaml b/kud/deployment_infra/helm/kubevirt-operator/Chart.yaml
new file mode 100644
index 00000000..d515ea83
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v0.41.0
+description: |
+ KubeVirt is a virtual machine management add-on for Kubernetes.
+name: kubevirt-operator
+sources:
+ - https://github.com/kubevirt/kubevirt
+home: https://github.com/kubevirt/kubevirt
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/kubevirt-operator/crds/kubevirt.yaml b/kud/deployment_infra/helm/kubevirt-operator/crds/kubevirt.yaml
new file mode 100644
index 00000000..bdba1fac
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/crds/kubevirt.yaml
@@ -0,0 +1,2285 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ labels:
+ operator.kubevirt.io: ""
+ name: kubevirts.kubevirt.io
+spec:
+ group: kubevirt.io
+ names:
+ categories:
+ - all
+ kind: KubeVirt
+ plural: kubevirts
+ shortNames:
+ - kv
+ - kvs
+ singular: kubevirt
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: KubeVirt represents the object deploying all KubeVirt resources
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ certificateRotateStrategy:
+ properties:
+ selfSigned:
+ properties:
+ ca:
+ description: CA configuration. CA certs are kept in the CA bundle as long as they are valid
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's "notAfter" time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ caOverlapInterval:
+ description: Deprecated. Use CA.Duration and CA.RenewBefore instead
+ type: string
+ caRotateInterval:
+ description: Deprecated. Use CA.Duration instead
+ type: string
+ certRotateInterval:
+ description: Deprecated. Use Server.Duration instead
+ type: string
+ server:
+ description: Server configuration. Certs are rotated and discarded
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's "notAfter" time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ type: object
+ type: object
+ configuration:
+ description: holds kubevirt configurations. Same as the virt-configMap
+ properties:
+ cpuModel:
+ type: string
+ cpuRequest:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ developerConfiguration:
+ description: DeveloperConfiguration holds developer options
+ properties:
+ cpuAllocationRatio:
+ type: integer
+ featureGates:
+ items:
+ type: string
+ type: array
+ logVerbosity:
+ description: LogVerbosity sets log verbosity level of various components
+ properties:
+ nodeVerbosity:
+ additionalProperties:
+ type: integer
+ description: NodeVerbosity represents a map of nodes with a specific verbosity level
+ type: object
+ virtAPI:
+ type: integer
+ virtController:
+ type: integer
+ virtHandler:
+ type: integer
+ virtLauncher:
+ type: integer
+ virtOperator:
+ type: integer
+ type: object
+ memoryOvercommit:
+ type: integer
+ nodeSelectors:
+ additionalProperties:
+ type: string
+ type: object
+ pvcTolerateLessSpaceUpToPercent:
+ type: integer
+ useEmulation:
+ type: boolean
+ type: object
+ emulatedMachines:
+ items:
+ type: string
+ type: array
+ imagePullPolicy:
+ description: PullPolicy describes a policy for if/when to pull a container image
+ type: string
+ machineType:
+ type: string
+ memBalloonStatsPeriod:
+ format: int32
+ type: integer
+ migrations:
+ description: MigrationConfiguration holds migration options
+ properties:
+ allowAutoConverge:
+ type: boolean
+ allowPostCopy:
+ type: boolean
+ bandwidthPerMigration:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ completionTimeoutPerGiB:
+ format: int64
+ type: integer
+ nodeDrainTaintKey:
+ type: string
+ parallelMigrationsPerCluster:
+ format: int32
+ type: integer
+ parallelOutboundMigrationsPerNode:
+ format: int32
+ type: integer
+ progressTimeout:
+ format: int64
+ type: integer
+ unsafeMigrationOverride:
+ type: boolean
+ type: object
+ minCPUModel:
+ type: string
+ network:
+ description: NetworkConfiguration holds network options
+ properties:
+ defaultNetworkInterface:
+ type: string
+ permitBridgeInterfaceOnPodNetwork:
+ type: boolean
+ permitSlirpInterface:
+ type: boolean
+ type: object
+ obsoleteCPUModels:
+ additionalProperties:
+ type: boolean
+ type: object
+ ovmfPath:
+ type: string
+ permittedHostDevices:
+ description: PermittedHostDevices holds information about devices allowed for passthrough
+ properties:
+ mediatedDevices:
+ items:
+ description: MediatedHostDevice represents a host mediated device allowed for passthrough
+ properties:
+ externalResourceProvider:
+ type: boolean
+ mdevNameSelector:
+ type: string
+ resourceName:
+ type: string
+ required:
+ - mdevNameSelector
+ - resourceName
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ pciHostDevices:
+ items:
+ description: PciHostDevice represents a host PCI device allowed for passthrough
+ properties:
+ externalResourceProvider:
+ type: boolean
+ pciVendorSelector:
+ type: string
+ resourceName:
+ type: string
+ required:
+ - pciVendorSelector
+ - resourceName
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ selinuxLauncherType:
+ type: string
+ smbios:
+ properties:
+ family:
+ type: string
+ manufacturer:
+ type: string
+ product:
+ type: string
+ sku:
+ type: string
+ version:
+ type: string
+ type: object
+ supportedGuestAgentVersions:
+ description: deprecated
+ items:
+ type: string
+ type: array
+ type: object
+ customizeComponents:
+ properties:
+ patches:
+ items:
+ properties:
+ patch:
+ type: string
+ resourceName:
+ minLength: 1
+ type: string
+ resourceType:
+ minLength: 1
+ type: string
+ type:
+ type: string
+ required:
+ - patch
+ - resourceName
+ - resourceType
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ imagePullPolicy:
+ description: The ImagePullPolicy to use.
+ type: string
+ imageRegistry:
+ description: The image registry to pull the container images from. Defaults to the same registry the operator's container image is pulled from.
+ type: string
+ imageTag:
+ description: The image tag to use for the container images installed. Defaults to the same tag as the operator's container image.
+ type: string
+ infra:
+ description: selectors and tolerations that should apply to KubeVirt infrastructure components
+ properties:
+ nodePlacement:
+ description: nodePlacement describes scheduling configuration for specific KubeVirt components
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement, expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ monitorAccount:
+ description: The name of the Prometheus service account that needs read-access to KubeVirt endpoints. Defaults to prometheus-k8s
+ type: string
+ monitorNamespace:
+ description: The namespace Prometheus is deployed in. Defaults to openshift-monitor
+ type: string
+ productName:
+ description: Designate the apps.kubevirt.io/part-of label for KubeVirt components. Useful if KubeVirt is included as part of a product. If ProductName is not specified, the part-of label will be omitted.
+ type: string
+ productVersion:
+ description: Designate the apps.kubevirt.io/version label for KubeVirt components. Useful if KubeVirt is included as part of a product. If ProductVersion is not specified, KubeVirt's version will be used.
+ type: string
+ uninstallStrategy:
+ description: Specifies if kubevirt can be deleted if workloads are still present. This is mainly a precaution to avoid accidental data loss
+ type: string
+ workloadUpdateStrategy:
+ description: WorkloadUpdateStrategy defines at the cluster level how to handle automated workload updates
+ properties:
+ batchEvictionInterval:
+ description: "BatchEvictionInterval Represents the interval to wait before issuing the next batch of shutdowns \n Defaults to 1 minute"
+ type: string
+ batchEvictionSize:
+ description: "BatchEvictionSize Represents the number of VMIs that can be forced updated per the BatchShutdownInteral interval \n Defaults to 10"
+ type: integer
+ workloadUpdateMethods:
+ description: "WorkloadUpdateMethods defines the methods that can be used to disrupt workloads during automated workload updates. When multiple methods are present, the least disruptive method takes precedence over more disruptive methods. For example if both LiveMigrate and Shutdown methods are listed, only VMs which are not live migratable will be restarted/shutdown \n An empty list defaults to no automated workload updating"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ workloads:
+ description: selectors and tolerations that should apply to KubeVirt workloads
+ properties:
+ nodePlacement:
+ description: nodePlacement describes scheduling configuration for specific KubeVirt components
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods. It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ status:
+ description: KubeVirtStatus represents information pertaining to a KubeVirt deployment.
+ properties:
+ conditions:
+ items:
+ description: KubeVirtCondition represents a condition of a KubeVirt deployment
+ properties:
+ lastProbeTime:
+ format: date-time
+ nullable: true
+ type: string
+ lastTransitionTime:
+ format: date-time
+ nullable: true
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ generations:
+ items:
+ description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+ properties:
+ group:
+ description: group is the group of the thing you're tracking
+ type: string
+ hash:
+ description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
+ type: string
+ lastGeneration:
+ description: lastGeneration is the last generation of the workload controller involved
+ format: int64
+ type: integer
+ name:
+ description: name is the name of the thing you're tracking
+ type: string
+ namespace:
+ description: namespace is where the thing you're tracking is
+ type: string
+ resource:
+ description: resource is the resource type of the thing you're tracking
+ type: string
+ required:
+ - group
+ - lastGeneration
+ - name
+ - resource
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ observedDeploymentConfig:
+ type: string
+ observedDeploymentID:
+ type: string
+ observedKubeVirtRegistry:
+ type: string
+ observedKubeVirtVersion:
+ type: string
+ operatorVersion:
+ type: string
+ outdatedVirtualMachineInstanceWorkloads:
+ type: integer
+ phase:
+ description: KubeVirtPhase is a label for the phase of a KubeVirt deployment at the current time.
+ type: string
+ targetDeploymentConfig:
+ type: string
+ targetDeploymentID:
+ type: string
+ targetKubeVirtRegistry:
+ type: string
+ targetKubeVirtVersion:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
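+ # --- Illustrative example (not part of the generated schema) ---
+ # A minimal, hedged sketch of a KubeVirt custom resource exercising the
+ # placement and update fields defined by the version above; the "kubevirt"
+ # namespace and the label/taint values are assumptions for illustration,
+ # not values taken from this chart.
+ #
+ # apiVersion: kubevirt.io/v1
+ # kind: KubeVirt
+ # metadata:
+ #   name: kubevirt
+ #   namespace: kubevirt
+ # spec:
+ #   uninstallStrategy: BlockUninstallIfWorkloadsExist
+ #   workloadUpdateStrategy:
+ #     workloadUpdateMethods:
+ #       - LiveMigrate
+ #     batchEvictionSize: 10
+ #     batchEvictionInterval: "1m"
+ #   workloads:
+ #     nodePlacement:
+ #       nodeSelector:
+ #         kubernetes.io/arch: amd64
+ #       tolerations:
+ #         - key: dedicated
+ #           operator: Equal
+ #           value: kubevirt
+ #           effect: NoSchedule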
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ name: v1alpha3
+ schema:
+ openAPIV3Schema:
+ description: KubeVirt represents the object deploying all KubeVirt resources
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ certificateRotateStrategy:
+ properties:
+ selfSigned:
+ properties:
+ ca:
+ description: CA configuration. CA certs are kept in the CA bundle as long as they are valid
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's "notAfter" time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ caOverlapInterval:
+ description: Deprecated. Use CA.Duration and CA.RenewBefore instead
+ type: string
+ caRotateInterval:
+ description: Deprecated. Use CA.Duration instead
+ type: string
+ certRotateInterval:
+ description: Deprecated. Use Server.Duration instead
+ type: string
+ server:
+ description: Server configuration. Certs are rotated and discarded
+ properties:
+ duration:
+ description: The requested 'duration' (i.e. lifetime) of the Certificate.
+ type: string
+ renewBefore:
+ description: The amount of time before the currently issued certificate's "notAfter" time that we will begin to attempt to renew the certificate.
+ type: string
+ type: object
+ type: object
+ type: object
+ configuration:
+ description: holds kubevirt configurations. Same as the virt-configMap
+ properties:
+ cpuModel:
+ type: string
+ cpuRequest:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ developerConfiguration:
+ description: DeveloperConfiguration holds developer options
+ properties:
+ cpuAllocationRatio:
+ type: integer
+ featureGates:
+ items:
+ type: string
+ type: array
+ logVerbosity:
+ description: LogVerbosity sets log verbosity level of various components
+ properties:
+ nodeVerbosity:
+ additionalProperties:
+ type: integer
+ description: NodeVerbosity represents a map of nodes with a specific verbosity level
+ type: object
+ virtAPI:
+ type: integer
+ virtController:
+ type: integer
+ virtHandler:
+ type: integer
+ virtLauncher:
+ type: integer
+ virtOperator:
+ type: integer
+ type: object
+ memoryOvercommit:
+ type: integer
+ nodeSelectors:
+ additionalProperties:
+ type: string
+ type: object
+ pvcTolerateLessSpaceUpToPercent:
+ type: integer
+ useEmulation:
+ type: boolean
+ type: object
+ emulatedMachines:
+ items:
+ type: string
+ type: array
+ imagePullPolicy:
+ description: PullPolicy describes a policy for if/when to pull a container image
+ type: string
+ machineType:
+ type: string
+ memBalloonStatsPeriod:
+ format: int32
+ type: integer
+ migrations:
+ description: MigrationConfiguration holds migration options
+ properties:
+ allowAutoConverge:
+ type: boolean
+ allowPostCopy:
+ type: boolean
+ bandwidthPerMigration:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ completionTimeoutPerGiB:
+ format: int64
+ type: integer
+ nodeDrainTaintKey:
+ type: string
+ parallelMigrationsPerCluster:
+ format: int32
+ type: integer
+ parallelOutboundMigrationsPerNode:
+ format: int32
+ type: integer
+ progressTimeout:
+ format: int64
+ type: integer
+ unsafeMigrationOverride:
+ type: boolean
+ type: object
+ minCPUModel:
+ type: string
+ network:
+ description: NetworkConfiguration holds network options
+ properties:
+ defaultNetworkInterface:
+ type: string
+ permitBridgeInterfaceOnPodNetwork:
+ type: boolean
+ permitSlirpInterface:
+ type: boolean
+ type: object
+ obsoleteCPUModels:
+ additionalProperties:
+ type: boolean
+ type: object
+ ovmfPath:
+ type: string
+ permittedHostDevices:
+ description: PermittedHostDevices holds information about devices allowed for passthrough
+ properties:
+ mediatedDevices:
+ items:
+ description: MediatedHostDevice represents a host mediated device allowed for passthrough
+ properties:
+ externalResourceProvider:
+ type: boolean
+ mdevNameSelector:
+ type: string
+ resourceName:
+ type: string
+ required:
+ - mdevNameSelector
+ - resourceName
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ pciHostDevices:
+ items:
+ description: PciHostDevice represents a host PCI device allowed for passthrough
+ properties:
+ externalResourceProvider:
+ type: boolean
+ pciVendorSelector:
+ type: string
+ resourceName:
+ type: string
+ required:
+ - pciVendorSelector
+ - resourceName
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ selinuxLauncherType:
+ type: string
+ smbios:
+ properties:
+ family:
+ type: string
+ manufacturer:
+ type: string
+ product:
+ type: string
+ sku:
+ type: string
+ version:
+ type: string
+ type: object
+ supportedGuestAgentVersions:
+ description: deprecated
+ items:
+ type: string
+ type: array
+ type: object
+ customizeComponents:
+ properties:
+ patches:
+ items:
+ properties:
+ patch:
+ type: string
+ resourceName:
+ minLength: 1
+ type: string
+ resourceType:
+ minLength: 1
+ type: string
+ type:
+ type: string
+ required:
+ - patch
+ - resourceName
+ - resourceType
+ - type
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ imagePullPolicy:
+ description: The ImagePullPolicy to use.
+ type: string
+ imageRegistry:
+ description: The image registry to pull the container images from. Defaults to the same registry the operator's container image is pulled from.
+ type: string
+ imageTag:
+ description: The image tag to use for the container images installed. Defaults to the same tag as the operator's container image.
+ type: string
+ infra:
+ description: selectors and tolerations that should apply to KubeVirt infrastructure components
+ properties:
+ nodePlacement:
+ description: nodePlacement describes scheduling configuration for specific KubeVirt components
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector. See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ monitorAccount:
+ description: The name of the Prometheus service account that needs read-access to KubeVirt endpoints Defaults to prometheus-k8s
+ type: string
+ monitorNamespace:
+ description: The namespace Prometheus is deployed in Defaults to openshift-monitor
+ type: string
+ productName:
+ description: Designate the apps.kubevirt.io/part-of label for KubeVirt components. Useful if KubeVirt is included as part of a product. If ProductName is not specified, the part-of label will be omitted.
+ type: string
+ productVersion:
+ description: Designate the apps.kubevirt.io/version label for KubeVirt components. Useful if KubeVirt is included as part of a product. If ProductVersion is not specified, KubeVirt's version will be used.
+ type: string
+ uninstallStrategy:
+ description: Specifies if kubevirt can be deleted if workloads are still present. This is mainly a precaution to avoid accidental data loss
+ type: string
+ workloadUpdateStrategy:
+ description: WorkloadUpdateStrategy defines at the cluster level how to handle automated workload updates
+ properties:
+ batchEvictionInterval:
+ description: "BatchEvictionInterval Represents the interval to wait before issuing the next batch of shutdowns \n Defaults to 1 minute"
+ type: string
+ batchEvictionSize:
+ description: "BatchEvictionSize Represents the number of VMIs that can be forced updated per the BatchShutdownInteral interval \n Defaults to 10"
+ type: integer
+ workloadUpdateMethods:
+ description: "WorkloadUpdateMethods defines the methods that can be used to disrupt workloads during automated workload updates. When multiple methods are present, the least disruptive method takes precedence over more disruptive methods. For example if both LiveMigrate and Shutdown methods are listed, only VMs which are not live migratable will be restarted/shutdown \n An empty list defaults to no automated workload updating"
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ workloads:
+ description: selectors and tolerations that should apply to KubeVirt workloads
+ properties:
+ nodePlacement:
+ description: nodePlacement decsribes scheduling confiuguration for specific KubeVirt components
+ properties:
+ affinity:
+ description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+ items:
+ description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ weight:
+ description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms. The terms are ORed.
+ items:
+ description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements by node's labels.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements by node's fields.
+ items:
+ description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector applies to.
+ type: string
+ operator:
+ description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ type: string
+ values:
+ description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: A label query over a set of resources, in this case pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector applies to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ namespaces:
+ description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector'
+ type: object
+ tolerations:
+ description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones.
+ items:
+ description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ type: object
+ type: object
+ type: object
+ status:
+ description: KubeVirtStatus represents information pertaining to a KubeVirt deployment.
+ properties:
+ conditions:
+ items:
+ description: KubeVirtCondition represents a condition of a KubeVirt deployment
+ properties:
+ lastProbeTime:
+ format: date-time
+ nullable: true
+ type: string
+ lastTransitionTime:
+ format: date-time
+ nullable: true
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - status
+ - type
+ type: object
+ type: array
+ generations:
+ items:
+ description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+ properties:
+ group:
+ description: group is the group of the thing you're tracking
+ type: string
+ hash:
+ description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
+ type: string
+ lastGeneration:
+ description: lastGeneration is the last generation of the workload controller involved
+ format: int64
+ type: integer
+ name:
+ description: name is the name of the thing you're tracking
+ type: string
+ namespace:
+ description: namespace is where the thing you're tracking is
+ type: string
+ resource:
+ description: resource is the resource type of the thing you're tracking
+ type: string
+ required:
+ - group
+ - lastGeneration
+ - name
+ - resource
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ observedDeploymentConfig:
+ type: string
+ observedDeploymentID:
+ type: string
+ observedKubeVirtRegistry:
+ type: string
+ observedKubeVirtVersion:
+ type: string
+ operatorVersion:
+ type: string
+ outdatedVirtualMachineInstanceWorkloads:
+ type: integer
+ phase:
+ description: KubeVirtPhase is a label for the phase of a KubeVirt deployment at the current time.
+ type: string
+ targetDeploymentConfig:
+ type: string
+ targetDeploymentID:
+ type: string
+ targetKubeVirtRegistry:
+ type: string
+ targetKubeVirtVersion:
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/_helpers.tpl b/kud/deployment_infra/helm/kubevirt-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..369224de
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/_helpers.tpl
@@ -0,0 +1,70 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kubevirt-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kubevirt-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kubevirt-operator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "kubevirt-operator.labels" -}}
+helm.sh/chart: {{ include "kubevirt-operator.chart" . }}
+{{ include "kubevirt-operator.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kubevirt-operator.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "kubevirt-operator.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kubevirt-operator.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "kubevirt-operator.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the priority class to use
+*/}}
+{{- define "kubevirt-operator.priorityClassName" -}}
+{{ default (include "kubevirt-operator.fullname" .) .Values.priorityClass.name }}
+{{- end -}}
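+
+{{/*
+Illustrative note (not part of the upstream chart scaffold): with the default
+values.yaml in this change, "kubevirt-operator.serviceAccountName" resolves to
+"kubevirt-operator" and "kubevirt-operator.priorityClassName" resolves to
+"kubevirt-cluster-critical"; if those values were left empty, both helpers
+would fall back to the chart fullname, e.g. "<release>-kubevirt-operator".
+*/}}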
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrole.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrole.yaml
new file mode 100644
index 00000000..ae8faf3b
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrole.yaml
@@ -0,0 +1,668 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kubevirt.io:operator
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ operator.kubevirt.io: ""
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+ - apiGroups:
+ - kubevirt.io
+ resources:
+ - kubevirts
+ verbs:
+ - get
+ - delete
+ - create
+ - update
+ - patch
+ - list
+ - watch
+ - deletecollection
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: kubevirt-operator
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: ""
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - kubevirts
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ - services
+ - endpoints
+ - pods/exec
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - patch
+ - delete
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - clusterroles
+ - clusterrolebindings
+ - roles
+ - rolebindings
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+ - update
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - patch
+- apiGroups:
+ - security.openshift.io
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+- apiGroups:
+ - security.openshift.io
+ resourceNames:
+ - privileged
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - security.openshift.io
+ resourceNames:
+ - kubevirt-handler
+ - kubevirt-controller
+ resources:
+ - securitycontextconstraints
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - delete
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - update
+ - patch
+- apiGroups:
+ - apiregistration.k8s.io
+ resources:
+ - apiservices
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - update
+ - patch
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - servicemonitors
+ - prometheusrules
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - delete
+ - update
+ - patch
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachines/start
+ - virtualmachines/stop
+ - virtualmachines/restart
+ verbs:
+ - put
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - delete
+ - patch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ - virtualmachineinstances
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines/status
+ verbs:
+ - patch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachineinstancemigrations
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - patch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachineinstancepresets
+ verbs:
+ - watch
+ - list
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - limitranges
+ verbs:
+ - watch
+ - list
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - kubevirts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - snapshot.kubevirt.io
+ resources:
+ - virtualmachinesnapshots
+ - virtualmachinerestores
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - configmaps
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
+ - update
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - update
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - pods/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - pods/eviction
+ verbs:
+ - create
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - daemonsets
+ verbs:
+ - list
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+ - patch
+- apiGroups:
+ - snapshot.kubevirt.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachineinstances/addvolume
+ - virtualmachineinstances/removevolume
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - cdi.kubevirt.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - k8s.cni.cncf.io
+ resources:
+ - network-attachment-definitions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshotclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - delete
+- apiGroups:
+ - storage.k8s.io
+ resources:
+ - storageclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachineinstances
+ verbs:
+ - update
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - patch
+ - list
+ - watch
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - kubevirts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - version
+ verbs:
+ - get
+ - list
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachineinstances/console
+ - virtualmachineinstances/vnc
+ verbs:
+ - get
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachineinstances/pause
+ - virtualmachineinstances/unpause
+ - virtualmachineinstances/addvolume
+ - virtualmachineinstances/removevolume
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachines/start
+ - virtualmachines/stop
+ - virtualmachines/restart
+ verbs:
+ - update
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ - virtualmachineinstances
+ - virtualmachineinstancepresets
+ - virtualmachineinstancereplicasets
+ - virtualmachineinstancemigrations
+ verbs:
+ - get
+ - delete
+ - create
+ - update
+ - patch
+ - list
+ - watch
+ - deletecollection
+- apiGroups:
+ - snapshot.kubevirt.io
+ resources:
+ - virtualmachinesnapshots
+ - virtualmachinesnapshotcontents
+ - virtualmachinerestores
+ verbs:
+ - get
+ - delete
+ - create
+ - update
+ - patch
+ - list
+ - watch
+ - deletecollection
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachineinstances/console
+ - virtualmachineinstances/vnc
+ verbs:
+ - get
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachineinstances/pause
+ - virtualmachineinstances/unpause
+ - virtualmachineinstances/addvolume
+ - virtualmachineinstances/removevolume
+ verbs:
+ - get
+ - update
+- apiGroups:
+ - subresources.kubevirt.io
+ resources:
+ - virtualmachines/start
+ - virtualmachines/stop
+ - virtualmachines/restart
+ verbs:
+ - update
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ - virtualmachineinstances
+ - virtualmachineinstancepresets
+ - virtualmachineinstancereplicasets
+ - virtualmachineinstancemigrations
+ verbs:
+ - get
+ - delete
+ - create
+ - update
+ - patch
+ - list
+ - watch
+- apiGroups:
+ - snapshot.kubevirt.io
+ resources:
+ - virtualmachinesnapshots
+ - virtualmachinesnapshotcontents
+ - virtualmachinerestores
+ verbs:
+ - get
+ - delete
+ - create
+ - update
+ - patch
+ - list
+ - watch
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - kubevirts
+ verbs:
+ - get
+ - list
+- apiGroups:
+ - kubevirt.io
+ resources:
+ - virtualmachines
+ - virtualmachineinstances
+ - virtualmachineinstancepresets
+ - virtualmachineinstancereplicasets
+ - virtualmachineinstancemigrations
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - snapshot.kubevirt.io
+ resources:
+ - virtualmachinesnapshots
+ - virtualmachinesnapshotcontents
+ - virtualmachinerestores
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrolebinding.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..a1e5a642
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/clusterrolebinding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: kubevirt-operator
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: ""
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kubevirt-operator
+subjects:
+- kind: ServiceAccount
+ name: {{ include "kubevirt-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/deployment.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/deployment.yaml
new file mode 100644
index 00000000..1cd64725
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/deployment.yaml
@@ -0,0 +1,86 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "kubevirt-operator.fullname" . }}
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: virt-operator
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ {{- include "kubevirt-operator.selectorLabels" . | nindent 6 }}
+ kubevirt.io: virt-operator
+ strategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ""
+ labels:
+ {{- include "kubevirt-operator.selectorLabels" . | nindent 8 }}
+ kubevirt.io: virt-operator
+ prometheus.kubevirt.io: ""
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "kubevirt-operator.serviceAccountName" . }}
+ containers:
+ - name: virt-operator
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command:
+ - virt-operator
+ - --port
+ - "8443"
+ - -v
+ - "2"
+ env:
+ - name: OPERATOR_IMAGE
+ value: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
+ - name: WATCH_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.annotations['olm.targetNamespaces']
+ ports:
+ - containerPort: 8443
+ name: metrics
+ protocol: TCP
+ - containerPort: 8444
+ name: webhooks
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /metrics
+ port: 8443
+ scheme: HTTPS
+ initialDelaySeconds: 5
+ timeoutSeconds: 10
+ resources:
+ {{- toYaml .Values.resources | nindent 10 }}
+ volumeMounts:
+ - mountPath: /etc/virt-operator/certificates
+ name: kubevirt-operator-certs
+ readOnly: true
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- toYaml . | nindent 6 }}
+ {{- end }}
+ priorityClassName: {{ include "kubevirt-operator.priorityClassName" . }}
+ securityContext:
+ {{- toYaml .Values.securityContext | nindent 8 }}
+ volumes:
+ - name: kubevirt-operator-certs
+ secret:
+ optional: true
+ secretName: kubevirt-operator-certs
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/priorityclass.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/priorityclass.yaml
new file mode 100644
index 00000000..c3e533dd
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/priorityclass.yaml
@@ -0,0 +1,7 @@
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+ name: {{ include "kubevirt-operator.priorityClassName" . }}
+value: 1000000000
+globalDefault: false
+description: "This priority class should be used for core kubevirt components only."
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/role.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/role.yaml
new file mode 100644
index 00000000..42aa2d75
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/role.yaml
@@ -0,0 +1,30 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: kubevirt-operator
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: ""
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - patch
+ - delete
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/rolebinding.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/rolebinding.yaml
new file mode 100644
index 00000000..89489f9a
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/rolebinding.yaml
@@ -0,0 +1,15 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: kubevirt-operator-rolebinding
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: ""
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: kubevirt-operator
+subjects:
+- kind: ServiceAccount
+ name: {{ include "kubevirt-operator.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
diff --git a/kud/deployment_infra/helm/kubevirt-operator/templates/serviceaccount.yaml b/kud/deployment_infra/helm/kubevirt-operator/templates/serviceaccount.yaml
new file mode 100644
index 00000000..99703a03
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/templates/serviceaccount.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "kubevirt-operator.serviceAccountName" . }}
+ labels:
+ {{- include "kubevirt-operator.labels" . | nindent 4 }}
+ kubevirt.io: ""
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
diff --git a/kud/deployment_infra/helm/kubevirt-operator/values.yaml b/kud/deployment_infra/helm/kubevirt-operator/values.yaml
new file mode 100644
index 00000000..3095ffd2
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt-operator/values.yaml
@@ -0,0 +1,49 @@
+image:
+ repository: quay.io/kubevirt/virt-operator
+  # This should be set to 'IfNotPresent' for released versions
+ pullPolicy: IfNotPresent
+  # tag: if defined, the given image tag will be used; otherwise Chart.AppVersion is used
+ # tag
+imagePullSecrets: []
+
+serviceAccount:
+ # Specifies whether a service account should be created
+ create: true
+ # Annotations to add to the service account
+ annotations: {}
+ # The name of the service account to use.
+ # If not set and create is true, a name is generated using the fullname template
+ name: kubevirt-operator
+
+priorityClass:
+ name: kubevirt-cluster-critical
+
+nameOverride: ""
+fullnameOverride: ""
+
+resources:
+ requests:
+ cpu: 10m
+ memory: 150Mi
+
+securityContext:
+ runAsNonRoot: true
+
+nodeSelector: {}
+
+affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: kubevirt.io
+ operator: In
+ values:
+ - virt-operator
+ topologyKey: kubernetes.io/hostname
+ weight: 1
+
+tolerations:
+- key: CriticalAddonsOnly
+ operator: Exists
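+
+# Illustrative usage (not part of the chart; namespace and release name are
+# assumptions): the operator chart can be installed directly from this repo,
+# e.g.
+#   helm install kubevirt-operator kud/deployment_infra/helm/kubevirt-operator \
+#     --namespace kubevirt --create-namespace
+# The companion "kubevirt" chart then creates the KubeVirt CR that this
+# operator reconciles.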
diff --git a/kud/deployment_infra/helm/kubevirt/Chart.yaml b/kud/deployment_infra/helm/kubevirt/Chart.yaml
new file mode 100644
index 00000000..75d31626
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt/Chart.yaml
@@ -0,0 +1,24 @@
+# Copyright 2021 Intel Corporation, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v2
+appVersion: v0.41.0
+description: |
+ KubeVirt is a virtual machine management add-on for Kubernetes.
+name: kubevirt
+sources:
+ - https://github.com/kubevirt/kubevirt
+home: https://github.com/kubevirt/kubevirt
+type: application
+version: 0.1.0
diff --git a/kud/deployment_infra/helm/kubevirt/templates/_helpers.tpl b/kud/deployment_infra/helm/kubevirt/templates/_helpers.tpl
new file mode 100644
index 00000000..3935c906
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kubevirt.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kubevirt.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kubevirt.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "kubevirt.labels" -}}
+helm.sh/chart: {{ include "kubevirt.chart" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
diff --git a/kud/deployment_infra/helm/kubevirt/templates/kubevirt.yaml b/kud/deployment_infra/helm/kubevirt/templates/kubevirt.yaml
new file mode 100644
index 00000000..c8f9d60a
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt/templates/kubevirt.yaml
@@ -0,0 +1,55 @@
+apiVersion: kubevirt.io/v1
+kind: KubeVirt
+metadata:
+ name: {{ include "kubevirt.fullname" . }}
+ labels:
+ {{- include "kubevirt.labels" . | nindent 4 }}
+spec:
+ {{- with .Values.certificateRotateStrategy }}
+ certificateRotateStrategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.configuration }}
+ configuration:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.customizeComponents }}
+ customizeComponents:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.imagePullPolicy }}
+ imagePullPolicy: {{ . }}
+ {{- end }}
+ {{- with .Values.imageRegistry }}
+ imageRegistry: {{ . }}
+ {{- end }}
+ {{- with .Values.imageTag }}
+ imageTag: {{ . }}
+ {{- end }}
+ {{- with .Values.infra }}
+ infra:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.monitorAccount }}
+ monitorAccount: {{ . }}
+ {{- end }}
+ {{- with .Values.monitorNamespace }}
+ monitorNamespace: {{ . }}
+ {{- end }}
+ {{- with .Values.productName }}
+ productName: {{ . }}
+ {{- end }}
+ {{- with .Values.productVersion }}
+ productVersion: {{ . }}
+ {{- end }}
+ {{- with .Values.uninstallStrategy }}
+ uninstallStrategy: {{ . }}
+ {{- end }}
+ {{- with .Values.workloadUpdateStrategy }}
+ workloadUpdateStrategy:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+ {{- with .Values.workloads }}
+ workloads:
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
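+{{/*
+Illustrative verification step (commands are assumptions, not part of the
+template): once the chart is installed, the KubeVirt deployment can be
+checked with
+  kubectl get kubevirt -n <namespace> -o jsonpath='{.items[0].status.phase}'
+which should eventually report "Deployed".
+*/}}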
diff --git a/kud/deployment_infra/helm/kubevirt/values.yaml b/kud/deployment_infra/helm/kubevirt/values.yaml
new file mode 100644
index 00000000..c08df7ea
--- /dev/null
+++ b/kud/deployment_infra/helm/kubevirt/values.yaml
@@ -0,0 +1,61 @@
+nameOverride: ""
+fullnameOverride: ""
+
+certificateRotateStrategy: {}
+
+# configuration holds kubevirt configurations. Same as the virt-configMap.
+configuration:
+ developerConfiguration:
+ featureGates: []
+
+customizeComponents: {}
+
+# imagePullPolicy is the image pull policy for the KubeVirt container images.
+imagePullPolicy: IfNotPresent
+
+# imageRegistry is the image registry to pull the container images
+# from. Defaults to the same registry the operator's container image
+# is pulled from.
+#imageRegistry: ""
+
+# imageTag is the tag to use for the container images
+# installed. Defaults to the same tag as the operator's container
+# image.
+#imageTag: ""
+
+# infra holds the selectors and tolerations that should apply to
+# KubeVirt infrastructure components.
+#infra: {}
+
+# monitorAccount is the name of the Prometheus service account that
+# needs read-access to KubeVirt endpoints. Defaults to prometheus-k8s.
+#monitorAccount: ""
+
+# monitorNamespace is the namespace Prometheus is deployed
+# in. Defaults to openshift-monitor.
+#monitorNamespace: ""
+
+# productName designates the apps.kubevirt.io/part-of label for
+# KubeVirt components. Useful if KubeVirt is included as part of a
+# product. If ProductName is not specified, the part-of label will be
+# omitted.
+#productName: ""
+
+# productVersion designates the apps.kubevirt.io/version label for
+# KubeVirt components. Useful if KubeVirt is included as part of a
+# product. If ProductVersion is not specified, KubeVirt's version will
+# be used.
+#productVersion: ""
+
+# uninstallStrategy specifies whether KubeVirt can be deleted while
+# workloads are still present. This is mainly a precaution to avoid
+# accidental data loss.
+#uninstallStrategy: ""
+
+# workloadUpdateStrategy defines at the cluster level how to handle
+# automated workload updates.
+workloadUpdateStrategy: {}
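+# For example (illustrative only; check the kubevirt.io/v1 API for the
+# methods supported by your KubeVirt version):
+#workloadUpdateStrategy:
+#  workloadUpdateMethods:
+#  - LiveMigrate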
+
+# workloads contains the selectors and tolerations that should apply
+# to KubeVirt workloads.
+#workloads: {}
diff --git a/kud/deployment_infra/helm/sdewan_cnf/.helmignore b/kud/deployment_infra/helm/sdewan_cnf/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/kud/deployment_infra/helm/sdewan_cnf/Chart.yaml b/kud/deployment_infra/helm/sdewan_cnf/Chart.yaml
new file mode 100644
index 00000000..a7221426
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/Chart.yaml
@@ -0,0 +1,21 @@
+#/*
+# * Copyright 2021 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+apiVersion: v1
+name: sdewan_cnf
+description: A Helm chart for Kubernetes - SDEWAN CNF
+version: 0.1.0
+appVersion: "1.0"
diff --git a/kud/deployment_infra/helm/sdewan_cnf/templates/_helpers.tpl b/kud/deployment_infra/helm/sdewan_cnf/templates/_helpers.tpl
new file mode 100644
index 00000000..d3e0f7c4
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/templates/_helpers.tpl
@@ -0,0 +1,79 @@
+{{/*
+# * Copyright 2021 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "cnf.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "cnf.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "cnf.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "cnf.labels" -}}
+helm.sh/chart: {{ include "cnf.chart" . }}
+{{ include "cnf.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "cnf.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "cnf.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "cnf.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "cnf.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/kud/deployment_infra/helm/sdewan_cnf/templates/cm.yaml b/kud/deployment_infra/helm/sdewan_cnf/templates/cm.yaml
new file mode 100644
index 00000000..29660add
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/templates/cm.yaml
@@ -0,0 +1,80 @@
+#/*
+# * Copyright 2021 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+apiVersion: v1
+data:
+ entrypoint.sh: |-
+ #!/bin/bash
+    # Exit on errors and trace each command.
+ set -ex
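+    # Build the OpenWrt network and mwan3 configuration for each
+    # interface listed in the nfn-network pod annotation (read from the
+    # downward API file /tmp/podinfo/annotations), then start the CNF
+    # services.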
+ echo "" > /etc/config/network
+ cat > /etc/config/mwan3 <<EOF
+ config globals 'globals'
+ option mmx_mask '0x3F00'
+ option local_source 'lan'
+ EOF
+ eval "networks=$(grep nfn-network /tmp/podinfo/annotations | awk -F '=' '{print $2}')"
+ for net in $(echo -e $networks | jq -c ".interface[]")
+ do
+ interface=$(echo $net | jq -r .interface)
+ ipaddr=$(ifconfig $interface | awk '/inet/{print $2}' | cut -f2 -d ":" | awk 'NR==1 {print $1}')
+ vif="$interface"
+ netmask=$(ifconfig $interface | awk '/inet/{print $4}'| cut -f2 -d ":" | head -1)
+ cat >> /etc/config/network <<EOF
+ config interface '$vif'
+ option ifname '$interface'
+ option proto 'static'
+ option ipaddr '$ipaddr'
+ option netmask '$netmask'
+ EOF
+ cat >> /etc/config/mwan3 <<EOF
+ config interface '$vif'
+ option enabled '1'
+ option family 'ipv4'
+ option reliability '2'
+ option count '1'
+ option timeout '2'
+ option failure_latency '1000'
+ option recovery_latency '500'
+ option failure_loss '20'
+ option recovery_loss '5'
+ option interval '5'
+ option down '3'
+ option up '8'
+ EOF
+ done
+ /sbin/procd &
+ /sbin/ubusd &
+ iptables -S
+ sleep 1
+ /etc/init.d/rpcd start
+ /etc/init.d/dnsmasq start
+ /etc/init.d/network start
+ /etc/init.d/odhcpd start
+ /etc/init.d/uhttpd start
+ /etc/init.d/log start
+ /etc/init.d/dropbear start
+ /etc/init.d/mwan3 restart
+ /etc/init.d/firewall restart
+ sysctl -w net.ipv4.conf.all.rp_filter=1
+ sysctl -w net.ipv4.ip_forward=1
+ echo "Entering sleep... (success)"
+ # Sleep forever.
+ while true; do sleep 100; done
+kind: ConfigMap
+metadata:
+ name: sdewan-sh
+ namespace: default
diff --git a/kud/deployment_infra/helm/sdewan_cnf/templates/deployment.yaml b/kud/deployment_infra/helm/sdewan_cnf/templates/deployment.yaml
new file mode 100644
index 00000000..90c81380
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/templates/deployment.yaml
@@ -0,0 +1,94 @@
+#/*
+# * Copyright 2021 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ .Values.metadata.name }}
+ namespace: {{ .Values.metadata.namespace }}
+ labels:
+ sdewanPurpose: {{ .Values.metadata.labels }}
+spec:
+ progressDeadlineSeconds: {{ .Values.spec.progressDeadlineSeconds }}
+ replicas: {{ .Values.spec.replicas }}
+ selector:
+ matchLabels:
+ sdewanPurpose: {{ .Values.metadata.labels }}
+ strategy:
+ rollingUpdate:
+ maxSurge: {{ .Values.strategy.maxSurge }}
+ maxUnavailable: {{ .Values.strategy.maxUnavailable }}
+ type: RollingUpdate
+ template:
+ metadata:
+ annotations:
+ k8s.plugin.opnfv.org/nfn-network: |-
+ { "type": "ovn4nfv", "interface": [
+ {{- range .Values.nfn }} {{- with . }}
+ {
+ "defaultGateway": "{{- .defaultGateway -}}",
+ "interface": "{{- .interface -}}",
+ "ipAddress": "{{- .ipAddress -}}",
+ "name": "{{- .name -}}"
+ } {{- .separate -}}
+ {{- end }} {{- end }}
+ ]}
+ k8s.v1.cni.cncf.io/networks: '[{ "name": "ovn-networkobj"}]'
+ labels:
+ sdewanPurpose: {{ .Values.metadata.labels }}
+ spec:
+ containers:
+ - command:
+ - /usr/bin/sudo
+ - /bin/sh
+ - /tmp/sdewan/entrypoint.sh
+ image: {{ .Values.containers.image }}
+ imagePullPolicy: {{ .Values.containers.imagePullPolicy }}
+ name: {{ .Values.containers.name }}
+ readinessProbe:
+ failureThreshold: 5
+ httpGet:
+ path: /
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ successThreshold: 1
+ timeoutSeconds: 1
+ securityContext:
+ privileged: true
+ procMount: Default
+ volumeMounts:
+ - mountPath: /tmp/sdewan
+ name: sdewan-sh
+ readOnly: true
+ - mountPath: /tmp/podinfo
+ name: podinfo
+ readOnly: true
+ nodeSelector:
+ {{ .Values.labelName }}: "{{ .Values.labelValue }}"
+ restartPolicy: {{ .Values.restartPolicy }}
+ volumes:
+ - configMap:
+ defaultMode: 420
+ name: sdewan-sh
+ name: sdewan-sh
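+      # podinfo exposes the pod annotations (including nfn-network) to
+      # entrypoint.sh via the downward API.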
+ - name: podinfo
+ downwardAPI:
+ items:
+ - path: "annotations"
+ fieldRef:
+ fieldPath: metadata.annotations
diff --git a/kud/deployment_infra/helm/sdewan_cnf/values.yaml b/kud/deployment_infra/helm/sdewan_cnf/values.yaml
new file mode 100644
index 00000000..c882378e
--- /dev/null
+++ b/kud/deployment_infra/helm/sdewan_cnf/values.yaml
@@ -0,0 +1,54 @@
+#/*
+# * Copyright 2021 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Default values for cnf.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+metadata:
+ name: sdewan-cnf
+ namespace: default
+ labels: sdewan-cnf
+
+spec:
+ progressDeadlineSeconds: 600
+ replicas: 1
+
+strategy:
+ maxSurge: 25%
+ maxUnavailable: 25%
+
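+# nfn lists the ovn4nfv interfaces rendered into the
+# k8s.plugin.opnfv.org/nfn-network pod annotation by the deployment
+# template. separate is the JSON separator emitted after each entry:
+# "," for every entry except the last, which uses "".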
+nfn:
+ - defaultGateway: false
+ interface: net2
+ ipAddress: 10.10.10.15
+ name: pnetwork
+ separate: ","
+ - defaultGateway: false
+ interface: net0
+ ipAddress: 172.16.30.10
+ name: ovn-network
+ separate: ""
+
+containers:
+ image: integratedcloudnative/openwrt:0.3.1
+ imagePullPolicy: IfNotPresent
+ name: sdewan
+
+labelName: "node-role.kubernetes.io/master"
+labelValue: ""
+
+restartPolicy: Always
diff --git a/kud/deployment_infra/profiles/cdi-operator/manifest.yaml b/kud/deployment_infra/profiles/cdi-operator/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/cdi-operator/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/cdi-operator/override_values.yaml b/kud/deployment_infra/profiles/cdi-operator/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/cdi-operator/override_values.yaml
diff --git a/kud/deployment_infra/profiles/cdi/manifest.yaml b/kud/deployment_infra/profiles/cdi/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/cdi/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/cdi/override_values.yaml b/kud/deployment_infra/profiles/cdi/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/cdi/override_values.yaml
diff --git a/kud/deployment_infra/profiles/kubevirt-operator/manifest.yaml b/kud/deployment_infra/profiles/kubevirt-operator/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/kubevirt-operator/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/kubevirt-operator/override_values.yaml b/kud/deployment_infra/profiles/kubevirt-operator/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/kubevirt-operator/override_values.yaml
diff --git a/kud/deployment_infra/profiles/kubevirt/manifest.yaml b/kud/deployment_infra/profiles/kubevirt/manifest.yaml
new file mode 100644
index 00000000..4d381d02
--- /dev/null
+++ b/kud/deployment_infra/profiles/kubevirt/manifest.yaml
@@ -0,0 +1,4 @@
+---
+version: v1
+type:
+ values: "override_values.yaml"
diff --git a/kud/deployment_infra/profiles/kubevirt/override_values.yaml b/kud/deployment_infra/profiles/kubevirt/override_values.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/kud/deployment_infra/profiles/kubevirt/override_values.yaml
diff --git a/kud/hosting_providers/containerized/addons/README.md.tmpl b/kud/hosting_providers/containerized/addons/README.md.tmpl
index 8ab16104..0cef7923 100644
--- a/kud/hosting_providers/containerized/addons/README.md.tmpl
+++ b/kud/hosting_providers/containerized/addons/README.md.tmpl
@@ -1,43 +1,47 @@
# Installing KUD addons with emcoctl
-1. Customize values.yaml and values-resources.yaml as needed
+1. Customize values.yaml as needed
To create a customized profile for a specific addon, edit the profile
as needed, and then (for example, cpu-manager):
```
tar -czf /opt/kud/multi-cluster/addons/cpu-manager.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/helm .
- tar -czf /opt/kud/multi-cluster/addons/collectd_profile.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/profile .
+ tar -czf /opt/kud/multi-cluster/addons/cpu-manager_profile.tar.gz -C /opt/kud/multi-cluster/addons/cpu-manager/profile .
```
2. Create prerequisites to deploy addons
-Apply prerequisites.yaml. This step is optional. If there are
-existing resources in the cluster, it is sufficient to customize
-values.yaml with the values of those resources. The supplied
-prequisites.yaml creates controllers, one project, one cluster, and
-one logical cloud.
+Apply prerequisites. This step is optional. If there are existing
+resources in the cluster, it is sufficient to customize values.yaml
+with the values of those resources. The supplied YAML files create
+the controllers, one or more clusters, one project, and one logical
+cloud.
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f 00-controllers.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f 01-cluster.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f 02-project.yaml -v values.yaml\`
3. Deploy addons
-Apply addons.yaml. This deploys the addons listed in the \`Addons\`
-value in values.yaml.
+This deploys the addons listed in the \`Addons\` and
+\`AddonResources\` values in values.yaml.
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml\`
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f 03-addons-app.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f 04-addon-resources-app.yaml -v values.yaml\`
# Uninstalling KUD addons with emcoctl
1. Delete addons
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml\`
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f 04-addon-resources-app.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f 03-addons-app.yaml -v values.yaml\`
2. Cleanup prerequisites
- \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f 02-project.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f 01-cluster.yaml -v values.yaml\`
+ \`$ /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f 00-controllers.yaml -v values.yaml\`
#### NOTE: Known issue: deletion of the resources fails sometimes as
some resources can't be deleted before others are deleted. This can
diff --git a/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl b/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
deleted file mode 100644
index ed568238..00000000
--- a/kud/hosting_providers/containerized/addons/values-resources.yaml.tmpl
+++ /dev/null
@@ -1,19 +0,0 @@
-HostIP: ${HOST_IP}
-KubeConfig: ${KUBE_PATH}
-PackagesPath: ${PACKAGES_PATH}
-ProjectName: proj1
-RsyncPort: 30441
-GacPort: 30493
-OvnPort: 30473
-DtcPort: 30483
-ClusterProvider: provider1
-Cluster1: cluster1
-ClusterLabel: edge-cluster
-LogicalCloud: default
-Apps:
-- sriov-network
-CompositeApp: addon-resources
-CompositeProfile: addon-resources-profile
-DeploymentIntentGroup: addon-resources-deployment-intent-group
-DeploymentIntent: addon-resources-deployment-intent
-GenericPlacementIntent: addon-resources-placement-intent
diff --git a/kud/hosting_providers/containerized/addons/values.yaml.tmpl b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
index 62936beb..f4f0b76c 100644
--- a/kud/hosting_providers/containerized/addons/values.yaml.tmpl
+++ b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
@@ -1,24 +1,36 @@
HostIP: ${HOST_IP}
-KubeConfig: ${KUBE_PATH}
-PackagesPath: ${PACKAGES_PATH}
-ProjectName: proj1
RsyncPort: 30441
GacPort: 30493
OvnPort: 30473
DtcPort: 30483
-ClusterProvider: provider1
-Cluster1: cluster1
-ClusterLabel: edge-cluster
+
+ClusterProvider: kud
+ClustersLabel: kud-cluster
+Clusters:
+- KubeConfig: ${KUBE_PATH}
+ Name: cluster
+
+ProjectName: kud
LogicalCloud: default
-Apps:
+
+PackagesPath: ${PACKAGES_PATH}
+AddonsApp: addons
+AddonsProfile: addons-profile
+AddonsDeploymentIntentGroup: addons-deployment-intent-group
+AddonsDeploymentIntent: addons-deployment-intent
+AddonsPlacementIntent: addons-placement-intent
+Addons:
- multus-cni
- ovn4nfv
- node-feature-discovery
- sriov-network-operator
- qat-device-plugin
- cpu-manager
-CompositeApp: addons
-CompositeProfile: addons-profile
-DeploymentIntentGroup: addons-deployment-intent-group
-DeploymentIntent: addons-deployment-intent
-GenericPlacementIntent: addons-placement-intent
+
+AddonResourcesApp: addon-resources
+AddonResourcesProfile: addon-resources-profile
+AddonResourcesDeploymentIntentGroup: addon-resources-deployment-intent-group
+AddonResourcesDeploymentIntent: addon-resources-deployment-intent
+AddonResourcesPlacementIntent: addon-resources-placement-intent
+AddonResources:
+- sriov-network
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index 7365a14f..844c154d 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -221,7 +221,7 @@ function install_host_artifacts {
local -r host_addons_dir="${host_dir}/addons"
local -r host_artifacts_dir="${host_dir}/${cluster_name}/artifacts"
- for addon in cpu-manager multus-cni node-feature-discovery ovn4nfv qat-device-plugin sriov-network sriov-network-operator; do
+ for addon in cdi cdi-operator cpu-manager kubevirt kubevirt-operator multus-cni node-feature-discovery ovn4nfv qat-device-plugin sriov-network sriov-network-operator; do
mkdir -p ${host_addons_dir}/${addon}/{helm,profile}
cp -r ${kud_infra_folder}/helm/${addon} ${host_addons_dir}/${addon}/helm
cp -r ${kud_infra_folder}/profiles/${addon}/* ${host_addons_dir}/${addon}/profile
@@ -238,8 +238,9 @@ function install_host_artifacts {
cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}
mkdir -p ${host_artifacts_dir}/addons
- cp ${kud_infra_folder}/emco/examples/prerequisites.yaml ${host_artifacts_dir}/addons
- cp ${kud_infra_folder}/emco/composite-app.yaml ${host_artifacts_dir}/addons
+ for yaml in ${kud_infra_folder}/emco/examples/*.yaml; do
+ cp ${yaml} ${host_artifacts_dir}/addons
+ done
for template in addons/*.tmpl; do
CLUSTER_NAME="${cluster_name}" \
HOST_IP="$(master_ip)" \
diff --git a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
index a13d8412..bfbd57b3 100644
--- a/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/containerized/inventory/group_vars/k8s-cluster.yml
@@ -14,8 +14,6 @@
# Editing those values will almost surely break something.
system_namespace: kube-system
-docker_version: 'latest'
-
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index 39da50e7..8c8ff7f3 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -61,42 +61,6 @@ function _install_ansible {
sudo -E pip install --no-cache-dir ansible==$version
}
-# _install_docker() - Download and install docker-engine
-function _install_docker {
- local max_concurrent_downloads=${1:-3}
-
- if $(docker version &>/dev/null); then
- return
- fi
- sudo apt-get install -y apt-transport-https ca-certificates curl
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- sudo apt-get update
- sudo apt-get install -y docker-ce
-
- sudo mkdir -p /etc/systemd/system/docker.service.d
- if [ ${http_proxy:-} ]; then
- echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf
- echo "Environment=\"HTTP_PROXY=$http_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/http-proxy.conf
- fi
- if [ ${https_proxy:-} ]; then
- echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/https-proxy.conf
- echo "Environment=\"HTTPS_PROXY=$https_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/https-proxy.conf
- fi
- if [ ${no_proxy:-} ]; then
- echo "[Service]" | sudo tee /etc/systemd/system/docker.service.d/no-proxy.conf
- echo "Environment=\"NO_PROXY=$no_proxy\"" | sudo tee --append /etc/systemd/system/docker.service.d/no-proxy.conf
- fi
- sudo systemctl daemon-reload
- echo "DOCKER_OPTS=\"-H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --max-concurrent-downloads $max_concurrent_downloads \"" | sudo tee --append /etc/default/docker
- if [[ -z $(groups | grep docker) ]]; then
- sudo usermod -aG docker $USER
- fi
-
- sudo systemctl restart docker
- sleep 10
-}
-
function _set_environment_file {
# By default ovn central interface is the first active network interface on localhost. If other wanted, need to export this variable in aio.sh or Vagrant file.
OVN_CENTRAL_INTERFACE="${OVN_CENTRAL_INTERFACE:-$(ip addr show | awk '/inet.*brd/{print $NF; exit}')}"
@@ -116,7 +80,6 @@ function install_k8s {
local tarball=v$version.tar.gz
sudo apt-get install -y sshpass make unzip # install make to run mitogen target and unzip is mitogen playbook dependency
sudo apt-get install -y gnupg2 software-properties-common
- _install_docker
_install_ansible
wget https://github.com/kubernetes-incubator/kubespray/archive/$tarball
sudo tar -C $dest_folder -xzf $tarball
@@ -250,7 +213,6 @@ function install_addons {
# install_plugin() - Install ONAP Multicloud Kubernetes plugin
function install_plugin {
echo "Installing multicloud/k8s plugin"
- _install_docker
sudo -E pip install --no-cache-dir docker-compose
sudo mkdir -p /opt/{kubeconfig,consul/config}
diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
index bf6f8c84..53b5a141 100644
--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
@@ -14,8 +14,6 @@
# Editing those values will almost surely break something.
system_namespace: kube-system
-docker_version: 'latest'
-
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 7a3e97ab..10f8e090 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -27,7 +27,7 @@ function print_msg {
function ssh_cluster {
master_ip=$(kubectl cluster-info | grep "Kubernetes master" | awk -F '[:/]' '{print $4}')
- ssh -o StrictHostKeyChecking=no ${master_ip} -- "$@"
+ ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${master_ip} -- "$@"
}
function get_ovn_central_address {
diff --git a/kud/tests/plugin_fw_v2.sh b/kud/tests/plugin_fw_v2.sh
index ed4a5ad7..d6254ac3 100755
--- a/kud/tests/plugin_fw_v2.sh
+++ b/kud/tests/plugin_fw_v2.sh
@@ -16,7 +16,8 @@ source _common_test.sh
source _functions.sh
source _functions.sh
-kubeconfig_path="$HOME/.kube/config"
+# TODO KUBECONFIG may be a list of paths
+kubeconfig_path="${KUBECONFIG:-$HOME/.kube/config}"
clusters="${KUD_PLUGIN_FW_CLUSTERS:-$(cat <<EOF
[
@@ -55,16 +56,8 @@ while [[ $# -gt 0 ]]; do
case $arg in
"--external" )
- master_ip=$(kubectl cluster-info | grep "Kubernetes master" | \
+ service_host=$(kubectl cluster-info | grep "Kubernetes master" | \
awk -F ":" '{print $2}' | awk -F "//" '{print $2}')
- base_url_clm=${base_url_clm:-"http://$master_ip:30461/v2"}
- base_url_ncm=${base_url_ncm:-"http://$master_ip:30431/v2"}
- base_url_orchestrator=${base_url_orchestrator:-"http://$master_ip:30415/v2"}
- base_url_ovnaction=${base_url_ovnaction:-"http://$master_ip:30471/v2"}
- rsync_service_port=30441
- rsync_service_host="$master_ip"
- ovnaction_service_port=30473
- ovnaction_service_host="$master_ip"
shift
;;
* )
@@ -75,25 +68,11 @@ while [[ $# -gt 0 ]]; do
done
set -- "${ARGS[@]}" # restore positional parameters
-base_url_clm=${base_url_clm:-"http://localhost:9061/v2"}
-base_url_ncm=${base_url_ncm:-"http://localhost:9031/v2"}
-base_url_orchestrator=${base_url_orchestrator:-"http://localhost:9015/v2"}
-base_url_ovnaction=${base_url_ovnaction:-"http://localhost:9053/v2"}
-rsync_service_port=${rsync_service_port:-9041}
-rsync_service_host=${rsync_service_host:-"localhost"}
-ovnaction_service_port=${ovnaction_service_port:-9053}
-ovnaction_service_host=${ovnaction_service_host:-"localhost"}
+service_host=${service_host:-"localhost"}
CSAR_DIR="/opt/csar"
csar_id="4bf66240-a0be-4ce2-aebd-a01df7725f16"
-packetgen_helm_path="$CSAR_DIR/$csar_id/packetgen.tar.gz"
-packetgen_profile_targz="$CSAR_DIR/$csar_id/profile.tar.gz"
-firewall_helm_path="$CSAR_DIR/$csar_id/firewall.tar.gz"
-firewall_profile_targz="$CSAR_DIR/$csar_id/profile.tar.gz"
-sink_helm_path="$CSAR_DIR/$csar_id/sink.tar.gz"
-sink_profile_targz="$CSAR_DIR/$csar_id/profile.tar.gz"
-
demo_folder=$test_folder/../demo
function populate_CSAR_compositevfw_helm {
@@ -108,966 +87,133 @@ function populate_CSAR_compositevfw_helm {
popd
}
+project="testvfw"
+composite_app="compositevfw"
+version="v1"
+deployment_intent_group="vfw_deployment_intent_group"
+
function setup {
install_deps
populate_CSAR_compositevfw_helm "$csar_id"
-}
-
-clusterprovidername="vfw-cluster-provider"
-clusterproviderdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$clusterprovidername",
- "description": "description of $clusterprovidername",
- "userData1": "$clusterprovidername user data 1",
- "userData2": "$clusterprovidername user data 2"
- }
-}
-EOF
-)"
-
-labelname="LabelA"
-labeldata="$(cat<<EOF
-{"label-name": "$labelname"}
-EOF
-)"
-
-# add the rsync controller entry
-rsynccontrollername="rsync"
-rsynccontrollerdata="$(cat<<EOF
-{
- "metadata": {
- "name": "rsync",
- "description": "description of $rsynccontrollername controller",
- "userData1": "user data 1 for $rsynccontrollername",
- "userData2": "user data 2 for $rsynccontrollername"
- },
- "spec": {
- "host": "${rsync_service_host}",
- "port": ${rsync_service_port}
- }
-}
-EOF
-)"
-
-# add the ovn action controller entry
-ovnactioncontrollername="ovnaction"
-ovnactioncontrollerdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$ovnactioncontrollername",
- "description": "description of $ovnactioncontrollername controller",
- "userData1": "user data 2 for $ovnactioncontrollername",
- "userData2": "user data 2 for $ovnactioncontrollername"
- },
- "spec": {
- "host": "${ovnaction_service_host}",
- "type": "action",
- "priority": 1,
- "port": ${ovnaction_service_port}
- }
-}
-EOF
-)"
-
-# define networks and providernetworks intents to ncm for the clusters
-# define emco-private-net and unprotexted-private-net as provider networks
-
-emcoprovidernetworkname="emco-private-net"
-emcoprovidernetworkdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$emcoprovidernetworkname",
- "description": "description of $emcoprovidernetworkname",
- "userData1": "user data 1 for $emcoprovidernetworkname",
- "userData2": "user data 2 for $emcoprovidernetworkname"
- },
- "spec": {
- "cniType": "ovn4nfv",
- "ipv4Subnets": [
- {
- "subnet": "10.10.20.0/24",
- "name": "subnet1",
- "gateway": "10.10.20.1/24"
- }
- ],
- "providerNetType": "VLAN",
- "vlan": {
- "vlanId": "102",
- "providerInterfaceName": "eth1",
- "logicalInterfaceName": "eth1.102",
- "vlanNodeSelector": "specific",
- "nodeLabelList": [
- "kubernetes.io/hostname=localhost"
- ]
- }
- }
-}
-EOF
-)"
-
-unprotectedprovidernetworkname="unprotected-private-net"
-unprotectedprovidernetworkdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$unprotectedprovidernetworkname",
- "description": "description of $unprotectedprovidernetworkname",
- "userData1": "user data 2 for $unprotectedprovidernetworkname",
- "userData2": "user data 2 for $unprotectedprovidernetworkname"
- },
- "spec": {
- "cniType": "ovn4nfv",
- "ipv4Subnets": [
- {
- "subnet": "192.168.10.0/24",
- "name": "subnet1",
- "gateway": "192.168.10.1/24"
- }
- ],
- "providerNetType": "VLAN",
- "vlan": {
- "vlanId": "100",
- "providerInterfaceName": "eth1",
- "logicalInterfaceName": "eth1.100",
- "vlanNodeSelector": "specific",
- "nodeLabelList": [
- "kubernetes.io/hostname=localhost"
- ]
- }
- }
-}
-EOF
-)"
-
-protectednetworkname="protected-private-net"
-protectednetworkdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$protectednetworkname",
- "description": "description of $protectednetworkname",
- "userData1": "user data 1 for $protectednetworkname",
- "userData2": "user data 1 for $protectednetworkname"
- },
- "spec": {
- "cniType": "ovn4nfv",
- "ipv4Subnets": [
- {
- "subnet": "192.168.20.0/24",
- "name": "subnet1",
- "gateway": "192.168.20.100/32"
- }
- ]
- }
-}
-EOF
-)"
-
-# define a project
-projectname="testvfw"
-projectdata="$(cat<<EOF
-{
- "metadata": {
- "name": "$projectname",
- "description": "description of $projectname controller",
- "userData1": "$projectname user data 1",
- "userData2": "$projectname user data 2"
- }
-}
-EOF
-)"
-
-# define a composite application
-vfw_compositeapp_name="compositevfw"
-vfw_compositeapp_version="v1"
-vfw_compositeapp_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${vfw_compositeapp_name}",
- "description": "description of ${vfw_compositeapp_name}",
- "userData1": "user data 1 for ${vfw_compositeapp_name}",
- "userData2": "user data 2 for ${vfw_compositeapp_name}"
- },
- "spec":{
- "version":"${vfw_compositeapp_version}"
- }
-}
-EOF
-)"
-
-# define app entries for the composite application
-# includes the multipart tgz of the helm chart for vfw
-# BEGIN: Create entries for app1&app2 in the database
-packetgen_app_name="packetgen"
-packetgen_helm_chart=${packetgen_helm_path}
-packetgen_app_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${packetgen_app_name}",
- "description": "description for app ${packetgen_app_name}",
- "userData1": "user data 2 for ${packetgen_app_name}",
- "userData2": "user data 2 for ${packetgen_app_name}"
- }
-}
-EOF
-)"
-
-firewall_app_name="firewall"
-firewall_helm_chart=${firewall_helm_path}
-firewall_app_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${firewall_app_name}",
- "description": "description for app ${firewall_app_name}",
- "userData1": "user data 2 for ${firewall_app_name}",
- "userData2": "user data 2 for ${firewall_app_name}"
- }
-}
-EOF
-)"
-
-sink_app_name="sink"
-sink_helm_chart=${sink_helm_path}
-sink_app_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${sink_app_name}",
- "description": "description for app ${sink_app_name}",
- "userData1": "user data 2 for ${sink_app_name}",
- "userData2": "user data 2 for ${sink_app_name}"
- }
-}
-EOF
-)"
-
-# Add the composite profile
-vfw_composite_profile_name="vfw_composite-profile"
-vfw_composite_profile_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${vfw_composite_profile_name}",
- "description":"description of ${vfw_composite_profile_name}",
- "userData1":"user data 1 for ${vfw_composite_profile_name}",
- "userData2":"user data 2 for ${vfw_composite_profile_name}"
- }
-}
-EOF
-)"
-
-# define the packetgen profile data
-packetgen_profile_name="packetgen-profile"
-packetgen_profile_file=${packetgen_profile_targz}
-packetgen_profile_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${packetgen_profile_name}",
- "description":"description of ${packetgen_profile_name}",
- "userData1":"user data 1 for ${packetgen_profile_name}",
- "userData2":"user data 2 for ${packetgen_profile_name}"
- },
- "spec":{
- "app-name": "${packetgen_app_name}"
- }
-}
-EOF
-)"
-
-# define the firewall profile data
-firewall_profile_name="firewall-profile"
-firewall_profile_file=${firewall_profile_targz}
-firewall_profile_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${firewall_profile_name}",
- "description":"description of ${firewall_profile_name}",
- "userData1":"user data 1 for ${firewall_profile_name}",
- "userData2":"user data 2 for ${firewall_profile_name}"
- },
- "spec":{
- "app-name": "${firewall_app_name}"
- }
-}
-EOF
-)"
-
-# define the sink profile data
-sink_profile_name="sink-profile"
-sink_profile_file=${sink_profile_targz}
-sink_profile_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${sink_profile_name}",
- "description":"description of ${sink_profile_name}",
- "userData1":"user data 1 for ${sink_profile_name}",
- "userData2":"user data 2 for ${sink_profile_name}"
- },
- "spec":{
- "app-name": "${sink_app_name}"
- }
-}
-EOF
-)"
-
-# define the generic placement intent
-generic_placement_intent_name="generic-placement-intent"
-generic_placement_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${generic_placement_intent_name}",
- "description":"${generic_placement_intent_name}",
- "userData1":"${generic_placement_intent_name}",
- "userData2":"${generic_placement_intent_name}"
- }
-}
-EOF
-)"
-
-# define app placement intent for packetgen
-packetgen_placement_intent_name="packetgen-placement-intent"
-packetgen_placement_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${packetgen_placement_intent_name}",
- "description":"description of ${packetgen_placement_intent_name}",
- "userData1":"user data 1 for ${packetgen_placement_intent_name}",
- "userData2":"user data 2 for ${packetgen_placement_intent_name}"
- },
- "spec":{
- "app-name":"${packetgen_app_name}",
- "intent":{
- "allOf":[
- { "provider-name":"${clusterprovidername}",
- "cluster-label-name":"${labelname}"
- }
- ]
- }
- }
-}
-EOF
-)"
-
-# define app placement intent for firewall
-firewall_placement_intent_name="firewall-placement-intent"
-firewall_placement_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${firewall_placement_intent_name}",
- "description":"description of ${firewall_placement_intent_name}",
- "userData1":"user data 1 for ${firewall_placement_intent_name}",
- "userData2":"user data 2 for ${firewall_placement_intent_name}"
- },
- "spec":{
- "app-name":"${firewall_app_name}",
- "intent":{
- "allOf":[
- { "provider-name":"${clusterprovidername}",
- "cluster-label-name":"${labelname}"
- }
- ]
- }
- }
-}
-EOF
-)"
-
-# define app placement intent for sink
-sink_placement_intent_name="sink-placement-intent"
-sink_placement_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${sink_placement_intent_name}",
- "description":"description of ${sink_placement_intent_name}",
- "userData1":"user data 1 for ${sink_placement_intent_name}",
- "userData2":"user data 2 for ${sink_placement_intent_name}"
- },
- "spec":{
- "app-name":"${sink_app_name}",
- "intent":{
- "allOf":[
- { "provider-name":"${clusterprovidername}",
- "cluster-label-name":"${labelname}"
- }
- ]
- }
- }
-}
-EOF
-)"
-
-# define a deployment intent group
-release="fw0"
-deployment_intent_group_name="vfw_deployment_intent_group"
-deployment_intent_group_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${deployment_intent_group_name}",
- "description":"descriptiont of ${deployment_intent_group_name}",
- "userData1":"user data 1 for ${deployment_intent_group_name}",
- "userData2":"user data 2 for ${deployment_intent_group_name}"
- },
- "spec":{
- "profile":"${vfw_composite_profile_name}",
- "version":"${release}",
- "logical-cloud":"unused_logical_cloud",
- "override-values":[
- {
- "app-name":"${packetgen_app_name}",
- "values": {
- ".Values.service.ports.nodePort":"30888"
- }
- },
- {
- "app-name":"${firewall_app_name}",
- "values": {
- ".Values.global.dcaeCollectorIp":"1.2.3.4",
- ".Values.global.dcaeCollectorPort":"8888"
- }
- },
- {
- "app-name":"${sink_app_name}",
- "values": {
- ".Values.service.ports.nodePort":"30677"
- }
- }
- ]
- }
-}
-EOF
-)"
-
-# define the network-control-intent for the vfw composite app
-vfw_ovnaction_intent_name="vfw_ovnaction_intent"
-vfw_ovnaction_intent_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${vfw_ovnaction_intent_name}",
- "description":"descriptionf of ${vfw_ovnaction_intent_name}",
- "userData1":"user data 1 for ${vfw_ovnaction_intent_name}",
- "userData2":"user data 2 for ${vfw_ovnaction_intent_name}"
- }
-}
-EOF
-)"
-
-# define the network workload intent for packetgen app
-packetgen_workload_intent_name="packetgen_workload_intent"
-packetgen_workload_intent_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${packetgen_workload_intent_name}",
- "description": "description of ${packetgen_workload_intent_name}",
- "userData1": "useer data 2 for ${packetgen_workload_intent_name}",
- "userData2": "useer data 2 for ${packetgen_workload_intent_name}"
- },
- "spec": {
- "application-name": "${packetgen_app_name}",
- "workload-resource": "${release}-${packetgen_app_name}",
- "type": "Deployment"
- }
-}
-EOF
-)"
-
-# define the network workload intent for firewall app
-firewall_workload_intent_name="firewall_workload_intent"
-firewall_workload_intent_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${firewall_workload_intent_name}",
- "description": "description of ${firewall_workload_intent_name}",
- "userData1": "useer data 2 for ${firewall_workload_intent_name}",
- "userData2": "useer data 2 for ${firewall_workload_intent_name}"
- },
- "spec": {
- "application-name": "${firewall_app_name}",
- "workload-resource": "${release}-${firewall_app_name}",
- "type": "Deployment"
- }
-}
-EOF
-)"
-
-# define the network workload intent for sink app
-sink_workload_intent_name="sink_workload_intent"
-sink_workload_intent_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${sink_workload_intent_name}",
- "description": "description of ${sink_workload_intent_name}",
- "userData1": "useer data 2 for ${sink_workload_intent_name}",
- "userData2": "useer data 2 for ${sink_workload_intent_name}"
- },
- "spec": {
- "application-name": "${sink_app_name}",
- "workload-resource": "${release}-${sink_app_name}",
- "type": "Deployment"
- }
-}
-EOF
-)"
-
-# define the network interface intents for the packetgen workload intent
-packetgen_unprotected_interface_name="packetgen_unprotected_if"
-packetgen_unprotected_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${packetgen_unprotected_interface_name}",
- "description": "description of ${packetgen_unprotected_interface_name}",
- "userData1": "useer data 2 for ${packetgen_unprotected_interface_name}",
- "userData2": "useer data 2 for ${packetgen_unprotected_interface_name}"
- },
- "spec": {
- "interface": "eth1",
- "name": "${unprotectedprovidernetworkname}",
- "defaultGateway": "false",
- "ipAddress": "192.168.10.2"
- }
-}
-EOF
-)"
-
-packetgen_emco_interface_name="packetgen_emco_if"
-packetgen_emco_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${packetgen_emco_interface_name}",
- "description": "description of ${packetgen_emco_interface_name}",
- "userData1": "useer data 2 for ${packetgen_emco_interface_name}",
- "userData2": "useer data 2 for ${packetgen_emco_interface_name}"
- },
- "spec": {
- "interface": "eth2",
- "name": "${emcoprovidernetworkname}",
- "defaultGateway": "false",
- "ipAddress": "10.10.20.2"
- }
-}
-EOF
-)"
-
-# define the network interface intents for the firewall workload intent
-firewall_unprotected_interface_name="firewall_unprotected_if"
-firewall_unprotected_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${firewall_unprotected_interface_name}",
- "description": "description of ${firewall_unprotected_interface_name}",
- "userData1": "useer data 2 for ${firewall_unprotected_interface_name}",
- "userData2": "useer data 2 for ${firewall_unprotected_interface_name}"
- },
- "spec": {
- "interface": "eth1",
- "name": "${unprotectedprovidernetworkname}",
- "defaultGateway": "false",
- "ipAddress": "192.168.10.3"
- }
-}
-EOF
-)"
-
-firewall_protected_interface_name="firewall_protected_if"
-firewall_protected_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${firewall_protected_interface_name}",
- "description": "description of ${firewall_protected_interface_name}",
- "userData1": "useer data 2 for ${firewall_protected_interface_name}",
- "userData2": "useer data 2 for ${firewall_protected_interface_name}"
- },
- "spec": {
- "interface": "eth2",
- "name": "${protectednetworkname}",
- "defaultGateway": "false",
- "ipAddress": "192.168.20.2"
- }
-}
-EOF
-)"
-
-firewall_emco_interface_name="firewall_emco_if"
-firewall_emco_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${firewall_emco_interface_name}",
- "description": "description of ${firewall_emco_interface_name}",
- "userData1": "useer data 2 for ${firewall_emco_interface_name}",
- "userData2": "useer data 2 for ${firewall_emco_interface_name}"
- },
- "spec": {
- "interface": "eth3",
- "name": "${emcoprovidernetworkname}",
- "defaultGateway": "false",
- "ipAddress": "10.10.20.3"
- }
-}
-EOF
-)"
-
-# define the network interface intents for the sink workload intent
-sink_protected_interface_name="sink_protected_if"
-sink_protected_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${sink_protected_interface_name}",
- "description": "description of ${sink_protected_interface_name}",
- "userData1": "useer data 2 for ${sink_protected_interface_name}",
- "userData2": "useer data 2 for ${sink_protected_interface_name}"
- },
- "spec": {
- "interface": "eth1",
- "name": "${protectednetworkname}",
- "defaultGateway": "false",
- "ipAddress": "192.168.20.3"
- }
-}
-EOF
-)"
-
-sink_emco_interface_name="sink_emco_if"
-sink_emco_interface_data="$(cat <<EOF
-{
- "metadata": {
- "name": "${sink_emco_interface_name}",
- "description": "description of ${sink_emco_interface_name}",
- "userData1": "useer data 2 for ${sink_emco_interface_name}",
- "userData2": "useer data 2 for ${sink_emco_interface_name}"
- },
- "spec": {
- "interface": "eth2",
- "name": "${emcoprovidernetworkname}",
- "defaultGateway": "false",
- "ipAddress": "10.10.20.4"
- }
-}
-EOF
-)"
-
-# define the intents to be used by the group
-deployment_intents_in_group_name="vfw_deploy_intents"
-deployment_intents_in_group_data="$(cat <<EOF
-{
- "metadata":{
- "name":"${deployment_intents_in_group_name}",
- "description":"descriptionf of ${deployment_intents_in_group_name}",
- "userData1":"user data 1 for ${deployment_intents_in_group_name}",
- "userData2":"user data 2 for ${deployment_intents_in_group_name}"
- },
- "spec":{
- "intent":{
- "genericPlacementIntent":"${generic_placement_intent_name}",
- "ovnaction" : "${vfw_ovnaction_intent_name}"
- }
- }
-}
-EOF
-)"
-
-function createOvnactionData {
- call_api -d "${vfw_ovnaction_intent_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent"
-
- call_api -d "${packetgen_workload_intent_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
- call_api -d "${firewall_workload_intent_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
- call_api -d "${sink_workload_intent_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents"
-
- call_api -d "${packetgen_emco_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces"
- call_api -d "${packetgen_unprotected_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces"
-
- call_api -d "${firewall_emco_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
- call_api -d "${firewall_unprotected_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
- call_api -d "${firewall_protected_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces"
-
- call_api -d "${sink_emco_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces"
- call_api -d "${sink_protected_interface_data}" \
- "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces"
-}
-
-function createOrchData {
- print_msg "Creating controller entries"
- call_api -d "${rsynccontrollerdata}" "${base_url_orchestrator}/controllers"
- call_api -d "${ovnactioncontrollerdata}" "${base_url_orchestrator}/controllers"
-
- print_msg "Creating project entry"
- call_api -d "${projectdata}" "${base_url_orchestrator}/projects"
-
- print_msg "Creating vfw composite app entry"
- call_api -d "${vfw_compositeapp_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps"
-
- print_msg "Adding vfw apps to the composite app"
- call_api -F "metadata=${packetgen_app_data}" \
- -F "file=@${packetgen_helm_chart}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps"
- call_api -F "metadata=${firewall_app_data}" \
- -F "file=@${firewall_helm_chart}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps"
- call_api -F "metadata=${sink_app_data}" \
- -F "file=@${sink_helm_chart}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps"
-
- print_msg "Creating vfw composite profile entry"
- call_api -d "${vfw_composite_profile_data}" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles"
-
- print_msg "Adding vfw app profiles to the composite profile"
- call_api -F "metadata=${packetgen_profile_data}" \
- -F "file=@${packetgen_profile_file}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles"
- call_api -F "metadata=${firewall_profile_data}" \
- -F "file=@${firewall_profile_file}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles"
- call_api -F "metadata=${sink_profile_data}" \
- -F "file=@${sink_profile_file}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles"
-
- print_msg "Create the deployment intent group"
- call_api -d "${deployment_intent_group_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups"
- call_api -d "${deployment_intents_in_group_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents"
-
- createOvnactionData
-
- print_msg "Create the generic placement intent"
- call_api -d "${generic_placement_intent_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents"
-
- print_msg "Add the vfw app placement intents to the generic placement intent"
- call_api -d "${packetgen_placement_intent_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
- call_api -d "${firewall_placement_intent_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
- call_api -d "${sink_placement_intent_data}" \
- "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents"
-}
-
-function createNcmData {
- print_msg "Creating cluster provider ${clusterprovidername}"
- call_api -d "${clusterproviderdata}" "${base_url_clm}/cluster-providers"
-
- for name in $(cluster_names); do
- metadata=$(cluster_metadata "$name")
- file=$(cluster_file "$name")
- print_msg "Creating cluster ${name}"
- call_api -H "Content-Type: multipart/form-data" -F "metadata=$metadata" -F "file=@$file" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters"
- call_api -d "${labeldata}" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${name}/labels"
-
- print_msg "Creating provider network and network intents for ${name}"
- call_api -d "${emcoprovidernetworkdata}" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks"
- call_api -d "${unprotectedprovidernetworkdata}" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks"
- call_api -d "${protectednetworkdata}" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/networks"
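+    # Write the emcoctl config file with the endpoints of the EMCO
+    # microservices on ${service_host}.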
+ cat <<EOF >plugin_fw_v2_config.yaml
+orchestrator:
+ host: ${service_host}
+ port: 30415
+clm:
+ host: ${service_host}
+ port: 30461
+ncm:
+ host: ${service_host}
+ port: 30431
+ovnaction:
+ host: ${service_host}
+ port: 30471
+dcm:
+ host: ${service_host}
+ port: 30477
+gac:
+ host: ${service_host}
+ port: 30491
+dtc:
+ host: ${service_host}
+ port: 30481
+EOF
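+    # Write the values used to fill in the emcoctl template files.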
+ cat <<EOF >plugin_fw_v2_values.yaml
+ClusterProvider: vfw-cluster-provider
+ClusterLabel: LabelA
+Clusters:
+EOF
+ echo $clusters | jq -r '.[] | "- Name: \(.metadata.name)\n KubeConfig: \(.file)"' >>plugin_fw_v2_values.yaml
+ cat <<EOF >>plugin_fw_v2_values.yaml
+EmcoProviderNetwork: emco-private-net
+UnprotectedProviderNetwork: unprotected-private-net
+ProtectedNetwork: protected-private-net
+Project: ${project}
+LogicalCloud: lcadmin
+CompositeApp: ${composite_app}
+Version: ${version}
+PackagesPath: ${CSAR_DIR}/${csar_id}
+CompositeProfile: vfw_composite-profile
+DeploymentIntentGroup: ${deployment_intent_group}
+Release: fw0
+DeploymentIntentsInGroup: vfw_deploy_intents
+GenericPlacementIntent: generic-placement-intent
+OvnActionIntent: vfw_ovnaction_intent
+EOF
+}
+
+function call_emcoctl {
+ rc=$1
+ shift
+ # retry due to known issue with emcoctl and instantiating/terminating multiple resources
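+    # rc is a regex (e.g. "2.." for any 2xx) matched against the last "Response Code:"
+    # printed by emcoctl; the remaining arguments are passed through to emcoctl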
+ try=0
+    until [[ $(emcoctl "$@" | awk '/Response Code:/ {code=$3} END{print code}') =~ $rc ]]; do
+ if [[ $try -lt 10 ]]; then
+ sleep 1s
+ else
+ return 1
+ fi
+ try=$((try + 1))
done
+ return 0
}
function createData {
- setup
- createNcmData
- createOrchData # this will call createOvnactionData
-}
-
-function getOvnactionData {
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}"
-
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}"
-
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_emco_interface_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_unprotected_interface_name}"
-
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_emco_interface_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_unprotected_interface_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_protected_interface_name}"
-
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_emco_interface_name}"
- call_api_nox "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_protected_interface_name}"
-}
-
-function getOrchData {
- call_api_nox "${base_url_orchestrator}/controllers/${rsynccontrollername}"
- call_api_nox "${base_url_orchestrator}/controllers/${ovnactioncontrollername}"
-
- call_api_nox "${base_url_orchestrator}/projects/${projectname}"
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}"
-
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${packetgen_app_name}"
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${firewall_app_name}"
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${sink_app_name}"
-
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}"
-
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${packetgen_profile_name}"
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${firewall_profile_name}"
- call_api_nox -H "Accept: application/json" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${sink_profile_name}"
-
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
-
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${packetgen_placement_intent_name}"
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${firewall_placement_intent_name}"
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${sink_placement_intent_name}"
-
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
- call_api_nox "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
-}
-
-function getNcmData {
- call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}"
- call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters?label=${labelname}"
-
- for name in $(cluster_names); do
- call_api_nox -H "Accept: application/json" "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${name}"
- call_api_nox "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${name}/labels/${labelname}"
- call_api_nox "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks/${emcoprovidernetworkname}"
- call_api_nox "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks/${unprotectedprovidernetworkname}"
- call_api_nox "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/networks/${protectednetworkname}"
- done
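+    # apply the full template; a 2xx Response Code from the final call indicates success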
+ call_emcoctl 2.. --config plugin_fw_v2_config.yaml apply -f plugin_fw_v2.yaml -v plugin_fw_v2_values.yaml
}
function getData {
- getNcmData
- getOrchData
- getOvnactionData
-}
-
-function deleteOvnactionData {
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_protected_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}/interfaces/${sink_emco_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_protected_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_unprotected_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}/interfaces/${firewall_emco_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_unprotected_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}/interfaces/${packetgen_emco_interface_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${sink_workload_intent_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${firewall_workload_intent_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}/workload-intents/${packetgen_workload_intent_name}"
- delete_resource "${base_url_ovnaction}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/network-controller-intent/${vfw_ovnaction_intent_name}"
-}
-
-function deleteOrchData {
- delete_resource "${base_url_orchestrator}/controllers/${rsynccontrollername}"
- delete_resource "${base_url_orchestrator}/controllers/${ovnactioncontrollername}"
-
- deleteOvnactionData
-
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${sink_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${firewall_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}/app-intents/${packetgen_placement_intent_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/generic-placement-intents/${generic_placement_intent_name}"
-
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/intents/${deployment_intents_in_group_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}"
-
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${sink_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${firewall_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}/profiles/${packetgen_profile_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/composite-profiles/${vfw_composite_profile_name}"
-
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${sink_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${firewall_app_name}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/apps/${packetgen_app_name}"
-
- delete_resource "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}"
- delete_resource "${base_url_orchestrator}/projects/${projectname}"
-}
-
-function deleteNcmData {
- for name in $(cluster_names); do
- delete_resource "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/networks/${protectednetworkname}"
- delete_resource "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks/${unprotectedprovidernetworkname}"
- delete_resource "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/provider-networks/${emcoprovidernetworkname}"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${name}/labels/${labelname}"
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}/clusters/${name}"
- done
-
- delete_resource "${base_url_clm}/cluster-providers/${clusterprovidername}"
+ emcoctl --config plugin_fw_v2_config.yaml get -f plugin_fw_v2.yaml -v plugin_fw_v2_values.yaml
}
function deleteData {
- deleteNcmData
- deleteOrchData
-}
-
-# apply the network and providernetwork to an appcontext and instantiate with rsync
-function applyNcmData {
- for name in $(cluster_names); do
- call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/apply"
- done
-}
-
-# deletes the network resources from the clusters and the associated appcontext entries
-function terminateNcmData {
- for name in $(cluster_names); do
- call_api -d "{ }" "${base_url_ncm}/cluster-providers/${clusterprovidername}/clusters/${name}/terminate"
- done
-}
-
-# terminates the vfw resources
-function terminateOrchData {
- call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/terminate"
-}
-
-# terminates the vfw and ncm resources
-function terminateVfw {
- terminateOrchData
- terminateNcmData
-}
-
-function instantiateVfw {
- call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/approve"
- call_api -d "{ }" "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/instantiate"
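+    # delete everything created from the template; a 4xx Response Code from the final call is expected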
+ call_emcoctl 4.. --config plugin_fw_v2_config.yaml delete -f plugin_fw_v2.yaml -v plugin_fw_v2_values.yaml
}
function statusVfw {
- call_api "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/status"
+ emcoctl --config plugin_fw_v2_config.yaml get projects/${project}/composite-apps/${composite_app}/${version}/deployment-intent-groups/${deployment_intent_group}/status
}
function waitForVfw {
- wait_for_deployment_status "${base_url_orchestrator}/projects/${projectname}/composite-apps/${vfw_compositeapp_name}/${vfw_compositeapp_version}/deployment-intent-groups/${deployment_intent_group_name}/status" $1
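+    # poll the deployment intent group status for up to ~60s until it reports the requested phase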
+ for try in {0..59}; do
+ sleep 1
+ new_phase="$(emcoctl --config plugin_fw_v2_config.yaml get projects/${project}/composite-apps/${composite_app}/${version}/deployment-intent-groups/${deployment_intent_group}/status | awk '/Response: / {print $2}' | jq -r .status)"
+ echo "$(date +%H:%M:%S) - Filter=[$*] : $new_phase"
+ if [[ "$new_phase" == "$1" ]]; then
+ return 0
+ fi
+    done
+    return 1
}
function usage {
- echo "Usage: $0 create|get|delete|apply|terminate|instantiate"
+    echo "Usage: $0 setup|create|get|delete|status"
+ echo " setup - creates the emcoctl files and packages needed for vfw"
echo " create - creates all ncm, ovnaction, clm resources needed for vfw"
echo " get - queries all resources in ncm, ovnaction, clm resources created for vfw"
- echo " delete - deletes all resources in ncm, ovnaction, clm resources created for vfw"
- echo " apply - applys the network intents - e.g. networks created in ncm"
- echo " instantiate - approves and instantiates the composite app via the generic deployment intent"
+    echo "  delete - deletes all ncm, ovnaction, clm resources created for vfw"
echo " status - get status of deployed resources"
- echo " terminate - remove the vFW composite app resources and network resources create by 'instantiate' and 'apply'"
echo ""
echo " a reasonable test sequence:"
- echo " 1. create"
- echo " 2. apply"
- echo " 3. instantiate"
- echo " 4. status"
- echo " 5. terminate"
- echo " 6. destroy"
+ echo " 1. setup"
+ echo " 2. create"
+    echo "  3. delete"
exit
}
if [[ "$#" -gt 0 ]] ; then
case "$1" in
+ "setup" ) setup ;;
"create" ) createData ;;
"get" ) getData ;;
- "apply" ) applyNcmData ;;
- "instantiate" ) instantiateVfw ;;
"status" ) statusVfw ;;
"wait" ) waitForVfw "Instantiated" ;;
- "terminate" ) terminateVfw ;;
"delete" ) deleteData ;;
*) usage ;;
esac
else
+ setup
createData
- applyNcmData
- instantiateVfw
print_msg "[BEGIN] Basic checks for instantiated resource"
print_msg "Wait for deployment to be instantiated"
@@ -1075,16 +221,14 @@ else
for name in $(cluster_names); do
print_msg "Check that networks were created on cluster $name"
file=$(cluster_file "$name")
- KUBECONFIG=$file kubectl get network protected-private-net
- KUBECONFIG=$file kubectl get providernetwork emco-private-net
- KUBECONFIG=$file kubectl get providernetwork unprotected-private-net
+ KUBECONFIG=$file kubectl get network protected-private-net -o name
+ KUBECONFIG=$file kubectl get providernetwork emco-private-net -o name
+ KUBECONFIG=$file kubectl get providernetwork unprotected-private-net -o name
done
for name in $(cluster_names); do
print_msg "Wait for all pods to start on cluster $name"
file=$(cluster_file "$name")
- KUBECONFIG=$file wait_for_pod -l app=sink
- KUBECONFIG=$file wait_for_pod -l app=firewall
- KUBECONFIG=$file wait_for_pod -l app=packetgen
+ KUBECONFIG=$file kubectl wait pod -l release=fw0 --for=condition=Ready
done
# TODO: Provide some health check to verify vFW work
print_msg "Not waiting for vFW to fully install as no further checks are implemented in testcase"
@@ -1092,7 +236,6 @@ else
#sleep 8m
print_msg "[END] Basic checks for instantiated resource"
- terminateVfw
- waitForVfw "Terminated"
+ print_msg "Delete deployment"
deleteData
fi
diff --git a/kud/tests/plugin_fw_v2.yaml b/kud/tests/plugin_fw_v2.yaml
new file mode 100644
index 00000000..be436106
--- /dev/null
+++ b/kud/tests/plugin_fw_v2.yaml
@@ -0,0 +1,411 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2020 Intel Corporation
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers
+metadata:
+ name: {{.ClusterProvider}}
+
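+# For each cluster: register it with the cluster provider, attach the cluster
+# label, and define the provider networks and protected network used by vFW.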
+{{- range $index, $cluster := .Clusters }}
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters
+metadata:
+ name: {{$cluster.Name}}
+file:
+ {{$cluster.KubeConfig}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters/{{$cluster.Name}}/labels
+label-name: {{$.ClusterLabel}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters/{{$cluster.Name}}/provider-networks
+metadata:
+ name: {{$.EmcoProviderNetwork}}
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 10.10.20.0/24
+ name: subnet1
+ gateway: 10.10.20.1/24
+ providerNetType: VLAN
+ vlan:
+ vlanId: "102"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.102
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters/{{$cluster.Name}}/provider-networks
+metadata:
+ name: {{$.UnprotectedProviderNetwork}}
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 192.168.10.0/24
+ name: subnet1
+ gateway: 192.168.10.1/24
+ providerNetType: VLAN
+ vlan:
+ vlanId: "100"
+ providerInterfaceName: eth1
+ logicalInterfaceName: eth1.100
+ vlanNodeSelector: specific
+ nodeLabelList:
+ - kubernetes.io/hostname=localhost
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters/{{$cluster.Name}}/networks
+metadata:
+ name: {{$.ProtectedNetwork}}
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 192.168.20.0/24
+ name: subnet1
+ gateway: 192.168.20.100/32
+{{- end }}
+
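+# Project, a level-0 logical cloud, and one cluster reference per cluster.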
+---
+version: emco/v2
+resourceContext:
+ anchor: projects
+metadata:
+ name: {{.Project}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/logical-clouds
+metadata:
+ name: {{.LogicalCloud}}
+spec:
+ level: "0"
+
+{{- range $index, $cluster := .Clusters }}
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{$.Project}}/logical-clouds/{{$.LogicalCloud}}/cluster-references
+metadata:
+ name: {{$cluster.Name}}
+spec:
+ cluster-provider: {{$.ClusterProvider}}
+ cluster-name: {{$cluster.Name}}
+ loadbalancer-ip: "0.0.0.0"
+{{- end }}
+
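+# The vFW composite app: packetgen, firewall, and sink application packages
+# plus a composite profile with one profile per app.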
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps
+metadata:
+ name: {{.CompositeApp}}
+spec:
+ version: {{.Version}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/apps
+metadata:
+ name: packetgen
+file:
+ {{.PackagesPath}}/packetgen.tar.gz
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/apps
+metadata:
+ name: firewall
+file:
+ {{.PackagesPath}}/firewall.tar.gz
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/apps
+metadata:
+ name: sink
+file:
+ {{.PackagesPath}}/sink.tar.gz
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/composite-profiles
+metadata:
+ name: {{.CompositeProfile}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/composite-profiles/{{.CompositeProfile}}/profiles
+metadata:
+ name: packetgen-profile
+spec:
+ app-name: packetgen
+file:
+ {{.PackagesPath}}/profile.tar.gz
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/composite-profiles/{{.CompositeProfile}}/profiles
+metadata:
+ name: firewall-profile
+spec:
+ app-name: firewall
+file:
+ {{.PackagesPath}}/profile.tar.gz
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/composite-profiles/{{.CompositeProfile}}/profiles
+metadata:
+ name: sink-profile
+spec:
+ app-name: sink
+file:
+ {{.PackagesPath}}/profile.tar.gz
+
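+# Deployment intent group: ties the composite profile and per-app override
+# values to the generic placement and ovnaction intents defined below.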
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups
+metadata:
+ name: {{.DeploymentIntentGroup}}
+spec:
+ profile: {{.CompositeProfile}}
+ version: {{.Release}}
+ logical-cloud: {{.LogicalCloud}}
+ override-values:
+ - app-name: packetgen
+ values:
+ ".Values.service.ports.nodePort": '30888'
+ - app-name: firewall
+ values:
+ ".Values.global.dcaeCollectorIp": 1.2.3.4
+ ".Values.global.dcaeCollectorPort": '8888'
+ - app-name: sink
+ values:
+ ".Values.service.ports.nodePort": '30677'
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/intents
+metadata:
+ name: {{.DeploymentIntentsInGroup}}
+spec:
+ intent:
+ genericPlacementIntent: {{.GenericPlacementIntent}}
+ ovnaction: {{.OvnActionIntent}}
+
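+# ovnaction intent: one workload intent per app Deployment plus the ovn4nfv
+# interfaces to attach to each workload.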
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent
+metadata:
+ name: {{.OvnActionIntent}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents
+metadata:
+ name: packetgen_workload_intent
+spec:
+ application-name: packetgen
+ workload-resource: {{.Release}}-packetgen
+ type: Deployment
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents
+metadata:
+ name: firewall_workload_intent
+spec:
+ application-name: firewall
+ workload-resource: {{.Release}}-firewall
+ type: Deployment
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents
+metadata:
+ name: sink_workload_intent
+spec:
+ application-name: sink
+ workload-resource: {{.Release}}-sink
+ type: Deployment
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/packetgen_workload_intent/interfaces
+metadata:
+ name: packetgen_unprotected_if
+spec:
+ interface: eth1
+ name: {{.UnprotectedProviderNetwork}}
+ defaultGateway: "false"
+ ipAddress: 192.168.10.2
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/packetgen_workload_intent/interfaces
+metadata:
+ name: packetgen_emco_if
+spec:
+ interface: eth2
+ name: {{.EmcoProviderNetwork}}
+ defaultGateway: "false"
+ ipAddress: 10.10.20.2
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/firewall_workload_intent/interfaces
+metadata:
+ name: firewall_unprotected_if
+spec:
+ interface: eth1
+ name: {{.UnprotectedProviderNetwork}}
+ defaultGateway: "false"
+ ipAddress: 192.168.10.3
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/firewall_workload_intent/interfaces
+metadata:
+ name: firewall_protected_if
+spec:
+ interface: eth2
+ name: {{.ProtectedNetwork}}
+ defaultGateway: "false"
+ ipAddress: 192.168.20.2
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/firewall_workload_intent/interfaces
+metadata:
+ name: firewall_emco_if
+spec:
+ interface: eth3
+ name: {{.EmcoProviderNetwork}}
+ defaultGateway: "false"
+ ipAddress: 10.10.20.3
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/sink_workload_intent/interfaces
+metadata:
+ name: sink_protected_if
+spec:
+ interface: eth1
+ name: {{.ProtectedNetwork}}
+ defaultGateway: "false"
+ ipAddress: 192.168.20.3
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/network-controller-intent/{{.OvnActionIntent}}/workload-intents/sink_workload_intent/interfaces
+metadata:
+ name: sink_emco_if
+spec:
+ interface: eth2
+ name: {{.EmcoProviderNetwork}}
+ defaultGateway: "false"
+ ipAddress: 10.10.20.4
+
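+# Generic placement: place each app on every cluster carrying the cluster label.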
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/generic-placement-intents
+metadata:
+ name: {{.GenericPlacementIntent}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/generic-placement-intents/{{.GenericPlacementIntent}}/app-intents
+metadata:
+ name: packetgen-placement-intent
+spec:
+ app-name: packetgen
+ intent:
+ allOf:
+ - provider-name: {{.ClusterProvider}}
+ cluster-label-name: {{.ClusterLabel}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/generic-placement-intents/{{.GenericPlacementIntent}}/app-intents
+metadata:
+ name: firewall-placement-intent
+spec:
+ app-name: firewall
+ intent:
+ allOf:
+ - provider-name: {{.ClusterProvider}}
+ cluster-label-name: {{.ClusterLabel}}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/generic-placement-intents/{{.GenericPlacementIntent}}/app-intents
+metadata:
+ name: sink-placement-intent
+spec:
+ app-name: sink
+ intent:
+ allOf:
+ - provider-name: {{.ClusterProvider}}
+ cluster-label-name: {{.ClusterLabel}}
+
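+# Finally, apply the per-cluster networks, instantiate the logical cloud, and
+# approve and instantiate the deployment intent group.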
+{{- range $index, $cluster := .Clusters }}
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/{{$.ClusterProvider}}/clusters/{{$cluster.Name}}/apply
+{{- end }}
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/logical-clouds/{{.LogicalCloud}}/instantiate
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/approve
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/{{.Project}}/composite-apps/{{.CompositeApp}}/{{.Version}}/deployment-intent-groups/{{.DeploymentIntentGroup}}/instantiate
diff --git a/kud/tests/qat.sh b/kud/tests/qat.sh
index 11fb6ca0..98c0bb72 100755
--- a/kud/tests/qat.sh
+++ b/kud/tests/qat.sh
@@ -10,7 +10,7 @@
set -o pipefail
-qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."qat.intel.com/cy2_dc2"|tonumber)>=1) | .metadata.name')
+qat_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."qat.intel.com/cy2_dc2"!=null) and ((.status.capacity."qat.intel.com/cy2_dc2"|tonumber)>=1)) | .metadata.name')
if [ -z "$qat_capable_nodes" ]; then
echo "This test case cannot run. QAT device unavailable."
QAT_ENABLED=False
diff --git a/kud/tests/sriov-network.sh b/kud/tests/sriov-network.sh
index 3191c2f3..db1613be 100755
--- a/kud/tests/sriov-network.sh
+++ b/kud/tests/sriov-network.sh
@@ -10,7 +10,7 @@
set -o pipefail
-sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_nic"|tonumber)>=2) | .metadata.name')
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_nic"!=null) and ((.status.capacity."intel.com/intel_sriov_nic"|tonumber)>=2)) | .metadata.name')
if [ -z "$sriov_capable_nodes" ]; then
echo "SRIOV test case cannot run on the cluster."
exit 0
diff --git a/kud/tests/sriov.sh b/kud/tests/sriov.sh
index 7aa97f0c..a44aba04 100755
--- a/kud/tests/sriov.sh
+++ b/kud/tests/sriov.sh
@@ -10,7 +10,7 @@
set -o pipefail
-sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2) | .metadata.name')
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"!=null) and ((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2)) | .metadata.name')
if [ -z "$sriov_capable_nodes" ]; then
echo "SRIOV test case cannot run on the cluster."
exit 0
diff --git a/kud/tests/topology-manager.sh b/kud/tests/topology-manager.sh
index b1126aac..772dcfed 100755
--- a/kud/tests/topology-manager.sh
+++ b/kud/tests/topology-manager.sh
@@ -15,8 +15,8 @@ set -o pipefail
source _common.sh
source _functions.sh
-adaptors="X710 XL710 X722"
-if [[ $(lspci | grep -c "Ethernet .* \(${adaptors// /\\|}\)") == "0" ]]; then
+sriov_capable_nodes=$(kubectl get nodes -o json | jq -r '.items[] | select((.status.capacity."intel.com/intel_sriov_700"!=null) and ((.status.capacity."intel.com/intel_sriov_700"|tonumber)>=2)) | .metadata.name')
+if [ -z "$sriov_capable_nodes" ]; then
echo "Ethernet adaptor version is not set. Topology manager test case cannot run on this machine"
exit 0
else