summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--INFO.yaml33
-rw-r--r--deployments/helm/servicemesh/istio-operator/.helmignore22
-rw-r--r--deployments/helm/servicemesh/istio-operator/Chart.yaml20
-rw-r--r--deployments/helm/servicemesh/istio-operator/README.md55
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/_helpers.tpl32
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/authproxy-rbac.yaml54
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/authproxy-service.yaml30
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/operator-istio-1.2-crd.yaml676
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/operator-rbac.yaml315
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/operator-remoteistio-1.2-crd.yaml268
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/operator-service.yaml33
-rw-r--r--deployments/helm/servicemesh/istio-operator/templates/operator-statefulset.yaml87
-rw-r--r--deployments/helm/servicemesh/istio-operator/values.yaml40
-rw-r--r--kud/deployment_infra/galaxy-requirements.yml2
-rw-r--r--kud/deployment_infra/playbooks/configure-kud.yml5
-rw-r--r--kud/deployment_infra/playbooks/configure-ovn4nfv.yml8
-rw-r--r--kud/deployment_infra/playbooks/kud-vars.yml4
-rwxr-xr-xkud/hosting_providers/vagrant/installer.sh2
-rwxr-xr-xkud/tests/_common.sh32
-rwxr-xr-xkud/tests/_functions.sh20
-rwxr-xr-xkud/tests/plugin.sh75
-rwxr-xr-xkud/tests/plugin_edgex.sh1
-rwxr-xr-xkud/tests/plugin_fw.sh116
-rw-r--r--src/k8splugin/internal/app/client.go19
-rw-r--r--src/k8splugin/internal/app/client_test.go2
-rw-r--r--src/k8splugin/internal/app/config_backend.go4
-rw-r--r--src/k8splugin/internal/app/instance.go8
-rw-r--r--src/k8splugin/internal/config/config.go54
-rw-r--r--src/k8splugin/internal/plugin/helpers.go58
-rw-r--r--src/k8splugin/internal/utils.go4
-rw-r--r--src/k8splugin/plugins/generic/plugin.go26
-rw-r--r--src/k8splugin/plugins/namespace/plugin.go3
-rw-r--r--src/k8splugin/plugins/namespace/plugin_test.go4
-rw-r--r--src/k8splugin/plugins/network/plugin.go22
-rw-r--r--src/k8splugin/plugins/network/plugin_test.go13
-rw-r--r--src/k8splugin/plugins/service/plugin.go12
-rw-r--r--src/k8splugin/plugins/service/plugin_test.go7
37 files changed, 2045 insertions, 121 deletions
diff --git a/INFO.yaml b/INFO.yaml
index d808cb30..eb39f34d 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -52,11 +52,6 @@ committers:
company: 'ATT'
id: 'bh526r'
timezone: 'America/Los_Angeles'
- - name: 'Victor Morales'
- email: 'victor.morales@intel.com'
- company: 'Intel'
- id: 'electrocucaracha'
- timezone: 'America/Los_Angeles'
- name: 'Ethan Lynn'
email: 'ethanlynnl@vmware.com'
company: 'VMWare'
@@ -77,6 +72,21 @@ committers:
company: 'Wind River'
id: 'Xiaohua626'
timezone: 'Asia/Shanghai'
+ - name: 'Kiran Kamineni'
+ email: 'kiran.k.kamineni@intel.com'
+ company: 'Intel'
+ id: 'kirankamineni'
+ timezone: 'America/Los_Angeles'
+ - name: 'Ritu Sood'
+ email: 'ritu.sood@intel.com'
+ company: 'Intel'
+ id: 'ritusood'
+ timezone: 'America/Los_Angeles'
+ - name: 'Liexiang Yue'
+ email: 'yueliexiang@chinamobile.com'
+ company: 'China Mobile'
+ id: 'YueLiexiang'
+ timezone: 'Asia/Shanghai'
tsc:
approval: 'https://lists.onap.org/pipermail/onap-tsc'
changes:
@@ -87,7 +97,8 @@ tsc:
- 'Ethan Lynn'
- 'Huang Haibin'
- 'Sudhakar Reddy'
- link: 'http://ircbot.wl.linuxfoundation.org/meetings/onap-meeting/2018/onap-meeting.2018-08-16-13.45.html'
+ link: "http://ircbot.wl.linuxfoundation.org/meetings/\
+ onap-meeting/2018/onap-meeting.2018-08-16-13.45.html"
- type: 'Remove'
name:
- 'yun huang'
@@ -97,3 +108,13 @@ tsc:
- type: 'Addition'
name: 'Xiaohua Zhang'
link: 'https://lists.onap.org/g/onap-tsc/message/4772'
+ - type: 'Remove'
+ name:
+ - 'Victor Morales'
+ link: 'https://lists.onap.org/g/onap-discuss/message/18280'
+ - type: 'Addition'
+ name:
+ - 'Kiran Kamineni'
+ - 'Liexiang Yue'
+ - 'Ritu Sood'
+ link: 'https://lists.onap.org/g/onap-discuss/message/18280'
diff --git a/deployments/helm/servicemesh/istio-operator/.helmignore b/deployments/helm/servicemesh/istio-operator/.helmignore
new file mode 100644
index 00000000..50af0317
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deployments/helm/servicemesh/istio-operator/Chart.yaml b/deployments/helm/servicemesh/istio-operator/Chart.yaml
new file mode 100644
index 00000000..1da83af4
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/Chart.yaml
@@ -0,0 +1,20 @@
+
+
+#/*Copyright 2019 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+name: istio-operator
+version: 0.0.15
+description: istio-operator manages Istio deployments on Kubernetes
+appVersion: 0.2.1
diff --git a/deployments/helm/servicemesh/istio-operator/README.md b/deployments/helm/servicemesh/istio-operator/README.md
new file mode 100644
index 00000000..4611a81e
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/README.md
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+## Prerequisites
+
+- Kubernetes 1.10.0+
+
+## Installing the chart
+
+To install the chart from the local directory:
+
+```
+helm install --name=istio-operator --namespace=istio-system istio-operator
+```
+
+## Uninstalling the Chart
+
+To uninstall/delete the `istio-operator` release:
+
+```
+$ helm del --purge istio-operator
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Banzaicloud Istio Operator chart and their default values.
+
+Parameter | Description | Default
+--------- | ----------- | -------
+`operator.image.repository` | Operator container image repository | `banzaicloud/istio-operator`
+`operator.image.tag` | Operator container image tag | `0.2.1`
+`operator.image.pullPolicy` | Operator container image pull policy | `IfNotPresent`
+`operator.resources` | CPU/Memory resource requests/limits (YAML) | Memory: `128Mi/256Mi`, CPU: `100m/200m`
+`istioVersion` | Supported Istio version | `1.2`
+`prometheusMetrics.enabled` | If true, use direct access for Prometheus metrics | `false`
+`prometheusMetrics.authProxy.enabled` | If true, use auth proxy for Prometheus metrics | `true`
+`prometheusMetrics.authProxy.image.repository` | Auth proxy container image repository | `gcr.io/kubebuilder/kube-rbac-proxy`
+`prometheusMetrics.authProxy.image.tag` | Auth proxy container image tag | `v0.4.0`
+`prometheusMetrics.authProxy.image.pullPolicy` | Auth proxy container image pull policy | `IfNotPresent`
+`rbac.enabled` | Create rbac service account and roles | `true`
diff --git a/deployments/helm/servicemesh/istio-operator/templates/_helpers.tpl b/deployments/helm/servicemesh/istio-operator/templates/_helpers.tpl
new file mode 100644
index 00000000..065bc1e3
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/_helpers.tpl
@@ -0,0 +1,32 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "istio-operator.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "istio-operator.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "istio-operator.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/authproxy-rbac.yaml b/deployments/helm/servicemesh/istio-operator/templates/authproxy-rbac.yaml
new file mode 100644
index 00000000..8a047e03
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/authproxy-rbac.yaml
@@ -0,0 +1,54 @@
+{{- if and .Values.rbac.enabled .Values.prometheusMetrics.enabled .Values.prometheusMetrics.authProxy.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "istio-operator.fullname" . }}-authproxy
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: authproxy
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: "{{ include "istio-operator.fullname" . }}-authproxy"
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: authproxy
+rules:
+- apiGroups: ["authentication.k8s.io"]
+ resources:
+ - tokenreviews
+ verbs: ["create"]
+- apiGroups: ["authorization.k8s.io"]
+ resources:
+ - subjectaccessreviews
+ verbs: ["create"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: "{{ include "istio-operator.fullname" . }}-authproxy"
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: authproxy
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: "{{ include "istio-operator.fullname" . }}-authproxy"
+subjects:
+- kind: ServiceAccount
+ name: {{ include "istio-operator.fullname" . }}-authproxy
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/authproxy-service.yaml b/deployments/helm/servicemesh/istio-operator/templates/authproxy-service.yaml
new file mode 100644
index 00000000..aad8a2be
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/authproxy-service.yaml
@@ -0,0 +1,30 @@
+{{- if and .Values.prometheusMetrics.enabled .Values.prometheusMetrics.authProxy.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "istio-operator.fullname" . }}-authproxy
+ annotations:
+ prometheus.io/port: "8443"
+ prometheus.io/scheme: https
+ prometheus.io/scrape: "true"
+ labels:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: authproxy
+spec:
+ ports:
+ - name: https
+ port: 8443
+ targetPort: https
+ selector:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: operator
+{{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/operator-istio-1.2-crd.yaml b/deployments/helm/servicemesh/istio-operator/templates/operator-istio-1.2-crd.yaml
new file mode 100644
index 00000000..b52ffc39
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/operator-istio-1.2-crd.yaml
@@ -0,0 +1,676 @@
+{{ if eq .Values.istioVersion 1.2 }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: istios.istio.banzaicloud.io
+ labels:
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.Status
+ description: Status of the resource
+ name: Status
+ type: string
+ - JSONPath: .status.ErrorMessage
+ description: Error message
+ name: Error
+ type: string
+ - JSONPath: .status.GatewayAddress
+ description: Ingress gateways of the resource
+ name: Gateways
+ type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: istio.banzaicloud.io
+ names:
+ kind: Istio
+ plural: istios
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ autoInjectionNamespaces:
+ description: List of namespaces to label with sidecar auto injection
+ enabled
+ items:
+ type: string
+ type: array
+ citadel:
+ description: Citadel configuration options
+ properties:
+ affinity:
+ type: object
+ caSecretName:
+ type: string
+ enabled:
+ type: boolean
+ healthCheck:
+ description: Enable health checking on the Citadel CSR signing API.
+ https://istio.io/docs/tasks/security/health-check/
+ type: boolean
+ image:
+ type: string
+ maxWorkloadCertTTL:
+ description: Citadel uses a flag max-workload-cert-ttl to control
+ the maximum lifetime for Istio certificates issued to workloads.
+ The default value is 90 days. If workload-cert-ttl on Citadel
+ or node agent is greater than max-workload-cert-ttl, Citadel will
+ fail issuing the certificate.
+ type: string
+ nodeSelector:
+ type: object
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ workloadCertTTL:
+ description: For the workloads running in Kubernetes, the lifetime
+ of their Istio certificates is controlled by the workload-cert-ttl
+ flag on Citadel. The default value is 90 days. This value should
+ be no greater than max-workload-cert-ttl of Citadel.
+ type: string
+ type: object
+ controlPlaneSecurityEnabled:
+ description: ControlPlaneSecurityEnabled control plane services are
+ communicating through mTLS
+ type: boolean
+ defaultConfigVisibility:
+ description: Set the default set of namespaces to which services, service
+ entries, virtual services, destination rules should be exported to
+ type: string
+ defaultPodDisruptionBudget:
+ description: Enable pod disruption budget for the control plane, which
+ is used to ensure Istio control plane components are gradually upgraded
+ or recovered
+ properties:
+ enabled:
+ type: boolean
+ type: object
+ defaultResources:
+ description: DefaultResources are applied for all Istio components by
+ default, can be overridden for each component
+ type: object
+ excludeIPRanges:
+ description: ExcludeIPRanges the range where not to capture egress traffic
+ type: string
+ galley:
+ description: Galley configuration options
+ properties:
+ affinity:
+ type: object
+ enabled:
+ type: boolean
+ image:
+ type: string
+ nodeSelector:
+ type: object
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ gateways:
+ description: Gateways configuration options
+ properties:
+ egress:
+ properties:
+ affinity:
+ type: object
+ applicationPorts:
+ type: string
+ enabled:
+ type: boolean
+ loadBalancerIP:
+ type: string
+ maxReplicas:
+ format: int32
+ type: integer
+ minReplicas:
+ format: int32
+ type: integer
+ nodeSelector:
+ type: object
+ ports:
+ items:
+ type: object
+ type: array
+ replicaCount:
+ format: int32
+ type: integer
+ requestedNetworkView:
+ type: string
+ resources:
+ type: object
+ sds:
+ properties:
+ enabled:
+ type: boolean
+ image:
+ type: string
+ resources:
+ type: object
+ type: object
+ serviceAnnotations:
+ type: object
+ serviceLabels:
+ type: object
+ serviceType:
+ enum:
+ - ClusterIP
+ - NodePort
+ - LoadBalancer
+ type: string
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ enabled:
+ type: boolean
+ ingress:
+ properties:
+ affinity:
+ type: object
+ applicationPorts:
+ type: string
+ enabled:
+ type: boolean
+ loadBalancerIP:
+ type: string
+ maxReplicas:
+ format: int32
+ type: integer
+ minReplicas:
+ format: int32
+ type: integer
+ nodeSelector:
+ type: object
+ ports:
+ items:
+ type: object
+ type: array
+ replicaCount:
+ format: int32
+ type: integer
+ requestedNetworkView:
+ type: string
+ resources:
+ type: object
+ sds:
+ properties:
+ enabled:
+ type: boolean
+ image:
+ type: string
+ resources:
+ type: object
+ type: object
+ serviceAnnotations:
+ type: object
+ serviceLabels:
+ type: object
+ serviceType:
+ enum:
+ - ClusterIP
+ - NodePort
+ - LoadBalancer
+ type: string
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ type: object
+ imagePullPolicy:
+ description: ImagePullPolicy describes a policy for if/when to pull
+ a container image
+ enum:
+ - Always
+ - Never
+ - IfNotPresent
+ type: string
+ includeIPRanges:
+ description: IncludeIPRanges the range where to capture egress traffic
+ type: string
+ istioCoreDNS:
+ description: Istio CoreDNS provides DNS resolution for services in multi
+ mesh setups
+ properties:
+ affinity:
+ type: object
+ enabled:
+ type: boolean
+ image:
+ type: string
+ nodeSelector:
+ type: object
+ pluginImage:
+ type: string
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ localityLB:
+ description: Locality based load balancing distribution or failover
+ settings.
+ properties:
+ distribute:
+ description: 'Optional: only one of distribute or failover can be
+ set. Explicitly specify loadbalancing weight across different
+ zones and geographical locations. Refer to [Locality weighted
+ load balancing](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/load_balancing/locality_weight)
+ If empty, the locality weight is set according to the endpoints
+ number within it.'
+ items:
+ properties:
+ from:
+ description: Originating locality, '/' separated, e.g. 'region/zone'.
+ type: string
+ to:
+ description: Map of upstream localities to traffic distribution
+ weights. The sum of all weights should be == 100. Any locality
+ not assigned a weight will receive no traffic.
+ type: object
+ type: object
+ type: array
+ enabled:
+ description: If set to true, locality based load balancing will
+ be enabled
+ type: boolean
+ failover:
+ description: 'Optional: only failover or distribute can be set.
+ Explicitly specify the region traffic will land on when endpoints
+ in local region becomes unhealthy. Should be used together with
+ OutlierDetection to detect unhealthy endpoints. Note: if no OutlierDetection
+ specified, this will not take effect.'
+ items:
+ properties:
+ from:
+ description: Originating region.
+ type: string
+ to:
+ description: Destination region the traffic will fail over
+ to when endpoints in the 'from' region becomes unhealthy.
+ type: string
+ type: object
+ type: array
+ type: object
+ meshExpansion:
+ description: If set to true, the pilot and citadel mtls will be exposed
+ on the ingress gateway also the remote istios will be connected through
+ gateways
+ type: boolean
+ mixer:
+ description: Mixer configuration options
+ properties:
+ affinity:
+ type: object
+ enabled:
+ type: boolean
+ image:
+ type: string
+ maxReplicas:
+ format: int32
+ type: integer
+ minReplicas:
+ format: int32
+ type: integer
+ multiClusterSupport:
+ description: Turn it on if you use mixer that supports multi cluster
+ telemetry
+ type: boolean
+ nodeSelector:
+ type: object
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ mtls:
+ description: MTLS enables or disables global mTLS
+ type: boolean
+ multiMesh:
+ description: Set to true to connect two or more meshes via their respective
+ ingressgateway services when workloads in each cluster cannot directly
+ talk to one another. All meshes should be using Istio mTLS and must
+ have a shared root CA for this model to work.
+ type: boolean
+ nodeAgent:
+ description: NodeAgent configuration options
+ properties:
+ affinity:
+ type: object
+ enabled:
+ type: boolean
+ image:
+ type: string
+ nodeSelector:
+ type: object
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ outboundTrafficPolicy:
+ description: Set the default behavior of the sidecar for handling outbound
+ traffic from the application (ALLOW_ANY or REGISTRY_ONLY)
+ properties:
+ mode:
+ enum:
+ - ALLOW_ANY
+ - REGISTRY_ONLY
+ type: string
+ type: object
+ pilot:
+ description: Pilot configuration options
+ properties:
+ affinity:
+ type: object
+ enabled:
+ type: boolean
+ image:
+ type: string
+ maxReplicas:
+ format: int32
+ type: integer
+ minReplicas:
+ format: int32
+ type: integer
+ nodeSelector:
+ type: object
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ sidecar:
+ type: boolean
+ tolerations:
+ items:
+ type: object
+ type: array
+ traceSampling:
+ format: float
+ type: number
+ type: object
+ proxy:
+ description: Proxy configuration options
+ properties:
+ componentLogLevel:
+ description: Per Component log level for proxy, applies to gateways
+ and sidecars. If a component level is not set, then the "LogLevel"
+ will be used. If left empty, "misc:error" is used.
+ type: string
+ dnsRefreshRate:
+ description: Configure the DNS refresh rate for Envoy cluster of
+          type STRICT_DNS. This must be given in terms of seconds. For example,
+ 300s is valid but 5m is invalid.
+ pattern: ^[0-9]{1,5}s$
+ type: string
+ enableCoreDump:
+ description: If set, newly injected sidecars will have core dumps
+ enabled.
+ type: boolean
+ image:
+ type: string
+ logLevel:
+ description: 'Log level for proxy, applies to gateways and sidecars.
+ If left empty, "warning" is used. Expected values are: trace|debug|info|warning|error|critical|off'
+ enum:
+ - trace
+ - debug
+ - info
+ - warning
+ - error
+ - critical
+ - "off"
+ type: string
+ privileged:
+ description: If set to true, istio-proxy container will have privileged
+ securityContext
+ type: boolean
+ resources:
+ type: object
+ type: object
+ proxyInit:
+ description: Proxy Init configuration options
+ properties:
+ image:
+ type: string
+ type: object
+ sds:
+ description: If SDS is configured, mTLS certificates for the sidecars
+ will be distributed through the SecretDiscoveryService instead of
+ using K8S secrets to mount the certificates
+ properties:
+ customTokenDirectory:
+ type: string
+ enabled:
+ description: If set to true, mTLS certificates for the sidecars
+ will be distributed through the SecretDiscoveryService instead
+ of using K8S secrets to mount the certificates.
+ type: boolean
+ udsPath:
+ description: Unix Domain Socket through which envoy communicates
+ with NodeAgent SDS to get key/cert for mTLS. Use secret-mount
+ files instead of SDS if set to empty.
+ type: string
+ useNormalJwt:
+ description: If set to true, envoy will fetch normal k8s service
+ account JWT from '/var/run/secrets/kubernetes.io/serviceaccount/token'
+ (https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)
+ and pass to sds server, which will be used to request key/cert
+ eventually this flag is ignored if UseTrustworthyJwt is set
+ type: boolean
+ useTrustworthyJwt:
+ description: 'If set to true, Istio will inject volumes mount for
+ k8s service account JWT, so that K8s API server mounts k8s service
+ account JWT to envoy container, which will be used to generate
+ key/cert eventually. (prerequisite: https://kubernetes.io/docs/concepts/storage/volumes/#projected)'
+ type: boolean
+ type: object
+ sidecarInjector:
+ description: SidecarInjector configuration options
+ properties:
+ affinity:
+ type: object
+ alwaysInjectSelector:
+ description: 'AlwaysInjectSelector: Forces the injection on pods
+ whose labels match this selector. It''s an array of label selectors,
+ that will be OR''ed, meaning we will iterate over it and stop
+ at the first match'
+ items:
+ type: object
+ type: array
+ autoInjectionPolicyEnabled:
+ description: This controls the 'policy' in the sidecar injector
+ type: boolean
+ enableNamespacesByDefault:
+ description: This controls whether the webhook looks for namespaces
+ for injection enabled or disabled
+ type: boolean
+ enabled:
+ type: boolean
+ image:
+ type: string
+ init:
+ properties:
+ resources:
+ type: object
+ type: object
+ initCNIConfiguration:
+ properties:
+ affinity:
+ type: object
+ binDir:
+ description: Must be the same as the environment’s --cni-bin-dir
+ setting (kubelet parameter)
+ type: string
+ confDir:
+ description: Must be the same as the environment’s --cni-conf-dir
+ setting (kubelet parameter)
+ type: string
+ enabled:
+ description: If true, the privileged initContainer istio-init
+ is not needed to perform the traffic redirect settings for
+ the istio-proxy
+ type: boolean
+ excludeNamespaces:
+ description: List of namespaces to exclude from Istio pod check
+ items:
+ type: string
+ type: array
+ image:
+ type: string
+ logLevel:
+ description: Logging level for CNI binary
+ type: string
+ type: object
+ neverInjectSelector:
+ description: 'NeverInjectSelector: Refuses the injection on pods
+ whose labels match this selector. It''s an array of label selectors,
+ that will be OR''ed, meaning we will iterate over it and stop
+ at the first match Takes precedence over AlwaysInjectSelector.'
+ items:
+ type: object
+ type: array
+ nodeSelector:
+ type: object
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ rewriteAppHTTPProbe:
+ description: If true, sidecar injector will rewrite PodSpec for
+ liveness health check to redirect request to sidecar. This makes
+ liveness check work even when mTLS is enabled.
+ type: boolean
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ tracing:
+ description: Configuration for each of the supported tracers
+ properties:
+ datadog:
+ properties:
+ address:
+ description: Host:Port for submitting traces to the Datadog
+ agent.
+ pattern: ^[^\:]+:[0-9]{1,5}$
+ type: string
+ type: object
+ enabled:
+ type: boolean
+ lightstep:
+ properties:
+ accessToken:
+ description: required for sending data to the pool
+ type: string
+ address:
+ description: the <host>:<port> of the satellite pool
+ pattern: ^[^\:]+:[0-9]{1,5}$
+ type: string
+ cacertPath:
+ description: the path to the file containing the cacert to use
+ when verifying TLS. If secure is true, this is required. If
+ a value is specified then a secret called "lightstep.cacert"
+ must be created in the destination namespace with the key
+ matching the base of the provided cacertPath and the value
+ being the cacert itself.
+ type: string
+ secure:
+ description: specifies whether data should be sent with TLS
+ type: boolean
+ type: object
+ tracer:
+ enum:
+ - zipkin
+ - lightstep
+ - datadog
+ type: string
+ zipkin:
+ properties:
+ address:
+ description: Host:Port for reporting trace data in zipkin format.
+ If not specified, will default to zipkin service (port 9411)
+ in the same namespace as the other istio components.
+ pattern: ^[^\:]+:[0-9]{1,5}$
+ type: string
+ type: object
+ type: object
+ useMCP:
+ description: Use the Mesh Control Protocol (MCP) for configuring Mixer
+ and Pilot. Requires galley.
+ type: boolean
+ version:
+ description: Contains the intended Istio version
+ pattern: ^1.2
+ type: string
+ watchAdapterCRDs:
+ description: Whether or not to establish watches for adapter-specific
+ CRDs
+ type: boolean
+ watchOneNamespace:
+ description: Whether to restrict the applications namespace the controller
+ manages
+ type: boolean
+ required:
+ - version
+ - mtls
+ type: object
+ status:
+ type: object
+ version: v1beta1
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+{{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/operator-rbac.yaml b/deployments/helm/servicemesh/istio-operator/templates/operator-rbac.yaml
new file mode 100644
index 00000000..d506ee41
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/operator-rbac.yaml
@@ -0,0 +1,315 @@
+{{- if .Values.rbac.enabled }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "istio-operator.fullname" . }}-operator
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "istio-operator.fullname" . }}-operator
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ - services
+ - endpoints
+ - pods
+ - replicationcontrollers
+ - services
+ - endpoints
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+- apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ - daemonsets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - apps
+ resources:
+ - deployments/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - extensions
+ resources:
+ - ingresses
+ - ingresses/status
+ verbs:
+ - '*'
+- apiGroups:
+ - extensions
+ resources:
+ - deployments
+ verbs:
+ - get
+- apiGroups:
+ - extensions
+ resources:
+ - deployments/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - extensions
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - autoscaling
+ resources:
+ - horizontalpodautoscalers
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - '*'
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - clusterroles
+ - clusterrolebindings
+ - roles
+ - rolebindings
+ - ""
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+- apiGroups:
+ - istio.banzaicloud.io
+ resources:
+ - istios
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - istio.banzaicloud.io
+ resources:
+ - istios/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - authentication.istio.io
+ - cloud.istio.io
+ - config.istio.io
+ - istio.istio.io
+ - networking.istio.io
+ - rbac.istio.io
+ - scalingpolicy.istio.io
+ resources:
+ - '*'
+ verbs:
+ - '*'
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - apps
+ resources:
+ - deployments/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - istio.banzaicloud.io
+ resources:
+ - remoteistios
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - istio.banzaicloud.io
+ resources:
+ - remoteistios/status
+ verbs:
+ - get
+ - update
+ - patch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - istio.banzaicloud.io
+ resources:
+ - istios
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ - validatingwebhookconfigurations
+ verbs:
+ - '*'
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{ include "istio-operator.fullname" . }}-operator
+ labels:
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ include "istio-operator.fullname" . }}-operator
+subjects:
+- kind: ServiceAccount
+ name: {{ include "istio-operator.fullname" . }}-operator
+ namespace: {{ .Release.Namespace }}
+{{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/operator-remoteistio-1.2-crd.yaml b/deployments/helm/servicemesh/istio-operator/templates/operator-remoteistio-1.2-crd.yaml
new file mode 100644
index 00000000..37741898
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/operator-remoteistio-1.2-crd.yaml
@@ -0,0 +1,268 @@
+{{ if eq .Values.istioVersion 1.2 }}
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: remoteistios.istio.banzaicloud.io
+ labels:
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+spec:
+ additionalPrinterColumns:
+ - JSONPath: .status.Status
+ description: Status of the resource
+ name: Status
+ type: string
+ - JSONPath: .status.ErrorMessage
+ description: Error message
+ name: Error
+ type: string
+ - JSONPath: .status.GatewayAddress
+ description: Ingress gateways of the resource
+ name: Gateways
+ type: string
+ - JSONPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ group: istio.banzaicloud.io
+ names:
+ kind: RemoteIstio
+ plural: remoteistios
+ scope: Namespaced
+ subresources:
+ status: {}
+ validation:
+ openAPIV3Schema:
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ properties:
+ autoInjectionNamespaces:
+ description: List of namespaces to label with sidecar auto injection
+ enabled
+ items:
+ type: string
+ type: array
+ citadel:
+ description: Citadel configuration options
+ properties:
+ affinity:
+ type: object
+ caSecretName:
+ type: string
+ enabled:
+ type: boolean
+ healthCheck:
+ description: Enable health checking on the Citadel CSR signing API.
+ https://istio.io/docs/tasks/security/health-check/
+ type: boolean
+ image:
+ type: string
+ maxWorkloadCertTTL:
+ description: Citadel uses a flag max-workload-cert-ttl to control
+ the maximum lifetime for Istio certificates issued to workloads.
+ The default value is 90 days. If workload-cert-ttl on Citadel
+ or node agent is greater than max-workload-cert-ttl, Citadel will
+ fail issuing the certificate.
+ type: string
+ nodeSelector:
+ type: object
+ resources:
+ type: object
+ tolerations:
+ items:
+ type: object
+ type: array
+ workloadCertTTL:
+ description: For the workloads running in Kubernetes, the lifetime
+ of their Istio certificates is controlled by the workload-cert-ttl
+ flag on Citadel. The default value is 90 days. This value should
+ be no greater than max-workload-cert-ttl of Citadel.
+ type: string
+ type: object
+ defaultResources:
+ description: DefaultResources are applied for all Istio components by
+ default, can be overridden for each component
+ type: object
+ enabledServices:
+ description: EnabledServices the Istio component services replicated
+ to remote side
+ items:
+ properties:
+ labelSelector:
+ type: string
+ name:
+ type: string
+ podIPs:
+ items:
+ type: string
+ type: array
+ ports:
+ items:
+ type: object
+ type: array
+ required:
+ - name
+ type: object
+ type: array
+ excludeIPRanges:
+ description: ExcludeIPRanges the range where not to capture egress traffic
+ type: string
+ includeIPRanges:
+ description: IncludeIPRanges the range where to capture egress traffic
+ type: string
+ proxy:
+ description: Proxy configuration options
+ properties:
+ componentLogLevel:
+ description: Per Component log level for proxy, applies to gateways
+ and sidecars. If a component level is not set, then the "LogLevel"
+ will be used. If left empty, "misc:error" is used.
+ type: string
+ dnsRefreshRate:
+ description: Configure the DNS refresh rate for Envoy cluster of
+                type STRICT_DNS. This must be given in terms of seconds. For example,
+ 300s is valid but 5m is invalid.
+ pattern: ^[0-9]{1,5}s$
+ type: string
+ enableCoreDump:
+ description: If set, newly injected sidecars will have core dumps
+ enabled.
+ type: boolean
+ image:
+ type: string
+ logLevel:
+ description: 'Log level for proxy, applies to gateways and sidecars.
+ If left empty, "warning" is used. Expected values are: trace|debug|info|warning|error|critical|off'
+ enum:
+ - trace
+ - debug
+ - info
+ - warning
+ - error
+ - critical
+ - "off"
+ type: string
+ privileged:
+ description: If set to true, istio-proxy container will have privileged
+ securityContext
+ type: boolean
+ resources:
+ type: object
+ type: object
+ proxyInit:
+ description: Proxy Init configuration options
+ properties:
+ image:
+ type: string
+ type: object
+ sidecarInjector:
+ description: SidecarInjector configuration options
+ properties:
+ affinity:
+ type: object
+ alwaysInjectSelector:
+ description: 'AlwaysInjectSelector: Forces the injection on pods
+ whose labels match this selector. It''s an array of label selectors,
+ that will be OR''ed, meaning we will iterate over it and stop
+ at the first match'
+ items:
+ type: object
+ type: array
+ autoInjectionPolicyEnabled:
+ description: This controls the 'policy' in the sidecar injector
+ type: boolean
+ enableNamespacesByDefault:
+ description: This controls whether the webhook looks for namespaces
+ for injection enabled or disabled
+ type: boolean
+ enabled:
+ type: boolean
+ image:
+ type: string
+ init:
+ properties:
+ resources:
+ type: object
+ type: object
+ initCNIConfiguration:
+ properties:
+ affinity:
+ type: object
+ binDir:
+ description: Must be the same as the environment’s --cni-bin-dir
+ setting (kubelet parameter)
+ type: string
+ confDir:
+ description: Must be the same as the environment’s --cni-conf-dir
+ setting (kubelet parameter)
+ type: string
+ enabled:
+ description: If true, the privileged initContainer istio-init
+ is not needed to perform the traffic redirect settings for
+ the istio-proxy
+ type: boolean
+ excludeNamespaces:
+ description: List of namespaces to exclude from Istio pod check
+ items:
+ type: string
+ type: array
+ image:
+ type: string
+ logLevel:
+ description: Logging level for CNI binary
+ type: string
+ type: object
+ neverInjectSelector:
+ description: 'NeverInjectSelector: Refuses the injection on pods
+ whose labels match this selector. It''s an array of label selectors,
+ that will be OR''ed, meaning we will iterate over it and stop
+ at the first match Takes precedence over AlwaysInjectSelector.'
+ items:
+ type: object
+ type: array
+ nodeSelector:
+ type: object
+ replicaCount:
+ format: int32
+ type: integer
+ resources:
+ type: object
+ rewriteAppHTTPProbe:
+ description: If true, sidecar injector will rewrite PodSpec for
+ liveness health check to redirect request to sidecar. This makes
+ liveness check work even when mTLS is enabled.
+ type: boolean
+ tolerations:
+ items:
+ type: object
+ type: array
+ type: object
+ required:
+ - enabledServices
+ type: object
+ status:
+ type: object
+ version: v1beta1
+status:
+ acceptedNames:
+ kind: ""
+ plural: ""
+ conditions: []
+ storedVersions: []
+{{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/operator-service.yaml b/deployments/helm/servicemesh/istio-operator/templates/operator-service.yaml
new file mode 100644
index 00000000..04ffc835
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/operator-service.yaml
@@ -0,0 +1,33 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: "{{ include "istio-operator.fullname" . }}-operator"
+ {{- if and .Values.prometheusMetrics.enabled (not .Values.prometheusMetrics.authProxy.enabled) }}
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "8080"
+ prometheus.io/scheme: http
+ {{- end }}
+ labels:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+spec:
+ selector:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: operator
+ ports:
+ - name: https
+ port: 443
+ {{- if and .Values.prometheusMetrics.enabled (not .Values.prometheusMetrics.authProxy.enabled) }}
+ - name: metrics
+ port: 8080
+ {{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/templates/operator-statefulset.yaml b/deployments/helm/servicemesh/istio-operator/templates/operator-statefulset.yaml
new file mode 100644
index 00000000..9e90ee80
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/templates/operator-statefulset.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: "{{ include "istio-operator.fullname" . }}-operator"
+ labels:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ helm.sh/chart: {{ include "istio-operator.chart" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/version: {{ .Chart.AppVersion }}
+ app.kubernetes.io/component: operator
+spec:
+ selector:
+ matchLabels:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: operator
+ serviceName: {{ include "istio-operator.fullname" . }}-operator
+ template:
+ metadata:
+ labels:
+ control-plane: controller-manager
+ controller-tools.k8s.io: "1.0"
+ app.kubernetes.io/name: {{ include "istio-operator.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/component: operator
+ spec:
+ {{- if .Values.rbac.enabled }}
+ serviceAccountName: {{ include "istio-operator.fullname" . }}-operator
+ {{- end }}
+ terminationGracePeriodSeconds: 60
+ containers:
+ {{- if and .Values.prometheusMetrics.enabled .Values.prometheusMetrics.authProxy.enabled }}
+ - name: kube-rbac-proxy
+ image: "{{ .Values.prometheusMetrics.authProxy.image.repository }}:{{ .Values.prometheusMetrics.authProxy.image.tag }}"
+ imagePullPolicy: {{ .Values.prometheusMetrics.authProxy.image.pullPolicy }}
+ args:
+ - "--secure-listen-address=0.0.0.0:8443"
+ - "--upstream=http://127.0.0.1:8080/"
+ - "--logtostderr=true"
+ - "--v=10"
+ ports:
+ - containerPort: 8443
+ name: https
+ {{- end }}
+ - command:
+ - /manager
+ image: "{{ .Values.operator.image.repository }}:{{ .Values.operator.image.tag }}"
+ imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
+ name: manager
+ args:
+ {{- if and .Values.prometheusMetrics.enabled .Values.prometheusMetrics.authProxy.enabled }}
+ - "--metrics-addr=127.0.0.1:8080"
+ {{- end }}
+ - "--watch-created-resources-events=false"
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - containerPort: 443
+ name: webhook-server
+ protocol: TCP
+ {{- if and .Values.prometheusMetrics.enabled (not .Values.prometheusMetrics.authProxy.enabled) }}
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ {{- end }}
+ resources:
+{{ toYaml .Values.operator.resources | indent 10 }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+{{ toYaml . | indent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+{{ toYaml . | indent 8 }}
+ {{- end }}
diff --git a/deployments/helm/servicemesh/istio-operator/values.yaml b/deployments/helm/servicemesh/istio-operator/values.yaml
new file mode 100644
index 00000000..cb937c11
--- /dev/null
+++ b/deployments/helm/servicemesh/istio-operator/values.yaml
@@ -0,0 +1,40 @@
+
+
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+operator:
+ image:
+ repository: banzaicloud/istio-operator
+ tag: 0.2.1
+ pullPolicy: IfNotPresent
+ resources:
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+istioVersion: 1.2
+
+## Prometheus Metrics
+prometheusMetrics:
+ enabled: false
+# Enable or disable the auth proxy (https://github.com/brancz/kube-rbac-proxy)
+# which protects your /metrics endpoint.
+ authProxy:
+ enabled: false
+
+## Role Based Access
+## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+ enabled: true
+
+nameOverride: ""
+fullnameOverride: ""
+
+nodeSelector: {}
+tolerations: []
+affinity: {}
diff --git a/kud/deployment_infra/galaxy-requirements.yml b/kud/deployment_infra/galaxy-requirements.yml
index 093dec87..17ac1dc2 100644
--- a/kud/deployment_infra/galaxy-requirements.yml
+++ b/kud/deployment_infra/galaxy-requirements.yml
@@ -8,7 +8,7 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
- src: andrewrothstein.go
- version: v2.1.10
+ version: v2.1.15
- src: andrewrothstein.kubernetes-helm
version: v1.2.9
- src: geerlingguy.docker
diff --git a/kud/deployment_infra/playbooks/configure-kud.yml b/kud/deployment_infra/playbooks/configure-kud.yml
index 0e32e69d..6ac0477d 100644
--- a/kud/deployment_infra/playbooks/configure-kud.yml
+++ b/kud/deployment_infra/playbooks/configure-kud.yml
@@ -23,6 +23,11 @@
when: helm_client.rc != 0
vars:
kubernetes_helm_ver: "v{{ helm_client_version }}"
+ tasks:
+ - name: Initialize helm client
+ command: helm init -c
+ args:
+ creates: ~/.helm
- hosts: kube-node
become: yes
diff --git a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
index 2d1a11d0..2084c95d 100644
--- a/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
+++ b/kud/deployment_infra/playbooks/configure-ovn4nfv.yml
@@ -13,12 +13,14 @@
- hosts: kube-master:kube-node
environment:
PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin/"
- roles:
- - role: andrewrothstein.go
- tasks:
+ pre_tasks:
- name: Load kud variables
include_vars:
file: kud-vars.yml
+ roles:
+ - role: andrewrothstein.go
+ go_ver: "{{ go_version }}"
+ tasks:
- name: clone ovn4nfv-k8s-plugin repo
git:
repo: "{{ ovn4nfv_url }}"
diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
index b11672f3..9b365477 100644
--- a/kud/deployment_infra/playbooks/kud-vars.yml
+++ b/kud/deployment_infra/playbooks/kud-vars.yml
@@ -54,10 +54,10 @@ istio_url: "https://github.com/istio/istio/releases/download/{{ istio_version }}
go_path: "{{ base_dest }}/go"
ovn4nfv_dest: "{{ go_path }}/src/ovn4nfv-k8s-plugin"
ovn4nfv_source_type: "source"
-ovn4nfv_version: aa14577f6bc672bc8622edada8a487825fdebce1
+ovn4nfv_version: adc7b2d430c44aa4137ac7f9420e14cfce3fa354
ovn4nfv_url: "https://git.opnfv.org/ovn4nfv-k8s-plugin/"
-go_version: '1.12.4'
+go_version: '1.12.5'
kubespray_version: 2.8.2
helm_client_version: 2.9.1
# kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
diff --git a/kud/hosting_providers/vagrant/installer.sh b/kud/hosting_providers/vagrant/installer.sh
index c9027150..51ca22e8 100755
--- a/kud/hosting_providers/vagrant/installer.sh
+++ b/kud/hosting_providers/vagrant/installer.sh
@@ -176,7 +176,7 @@ function install_plugin {
if [[ "${testing_enabled}" == "true" ]]; then
sudo ./start.sh
pushd $kud_tests
- for functional_test in plugin plugin_edgex; do
+ for functional_test in plugin plugin_edgex plugin_fw; do
bash ${functional_test}.sh
done
popd
diff --git a/kud/tests/_common.sh b/kud/tests/_common.sh
index bfb6ec4b..044891dd 100755
--- a/kud/tests/_common.sh
+++ b/kud/tests/_common.sh
@@ -27,10 +27,8 @@ unprotected_private_net=unprotected-private-net
protected_private_net=protected-private-net
ovn_multus_network_name=ovn-networkobj
rbd_metadata=rbd_metatada.json
-rbd_content_tarball=vault-consul-dev.tar
rbp_metadata=rbp_metatada.json
rbp_instance=rbp_instance.json
-rbp_content_tarball=profile.tar
# vFirewall vars
demo_artifacts_version=1.5.0
@@ -1119,17 +1117,13 @@ DEPLOYMENT
# populate_CSAR_rbdefinition() - Function that populates CSAR folder
# for testing resource bundle definition
function populate_CSAR_rbdefinition {
- local csar_id=$1
-
- _checks_args $csar_id
- pushd ${CSAR_DIR}/${csar_id}
+ _checks_args "$1"
+ pushd "${CSAR_DIR}/$1"
print_msg "Create Helm Chart Archives"
- rm -f ${rbd_content_tarball}.gz
- rm -f ${rbp_content_tarball}.gz
- tar -cf $rbd_content_tarball -C $test_folder/vnfs/testrb/helm vault-consul-dev
- tar -cf $rbp_content_tarball -C $test_folder/vnfs/testrb/helm/profile .
- gzip $rbp_content_tarball
- gzip $rbd_content_tarball
+ rm -f *.tar.gz
+ tar -czf rb_profile.tar.gz -C $test_folder/vnfs/testrb/helm/profile .
+ #Creates vault-consul-dev-0.0.0.tgz
+ helm package $test_folder/vnfs/testrb/helm/vault-consul-dev --version 0.0.0
popd
}
@@ -1144,3 +1138,17 @@ function populate_CSAR_edgex_rbdefinition {
tar -czf rb_definition.tar.gz -C $test_folder/vnfs/edgex/helm edgex
popd
}
+
+# populate_CSAR_fw_rbdefinition() - Function that populates CSAR folder
+# for testing resource bundle definition of firewall scenario
+function populate_CSAR_fw_rbdefinition {
+ _checks_args "$1"
+ pushd "${CSAR_DIR}/$1"
+ print_msg "Create Helm Chart Archives for vFirewall"
+ rm -f *.tar.gz
+ # Reuse profile from the edgeX case as it is an empty profile
+ tar -czf rb_profile.tar.gz -C $test_folder/vnfs/edgex/profile .
+ tar -czf rb_definition.tar.gz -C $test_folder/../demo firewall
+ popd
+}
+
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 86636ccd..d585086b 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -173,6 +173,26 @@ function wait_deployment {
done
}
+# wait_for_pod() - Wait until first pod matched by kubectl filters is in running status
+function wait_for_pod {
+    # Example usage:
+    # wait_for_pod example_pod
+    # wait_for_pod --namespace test different_pod
+    # wait_for_pod -n test -l app=plugin_test
+
+ status_phase=""
+ while [[ "$status_phase" != "Running" ]]; do
+ new_phase="$(kubectl get pods -o 'go-template={{ index .items 0 "status" "phase" }}' "$@" )"
+ if [[ "$new_phase" != "$status_phase" ]]; then
+ echo "$(date +%H:%M:%S) - Filter=[$*] : $new_phase"
+ status_phase="$new_phase"
+ fi
+ if [[ "$new_phase" == "Err"* ]]; then
+ exit 1
+ fi
+ done
+}
+
# setup() - Base testing setup shared among functional tests
function setup {
if ! $(kubectl version &>/dev/null); then
diff --git a/kud/tests/plugin.sh b/kud/tests/plugin.sh
index af74af5f..aff04128 100755
--- a/kud/tests/plugin.sh
+++ b/kud/tests/plugin.sh
@@ -17,9 +17,11 @@ source _common.sh
source _common_test.sh
source _functions.sh
-base_url="http://localhost:8081"
+base_url="http://localhost:9015/v1"
+kubeconfig_path="$HOME/.kube/config"
#Will resolve to file $KUBE_CONFIG_DIR/kud
cloud_region_id="kud"
+cloud_region_owner="test_owner"
namespace="testns"
csar_id="94e414f6-9ca4-11e8-bb6a-52540067263b"
rb_name="test-rbdef"
@@ -34,10 +36,10 @@ function _build_generic_sim {
return
fi
BUILD_ARGS="--no-cache"
- if [ $HTTP_PROXY ]; then
+ if [ ${HTTP_PROXY:-} ]; then
BUILD_ARGS+=" --build-arg HTTP_PROXY=${HTTP_PROXY}"
fi
- if [ $HTTPS_PROXY ]; then
+ if [ ${HTTPS_PROXY:-} ]; then
BUILD_ARGS+=" --build-arg HTTPS_PROXY=${HTTPS_PROXY}"
fi
@@ -67,7 +69,7 @@ populate_CSAR_rbdefinition $csar_id
# Test
print_msg "Create Resource Bundle Definition Metadata"
-payload_raw="
+payload="
{
\"rb-name\": \"${rb_name}\",
\"rb-version\": \"${rb_version}\",
@@ -78,14 +80,13 @@ payload_raw="
}
}
"
-payload=$(echo $payload_raw | tr '\n' ' ')
-rb_ret_name=$(curl -s -d "$payload" -X POST "${base_url}/v1/rb/definition" | jq -r '."rb-name"')
+call_api -d "$payload" "${base_url}/rb/definition"
print_msg "Upload Resource Bundle Definition Content"
-curl -s --data-binary @${CSAR_DIR}/${csar_id}/${rbd_content_tarball}.gz -X POST "${base_url}/v1/rb/definition/$rb_name/$rb_version/content"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/vault-consul-dev-0.0.0.tgz" "${base_url}/rb/definition/$rb_name/$rb_version/content"
print_msg "Listing Resource Bundle Definitions"
-rb_list=$(curl -s -X GET "${base_url}/v1/rb/definition/$rb_name")
+rb_list=$(call_api "${base_url}/rb/definition/$rb_name")
if [[ "$rb_list" != *"${rb_name}"* ]]; then
echo $rb_list
echo "Resource Bundle Definition not stored"
@@ -94,7 +95,7 @@ fi
print_msg "Create Resource Bundle Profile Metadata"
kubeversion=$(kubectl version | grep 'Server Version' | awk -F '"' '{print $6}')
-payload_raw="
+payload="
{
\"profile-name\": \"${profile_name}\",
\"rb-name\": \"${rb_name}\",
@@ -107,22 +108,33 @@ payload_raw="
}
}
"
-payload=$(echo $payload_raw | tr '\n' ' ')
-rbp_ret_name=$(curl -s -d "$payload" -X POST "${base_url}/v1/rb/definition/$rb_name/$rb_version/profile" | jq -r '."profile-name"')
+call_api -d "$payload" "${base_url}/rb/definition/$rb_name/$rb_version/profile"
print_msg "Upload Resource Bundle Profile Content"
-curl -s --data-binary @${CSAR_DIR}/${csar_id}/${rbp_content_tarball}.gz -X POST "${base_url}/v1/rb/definition/$rb_name/$rb_version/profile/$profile_name/content"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/rb_profile.tar.gz" "${base_url}/rb/definition/$rb_name/$rb_version/profile/$profile_name/content"
print_msg "Getting Resource Bundle Profile"
-rbp_ret=$(curl -s -X GET "${base_url}/v1/rb/definition/$rb_name/$rb_version/profile/$profile_name")
+rbp_ret=$(call_api "${base_url}/rb/definition/$rb_name/$rb_version/profile/$profile_name")
if [[ "$rbp_ret" != *"${profile_name}"* ]]; then
echo $rbp_ret
echo "Resource Bundle Profile not stored"
exit 1
fi
+print_msg "Setup cloud data"
+payload="$(cat <<EOF
+{
+ "cloud-region": "$cloud_region_id",
+ "cloud-owner": "$cloud_region_owner"
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/connectivity-info" >/dev/null
+
print_msg "Instantiate Profile"
-payload_raw="
+payload="
{
\"cloud-region\": \"$cloud_region_id\",
\"rb-name\":\"$rb_name\",
@@ -130,8 +142,9 @@ payload_raw="
\"profile-name\":\"$profile_name\"
}
"
-payload=$(echo $payload_raw | tr '\n' ' ')
-inst_id=$(curl -s -d "$payload" "${base_url}/v1/instance" | jq -r '.id')
+inst_id=$(call_api -d "$payload" "${base_url}/instance")
+echo "$inst_id"
+inst_id=$(jq -r '.id' <<< "$inst_id")
print_msg "Validating Kubernetes"
kubectl get --no-headers=true --namespace=${namespace} deployment ${release_name}-vault-consul-dev
@@ -139,35 +152,21 @@ kubectl get --no-headers=true --namespace=${namespace} service override-vault-co
echo "VNF Instance created succesfully with id: $inst_id"
print_msg "Getting $inst_id VNF Instance information"
-vnf_details=$(curl -s -X GET "${base_url}/v1/instance/${inst_id}")
-if [[ -z "$vnf_details" ]]; then
- echo "Cannot retrieved VNF Instance details"
- exit 1
-fi
-echo "VNF details $vnf_details"
+call_api "${base_url}/instance/${inst_id}"
+# Teardown
print_msg "Deleting $rb_name/$rb_version Resource Bundle Definition"
-curl -X DELETE "${base_url}/v1/rb/definition/$rb_name/$rb_version"
-if [[ 500 -ne $(curl -o /dev/null -w %{http_code} -s -X GET "${base_url}/v1/rb/definition/$rb_name/$rb_version") ]]; then
- echo "Resource Bundle Definition not deleted"
# TODO: Change the HTTP code for 404 when the resource is not found in the API
- exit 1
-fi
+delete_resource "${base_url}/rb/definition/$rb_name/$rb_version"
print_msg "Deleting $profile_name Resource Bundle Profile"
-curl -X DELETE "${base_url}/v1/rb/definition/$rb_name/$rb_version/profile/$profile_name"
-if [[ 500 -ne $(curl -o /dev/null -w %{http_code} -s -X GET "${base_url}/v1/rb/definition/$rb_name/$rb_version/profile/$profile_name") ]]; then
- echo "Resource Bundle Profile not deleted"
# TODO: Change the HTTP code for 404 when the resource is not found in the API
- exit 1
-fi
+delete_resource "${base_url}/rb/definition/$rb_name/$rb_version/profile/$profile_name"
print_msg "Deleting $inst_id VNF Instance"
-curl -X DELETE "${base_url}/v1/instance/${inst_id}"
-if [[ 404 -ne $(curl -o /dev/null -w %{http_code} -s -X GET "${base_url}/${inst_id}") ]]; then
- echo "VNF Instance not deleted"
- exit 1
-fi
+delete_resource "${base_url}/instance/${inst_id}"
+
+print_msg "Deleting ${cloud_region_id} cloud region connection"
+delete_resource "${base_url}/connectivity-info/${cloud_region_id}"
-# Teardown
teardown $plugin_deployment_name
diff --git a/kud/tests/plugin_edgex.sh b/kud/tests/plugin_edgex.sh
index 929961c0..8eae5692 100755
--- a/kud/tests/plugin_edgex.sh
+++ b/kud/tests/plugin_edgex.sh
@@ -99,7 +99,6 @@ kubectl get --no-headers=true --namespace=${namespace} service edgex-core-comman
print_msg "Retrieving VNF details"
call_api "${base_url}/instance/${vnf_id}"
-
#Teardown
print_msg "Deleting VNF Instance"
delete_resource "${base_url}/instance/${vnf_id}"
diff --git a/kud/tests/plugin_fw.sh b/kud/tests/plugin_fw.sh
new file mode 100755
index 00000000..d7bed4fd
--- /dev/null
+++ b/kud/tests/plugin_fw.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+base_url="http://localhost:9015/v1"
+kubeconfig_path="$HOME/.kube/config"
+csar_id=cc009bfe-bbee-11e8-9766-525400435678
+rb_name="vfw"
+rb_version="plugin_test"
+chart_name="firewall"
+profile_name="test_profile"
+release_name="test-release"
+namespace="plugin-tests-namespace"
+cloud_region_id="kud"
+cloud_region_owner="localhost"
+
+# Setup
+install_deps
+populate_CSAR_fw_rbdefinition "$csar_id"
+
+print_msg "Registering resource bundle"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "chart-name": "${chart_name}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/rb/definition"
+
+print_msg "Uploading resource bundle content"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/rb_definition.tar.gz" \
+ "${base_url}/rb/definition/${rb_name}/${rb_version}/content"
+
+print_msg "Registering rb's profile"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "profile-name": "${profile_name}",
+ "release-name": "${release_name}",
+ "namespace": "${namespace}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/rb/definition/${rb_name}/${rb_version}/profile"
+
+print_msg "Uploading profile data"
+call_api --data-binary "@${CSAR_DIR}/${csar_id}/rb_profile.tar.gz" \
+ "${base_url}/rb/definition/${rb_name}/${rb_version}/profile/${profile_name}/content"
+
+print_msg "Setup cloud data"
+payload="$(cat <<EOF
+{
+ "cloud-region": "$cloud_region_id",
+ "cloud-owner": "$cloud_region_owner"
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/connectivity-info" >/dev/null #massive output
+
+print_msg "Creating vFW VNF Instance"
+payload="$(cat <<EOF
+{
+ "rb-name": "${rb_name}",
+ "rb-version": "${rb_version}",
+ "profile-name": "${profile_name}",
+ "cloud-region": "${cloud_region_id}"
+}
+EOF
+)"
+response="$(call_api -d "${payload}" "${base_url}/instance")"
+echo "$response"
+vnf_id="$(jq -r '.id' <<< "${response}")"
+
+print_msg "Validating VNF instance"
+# Check if all pods are up
+wait_for_pod -n "${namespace}" -l app=sink
+wait_for_pod -n "${namespace}" -l app=firewall
+wait_for_pod -n "${namespace}" -l app=packetgen
+# TODO: Provide some health check to verify vFW work
+
+print_msg "Retrieving VNF details"
+call_api "${base_url}/instance/${vnf_id}"
+
+
+#Teardown
+print_msg "Deleting VNF Instance"
+delete_resource "${base_url}/instance/${vnf_id}"
+
+print_msg "Deleting Profile"
+delete_resource "${base_url}/rb/definition/${rb_name}/${rb_version}/profile/${profile_name}"
+
+print_msg "Deleting Resource Bundle"
+delete_resource "${base_url}/rb/definition/${rb_name}/${rb_version}"
+
+print_msg "Deleting ${cloud_region_id} cloud region connection"
+delete_resource "${base_url}/connectivity-info/${cloud_region_id}"
diff --git a/src/k8splugin/internal/app/client.go b/src/k8splugin/internal/app/client.go
index 8d2af297..914a8eec 100644
--- a/src/k8splugin/internal/app/client.go
+++ b/src/k8splugin/internal/app/client.go
@@ -39,6 +39,7 @@ type KubernetesClient struct {
dynamicClient dynamic.Interface
discoverClient discovery.CachedDiscoveryInterface
restMapper meta.RESTMapper
+ instanceID string
}
// getKubeConfig uses the connectivity client to get the kubeconfig based on the name
@@ -55,11 +56,17 @@ func (k *KubernetesClient) getKubeConfig(cloudregion string) (string, error) {
}
// init loads the Kubernetes configuation values stored into the local configuration file
-func (k *KubernetesClient) init(cloudregion string) error {
+func (k *KubernetesClient) init(cloudregion string, iid string) error {
if cloudregion == "" {
return pkgerrors.New("Cloudregion is empty")
}
+ if iid == "" {
+ return pkgerrors.New("Instance ID is empty")
+ }
+
+ k.instanceID = iid
+
configPath, err := k.getKubeConfig(cloudregion)
if err != nil {
return pkgerrors.Wrap(err, "Get kubeconfig file")
@@ -89,6 +96,7 @@ func (k *KubernetesClient) init(cloudregion string) error {
}
k.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(k.discoverClient)
+
return nil
}
@@ -122,8 +130,6 @@ func (k *KubernetesClient) ensureNamespace(namespace string) error {
func (k *KubernetesClient) createKind(resTempl helm.KubernetesResourceTemplate,
namespace string) (helm.KubernetesResource, error) {
- log.Println("Processing Kind: " + resTempl.GVK.Kind)
-
if _, err := os.Stat(resTempl.FilePath); os.IsNotExist(err) {
return helm.KubernetesResource{}, pkgerrors.New("File " + resTempl.FilePath + "does not exists")
}
@@ -137,6 +143,7 @@ func (k *KubernetesClient) createKind(resTempl helm.KubernetesResourceTemplate,
createdResourceName, err := pluginImpl.Create(resTempl.FilePath, namespace, k)
if err != nil {
+ log.Printf("Error: %s while creating: %s", err.Error(), resTempl.GVK.Kind)
return helm.KubernetesResource{}, pkgerrors.Wrap(err, "Error in plugin "+resTempl.GVK.Kind+" plugin")
}
@@ -212,3 +219,9 @@ func (k *KubernetesClient) GetDynamicClient() dynamic.Interface {
func (k *KubernetesClient) GetStandardClient() kubernetes.Interface {
return k.clientSet
}
+
+//GetInstanceID returns the instanceID that is injected into all the
+//resources created by the plugin
+func (k *KubernetesClient) GetInstanceID() string {
+ return k.instanceID
+}
diff --git a/src/k8splugin/internal/app/client_test.go b/src/k8splugin/internal/app/client_test.go
index fd293ab0..7001d9e2 100644
--- a/src/k8splugin/internal/app/client_test.go
+++ b/src/k8splugin/internal/app/client_test.go
@@ -72,7 +72,7 @@ func TestInit(t *testing.T) {
kubeClient := KubernetesClient{}
// Refer to the connection via its name
- err = kubeClient.init("mock_connection")
+ err = kubeClient.init("mock_connection", "abcdefg")
if err != nil {
t.Fatalf("TestGetKubeClient returned an error (%s)", err)
}
diff --git a/src/k8splugin/internal/app/config_backend.go b/src/k8splugin/internal/app/config_backend.go
index b31cbac7..6bc145ee 100644
--- a/src/k8splugin/internal/app/config_backend.go
+++ b/src/k8splugin/internal/app/config_backend.go
@@ -354,7 +354,7 @@ func scheduleResources(c chan configResourceList) {
log.Printf("[scheduleResources]: POST %v %v", data.profile, data.resourceTemplates)
for _, inst := range resp {
k8sClient := KubernetesClient{}
- err = k8sClient.init(inst.Request.CloudRegion)
+ err = k8sClient.init(inst.Request.CloudRegion, inst.ID)
if err != nil {
log.Printf("Getting CloudRegion Information: %s", err.Error())
//Move onto the next cloud region
@@ -374,7 +374,7 @@ func scheduleResources(c chan configResourceList) {
log.Printf("[scheduleResources]: DELETE %v %v", data.profile, data.resourceTemplates)
for _, inst := range resp {
k8sClient := KubernetesClient{}
- err = k8sClient.init(inst.Request.CloudRegion)
+ err = k8sClient.init(inst.Request.CloudRegion, inst.ID)
if err != nil {
log.Printf("Getting CloudRegion Information: %s", err.Error())
//Move onto the next cloud region
diff --git a/src/k8splugin/internal/app/instance.go b/src/k8splugin/internal/app/instance.go
index d28fe799..5cfdaea1 100644
--- a/src/k8splugin/internal/app/instance.go
+++ b/src/k8splugin/internal/app/instance.go
@@ -127,8 +127,10 @@ func (v *InstanceClient) Create(i InstanceRequest) (InstanceResponse, error) {
return InstanceResponse{}, pkgerrors.Wrap(err, "Error resolving helm charts")
}
+ id := generateInstanceID()
+
k8sClient := KubernetesClient{}
- err = k8sClient.init(i.CloudRegion)
+ err = k8sClient.init(i.CloudRegion, id)
if err != nil {
return InstanceResponse{}, pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
@@ -138,8 +140,6 @@ func (v *InstanceClient) Create(i InstanceRequest) (InstanceResponse, error) {
return InstanceResponse{}, pkgerrors.Wrap(err, "Create Kubernetes Resources")
}
- id := generateInstanceID()
-
//Compose the return response
resp := InstanceResponse{
ID: id,
@@ -292,7 +292,7 @@ func (v *InstanceClient) Delete(id string) error {
}
k8sClient := KubernetesClient{}
- err = k8sClient.init(inst.Request.CloudRegion)
+ err = k8sClient.init(inst.Request.CloudRegion, inst.ID)
if err != nil {
return pkgerrors.Wrap(err, "Getting CloudRegion Information")
}
diff --git a/src/k8splugin/internal/config/config.go b/src/k8splugin/internal/config/config.go
index ac653282..23ec401e 100644
--- a/src/k8splugin/internal/config/config.go
+++ b/src/k8splugin/internal/config/config.go
@@ -26,19 +26,20 @@ import (
// Configuration loads up all the values that are used to configure
// backend implementations
type Configuration struct {
- CAFile string `json:"ca-file"`
- ServerCert string `json:"server-cert"`
- ServerKey string `json:"server-key"`
- Password string `json:"password"`
- DatabaseAddress string `json:"database-address"`
- DatabaseType string `json:"database-type"`
- PluginDir string `json:"plugin-dir"`
- EtcdIP string `json:"etcd-ip"`
- EtcdCert string `json:"etcd-cert"`
- EtcdKey string `json:"etcd-key"`
- EtcdCAFile string `json:"etcd-ca-file"`
- OVNCentralAddress string `json:"ovn-central-address"`
- ServicePort string `json:"service-port"`
+ CAFile string `json:"ca-file"`
+ ServerCert string `json:"server-cert"`
+ ServerKey string `json:"server-key"`
+ Password string `json:"password"`
+ DatabaseAddress string `json:"database-address"`
+ DatabaseType string `json:"database-type"`
+ PluginDir string `json:"plugin-dir"`
+ EtcdIP string `json:"etcd-ip"`
+ EtcdCert string `json:"etcd-cert"`
+ EtcdKey string `json:"etcd-key"`
+ EtcdCAFile string `json:"etcd-ca-file"`
+ OVNCentralAddress string `json:"ovn-central-address"`
+ ServicePort string `json:"service-port"`
+ KubernetesLabelName string `json:"kubernetes-label-name"`
}
// Config is the structure that stores the configuration
@@ -74,19 +75,20 @@ func defaultConfiguration() *Configuration {
}
return &Configuration{
- CAFile: "ca.cert",
- ServerCert: "server.cert",
- ServerKey: "server.key",
- Password: "",
- DatabaseAddress: "127.0.0.1",
- DatabaseType: "mongo",
- PluginDir: cwd,
- EtcdIP: "127.0.0.1",
- EtcdCert: "etcd.cert",
- EtcdKey: "etcd.key",
- EtcdCAFile: "etcd-ca.cert",
- OVNCentralAddress: "127.0.0.1:6641",
- ServicePort: "9015",
+ CAFile: "ca.cert",
+ ServerCert: "server.cert",
+ ServerKey: "server.key",
+ Password: "",
+ DatabaseAddress: "127.0.0.1",
+ DatabaseType: "mongo",
+ PluginDir: cwd,
+ EtcdIP: "127.0.0.1",
+ EtcdCert: "etcd.cert",
+ EtcdKey: "etcd.key",
+ EtcdCAFile: "etcd-ca.cert",
+ OVNCentralAddress: "127.0.0.1:6641",
+ ServicePort: "9015",
+ KubernetesLabelName: "k8splugin.io/rb-instance-id",
}
}
diff --git a/src/k8splugin/internal/plugin/helpers.go b/src/k8splugin/internal/plugin/helpers.go
index 26e0f467..b5c9109c 100644
--- a/src/k8splugin/internal/plugin/helpers.go
+++ b/src/k8splugin/internal/plugin/helpers.go
@@ -17,14 +17,18 @@
package plugin
import (
+ "encoding/json"
"log"
"strings"
utils "github.com/onap/multicloud-k8s/src/k8splugin/internal"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/config"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
pkgerrors "github.com/pkg/errors"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
@@ -45,6 +49,9 @@ type KubernetesConnector interface {
// GetStandardClient returns the standard client that can be used to handle
// standard kubernetes kinds
GetStandardClient() kubernetes.Interface
+
+ //GetInstanceID returns the InstanceID for tracking during creation
+ GetInstanceID() string
}
// Reference is the interface that is implemented
@@ -90,3 +97,54 @@ func GetPluginByKind(kind string) (Reference, error) {
return pluginImpl, nil
}
+
+// TagPodsIfPresent finds the PodTemplateSpec from any workload
+// object that contains it and changes the spec to include the tag label
+func TagPodsIfPresent(unstruct *unstructured.Unstructured, tag string) {
+
+ spec, ok := unstruct.Object["spec"].(map[string]interface{})
+ if !ok {
+ log.Println("Error converting spec to map")
+ return
+ }
+ template, ok := spec["template"].(map[string]interface{})
+ if !ok {
+ log.Println("Error converting template to map")
+ return
+ }
+
+ data, err := json.Marshal(template)
+ if err != nil {
+ log.Println("Error Marshaling Podspec")
+ return
+ }
+
+	//Attempt to convert the template to a podtemplatespec.
+	//This is to check if we have any pods being created.
+	podTemplateSpec := &corev1.PodTemplateSpec{}
+	err = json.Unmarshal(data, podTemplateSpec)
+	if err != nil {
+		log.Println("Did not find a podTemplateSpec: " + err.Error())
+		return
+	}
+
+ //At this point, we know that the data contains a PodTemplateSpec
+ metadata, ok := template["metadata"].(map[string]interface{})
+ if !ok {
+ log.Println("Error converting metadata to map")
+ return
+ }
+
+	//Get the labels map; unstructured nested maps are map[string]interface{}
+	labels, ok := metadata["labels"].(map[string]interface{})
+	if !ok {
+		if metadata["labels"] != nil {
+			log.Println("Error converting labels to map")
+			return
+		}
+		//Labels are absent; create the map and attach it to metadata
+		labels = map[string]interface{}{}
+		metadata["labels"] = labels
+	}
+	labels[config.GetConfiguration().KubernetesLabelName] = tag
+}
diff --git a/src/k8splugin/internal/utils.go b/src/k8splugin/internal/utils.go
index 47a236c2..174f8e79 100644
--- a/src/k8splugin/internal/utils.go
+++ b/src/k8splugin/internal/utils.go
@@ -17,8 +17,8 @@ import (
"io/ioutil"
"log"
"os"
- "path/filepath"
"path"
+ "path/filepath"
"plugin"
"strings"
@@ -52,13 +52,11 @@ func DecodeYAML(path string, into runtime.Object) (runtime.Object, error) {
}
}
- log.Println("Reading YAML file")
rawBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, pkgerrors.Wrap(err, "Read YAML file error")
}
- log.Println("Decoding deployment YAML")
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(rawBytes, nil, into)
if err != nil {
diff --git a/src/k8splugin/plugins/generic/plugin.go b/src/k8splugin/plugins/generic/plugin.go
index b9a96ab5..0711466f 100644
--- a/src/k8splugin/plugins/generic/plugin.go
+++ b/src/k8splugin/plugins/generic/plugin.go
@@ -14,8 +14,6 @@ limitations under the License.
package main
import (
- "log"
-
pkgerrors "github.com/pkg/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -23,10 +21,14 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utils "github.com/onap/multicloud-k8s/src/k8splugin/internal"
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/plugin"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/config"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/plugin"
)
+// Compile time check to see if genericPlugin implements the correct interface
+var _ plugin.Reference = genericPlugin{}
+
// ExportedVariable is what we will look for when calling the generic plugin
var ExportedVariable genericPlugin
@@ -56,6 +58,20 @@ func (g genericPlugin) Create(yamlFilePath string, namespace string, client plug
return "", pkgerrors.Wrap(err, "Mapping kind to resource error")
}
+ //Add the tracking label to all resources created here
+ labels := unstruct.GetLabels()
+ //Check if labels exist for this object
+ if labels == nil {
+ labels = map[string]string{}
+ }
+ labels[config.GetConfiguration().KubernetesLabelName] = client.GetInstanceID()
+ unstruct.SetLabels(labels)
+
+ // This checks if the resource we are creating has a podSpec in it
+ // Eg: Deployment, StatefulSet, Job etc..
+ // If a PodSpec is found, the label will be added to it too.
+ plugin.TagPodsIfPresent(unstruct, client.GetInstanceID())
+
gvr := mapping.Resource
var createdObj *unstructured.Unstructured
@@ -94,8 +110,6 @@ func (g genericPlugin) Get(resource helm.KubernetesResource,
}
gvr := mapping.Resource
- log.Printf("Using gvr: %s, %s, %s", gvr.Group, gvr.Version, gvr.Resource)
-
opts := metav1.GetOptions{}
var unstruct *unstructured.Unstructured
switch mapping.Scope.Name() {
@@ -141,8 +155,6 @@ func (g genericPlugin) Delete(resource helm.KubernetesResource, namespace string
}
gvr := mapping.Resource
- log.Printf("Using gvr: %s, %s, %s", gvr.Group, gvr.Version, gvr.Resource)
-
deletePolicy := metav1.DeletePropagationForeground
opts := &metav1.DeleteOptions{
PropagationPolicy: &deletePolicy,
diff --git a/src/k8splugin/plugins/namespace/plugin.go b/src/k8splugin/plugins/namespace/plugin.go
index d30f55b8..feb2aa61 100644
--- a/src/k8splugin/plugins/namespace/plugin.go
+++ b/src/k8splugin/plugins/namespace/plugin.go
@@ -26,6 +26,9 @@ import (
"github.com/onap/multicloud-k8s/src/k8splugin/internal/plugin"
)
+// Compile time check to see if namespacePlugin implements the correct interface
+var _ plugin.Reference = namespacePlugin{}
+
// ExportedVariable is what we will look for when calling the plugin
var ExportedVariable namespacePlugin
diff --git a/src/k8splugin/plugins/namespace/plugin_test.go b/src/k8splugin/plugins/namespace/plugin_test.go
index 489ac096..c1944a40 100644
--- a/src/k8splugin/plugins/namespace/plugin_test.go
+++ b/src/k8splugin/plugins/namespace/plugin_test.go
@@ -46,6 +46,10 @@ func (t TestKubernetesConnector) GetStandardClient() kubernetes.Interface {
return fake.NewSimpleClientset(t.object)
}
+func (t TestKubernetesConnector) GetInstanceID() string {
+ return ""
+}
+
func TestCreateNamespace(t *testing.T) {
testCases := []struct {
label string
diff --git a/src/k8splugin/plugins/network/plugin.go b/src/k8splugin/plugins/network/plugin.go
index ca5aa959..aa0d584b 100644
--- a/src/k8splugin/plugins/network/plugin.go
+++ b/src/k8splugin/plugins/network/plugin.go
@@ -14,9 +14,10 @@ limitations under the License.
package main
import (
- v1 "github.com/onap/multicloud-k8s/src/k8splugin/plugins/network/v1"
"regexp"
+ v1 "github.com/onap/multicloud-k8s/src/k8splugin/plugins/network/v1"
+
utils "github.com/onap/multicloud-k8s/src/k8splugin/internal"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/plugin"
@@ -25,20 +26,25 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
+// Compile time check to see if networkPlugin implements the correct interface
+var _ plugin.Reference = networkPlugin{}
+
// ExportedVariable is what we will look for when calling the plugin
var ExportedVariable networkPlugin
type networkPlugin struct {
}
-func extractData(data string) (cniType, networkName string) {
+func extractData(data string) (cniType, networkName string, err error) {
re := regexp.MustCompile("_")
split := re.Split(data, -1)
- if len(split) != 3 {
+ if len(split) != 2 {
+ err = pkgerrors.New("Couldn't split resource '" + data +
+ "' into CNI type and Network name")
return
}
- cniType = split[1]
- networkName = split[2]
+ cniType = split[0]
+ networkName = split[1]
return
}
@@ -82,7 +88,11 @@ func (p networkPlugin) List(gvk schema.GroupVersionKind, namespace string,
// Delete an existing Network
func (p networkPlugin) Delete(resource helm.KubernetesResource, namespace string, client plugin.KubernetesConnector) error {
- cniType, networkName := extractData(resource.Name)
+ cniType, networkName, err := extractData(resource.Name)
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error extracting CNI type from resource")
+ }
+
typePlugin, ok := utils.LoadedPlugins[cniType+"-network"]
if !ok {
return pkgerrors.New("No plugin for resource " + cniType + " found")
diff --git a/src/k8splugin/plugins/network/plugin_test.go b/src/k8splugin/plugins/network/plugin_test.go
index 586bccb8..33cae1c7 100644
--- a/src/k8splugin/plugins/network/plugin_test.go
+++ b/src/k8splugin/plugins/network/plugin_test.go
@@ -130,18 +130,23 @@ func TestDeleteNetwork(t *testing.T) {
}{
{
label: "Fail to load non-existing plugin",
- input: "test",
- expectedError: "No plugin for resource",
+ input: "non-existing-cni_test",
+ expectedError: "No plugin for resource non-existing-cni",
},
{
- label: "Fail to delete a network",
+ label: "Fail to extract cni from network name",
input: "1_ovn4nfvk8s_test",
+ expectedError: "Error extracting CNI type from resource: Couldn't split resource '1_ovn4nfvk8s_test' into CNI type and Network name",
+ },
+ {
+ label: "Fail to delete a network",
+ input: "ovn4nfvk8s_test",
mockError: "Internal error",
expectedError: "Error during the deletion for ovn4nfvk8s plugin: Internal error",
},
{
label: "Successfully delete a ovn4nfv network",
- input: "1_ovn4nfvk8s_test",
+ input: "ovn4nfvk8s_test",
},
}
diff --git a/src/k8splugin/plugins/service/plugin.go b/src/k8splugin/plugins/service/plugin.go
index 2fceffc0..4c1f37be 100644
--- a/src/k8splugin/plugins/service/plugin.go
+++ b/src/k8splugin/plugins/service/plugin.go
@@ -22,10 +22,14 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
utils "github.com/onap/multicloud-k8s/src/k8splugin/internal"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/config"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/plugin"
)
+// Compile time check to see if servicePlugin implements the correct interface
+var _ plugin.Reference = servicePlugin{}
+
// ExportedVariable is what we will look for when calling the plugin
var ExportedVariable servicePlugin
@@ -49,6 +53,14 @@ func (p servicePlugin) Create(yamlFilePath string, namespace string, client plug
}
service.Namespace = namespace
+ labels := service.GetLabels()
+ //Check if labels exist for this object
+ if labels == nil {
+ labels = map[string]string{}
+ }
+ labels[config.GetConfiguration().KubernetesLabelName] = client.GetInstanceID()
+ service.SetLabels(labels)
+
result, err := client.GetStandardClient().CoreV1().Services(namespace).Create(service)
if err != nil {
return "", pkgerrors.Wrap(err, "Create Service error")
diff --git a/src/k8splugin/plugins/service/plugin_test.go b/src/k8splugin/plugins/service/plugin_test.go
index aa0bcc29..1cef5027 100644
--- a/src/k8splugin/plugins/service/plugin_test.go
+++ b/src/k8splugin/plugins/service/plugin_test.go
@@ -14,11 +14,12 @@ limitations under the License.
package main
import (
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
"reflect"
"strings"
"testing"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
+
coreV1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,6 +46,10 @@ func (t TestKubernetesConnector) GetStandardClient() kubernetes.Interface {
return fake.NewSimpleClientset(t.object)
}
+func (t TestKubernetesConnector) GetInstanceID() string {
+ return ""
+}
+
func TestCreateService(t *testing.T) {
name := "mock-service"
testCases := []struct {