-rw-r--r-- | kud/demo/composite-firewall/firewall/templates/deployment.yaml | 135
-rw-r--r-- | kud/demo/composite-firewall/firewall/values.yaml | 11
-rw-r--r-- | kud/demo/composite-firewall/packetgen/templates/deployment.yaml | 123
-rw-r--r-- | kud/demo/composite-firewall/packetgen/values.yaml | 11
-rw-r--r-- | kud/demo/composite-firewall/sink/values.yaml | 4
-rw-r--r-- | kud/deployment_infra/emco/examples/02-project.yaml | 52
-rw-r--r-- | kud/deployment_infra/emco/examples/values.yaml.example | 2
-rw-r--r-- | kud/hosting_providers/containerized/addons/values.yaml.tmpl | 2
-rwxr-xr-x | kud/hosting_providers/containerized/installer.sh | 3
-rwxr-xr-x | kud/tests/_functions.sh | 4
-rwxr-xr-x | kud/tests/plugin_fw_v2.sh | 18
-rw-r--r-- | kud/tests/plugin_fw_v2.yaml | 19
-rw-r--r-- | src/k8splugin/internal/app/instance.go | 38 |
13 files changed, 267 insertions, 155 deletions
diff --git a/kud/demo/composite-firewall/firewall/templates/deployment.yaml b/kud/demo/composite-firewall/firewall/templates/deployment.yaml
index 632a50bf..560f66de 100644
--- a/kud/demo/composite-firewall/firewall/templates/deployment.yaml
+++ b/kud/demo/composite-firewall/firewall/templates/deployment.yaml
@@ -1,5 +1,5 @@
-apiVersion: apps/v1
-kind: Deployment
+apiVersion: kubevirt.io/v1alpha3
+kind: VirtualMachine
 metadata:
   name: {{ include "firewall.fullname" . }}
   labels:
@@ -7,57 +7,96 @@ metadata:
     app: {{ include "firewall.name" . }}
     chart: {{ .Chart.Name }}
 spec:
-  replicas: {{ .Values.replicaCount }}
-  selector:
-    matchLabels:
-      app: {{ include "firewall.name" . }}
-      release: {{ .Release.Name }}
+  running: true
   template:
     metadata:
       labels:
         app: {{ include "firewall.name" . }}
         release: {{ .Release.Name }}
-      annotations:
-        VirtletLibvirtCPUSetting: |
-          mode: host-model
-        VirtletCloudInitUserData: |
-          ssh_pwauth: True
-          users:
-          - name: admin
-            gecos: User
-            primary-group: admin
-            groups: users
-            sudo: ALL=(ALL) NOPASSWD:ALL
-            lock_passwd: false
-            passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
-          runcmd:
-            - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
-            - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
-            - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
-            - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
-            - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
-            - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
-            - export protected_net_gw={{ .Values.global.protectedNetGw }}
-            - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
-            - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/firewall | sudo -E bash
-        VirtletRootVolumeSize: 5Gi
-        kubernetes.io/target-runtime: virtlet.cloud
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: extraRuntime
-                operator: In
-                values:
-                - virtlet
-      containers:
-      - name: {{ .Chart.Name }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
-        tty: true
-        stdin: true
+      domain:
+        cpu:
+          model: host-model
+        devices:
+          disks:
+          - name: containerdisk
+            disk:
+              bus: virtio
+          - name: cloudinitdisk
+            disk:
+              bus: virtio
+          interfaces:
+          - name: default
+            bridge: {}
+          - name: unprotected
+            macAddress: 52:57:2b:7b:e4:27
+            bridge: {}
+          - name: protected
+            macAddress: fa:d1:3a:a1:5c:67
+            bridge: {}
+          - name: emco
+            macAddress: 86:31:ea:6a:ce:75
+            bridge: {}
         resources:
-          limits:
+          requests:
             memory: {{ .Values.resources.memory }}
+      networks:
+      - name: default
+        pod: {}
+      - name: unprotected
+        multus:
+          networkName: {{ .Values.global.unprotectedNetworkName }}
+      - name: protected
+        multus:
+          networkName: {{ .Values.global.protectedNetworkName }}
+      - name: emco
+        multus:
+          networkName: {{ .Values.global.emcoPrivateNetworkName }}
+      volumes:
+      - name: cloudinitdisk
+        cloudInitNoCloud:
+          networkData: |
+            version: 2
+            ethernets:
+              enp1s0:
+                dhcp4: true
+              eth1:
+                match:
+                  macaddress: "52:57:2b:7b:e4:27"
+                set-name: eth1
+                dhcp4: true
+              eth2:
+                match:
+                  macaddress: "fa:d1:3a:a1:5c:67"
+                set-name: eth2
+                dhcp4: true
+              eth3:
+                match:
+                  macaddress: "86:31:ea:6a:ce:75"
+                set-name: eth3
+                dhcp4: true
+          userData: |
+            #cloud-config
+            ssh_pwauth: True
+            users:
+            - name: admin
+              gecos: User
+              primary-group: admin
+              groups: users
+              sudo: ALL=(ALL) NOPASSWD:ALL
+              lock_passwd: false
+              passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+            runcmd:
+              - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
+              - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
+              - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
+              - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
+              - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
+              - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
+              - export protected_net_gw={{ .Values.global.protectedNetGw }}
+              - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
+              - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/firewall | sudo -E bash
+      - name: containerdisk
+        containerDisk:
+          image: integratedcloudnative/ubuntu:16.04
+          imagePullPolicy: IfNotPresent
diff --git a/kud/demo/composite-firewall/firewall/values.yaml b/kud/demo/composite-firewall/firewall/values.yaml
index 09098564..813174a2 100644
--- a/kud/demo/composite-firewall/firewall/values.yaml
+++ b/kud/demo/composite-firewall/firewall/values.yaml
@@ -2,13 +2,6 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

-replicaCount: 1
-
-image:
-  repository: virtlet.cloud/ubuntu/16.04
-  tag: latest
-  pullPolicy: IfNotPresent
-
 nameOverride: ""
 fullnameOverride: ""

@@ -35,8 +28,8 @@ global:
   vfwPrivateIp2: 10.10.20.3

   #Packetgen container
-  vpgPrivateIp0: 192.168.10.200
-  vpgPrivateIp1: 10.10.20.200
+  vpgPrivateIp0: 192.168.10.2
+  vpgPrivateIp1: 10.10.20.2

   #Sink container
   vsnPrivateIp0: 192.168.20.3
diff --git a/kud/demo/composite-firewall/packetgen/templates/deployment.yaml b/kud/demo/composite-firewall/packetgen/templates/deployment.yaml
index 827d2838..a46d22ee 100644
--- a/kud/demo/composite-firewall/packetgen/templates/deployment.yaml
+++ b/kud/demo/composite-firewall/packetgen/templates/deployment.yaml
@@ -1,5 +1,5 @@
-apiVersion: apps/v1
-kind: Deployment
+apiVersion: kubevirt.io/v1alpha3
+kind: VirtualMachine
 metadata:
   name: {{ include "packetgen.fullname" . }}
   labels:
@@ -7,11 +7,7 @@ metadata:
     app: {{ include "packetgen.name" . }}
     chart: {{ .Chart.Name }}
 spec:
-  replicas: {{ .Values.replicaCount }}
-  selector:
-    matchLabels:
-      app: {{ include "packetgen.name" .}}
-      release: {{ .Release.Name }}
+  running: true
   template:
     metadata:
       labels:
@@ -20,46 +16,79 @@ spec:
       annotations:
         app: {{ include "packetgen.name" . }}
         release: {{ .Release.Name }}
-        VirtletLibvirtCPUSetting: |
-          mode: host-model
-        VirtletCloudInitUserData: |
-          ssh_pwauth: True
-          users:
-          - name: admin
-            gecos: User
-            primary-group: admin
-            groups: users
-            sudo: ALL=(ALL) NOPASSWD:ALL
-            lock_passwd: false
-            passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
-          runcmd:
-            - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
-            - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
-            - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
-            - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
-            - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
-            - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
-            - export protected_net_gw={{ .Values.global.protectedNetGw }}
-            - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
-            - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/packetgen | sudo -E bash
-        VirtletRootVolumeSize: 5Gi
-        kubernetes.io/target-runtime: virtlet.cloud
     spec:
-      affinity:
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            - matchExpressions:
-              - key: extraRuntime
-                operator: In
-                values:
-                - virtlet
-      containers:
-      - name: {{ .Chart.Name }}
-        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
-        imagePullPolicy: {{ .Values.image.pullPolicy }}
-        tty: true
-        stdin: true
+      domain:
+        cpu:
+          model: host-model
+        devices:
+          disks:
+          - name: containerdisk
+            disk:
+              bus: virtio
+          - name: cloudinitdisk
+            disk:
+              bus: virtio
+          interfaces:
+          - name: default
+            bridge: {}
+          - name: unprotected
+            macAddress: ee:f0:75:e0:b6:26
+            bridge: {}
+          - name: emco
+            macAddress: 0a:c0:37:55:f5:ab
+            bridge: {}
         resources:
-          limits:
+          requests:
             memory: {{ .Values.resources.limits.memory }}
+      networks:
+      - name: default
+        pod: {}
+      - name: unprotected
+        multus:
+          networkName: {{ .Values.global.unprotectedNetworkName }}
+      - name: emco
+        multus:
+          networkName: {{ .Values.global.emcoPrivateNetworkName }}
+      volumes:
+      - name: cloudinitdisk
+        cloudInitNoCloud:
+          networkData: |
+            version: 2
+            ethernets:
+              enp1s0:
+                dhcp4: true
+              eth1:
+                match:
+                  macaddress: "ee:f0:75:e0:b6:26"
+                set-name: eth1
+                dhcp4: true
+              eth2:
+                match:
+                  macaddress: "0a:c0:37:55:f5:ab"
+                set-name: eth2
+                dhcp4: true
+          userData: |
+            #cloud-config
+            ssh_pwauth: True
+            users:
+            - name: admin
+              gecos: User
+              primary-group: admin
+              groups: users
+              sudo: ALL=(ALL) NOPASSWD:ALL
+              lock_passwd: false
+              passwd: "$6$rounds=4096$QA5OCKHTE41$jRACivoPMJcOjLRgxl3t.AMfU7LhCFwOWv2z66CQX.TSxBy50JoYtycJXSPr2JceG.8Tq/82QN9QYt3euYEZW/"
+            runcmd:
+              - export demo_artifacts_version={{ .Values.global.demoArtifactsVersion }}
+              - export vfw_private_ip_0={{ .Values.global.vfwPrivateIp0 }}
+              - export vsn_private_ip_0={{ .Values.global.vsnPrivateIp0 }}
+              - export protected_net_cidr={{ .Values.global.protectedNetCidr }}
+              - export dcae_collector_ip={{ .Values.global.dcaeCollectorIp }}
+              - export dcae_collector_port={{ .Values.global.dcaeCollectorPort }}
+              - export protected_net_gw={{ .Values.global.protectedNetGw }}
+              - export protected_private_net_cidr={{ .Values.global.protectedPrivateNetCidr }}
+              - wget -O - https://git.onap.org/multicloud/k8s/plain/kud/tests/vFW/packetgen | sudo -E bash
+      - name: containerdisk
+        containerDisk:
+          image: integratedcloudnative/ubuntu:16.04
+          imagePullPolicy: IfNotPresent
diff --git a/kud/demo/composite-firewall/packetgen/values.yaml b/kud/demo/composite-firewall/packetgen/values.yaml
index f8cac9d5..89bd049b 100644
--- a/kud/demo/composite-firewall/packetgen/values.yaml
+++ b/kud/demo/composite-firewall/packetgen/values.yaml
@@ -2,13 +2,6 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

-replicaCount: 1
-
-image:
-  repository: virtlet.cloud/ubuntu/16.04
-  tag: latest
-  pullPolicy: IfNotPresent
-
 nameOverride: ""
 fullnameOverride: ""

@@ -43,8 +36,8 @@ global:
   vfwPrivateIp2: 10.10.20.3

   #Packetgen container
-  vpgPrivateIp0: 192.168.10.200
-  vpgPrivateIp1: 10.10.20.200
+  vpgPrivateIp0: 192.168.10.2
+  vpgPrivateIp1: 10.10.20.2

   #Sink container
   vsnPrivateIp0: 192.168.20.3
diff --git a/kud/demo/composite-firewall/sink/values.yaml b/kud/demo/composite-firewall/sink/values.yaml
index b7ba1913..8d80b81f 100644
--- a/kud/demo/composite-firewall/sink/values.yaml
+++ b/kud/demo/composite-firewall/sink/values.yaml
@@ -47,8 +47,8 @@ global:
   vfwPrivateIp2: 10.10.20.3

   #Packetgen container
-  vpgPrivateIp0: 192.168.10.200
-  vpgPrivateIp1: 10.10.20.200
+  vpgPrivateIp0: 192.168.10.2
+  vpgPrivateIp1: 10.10.20.2

   #Sink container
   vsnPrivateIp0: 192.168.20.3
diff --git a/kud/deployment_infra/emco/examples/02-project.yaml b/kud/deployment_infra/emco/examples/02-project.yaml
index 98ecfdb4..d62a4f65 100644
--- a/kud/deployment_infra/emco/examples/02-project.yaml
+++ b/kud/deployment_infra/emco/examples/02-project.yaml
@@ -7,17 +7,63 @@ version: emco/v2
 resourceContext:
   anchor: projects
 metadata :
-   name: {{ .ProjectName }}
+  name: {{ .ProjectName }}

 ---
-#create default logical cloud with admin permissions
+#create privileged logical cloud
 version: emco/v2
 resourceContext:
   anchor: projects/{{ .ProjectName }}/logical-clouds
 metadata:
   name: {{ .LogicalCloud }}
 spec:
-  level: "0"
+  namespace: {{ .LogicalCloud }}
+  user:
+    user-name: {{ .LogicalCloud }}-admin
+    type: certificate
+
+---
+#create cluster quotas
+version: emco/v2
+resourceContext:
+  anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/cluster-quotas
+metadata:
+  name: quota
+spec:
+  #an empty spec means that the addons will not be required to specify
+  #cpu, memory, etc. limits
+
+---
+#add namespaced permissions
+version: emco/v2
+resourceContext:
+  anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/user-permissions
+metadata:
+  name: namespaced
+spec:
+  namespace: {{ .LogicalCloud }}
+  apiGroups:
+  - "*"
+  resources:
+  - "*"
+  verbs:
+  - "*"
+
+---
+#add non-namespaced permissions
+version: emco/v2
+resourceContext:
+  anchor: projects/{{ .ProjectName }}/logical-clouds/{{ .LogicalCloud }}/user-permissions
+metadata:
+  name: non-namespaced
+spec:
+  namespace: ""
+  apiGroups:
+  - "*"
+  resources:
+  - "*"
+  verbs:
+  - "*"

 {{- range $index, $cluster := .Clusters }}
 ---
diff --git a/kud/deployment_infra/emco/examples/values.yaml.example b/kud/deployment_infra/emco/examples/values.yaml.example
index 8c4b6c30..67944eb8 100644
--- a/kud/deployment_infra/emco/examples/values.yaml.example
+++ b/kud/deployment_infra/emco/examples/values.yaml.example
@@ -11,7 +11,7 @@ Clusters:
     Name: cluster

 ProjectName: kud
-LogicalCloud: default
+LogicalCloud: kud

 PackagesPath: $PWD/../output/packages
 AddonsApp: addons
diff --git a/kud/hosting_providers/containerized/addons/values.yaml.tmpl b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
index f2a20f84..b3e5845c 100644
--- a/kud/hosting_providers/containerized/addons/values.yaml.tmpl
+++ b/kud/hosting_providers/containerized/addons/values.yaml.tmpl
@@ -11,7 +11,7 @@ Clusters:
     Name: cluster

 ProjectName: kud
-LogicalCloud: default
+LogicalCloud: kud

 PackagesPath: ${PACKAGES_PATH}
 AddonsApp: addons
diff --git a/kud/hosting_providers/containerized/installer.sh b/kud/hosting_providers/containerized/installer.sh
index cdb43dd5..18ebfff3 100755
--- a/kud/hosting_providers/containerized/installer.sh
+++ b/kud/hosting_providers/containerized/installer.sh
@@ -239,6 +239,9 @@ function install_host_artifacts {
     for test in _common _common_test _functions topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk; do
         cp ${kud_tests}/${test}.sh ${host_addons_dir}/tests
     done
+    cp ${kud_tests}/plugin_fw_v2.sh ${host_addons_dir}/tests
+    cp ${kud_tests}/plugin_fw_v2.yaml ${host_addons_dir}/tests
+    cp -r ${kud_tests}/../demo/composite-firewall ${host_addons_dir}/tests

     mkdir -p ${host_artifacts_dir}
     cp -rf ${kud_inventory_folder}/artifacts/* ${host_artifacts_dir}
diff --git a/kud/tests/_functions.sh b/kud/tests/_functions.sh
index 1a803173..ec415409 100755
--- a/kud/tests/_functions.sh
+++ b/kud/tests/_functions.sh
@@ -14,7 +14,11 @@ set -o pipefail

 FUNCTIONS_DIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"

+# Do not overwrite any user modifications to PATH when sourcing
+# /etc/environment
+USER_PATH=$PATH
 source /etc/environment
+PATH=$USER_PATH:$PATH
 source $FUNCTIONS_DIR/_common_test.sh

 function print_msg {
diff --git a/kud/tests/plugin_fw_v2.sh b/kud/tests/plugin_fw_v2.sh
index d6254ac3..abab9004 100755
--- a/kud/tests/plugin_fw_v2.sh
+++ b/kud/tests/plugin_fw_v2.sh
@@ -14,10 +14,10 @@ set -o pipefail

 source _common_test.sh
 source _functions.sh
-source _functions.sh

 # TODO KUBECONFIG may be a list of paths
-kubeconfig_path="${KUBECONFIG:-$HOME/.kube/config}"
+KUBECONFIG_PATH="${KUBECONFIG:-$HOME/.kube/config}"
+DEMO_FOLDER="${DEMO_FOLDER:-$test_folder/../demo}"

 clusters="${KUD_PLUGIN_FW_CLUSTERS:-$(cat <<EOF
 [
@@ -28,7 +28,7 @@ clusters="${KUD_PLUGIN_FW_CLUSTERS:-$(cat <<EOF
       "userData1": "edge01 user data 1",
       "userData2": "edge01 user data 2"
     },
-    "file": "$kubeconfig_path"
+    "file": "$KUBECONFIG_PATH"
   }
 ]
 EOF
@@ -73,17 +73,15 @@ service_host=${service_host:-"localhost"}

 CSAR_DIR="/opt/csar"
 csar_id="4bf66240-a0be-4ce2-aebd-a01df7725f16"

-demo_folder=$test_folder/../demo
-
 function populate_CSAR_compositevfw_helm {
     _checks_args "$1"
     pushd "${CSAR_DIR}/$1"
     print_msg "Create Helm Chart Archives for compositevfw"
     rm -f *.tar.gz
-    tar -czf packetgen.tar.gz -C $demo_folder/composite-firewall packetgen
-    tar -czf firewall.tar.gz -C $demo_folder/composite-firewall firewall
-    tar -czf sink.tar.gz -C $demo_folder/composite-firewall sink
-    tar -czf profile.tar.gz -C $demo_folder/composite-firewall manifest.yaml override_values.yaml
+    tar -czf packetgen.tar.gz -C $DEMO_FOLDER/composite-firewall packetgen
+    tar -czf firewall.tar.gz -C $DEMO_FOLDER/composite-firewall firewall
+    tar -czf sink.tar.gz -C $DEMO_FOLDER/composite-firewall sink
+    tar -czf profile.tar.gz -C $DEMO_FOLDER/composite-firewall manifest.yaml override_values.yaml
     popd
 }
@@ -228,7 +226,7 @@ else
     for name in $(cluster_names); do
         print_msg "Wait for all pods to start on cluster $name"
         file=$(cluster_file "$name")
-        KUBECONFIG=$file kubectl wait pod -l release=fw0 --for=condition=Ready
+        KUBECONFIG=$file kubectl wait pod -l release=fw0 --for=condition=Ready --timeout=5m
     done
     # TODO: Provide some health check to verify vFW work
     print_msg "Not waiting for vFW to fully install as no further checks are implemented in testcase"
diff --git a/kud/tests/plugin_fw_v2.yaml b/kud/tests/plugin_fw_v2.yaml
index be436106..b4dbf77f 100644
--- a/kud/tests/plugin_fw_v2.yaml
+++ b/kud/tests/plugin_fw_v2.yaml
@@ -234,7 +234,7 @@ metadata:
 spec:
   application-name: packetgen
   workload-resource: {{.Release}}-packetgen
-  type: Deployment
+  type: VirtualMachine

 ---
 version: emco/v2
@@ -245,7 +245,7 @@ metadata:
 spec:
   application-name: firewall
   workload-resource: {{.Release}}-firewall
-  type: Deployment
+  type: VirtualMachine

 ---
 version: emco/v2
@@ -265,10 +265,11 @@ resourceContext:
 metadata:
   name: packetgen_unprotected_if
 spec:
-  interface: eth1
+  interface: net1
   name: {{.UnprotectedProviderNetwork}}
   defaultGateway: "false"
   ipAddress: 192.168.10.2
+  macAddress: ee:f0:75:e0:b6:26

 ---
 version: emco/v2
@@ -277,10 +278,11 @@ resourceContext:
 metadata:
   name: packetgen_emco_if
 spec:
-  interface: eth2
+  interface: net2
   name: {{.EmcoProviderNetwork}}
   defaultGateway: "false"
   ipAddress: 10.10.20.2
+  macAddress: 0a:c0:37:55:f5:ab

 ---
 version: emco/v2
@@ -289,10 +291,11 @@ resourceContext:
 metadata:
   name: firewall_unprotected_if
 spec:
-  interface: eth1
+  interface: net1
   name: {{.UnprotectedProviderNetwork}}
   defaultGateway: "false"
   ipAddress: 192.168.10.3
+  macAddress: 52:57:2b:7b:e4:27

 ---
 version: emco/v2
@@ -301,10 +304,11 @@ resourceContext:
 metadata:
   name: firewall_protected_if
 spec:
-  interface: eth2
+  interface: net2
   name: {{.ProtectedNetwork}}
   defaultGateway: "false"
   ipAddress: 192.168.20.2
+  macAddress: fa:d1:3a:a1:5c:67

 ---
 version: emco/v2
@@ -313,10 +317,11 @@ resourceContext:
 metadata:
   name: firewall_emco_if
 spec:
-  interface: eth3
+  interface: net3
   name: {{.EmcoProviderNetwork}}
   defaultGateway: "false"
   ipAddress: 10.10.20.3
+  macAddress: 86:31:ea:6a:ce:75

 ---
 version: emco/v2
diff --git a/src/k8splugin/internal/app/instance.go b/src/k8splugin/internal/app/instance.go
index 1c9c81a9..5aa60882 100644
--- a/src/k8splugin/internal/app/instance.go
+++ b/src/k8splugin/internal/app/instance.go
@@ -545,24 +545,6 @@ func (v *InstanceClient) checkRssStatus(rss helm.KubernetesResource, k8sClient K
     apiVersion, kind := rss.GVK.ToAPIVersionAndKind()
     log.Printf("apiVersion: %s, Kind: %s", apiVersion, kind)

-    restClient, err := k8sClient.getRestApi(apiVersion)
-    if err != nil {
-        return false, err
-    }
-    mapper := k8sClient.GetMapper()
-    mapping, err := mapper.RESTMapping(schema.GroupKind{
-        Group: rss.GVK.Group,
-        Kind:  rss.GVK.Kind,
-    }, rss.GVK.Version)
-    resourceInfo := resource.Info{
-        Client:          restClient,
-        Mapping:         mapping,
-        Namespace:       namespace,
-        Name:            rss.Name,
-        Source:          "",
-        Object:          nil,
-        ResourceVersion: "",
-    }
     var parsedRes runtime.Object
     //TODO: Should we care about different api version for a same kind?
@@ -591,6 +573,26 @@ func (v *InstanceClient) checkRssStatus(rss helm.KubernetesResource, k8sClient K
         //For not listed resource, consider ready
         return true, nil
     }
+
+    restClient, err := k8sClient.getRestApi(apiVersion)
+    if err != nil {
+        return false, err
+    }
+    mapper := k8sClient.GetMapper()
+    mapping, err := mapper.RESTMapping(schema.GroupKind{
+        Group: rss.GVK.Group,
+        Kind:  rss.GVK.Kind,
+    }, rss.GVK.Version)
+    resourceInfo := resource.Info{
+        Client:          restClient,
+        Mapping:         mapping,
+        Namespace:       namespace,
+        Name:            rss.Name,
+        Source:          "",
+        Object:          nil,
+        ResourceVersion: "",
+    }
+
     err = runtime.DefaultUnstructuredConverter.FromUnstructured(status.Status.Object, parsedRes)
     if err != nil {
         return false, err