-rw-r--r--  build/Dockerfile  37
-rw-r--r--  build/Readme.md  4
-rwxr-xr-x  build/entrypoint  35
-rw-r--r--  deployments/kubernetes/Readme.md  4
-rw-r--r--  deployments/kubernetes/onap4k8s.yaml  359
-rw-r--r--  deployments/kubernetes/onap4k8sdb.yaml  87
-rwxr-xr-x  kud/tests/plugin_collection_v2.sh  131
-rw-r--r--  src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_cr.yaml  5
-rw-r--r--  src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml  35
-rw-r--r--  src/monitor/deploy/operator.yaml  6
-rw-r--r--  src/monitor/pkg/apis/k8splugin/v1alpha1/types.go  18
-rw-r--r--  src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go  54
-rw-r--r--  src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.openapi.go  36
-rw-r--r--  src/monitor/pkg/controller/add_resourcebundlestate.go  8
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go  179
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/configMap_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/controller.go  224
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/daemonSet_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/deployment_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/helpers.go  6
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/ingress_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/job_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/job_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/pod_controller.go  2
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/secret_controller.go  179
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/secret_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/service_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/service_predicate.go  44
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go  182
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/statefulSet_predicate.go  44
-rw-r--r--  src/orchestrator/pkg/appcontext/appcontext.go  3
-rw-r--r--  src/orchestrator/pkg/module/add_intents.go  1
-rw-r--r--  src/orchestrator/pkg/module/instantiation.go  41
-rw-r--r--  src/orchestrator/pkg/module/instantiation_scheduler_helper.go  222
-rw-r--r--  src/ovnaction/cmd/main.go  4
-rw-r--r--  src/rsync/cmd/main.go  4
39 files changed, 3084 insertions, 44 deletions
diff --git a/build/Dockerfile b/build/Dockerfile
new file mode 100644
index 00000000..ff9e8572
--- /dev/null
+++ b/build/Dockerfile
@@ -0,0 +1,37 @@
+# SPDX-license-identifier: Apache-2.0
+##############################################################################
+# Copyright (c) 2018
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+FROM golang:1.14.1
+
+WORKDIR /go/src/github.com/onap/multicloud-k8s
+COPY ./ ./
+RUN cd src/orchestrator && make all
+RUN cd src/ncm && make all
+RUN cd src/dcm && make all
+RUN cd src/rsync && make all
+RUN cd src/ovnaction && make all
+RUN cd src/clm && make all
+
+FROM ubuntu:16.04
+
+WORKDIR /opt/mco
+RUN groupadd -r mco && useradd -r -g mco mco
+RUN chown mco:mco /opt/mco -R
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/orchestrator/orchestrator ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/ncm/ncm ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/dcm/dcm ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/rsync/rsync ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/ovnaction/ovnaction ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/src/clm/clm ./
+COPY --chown=mco --from=0 /go/src/github.com/onap/multicloud-k8s/build/entrypoint ./
+
+USER mco
+
+ENTRYPOINT ["./entrypoint"]
+
diff --git a/build/Readme.md b/build/Readme.md
new file mode 100644
index 00000000..193c0ea5
--- /dev/null
+++ b/build/Readme.md
@@ -0,0 +1,4 @@
+# Steps for building v2 API Docker Image
+$cd k8s
+$docker build -f build/Dockerfile . -t mco
+
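+# Illustrative only: the image bundles every v2 microservice binary behind
+# build/entrypoint, so a single service (here the orchestrator) can be started
+# from the freshly built image with, for example:
+$docker run --rm mco mco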
diff --git a/build/entrypoint b/build/entrypoint
new file mode 100755
index 00000000..bafb76e9
--- /dev/null
+++ b/build/entrypoint
@@ -0,0 +1,35 @@
+#!/bin/sh -e
+
+cmd=${1:-""}
+
+case ${cmd} in
+ "mco")
+ ./orchestrator
+ ;;
+
+ "ncm")
+ ./ncm
+ ;;
+
+ "dcm")
+ ./dcm
+ ;;
+
+ "rsync")
+ ./rsync
+ ;;
+
+ "ovnaction")
+ ./ovnaction
+ ;;
+
+ "clm")
+ ./clm
+ ;;
+
+ *)
+ echo "invalid command ${cmd}"
+ sleep infinity
+
+esac
+
diff --git a/deployments/kubernetes/Readme.md b/deployments/kubernetes/Readme.md
new file mode 100644
index 00000000..d2d3d801
--- /dev/null
+++ b/deployments/kubernetes/Readme.md
@@ -0,0 +1,4 @@
+# Steps for running v2 API microservices
+$kubectl create namespace onap4k8s
+$kubectl apply -f onap4k8sdb.yaml -n onap4k8s
+$kubectl apply -f onap4k8s.yaml -n onap4k8s
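+
+# Optional sanity check: the database and microservice pods should reach Running
+$kubectl get pods -n onap4k8s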
diff --git a/deployments/kubernetes/onap4k8s.yaml b/deployments/kubernetes/onap4k8s.yaml
new file mode 100644
index 00000000..302fd53e
--- /dev/null
+++ b/deployments/kubernetes/onap4k8s.yaml
@@ -0,0 +1,359 @@
+# Resources to create ONAP4K8s v2 Microservices
+---
+# Orchestrator Config Map
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: orchestrator
+ labels:
+ app: orchestrator
+data:
+ config.json: |
+ {
+ "database-type": "mongo",
+ "database-ip": "mongo",
+ "etcd-ip": "etcd",
+ "service-port": "9015"
+ }
+
+---
+# Orchestrator Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: orchestrator
+ labels:
+ app: orchestrator
+spec:
+ selector:
+ app: orchestrator
+ type: NodePort
+ ports:
+ - name: http
+ port: 9015
+ protocol: TCP
+ targetPort: 9015
+
+---
+# Orchestrator Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: orchestrator
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: orchestrator
+ template:
+ metadata:
+ labels:
+ app: orchestrator
+ spec:
+ containers:
+ - name: orchestrator
+ image: rtsood/mco:0.0.1
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/mco/entrypoint", "mco"]
+ workingDir: /opt/mco
+ ports:
+ - containerPort: 9015
+ volumeMounts:
+ - name: config
+ mountPath: /opt/mco/config.json
+ subPath: config.json
+ volumes:
+ - name: config
+ configMap:
+ name: orchestrator
+ items:
+ - key: config.json
+ path: config.json
+
+---
+# NCM Config Map
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: ncm
+ labels:
+ app: ncm
+data:
+ config.json: |
+ {
+ "database-type": "mongo",
+ "database-ip": "mongo",
+ "etcd-ip": "etcd",
+ "service-port": "9031"
+ }
+
+---
+# NCM Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: ncm
+ labels:
+ app: ncm
+spec:
+ selector:
+ app: ncm
+ type: NodePort
+ ports:
+ - name: http
+ port: 9031
+ protocol: TCP
+ targetPort: 9031
+
+---
+# NCM Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ncm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ncm
+ template:
+ metadata:
+ labels:
+ app: ncm
+ spec:
+ containers:
+ - name: ncm
+ image: rtsood/mco:0.0.1
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/mco/entrypoint", "ncm"]
+ workingDir: /opt/mco
+ ports:
+ - containerPort: 9031
+ volumeMounts:
+ - name: config
+ mountPath: /opt/mco/config.json
+ subPath: config.json
+ volumes:
+ - name: config
+ configMap:
+ name: ncm
+ items:
+ - key: config.json
+ path: config.json
+
+---
+# RSYNC ConfigMap
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: rsync
+ labels:
+ app: rsync
+data:
+ config.json: |
+ {
+ "database-type": "mongo",
+ "database-ip": "mongo",
+ "etcd-ip": "etcd"
+ }
+
+---
+# RSYNC Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: rsync
+ labels:
+ app: rsync
+spec:
+ selector:
+ app: rsync
+ type: NodePort
+ ports:
+ - name: internal
+ port: 9041
+ protocol: TCP
+ targetPort: 9041
+
+---
+# RSYNC Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: rsync
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: rsync
+ template:
+ metadata:
+ labels:
+ app: rsync
+ spec:
+ containers:
+ - name: rsync
+ image: rtsood/mco:0.0.1
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/mco/entrypoint", "rsync"]
+ workingDir: /opt/mco
+ ports:
+ - containerPort: 9041
+ volumeMounts:
+ - name: config
+ mountPath: /opt/mco/config.json
+ subPath: config.json
+ volumes:
+ - name: config
+ configMap:
+ name: rsync
+ items:
+ - key: config.json
+ path: config.json
+---
+# Ovnaction Config Map
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: ovnaction
+ labels:
+ app: ovnaction
+data:
+ config.json: |
+ {
+ "database-type": "mongo",
+ "database-ip": "mongo",
+ "etcd-ip": "etcd",
+ "service-port": "9051"
+ }
+
+---
+# Ovnaction Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnaction
+ labels:
+ app: ovnaction
+spec:
+ selector:
+ app: ovnaction
+ type: NodePort
+ ports:
+ - name: internal
+ port: 9053
+ protocol: TCP
+ targetPort: 9053
+ - name: http
+ port: 9051
+ protocol: TCP
+ targetPort: 9051
+
+---
+# Ovnaction Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ovnaction
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: ovnaction
+ template:
+ metadata:
+ labels:
+ app: ovnaction
+ spec:
+ containers:
+ - name: ovnaction
+ image: rtsood/mco:0.0.1
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/mco/entrypoint", "ovnaction"]
+ workingDir: /opt/mco
+ ports:
+ - containerPort: 9053
+ - containerPort: 9051
+ volumeMounts:
+ - name: config
+ mountPath: /opt/mco/config.json
+ subPath: config.json
+ volumes:
+ - name: config
+ configMap:
+ name: ovnaction
+ items:
+ - key: config.json
+ path: config.json
+
+---
+# Clm Config Map
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: clm
+ labels:
+ app: clm
+data:
+ config.json: |
+ {
+ "database-type": "mongo",
+ "database-ip": "mongo",
+ "etcd-ip": "etcd",
+ "service-port": "9061"
+ }
+
+---
+# Clm Service
+apiVersion: v1
+kind: Service
+metadata:
+ name: clm
+ labels:
+ app: clm
+spec:
+ selector:
+ app: clm
+ type: NodePort
+ ports:
+ - name: http
+ port: 9061
+ protocol: TCP
+ targetPort: 9061
+
+---
+# Clm Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: clm
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: clm
+ template:
+ metadata:
+ labels:
+ app: clm
+ spec:
+ containers:
+ - name: clm
+ image: rtsood/mco:0.0.1
+ imagePullPolicy: IfNotPresent
+ command: ["/opt/mco/entrypoint", "clm"]
+ workingDir: /opt/mco
+ ports:
+ - containerPort: 9061
+ volumeMounts:
+ - name: config
+ mountPath: /opt/mco/config.json
+ subPath: config.json
+ volumes:
+ - name: config
+ configMap:
+ name: clm
+ items:
+ - key: config.json
+ path: config.json
+
diff --git a/deployments/kubernetes/onap4k8sdb.yaml b/deployments/kubernetes/onap4k8sdb.yaml
new file mode 100644
index 00000000..c5982041
--- /dev/null
+++ b/deployments/kubernetes/onap4k8sdb.yaml
@@ -0,0 +1,87 @@
+# Resources to create Databases used by ONAP4K8s v2 Microservices
+---
+#Etcd Service
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: etcd
+ name: etcd
+spec:
+ ports:
+ - name: "2379"
+ port: 2379
+ targetPort: 2379
+ - name: "2380"
+ port: 2380
+ targetPort: 2380
+ selector:
+ app: etcd
+
+---
+#Mongo Service
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: mongo
+ name: mongo
+spec:
+ ports:
+ - name: "27017"
+ port: 27017
+ targetPort: 27017
+ selector:
+ app: mongo
+
+---
+#Etcd Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: etcd
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: etcd
+ template:
+ metadata:
+ labels:
+ app: etcd
+ spec:
+ containers:
+ - image: bitnami/etcd:3
+ imagePullPolicy: IfNotPresent
+ name: etcd
+ env:
+ - name: "ALLOW_NONE_AUTHENTICATION"
+ value: "yes"
+ ports:
+ - containerPort: 2379
+ - containerPort: 2380
+
+---
+#Mongo Deployment
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: mongo
+ name: mongo
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: mongo
+ template:
+ metadata:
+ labels:
+ app: mongo
+ spec:
+ containers:
+ - image: mongo
+ imagePullPolicy: IfNotPresent
+ name: mongo
+ ports:
+ - containerPort: 27017
diff --git a/kud/tests/plugin_collection_v2.sh b/kud/tests/plugin_collection_v2.sh
index 5ebed6ad..05ff4265 100755
--- a/kud/tests/plugin_collection_v2.sh
+++ b/kud/tests/plugin_collection_v2.sh
@@ -82,6 +82,8 @@ deploymentIntentGroupNameDesc="test_deployment_intent_group_desc"
releaseName="test"
intentToBeAddedinDeploymentIntentGroup="name_of_intent_to_be_added_in_deployment_group"
intentToBeAddedinDeploymentIntentGroupDesc="desc_of_intent_to_be_added_in_deployment_group"
+hpaIntentName="hpaIntentName"
+trafficIntentName="trafficIntentName"
chart_name="edgex"
profile_name="test_profile"
@@ -90,6 +92,28 @@ namespace="plugin-tests-namespace"
cloud_region_id="kud"
cloud_region_owner="localhost"
+
+# Controllers
+genericPlacementIntent="genericPlacementIntent"
+OVNintent="OVNintent"
+OVNintentName="OVNintentName"
+OVNHostName="OVNHostName"
+OVNPort="9027"
+CostBasedIntent="costBasedIntent"
+CostBasedIntentName="CostBasedIntentName"
+CostBasedHostName="OVNHostName"
+CostBasedPort="9028"
+hpaIntent="hpaIntent"
+trafficIntent="trafficIntent"
+gpcHostName="gpcHostName"
+gpcPort="9029"
+hpaControllerIntentName="hpaControllerIntentName"
+hpaHostName="hpaHostName"
+hpaPort="9030"
+trafficControllerIntentName="trafficControllerIntentName"
+trafficHostName="trafficHostName"
+trafficPort="9031"
+
# Setup
install_deps
populate_CSAR_composite_app_helm "$csar_id"
@@ -97,7 +121,20 @@ populate_CSAR_composite_app_helm "$csar_id"
# BEGIN :: Delete statements are issued so that we clean up the 'orchestrator' collection
# and freshly populate the documents, also it serves as a direct test
# for all our DELETE APIs and an indirect test for all GET APIs
+print_msg "Deleting controller ${genericPlacementIntent}"
+delete_resource "${base_url}/controllers/${genericPlacementIntent}"
+
+print_msg "Deleting controller ${hpaIntent}"
+delete_resource "${base_url}/controllers/${hpaIntent}"
+
+print_msg "Deleting controller ${trafficIntent}"
+delete_resource "${base_url}/controllers/${trafficIntent}"
+print_msg "Deleting controller ${CostBasedIntent}"
+delete_resource "${base_url}/controllers/${CostBasedIntent}"
+
+print_msg "Deleting controller ${OVNintent}"
+delete_resource "${base_url}/controllers/${OVNintent}"
print_msg "Deleting intentToBeAddedinDeploymentIntentGroup"
delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}/intents/${intentToBeAddedinDeploymentIntentGroup}"
@@ -135,6 +172,9 @@ delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite
print_msg "Deleting ${project_name}"
delete_resource "${base_url}/projects/${project_name}"
+
+
+
# END :: Delete statements were issued so that we clean up the db
# and freshly populate the documents, also it serves as a direct test
# for all our DELETE APIs and an indirect test for all GET APIs
@@ -415,7 +455,7 @@ call_api -d "${payload}" "${base_url}/projects/${project_name}/composite-apps/${
# END: Registering DeploymentIntentGroup in the database
# BEGIN: Adding intents to an intent group
-print_msg "Adding the genericPlacement intent to the deploymentIntent group"
+print_msg "Adding all the intents to the deploymentIntent group"
payload="$(cat <<EOF
{
"metadata":{
@@ -426,7 +466,11 @@ payload="$(cat <<EOF
},
"spec":{
"intent":{
- "generic-placement-intent":"${genericPlacementIntentName}"
+ "${genericPlacementIntent}":"${genericPlacementIntentName}",
+ "${hpaIntent}" : "${hpaControllerIntentName}",
+ "${trafficIntent}" : "${trafficControllerIntentName}",
+ "${CostBasedIntent}" : "${CostBasedIntentName}",
+ "${OVNintent}" : "${OVNintentName}"
}
}
}
@@ -435,8 +479,89 @@ EOF
call_api -d "${payload}" "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}/intents"
# END: Adding intents to an intent group
+# BEGIN: Adding controllers
+print_msg "Adding CostBased placement contoller"
+payload="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${CostBasedIntent}",
+ "description":"${CostBasedIntentName}",
+ "userData1":"${userData1}",
+ "userData2":"${userData2}"
+ },
+ "spec":{
+ "host": "${CostBasedHostName}",
+ "port": ${CostBasedPort},
+ "type": "placement",
+ "priority": 3
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/controllers"
+
+print_msg "Adding HPA contoller"
+payload="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${hpaIntent}",
+ "description":"${hpaControllerIntentName}",
+ "userData1":"${userData1}",
+ "userData2":"${userData2}"
+ },
+ "spec":{
+ "host": "${hpaHostName}",
+ "port": ${hpaPort},
+ "type": "placement",
+ "priority": 2
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/controllers"
+
+print_msg "Adding traffic contoller"
+payload="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${trafficIntent}",
+ "description":"${trafficControllerIntentName}",
+ "userData1":"${userData1}",
+ "userData2":"${userData2}"
+ },
+ "spec":{
+ "host": "${trafficHostName}",
+ "port": ${trafficPort},
+ "type": "action",
+ "priority": 3
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/controllers"
+
+print_msg "Adding OVN action contoller"
+payload="$(cat <<EOF
+{
+ "metadata":{
+ "name":"${OVNintent}",
+ "description":"${OVNintentName}",
+ "userData1":"${userData1}",
+ "userData2":"${userData2}"
+ },
+ "spec":{
+ "host": "${OVNHostName}",
+ "port": ${OVNPort},
+ "type": "action",
+ "priority": 2
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/controllers"
+# END: Adding controllers
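+# Illustrative only (not part of the test flow): a registered controller entry
+# can be read back from the orchestrator API with a plain GET, e.g.:
+#   curl -s "${base_url}/controllers/${hpaIntent}"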
+
#BEGIN: Instantiation
print_msg "Getting the sorted templates for each of the apps.."
call_api -d "" "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}/instantiate"
# END: Instantiation
-
diff --git a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_cr.yaml b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_cr.yaml
index 9e6e42d3..86fe0553 100644
--- a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_cr.yaml
+++ b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_cr.yaml
@@ -2,12 +2,11 @@ apiVersion: k8splugin.io/v1alpha1
kind: ResourceBundleState
metadata:
name: example-resourcebundlestate
- namespace: test
labels:
- "k8splugin.io/rb-inst-id": "bionic-beaver"
+ "emco/deployment-id": "bionic-beaver"
spec:
selector:
matchLabels:
- "k8splugin.io/rb-inst-id": "bionic-beaver"
+ "emco/deployment-id": "bionic-beaver"
status:
ready: false
diff --git a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
index 68585a32..dd38b2ef 100644
--- a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
+++ b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
@@ -58,11 +58,46 @@ spec:
items:
type: object
type: array
+ configMapStatuses:
+ items:
+ type: object
+ type: array
+ deploymentStatuses:
+ items:
+ type: object
+ type: array
+ secretStatuses:
+ items:
+ type: object
+ type: array
+ daemonSetStatuses:
+ items:
+ type: object
+ type: array
+ ingressStatuses:
+ items:
+ type: object
+ type: array
+ jobStatuses:
+ items:
+ type: object
+ type: array
+ statefulSetStatuses:
+ items:
+ type: object
+ type: array
required:
- ready
- resourceCount
- podStatuses
- serviceStatuses
+ - configMapStatuses
+ - deploymentStatuses
+ - secretStatuses
+ - daemonSetStatuses
+ - ingressStatuses
+ - jobStatuses
+ - statefulSetStatuses
type: object
version: v1alpha1
versions:
diff --git a/src/monitor/deploy/operator.yaml b/src/monitor/deploy/operator.yaml
index 9affeaaa..a06c07d3 100644
--- a/src/monitor/deploy/operator.yaml
+++ b/src/monitor/deploy/operator.yaml
@@ -2,15 +2,17 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: monitor
+ labels:
+ "emco/deployment-id": "bionic-beaver"
spec:
replicas: 1
selector:
matchLabels:
- name: monitor
+ "emco/deployment-id": "bionic-beaver"
template:
metadata:
labels:
- name: monitor
+ "emco/deployment-id": "bionic-beaver"
spec:
serviceAccountName: monitor
containers:
diff --git a/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go b/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
index 30fe1dd2..231f226e 100644
--- a/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
+++ b/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
@@ -1,7 +1,10 @@
package v1alpha1
import (
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
+ v1beta1 "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -28,10 +31,17 @@ type ResourceBundleStateSpec struct {
// ResourceBundleStatus defines the observed state of ResourceBundleState
// +k8s:openapi-gen=true
type ResourceBundleStatus struct {
- Ready bool `json:"ready" protobuf:"varint,1,opt,name=ready"`
- ResourceCount int32 `json:"resourceCount" protobuf:"varint,2,opt,name=resourceCount"`
- PodStatuses []PodStatus `json:"podStatuses" protobuf:"varint,3,opt,name=podStatuses"`
- ServiceStatuses []corev1.Service `json:"serviceStatuses" protobuf:"varint,4,opt,name=serviceStatuses"`
+ Ready bool `json:"ready" protobuf:"varint,1,opt,name=ready"`
+ ResourceCount int32 `json:"resourceCount" protobuf:"varint,2,opt,name=resourceCount"`
+ PodStatuses []PodStatus `json:"podStatuses" protobuf:"varint,3,opt,name=podStatuses"`
+ ServiceStatuses []corev1.Service `json:"serviceStatuses" protobuf:"varint,4,opt,name=serviceStatuses"`
+ ConfigMapStatuses []corev1.ConfigMap `json:"configMapStatuses" protobuf:"varint,5,opt,name=configMapStatuses"`
+ DeploymentStatuses []appsv1.Deployment `json:"deploymentStatuses" protobuf:"varint,6,opt,name=deploymentStatuses"`
+ SecretStatuses []corev1.Secret `json:"secretStatuses" protobuf:"varint,7,opt,name=secretStatuses"`
+ DaemonSetStatuses []appsv1.DaemonSet `json:"daemonSetStatuses" protobuf:"varint,8,opt,name=daemonSetStatuses"`
+ IngressStatuses []v1beta1.Ingress `json:"ingressStatuses" protobuf:"varint,11,opt,name=ingressStatuses"`
+ JobStatuses []v1.Job `json:"jobStatuses" protobuf:"varint,12,opt,name=jobStatuses"`
+ StatefulSetStatuses []appsv1.StatefulSet `json:"statefulSetStatuses" protobuf:"varint,13,opt,name=statefulSetStatuses"`
}
// PodStatus defines the observed state of ResourceBundleState
diff --git a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
index 72036ed1..d25db361 100644
--- a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
+++ b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
@@ -1,11 +1,14 @@
// +build !ignore_autogenerated
-// Code generated by operator-sdk-v0.9.0-x86_64-linux-gnu. DO NOT EDIT.
+// Code generated by operator-sdk. DO NOT EDIT.
package v1alpha1
import (
+ appsv1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
+ v1beta1 "k8s.io/api/extensions/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -127,6 +130,55 @@ func (in *ResourceBundleStatus) DeepCopyInto(out *ResourceBundleStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.ConfigMapStatuses != nil {
+ in, out := &in.ConfigMapStatuses, &out.ConfigMapStatuses
+ *out = make([]corev1.ConfigMap, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DeploymentStatuses != nil {
+ in, out := &in.DeploymentStatuses, &out.DeploymentStatuses
+ *out = make([]appsv1.Deployment, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.SecretStatuses != nil {
+ in, out := &in.SecretStatuses, &out.SecretStatuses
+ *out = make([]corev1.Secret, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DaemonSetStatuses != nil {
+ in, out := &in.DaemonSetStatuses, &out.DaemonSetStatuses
+ *out = make([]appsv1.DaemonSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.IngressStatuses != nil {
+ in, out := &in.IngressStatuses, &out.IngressStatuses
+ *out = make([]v1beta1.Ingress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.JobStatuses != nil {
+ in, out := &in.JobStatuses, &out.JobStatuses
+ *out = make([]batchv1.Job, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.StatefulSetStatuses != nil {
+ in, out := &in.StatefulSetStatuses, &out.StatefulSetStatuses
+ *out = make([]appsv1.StatefulSet, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
return
}
diff --git a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.openapi.go b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.openapi.go
index 232acefb..1d4e81bf 100644
--- a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.openapi.go
+++ b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.openapi.go
@@ -1,4 +1,4 @@
-// +build !
+// +build !ignore_autogenerated
// This file was autogenerated by openapi-gen. Do not edit it manually!
@@ -11,10 +11,10 @@ import (
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
return map[string]common.OpenAPIDefinition{
- "monitor/pkg/apis/k8splugin/v1alpha1.PodStatus": schema_pkg_apis_k8splugin_v1alpha1_PodStatus(ref),
- "monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleState": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleState(ref),
- "monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStateSpec(ref),
- "monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStatus(ref),
+ "./pkg/apis/k8splugin/v1alpha1.PodStatus": schema_pkg_apis_k8splugin_v1alpha1_PodStatus(ref),
+ "./pkg/apis/k8splugin/v1alpha1.ResourceBundleState": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleState(ref),
+ "./pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStateSpec(ref),
+ "./pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus": schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStatus(ref),
}
}
@@ -23,6 +23,7 @@ func schema_pkg_apis_k8splugin_v1alpha1_PodStatus(ref common.ReferenceCallback)
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodStatus defines the observed state of ResourceBundleState",
+ Type: []string{"object"},
Properties: map[string]spec.Schema{
"metadata": {
SchemaProps: spec.SchemaProps{
@@ -54,6 +55,7 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleState(ref common.Reference
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceBundleState is the Schema for the ResourceBundleStatees API",
+ Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
@@ -76,19 +78,19 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleState(ref common.Reference
},
"spec": {
SchemaProps: spec.SchemaProps{
- Ref: ref("monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec"),
+ Ref: ref("./pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
- Ref: ref("monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus"),
+ Ref: ref("./pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus"),
},
},
},
},
},
Dependencies: []string{
- "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec", "monitor/pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus"},
+ "./pkg/apis/k8splugin/v1alpha1.ResourceBundleStateSpec", "./pkg/apis/k8splugin/v1alpha1.ResourceBundleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
@@ -97,6 +99,7 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStateSpec(ref common.Refer
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceBundleStateSpec defines the desired state of ResourceBundleState",
+ Type: []string{"object"},
Properties: map[string]spec.Schema{
"selector": {
SchemaProps: spec.SchemaProps{
@@ -117,6 +120,7 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStatus(ref common.Referenc
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceBundleStatus defines the observed state of ResourceBundleState",
+ Type: []string{"object"},
Properties: map[string]spec.Schema{
"ready": {
SchemaProps: spec.SchemaProps{
@@ -136,7 +140,7 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStatus(ref common.Referenc
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
- Ref: ref("monitor/pkg/apis/k8splugin/v1alpha1.PodStatus"),
+ Ref: ref("./pkg/apis/k8splugin/v1alpha1.PodStatus"),
},
},
},
@@ -154,11 +158,23 @@ func schema_pkg_apis_k8splugin_v1alpha1_ResourceBundleStatus(ref common.Referenc
},
},
},
+ "configMapStatuses": {
+ SchemaProps: spec.SchemaProps{
+ Type: []string{"array"},
+ Items: &spec.SchemaOrArray{
+ Schema: &spec.Schema{
+ SchemaProps: spec.SchemaProps{
+ Ref: ref("k8s.io/api/core/v1.ConfigMap"),
+ },
+ },
+ },
+ },
+ },
},
Required: []string{"ready", "resourceCount", "podStatuses", "serviceStatuses"},
},
},
Dependencies: []string{
- "k8s.io/api/core/v1.Service", "monitor/pkg/apis/k8splugin/v1alpha1.PodStatus"},
+ "./pkg/apis/k8splugin/v1alpha1.PodStatus", "k8s.io/api/core/v1.ConfigMap", "k8s.io/api/core/v1.Service"},
}
}
diff --git a/src/monitor/pkg/controller/add_resourcebundlestate.go b/src/monitor/pkg/controller/add_resourcebundlestate.go
index d9365ae9..ee42f9cf 100644
--- a/src/monitor/pkg/controller/add_resourcebundlestate.go
+++ b/src/monitor/pkg/controller/add_resourcebundlestate.go
@@ -7,4 +7,12 @@ import (
func init() {
AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.Add)
AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddPodController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddServiceController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddConfigMapController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddDeploymentController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddSecretController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddDaemonSetController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddIngressController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddJobController)
+ AddToManagerFuncs = append(AddToManagerFuncs, resourcebundlestate.AddStatefulSetController)
}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go b/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go
new file mode 100644
index 00000000..f93355af
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go
@@ -0,0 +1,179 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddConfigMapController adds the new controller to the controller manager
+func AddConfigMapController(mgr manager.Manager) error {
+ return addConfigMapController(mgr, newConfigMapReconciler(mgr))
+}
+
+func addConfigMapController(mgr manager.Manager, r *configMapReconciler) error {
+ // Create a new controller
+ c, err := controller.New("ConfigMap-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource ConfigMaps
+ // Predicate filters ConfigMaps which don't have the k8splugin label
+ err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForObject{}, &configMapPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newConfigMapReconciler(m manager.Manager) *configMapReconciler {
+ return &configMapReconciler{client: m.GetClient()}
+}
+
+type configMapReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the ConfigMaps we watch.
+func (r *configMapReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for ConfigMap: %+v\n", req)
+
+ cm := &corev1.ConfigMap{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, cm)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("ConfigMap not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the ConfigMap's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the ConfigMap has been deleted.
+ r.deleteConfigMapFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get ConfigMap: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this ConfigMap via the labelselector
+ crSelector := returnLabel(cm.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this ConfigMap")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using an admission validating webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, cm)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteConfigMapFromAllCRs deletes ConfigMap status from all the CRs when the ConfigMap itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the ConfigMap's labels, we need to look at all the CRs in this namespace
+func (r *configMapReconciler) deleteConfigMapFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *configMapReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, cm *corev1.ConfigMap) error {
+
+ for _, cr := range crl.Items {
+ // ConfigMap is not scheduled for deletion
+ if cm.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, cm)
+ if err != nil {
+ return err
+ }
+ } else {
+ // ConfigMap is scheduled for deletion
+ r.deleteFromSingleCR(&cr, cm.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *configMapReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.ConfigMapStatuses)
+ for i, rstatus := range cr.Status.ConfigMapStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.ConfigMapStatuses[i] = cr.Status.ConfigMapStatuses[length-1]
+ cr.Status.ConfigMapStatuses = cr.Status.ConfigMapStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for ConfigMap in CR")
+ return nil
+}
+
+func (r *configMapReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, cm *corev1.ConfigMap) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for _, rstatus := range cr.Status.ConfigMapStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == cm.Name {
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.ConfigMapStatuses = append(cr.Status.ConfigMapStatuses, corev1.ConfigMap{
+ ObjectMeta: cm.ObjectMeta,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/configMap_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/configMap_predicate.go
new file mode 100644
index 00000000..b9b17738
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/configMap_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type configMapPredicate struct {
+}
+
+func (c *configMapPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (c *configMapPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (c *configMapPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (c *configMapPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/controller.go b/src/monitor/pkg/controller/resourcebundlestate/controller.go
index 71765e97..7206116b 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/controller.go
@@ -6,7 +6,10 @@ import (
"github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
+ v1beta1 "k8s.io/api/extensions/v1beta1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -70,7 +73,49 @@ func (r *reconciler) Reconcile(req reconcile.Request) (reconcile.Result, error)
err = r.updateServices(rbstate, rbstate.Spec.Selector.MatchLabels)
if err != nil {
- log.Printf("Error adding services: %v\n", err)
+ log.Printf("Error adding servicestatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateConfigMaps(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding configmapstatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateDeployments(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding deploymentstatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateSecrets(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding secretstatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateDaemonSets(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding daemonSetstatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateIngresses(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding ingressStatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateJobs(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding jobstatuses: %v\n", err)
+ return reconcile.Result{}, err
+ }
+
+ err = r.updateStatefulSets(rbstate, rbstate.Spec.Selector.MatchLabels)
+ if err != nil {
+ log.Printf("Error adding statefulSetstatuses: %v\n", err)
return reconcile.Result{}, err
}
@@ -96,7 +141,16 @@ func (r *reconciler) updateServices(rbstate *v1alpha1.ResourceBundleState,
return err
}
- rbstate.Status.ServiceStatuses = serviceList.Items
+ rbstate.Status.ServiceStatuses = []corev1.Service{}
+
+ for _, svc := range serviceList.Items {
+ resStatus := corev1.Service{
+ ObjectMeta: svc.ObjectMeta,
+ Status: svc.Status,
+ }
+ rbstate.Status.ServiceStatuses = append(rbstate.Status.ServiceStatuses, resStatus)
+ }
+
return nil
}
@@ -124,3 +178,169 @@ func (r *reconciler) updatePods(rbstate *v1alpha1.ResourceBundleState,
return nil
}
+
+func (r *reconciler) updateConfigMaps(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the ConfigMaps created as well
+ configMapList := &corev1.ConfigMapList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, configMapList)
+ if err != nil {
+ log.Printf("Failed to list configMaps: %v", err)
+ return err
+ }
+
+ rbstate.Status.ConfigMapStatuses = []corev1.ConfigMap{}
+
+ for _, cm := range configMapList.Items {
+ resStatus := corev1.ConfigMap{
+ ObjectMeta: cm.ObjectMeta,
+ }
+ rbstate.Status.ConfigMapStatuses = append(rbstate.Status.ConfigMapStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateDeployments(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the Deployments created as well
+ deploymentList := &appsv1.DeploymentList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, deploymentList)
+ if err != nil {
+ log.Printf("Failed to list deployments: %v", err)
+ return err
+ }
+
+ rbstate.Status.DeploymentStatuses = []appsv1.Deployment{}
+
+ for _, dep := range deploymentList.Items {
+ resStatus := appsv1.Deployment{
+ ObjectMeta: dep.ObjectMeta,
+ Status: dep.Status,
+ }
+ rbstate.Status.DeploymentStatuses = append(rbstate.Status.DeploymentStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateSecrets(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the Secrets created as well
+ secretList := &corev1.SecretList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, secretList)
+ if err != nil {
+ log.Printf("Failed to list secrets: %v", err)
+ return err
+ }
+
+ rbstate.Status.SecretStatuses = []corev1.Secret{}
+
+ for _, sec := range secretList.Items {
+ resStatus := corev1.Secret{
+ ObjectMeta: sec.ObjectMeta,
+ }
+ rbstate.Status.SecretStatuses = append(rbstate.Status.SecretStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateDaemonSets(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the DaemonSets created as well
+ daemonSetList := &appsv1.DaemonSetList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, daemonSetList)
+ if err != nil {
+ log.Printf("Failed to list DaemonSets: %v", err)
+ return err
+ }
+
+ rbstate.Status.DaemonSetStatuses = []appsv1.DaemonSet{}
+
+ for _, ds := range daemonSetList.Items {
+ resStatus := appsv1.DaemonSet{
+ ObjectMeta: ds.ObjectMeta,
+ Status: ds.Status,
+ }
+ rbstate.Status.DaemonSetStatuses = append(rbstate.Status.DaemonSetStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateIngresses(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the Ingresses created as well
+ ingressList := &v1beta1.IngressList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, ingressList)
+ if err != nil {
+ log.Printf("Failed to list ingresses: %v", err)
+ return err
+ }
+
+ rbstate.Status.IngressStatuses = []v1beta1.Ingress{}
+
+ for _, ing := range ingressList.Items {
+ resStatus := v1beta1.Ingress{
+ ObjectMeta: ing.ObjectMeta,
+ Status: ing.Status,
+ }
+ rbstate.Status.IngressStatuses = append(rbstate.Status.IngressStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateJobs(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the Jobs created as well
+ jobList := &v1.JobList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, jobList)
+ if err != nil {
+ log.Printf("Failed to list jobs: %v", err)
+ return err
+ }
+
+ rbstate.Status.JobStatuses = []v1.Job{}
+
+ for _, job := range jobList.Items {
+ resStatus := v1.Job{
+ ObjectMeta: job.ObjectMeta,
+ Status: job.Status,
+ }
+ rbstate.Status.JobStatuses = append(rbstate.Status.JobStatuses, resStatus)
+ }
+
+ return nil
+}
+
+func (r *reconciler) updateStatefulSets(rbstate *v1alpha1.ResourceBundleState,
+ selectors map[string]string) error {
+
+ // Update the CR with the StatefulSets created as well
+ statefulSetList := &appsv1.StatefulSetList{}
+ err := listResources(r.client, rbstate.Namespace, selectors, statefulSetList)
+ if err != nil {
+ log.Printf("Failed to list statefulSets: %v", err)
+ return err
+ }
+
+ rbstate.Status.StatefulSetStatuses = []appsv1.StatefulSet{}
+
+ for _, sfs := range statefulSetList.Items {
+ resStatus := appsv1.StatefulSet{
+ ObjectMeta: sfs.ObjectMeta,
+ Status: sfs.Status,
+ }
+ rbstate.Status.StatefulSetStatuses = append(rbstate.Status.StatefulSetStatuses, resStatus)
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go
new file mode 100644
index 00000000..3ccb40ce
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ appsv1 "k8s.io/api/apps/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddDaemonSetController adds the new controller to the controller manager
+func AddDaemonSetController(mgr manager.Manager) error {
+ return addDaemonSetController(mgr, newDaemonSetReconciler(mgr))
+}
+
+func addDaemonSetController(mgr manager.Manager, r *daemonSetReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Daemonset-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource DaemonSets
+ // Predicate filters DaemonSets which don't have the k8splugin label
+ err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForObject{}, &daemonSetPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newDaemonSetReconciler(m manager.Manager) *daemonSetReconciler {
+ return &daemonSetReconciler{client: m.GetClient()}
+}
+
+type daemonSetReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the daemonSets we watch.
+func (r *daemonSetReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for DaemonSet: %+v\n", req)
+
+ ds := &appsv1.DaemonSet{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, ds)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("DaemonSet not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the DaemonSet's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the DaemonSet has been deleted.
+ r.deleteDaemonSetFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get daemonSet: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this daemonSet via the labelselector
+ crSelector := returnLabel(ds.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this DaemonSet")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using an admission validating webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, ds)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteDaemonSetFromAllCRs deletes daemonSet status from all the CRs when the DaemonSet itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the daemonSet's labels, we need to look at all the CRs in this namespace
+func (r *daemonSetReconciler) deleteDaemonSetFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *daemonSetReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, ds *appsv1.DaemonSet) error {
+
+ for _, cr := range crl.Items {
+ // DaemonSet is not scheduled for deletion
+ if ds.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, ds)
+ if err != nil {
+ return err
+ }
+ } else {
+ // DaemonSet is scheduled for deletion
+ r.deleteFromSingleCR(&cr, ds.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *daemonSetReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.DaemonSetStatuses)
+ for i, rstatus := range cr.Status.DaemonSetStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.DaemonSetStatuses[i] = cr.Status.DaemonSetStatuses[length-1]
+ cr.Status.DaemonSetStatuses[length-1].Status = appsv1.DaemonSetStatus{}
+ cr.Status.DaemonSetStatuses = cr.Status.DaemonSetStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for DaemonSet in CR")
+ return nil
+}
+
+func (r *daemonSetReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, ds *appsv1.DaemonSet) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.DaemonSetStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == ds.Name {
+ ds.Status.DeepCopyInto(&cr.Status.DaemonSetStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.DaemonSetStatuses = append(cr.Status.DaemonSetStatuses, appsv1.DaemonSet{
+ ObjectMeta: ds.ObjectMeta,
+ Status: ds.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/daemonSet_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_predicate.go
new file mode 100644
index 00000000..16a8bc54
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type daemonSetPredicate struct {
+}
+
+func (d *daemonSetPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *daemonSetPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *daemonSetPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *daemonSetPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go b/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go
new file mode 100644
index 00000000..c563ed77
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ appsv1 "k8s.io/api/apps/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddDeploymentController adds the new controller to the controller manager
+func AddDeploymentController(mgr manager.Manager) error {
+ return addDeploymentController(mgr, newDeploymentReconciler(mgr))
+}
+
+func addDeploymentController(mgr manager.Manager, r *deploymentReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Deployment-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource Deployments
+ // Predicate filters Deployments which don't have the k8splugin label
+ err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}, &deploymentPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newDeploymentReconciler(m manager.Manager) *deploymentReconciler {
+ return &deploymentReconciler{client: m.GetClient()}
+}
+
+type deploymentReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the deployments we watch.
+func (r *deploymentReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for Deployment: %+v\n", req)
+
+ dep := &appsv1.Deployment{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, dep)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("Deployment not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the Deployment's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the Deployment has been deleted.
+ r.deleteDeploymentFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get deployment: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this deployment via the labelselector
+ crSelector := returnLabel(dep.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this Deployment")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using an admission validating webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, dep)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteDeploymentFromAllCRs deletes deployment status from all the CRs when the Deployment itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the deployment's labels, we need to look at all the CRs in this namespace
+func (r *deploymentReconciler) deleteDeploymentFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *deploymentReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, dep *appsv1.Deployment) error {
+
+ for _, cr := range crl.Items {
+ // Deployment is not scheduled for deletion
+ if dep.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, dep)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Deployment is scheduled for deletion
+ r.deleteFromSingleCR(&cr, dep.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *deploymentReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.DeploymentStatuses)
+ for i, rstatus := range cr.Status.DeploymentStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.DeploymentStatuses[i] = cr.Status.DeploymentStatuses[length-1]
+ cr.Status.DeploymentStatuses[length-1].Status = appsv1.DeploymentStatus{}
+ cr.Status.DeploymentStatuses = cr.Status.DeploymentStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for Deployment in CR")
+ return nil
+}
+
+func (r *deploymentReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, dep *appsv1.Deployment) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.DeploymentStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == dep.Name {
+ dep.Status.DeepCopyInto(&cr.Status.DeploymentStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.DeploymentStatuses = append(cr.Status.DeploymentStatuses, appsv1.Deployment{
+ ObjectMeta: dep.ObjectMeta,
+ Status: dep.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
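Each deleteFromSingleCR in these controllers removes an entry from its status slice with the same swap-with-last idiom. A standalone sketch of that idiom follows; the names are hypothetical and not part of this change.

package main

import "fmt"

type status struct{ Name string }

// removeByName mirrors deleteFromSingleCR: swap the matching entry with the
// last element, clear the duplicated tail slot, and truncate the slice.
// Order is not preserved, which is acceptable for a status list.
func removeByName(list []status, name string) []status {
	length := len(list)
	for i, s := range list {
		if s.Name == name {
			list[i] = list[length-1]
			list[length-1] = status{}
			return list[:length-1]
		}
	}
	return list // not found, return unchanged
}

func main() {
	l := []status{{"a"}, {"b"}, {"c"}}
	fmt.Println(removeByName(l, "a")) // [{c} {b}]
}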
diff --git a/src/monitor/pkg/controller/resourcebundlestate/deployment_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/deployment_predicate.go
new file mode 100644
index 00000000..6061e93f
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/deployment_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type deploymentPredicate struct {
+}
+
+func (d *deploymentPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *deploymentPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *deploymentPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (d *deploymentPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/helpers.go b/src/monitor/pkg/controller/resourcebundlestate/helpers.go
index dab78825..5a5676f8 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/helpers.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/helpers.go
@@ -12,7 +12,7 @@ import (
// checkLabel verifies if the expected label exists and returns bool
func checkLabel(labels map[string]string) bool {
- _, ok := labels["k8splugin.io/rb-inst-id"]
+ _, ok := labels["emco/deployment-id"]
if !ok {
log.Printf("Pod does not have label. Filter it.")
return false
@@ -23,13 +23,13 @@ func checkLabel(labels map[string]string) bool {
// returnLabel verifies if the expected label exists and returns a map
func returnLabel(labels map[string]string) map[string]string {
- l, ok := labels["k8splugin.io/rb-inst-id"]
+ l, ok := labels["emco/deployment-id"]
if !ok {
log.Printf("Pod does not have label. Filter it.")
return nil
}
return map[string]string{
- "k8splugin.io/rb-inst-id": l,
+ "emco/deployment-id": l,
}
}
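A minimal standalone sketch (hypothetical values) of how the renamed emco/deployment-id label drives both helpers: checkLabel gates the event predicates, while returnLabel builds the selector used to find the tracking CRs.

package main

import "fmt"

const trackingLabel = "emco/deployment-id"

// checkLabel reports whether the resource carries the tracking label.
func checkLabel(labels map[string]string) bool {
	_, ok := labels[trackingLabel]
	return ok
}

// returnLabel builds the label selector used to list the tracking CRs.
func returnLabel(labels map[string]string) map[string]string {
	l, ok := labels[trackingLabel]
	if !ok {
		return nil
	}
	return map[string]string{trackingLabel: l}
}

func main() {
	labels := map[string]string{trackingLabel: "12345", "app": "nginx"} // "12345" is a made-up id
	fmt.Println(checkLabel(labels))  // true: the resource is tracked
	fmt.Println(returnLabel(labels)) // map[emco/deployment-id:12345]
}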
diff --git a/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go b/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go
new file mode 100644
index 00000000..603536b3
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddIngressController adds the new controller to the controller manager
+func AddIngressController(mgr manager.Manager) error {
+ return addIngressController(mgr, newIngressReconciler(mgr))
+}
+
+func addIngressController(mgr manager.Manager, r *ingressReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Ingress-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource Ingresses
+ // Predicate filters out Ingresses which don't have the emco/deployment-id label
+ err = c.Watch(&source.Kind{Type: &v1beta1.Ingress{}}, &handler.EnqueueRequestForObject{}, &ingressPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newIngressReconciler(m manager.Manager) *ingressReconciler {
+ return &ingressReconciler{client: m.GetClient()}
+}
+
+type ingressReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the ingresses we watch.
+func (r *ingressReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for Ingress: %+v\n", req)
+
+ ing := &v1beta1.Ingress{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, ing)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("Ingress not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the Ingress's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the Ingress has been deleted.
+ r.deleteIngressFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get ingress: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this Ingress via the labelselector
+ crSelector := returnLabel(ing.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this Ingress")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using a validating admission webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, ing)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteIngressFromAllCRs deletes ingress status from all the CRs when the Ingress itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the Ingress's labels, we need to look at all the CRs in this namespace
+func (r *ingressReconciler) deleteIngressFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *ingressReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, ing *v1beta1.Ingress) error {
+
+ for _, cr := range crl.Items {
+ // Ingress is not scheduled for deletion
+ if ing.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, ing)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Ingress is scheduled for deletion
+ r.deleteFromSingleCR(&cr, ing.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *ingressReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.IngressStatuses)
+ for i, rstatus := range cr.Status.IngressStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.IngressStatuses[i] = cr.Status.IngressStatuses[length-1]
+ cr.Status.IngressStatuses[length-1].Status = v1beta1.IngressStatus{}
+ cr.Status.IngressStatuses = cr.Status.IngressStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for Ingress in CR")
+ return nil
+}
+
+func (r *ingressReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, ing *v1beta1.Ingress) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.IngressStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == ing.Name {
+ ing.Status.DeepCopyInto(&cr.Status.IngressStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.IngressStatuses = append(cr.Status.IngressStatuses, v1beta1.Ingress{
+ ObjectMeta: ing.ObjectMeta,
+ Status: ing.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/ingress_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/ingress_predicate.go
new file mode 100644
index 00000000..9a41c842
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/ingress_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type ingressPredicate struct {
+}
+
+func (i *ingressPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (i *ingressPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (i *ingressPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (i *ingressPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/job_controller.go b/src/monitor/pkg/controller/resourcebundlestate/job_controller.go
new file mode 100644
index 00000000..cd76e66f
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/job_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ v1 "k8s.io/api/batch/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddJobController adds the new controller to the controller manager
+func AddJobController(mgr manager.Manager) error {
+ return addJobController(mgr, newJobReconciler(mgr))
+}
+
+func addJobController(mgr manager.Manager, r *jobReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Job-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource Jobs
+ // Predicate filters out Jobs which don't have the emco/deployment-id label
+ err = c.Watch(&source.Kind{Type: &v1.Job{}}, &handler.EnqueueRequestForObject{}, &jobPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newJobReconciler(m manager.Manager) *jobReconciler {
+ return &jobReconciler{client: m.GetClient()}
+}
+
+type jobReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the jobs we watch.
+func (r *jobReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for Job: %+v\n", req)
+
+ job := &v1.Job{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, job)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("Job not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the Job's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the Job has been deleted.
+ r.deleteJobFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get Job: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this Job via the labelselector
+ crSelector := returnLabel(job.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this Job")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using a validating admission webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, job)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteJobFromAllCRs deletes job status from all the CRs when the Job itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the job's labels, we need to look at all the CRs in this namespace
+func (r *jobReconciler) deleteJobFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *jobReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, job *v1.Job) error {
+
+ for _, cr := range crl.Items {
+ // Job is not scheduled for deletion
+ if job.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, job)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Job is scheduled for deletion
+ r.deleteFromSingleCR(&cr, job.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *jobReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.JobStatuses)
+ for i, rstatus := range cr.Status.JobStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.JobStatuses[i] = cr.Status.JobStatuses[length-1]
+ cr.Status.JobStatuses[length-1].Status = v1.JobStatus{}
+ cr.Status.JobStatuses = cr.Status.JobStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for Job in CR")
+ return nil
+}
+
+func (r *jobReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, job *v1.Job) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.JobStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == job.Name {
+ job.Status.DeepCopyInto(&cr.Status.JobStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.JobStatuses = append(cr.Status.JobStatuses, v1.Job{
+ ObjectMeta: job.ObjectMeta,
+ Status: job.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/job_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/job_predicate.go
new file mode 100644
index 00000000..14841532
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/job_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type jobPredicate struct {
+}
+
+func (j *jobPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (j *jobPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (j *jobPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (j *jobPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go b/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
index ba3685f7..65a324db 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
@@ -24,7 +24,7 @@ func AddPodController(mgr manager.Manager) error {
func addPodController(mgr manager.Manager, r *podReconciler) error {
// Create a new controller
- c, err := controller.New("ResourceBundleState-controller", mgr, controller.Options{Reconciler: r})
+ c, err := controller.New("Pod-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go b/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go
new file mode 100644
index 00000000..fe70d53f
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go
@@ -0,0 +1,179 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddSecretController adds the new controller to the controller manager
+func AddSecretController(mgr manager.Manager) error {
+ return addSecretController(mgr, newSecretReconciler(mgr))
+}
+
+func addSecretController(mgr manager.Manager, r *secretReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Secret-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource Secrets
+ // Predicate filters out Secrets which don't have the emco/deployment-id label
+ err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForObject{}, &secretPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newSecretReconciler(m manager.Manager) *secretReconciler {
+ return &secretReconciler{client: m.GetClient()}
+}
+
+type secretReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the Secrets we watch.
+func (r *secretReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for Secret: %+v\n", req)
+
+ sec := &corev1.Secret{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, sec)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("Secret not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the Secret's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the Secret has been deleted.
+ r.deleteSecretFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get Secret: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this Secret via the labelselector
+ crSelector := returnLabel(sec.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this Secret")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using a validating admission webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, sec)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteSecretFromAllCRs deletes Secret status from all the CRs when the Secret itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the Secret's labels, we need to look at all the CRs in this namespace
+func (r *secretReconciler) deleteSecretFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *secretReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, sec *corev1.Secret) error {
+
+ for _, cr := range crl.Items {
+ // Secret is not scheduled for deletion
+ if sec.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, sec)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Secret is scheduled for deletion
+ r.deleteFromSingleCR(&cr, sec.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *secretReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.SecretStatuses)
+ for i, rstatus := range cr.Status.SecretStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.SecretStatuses[i] = cr.Status.SecretStatuses[length-1]
+ cr.Status.SecretStatuses = cr.Status.SecretStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for SecretStatuses in CR")
+ return nil
+}
+
+func (r *secretReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, sec *corev1.Secret) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for _, rstatus := range cr.Status.SecretStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == sec.Name {
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.SecretStatuses = append(cr.Status.SecretStatuses, corev1.Secret{
+ ObjectMeta: sec.ObjectMeta,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/secret_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/secret_predicate.go
new file mode 100644
index 00000000..e25e1114
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/secret_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type secretPredicate struct {
+}
+
+func (s *secretPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *secretPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *secretPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *secretPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/service_controller.go b/src/monitor/pkg/controller/resourcebundlestate/service_controller.go
new file mode 100644
index 00000000..d1bb2fd6
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/service_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ corev1 "k8s.io/api/core/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddServiceController adds the new controller to the controller manager
+func AddServiceController(mgr manager.Manager) error {
+ return addServiceController(mgr, newServiceReconciler(mgr))
+}
+
+func addServiceController(mgr manager.Manager, r *serviceReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Service-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource Services
+ // Predicate filters out Services which don't have the emco/deployment-id label
+ err = c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForObject{}, &servicePredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newServiceReconciler(m manager.Manager) *serviceReconciler {
+ return &serviceReconciler{client: m.GetClient()}
+}
+
+type serviceReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the services we watch.
+func (r *serviceReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for Service: %+v\n", req)
+
+ svc := &corev1.Service{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, svc)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("Service not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the Service's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the Service has been deleted.
+ r.deleteServiceFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get service: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this service via the labelselector
+ crSelector := returnLabel(svc.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this Service")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using a validating admission webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, svc)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteServiceFromAllCRs deletes service status from all the CRs when the Service itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the service's labels, we need to look at all the CRs in this namespace
+func (r *serviceReconciler) deleteServiceFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *serviceReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, svc *corev1.Service) error {
+
+ for _, cr := range crl.Items {
+ // Service is not scheduled for deletion
+ if svc.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, svc)
+ if err != nil {
+ return err
+ }
+ } else {
+ // Service is scheduled for deletion
+ r.deleteFromSingleCR(&cr, svc.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *serviceReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.ServiceStatuses)
+ for i, rstatus := range cr.Status.ServiceStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.ServiceStatuses[i] = cr.Status.ServiceStatuses[length-1]
+ cr.Status.ServiceStatuses[length-1].Status = corev1.ServiceStatus{}
+ cr.Status.ServiceStatuses = cr.Status.ServiceStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for Service in CR")
+ return nil
+}
+
+func (r *serviceReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, svc *corev1.Service) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.ServiceStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == svc.Name {
+ svc.Status.DeepCopyInto(&cr.Status.ServiceStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.ServiceStatuses = append(cr.Status.ServiceStatuses, corev1.Service{
+ ObjectMeta: svc.ObjectMeta,
+ Status: svc.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/service_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/service_predicate.go
new file mode 100644
index 00000000..d427443f
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/service_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type servicePredicate struct {
+}
+
+func (s *servicePredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *servicePredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *servicePredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *servicePredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go
new file mode 100644
index 00000000..ebe61dba
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go
@@ -0,0 +1,182 @@
+package resourcebundlestate
+
+import (
+ "context"
+ "log"
+
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+
+ appsv1 "k8s.io/api/apps/v1"
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/manager"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+ "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// AddStatefulSetController adds the new controller to the controller manager
+func AddStatefulSetController(mgr manager.Manager) error {
+ return addStatefulSetController(mgr, newStatefulSetReconciler(mgr))
+}
+
+func addStatefulSetController(mgr manager.Manager, r *statefulSetReconciler) error {
+ // Create a new controller
+ c, err := controller.New("Statefulset-controller", mgr, controller.Options{Reconciler: r})
+ if err != nil {
+ return err
+ }
+
+ // Watch for changes to secondary resource StatefulSets
+ // Predicate filters out StatefulSets which don't have the emco/deployment-id label
+ err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForObject{}, &statefulSetPredicate{})
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func newStatefulSetReconciler(m manager.Manager) *statefulSetReconciler {
+ return &statefulSetReconciler{client: m.GetClient()}
+}
+
+type statefulSetReconciler struct {
+ client client.Client
+}
+
+// Reconcile implements the loop that will update the ResourceBundleState CR
+// whenever we get any updates from all the StatefulSets we watch.
+func (r *statefulSetReconciler) Reconcile(req reconcile.Request) (reconcile.Result, error) {
+ log.Printf("Updating ResourceBundleState for StatefulSet: %+v\n", req)
+
+ sfs := &appsv1.StatefulSet{}
+ err := r.client.Get(context.TODO(), req.NamespacedName, sfs)
+ if err != nil {
+ if k8serrors.IsNotFound(err) {
+ log.Printf("StatefulSet not found: %+v. Remove from CR if it is stored there.\n", req.NamespacedName)
+ // Remove the StatefulSet's status from StatusList
+ // This can happen if we get the DeletionTimeStamp event
+ // after the StatefulSet has been deleted.
+ r.deleteStatefulSetFromAllCRs(req.NamespacedName)
+ return reconcile.Result{}, nil
+ }
+ log.Printf("Failed to get statefulSet: %+v\n", req.NamespacedName)
+ return reconcile.Result{}, err
+ }
+
+ // Find the CRs which track this statefulSet via the labelselector
+ crSelector := returnLabel(sfs.GetLabels())
+ if crSelector == nil {
+ log.Println("We should not be here. The predicate should have filtered this StatefulSet")
+ }
+
+ // Get the CRs which have this label and update them all
+ // Ideally, we will have only one CR, but there is nothing
+ // preventing the creation of multiple.
+ // TODO: Consider using a validating admission webhook to prevent multiple
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err = listResources(r.client, req.Namespace, crSelector, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return reconcile.Result{}, nil
+ }
+
+ err = r.updateCRs(rbStatusList, sfs)
+ if err != nil {
+ // Requeue the update
+ return reconcile.Result{}, err
+ }
+
+ return reconcile.Result{}, nil
+}
+
+// deleteStatefulSetFromAllCRs deletes statefulSet status from all the CRs when the StatefulSet itself has been deleted
+// and we have not handled the updateCRs yet.
+// Since we don't have the statefulSet's labels, we need to look at all the CRs in this namespace
+func (r *statefulSetReconciler) deleteStatefulSetFromAllCRs(namespacedName types.NamespacedName) error {
+
+ rbStatusList := &v1alpha1.ResourceBundleStateList{}
+ err := listResources(r.client, namespacedName.Namespace, nil, rbStatusList)
+ if err != nil || len(rbStatusList.Items) == 0 {
+ log.Printf("Did not find any CRs tracking this resource\n")
+ return nil
+ }
+ for _, cr := range rbStatusList.Items {
+ r.deleteFromSingleCR(&cr, namespacedName.Name)
+ }
+
+ return nil
+}
+
+func (r *statefulSetReconciler) updateCRs(crl *v1alpha1.ResourceBundleStateList, sfs *appsv1.StatefulSet) error {
+
+ for _, cr := range crl.Items {
+ // StatefulSet is not scheduled for deletion
+ if sfs.DeletionTimestamp == nil {
+ err := r.updateSingleCR(&cr, sfs)
+ if err != nil {
+ return err
+ }
+ } else {
+ // StatefulSet is scheduled for deletion
+ r.deleteFromSingleCR(&cr, sfs.Name)
+ }
+ }
+
+ return nil
+}
+
+func (r *statefulSetReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, name string) error {
+ cr.Status.ResourceCount--
+ length := len(cr.Status.StatefulSetStatuses)
+ for i, rstatus := range cr.Status.StatefulSetStatuses {
+ if rstatus.Name == name {
+ //Delete that status from the array
+ cr.Status.StatefulSetStatuses[i] = cr.Status.StatefulSetStatuses[length-1]
+ cr.Status.StatefulSetStatuses[length-1].Status = appsv1.StatefulSetStatus{}
+ cr.Status.StatefulSetStatuses = cr.Status.StatefulSetStatuses[:length-1]
+ return nil
+ }
+ }
+
+ log.Println("Did not find a status for StatefulSet in CR")
+ return nil
+}
+
+func (r *statefulSetReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, sfs *appsv1.StatefulSet) error {
+
+ // Update status after searching for it in the list of resourceStatuses
+ for i, rstatus := range cr.Status.StatefulSetStatuses {
+ // Look for the status if we already have it in the CR
+ if rstatus.Name == sfs.Name {
+ sfs.Status.DeepCopyInto(&cr.Status.StatefulSetStatuses[i].Status)
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+ return nil
+ }
+ }
+
+ // Exited for loop with no status found
+ // Increment the number of tracked resources
+ cr.Status.ResourceCount++
+
+ // Add it to CR
+ cr.Status.StatefulSetStatuses = append(cr.Status.StatefulSetStatuses, appsv1.StatefulSet{
+ ObjectMeta: sfs.ObjectMeta,
+ Status: sfs.Status,
+ })
+
+ err := r.client.Status().Update(context.TODO(), cr)
+ if err != nil {
+ log.Printf("failed to update rbstate: %v\n", err)
+ return err
+ }
+
+ return nil
+}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/statefulSet_predicate.go b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_predicate.go
new file mode 100644
index 00000000..af313131
--- /dev/null
+++ b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_predicate.go
@@ -0,0 +1,44 @@
+package resourcebundlestate
+
+import (
+ "sigs.k8s.io/controller-runtime/pkg/event"
+)
+
+type statefulSetPredicate struct {
+}
+
+func (s *statefulSetPredicate) Create(evt event.CreateEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *statefulSetPredicate) Delete(evt event.DeleteEvent) bool {
+
+ if evt.Meta == nil {
+ return false
+ }
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *statefulSetPredicate) Update(evt event.UpdateEvent) bool {
+
+ if evt.MetaNew == nil {
+ return false
+ }
+
+ labels := evt.MetaNew.GetLabels()
+ return checkLabel(labels)
+}
+
+func (s *statefulSetPredicate) Generic(evt event.GenericEvent) bool {
+
+ labels := evt.Meta.GetLabels()
+ return checkLabel(labels)
+}
diff --git a/src/orchestrator/pkg/appcontext/appcontext.go b/src/orchestrator/pkg/appcontext/appcontext.go
index 98baa0f6..a847ae32 100644
--- a/src/orchestrator/pkg/appcontext/appcontext.go
+++ b/src/orchestrator/pkg/appcontext/appcontext.go
@@ -172,7 +172,6 @@ func (ac *AppContext) DeleteClusterMetaGrpHandle(ch interface{}) error {
return nil
}
-
/*
GetClusterMetaHandle takes in appName and ClusterName as string arguments and return the ClusterMetaHandle as string
*/
@@ -188,7 +187,7 @@ func (ac *AppContext) GetClusterMetaHandle(app string, cluster string) (string,
if err != nil {
return "", err
}
- cmh := fmt.Sprintf("%v", ch) + metaGrpPREFIX + "/"
+ cmh := fmt.Sprintf("%v", ch) + metaGrpPREFIX + "/"
return cmh, nil
}
diff --git a/src/orchestrator/pkg/module/add_intents.go b/src/orchestrator/pkg/module/add_intents.go
index 89bf255f..9c47863a 100644
--- a/src/orchestrator/pkg/module/add_intents.go
+++ b/src/orchestrator/pkg/module/add_intents.go
@@ -224,7 +224,6 @@ func (c IntentClient) GetAllIntents(p string, ca string, v string, di string) (L
if err != nil {
return ListOfIntents{}, pkgerrors.Wrap(err, "Unmarshalling Intent")
}
- //mapOfIntents := ListOfIntents{a.Spec.Intent.ListOfIntents}
listOfMapOfIntents = append(listOfMapOfIntents, a.Spec.Intent)
}
return ListOfIntents{listOfMapOfIntents}, nil
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index d7ec663d..76be2a2d 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -30,7 +30,7 @@ import (
const ManifestFileName = "manifest.yaml"
// GenericPlacementIntentName denotes the generic placement intent name
-const GenericPlacementIntentName = "generic-placement-intent"
+const GenericPlacementIntentName = "genericPlacementIntent"
// SEPARATOR used while creating clusternames to store in etcd
const SEPARATOR = "+"
@@ -100,22 +100,18 @@ and returns the name of the genericPlacementIntentName. Returns empty value if s
*/
func findGenericPlacementIntent(p, ca, v, di string) (string, error) {
var gi string
- var found bool
iList, err := NewIntentClient().GetAllIntents(p, ca, v, di)
if err != nil {
return gi, err
}
for _, eachMap := range iList.ListOfIntents {
if gi, found := eachMap[GenericPlacementIntentName]; found {
- log.Info(":: Name of the generic-placement-intent ::", log.Fields{"GenPlmtIntent": gi})
- return gi, err
+ log.Info(":: Name of the generic-placement-intent found ::", log.Fields{"GenPlmtIntent": gi})
+ return gi, nil
}
}
- if found == false {
- fmt.Println("generic-placement-intent not found !")
- }
+ log.Info(":: generic-placement-intent not found ! ::", log.Fields{"Searched for GenPlmtIntent": GenericPlacementIntentName})
return gi, pkgerrors.New("Generic-placement-intent not found")
-
}
// GetSortedTemplateForApp returns the sorted templates.
@@ -277,6 +273,35 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
}
// END:: save the context in the orchestrator db record
+ // BEGIN: scheduler code
+
+ pl, mapOfControllers, err := getPrioritizedControllerList(p, ca, v, di)
+ if err != nil {
+ return err
+ }
+ log.Info("Priority Based List ", log.Fields{"PlacementControllers::": pl.pPlaCont,
+ "ActionControllers::": pl.pActCont, "mapOfControllers::": mapOfControllers})
+
+ err = callGrpcForControllerList(pl.pPlaCont, mapOfControllers, ctxval)
+ if err != nil {
+ return err
+ }
+
+ err = deleteExtraClusters(allApps, context)
+ if err != nil {
+ return err
+ }
+
+ err = callGrpcForControllerList(pl.pActCont, mapOfControllers, ctxval)
+ if err != nil {
+ return err
+ }
+
+ // END: Scheduler code
+
+ // BEGIN : Rsync code
+ // END : Rsync code
+
log.Info(":: Done with instantiation... ::", log.Fields{"CompositeAppName": ca})
return err
}
diff --git a/src/orchestrator/pkg/module/instantiation_scheduler_helper.go b/src/orchestrator/pkg/module/instantiation_scheduler_helper.go
new file mode 100644
index 00000000..e4bbbfac
--- /dev/null
+++ b/src/orchestrator/pkg/module/instantiation_scheduler_helper.go
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package module
+
+import (
+ "container/heap"
+
+ "fmt"
+
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ client "github.com/onap/multicloud-k8s/src/orchestrator/pkg/grpc/contextupdateclient"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/controller"
+ mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
+)
+
+// ControllerTypePlacement denotes "placement" Controller Type
+const ControllerTypePlacement string = "placement"
+
+// ControllerTypeAction denotes "action" Controller Type
+const ControllerTypeAction string = "action"
+
+// ControllerElement consists of controller and an internal field - index
+type ControllerElement struct {
+ controller controller.Controller
+ index int // used for indexing the HeapArray
+}
+
+// PrioritizedControlList contains PrioritizedList of PlacementControllers and ActionControllers
+type PrioritizedControlList struct {
+ pPlaCont []controller.Controller
+ pActCont []controller.Controller
+}
+
+// PriorityQueue is the heapArray to store the Controllers
+type PriorityQueue []*ControllerElement
+
+func (pq PriorityQueue) Len() int { return len(pq) }
+
+func (pq PriorityQueue) Less(i, j int) bool {
+ // We want Pop to give us the highest priority controller
+ // The lower the number, the higher the priority
+ return pq[i].controller.Spec.Priority < pq[j].controller.Spec.Priority
+}
+
+// Pop method returns the controller with the highest priority
+func (pq *PriorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ c := old[n-1]
+ c.index = -1
+ *pq = old[0 : n-1]
+ return c
+}
+
+// Push method adds a controller into the heapArray
+func (pq *PriorityQueue) Push(c interface{}) {
+ n := len(*pq)
+ controllerElement := c.(*ControllerElement)
+ controllerElement.index = n
+ *pq = append(*pq, controllerElement)
+}
+
+func (pq PriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func getPrioritizedControllerList(p, ca, v, di string) (PrioritizedControlList, map[string]string, error) {
+ listOfControllers := make([]string, 0) // shall contain the real controllerNames to be passed to controllerAPI
+ mapOfControllers := make(map[string]string)
+
+ iList, err := NewIntentClient().GetAllIntents(p, ca, v, di)
+ if err != nil {
+ return PrioritizedControlList{}, map[string]string{}, err
+ }
+ for _, eachmap := range iList.ListOfIntents {
+ for controller, controllerIntent := range eachmap {
+ if controller != GenericPlacementIntentName {
+ listOfControllers = append(listOfControllers, controller)
+ mapOfControllers[controller] = controllerIntent
+ }
+ }
+ }
+
+ listPC := make([]*ControllerElement, 0)
+ listAC := make([]*ControllerElement, 0)
+
+ for _, cn := range listOfControllers {
+ c, err := NewClient().Controller.GetController(cn)
+
+ if err != nil {
+ return PrioritizedControlList{}, map[string]string{}, err
+ }
+ if c.Spec.Type == ControllerTypePlacement {
+ // Collect in listPC
+ listPC = append(listPC, &ControllerElement{controller: controller.Controller{
+ Metadata: mtypes.Metadata{
+ Name: c.Metadata.Name,
+ Description: c.Metadata.Description,
+ UserData1: c.Metadata.UserData1,
+ UserData2: c.Metadata.UserData2,
+ },
+ Spec: controller.ControllerSpec{
+ Host: c.Spec.Host,
+ Port: c.Spec.Port,
+ Type: c.Spec.Type,
+ Priority: c.Spec.Priority,
+ },
+ }})
+ } else if c.Spec.Type == ControllerTypeAction {
+ // Collect in listAC
+ listAC = append(listAC, &ControllerElement{controller: controller.Controller{
+ Metadata: mtypes.Metadata{
+ Name: c.Metadata.Name,
+ Description: c.Metadata.Description,
+ UserData1: c.Metadata.UserData1,
+ UserData2: c.Metadata.UserData2,
+ },
+ Spec: controller.ControllerSpec{
+ Host: c.Spec.Host,
+ Port: c.Spec.Port,
+ Type: c.Spec.Type,
+ Priority: c.Spec.Priority,
+ },
+ }})
+ } else {
+ log.Info("Controller type undefined", log.Fields{"Controller type": c.Spec.Type, "ControllerName": c.Metadata.Name})
+ }
+ }
+
+ pqPlacementCont := make(PriorityQueue, len(listPC))
+ for i, eachPC := range listPC {
+ pqPlacementCont[i] = &ControllerElement{controller: eachPC.controller, index: i}
+ }
+ prioritizedPlaControllerList := make([]controller.Controller, 0)
+ heap.Init(&pqPlacementCont)
+ for pqPlacementCont.Len() > 0 {
+ ce := heap.Pop(&pqPlacementCont).(*ControllerElement)
+
+ prioritizedPlaControllerList = append(prioritizedPlaControllerList, ce.controller)
+ }
+
+ pqActionCont := make(PriorityQueue, len(listAC))
+ for i, eachAC := range listAC {
+ pqActionCont[i] = &ControllerElement{controller: eachAC.controller, index: i}
+ }
+ prioritizedActControllerList := make([]controller.Controller, 0)
+ heap.Init(&pqActionCont)
+ for pqActionCont.Len() > 0 {
+ ce := heap.Pop(&pqActionCont).(*ControllerElement)
+ prioritizedActControllerList = append(prioritizedActControllerList, ce.controller)
+ }
+
+ prioritizedControlList := PrioritizedControlList{pPlaCont: prioritizedPlaControllerList, pActCont: prioritizedActControllerList}
+
+ return prioritizedControlList, mapOfControllers, nil
+
+}
+
+/*
+callGrpcForControllerList takes in a list of controllers, a map of controllers to controllerIntentNames and a contextID. It invokes the context
+update through the grpc client for the given list of controllers.
+*/
+func callGrpcForControllerList(cl []controller.Controller, mc map[string]string, contextid interface{}) error {
+ for _, c := range cl {
+ controller := c.Metadata.Name
+ controllerIntentName := mc[controller]
+ appContextID := fmt.Sprintf("%v", contextid)
+ err := client.InvokeContextUpdate(controller, controllerIntentName, appContextID)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+/*
+deleteExtraClusters deletes the extra cluster handles for each AnyOf cluster present in etcd after the grpc call for context update.
+*/
+func deleteExtraClusters(apps []App, ct appcontext.AppContext) error {
+ for _, app := range apps {
+ an := app.Metadata.Name
+ gmap, err := ct.GetClusterGroupMap(an)
+ if err != nil {
+ return err
+ }
+ for gr, cl := range gmap {
+ for i, cn := range cl {
+ // avoids deleting the first cluster
+ if i > 0 {
+ ch, err := ct.GetClusterHandle(an, cn)
+ if err != nil {
+ return err
+ }
+ err = ct.DeleteCluster(ch)
+ if err != nil {
+ return err
+ }
+ log.Info("::Deleted cluster for::", log.Fields{"appName": an, "GroupNumber": gr, "ClusterName": cn})
+ }
+ }
+
+ }
+ }
+ return nil
+}
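The scheduler helper above orders controllers with container/heap: Less compares Spec.Priority, so heap.Pop returns the lowest number (highest priority) first. Below is a self-contained sketch of the same pattern with a simplified controller type; the names and priorities are made up and do not come from the real controller package.

package main

import (
	"container/heap"
	"fmt"
)

// ctrl stands in for controller.Controller; only the priority matters here.
type ctrl struct {
	Name     string
	Priority int
}

type pq []*ctrl

func (q pq) Len() int            { return len(q) }
func (q pq) Less(i, j int) bool  { return q[i].Priority < q[j].Priority } // lower number = higher priority
func (q pq) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *pq) Push(x interface{}) { *q = append(*q, x.(*ctrl)) }
func (q *pq) Pop() interface{} {
	old := *q
	n := len(old)
	c := old[n-1]
	*q = old[:n-1]
	return c
}

func main() {
	// Made-up controller names and priorities, for illustration only.
	q := pq{{"ovnaction", 3}, {"clusterselector", 1}, {"trafficcontroller", 2}}
	heap.Init(&q)
	for q.Len() > 0 {
		fmt.Println(heap.Pop(&q).(*ctrl).Name) // clusterselector, trafficcontroller, ovnaction
	}
}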
diff --git a/src/ovnaction/cmd/main.go b/src/ovnaction/cmd/main.go
index 9e791cd1..a6b34923 100644
--- a/src/ovnaction/cmd/main.go
+++ b/src/ovnaction/cmd/main.go
@@ -50,9 +50,9 @@ func startGrpcServer() error {
certFile := config.GetConfiguration().GrpcServerCert
keyFile := config.GetConfiguration().GrpcServerKey
- host, port := register.GetServerHostPort()
+ _, port := register.GetServerHostPort()
- lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
log.Fatalf("Could not listen to port: %v", err)
}
diff --git a/src/rsync/cmd/main.go b/src/rsync/cmd/main.go
index e62e0542..f46fa79b 100644
--- a/src/rsync/cmd/main.go
+++ b/src/rsync/cmd/main.go
@@ -43,9 +43,9 @@ func startGrpcServer() error {
certFile := config.GetConfiguration().GrpcServerCert
keyFile := config.GetConfiguration().GrpcServerKey
- host, port := register.GetServerHostPort()
+ _, port := register.GetServerHostPort()
- lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))
+ lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
if err != nil {
log.Fatalf("Could not listen to port: %v", err)
}
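Both gRPC servers now bind to every interface rather than a configured host. A minimal standalone sketch of the difference (fixed example port, no TLS, not the actual rsync/ovnaction configuration):

package main

import (
	"fmt"
	"net"
)

func main() {
	port := 9031 // example port only; the real value comes from the service configuration

	// Before: binding to a specific host limits which interface accepts connections.
	//   net.Listen("tcp", fmt.Sprintf("%s:%d", host, port))

	// After: an empty host binds to all interfaces, which is what a containerized
	// service exposed through a Kubernetes Service expects.
	lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		panic(err)
	}
	defer lis.Close()
	fmt.Println("listening on", lis.Addr())
}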