-rw-r--r--  deployments/helm/v2/onap4k8s/README.md | 25
-rw-r--r--  docs/emco_apis.yaml | 97
-rw-r--r--  kud/demo/composite-firewall/firewall/values.yaml | 2
-rw-r--r--  kud/demo/composite-firewall/packetgen/values.yaml | 2
-rw-r--r--  kud/demo/composite-firewall/sink/values.yaml | 2
-rw-r--r--  src/dcm/go.mod | 1
-rw-r--r--  src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml | 9
-rw-r--r--  src/monitor/pkg/apis/k8splugin/v1alpha1/types.go | 2
-rw-r--r--  src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go | 4
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/controller.go | 14
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/job_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/pod_controller.go | 6
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/secret_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/service_controller.go | 1
-rw-r--r--  src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go | 1
-rw-r--r--  src/ncm/api/api.go | 3
-rw-r--r--  src/ncm/api/schedulerhandler.go | 111
-rw-r--r--  src/ncm/go.mod | 13
-rw-r--r--  src/ncm/pkg/scheduler/scheduler.go | 35
-rw-r--r--  src/orchestrator/api/api.go | 2
-rw-r--r--  src/orchestrator/api/instantiation_handler.go | 95
-rw-r--r--  src/orchestrator/go.mod | 7
-rw-r--r--  src/orchestrator/pkg/module/instantiation.go | 87
-rw-r--r--  src/orchestrator/pkg/state/types.go | 8
-rw-r--r--  src/orchestrator/pkg/status/status_helper.go | 482
-rw-r--r--  src/orchestrator/pkg/status/types.go | 76
30 files changed, 984 insertions(+), 107 deletions(-)
diff --git a/deployments/helm/v2/onap4k8s/README.md b/deployments/helm/v2/onap4k8s/README.md
index 43213153..3534f5a5 100644
--- a/deployments/helm/v2/onap4k8s/README.md
+++ b/deployments/helm/v2/onap4k8s/README.md
@@ -2,7 +2,7 @@
# EMCO v2 helm charts
#################################################################
-EMCO Helm charts include charts for EMCO microservices along with MongoDb, etcd, Flutend
+EMCO Helm charts include charts for EMCO microservices along with MongoDb, etcd, Fluentd
### Steps to generate and install packages
@@ -36,25 +36,29 @@ Packages helm charts in tar.gz format. All packages are in **dist/packages** dir
`$ helm install dist/packages/emco-tools-0.1.0.tgz --name rel-tools --namespace emco`
- NOTE: Deploy the Chart emco-0.1.0.tgz to deploy all packages including database, services and tools.
+NOTE: Deploy the Chart emco-0.1.0.tgz to deploy all packages including database, services and tools.
- `$ helm install dist/packages/emco-0.1.0.tgz --name rel --namespace emco`
+`$ helm install dist/packages/emco-0.1.0.tgz --name rel --namespace emco`
**5. To check logs of the different Microservices check fluentd logs**
-`kubectl logs emco-fluentd-0 -n test | grep orchestrator`
+`kubectl logs rel-fluentd-0 -n emco | grep orchestrator`
**6. Delete all packages**
-`$helm delete emco-services --purge`
+`$ helm delete rel-services --purge`
-`$helm delete emco-db --purge`
+`$ helm delete rel-db --purge`
Optional if tools were installed
-`$helm delete emco-tools --purge`
+`$ helm delete rel-tools --purge`
+
+NOTE: If the Chart emco-0.1.0.tgz was deployed
+
+`$ helm delete rel --purge`
**7. Delete local helm repo**
@@ -65,11 +69,10 @@ Optional if tools were installed
After deleting the db package and before installing the package again, the following error occurs:
- `Error: release emco-db failed: object is being deleted: persistentvolumes "emco-db-emco-etcd-data-0" already exists`
+`Error: release rel-db failed: object is being deleted: persistentvolumes "rel-db-emco-etcd-data-0" already exists`
Workaround :
- `kubectl edit persistentvolumes emco-db-emco-etcd-data-0 -n emco`
-
- and remover finalizers section
+`kubectl edit persistentvolumes rel-db-emco-etcd-data-0`
+and remove the finalizers section
diff --git a/docs/emco_apis.yaml b/docs/emco_apis.yaml
index 419c1316..163e3d69 100644
--- a/docs/emco_apis.yaml
+++ b/docs/emco_apis.yaml
@@ -1312,6 +1312,44 @@ paths:
- $ref: '#/components/parameters/compositeAppName'
- $ref: '#/components/parameters/compositeAppVersion'
- $ref: '#/components/parameters/deploymentIntentGroupName'
+ - in: query
+ name: type
+ description: source of status information
+ schema:
+ type: string
+ enum: [rsync, cluster]
+ default: rsync
+ - in: query
+ name: output
+ description: output format
+ schema:
+ type: string
+ enum: [summary, all, detail]
+ default: all
+ - in: query
+ name: instance
+ description: instance identifier
+ schema:
+ type: string
+ maxLength: 32
+ - in: query
+ name: app
+ description: app name
+ schema:
+ type: string
+ maxLength: 64
+ - in: query
+ name: cluster
+ description: cluster-provider+cluster
+ schema:
+ type: string
+ maxLength: 128
+ - in: query
+ name: resource
+ description: resource name
+ schema:
+ type: string
+ maxLength: 64
get:
tags:
- Deployment Lifecycle
@@ -2210,6 +2248,65 @@ paths:
requestBody:
content: {}
+
+ /cluster-providers/{cluster-provider-name}/clusters/{cluster-name}/status:
+ # parameters list that are used with each operation for this path
+ parameters:
+ - $ref: '#/components/parameters/clusterProviderName'
+ - $ref: '#/components/parameters/clusterName'
+ - in: query
+ name: type
+ description: source of status information
+ schema:
+ type: string
+ enum: [rsync, cluster]
+ default: rsync
+ - in: query
+ name: output
+ description: output format
+ schema:
+ type: string
+ enum: [summary, all, detail]
+ default: all
+ - in: query
+ name: instance
+ description: instance identifier
+ schema:
+ type: string
+ maxLength: 32
+ - in: query
+ name: app
+ description: app name
+ schema:
+ type: string
+ maxLength: 64
+ - in: query
+ name: cluster
+ description: cluster-provider+cluster
+ schema:
+ type: string
+ maxLength: 128
+ - in: query
+ name: resource
+ description: resource name
+ schema:
+ type: string
+ maxLength: 64
+ get:
+ tags:
+ - Networks
+ summary: Query status of cluster network intents
+ description: Query status of cluster network intents
+ operationId: statusNetworksForCluster
+ responses:
+ '200':
+ description: Success
+ content: {}
+ '404':
+ description: No Status found
+ content: {}
+
+
######################## Network Controller Intent API's##########################################
/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/network-controller-intent:
parameters:
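
The two status endpoints above share the same six query parameters. As a quick illustration, a client might assemble a status query as in this minimal Go sketch; the host, port, /v2 prefix, and all names here are hypothetical, not part of this change:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical endpoint and names, for illustration only.
	base := "http://localhost:9015/v2/projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group/status"

	q := url.Values{}
	q.Set("type", "rsync")     // rsync | cluster (default rsync)
	q.Set("output", "summary") // summary | all | detail (default all)
	q.Add("app", "packetgen")  // app, cluster and resource filters may repeat
	q.Add("app", "firewall")

	fmt.Println(base + "?" + q.Encode())
}
```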
diff --git a/kud/demo/composite-firewall/firewall/values.yaml b/kud/demo/composite-firewall/firewall/values.yaml
index 3a6c8983..09098564 100644
--- a/kud/demo/composite-firewall/firewall/values.yaml
+++ b/kud/demo/composite-firewall/firewall/values.yaml
@@ -44,7 +44,7 @@ global:
#########
ovnMultusNetworkName: ovn-networkobj
- demoArtifactsVersion: 1.5.0
+ demoArtifactsVersion: 1.6.0
dcaeCollectorIp: 10.0.4.1
dcaeCollectorPort: 8081
diff --git a/kud/demo/composite-firewall/packetgen/values.yaml b/kud/demo/composite-firewall/packetgen/values.yaml
index 300947d5..f8cac9d5 100644
--- a/kud/demo/composite-firewall/packetgen/values.yaml
+++ b/kud/demo/composite-firewall/packetgen/values.yaml
@@ -52,6 +52,6 @@ global:
#########
ovnMultusNetworkName: ovn-networkobj
- demoArtifactsVersion: 1.5.0
+ demoArtifactsVersion: 1.6.0
dcaeCollectorIp: 10.0.4.1
dcaeCollectorPort: 8081
diff --git a/kud/demo/composite-firewall/sink/values.yaml b/kud/demo/composite-firewall/sink/values.yaml
index a6fa1c46..245c9dea 100644
--- a/kud/demo/composite-firewall/sink/values.yaml
+++ b/kud/demo/composite-firewall/sink/values.yaml
@@ -56,6 +56,6 @@ global:
#########
ovnMultusNetworkName: ovn-networkobj
- demoArtifactsVersion: 1.5.0
+ demoArtifactsVersion: 1.6.0
dcaeCollectorIp: 10.0.4.1
dcaeCollectorPort: 8081
diff --git a/src/dcm/go.mod b/src/dcm/go.mod
index 4d509054..5f5f9e7b 100644
--- a/src/dcm/go.mod
+++ b/src/dcm/go.mod
@@ -13,6 +13,7 @@ require (
replace (
github.com/onap/multicloud-k8s/src/clm => ../clm
github.com/onap/multicloud-k8s/src/orchestrator => ../orchestrator
+ github.com/onap/multicloud-k8s/src/monitor => ../monitor
k8s.io/api => k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8
k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d
diff --git a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
index dd38b2ef..9ca95a32 100644
--- a/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
+++ b/src/monitor/deploy/crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
@@ -38,15 +38,6 @@ spec:
properties:
podStatuses:
items:
- properties:
- metadata:
- type: object
- ready:
- type: boolean
- status:
- type: object
- required:
- - ready
type: object
type: array
ready:
diff --git a/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go b/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
index 064591fc..6476f0db 100644
--- a/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
+++ b/src/monitor/pkg/apis/k8splugin/v1alpha1/types.go
@@ -33,7 +33,7 @@ type ResourceBundleStateSpec struct {
type ResourceBundleStatus struct {
Ready bool `json:"ready" protobuf:"varint,1,opt,name=ready"`
ResourceCount int32 `json:"resourceCount" protobuf:"varint,2,opt,name=resourceCount"`
- PodStatuses []PodStatus `json:"podStatuses" protobuf:"varint,3,opt,name=podStatuses"`
+ PodStatuses []corev1.Pod `json:"podStatuses" protobuf:"varint,3,opt,name=podStatuses"`
ServiceStatuses []corev1.Service `json:"serviceStatuses" protobuf:"varint,4,opt,name=serviceStatuses"`
ConfigMapStatuses []corev1.ConfigMap `json:"configMapStatuses" protobuf:"varint,5,opt,name=configMapStatuses"`
DeploymentStatuses []appsv1.Deployment `json:"deploymentStatuses" protobuf:"varint,6,opt,name=deploymentStatuses"`
diff --git a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
index d25db361..ff974aae 100644
--- a/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
+++ b/src/monitor/pkg/apis/k8splugin/v1alpha1/zz_generated.deepcopy.go
@@ -13,6 +13,7 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
+/*
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodStatus) DeepCopyInto(out *PodStatus) {
*out = *in
@@ -30,6 +31,7 @@ func (in *PodStatus) DeepCopy() *PodStatus {
in.DeepCopyInto(out)
return out
}
+*/
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceBundleState) DeepCopyInto(out *ResourceBundleState) {
@@ -118,7 +120,7 @@ func (in *ResourceBundleStatus) DeepCopyInto(out *ResourceBundleStatus) {
*out = *in
if in.PodStatuses != nil {
in, out := &in.PodStatuses, &out.PodStatuses
- *out = make([]PodStatus, len(*in))
+ *out = make([]corev1.Pod, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
diff --git a/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go b/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go
index f93355af..ed9283cc 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/configMap_controller.go
@@ -166,6 +166,7 @@ func (r *configMapReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, c
// Add it to CR
cr.Status.ConfigMapStatuses = append(cr.Status.ConfigMapStatuses, corev1.ConfigMap{
+ TypeMeta: cm.TypeMeta,
ObjectMeta: cm.ObjectMeta,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/controller.go b/src/monitor/pkg/controller/resourcebundlestate/controller.go
index 7206116b..faee5892 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/controller.go
@@ -145,6 +145,7 @@ func (r *reconciler) updateServices(rbstate *v1alpha1.ResourceBundleState,
for _, svc := range serviceList.Items {
resStatus := corev1.Service{
+ TypeMeta: svc.TypeMeta,
ObjectMeta: svc.ObjectMeta,
Status: svc.Status,
}
@@ -165,12 +166,12 @@ func (r *reconciler) updatePods(rbstate *v1alpha1.ResourceBundleState,
return err
}
- rbstate.Status.PodStatuses = []v1alpha1.PodStatus{}
+ rbstate.Status.PodStatuses = []corev1.Pod{}
for _, pod := range podList.Items {
- resStatus := v1alpha1.PodStatus{
+ resStatus := corev1.Pod{
+ TypeMeta: pod.TypeMeta,
ObjectMeta: pod.ObjectMeta,
- Ready: false,
Status: pod.Status,
}
rbstate.Status.PodStatuses = append(rbstate.Status.PodStatuses, resStatus)
@@ -194,6 +195,7 @@ func (r *reconciler) updateConfigMaps(rbstate *v1alpha1.ResourceBundleState,
for _, cm := range configMapList.Items {
resStatus := corev1.ConfigMap{
+ TypeMeta: cm.TypeMeta,
ObjectMeta: cm.ObjectMeta,
}
rbstate.Status.ConfigMapStatuses = append(rbstate.Status.ConfigMapStatuses, resStatus)
@@ -217,6 +219,7 @@ func (r *reconciler) updateDeployments(rbstate *v1alpha1.ResourceBundleState,
for _, dep := range deploymentList.Items {
resStatus := appsv1.Deployment{
+ TypeMeta: dep.TypeMeta,
ObjectMeta: dep.ObjectMeta,
Status: dep.Status,
}
@@ -241,6 +244,7 @@ func (r *reconciler) updateSecrets(rbstate *v1alpha1.ResourceBundleState,
for _, sec := range secretList.Items {
resStatus := corev1.Secret{
+ TypeMeta: sec.TypeMeta,
ObjectMeta: sec.ObjectMeta,
}
rbstate.Status.SecretStatuses = append(rbstate.Status.SecretStatuses, resStatus)
@@ -264,6 +268,7 @@ func (r *reconciler) updateDaemonSets(rbstate *v1alpha1.ResourceBundleState,
for _, ds := range daemonSetList.Items {
resStatus := appsv1.DaemonSet{
+ TypeMeta: ds.TypeMeta,
ObjectMeta: ds.ObjectMeta,
Status: ds.Status,
}
@@ -288,6 +293,7 @@ func (r *reconciler) updateIngresses(rbstate *v1alpha1.ResourceBundleState,
for _, ing := range ingressList.Items {
resStatus := v1beta1.Ingress{
+ TypeMeta: ing.TypeMeta,
ObjectMeta: ing.ObjectMeta,
Status: ing.Status,
}
@@ -312,6 +318,7 @@ func (r *reconciler) updateJobs(rbstate *v1alpha1.ResourceBundleState,
for _, job := range jobList.Items {
resStatus := v1.Job{
+ TypeMeta: job.TypeMeta,
ObjectMeta: job.ObjectMeta,
Status: job.Status,
}
@@ -336,6 +343,7 @@ func (r *reconciler) updateStatefulSets(rbstate *v1alpha1.ResourceBundleState,
for _, sfs := range statefulSetList.Items {
resStatus := appsv1.StatefulSet{
+ TypeMeta: sfs.TypeMeta,
ObjectMeta: sfs.ObjectMeta,
Status: sfs.Status,
}
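
Every reconciler in this change now copies TypeMeta into the status entries stored in the CR. A plausible reading, consistent with the new status helper later in this diff, is that consumers can then recover the GroupVersionKind directly from the stored object; a minimal sketch:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// With TypeMeta populated, as the reconcilers now do, the GVK of a
	// stored status entry is recoverable without consulting a scheme.
	pod := corev1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "sink-0"}, // hypothetical name
	}
	gvk := pod.TypeMeta.GroupVersionKind()
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // core group is the empty string
}
```

One caveat: objects returned by client-go list calls often arrive with an empty TypeMeta, so the copied value is only as good as what the API machinery filled in.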
diff --git a/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go
index 3ccb40ce..1f46f05c 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/daemonSet_controller.go
@@ -168,6 +168,7 @@ func (r *daemonSetReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, d
// Add it to CR
cr.Status.DaemonSetStatuses = append(cr.Status.DaemonSetStatuses, appsv1.DaemonSet{
+ TypeMeta: ds.TypeMeta,
ObjectMeta: ds.ObjectMeta,
Status: ds.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go b/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go
index c563ed77..99ca2f40 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/deployment_controller.go
@@ -168,6 +168,7 @@ func (r *deploymentReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState,
// Add it to CR
cr.Status.DeploymentStatuses = append(cr.Status.DeploymentStatuses, appsv1.Deployment{
+ TypeMeta: dep.TypeMeta,
ObjectMeta: dep.ObjectMeta,
Status: dep.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go b/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go
index 603536b3..f6113686 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/ingress_controller.go
@@ -168,6 +168,7 @@ func (r *ingressReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, ing
// Add it to CR
cr.Status.IngressStatuses = append(cr.Status.IngressStatuses, v1beta1.Ingress{
+ TypeMeta: ing.TypeMeta,
ObjectMeta: ing.ObjectMeta,
Status: ing.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/job_controller.go b/src/monitor/pkg/controller/resourcebundlestate/job_controller.go
index cd76e66f..63bd9535 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/job_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/job_controller.go
@@ -168,6 +168,7 @@ func (r *jobReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, job *v1
// Add it to CR
cr.Status.JobStatuses = append(cr.Status.JobStatuses, v1.Job{
+ TypeMeta: job.TypeMeta,
ObjectMeta: job.ObjectMeta,
Status: job.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go b/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
index 65a324db..0f7ce47e 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/pod_controller.go
@@ -136,7 +136,7 @@ func (r *podReconciler) deleteFromSingleCR(cr *v1alpha1.ResourceBundleState, nam
if rstatus.Name == name {
//Delete that status from the array
cr.Status.PodStatuses[i] = cr.Status.PodStatuses[length-1]
- cr.Status.PodStatuses[length-1] = v1alpha1.PodStatus{}
+ cr.Status.PodStatuses[length-1] = corev1.Pod{}
cr.Status.PodStatuses = cr.Status.PodStatuses[:length-1]
return nil
}
@@ -167,9 +167,9 @@ func (r *podReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, pod *co
cr.Status.ResourceCount++
// Add it to CR
- cr.Status.PodStatuses = append(cr.Status.PodStatuses, v1alpha1.PodStatus{
+ cr.Status.PodStatuses = append(cr.Status.PodStatuses, corev1.Pod{
+ TypeMeta: pod.TypeMeta,
ObjectMeta: pod.ObjectMeta,
- Ready: false,
Status: pod.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go b/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go
index fe70d53f..0540b687 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/secret_controller.go
@@ -166,6 +166,7 @@ func (r *secretReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, sec
// Add it to CR
cr.Status.SecretStatuses = append(cr.Status.SecretStatuses, corev1.Secret{
+ TypeMeta: sec.TypeMeta,
ObjectMeta: sec.ObjectMeta,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/service_controller.go b/src/monitor/pkg/controller/resourcebundlestate/service_controller.go
index d1bb2fd6..80bde3b6 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/service_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/service_controller.go
@@ -168,6 +168,7 @@ func (r *serviceReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState, svc
// Add it to CR
cr.Status.ServiceStatuses = append(cr.Status.ServiceStatuses, corev1.Service{
+ TypeMeta: svc.TypeMeta,
ObjectMeta: svc.ObjectMeta,
Status: svc.Status,
})
diff --git a/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go
index ebe61dba..c6183d08 100644
--- a/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go
+++ b/src/monitor/pkg/controller/resourcebundlestate/statefulSet_controller.go
@@ -168,6 +168,7 @@ func (r *statefulSetReconciler) updateSingleCR(cr *v1alpha1.ResourceBundleState,
// Add it to CR
cr.Status.StatefulSetStatuses = append(cr.Status.StatefulSetStatuses, appsv1.StatefulSet{
+ TypeMeta: sfs.TypeMeta,
ObjectMeta: sfs.ObjectMeta,
Status: sfs.Status,
})
diff --git a/src/ncm/api/api.go b/src/ncm/api/api.go
index 6dd958a1..45551e6c 100644
--- a/src/ncm/api/api.go
+++ b/src/ncm/api/api.go
@@ -90,6 +90,9 @@ func NewRouter(testClient interface{}) *mux.Router {
}
router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/apply", schedulerHandler.applySchedulerHandler).Methods("POST")
router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/terminate", schedulerHandler.terminateSchedulerHandler).Methods("POST")
+ router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/status", schedulerHandler.statusSchedulerHandler).Methods("GET")
+ router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/status",
+ schedulerHandler.statusSchedulerHandler).Queries("instance", "{instance}", "type", "{type}", "output", "{output}", "app", "{app}", "cluster", "{cluster}", "resource", "{resource}")
return router
}
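
Note that gorilla/mux matches routes in registration order, and a route without Queries constraints matches any query string, so the first GET registration above already serves requests that carry these parameters (the handler parses r.URL.RawQuery itself); the second registration chiefly documents the supported parameter set. A small sketch of that matching behavior, with hypothetical handlers:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/status", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "plain route")
	}).Methods("GET")
	r.HandleFunc("/status", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "query route")
	}).Queries("type", "{type}")

	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest("GET", "/status?type=rsync", nil))
	fmt.Println(rec.Body.String()) // "plain route": the first matching route wins
}
```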
diff --git a/src/ncm/api/schedulerhandler.go b/src/ncm/api/schedulerhandler.go
index d07d132d..9d8b4eff 100644
--- a/src/ncm/api/schedulerhandler.go
+++ b/src/ncm/api/schedulerhandler.go
@@ -17,9 +17,13 @@
package api
import (
+ "encoding/json"
"net/http"
+ "net/url"
+ "strings"
"github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
"github.com/gorilla/mux"
)
@@ -47,7 +51,7 @@ func (h schedulerHandler) applySchedulerHandler(w http.ResponseWriter, r *http.R
w.WriteHeader(http.StatusNoContent)
}
-// terminateSchedulerHandler handles requests to apply network intents for a cluster
+// terminateSchedulerHandler handles requests to terminate network intents for a cluster
func (h schedulerHandler) terminateSchedulerHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
provider := vars["cluster-provider"]
@@ -61,3 +65,108 @@ func (h schedulerHandler) terminateSchedulerHandler(w http.ResponseWriter, r *ht
w.WriteHeader(http.StatusNoContent)
}
+
+// statusSchedulerHandler handles requests to query status of network intents for a cluster
+func (h schedulerHandler) statusSchedulerHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ provider := vars["cluster-provider"]
+ cluster := vars["cluster"]
+
+ qParams, err := url.ParseQuery(r.URL.RawQuery)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ var queryInstance string
+ if i, found := qParams["instance"]; found {
+ queryInstance = i[0]
+ } else {
+ queryInstance = "" // default type
+ }
+
+ var queryType string
+ if t, found := qParams["type"]; found {
+ queryType = t[0]
+ if queryType != "cluster" && queryType != "rsync" {
+ http.Error(w, "Invalid query type", http.StatusBadRequest)
+ return
+ }
+ } else {
+ queryType = "rsync" // default type
+ }
+
+ var queryOutput string
+ if o, found := qParams["output"]; found {
+ queryOutput = o[0]
+ if queryOutput != "summary" && queryOutput != "all" && queryOutput != "detail" {
+ http.Error(w, "Invalid query output", http.StatusBadRequest)
+ return
+ }
+ } else {
+ queryOutput = "all" // default output format
+ }
+
+ var queryApps []string
+ if a, found := qParams["app"]; found {
+ queryApps = a
+ for _, app := range queryApps {
+ errs := validation.IsValidName(app)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid app query", http.StatusBadRequest)
+ return
+ }
+ }
+ } else {
+ queryApps = make([]string, 0)
+ }
+
+ var queryClusters []string
+ if c, found := qParams["cluster"]; found {
+ queryClusters = c
+ for _, cl := range queryClusters {
+ parts := strings.Split(cl, "+")
+ if len(parts) != 2 {
+ http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+ return
+ }
+ for _, p := range parts {
+ errs := validation.IsValidName(p)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+ } else {
+ queryClusters = make([]string, 0)
+ }
+
+ var queryResources []string
+ if r, found := qParams["resource"]; found {
+ queryResources = r
+ for _, res := range queryResources {
+ errs := validation.IsValidName(res)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid resources query", http.StatusBadRequest)
+ return
+ }
+ }
+ } else {
+ queryResources = make([]string, 0)
+ }
+
+ status, iErr := h.client.NetworkIntentsStatus(provider, cluster, queryInstance, queryType, queryOutput, queryApps, queryClusters, queryResources)
+ if iErr != nil {
+ http.Error(w, iErr.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ iErr = json.NewEncoder(w).Encode(status)
+ if iErr != nil {
+ http.Error(w, iErr.Error(), http.StatusInternalServerError)
+ return
+ }
+}
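
This validation block is nearly identical to the one added to the orchestrator's instantiation handler later in this diff. If the parameter set grows, a shared helper could remove the duplication; a possible sketch (hypothetical, not part of this change, and omitting the name validation for brevity):

```go
package api

import (
	"fmt"
	"net/url"
)

// statusQuery collects the validated status query parameters.
type statusQuery struct {
	Instance, Type, Output    string
	Apps, Clusters, Resources []string
}

// parseStatusQuery applies the same defaults and enum checks as the handlers.
func parseStatusQuery(q url.Values) (statusQuery, error) {
	sq := statusQuery{Type: "rsync", Output: "all", Instance: q.Get("instance")}
	if t := q.Get("type"); t != "" {
		if t != "rsync" && t != "cluster" {
			return sq, fmt.Errorf("invalid query type: %s", t)
		}
		sq.Type = t
	}
	if o := q.Get("output"); o != "" {
		if o != "summary" && o != "all" && o != "detail" {
			return sq, fmt.Errorf("invalid query output: %s", o)
		}
		sq.Output = o
	}
	sq.Apps, sq.Clusters, sq.Resources = q["app"], q["cluster"], q["resource"]
	return sq, nil
}
```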
diff --git a/src/ncm/go.mod b/src/ncm/go.mod
index d3d2924a..84ecc4ec 100644
--- a/src/ncm/go.mod
+++ b/src/ncm/go.mod
@@ -6,19 +6,20 @@ require (
github.com/gorilla/mux v1.7.3
github.com/k8snetworkplumbingwg/network-attachment-definition-client v0.0.0-20200127152046-0ee521d56061
github.com/onap/multicloud-k8s/src/clm v0.0.0-00010101000000-000000000000
- github.com/onap/multicloud-k8s/src/orchestrator v0.0.0-20200601021239-7959bd4c6fd4
- github.com/pkg/errors v0.8.1
- google.golang.org/grpc v1.27.1
+ github.com/onap/multicloud-k8s/src/orchestrator v0.0.0-20200721211210-783ed87fb39a
+ github.com/pkg/errors v0.9.1
+ google.golang.org/grpc v1.28.0
gopkg.in/yaml.v2 v2.2.8
- k8s.io/api v0.0.0-20190831074750-7364b6bdad65
- k8s.io/apimachinery v0.0.0-20190831074630-461753078381
- k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
+ k8s.io/api v0.18.2
+ k8s.io/apimachinery v0.18.2
+ k8s.io/client-go v12.0.0+incompatible
k8s.io/kubernetes v1.14.1
k8s.io/utils v0.0.0-20200520001619-278ece378a50 // indirect
)
replace (
github.com/onap/multicloud-k8s/src/clm => ../clm
+ github.com/onap/multicloud-k8s/src/monitor => ../monitor
github.com/onap/multicloud-k8s/src/orchestrator => ../orchestrator
github.com/onap/multicloud-k8s/src/rsync => ../rsync
k8s.io/api => k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b
diff --git a/src/ncm/pkg/scheduler/scheduler.go b/src/ncm/pkg/scheduler/scheduler.go
index f2135974..516c0525 100644
--- a/src/ncm/pkg/scheduler/scheduler.go
+++ b/src/ncm/pkg/scheduler/scheduler.go
@@ -31,6 +31,7 @@ import (
log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/controller"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/status"
pkgerrors "github.com/pkg/errors"
)
@@ -41,6 +42,7 @@ const rsyncName = "rsync"
// SchedulerManager is an interface that exposes the scheduler functionality
type SchedulerManager interface {
ApplyNetworkIntents(clusterProvider, cluster string) error
+ NetworkIntentsStatus(clusterProvider, cluster, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (ClusterStatus, error)
TerminateNetworkIntents(clusterProvider, cluster string) error
}
@@ -63,6 +65,11 @@ func NewSchedulerClient() *SchedulerClient {
}
}
+// ClusterStatus holds the status data prepared for cluster network intent status queries
+type ClusterStatus struct {
+ status.StatusResult `json:",inline"`
+}
+
func deleteAppContext(ac appcontext.AppContext) {
err := ac.DeleteCompositeApp()
if err != nil {
@@ -303,3 +310,31 @@ func (v *SchedulerClient) TerminateNetworkIntents(clusterProvider, cluster strin
return nil
}
+
+/*
+NetworkIntentsStatus takes in cluster provider, cluster and query parameters.
+This method is responsible for obtaining the status of
+the cluster network intents, which is made available in the appcontext
+*/
+func (c SchedulerClient) NetworkIntentsStatus(clusterProvider, cluster, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (ClusterStatus, error) {
+
+ s, err := clusterPkg.NewClusterClient().GetClusterState(clusterProvider, cluster)
+ if err != nil {
+ return ClusterStatus{}, pkgerrors.Wrap(err, "cluster state not found")
+ }
+
+ // Prepare the apps list (just one hardcoded value)
+ allApps := make([]string, 0)
+ allApps = append(allApps, nettypes.CONTEXT_CLUSTER_APP)
+
+ statusResponse, err := status.PrepareStatusResult(s, allApps, qInstance, qType, qOutput, qApps, qClusters, qResources)
+ if err != nil {
+ return ClusterStatus{}, err
+ }
+ statusResponse.Name = clusterProvider + "+" + cluster
+ clStatus := ClusterStatus{
+ StatusResult: statusResponse,
+ }
+
+ return clStatus, nil
+}
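
A minimal usage sketch of the new method, with hypothetical provider and cluster names; an empty instance selects the most recent AppContext:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"
)

func main() {
	client := scheduler.NewSchedulerClient()
	// "vfw-cluster-provider" and "edge01" are example names only.
	st, err := client.NetworkIntentsStatus("vfw-cluster-provider", "edge01",
		"", "rsync", "summary", nil, nil, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	json.NewEncoder(os.Stdout).Encode(st)
}
```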
diff --git a/src/orchestrator/api/api.go b/src/orchestrator/api/api.go
index 72b444b7..de69d163 100644
--- a/src/orchestrator/api/api.go
+++ b/src/orchestrator/api/api.go
@@ -194,6 +194,8 @@ func NewRouter(projectClient moduleLib.ProjectManager,
router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/instantiate", instantiationHandler.instantiateHandler).Methods("POST")
router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/terminate", instantiationHandler.terminateHandler).Methods("POST")
router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/status", instantiationHandler.statusHandler).Methods("GET")
+ router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/status",
+ instantiationHandler.statusHandler).Queries("instance", "{instance}", "type", "{type}", "output", "{output}", "app", "{app}", "cluster", "{cluster}", "resource", "{resource}")
return router
}
diff --git a/src/orchestrator/api/instantiation_handler.go b/src/orchestrator/api/instantiation_handler.go
index eeac8a00..f9f86954 100644
--- a/src/orchestrator/api/instantiation_handler.go
+++ b/src/orchestrator/api/instantiation_handler.go
@@ -19,8 +19,11 @@ package api
import (
"encoding/json"
"net/http"
+ "net/url"
+ "strings"
"github.com/gorilla/mux"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
moduleLib "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module"
)
@@ -90,7 +93,95 @@ func (h instantiationHandler) statusHandler(w http.ResponseWriter, r *http.Reque
v := vars["composite-app-version"]
di := vars["deployment-intent-group-name"]
- status, iErr := h.client.Status(p, ca, v, di)
+ qParams, err := url.ParseQuery(r.URL.RawQuery)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ var queryInstance string
+ if o, found := qParams["instance"]; found {
+ queryInstance = o[0]
+ if queryInstance == "" {
+ http.Error(w, "Invalid query instance", http.StatusBadRequest)
+ return
+ }
+ } else {
+ queryInstance = "" // default instance value
+ }
+
+ var queryType string
+ if t, found := qParams["type"]; found {
+ queryType = t[0]
+ if queryType != "cluster" && queryType != "rsync" {
+ http.Error(w, "Invalid query type", http.StatusBadRequest)
+ return
+ }
+ } else {
+ queryType = "rsync" // default type
+ }
+
+ var queryOutput string
+ if o, found := qParams["output"]; found {
+ queryOutput = o[0]
+ if queryOutput != "summary" && queryOutput != "all" && queryOutput != "detail" {
+ http.Error(w, "Invalid query output", http.StatusBadRequest)
+ return
+ }
+ } else {
+ queryOutput = "all" // default output format
+ }
+
+ var queryApps []string
+ if a, found := qParams["app"]; found {
+ queryApps = a
+ for _, app := range queryApps {
+ errs := validation.IsValidName(app)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid app query", http.StatusBadRequest)
+ return
+ }
+ }
+ } else {
+ queryApps = make([]string, 0)
+ }
+
+ var queryClusters []string
+ if c, found := qParams["cluster"]; found {
+ queryClusters = c
+ for _, cl := range queryClusters {
+ parts := strings.Split(cl, "+")
+ if len(parts) != 2 {
+ http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+ return
+ }
+ for _, p := range parts {
+ errs := validation.IsValidName(p)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+ } else {
+ queryClusters = make([]string, 0)
+ }
+
+ var queryResources []string
+ if r, found := qParams["resource"]; found {
+ queryResources = r
+ for _, res := range queryResources {
+ errs := validation.IsValidName(res)
+ if len(errs) > 0 {
+ http.Error(w, "Invalid resources query", http.StatusBadRequest)
+ return
+ }
+ }
+ } else {
+ queryResources = make([]string, 0)
+ }
+
+ status, iErr := h.client.Status(p, ca, v, di, queryInstance, queryType, queryOutput, queryApps, queryClusters, queryResources)
if iErr != nil {
http.Error(w, iErr.Error(), http.StatusInternalServerError)
return
@@ -103,6 +194,4 @@ func (h instantiationHandler) statusHandler(w http.ResponseWriter, r *http.Reque
http.Error(w, iErr.Error(), http.StatusInternalServerError)
return
}
- w.WriteHeader(http.StatusAccepted)
-
}
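
The dropped trailing w.WriteHeader(http.StatusAccepted) was a latent bug: the handler has already written a 200 header and encoded the body, so a second WriteHeader is ignored (and, on a real server, logs a "superfluous response.WriteHeader call" warning). A small demonstration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
		fmt.Fprint(w, `{"ok":true}`)
		// Ignored; a real *http.Server would log
		// "http: superfluous response.WriteHeader call".
		w.WriteHeader(http.StatusAccepted)
	}
	rec := httptest.NewRecorder()
	h(rec, httptest.NewRequest("GET", "/status", nil))
	fmt.Println(rec.Code) // 200, not 202
}
```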
diff --git a/src/orchestrator/go.mod b/src/orchestrator/go.mod
index f79d43dd..fc37f038 100644
--- a/src/orchestrator/go.mod
+++ b/src/orchestrator/go.mod
@@ -12,11 +12,7 @@ require (
github.com/golang/protobuf v1.4.1
github.com/gorilla/handlers v1.3.0
github.com/gorilla/mux v1.7.3
- github.com/huandu/xstrings v1.3.1 // indirect
- github.com/jmoiron/sqlx v1.2.0 // indirect
github.com/lib/pq v1.6.0 // indirect
- github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
- github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/onap/multicloud-k8s/src/monitor v0.0.0-20200630152613-7c20f73e7c5d
github.com/onap/multicloud-k8s/src/ncm v0.0.0-20200515060444-c77850a75eee
github.com/onap/multicloud-k8s/src/rsync v0.0.0-20200630152613-7c20f73e7c5d
@@ -36,9 +32,10 @@ require (
gopkg.in/yaml.v2 v2.2.8
gopkg.in/yaml.v3 v3.0.0-20200506231410-2ff61e1afc86
k8s.io/apimachinery v0.18.2
+ k8s.io/client-go v12.0.0+incompatible
k8s.io/cloud-provider v0.0.0-20190409023720-1bc0c81fa51d // indirect
k8s.io/helm v2.14.3+incompatible
- sigs.k8s.io/kustomize v2.0.3+incompatible // indirect
+ k8s.io/kubernetes v1.14.1
)
replace (
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 08250d16..de723242 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -22,11 +22,11 @@ import (
"fmt"
"time"
- rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/status"
"github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
pkgerrors "github.com/pkg/errors"
)
@@ -45,14 +45,12 @@ type InstantiationClient struct {
db InstantiationClientDbInfo
}
-type ClusterAppStatus struct {
- Cluster string
- App string
- Status rb.ResourceBundleStatus
-}
-
-type StatusData struct {
- Data []ClusterAppStatus
+type DeploymentStatus struct {
+ Project string `json:"project,omitempty"`
+ CompositeAppName string `json:"composite-app-name,omitempty"`
+ CompositeAppVersion string `json:"composite-app-version,omitempty"`
+ CompositeProfileName string `json:"composite-profile-name,omitempty"`
+ status.StatusResult `json:",inline"`
}
/*
@@ -75,7 +73,7 @@ type InstantiationKey struct {
type InstantiationManager interface {
Approve(p string, ca string, v string, di string) error
Instantiate(p string, ca string, v string, di string) error
- Status(p string, ca string, v string, di string) (StatusData, error)
+ Status(p, ca, v, di, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (DeploymentStatus, error)
Terminate(p string, ca string, v string, di string) error
}
@@ -429,64 +427,39 @@ Status takes in projectName, compositeAppName, compositeAppVersion,
DeploymentIntentName. This method is responsible for obtaining the status of
the deployment, which is made available in the appcontext.
*/
-func (c InstantiationClient) Status(p string, ca string, v string, di string) (StatusData, error) {
+func (c InstantiationClient) Status(p, ca, v, di, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (DeploymentStatus, error) {
- s, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroupState(di, p, ca, v)
+ dIGrp, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroup(di, p, ca, v)
if err != nil {
- return StatusData{}, pkgerrors.Wrap(err, "deploymentIntentGroup not found: "+di)
+ return DeploymentStatus{}, pkgerrors.Wrap(err, "Not finding the deploymentIntentGroup")
}
- currentCtxId := state.GetLastContextIdFromStateInfo(s)
- ac, err := state.GetAppContextFromId(currentCtxId)
+ diState, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroupState(di, p, ca, v)
if err != nil {
- return StatusData{}, pkgerrors.Wrap(err, "AppContext for deploymentIntentGroup not found: "+di)
+ return DeploymentStatus{}, pkgerrors.Wrap(err, "deploymentIntentGroup state not found: "+di)
}
// Get all apps in this composite app
- allApps, err := NewAppClient().GetApps(p, ca, v)
+ apps, err := NewAppClient().GetApps(p, ca, v)
if err != nil {
- return StatusData{}, pkgerrors.Wrap(err, "Not finding the apps")
+ return DeploymentStatus{}, pkgerrors.Wrap(err, "Not finding the apps")
+ }
+ allApps := make([]string, 0)
+ for _, a := range apps {
+ allApps = append(allApps, a.Metadata.Name)
}
- var diStatus StatusData
- diStatus.Data = make([]ClusterAppStatus, 0)
-
- // Loop through each app and get the status data for each cluster in the app
- for _, app := range allApps {
- // Get the clusters in the appcontext for this app
- clusters, err := ac.GetClusterNames(app.Metadata.Name)
- if err != nil {
- log.Info(":: No clusters for app ::", log.Fields{"AppName": app.Metadata.Name})
- continue
- }
-
- for _, cluster := range clusters {
- handle, err := ac.GetClusterStatusHandle(app.Metadata.Name, cluster)
- if err != nil {
- log.Info(":: No status handle for cluster, app ::",
- log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
- continue
- }
- statusValue, err := ac.GetValue(handle)
- if err != nil {
- log.Info(":: No status value for cluster, app ::",
- log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
- continue
- }
- log.Info(":: STATUS VALUE ::", log.Fields{"statusValue": statusValue})
- var statusData ClusterAppStatus
- err = json.Unmarshal([]byte(statusValue.(string)), &statusData.Status)
- if err != nil {
- log.Info(":: Error unmarshaling status value for cluster, app ::",
- log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
- continue
- }
- statusData.Cluster = cluster
- statusData.App = app.Metadata.Name
- log.Info(":: STATUS DATA ::", log.Fields{"status": statusData})
-
- diStatus.Data = append(diStatus.Data, statusData)
- }
+ statusResponse, err := status.PrepareStatusResult(diState, allApps, qInstance, qType, qOutput, qApps, qClusters, qResources)
+ if err != nil {
+ return DeploymentStatus{}, err
+ }
+ statusResponse.Name = di
+ diStatus := DeploymentStatus{
+ Project: p,
+ CompositeAppName: ca,
+ CompositeAppVersion: v,
+ CompositeProfileName: dIGrp.Spec.Profile,
+ StatusResult: statusResponse,
}
return diStatus, nil
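
The reworked Status now delegates to status.PrepareStatusResult and wraps the result with identifying fields. A usage sketch, assuming the package's NewInstantiationClient constructor and using hypothetical names; the arguments mirror the handler's defaults:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	moduleLib "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module"
)

func main() {
	client := moduleLib.NewInstantiationClient()
	// Latest instance, rsync source, "all" output, no filters.
	st, err := client.Status("testvfw", "compositevfw", "v1",
		"vfw_deployment_intent_group", "", "rsync", "all", nil, nil, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	enc.Encode(st)
}
```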
diff --git a/src/orchestrator/pkg/state/types.go b/src/orchestrator/pkg/state/types.go
index 665a1be4..99f0adca 100644
--- a/src/orchestrator/pkg/state/types.go
+++ b/src/orchestrator/pkg/state/types.go
@@ -22,15 +22,15 @@ import "time"
// information about resources which can be instantiated via rsync.
// The last Actions entry holds the current state of the container object.
type StateInfo struct {
- Actions []ActionEntry
+ Actions []ActionEntry `json:"actions"`
}
// ActionEntry is used to keep track of the time an action (e.g. Created, Instantiate, Terminate) was invoked
// For actions where an AppContext is relevent, the ContextId field will be non-zero length
type ActionEntry struct {
- State StateValue
- ContextId string
- TimeStamp time.Time
+ State StateValue `json:"state"`
+ ContextId string `json:"instance"`
+ TimeStamp time.Time `json:"time"`
}
type StateValue = string
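
With the added tags, StateInfo serializes with stable lowercase keys, and ContextId surfaces as "instance" to line up with the status API's instance query parameter. For example:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
)

func main() {
	si := state.StateInfo{
		Actions: []state.ActionEntry{{
			State:     "Instantiated", // StateValue is a string alias
			ContextId: "6385596659306465421",
			TimeStamp: time.Date(2020, 7, 21, 0, 0, 0, 0, time.UTC),
		}},
	}
	b, _ := json.Marshal(si)
	fmt.Println(string(b))
	// {"actions":[{"state":"Instantiated","instance":"6385596659306465421","time":"2020-07-21T00:00:00Z"}]}
}
```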
diff --git a/src/orchestrator/pkg/status/status_helper.go b/src/orchestrator/pkg/status/status_helper.go
new file mode 100644
index 00000000..a791493e
--- /dev/null
+++ b/src/orchestrator/pkg/status/status_helper.go
@@ -0,0 +1,482 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package status
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+ "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/clientset/versioned/scheme"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/resourcestatus"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+ pkgerrors "github.com/pkg/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// decodeYAML reads a YAML []byte to extract the Kubernetes object definition
+func decodeYAML(y []byte, into runtime.Object) (runtime.Object, error) {
+ decode := scheme.Codecs.UniversalDeserializer().Decode
+ obj, _, err := decode(y, nil, into)
+ if err != nil {
+ return nil, pkgerrors.Wrap(err, "Deserialize YAML error")
+ }
+
+ return obj, nil
+}
+
+func getUnstruct(y []byte) (unstructured.Unstructured, error) {
+ //Decode the yaml file to create a runtime.Object
+ unstruct := unstructured.Unstructured{}
+ //Ignore the returned obj as we expect the data in unstruct
+ _, err := decodeYAML(y, &unstruct)
+ if err != nil {
+ log.Info(":: Error decoding YAML ::", log.Fields{"object": y, "error": err})
+ return unstructured.Unstructured{}, pkgerrors.Wrap(err, "Decode object error")
+ }
+
+ return unstruct, nil
+}
+
+// GetClusterResources takes in a ResourceBundleStatus CR and returns a list of ResourceStatus elements
+func GetClusterResources(rbData rb.ResourceBundleStatus, qOutput string, qResources []string,
+ resourceList *[]ResourceStatus, cnts map[string]int) (int, error) {
+
+ count := 0
+
+ for _, p := range rbData.PodStatuses {
+ if !keepResource(p.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = p.Name
+ r.Gvk = (&p.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = p
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, s := range rbData.ServiceStatuses {
+ if !keepResource(s.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = s.Name
+ r.Gvk = (&s.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = s
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, d := range rbData.DeploymentStatuses {
+ if !keepResource(d.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = d.Name
+ r.Gvk = (&d.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = d
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, c := range rbData.ConfigMapStatuses {
+ if !keepResource(c.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = c.Name
+ r.Gvk = (&c.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = c
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, s := range rbData.SecretStatuses {
+ if !keepResource(s.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = s.Name
+ r.Gvk = (&s.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = s
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, d := range rbData.DaemonSetStatuses {
+ if !keepResource(d.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = d.Name
+ r.Gvk = (&d.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = d
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, i := range rbData.IngressStatuses {
+ if !keepResource(i.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = i.Name
+ r.Gvk = (&i.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = i
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, j := range rbData.JobStatuses {
+ if !keepResource(j.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = j.Name
+ r.Gvk = (&j.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = j
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ for _, s := range rbData.StatefulSetStatuses {
+ if !keepResource(s.Name, qResources) {
+ continue
+ }
+ r := ResourceStatus{}
+ r.Name = s.Name
+ r.Gvk = (&s.TypeMeta).GroupVersionKind()
+ if qOutput == "detail" {
+ r.Detail = s
+ }
+ *resourceList = append(*resourceList, r)
+ count++
+ cnt := cnts["Present"]
+ cnts["Present"] = cnt + 1
+ }
+
+ return count, nil
+}
+
+// isResourceHandle takes a cluster handle and determines if the other handle parameter is a resource handle for this cluster
+// handle. It does this by verifying that the cluster handle is a prefix of the handle and that the remainder of the handle
+// is a value that matches to a resource format: "resource/<name>+<type>/"
+// Example cluster handle:
+// /context/6385596659306465421/app/network-intents/cluster/vfw-cluster-provider+edge01/
+// Example resource handle:
+// /context/6385596659306465421/app/network-intents/cluster/vfw-cluster-provider+edge01/resource/emco-private-net+ProviderNetwork/
+func isResourceHandle(ch, h interface{}) bool {
+ clusterHandle := fmt.Sprintf("%v", ch)
+ handle := fmt.Sprintf("%v", h)
+ diff := strings.Split(handle, clusterHandle)
+
+ if len(diff) != 2 || diff[0] != "" {
+ return false
+ }
+
+ parts := strings.Split(diff[1], "/")
+
+ if len(parts) == 3 &&
+ parts[0] == "resource" &&
+ len(strings.Split(parts[1], "+")) == 2 &&
+ parts[2] == "" {
+ return true
+ } else {
+ return false
+ }
+}
+
+// keepResource keeps a resource if the filter list is empty or if the resource is part of the list
+func keepResource(r string, rList []string) bool {
+ if len(rList) == 0 {
+ return true
+ }
+ for _, res := range rList {
+ if r == res {
+ return true
+ }
+ }
+ return false
+}
+
+// GetAppContextResources collects the resource status of all resources in an AppContext subject to the filter parameters
+func GetAppContextResources(ac appcontext.AppContext, ch interface{}, qOutput string, qResources []string, resourceList *[]ResourceStatus, statusCnts map[string]int) (int, error) {
+ count := 0
+
+ // Get all Resources for the Cluster
+ hs, err := ac.GetAllHandles(ch)
+ if err != nil {
+ log.Info(":: Error getting all handles ::", log.Fields{"handles": ch, "error": err})
+ return 0, err
+ }
+
+ for _, h := range hs {
+ // skip any handles that are not resource handles
+ if !isResourceHandle(ch, h) {
+ continue
+ }
+
+ // Get Resource from AppContext
+ res, err := ac.GetValue(h)
+ if err != nil {
+ log.Info(":: Error getting resource value ::", log.Fields{"Handle": h})
+ continue
+ }
+
+ // Get Resource Status from AppContext
+ sh, err := ac.GetLevelHandle(h, "status")
+ if err != nil {
+ log.Info(":: No status handle for resource ::", log.Fields{"Handle": h})
+ continue
+ }
+ s, err := ac.GetValue(sh)
+ if err != nil {
+ log.Info(":: Error getting resource status value ::", log.Fields{"Handle": sh})
+ continue
+ }
+ rstatus := resourcestatus.ResourceStatus{}
+ js, err := json.Marshal(s)
+ if err != nil {
+ log.Info(":: Non-JSON status data for resource ::", log.Fields{"Handle": sh, "Value": s})
+ continue
+ }
+ err = json.Unmarshal(js, &rstatus)
+ if err != nil {
+ log.Info(":: Invalid status data for resource ::", log.Fields{"Handle": sh, "Value": s})
+ continue
+ }
+
+ // Get the unstructured object
+ unstruct, err := getUnstruct([]byte(res.(string)))
+ if err != nil {
+ log.Info(":: Error getting GVK ::", log.Fields{"Resource": res, "error": err})
+ continue
+ }
+ if !keepResource(unstruct.GetName(), qResources) {
+ continue
+ }
+
+ // Make and fill out a ResourceStatus structure
+ r := ResourceStatus{}
+ r.Gvk = unstruct.GroupVersionKind()
+ r.Name = unstruct.GetName()
+ if qOutput == "detail" {
+ r.Detail = unstruct.Object
+ }
+ r.RsyncStatus = fmt.Sprintf("%v", rstatus.Status)
+ *resourceList = append(*resourceList, r)
+ cnt := statusCnts[rstatus.Status]
+ statusCnts[rstatus.Status] = cnt + 1
+ count++
+ }
+
+ return count, nil
+}
+
+// PrepareStatusResult takes in the resource's stateInfo object, the list of apps and the query parameters.
+// It then fills out the StatusResult structure appropriately from information in the AppContext
+func PrepareStatusResult(stateInfo state.StateInfo, apps []string, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (StatusResult, error) {
+
+ var currentCtxId string
+ if qInstance != "" {
+ currentCtxId = qInstance
+ } else {
+ currentCtxId = state.GetLastContextIdFromStateInfo(stateInfo)
+ }
+ ac, err := state.GetAppContextFromId(currentCtxId)
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "AppContext for status query not found")
+ }
+
+ // get the appcontext status value
+ h, err := ac.GetCompositeAppHandle()
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "AppContext handle not found")
+ }
+ sh, err := ac.GetLevelHandle(h, "status")
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "AppContext status handle not found")
+ }
+ statusVal, err := ac.GetValue(sh)
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "AppContext status value not found")
+ }
+ acStatus := appcontext.AppContextStatus{}
+ js, err := json.Marshal(statusVal)
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "Invalid AppContext status value format")
+ }
+ err = json.Unmarshal(js, &acStatus)
+ if err != nil {
+ return StatusResult{}, pkgerrors.Wrap(err, "Invalid AppContext status value format")
+ }
+
+ statusResult := StatusResult{}
+
+ statusResult.Apps = make([]AppStatus, 0)
+ statusResult.State = stateInfo
+ statusResult.Status = acStatus.Status
+
+ rsyncStatusCnts := make(map[string]int)
+ clusterStatusCnts := make(map[string]int)
+ // Loop through each app and get the status data for each cluster in the app
+ for _, app := range apps {
+ appCount := 0
+ if len(qApps) > 0 {
+ found := false
+ for _, a := range qApps {
+ if a == app {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+ }
+ // Get the clusters in the appcontext for this app
+ clusters, err := ac.GetClusterNames(app)
+ if err != nil {
+ continue
+ }
+ var appStatus AppStatus
+ appStatus.Name = app
+ appStatus.Clusters = make([]ClusterStatus, 0)
+
+ for _, cluster := range clusters {
+ clusterCount := 0
+ if len(qClusters) > 0 {
+ found := false
+ for _, c := range qClusters {
+ if c == cluster {
+ found = true
+ break
+ }
+ }
+ if !found {
+ continue
+ }
+ }
+
+ var clusterStatus ClusterStatus
+ pc := strings.Split(cluster, "+")
+ clusterStatus.ClusterProvider = pc[0]
+ clusterStatus.Cluster = pc[1]
+
+ if qType == "cluster" {
+ csh, err := ac.GetClusterStatusHandle(app, cluster)
+ if err != nil {
+ log.Info(":: No cluster status handle for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+ clusterRbValue, err := ac.GetValue(csh)
+ if err != nil {
+ log.Info(":: No cluster status value for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+ var rbValue rb.ResourceBundleStatus
+ err = json.Unmarshal([]byte(clusterRbValue.(string)), &rbValue)
+ if err != nil {
+ log.Info(":: Error unmarshaling cluster status value for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+
+ clusterStatus.Resources = make([]ResourceStatus, 0)
+ cnt, err := GetClusterResources(rbValue, qOutput, qResources, &clusterStatus.Resources, clusterStatusCnts)
+ if err != nil {
+ log.Info(":: Error gathering cluster resources for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+ appCount += cnt
+ clusterCount += cnt
+ } else if qType == "rsync" {
+ ch, err := ac.GetClusterHandle(app, cluster)
+ if err != nil {
+ log.Info(":: No handle for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+
+ /* code to get status for resources from AppContext */
+ clusterStatus.Resources = make([]ResourceStatus, 0)
+ cnt, err := GetAppContextResources(ac, ch, qOutput, qResources, &clusterStatus.Resources, rsyncStatusCnts)
+ if err != nil {
+ log.Info(":: Error gathering appcontext resources for cluster, app ::",
+ log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+ continue
+ }
+ appCount += cnt
+ clusterCount += cnt
+ } else {
+ log.Info(":: Invalid status type ::", log.Fields{"Status Type": qType})
+ continue
+ }
+
+ if clusterCount > 0 {
+ appStatus.Clusters = append(appStatus.Clusters, clusterStatus)
+ }
+ }
+ if appCount > 0 && qOutput != "summary" {
+ statusResult.Apps = append(statusResult.Apps, appStatus)
+ }
+ }
+ statusResult.RsyncStatus = rsyncStatusCnts
+ statusResult.ClusterStatus = clusterStatusCnts
+
+ return statusResult, nil
+}
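
Since isResourceHandle is unexported, its behavior is easiest to pin down with an in-package test; a sketch using the handles from the function's own comment:

```go
package status

import "testing"

func TestIsResourceHandle(t *testing.T) {
	cluster := "/context/6385596659306465421/app/network-intents/cluster/vfw-cluster-provider+edge01/"
	resource := cluster + "resource/emco-private-net+ProviderNetwork/"

	if !isResourceHandle(cluster, resource) {
		t.Errorf("expected %q to be a resource handle under %q", resource, cluster)
	}
	// A deeper sub-handle (e.g. the resource's status handle) must not match.
	if isResourceHandle(cluster, resource+"status/") {
		t.Error("did not expect a status handle to match")
	}
}
```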
diff --git a/src/orchestrator/pkg/status/types.go b/src/orchestrator/pkg/status/types.go
new file mode 100644
index 00000000..91a4bc12
--- /dev/null
+++ b/src/orchestrator/pkg/status/types.go
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package status
+
+import (
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// StatusQueryParam defines the type of the query parameter
+type StatusQueryParam = string
+type queryparams struct {
+ Instance StatusQueryParam // identify which AppContext to use - default is latest
+ Summary StatusQueryParam // only show high level summary
+ All StatusQueryParam // include basic resource information
+ Detail StatusQueryParam // show resource details
+ Rsync StatusQueryParam // select rsync (appcontext) data as source for query
+ App StatusQueryParam // filter results by specified app(s)
+ Cluster StatusQueryParam // filter results by specified cluster(s)
+ Resource StatusQueryParam // filter results by specified resource(s)
+}
+
+// StatusQueryEnum defines the set of valid query parameter strings
+var StatusQueryEnum = &queryparams{
+ Instance: "instance",
+ Summary: "summary",
+ All: "all",
+ Detail: "detail",
+ Rsync: "rsync",
+ App: "app",
+ Cluster: "cluster",
+ Resource: "resource",
+}
+
+type StatusResult struct {
+ Name string `json:"name,omitempty,inline"`
+ State state.StateInfo `json:"states,omitempty,inline"`
+ Status appcontext.StatusValue `json:"status,omitempty,inline"`
+ RsyncStatus map[string]int `json:"rsync-status,omitempty,inline"`
+ ClusterStatus map[string]int `json:"cluster-status,omitempty,inline"`
+ Apps []AppStatus `json:"apps,omitempty,inline"`
+}
+
+type AppStatus struct {
+ Name string `json:"name,omitempty"`
+ Clusters []ClusterStatus `json:"clusters,omitempty"`
+}
+
+type ClusterStatus struct {
+ ClusterProvider string `json:"cluster-provider,omitempty"`
+ Cluster string `json:"cluster,omitempty"`
+ Resources []ResourceStatus `json:"resources,omitempty"`
+}
+
+type ResourceStatus struct {
+ Gvk schema.GroupVersionKind `json:"GVK,omitempty"`
+ Name string `json:"name,omitempty"`
+ Detail interface{} `json:"detail,omitempty"`
+ RsyncStatus string `json:"rsync-status,omitempty"`
+ ClusterStatus string `json:"cluster-status,omitempty"`
+}
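
For reference, a summary-style result marshals with the hyphenated keys above; a sketch with illustrative values:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/onap/multicloud-k8s/src/orchestrator/pkg/status"
)

func main() {
	sr := status.StatusResult{
		Name:        "vfw_deployment_intent_group", // illustrative name
		RsyncStatus: map[string]int{"Applied": 12},
	}
	b, _ := json.MarshalIndent(sr, "", "  ")
	fmt.Println(string(b))
	// Note: "inline" is not an option encoding/json recognizes, so each
	// field simply marshals under its own key, e.g. "rsync-status".
}
```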