author     Eric Multanen <eric.w.multanen@intel.com>  2020-08-12 15:33:12 -0700
committer  Eric Multanen <eric.w.multanen@intel.com>  2020-08-31 15:50:06 -0700
commit     645c6a331cd00043fcf9f567f5f261a9db070918 (patch)
tree       3298b48aca4d93d22680960bd2a27290799b5732 /src/orchestrator
parent     ce99856834a225f6f68b6eda725ae7122a2f8185 (diff)
Enhance the status query API
This patch enhances the status query API.
- The ResourceBundleState CRD is modified to just use the
k8s Pod structure instead of a customized struct.
- Status queries can present results showing either
  the rsync status of the composite app and its resources
  or the information received from the cluster via
  the ResourceBundleState CR
- Query parameters are provided to the API call to
customize the query and response
- Support for querying status of cluster network
intents is added
Issue-ID: MULTICLOUD-1042
Signed-off-by: Eric Multanen <eric.w.multanen@intel.com>
Change-Id: Icca4cdd901e2f2b446414fade256fc24d87594cd
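The query parameters listed in the commit message map onto the status GET route added in api.go below. As orientation only (not part of the patch), a minimal client sketch of such a query might look like the following; the base URL, the /v2 prefix, the port, and the project/composite-app names are placeholders for a running orchestrator, while the parameter names and values come from the handler code in this change:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder endpoint and names -- adjust for an actual deployment.
	base := "http://localhost:9015/v2"
	path := fmt.Sprintf("/projects/%s/composite-apps/%s/%s/deployment-intent-groups/%s/status",
		"testvfw", "compositevfw", "v1", "vfw_deployment_intent_group")

	q := url.Values{}
	q.Set("type", "cluster")   // status from the ResourceBundleState CR ("rsync" is the default)
	q.Set("output", "summary") // "summary", "all" (default) or "detail"
	q.Add("app", "firewall")   // optional filters: app, cluster, resource
	q.Add("cluster", "vfw-cluster-provider+edge01")

	resp, err := http.Get(base + path + "?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```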
Diffstat (limited to 'src/orchestrator')
 -rw-r--r--  src/orchestrator/api/api.go                    |   2
 -rw-r--r--  src/orchestrator/api/instantiation_handler.go  |  95
 -rw-r--r--  src/orchestrator/go.mod                        |   7
 -rw-r--r--  src/orchestrator/pkg/module/instantiation.go   |  87
 -rw-r--r--  src/orchestrator/pkg/state/types.go            |   8
 -rw-r--r--  src/orchestrator/pkg/status/status_helper.go   | 482
 -rw-r--r--  src/orchestrator/pkg/status/types.go           |  76
 7 files changed, 688 insertions, 69 deletions
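One small change buried in the diff below is worth calling out before reading it: pkg/state/types.go gains JSON tags, and ActionEntry.ContextId is serialized as "instance" — the same value a caller passes back via the ?instance= query parameter to select a particular AppContext. A hedged sketch of what that serialization looks like (the state value and context id here are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
)

func main() {
	// Hypothetical state entry; "Instantiated" stands in for whatever
	// StateValue string the orchestrator actually records.
	si := state.StateInfo{
		Actions: []state.ActionEntry{
			{
				State:     "Instantiated",
				ContextId: "6385596659306465421",
				TimeStamp: time.Now(),
			},
		},
	}
	b, _ := json.MarshalIndent(si, "", "  ")
	// With the new tags the context id is emitted as "instance", matching the
	// ?instance= query parameter of the status API.
	fmt.Println(string(b))
}
```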
diff --git a/src/orchestrator/api/api.go b/src/orchestrator/api/api.go
index 72b444b7..de69d163 100644
--- a/src/orchestrator/api/api.go
+++ b/src/orchestrator/api/api.go
@@ -194,6 +194,8 @@ func NewRouter(projectClient moduleLib.ProjectManager,
 	router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/instantiate", instantiationHandler.instantiateHandler).Methods("POST")
 	router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/terminate", instantiationHandler.terminateHandler).Methods("POST")
 	router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/status", instantiationHandler.statusHandler).Methods("GET")
+	router.HandleFunc("/projects/{project-name}/composite-apps/{composite-app-name}/{composite-app-version}/deployment-intent-groups/{deployment-intent-group-name}/status",
+		instantiationHandler.statusHandler).Queries("instance", "{instance}", "type", "{type}", "output", "{output}", "app", "{app}", "cluster", "{cluster}", "resource", "{resource}")
 
 	return router
 }
diff --git a/src/orchestrator/api/instantiation_handler.go b/src/orchestrator/api/instantiation_handler.go
index eeac8a00..f9f86954 100644
--- a/src/orchestrator/api/instantiation_handler.go
+++ b/src/orchestrator/api/instantiation_handler.go
@@ -19,8 +19,11 @@ package api
 import (
     "encoding/json"
     "net/http"
+    "net/url"
+    "strings"
 
     "github.com/gorilla/mux"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
     moduleLib "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module"
 )
@@ -90,7 +93,95 @@ func (h instantiationHandler) statusHandler(w http.ResponseWriter, r *http.Reque
     v := vars["composite-app-version"]
     di := vars["deployment-intent-group-name"]
 
-    status, iErr := h.client.Status(p, ca, v, di)
+    qParams, err := url.ParseQuery(r.URL.RawQuery)
+    if err != nil {
+        http.Error(w, err.Error(), http.StatusBadRequest)
+        return
+    }
+
+    var queryInstance string
+    if o, found := qParams["instance"]; found {
+        queryInstance = o[0]
+        if queryInstance == "" {
+            http.Error(w, "Invalid query instance", http.StatusBadRequest)
+            return
+        }
+    } else {
+        queryInstance = "" // default instance value
+    }
+
+    var queryType string
+    if t, found := qParams["type"]; found {
+        queryType = t[0]
+        if queryType != "cluster" && queryType != "rsync" {
+            http.Error(w, "Invalid query type", http.StatusBadRequest)
+            return
+        }
+    } else {
+        queryType = "rsync" // default type
+    }
+
+    var queryOutput string
+    if o, found := qParams["output"]; found {
+        queryOutput = o[0]
+        if queryOutput != "summary" && queryOutput != "all" && queryOutput != "detail" {
+            http.Error(w, "Invalid query output", http.StatusBadRequest)
+            return
+        }
+    } else {
+        queryOutput = "all" // default output format
+    }
+
+    var queryApps []string
+    if a, found := qParams["app"]; found {
+        queryApps = a
+        for _, app := range queryApps {
+            errs := validation.IsValidName(app)
+            if len(errs) > 0 {
+                http.Error(w, "Invalid app query", http.StatusBadRequest)
+                return
+            }
+        }
+    } else {
+        queryApps = make([]string, 0)
+    }
+
+    var queryClusters []string
+    if c, found := qParams["cluster"]; found {
+        queryClusters = c
+        for _, cl := range queryClusters {
+            parts := strings.Split(cl, "+")
+            if len(parts) != 2 {
+                http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+                return
+            }
+            for _, p := range parts {
+                errs := validation.IsValidName(p)
+                if len(errs) > 0 {
+                    http.Error(w, "Invalid cluster query", http.StatusBadRequest)
+                    return
+                }
+            }
+        }
+    } else {
+        queryClusters = make([]string, 0)
+    }
+
+    var queryResources []string
+    if r, found := qParams["resource"]; found {
+        queryResources = r
+        for _, res := range queryResources {
+            errs := validation.IsValidName(res)
+            if len(errs) > 0 {
+                http.Error(w, "Invalid resources query", http.StatusBadRequest)
+                return
+            }
+        }
+    } else {
+        queryResources = make([]string, 0)
+    }
+
+    status, iErr := h.client.Status(p, ca, v, di, queryInstance, queryType, queryOutput, queryApps, queryClusters, queryResources)
     if iErr != nil {
         http.Error(w, iErr.Error(), http.StatusInternalServerError)
         return
@@ -103,6 +194,4 @@ func (h instantiationHandler) statusHandler(w http.ResponseWriter, r *http.Reque
         http.Error(w, iErr.Error(), http.StatusInternalServerError)
         return
     }
-    w.WriteHeader(http.StatusAccepted)
-
 }
diff --git a/src/orchestrator/go.mod b/src/orchestrator/go.mod
index f79d43dd..fc37f038 100644
--- a/src/orchestrator/go.mod
+++ b/src/orchestrator/go.mod
@@ -12,11 +12,7 @@ require (
 	github.com/golang/protobuf v1.4.1
 	github.com/gorilla/handlers v1.3.0
 	github.com/gorilla/mux v1.7.3
-	github.com/huandu/xstrings v1.3.1 // indirect
-	github.com/jmoiron/sqlx v1.2.0 // indirect
 	github.com/lib/pq v1.6.0 // indirect
-	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
-	github.com/mitchellh/copystructure v1.0.0 // indirect
 	github.com/onap/multicloud-k8s/src/monitor v0.0.0-20200630152613-7c20f73e7c5d
 	github.com/onap/multicloud-k8s/src/ncm v0.0.0-20200515060444-c77850a75eee
 	github.com/onap/multicloud-k8s/src/rsync v0.0.0-20200630152613-7c20f73e7c5d
@@ -36,9 +32,10 @@ require (
 	gopkg.in/yaml.v2 v2.2.8
 	gopkg.in/yaml.v3 v3.0.0-20200506231410-2ff61e1afc86
 	k8s.io/apimachinery v0.18.2
+	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/cloud-provider v0.0.0-20190409023720-1bc0c81fa51d // indirect
 	k8s.io/helm v2.14.3+incompatible
-	sigs.k8s.io/kustomize v2.0.3+incompatible // indirect
+	k8s.io/kubernetes v1.14.1
 )
 
 replace (
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 08250d16..de723242 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -22,11 +22,11 @@ import (
     "fmt"
     "time"
 
-    rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
     gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
     "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
     log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
     "github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/status"
     "github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
     pkgerrors "github.com/pkg/errors"
 )
@@ -45,14 +45,12 @@ type InstantiationClient struct {
     db InstantiationClientDbInfo
 }
 
-type ClusterAppStatus struct {
-    Cluster string
-    App     string
-    Status  rb.ResourceBundleStatus
-}
-
-type StatusData struct {
-    Data []ClusterAppStatus
+type DeploymentStatus struct {
+    Project              string `json:"project,omitempty"`
+    CompositeAppName     string `json:"composite-app-name,omitempty"`
+    CompositeAppVersion  string `json:"composite-app-version,omitempty"`
+    CompositeProfileName string `json:"composite-profile-name,omitempty"`
+    status.StatusResult  `json:",inline"`
 }
 
 /*
@@ -75,7 +73,7 @@ type InstantiationKey struct {
 type InstantiationManager interface {
     Approve(p string, ca string, v string, di string) error
     Instantiate(p string, ca string, v string, di string) error
-    Status(p string, ca string, v string, di string) (StatusData, error)
+    Status(p, ca, v, di, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (DeploymentStatus, error)
     Terminate(p string, ca string, v string, di string) error
 }
@@ -429,64 +427,39 @@ Status takes in projectName, compositeAppName, compositeAppVersion,
 DeploymentIntentName. This method is responsible obtaining the status of the
 deployment, which is made available in the appcontext.
 */
-func (c InstantiationClient) Status(p string, ca string, v string, di string) (StatusData, error) {
+func (c InstantiationClient) Status(p, ca, v, di, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (DeploymentStatus, error) {
 
-    s, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroupState(di, p, ca, v)
+    dIGrp, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroup(di, p, ca, v)
     if err != nil {
-        return StatusData{}, pkgerrors.Wrap(err, "deploymentIntentGroup not found: "+di)
+        return DeploymentStatus{}, pkgerrors.Wrap(err, "Not finding the deploymentIntentGroup")
     }
 
-    currentCtxId := state.GetLastContextIdFromStateInfo(s)
-    ac, err := state.GetAppContextFromId(currentCtxId)
+    diState, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroupState(di, p, ca, v)
     if err != nil {
-        return StatusData{}, pkgerrors.Wrap(err, "AppContext for deploymentIntentGroup not found: "+di)
+        return DeploymentStatus{}, pkgerrors.Wrap(err, "deploymentIntentGroup state not found: "+di)
     }
 
     // Get all apps in this composite app
-    allApps, err := NewAppClient().GetApps(p, ca, v)
+    apps, err := NewAppClient().GetApps(p, ca, v)
     if err != nil {
-        return StatusData{}, pkgerrors.Wrap(err, "Not finding the apps")
+        return DeploymentStatus{}, pkgerrors.Wrap(err, "Not finding the apps")
+    }
+    allApps := make([]string, 0)
+    for _, a := range apps {
+        allApps = append(allApps, a.Metadata.Name)
     }
 
-    var diStatus StatusData
-    diStatus.Data = make([]ClusterAppStatus, 0)
-
-    // Loop through each app and get the status data for each cluster in the app
-    for _, app := range allApps {
-        // Get the clusters in the appcontext for this app
-        clusters, err := ac.GetClusterNames(app.Metadata.Name)
-        if err != nil {
-            log.Info(":: No clusters for app ::", log.Fields{"AppName": app.Metadata.Name})
-            continue
-        }
-
-        for _, cluster := range clusters {
-            handle, err := ac.GetClusterStatusHandle(app.Metadata.Name, cluster)
-            if err != nil {
-                log.Info(":: No status handle for cluster, app ::",
-                    log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
-                continue
-            }
-            statusValue, err := ac.GetValue(handle)
-            if err != nil {
-                log.Info(":: No status value for cluster, app ::",
-                    log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
-                continue
-            }
-            log.Info(":: STATUS VALUE ::", log.Fields{"statusValue": statusValue})
-            var statusData ClusterAppStatus
-            err = json.Unmarshal([]byte(statusValue.(string)), &statusData.Status)
-            if err != nil {
-                log.Info(":: Error unmarshaling status value for cluster, app ::",
-                    log.Fields{"Cluster": cluster, "AppName": app.Metadata.Name, "Error": err})
-                continue
-            }
-            statusData.Cluster = cluster
-            statusData.App = app.Metadata.Name
-            log.Info(":: STATUS DATA ::", log.Fields{"status": statusData})
-
-            diStatus.Data = append(diStatus.Data, statusData)
-        }
+    statusResponse, err := status.PrepareStatusResult(diState, allApps, qInstance, qType, qOutput, qApps, qClusters, qResources)
+    if err != nil {
+        return DeploymentStatus{}, err
+    }
+    statusResponse.Name = di
+    diStatus := DeploymentStatus{
+        Project:              p,
+        CompositeAppName:     ca,
+        CompositeAppVersion:  v,
+        CompositeProfileName: dIGrp.Spec.Profile,
+        StatusResult:         statusResponse,
     }
 
     return diStatus, nil
diff --git a/src/orchestrator/pkg/state/types.go b/src/orchestrator/pkg/state/types.go
index 665a1be4..99f0adca 100644
--- a/src/orchestrator/pkg/state/types.go
+++ b/src/orchestrator/pkg/state/types.go
@@ -22,15 +22,15 @@ import "time"
 // information about resources which can be instantiated via rsync.
 // The last Actions entry holds the current state of the container object.
 type StateInfo struct {
-    Actions []ActionEntry
+    Actions []ActionEntry `json:"actions"`
 }
 
 // ActionEntry is used to keep track of the time an action (e.g. Created, Instantiate, Terminate) was invoked
 // For actions where an AppContext is relevent, the ContextId field will be non-zero length
 type ActionEntry struct {
-    State     StateValue
-    ContextId string
-    TimeStamp time.Time
+    State     StateValue `json:"state"`
+    ContextId string     `json:"instance"`
+    TimeStamp time.Time  `json:"time"`
 }
 
 type StateValue = string
diff --git a/src/orchestrator/pkg/status/status_helper.go b/src/orchestrator/pkg/status/status_helper.go
new file mode 100644
index 00000000..a791493e
--- /dev/null
+++ b/src/orchestrator/pkg/status/status_helper.go
@@ -0,0 +1,482 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package status
+
+import (
+    "encoding/json"
+    "fmt"
+    "strings"
+
+    rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
+    "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/clientset/versioned/scheme"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+    log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/resourcestatus"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+    pkgerrors "github.com/pkg/errors"
+    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/runtime"
+)
+
+// decodeYAML reads a YAMl []byte to extract the Kubernetes object definition
+func decodeYAML(y []byte, into runtime.Object) (runtime.Object, error) {
+    decode := scheme.Codecs.UniversalDeserializer().Decode
+    obj, _, err := decode(y, nil, into)
+    if err != nil {
+        return nil, pkgerrors.Wrap(err, "Deserialize YAML error")
+    }
+
+    return obj, nil
+}
+
+func getUnstruct(y []byte) (unstructured.Unstructured, error) {
+    //Decode the yaml file to create a runtime.Object
+    unstruct := unstructured.Unstructured{}
+    //Ignore the returned obj as we expect the data in unstruct
+    _, err := decodeYAML(y, &unstruct)
+    if err != nil {
+        log.Info(":: Error decoding YAML ::", log.Fields{"object": y, "error": err})
+        return unstructured.Unstructured{}, pkgerrors.Wrap(err, "Decode object error")
+    }
+
+    return unstruct, nil
+}
+
+// GetClusterResources takes in a ResourceBundleStatus CR and resturns a list of ResourceStatus elments
+func GetClusterResources(rbData rb.ResourceBundleStatus, qOutput string, qResources []string,
+    resourceList *[]ResourceStatus, cnts map[string]int) (int, error) {
+
+    count := 0
+
+    for _, p := range rbData.PodStatuses {
+        if !keepResource(p.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = p.Name
+        r.Gvk = (&p.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = p
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, s := range rbData.ServiceStatuses {
+        if !keepResource(s.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = s.Name
+        r.Gvk = (&s.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = s
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, d := range rbData.DeploymentStatuses {
+        if !keepResource(d.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = d.Name
+        r.Gvk = (&d.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = d
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, c := range rbData.ConfigMapStatuses {
+        if !keepResource(c.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = c.Name
+        r.Gvk = (&c.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = c
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, s := range rbData.SecretStatuses {
+        if !keepResource(s.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = s.Name
+        r.Gvk = (&s.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = s
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, d := range rbData.DaemonSetStatuses {
+        if !keepResource(d.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = d.Name
+        r.Gvk = (&d.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = d
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, i := range rbData.IngressStatuses {
+        if !keepResource(i.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = i.Name
+        r.Gvk = (&i.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = i
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, j := range rbData.JobStatuses {
+        if !keepResource(j.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = j.Name
+        r.Gvk = (&j.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = j
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    for _, s := range rbData.StatefulSetStatuses {
+        if !keepResource(s.Name, qResources) {
+            continue
+        }
+        r := ResourceStatus{}
+        r.Name = s.Name
+        r.Gvk = (&s.TypeMeta).GroupVersionKind()
+        if qOutput == "detail" {
+            r.Detail = s
+        }
+        *resourceList = append(*resourceList, r)
+        count++
+        cnt := cnts["Present"]
+        cnts["Present"] = cnt + 1
+    }
+
+    return count, nil
+}
+
+// isResourceHandle takes a cluster handle and determines if the other handle parameter is a resource handle for this cluster
+// handle. It does this by verifying that the cluster handle is a prefix of the handle and that the remainder of the handle
+// is a value that matches to a resource format: "resource/<name>+<type>/"
+// Example cluster handle:
+// /context/6385596659306465421/app/network-intents/cluster/vfw-cluster-provider+edge01/
+// Example resource handle:
+// /context/6385596659306465421/app/network-intents/cluster/vfw-cluster-provider+edge01/resource/emco-private-net+ProviderNetwork/
+func isResourceHandle(ch, h interface{}) bool {
+    clusterHandle := fmt.Sprintf("%v", ch)
+    handle := fmt.Sprintf("%v", h)
+    diff := strings.Split(handle, clusterHandle)
+
+    if len(diff) != 2 && diff[0] != "" {
+        return false
+    }
+
+    parts := strings.Split(diff[1], "/")
+
+    if len(parts) == 3 &&
+        parts[0] == "resource" &&
+        len(strings.Split(parts[1], "+")) == 2 &&
+        parts[2] == "" {
+        return true
+    } else {
+        return false
+    }
+}
+
+// keepResource keeps a resource if the filter list is empty or if the resource is part of the list
+func keepResource(r string, rList []string) bool {
+    if len(rList) == 0 {
+        return true
+    }
+    for _, res := range rList {
+        if r == res {
+            return true
+        }
+    }
+    return false
+}
+
+// GetAppContextResources collects the resource status of all resources in an AppContext subject to the filter parameters
+func GetAppContextResources(ac appcontext.AppContext, ch interface{}, qOutput string, qResources []string, resourceList *[]ResourceStatus, statusCnts map[string]int) (int, error) {
+    count := 0
+
+    // Get all Resources for the Cluster
+    hs, err := ac.GetAllHandles(ch)
+    if err != nil {
+        log.Info(":: Error getting all handles ::", log.Fields{"handles": ch, "error": err})
+        return 0, err
+    }
+
+    for _, h := range hs {
+        // skip any handles that are not resource handles
+        if !isResourceHandle(ch, h) {
+            continue
+        }
+
+        // Get Resource from AppContext
+        res, err := ac.GetValue(h)
+        if err != nil {
+            log.Info(":: Error getting resource value ::", log.Fields{"Handle": h})
+            continue
+        }
+
+        // Get Resource Status from AppContext
+        sh, err := ac.GetLevelHandle(h, "status")
+        if err != nil {
+            log.Info(":: No status handle for resource ::", log.Fields{"Handle": h})
+            continue
+        }
+        s, err := ac.GetValue(sh)
+        if err != nil {
+            log.Info(":: Error getting resource status value ::", log.Fields{"Handle": sh})
+            continue
+        }
+        rstatus := resourcestatus.ResourceStatus{}
+        js, err := json.Marshal(s)
+        if err != nil {
+            log.Info(":: Non-JSON status data for resource ::", log.Fields{"Handle": sh, "Value": s})
+            continue
+        }
+        err = json.Unmarshal(js, &rstatus)
+        if err != nil {
+            log.Info(":: Invalid status data for resource ::", log.Fields{"Handle": sh, "Value": s})
+            continue
+        }
+
+        // Get the unstructured object
+        unstruct, err := getUnstruct([]byte(res.(string)))
+        if err != nil {
+            log.Info(":: Error getting GVK ::", log.Fields{"Resource": res, "error": err})
+            continue
+        }
+        if !keepResource(unstruct.GetName(), qResources) {
+            continue
+        }
+
+        // Make and fill out a ResourceStatus structure
+        r := ResourceStatus{}
+        r.Gvk = unstruct.GroupVersionKind()
+        r.Name = unstruct.GetName()
+        if qOutput == "detail" {
+            r.Detail = unstruct.Object
+        }
+        r.RsyncStatus = fmt.Sprintf("%v", rstatus.Status)
+        *resourceList = append(*resourceList, r)
+        cnt := statusCnts[rstatus.Status]
+        statusCnts[rstatus.Status] = cnt + 1
+        count++
+    }
+
+    return count, nil
+}
+
+// PrepareStatusResult takes in a resource stateInfo object, the list of apps and the query parameters.
+// It then fills out the StatusResult structure appropriately from information in the AppContext
+func PrepareStatusResult(stateInfo state.StateInfo, apps []string, qInstance, qType, qOutput string, qApps, qClusters, qResources []string) (StatusResult, error) {
+
+    var currentCtxId string
+    if qInstance != "" {
+        currentCtxId = qInstance
+    } else {
+        currentCtxId = state.GetLastContextIdFromStateInfo(stateInfo)
+    }
+    ac, err := state.GetAppContextFromId(currentCtxId)
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "AppContext for status query not found")
+    }
+
+    // get the appcontext status value
+    h, err := ac.GetCompositeAppHandle()
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "AppContext handle not found")
+    }
+    sh, err := ac.GetLevelHandle(h, "status")
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "AppContext status handle not found")
+    }
+    statusVal, err := ac.GetValue(sh)
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "AppContext status value not found")
+    }
+    acStatus := appcontext.AppContextStatus{}
+    js, err := json.Marshal(statusVal)
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "Invalid AppContext status value format")
+    }
+    err = json.Unmarshal(js, &acStatus)
+    if err != nil {
+        return StatusResult{}, pkgerrors.Wrap(err, "Invalid AppContext status value format")
+    }
+
+    statusResult := StatusResult{}
+
+    statusResult.Apps = make([]AppStatus, 0)
+    statusResult.State = stateInfo
+    statusResult.Status = acStatus.Status
+
+    rsyncStatusCnts := make(map[string]int)
+    clusterStatusCnts := make(map[string]int)
+    // Loop through each app and get the status data for each cluster in the app
+    for _, app := range apps {
+        appCount := 0
+        if len(qApps) > 0 {
+            found := false
+            for _, a := range qApps {
+                if a == app {
+                    found = true
+                    break
+                }
+            }
+            if !found {
+                continue
+            }
+        }
+        // Get the clusters in the appcontext for this app
+        clusters, err := ac.GetClusterNames(app)
+        if err != nil {
+            continue
+        }
+        var appStatus AppStatus
+        appStatus.Name = app
+        appStatus.Clusters = make([]ClusterStatus, 0)
+
+        for _, cluster := range clusters {
+            clusterCount := 0
+            if len(qClusters) > 0 {
+                found := false
+                for _, c := range qClusters {
+                    if c == cluster {
+                        found = true
+                        break
+                    }
+                }
+                if !found {
+                    continue
+                }
+            }
+
+            var clusterStatus ClusterStatus
+            pc := strings.Split(cluster, "+")
+            clusterStatus.ClusterProvider = pc[0]
+            clusterStatus.Cluster = pc[1]
+
+            if qType == "cluster" {
+                csh, err := ac.GetClusterStatusHandle(app, cluster)
+                if err != nil {
+                    log.Info(":: No cluster status handle for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+                clusterRbValue, err := ac.GetValue(csh)
+                if err != nil {
+                    log.Info(":: No cluster status value for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+                var rbValue rb.ResourceBundleStatus
+                err = json.Unmarshal([]byte(clusterRbValue.(string)), &rbValue)
+                if err != nil {
+                    log.Info(":: Error unmarshaling cluster status value for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+
+                clusterStatus.Resources = make([]ResourceStatus, 0)
+                cnt, err := GetClusterResources(rbValue, qOutput, qResources, &clusterStatus.Resources, clusterStatusCnts)
+                if err != nil {
+                    log.Info(":: Error gathering cluster resources for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+                appCount += cnt
+                clusterCount += cnt
+            } else if qType == "rsync" {
+                ch, err := ac.GetClusterHandle(app, cluster)
+                if err != nil {
+                    log.Info(":: No handle for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+
+                /* code to get status for resources from AppContext */
+                clusterStatus.Resources = make([]ResourceStatus, 0)
+                cnt, err := GetAppContextResources(ac, ch, qOutput, qResources, &clusterStatus.Resources, rsyncStatusCnts)
+                if err != nil {
+                    log.Info(":: Error gathering appcontext resources for cluster, app ::",
+                        log.Fields{"Cluster": cluster, "AppName": app, "Error": err})
+                    continue
+                }
+                appCount += cnt
+                clusterCount += cnt
+            } else {
+                log.Info(":: Invalid status type ::", log.Fields{"Status Type": qType})
+                continue
+            }
+
+            if clusterCount > 0 {
+                appStatus.Clusters = append(appStatus.Clusters, clusterStatus)
+            }
+        }
+        if appCount > 0 && qOutput != "summary" {
+            statusResult.Apps = append(statusResult.Apps, appStatus)
+        }
+    }
+    statusResult.RsyncStatus = rsyncStatusCnts
+    statusResult.ClusterStatus = clusterStatusCnts
+
+    return statusResult, nil
+}
diff --git a/src/orchestrator/pkg/status/types.go b/src/orchestrator/pkg/status/types.go
new file mode 100644
index 00000000..91a4bc12
--- /dev/null
+++ b/src/orchestrator/pkg/status/types.go
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package status
+
+import (
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+    "github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// StatusQueryParam defines the type of the query parameter
+type StatusQueryParam = string
+type queryparams struct {
+    Instance StatusQueryParam // identify which AppContext to use - default is latest
+    Summary  StatusQueryParam // only show high level summary
+    All      StatusQueryParam // include basic resource information
+    Detail   StatusQueryParam // show resource details
+    Rsync    StatusQueryParam // select rsync (appcontext) data as source for query
+    App      StatusQueryParam // filter results by specified app(s)
+    Cluster  StatusQueryParam // filter results by specified cluster(s)
+    Resource StatusQueryParam // filter results by specified resource(s)
+}
+
+// StatusQueryEnum defines the set of valid query parameter strings
+var StatusQueryEnum = &queryparams{
+    Instance: "instance",
+    Summary:  "summary",
+    All:      "all",
+    Detail:   "detail",
+    Rsync:    "rsync",
+    App:      "app",
+    Cluster:  "cluster",
+    Resource: "resource",
+}
+
+type StatusResult struct {
+    Name          string                 `json:"name,omitempty,inline"`
+    State         state.StateInfo        `json:"states,omitempty,inline"`
+    Status        appcontext.StatusValue `json:"status,omitempty,inline"`
+    RsyncStatus   map[string]int         `json:"rsync-status,omitempty,inline"`
+    ClusterStatus map[string]int         `json:"cluster-status,omitempty,inline"`
+    Apps          []AppStatus            `json:"apps,omitempty,inline"`
+}
+
+type AppStatus struct {
+    Name     string          `json:"name,omitempty"`
+    Clusters []ClusterStatus `json:"clusters,omitempty"`
+}
+
+type ClusterStatus struct {
+    ClusterProvider string           `json:"cluster-provider,omitempty"`
+    Cluster         string           `json:"cluster,omitempty"`
+    Resources       []ResourceStatus `json:"resources,omitempty"`
+}
+
+type ResourceStatus struct {
+    Gvk           schema.GroupVersionKind `json:"GVK,omitempty"`
+    Name          string                  `json:"name,omitempty"`
+    Detail        interface{}             `json:"detail,omitempty"`
+    RsyncStatus   string                  `json:"rsync-status,omitempty"`
+    ClusterStatus string                  `json:"cluster-status,omitempty"`
+}
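Putting the new pkg/status types together: a speculative sketch of decoding a status response into StatusResult. Every field value in the embedded JSON is illustrative only; the field names follow the struct tags defined above.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/onap/multicloud-k8s/src/orchestrator/pkg/status"
)

func main() {
	// Made-up response body shaped like StatusResult / AppStatus /
	// ClusterStatus / ResourceStatus.
	raw := []byte(`{
	  "name": "vfw_deployment_intent_group",
	  "rsync-status": {"Applied": 3},
	  "apps": [{
	    "name": "firewall",
	    "clusters": [{
	      "cluster-provider": "vfw-cluster-provider",
	      "cluster": "edge01",
	      "resources": [{
	        "GVK": {"Group": "apps", "Version": "v1", "Kind": "Deployment"},
	        "name": "fw-dep",
	        "rsync-status": "Applied"
	      }]
	    }]
	  }]
	}`)

	var res status.StatusResult
	if err := json.Unmarshal(raw, &res); err != nil {
		panic(err)
	}
	for _, app := range res.Apps {
		for _, c := range app.Clusters {
			fmt.Printf("%s on %s/%s: %d resources\n",
				app.Name, c.ClusterProvider, c.Cluster, len(c.Resources))
		}
	}
}
```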