| author | Eric Multanen <eric.w.multanen@intel.com> | 2020-07-16 13:12:03 -0700 |
|---|---|---|
| committer | Eric Multanen <eric.w.multanen@intel.com> | 2020-07-16 16:07:31 -0700 |
| commit | 97c6ca4bfdfa4f5e367649ebf6a4e2c583e2cb78 | |
| tree | 9adeb20034b02282155ada7bca735888d8f082a1 | |
| parent | ad17b4360890fc2915795515ac265fc66720f4ad | |
Move status tracking CR to rsync
Handle creation and deletion of the ResourceBundleState
Custom Resource per app/cluster in the rsync microservice
instead of adding it to the resources of the composite
application in the orchestrator service.
Issue-ID: MULTICLOUD-1125
Signed-off-by: Eric Multanen <eric.w.multanen@intel.com>
Change-Id: Id0c0639ff62e788628e78f972107a7603c27abfb
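For orientation, this is roughly the ResourceBundleState CR that rsync now applies to each cluster. The sketch below is not part of the commit: it builds the CR the same way the GetStatusCR helper added in src/rsync/pkg/status/status.go does, using a made-up deployment id `1234-firewall`, and prints the YAML that would be handed to the cluster (the real output also carries empty metadata/status fields such as creationTimestamp).

```go
package main

import (
	"encoding/json"
	"fmt"

	yaml "github.com/ghodss/yaml"
	"github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build the status CR the same way GetStatusCR (below) does,
	// for a made-up deployment id "1234-firewall".
	var cr v1alpha1.ResourceBundleState
	cr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
	cr.TypeMeta.Kind = "ResourceBundleState"
	cr.SetName("1234-firewall")
	cr.SetLabels(map[string]string{"emco/deployment-id": "1234-firewall"})

	sel, err := metav1.ParseToLabelSelector("emco/deployment-id = 1234-firewall")
	if err != nil {
		panic(err)
	}
	cr.Spec.Selector = sel

	// Prints roughly:
	//   apiVersion: k8splugin.io/v1alpha1
	//   kind: ResourceBundleState
	//   metadata:
	//     labels:
	//       emco/deployment-id: 1234-firewall
	//     name: 1234-firewall
	//   spec:
	//     selector:
	//       matchLabels:
	//         emco/deployment-id: 1234-firewall
	j, _ := json.Marshal(&cr)
	y, _ := yaml.JSONToYAML(j)
	fmt.Print(string(y))
}
```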
Diffstat

| mode | path | changed |
|---|---|---|
| -rw-r--r-- | kud/tests/vfw-test-setenv.sh | 3 |
| -rwxr-xr-x | src/monitor/deploy/monitor-deploy.sh | 1 |
| -rw-r--r-- | src/orchestrator/pkg/module/instantiation.go | 6 |
| -rw-r--r-- | src/orchestrator/pkg/module/instantiation_appcontext_helper.go | 45 |
| -rw-r--r-- | src/rsync/pkg/context/context.go | 73 |
| -rw-r--r-- | src/rsync/pkg/status/status.go | 49 |
6 files changed, 110 insertions, 67 deletions
diff --git a/kud/tests/vfw-test-setenv.sh b/kud/tests/vfw-test-setenv.sh
index 77031f9f..4d2a0078 100644
--- a/kud/tests/vfw-test-setenv.sh
+++ b/kud/tests/vfw-test-setenv.sh
@@ -1,7 +1,8 @@
 export packetgen_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/packetgen.tar.gz
 export firewall_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/firewall.tar.gz
 export sink_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/sink.tar.gz
-export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge02
+export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge01
+export kubeconfigfile2=/home/vagrant/multicloud-k8s/cluster-configs/config-edge02
 export packetgen_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
 export firewall_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
 export sink_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz

diff --git a/src/monitor/deploy/monitor-deploy.sh b/src/monitor/deploy/monitor-deploy.sh
index 47c7120f..6134e4da 100755
--- a/src/monitor/deploy/monitor-deploy.sh
+++ b/src/monitor/deploy/monitor-deploy.sh
@@ -1,3 +1,4 @@
+kubectl apply -f crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
 kubectl apply -f role.yaml
 kubectl apply -f cluster_role.yaml
 kubectl apply -f role_binding.yaml

diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 1f2e1117..f4e75861 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -246,12 +246,6 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
             return pkgerrors.Wrapf(err, "Unable to get the resources for app :: %s", eachApp.Metadata.Name)
         }

-        statusResource, err := getStatusResource(ctxval.(string), eachApp.Metadata.Name)
-        if err != nil {
-            return pkgerrors.Wrapf(err, "Unable to generate the status resource for app :: %s", eachApp.Metadata.Name)
-        }
-        resources = append(resources, statusResource)
-
         specData, err := NewAppIntentClient().GetAllIntentsByApp(eachApp.Metadata.Name, p, ca, v, gIntent)
         if err != nil {
             return pkgerrors.Wrap(err, "Unable to get the intents for app")
diff --git a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
index 1cb3f23d..9ace81b6 100644
--- a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
+++ b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
@@ -25,16 +25,12 @@ import (
     "encoding/json"
     "io/ioutil"

-    jyaml "github.com/ghodss/yaml"
-
-    rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
     "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
     gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
     log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
     "github.com/onap/multicloud-k8s/src/orchestrator/utils"
     "github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
     pkgerrors "github.com/pkg/errors"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // resource consists of name of reource
@@ -89,7 +85,7 @@ func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
         // This might happen when the rendered file just has some comments inside, no real k8s object.
         if n == SEPARATOR {
             log.Info(":: Ignoring, Unable to render the template ::", log.Fields{"YAML PATH": t.FilePath})
-            continue;
+            continue
         }
         resources = append(resources, resource{name: n, filecontent: string(yamlFile)})

@@ -99,45 +95,6 @@ func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
     return resources, nil
 }

-// addStatusResource adds a status monitoring resource to the app
-// which consists of name(name+kind) and content
-func getStatusResource(id, app string) (resource, error) {
-
-    var statusCr rb.ResourceBundleState
-
-    label := id + "-" + app
-    name := app + "-" + id
-
-    statusCr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
-    statusCr.TypeMeta.Kind = "ResourceBundleState"
-    statusCr.SetName(name)
-
-    labels := make(map[string]string)
-    labels["emco/deployment-id"] = label
-    statusCr.SetLabels(labels)
-
-    labelSelector, err := metav1.ParseToLabelSelector("emco/deployment-id = " + label)
-    if err != nil {
-        log.Info(":: ERROR Parsing Label Selector ::", log.Fields{"Error": err})
-    } else {
-        statusCr.Spec.Selector = labelSelector
-    }
-
-    // Marshaling to json then convert to yaml works better than marshaling to yaml
-    // The 'apiVersion' attribute was marshaling to 'apiversion'
-    // y, _ := yaml.Marshal(&statusCr)
-    j, _ := json.Marshal(&statusCr)
-    y, _ := jyaml.JSONToYAML(j)
-    log.Info(":: RESULTING STATUS CR ::", log.Fields{"StatusCR": y})
-
-    statusResource := resource{
-        name:        name + "+" + "ResourceBundleState",
-        filecontent: string(y),
-    }
-
-    return statusResource, nil
-}
-
 func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource) error {

     var resOrderInstr struct {
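The comments deleted with getStatusResource record a marshaling gotcha that the new GetStatusCR in rsync still relies on: marshaling the CR straight to YAML produced `apiversion` instead of `apiVersion`. A standalone sketch of the difference — assuming the commented-out `yaml.Marshal` referred to `gopkg.in/yaml.v2`, which ignores json struct tags and lowercases field names:

```go
package main

import (
	"encoding/json"
	"fmt"

	jyaml "github.com/ghodss/yaml"
	yaml2 "gopkg.in/yaml.v2"
)

// TypeMeta mirrors the json tags on metav1.TypeMeta.
type TypeMeta struct {
	APIVersion string `json:"apiVersion"`
	Kind       string `json:"kind"`
}

func main() {
	t := TypeMeta{APIVersion: "k8splugin.io/v1alpha1", Kind: "ResourceBundleState"}

	// gopkg.in/yaml.v2 ignores json tags and lowercases field names:
	//   apiversion: k8splugin.io/v1alpha1
	d, _ := yaml2.Marshal(&t)
	fmt.Print(string(d))

	// Marshaling to JSON first honors the tags; JSONToYAML keeps them intact:
	//   apiVersion: k8splugin.io/v1alpha1
	j, _ := json.Marshal(&t)
	y, _ := jyaml.JSONToYAML(j)
	fmt.Print(string(y))
}
```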
diff --git a/src/rsync/pkg/context/context.go b/src/rsync/pkg/context/context.go
index 3ce6ee9b..530802ab 100644
--- a/src/rsync/pkg/context/context.go
+++ b/src/rsync/pkg/context/context.go
@@ -129,9 +129,63 @@ func instantiateResource(ac appcontext.AppContext, c *kubeclient.Client, name st
     return nil
 }

+func addStatusTracker(c *kubeclient.Client, app string, cluster string, label string) error {
+
+    b, err := status.GetStatusCR(label)
+    if err != nil {
+        logutils.Error("Failed to get status CR for installing", logutils.Fields{
+            "error": err,
+            "label": label,
+        })
+        return err
+    }
+    if err = c.Apply(b); err != nil {
+        logutils.Error("Failed to apply status tracker", logutils.Fields{
+            "error":   err,
+            "cluster": cluster,
+            "app":     app,
+            "label":   label,
+        })
+        return err
+    }
+    logutils.Info("Status tracker installed::", logutils.Fields{
+        "cluster": cluster,
+        "app":     app,
+        "label":   label,
+    })
+    return nil
+}
+
+func deleteStatusTracker(c *kubeclient.Client, app string, cluster string, label string) error {
+    b, err := status.GetStatusCR(label)
+    if err != nil {
+        logutils.Error("Failed to get status CR for deleting", logutils.Fields{
+            "error": err,
+            "label": label,
+        })
+        return err
+    }
+    if err = c.Delete(b); err != nil {
+        logutils.Error("Failed to delete res", logutils.Fields{
+            "error": err,
+            "app":   app,
+            "label": label,
+        })
+        return err
+    }
+    logutils.Info("Status tracker deleted::", logutils.Fields{
+        "cluster": cluster,
+        "app":     app,
+        "label":   label,
+    })
+    return nil
+}
+
 type fn func(ac appcontext.AppContext, client *kubeclient.Client, res string, app string, cluster string, label string) error

-func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError bool) error {
+type statusfn func(client *kubeclient.Client, app string, cluster string, label string) error
+
+func applyFnComApp(cid interface{}, con *connector.Connector, f fn, sfn statusfn, breakonError bool) error {
     ac := appcontext.AppContext{}
     g, _ := errgroup.WithContext(context.Background())
     _, err := ac.LoadAppContext(cid)
@@ -184,7 +238,7 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
             }
             var aov map[string][]string
             json.Unmarshal([]byte(resorder.(string)), &aov)
-            for _, res := range aov["resorder"] {
+            for i, res := range aov["resorder"] {
                 err = f(ac, c, res, appName, cluster, label)
                 if err != nil {
                     logutils.Error("Error in resource %s: %v", logutils.Fields{
@@ -193,10 +247,21 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
                         "resource": res,
                     })
                     if breakonError {
+                        // handle status tracking before exiting if at least one resource got handled
+                        if i > 0 {
+                            serr := sfn(c, appName, cluster, label)
+                            if serr != nil {
+                                logutils.Warn("Error handling status tracker", logutils.Fields{"error": serr})
+                            }
+                        }
                         return err
                     }
                 }
             }
+            serr := sfn(c, appName, cluster, label)
+            if serr != nil {
+                logutils.Warn("Error handling status tracker", logutils.Fields{"error": serr})
+            }
             return nil
         })
     }
@@ -221,7 +286,7 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
 // InstantiateComApp Instantiate Apps in Composite App
 func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error {
     con := connector.Init(cid)
-    err := applyFnComApp(cid, con, instantiateResource, true)
+    err := applyFnComApp(cid, con, instantiateResource, addStatusTracker, true)
     if err != nil {
         logutils.Error("InstantiateComApp unsuccessful", logutils.Fields{"error": err})
         return err
@@ -234,7 +299,7 @@ func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error {
 // TerminateComApp Terminates Apps in Composite App
 func (instca *CompositeAppContext) TerminateComApp(cid interface{}) error {
     con := connector.Init(cid)
-    err := applyFnComApp(cid, con, terminateResource, false)
+    err := applyFnComApp(cid, con, terminateResource, deleteStatusTracker, false)
     if err != nil {
         logutils.Error("TerminateComApp unsuccessful", logutils.Fields{
             "error": err,
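The `statusfn` hook and the `i > 0` guard above encode a deliberate policy: the tracker is applied after all resources on success, and also on a mid-loop failure with breakonError set, but only if at least one resource actually reached the cluster; hook failures are warned about, never fatal. A minimal, dependency-free sketch of that control flow (all names here are illustrative, not from the repo):

```go
package main

import (
	"errors"
	"fmt"
)

// applyWithStatusHook mirrors the breakonError=true path added to
// applyFnComApp: apply resources in order, then run the status hook;
// on a failure after at least one successful apply (i > 0), still run
// the hook so the partially instantiated app gets a tracker.
func applyWithStatusHook(resources []string, apply func(string) error, hook func() error) error {
	for i, r := range resources {
		if err := apply(r); err != nil {
			if i > 0 {
				if serr := hook(); serr != nil {
					fmt.Println("warn: status hook failed:", serr)
				}
			}
			return err
		}
	}
	// Hook failures on the happy path are logged but do not fail the apply.
	if serr := hook(); serr != nil {
		fmt.Println("warn: status hook failed:", serr)
	}
	return nil
}

func main() {
	res := []string{"deployment+packetgen", "service+packetgen"}
	err := applyWithStatusHook(res,
		func(r string) error {
			if r == "service+packetgen" {
				return errors.New("apply failed")
			}
			fmt.Println("applied", r)
			return nil
		},
		func() error { fmt.Println("status tracker handled"); return nil })
	fmt.Println("result:", err)
}
```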
diff --git a/src/rsync/pkg/status/status.go b/src/rsync/pkg/status/status.go
index 351da027..8c1e12be 100644
--- a/src/rsync/pkg/status/status.go
+++ b/src/rsync/pkg/status/status.go
@@ -23,6 +23,7 @@ import (
     "strings"
     "sync"

+    yaml "github.com/ghodss/yaml"
     pkgerrors "github.com/pkg/errors"
     "github.com/sirupsen/logrus"

@@ -31,6 +32,7 @@ import (
     clientset "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/clientset/versioned"
     informers "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/informers/externalversions"
     appcontext "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/clientcmd"
 )
@@ -45,11 +47,7 @@ var channelData channelManager
 const monitorLabel = "emco/deployment-id"

 // HandleStatusUpdate for an application in a cluster
-// TODO: Add code for specific handling
 func HandleStatusUpdate(clusterId string, id string, v *v1alpha1.ResourceBundleState) {
-    //status := v.Status.ServiceStatuses
-    //podStatus := v.Status.PodStatuses
-
     // Get the contextId from the label (id)
     result := strings.SplitN(id, "-", 2)
     if result[0] == "" {
@@ -84,18 +82,12 @@ func HandleStatusUpdate(clusterId string, id string, v *v1alpha1.ResourceBundleS
     }

     // Get the handle for the context/app/cluster status object
-    handle, err := ac.GetStatusHandle(result[1], clusterId)
-    if err != nil {
-        // Expected first time
-        logrus.Info(clusterId, "::Status context handle not found::", id, "::Error::", err)
-    }
+    handle, _ := ac.GetStatusHandle(result[1], clusterId)

     // If status handle was not found, then create the status object in the appcontext
     if handle == nil {
         chandle, err := ac.GetClusterHandle(result[1], clusterId)
-        if err != nil {
-            logrus.Info(clusterId, "::Cluster context handle not found::", id, "::Error::", err)
-        } else {
+        if err == nil {
             ac.AddStatus(chandle, string(vjson))
         }
     } else {
@@ -220,3 +212,36 @@ func getKubeConfig(clustername string) ([]byte, error) {
     }
     return dec, nil
 }
+
+// GetStatusCR returns a status monitoring customer resource
+func GetStatusCR(label string) ([]byte, error) {
+
+    var statusCr v1alpha1.ResourceBundleState
+
+    statusCr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
+    statusCr.TypeMeta.Kind = "ResourceBundleState"
+    statusCr.SetName(label)
+
+    labels := make(map[string]string)
+    labels["emco/deployment-id"] = label
+    statusCr.SetLabels(labels)
+
+    labelSelector, err := metav1.ParseToLabelSelector("emco/deployment-id = " + label)
+    if err != nil {
+        return nil, err
+    }
+    statusCr.Spec.Selector = labelSelector
+
+    // Marshaling to json then convert to yaml works better than marshaling to yaml
+    // The 'apiVersion' attribute was marshaling to 'apiversion'
+    j, err := json.Marshal(&statusCr)
+    if err != nil {
+        return nil, err
+    }
+    y, err := yaml.JSONToYAML(j)
+    if err != nil {
+        return nil, err
+    }
+
+    return y, nil
+}
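One consequence of naming the CR after the label: the monitor reports status back under `<contextID>-<app>`, and HandleStatusUpdate above recovers the appcontext id by splitting on the first dash only. A quick check of that round trip (the context id value is made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The status CR label/name has the form "<contextID>-<app>";
	// HandleStatusUpdate uses SplitN with n=2, so app names that
	// themselves contain dashes survive intact.
	id := "8838701381983283853-composite-firewall"
	result := strings.SplitN(id, "-", 2)
	fmt.Println("contextID:", result[0]) // 8838701381983283853
	fmt.Println("app:", result[1])       // composite-firewall
}
```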