 kud/tests/vfw-test-setenv.sh                                   |  3
 src/monitor/deploy/monitor-deploy.sh                           |  1
 src/orchestrator/pkg/module/instantiation.go                   |  6
 src/orchestrator/pkg/module/instantiation_appcontext_helper.go | 43
 src/rsync/pkg/context/context.go                               | 73
 src/rsync/pkg/status/status.go                                 | 49
 6 files changed, 109 insertions(+), 66 deletions(-)
diff --git a/kud/tests/vfw-test-setenv.sh b/kud/tests/vfw-test-setenv.sh
index 77031f9f..4d2a0078 100644
--- a/kud/tests/vfw-test-setenv.sh
+++ b/kud/tests/vfw-test-setenv.sh
@@ -1,7 +1,8 @@
export packetgen_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/packetgen.tar.gz
export firewall_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/firewall.tar.gz
export sink_helm_path=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/sink.tar.gz
-export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge02
+export kubeconfigfile=/home/vagrant/multicloud-k8s/cluster-configs/config-edge01
+export kubeconfigfile2=/home/vagrant/multicloud-k8s/cluster-configs/config-edge02
export packetgen_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
export firewall_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
export sink_profile_targz=/home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
diff --git a/src/monitor/deploy/monitor-deploy.sh b/src/monitor/deploy/monitor-deploy.sh
index 47c7120f..6134e4da 100755
--- a/src/monitor/deploy/monitor-deploy.sh
+++ b/src/monitor/deploy/monitor-deploy.sh
@@ -1,3 +1,4 @@
+kubectl apply -f crds/k8splugin_v1alpha1_resourcebundlestate_crd.yaml
kubectl apply -f role.yaml
kubectl apply -f cluster_role.yaml
kubectl apply -f role_binding.yaml
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 1f2e1117..f4e75861 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -246,12 +246,6 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
return pkgerrors.Wrapf(err, "Unable to get the resources for app :: %s", eachApp.Metadata.Name)
}
- statusResource, err := getStatusResource(ctxval.(string), eachApp.Metadata.Name)
- if err != nil {
- return pkgerrors.Wrapf(err, "Unable to generate the status resource for app :: %s", eachApp.Metadata.Name)
- }
- resources = append(resources, statusResource)
-
specData, err := NewAppIntentClient().GetAllIntentsByApp(eachApp.Metadata.Name, p, ca, v, gIntent)
if err != nil {
return pkgerrors.Wrap(err, "Unable to get the intents for app")
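The hunk above removes status-CR generation from the orchestrator's instantiation path; rsync now creates the tracker per cluster (src/rsync changes below). A minimal sketch of the replacement flow, assuming the status import path implied by the file layout and a stand-in interface in place of the real kubeclient.Client:

package example

import (
	status "github.com/onap/multicloud-k8s/src/rsync/pkg/status"
)

// Applier is a stand-in for the rsync kubeclient.Client used in this diff;
// only the Apply method shown in the diff is assumed here.
type Applier interface {
	Apply(b []byte) error
}

// installStatusTracker is a hypothetical helper showing the new flow:
// build the ResourceBundleState CR from the deployment label and apply it
// to the target cluster, instead of bundling it with the app's resources.
func installStatusTracker(c Applier, label string) error {
	b, err := status.GetStatusCR(label) // CR serialized as YAML
	if err != nil {
		return err
	}
	return c.Apply(b)
}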
diff --git a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
index a8c6eda7..9ace81b6 100644
--- a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
+++ b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
@@ -25,16 +25,12 @@ import (
"encoding/json"
"io/ioutil"
- jyaml "github.com/ghodss/yaml"
-
- rb "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
"github.com/onap/multicloud-k8s/src/orchestrator/utils"
"github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
pkgerrors "github.com/pkg/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// resource consists of name of resource
@@ -99,45 +95,6 @@ func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
return resources, nil
}
-// addStatusResource adds a status monitoring resource to the app
-// which consists of name(name+kind) and content
-func getStatusResource(id, app string) (resource, error) {
-
- var statusCr rb.ResourceBundleState
-
- label := id + "-" + app
- name := app + "-" + id
-
- statusCr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
- statusCr.TypeMeta.Kind = "ResourceBundleState"
- statusCr.SetName(name)
-
- labels := make(map[string]string)
- labels["emco/deployment-id"] = label
- statusCr.SetLabels(labels)
-
- labelSelector, err := metav1.ParseToLabelSelector("emco/deployment-id = " + label)
- if err != nil {
- log.Info(":: ERROR Parsing Label Selector ::", log.Fields{"Error": err})
- } else {
- statusCr.Spec.Selector = labelSelector
- }
-
- // Marshaling to json then convert to yaml works better than marshaling to yaml
- // The 'apiVersion' attribute was marshaling to 'apiversion'
- // y, _ := yaml.Marshal(&statusCr)
- j, _ := json.Marshal(&statusCr)
- y, _ := jyaml.JSONToYAML(j)
- log.Info(":: RESULTING STATUS CR ::", log.Fields{"StatusCR": y})
-
- statusResource := resource{
- name: name + "+" + "ResourceBundleState",
- filecontent: string(y),
- }
-
- return statusResource, nil
-}
-
func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource) error {
var resOrderInstr struct {
diff --git a/src/rsync/pkg/context/context.go b/src/rsync/pkg/context/context.go
index f6a33ad8..cc7773b8 100644
--- a/src/rsync/pkg/context/context.go
+++ b/src/rsync/pkg/context/context.go
@@ -129,9 +129,63 @@ func instantiateResource(ac appcontext.AppContext, c *kubeclient.Client, name st
return nil
}
+func addStatusTracker(c *kubeclient.Client, app string, cluster string, label string) error {
+
+ b, err := status.GetStatusCR(label)
+ if err != nil {
+ logutils.Error("Failed to get status CR for installing", logutils.Fields{
+ "error": err,
+ "label": label,
+ })
+ return err
+ }
+ if err = c.Apply(b); err != nil {
+ logutils.Error("Failed to apply status tracker", logutils.Fields{
+ "error": err,
+ "cluster": cluster,
+ "app": app,
+ "label": label,
+ })
+ return err
+ }
+ logutils.Info("Status tracker installed::", logutils.Fields{
+ "cluster": cluster,
+ "app": app,
+ "label": label,
+ })
+ return nil
+}
+
+func deleteStatusTracker(c *kubeclient.Client, app string, cluster string, label string) error {
+ b, err := status.GetStatusCR(label)
+ if err != nil {
+ logutils.Error("Failed to get status CR for deleting", logutils.Fields{
+ "error": err,
+ "label": label,
+ })
+ return err
+ }
+ if err = c.Delete(b); err != nil {
+ logutils.Error("Failed to delete res", logutils.Fields{
+ "error": err,
+ "app": app,
+ "label": label,
+ })
+ return err
+ }
+ logutils.Info("Status tracker deleted::", logutils.Fields{
+ "cluster": cluster,
+ "app": app,
+ "label": label,
+ })
+ return nil
+}
+
type fn func(ac appcontext.AppContext, client *kubeclient.Client, res string, app string, cluster string, label string) error
-func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError bool) error {
+type statusfn func(client *kubeclient.Client, app string, cluster string, label string) error
+
+func applyFnComApp(cid interface{}, con *connector.Connector, f fn, sfn statusfn, breakonError bool) error {
ac := appcontext.AppContext{}
g, _ := errgroup.WithContext(context.Background())
_, err := ac.LoadAppContext(cid)
@@ -184,7 +238,7 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
}
var aov map[string][]string
json.Unmarshal([]byte(resorder.(string)), &aov)
- for _, res := range aov["resorder"] {
+ for i, res := range aov["resorder"] {
err = f(ac, c, res, appName, cluster, label)
if err != nil {
logutils.Error("Error in resource %s: %v", logutils.Fields{
@@ -193,10 +247,21 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
"resource": res,
})
if breakonError {
+ // handle status tracking before exiting if at least one resource got handled
+ if i > 0 {
+ serr := sfn(c, appName, cluster, label)
+ if serr != nil {
+ logutils.Warn("Error handling status tracker", logutils.Fields{"error": serr})
+ }
+ }
return err
}
}
}
+ serr := sfn(c, appName, cluster, label)
+ if serr != nil {
+ logutils.Warn("Error handling status tracker", logutils.Fields{"error": serr})
+ }
return nil
})
}
@@ -221,7 +286,7 @@ func applyFnComApp(cid interface{}, con *connector.Connector, f fn, breakonError
// InstantiateComApp Instantiate Apps in Composite App
func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error {
con := connector.Init(cid)
- err := applyFnComApp(cid, con, instantiateResource, true)
+ err := applyFnComApp(cid, con, instantiateResource, addStatusTracker, true)
if err != nil {
logutils.Error("InstantiateComApp unsuccessful", logutils.Fields{"error": err})
return err
@@ -234,7 +299,7 @@ func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error {
// TerminateComApp Terminates Apps in Composite App
func (instca *CompositeAppContext) TerminateComApp(cid interface{}) error {
con := connector.Init(cid)
- err := applyFnComApp(cid, con, terminateResource, false)
+ err := applyFnComApp(cid, con, terminateResource, deleteStatusTracker, false)
if err != nil {
logutils.Error("TerminateComApp unsuccessful", logutils.Fields{
"error": err,
diff --git a/src/rsync/pkg/status/status.go b/src/rsync/pkg/status/status.go
index 351da027..8c1e12be 100644
--- a/src/rsync/pkg/status/status.go
+++ b/src/rsync/pkg/status/status.go
@@ -23,6 +23,7 @@ import (
"strings"
"sync"
+ yaml "github.com/ghodss/yaml"
pkgerrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -31,6 +32,7 @@ import (
clientset "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/clientset/versioned"
informers "github.com/onap/multicloud-k8s/src/monitor/pkg/generated/informers/externalversions"
appcontext "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
)
@@ -45,11 +47,7 @@ var channelData channelManager
const monitorLabel = "emco/deployment-id"
// HandleStatusUpdate for an application in a cluster
-// TODO: Add code for specific handling
func HandleStatusUpdate(clusterId string, id string, v *v1alpha1.ResourceBundleState) {
- //status := v.Status.ServiceStatuses
- //podStatus := v.Status.PodStatuses
-
// Get the contextId from the label (id)
result := strings.SplitN(id, "-", 2)
if result[0] == "" {
@@ -84,18 +82,12 @@ func HandleStatusUpdate(clusterId string, id string, v *v1alpha1.ResourceBundleS
}
// Get the handle for the context/app/cluster status object
- handle, err := ac.GetStatusHandle(result[1], clusterId)
- if err != nil {
- // Expected first time
- logrus.Info(clusterId, "::Status context handle not found::", id, "::Error::", err)
- }
+ handle, _ := ac.GetStatusHandle(result[1], clusterId)
// If status handle was not found, then create the status object in the appcontext
if handle == nil {
chandle, err := ac.GetClusterHandle(result[1], clusterId)
- if err != nil {
- logrus.Info(clusterId, "::Cluster context handle not found::", id, "::Error::", err)
- } else {
+ if err == nil {
ac.AddStatus(chandle, string(vjson))
}
} else {
@@ -220,3 +212,36 @@ func getKubeConfig(clustername string) ([]byte, error) {
}
return dec, nil
}
+
+// GetStatusCR returns a status monitoring custom resource
+func GetStatusCR(label string) ([]byte, error) {
+
+ var statusCr v1alpha1.ResourceBundleState
+
+ statusCr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
+ statusCr.TypeMeta.Kind = "ResourceBundleState"
+ statusCr.SetName(label)
+
+ labels := make(map[string]string)
+ labels["emco/deployment-id"] = label
+ statusCr.SetLabels(labels)
+
+ labelSelector, err := metav1.ParseToLabelSelector("emco/deployment-id = " + label)
+ if err != nil {
+ return nil, err
+ }
+ statusCr.Spec.Selector = labelSelector
+
+ // Marshaling to JSON and then converting to YAML works better than
+ // marshaling directly to YAML: the 'apiVersion' attribute was otherwise
+ // being marshaled as 'apiversion'.
+ j, err := json.Marshal(&statusCr)
+ if err != nil {
+ return nil, err
+ }
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ return nil, err
+ }
+
+ return y, nil
+}
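
For reference, a standalone sketch that performs the same CR construction as the new GetStatusCR, using only the types and calls that appear in this diff; the label value is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	yaml "github.com/ghodss/yaml"
	v1alpha1 "github.com/onap/multicloud-k8s/src/monitor/pkg/apis/k8splugin/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	label := "1234567890-firewall" // hypothetical emco/deployment-id label

	var cr v1alpha1.ResourceBundleState
	cr.TypeMeta.APIVersion = "k8splugin.io/v1alpha1"
	cr.TypeMeta.Kind = "ResourceBundleState"
	cr.SetName(label)
	cr.SetLabels(map[string]string{"emco/deployment-id": label})

	sel, err := metav1.ParseToLabelSelector("emco/deployment-id = " + label)
	if err != nil {
		panic(err)
	}
	cr.Spec.Selector = sel

	// Marshal to JSON first, then convert to YAML: marshaling straight to
	// YAML would lowercase 'apiVersion' (per the comment in the diff).
	j, err := json.Marshal(&cr)
	if err != nil {
		panic(err)
	}
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", y)
}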