author    Rajamohan Raj <rajamohan.raj@intel.com>    2020-05-26 20:08:58 +0000
committer Rajamohan Raj <rajamohan.raj@intel.com>    2020-06-01 18:45:54 +0000
commit    c257a136355a794f5bf778f670c041e8958c3608 (patch)
tree      e642dae19b2dd8fade13de9342a9dee76d884589
parent    7959bd4c6fd403cf4ba58bf572b1259267b3c76d (diff)
Adding cluster metadata and saving it in etcd

As part of this patch, we assign groupNumbers to the sets of clusters that
appear under anyOf, in other words the clusters that are optional for
deployment of the app. Also refactored the instantiation flow by separating
out the etcd interactions.

Issue-ID: MULTICLOUD-1064
Signed-off-by: Rajamohan Raj <rajamohan.raj@intel.com>
Change-Id: I21ece189daf6e6b3a7cfdba5df22d57b3d33ca78
-rwxr-xr-x  kud/tests/plugin_ncm_v2.sh                                        61
-rw-r--r--  src/orchestrator/pkg/appcontext/appcontext.go                     90
-rw-r--r--  src/orchestrator/pkg/appcontext/appcontext_test.go                 7
-rw-r--r--  src/orchestrator/pkg/gpic/gpic.go                                 45
-rw-r--r--  src/orchestrator/pkg/module/instantiation.go                     123
-rw-r--r--  src/orchestrator/pkg/module/instantiation_appcontext_helper.go  217
-rw-r--r--  src/orchestrator/pkg/rtcontext/rtcontext.go                       25
7 files changed, 432 insertions(+), 136 deletions(-)
diff --git a/kud/tests/plugin_ncm_v2.sh b/kud/tests/plugin_ncm_v2.sh
index b7d791f3..579fda31 100755
--- a/kud/tests/plugin_ncm_v2.sh
+++ b/kud/tests/plugin_ncm_v2.sh
@@ -44,6 +44,10 @@ clusterName3="clusterName3"
cluster_desc3="cluster_desc3"
clusterName4="clusterName4"
cluster_desc4="cluster_desc4"
+clusterName5="clusterName5"
+cluster_desc5="cluster_desc5"
+clusterName6="clusterName6"
+cluster_desc6="cluster_desc6"
clusterLabel1="clusterLabel1"
clusterLabel2="clusterLabel2"
@@ -54,6 +58,8 @@ clusterLabel2="clusterLabel2"
print_msg "Deleting the clusterLabel1 and clusterLabel2, if they were existing"
delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels/${clusterLabel1}"
delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels/${clusterLabel2}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName5}/labels/${clusterLabel1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels/${clusterLabel2}"
# Above statements delete the clusterLabel1 and clusterLabel2 which are linked to cluster3 and cluster4
print_msg "Deleting the cluster1, cluster2, cluster3, cluster4 if they were existing"
@@ -61,6 +67,8 @@ delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/cluster
delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName2}"
delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}"
delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName5}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}"
print_msg "Deleting the cluster-providers, if they were existing"
delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}"
@@ -172,6 +180,39 @@ call_api -F "metadata=$payload" \
-F "file=@$kubeconfig_path" \
"${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+print_msg "Registering cluster5"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName5}",
+ "description": "${cluster_desc5}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name1}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster6"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName6}",
+ "description": "${cluster_desc6}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+
# END : Register cluster1, cluster2, cluster3 and cluster4
@@ -185,7 +226,7 @@ EOF
)"
call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels"
-print_msg "Adding label to cluster2"
+print_msg "Adding label to cluster4"
payload="$(cat <<EOF
{
"label-name" : "${clusterLabel2}"
@@ -194,3 +235,21 @@ EOF
)"
call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels"
+# BEGIN: adding labels to cluster5 and cluster6. Cluster5 to label1 and cluster6 to label2
+print_msg "Adding label to cluster5"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel1}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName5}/labels"
+
+print_msg "Adding label to cluster6"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel2}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName6}/labels"
\ No newline at end of file
diff --git a/src/orchestrator/pkg/appcontext/appcontext.go b/src/orchestrator/pkg/appcontext/appcontext.go
index 5625446d..98baa0f6 100644
--- a/src/orchestrator/pkg/appcontext/appcontext.go
+++ b/src/orchestrator/pkg/appcontext/appcontext.go
@@ -18,14 +18,15 @@ package appcontext
import (
"fmt"
- "strings"
-
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/rtcontext"
pkgerrors "github.com/pkg/errors"
-
- log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "strings"
)
+// metaGrpPREFIX is used for denoting the clusterMeta level
+const metaGrpPREFIX = "!@#metaGrp"
+
type AppContext struct {
initDone bool
rtcObj rtcontext.RunTimeContext
@@ -140,7 +141,7 @@ func (ac *AppContext) GetAppHandle(appname string) (interface{}, error) {
return nil, pkgerrors.Errorf("No handle was found for the given app")
}
-//Add cluster to the context under app
+// AddCluster adds a cluster to the context under the given app. It takes in the app handle and the cluster name as arguments.
func (ac *AppContext) AddCluster(handle interface{}, clustername string) (interface{}, error) {
h, err := ac.rtc.RtcAddLevel(handle, "cluster", clustername)
if err != nil {
@@ -150,6 +151,85 @@ func (ac *AppContext) AddCluster(handle interface{}, clustername string) (interf
return h, nil
}
+// AddClusterMetaGrp adds the meta info of groupNumber to which a cluster belongs.
+// It takes in cluster handle and groupNumber as arguments
+func (ac *AppContext) AddClusterMetaGrp(ch interface{}, gn string) error {
+ mh, err := ac.rtc.RtcAddOneLevel(ch, metaGrpPREFIX, gn)
+ if err != nil {
+ return err
+ }
+ log.Info(":: Added cluster meta handle ::", log.Fields{"ClusterMetaHandler": mh})
+ return nil
+}
+
+// DeleteClusterMetaGrpHandle deletes the group number to which the cluster belongs; it takes in the cluster handle.
+func (ac *AppContext) DeleteClusterMetaGrpHandle(ch interface{}) error {
+ err := ac.rtc.RtcDeletePrefix(ch)
+ if err != nil {
+ return err
+ }
+ log.Info(":: Deleted cluster meta handle ::", log.Fields{"ClusterMetaHandler": ch})
+ return nil
+}
+
+
+/*
+GetClusterMetaHandle takes in appName and ClusterName as string arguments and returns the ClusterMetaHandle as a string
+*/
+func (ac *AppContext) GetClusterMetaHandle(app string, cluster string) (string, error) {
+ if app == "" {
+ return "", pkgerrors.Errorf("Not a valid run time context app name")
+ }
+ if cluster == "" {
+ return "", pkgerrors.Errorf("Not a valid run time context cluster name")
+ }
+
+ ch, err := ac.GetClusterHandle(app, cluster)
+ if err != nil {
+ return "", err
+ }
+ cmh := fmt.Sprintf("%v", ch) + metaGrpPREFIX + "/"
+ return cmh, nil
+
+}
+
+/*
+GetClusterGroupMap shall take in appName and return a map showing the grouping among the clusters.
+sample output of "GroupMap" :{"1":["cluster_provider1+clusterName3","cluster_provider1+clusterName5"],"2":["cluster_provider2+clusterName4","cluster_provider2+clusterName6"]}
+*/
+func (ac *AppContext) GetClusterGroupMap(an string) (map[string][]string, error) {
+ cl, err := ac.GetClusterNames(an)
+ if err != nil {
+ log.Info(":: Unable to fetch clusterList for app ::", log.Fields{"AppName ": an})
+ return nil, err
+ }
+ rh, err := ac.rtc.RtcGet()
+ if err != nil {
+ return nil, err
+ }
+
+ var gmap = make(map[string][]string)
+ for _, cn := range cl {
+ s := fmt.Sprintf("%v", rh) + "app/" + an + "/cluster/" + cn + "/" + metaGrpPREFIX + "/"
+ var v string
+ err = ac.rtc.RtcGetValue(s, &v)
+ if err != nil {
+ log.Info(":: No group number for cluster ::", log.Fields{"cluster": cn, "Reason": err})
+ continue
+ }
+ gn := fmt.Sprintf("%v", v)
+ log.Info(":: GroupNumber retrieved ::", log.Fields{"GroupNumber": gn})
+
+ cl, found := gmap[gn]
+ if found == false {
+ cl = make([]string, 0)
+ }
+ cl = append(cl, cn)
+ gmap[gn] = cl
+ }
+ return gmap, nil
+}
+
//Delete cluster from the context and everything underneth
func (ac *AppContext) DeleteCluster(handle interface{}) error {
err := ac.rtc.RtcDeletePrefix(handle)
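
For reference, a minimal standalone sketch of the etcd key layout that AddClusterMetaGrp writes and GetClusterGroupMap reads back. This is not part of the patch; the context handle value is made up for illustration, and only the path shape and the resulting map mirror the code above.

    package main

    import "fmt"

    // metaGrpPREFIX mirrors the constant introduced in appcontext.go above.
    const metaGrpPREFIX = "!@#metaGrp"

    func main() {
        // Hypothetical runtime-context root handle; real handles are generated by rtcontext.
        root := "/context/9876543210/"
        app := "app1"
        cluster := "cluster_provider1+clusterName3"

        // Key under which the group number of one cluster is stored.
        key := root + "app/" + app + "/cluster/" + cluster + "/" + metaGrpPREFIX + "/"
        fmt.Println(key)
        // /context/9876543210/app/app1/cluster/cluster_provider1+clusterName3/!@#metaGrp/

        // GetClusterGroupMap inverts these per-cluster entries into groupNumber -> clusters,
        // matching the sample output quoted in its doc comment.
        gmap := map[string][]string{
            "1": {"cluster_provider1+clusterName3", "cluster_provider1+clusterName5"},
            "2": {"cluster_provider2+clusterName4", "cluster_provider2+clusterName6"},
        }
        fmt.Println(gmap)
    }
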
diff --git a/src/orchestrator/pkg/appcontext/appcontext_test.go b/src/orchestrator/pkg/appcontext/appcontext_test.go
index 07a13d0b..05c73703 100644
--- a/src/orchestrator/pkg/appcontext/appcontext_test.go
+++ b/src/orchestrator/pkg/appcontext/appcontext_test.go
@@ -84,6 +84,13 @@ func (c *MockRunTimeContext) RtcAddLevel(handle interface{}, level string, value
}
+func (c *MockRunTimeContext) RtcAddOneLevel(handle interface{}, level string, value interface{}) (interface{}, error) {
+ str := fmt.Sprintf("%v", handle) + level + "/"
+ c.Items[str] = value
+ return nil, c.Err
+
+}
+
func (c *MockRunTimeContext) RtcAddResource(handle interface{}, resname string, value interface{}) (interface{}, error) {
str := fmt.Sprintf("%v", handle) + "resource" + "/" + resname + "/"
c.Items[str] = value
diff --git a/src/orchestrator/pkg/gpic/gpic.go b/src/orchestrator/pkg/gpic/gpic.go
index 256d3b41..78b547da 100644
--- a/src/orchestrator/pkg/gpic/gpic.go
+++ b/src/orchestrator/pkg/gpic/gpic.go
@@ -25,11 +25,19 @@ import (
ncmmodule "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
pkgerrors "github.com/pkg/errors"
"log"
+ "strconv"
)
-// Clusters has 1 field - a list of ClusterNames
-type Clusters struct {
- ClustersWithName []ClusterWithName
+// ClusterList consists of mandatoryClusters and clusterGroups
+type ClusterList struct {
+ MandatoryClusters []ClusterWithName
+ ClusterGroups []ClusterGroup
+}
+
+// ClusterGroup consists of a list of optionalClusters and a groupNumber. All the clusters under the optional clusters belong to the same groupNumber
+type ClusterGroup struct {
+ OptionalClusters []ClusterWithName
+ GroupNumber string
}
// ClusterWithName has two fields - ProviderName and ClusterName
@@ -89,32 +97,41 @@ func intentResolverHelper(pn, cn, cln string, clustersWithName []ClusterWithName
}
// IntentResolver shall help to resolve the given intent into 2 lists of clusters where the app need to be deployed.
-func IntentResolver(intent IntentStruc) (Clusters, error) {
- var clustersWithName []ClusterWithName
+func IntentResolver(intent IntentStruc) (ClusterList, error) {
+ var mc []ClusterWithName
var err error
-
+ var cg []ClusterGroup
+ index := 0
for _, eachAllOf := range intent.AllOfArray {
- clustersWithName, err = intentResolverHelper(eachAllOf.ProviderName, eachAllOf.ClusterName, eachAllOf.ClusterLabelName, clustersWithName)
+ mc, err = intentResolverHelper(eachAllOf.ProviderName, eachAllOf.ClusterName, eachAllOf.ClusterLabelName, mc)
if err != nil {
- return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
+ return ClusterList{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
if len(eachAllOf.AnyOfArray) > 0 {
for _, eachAnyOf := range eachAllOf.AnyOfArray {
- clustersWithName, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, clustersWithName)
+ var opc []ClusterWithName
+ opc, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, opc)
+ index++
if err != nil {
- return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
+ return ClusterList{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
+ eachClustergroup := ClusterGroup{OptionalClusters: opc, GroupNumber: strconv.Itoa(index)}
+ cg = append(cg, eachClustergroup)
}
}
}
if len(intent.AnyOfArray) > 0 {
+ var opc []ClusterWithName
for _, eachAnyOf := range intent.AnyOfArray {
- clustersWithName, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, clustersWithName)
+ opc, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, opc)
+ index++
if err != nil {
- return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
+ return ClusterList{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
+ eachClustergroup := ClusterGroup{OptionalClusters: opc, GroupNumber: strconv.Itoa(index)}
+ cg = append(cg, eachClustergroup)
}
}
- clusters := Clusters{clustersWithName}
- return clusters, nil
+ clusterList := ClusterList{MandatoryClusters: mc, ClusterGroups: cg}
+ return clusterList, nil
}
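
To illustrate the new return shape of IntentResolver, here is a small self-contained sketch. It is not part of the patch: the types are reproduced locally so the snippet compiles on its own, the provider/cluster values come from the test script above, and the exact grouping depends on how the labels resolve.

    package main

    import "fmt"

    // Local copies of the types added in gpic.go, reproduced only to keep this sketch self-contained.
    type ClusterWithName struct {
        ProviderName string
        ClusterName  string
    }

    type ClusterGroup struct {
        OptionalClusters []ClusterWithName
        GroupNumber      string
    }

    type ClusterList struct {
        MandatoryClusters []ClusterWithName
        ClusterGroups     []ClusterGroup
    }

    func main() {
        // Illustrative result for an intent with one allOf cluster and one anyOf entry
        // whose label resolves to two clusters: the anyOf clusters end up in group "1".
        resolved := ClusterList{
            MandatoryClusters: []ClusterWithName{
                {ProviderName: "cluster_provider1", ClusterName: "clusterName1"},
            },
            ClusterGroups: []ClusterGroup{
                {
                    GroupNumber: "1",
                    OptionalClusters: []ClusterWithName{
                        {ProviderName: "cluster_provider1", ClusterName: "clusterName3"},
                        {ProviderName: "cluster_provider1", ClusterName: "clusterName5"},
                    },
                },
            },
        }
        fmt.Printf("%+v\n", resolved)
    }
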
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 32e84c34..d7ec663d 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -19,15 +19,11 @@ package module
import (
"encoding/base64"
"fmt"
- "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
- "github.com/onap/multicloud-k8s/src/orchestrator/utils"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
"github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
pkgerrors "github.com/pkg/errors"
- "io/ioutil"
- //"log"
- log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
)
// ManifestFileName is the name given to the manifest file in the profile package
@@ -172,101 +168,6 @@ func GetSortedTemplateForApp(appName, p, ca, v, rName, cp string, overrideValues
return sortedTemplates, err
}
-// resource consists of name of reource
-type resource struct {
- name string
- filecontent []byte
-}
-
-// getResources shall take in the sorted templates and output the resources
-// which consists of name(name+kind) and filecontent
-func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
- var resources []resource
- for _, t := range st {
- yamlStruct, err := utils.ExtractYamlParameters(t.FilePath)
- yamlFile, err := ioutil.ReadFile(t.FilePath)
- if err != nil {
- return nil, pkgerrors.Wrap(err, "Failed to get the resources..")
- }
- n := yamlStruct.Metadata.Name + SEPARATOR + yamlStruct.Kind
-
- resources = append(resources, resource{name: n, filecontent: yamlFile})
-
- log.Info(":: Added resource into resource-order ::", log.Fields{"ResourceName": n})
- }
- return resources, nil
-}
-
-func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource, resourceOrder []string) error {
-
- for _, resource := range resources {
- resourceOrder = append(resourceOrder, resource.name)
- _, err := ct.AddResource(ch, resource.name, resource.filecontent)
- if err != nil {
- cleanuperr := ct.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Info(":: Error Cleaning up AppContext after add resource failure ::", log.Fields{"Resource": resource.name, "Error": cleanuperr.Error})
- }
- return pkgerrors.Wrapf(err, "Error adding resource ::%s to AppContext", resource.name)
- }
- _, err = ct.AddInstruction(ch, "resource", "order", resourceOrder)
- if err != nil {
- cleanuperr := ct.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Info(":: Error Cleaning up AppContext after add instruction failure ::", log.Fields{"Resource": resource.name, "Error": cleanuperr.Error})
- }
- return pkgerrors.Wrapf(err, "Error adding instruction for resource ::%s to AppContext", resource.name)
- }
- }
- return nil
-}
-
-func addClustersToAppContext(l gpic.Clusters, ct appcontext.AppContext, appHandle interface{}, resources []resource) error {
- for _, c := range l.ClustersWithName {
- p := c.ProviderName
- n := c.ClusterName
- var resourceOrder []string
- clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n)
- if err != nil {
- cleanuperr := ct.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Info(":: Error Cleaning up AppContext after add cluster failure ::", log.Fields{"cluster-provider": p, "cluster-name": n, "Error": cleanuperr.Error})
- }
- return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n)
- }
-
- err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder)
- if err != nil {
- return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s and name::%s) to AppContext", p, n)
- }
- }
- return nil
-}
-
-/*
-verifyResources method is just to check if the resource handles are correctly saved.
-*/
-
-func verifyResources(l gpic.Clusters, ct appcontext.AppContext, resources []resource, appName string) error {
- for _, c := range l.ClustersWithName {
- p := c.ProviderName
- n := c.ClusterName
- cn := p + SEPARATOR + n
- for _, res := range resources {
-
- rh, err := ct.GetResourceHandle(appName, cn, res.name)
- if err != nil {
- return pkgerrors.Wrapf(err, "Error getting resoure handle for resource :: %s, app:: %s, cluster :: %s", appName, res.name, cn)
- }
- log.Info(":: GetResourceHandle ::", log.Fields{"ResourceHandler": rh, "appName": appName, "Cluster": cn, "Resource": res.name})
-
- }
-
- }
-
- return nil
-}
-
/*
Instantiate methods takes in projectName, compositeAppName, compositeAppVersion,
DeploymentIntentName. This method is responsible for template resolution, intent
@@ -295,24 +196,13 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
return pkgerrors.Wrap(err, "Not finding the apps")
}
- // Make an app context for the compositeApp
- context := appcontext.AppContext{}
- ctxval, err := context.InitAppContext()
+ cca, err := makeAppContextForCompositeApp(p, ca, v, rName)
if err != nil {
- return pkgerrors.Wrap(err, "Error creating AppContext CompositeApp")
- }
- compositeHandle, err := context.CreateCompositeApp()
- if err != nil {
- return pkgerrors.Wrap(err, "Error creating CompositeApp handle")
- }
- err = context.AddCompositeAppMeta(appcontext.CompositeAppMeta{Project: p, CompositeApp: ca, Version: v, Release: rName})
- if err != nil {
- return pkgerrors.Wrap(err, "Error Adding CompositeAppMeta")
+ return err
}
-
- m, err := context.GetCompositeAppMeta()
-
- log.Info(":: The meta data stored in the runtime context :: ", log.Fields{"Project": m.Project, "CompositeApp": m.CompositeApp, "Version": m.Version, "Release": m.Release})
+ context := cca.context
+ ctxval := cca.ctxval
+ compositeHandle := cca.compositeAppHandle
var appOrder []string
@@ -336,6 +226,7 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
if err != nil {
return pkgerrors.Wrap(err, "Unable to get the intents for app")
}
+ // listOfClusters shall have both mandatoryClusters and optionalClusters where the app needs to be installed.
listOfClusters, err := gpic.IntentResolver(specData.Intent)
if err != nil {
return pkgerrors.Wrap(err, "Unable to get the intents resolved for app")
diff --git a/src/orchestrator/pkg/module/instantiation_appcontext_helper.go b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
new file mode 100644
index 00000000..1734a0c8
--- /dev/null
+++ b/src/orchestrator/pkg/module/instantiation_appcontext_helper.go
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package module
+
+/*
+This file deals with the interaction between the instantiation flow and etcd.
+It contains methods for creating the appContext and for saving cluster and resource details to etcd.
+
+*/
+import (
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "github.com/onap/multicloud-k8s/src/orchestrator/utils"
+ "github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
+ pkgerrors "github.com/pkg/errors"
+ "io/ioutil"
+)
+
+// resource consists of the name and file content of a resource
+type resource struct {
+ name string
+ filecontent []byte
+}
+
+type contextForCompositeApp struct {
+ context appcontext.AppContext
+ ctxval interface{}
+ compositeAppHandle interface{}
+}
+
+// makeAppContextForCompositeApp creates an appContext for a compositeApp and returns it as a contextForCompositeApp
+func makeAppContextForCompositeApp(p, ca, v, rName string) (contextForCompositeApp, error) {
+ context := appcontext.AppContext{}
+ ctxval, err := context.InitAppContext()
+ if err != nil {
+ return contextForCompositeApp{}, pkgerrors.Wrap(err, "Error creating AppContext CompositeApp")
+ }
+ compositeHandle, err := context.CreateCompositeApp()
+ if err != nil {
+ return contextForCompositeApp{}, pkgerrors.Wrap(err, "Error creating CompositeApp handle")
+ }
+ err = context.AddCompositeAppMeta(appcontext.CompositeAppMeta{Project: p, CompositeApp: ca, Version: v, Release: rName})
+ if err != nil {
+ return contextForCompositeApp{}, pkgerrors.Wrap(err, "Error Adding CompositeAppMeta")
+ }
+
+ m, err := context.GetCompositeAppMeta()
+
+ log.Info(":: The meta data stored in the runtime context :: ", log.Fields{"Project": m.Project, "CompositeApp": m.CompositeApp, "Version": m.Version, "Release": m.Release})
+
+ cca := contextForCompositeApp{context: context, ctxval: ctxval, compositeAppHandle: compositeHandle}
+
+ return cca, nil
+
+}
+
+// getResources shall take in the sorted templates and output the resources
+// which consist of the name (name+kind) and the file content
+func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
+ var resources []resource
+ for _, t := range st {
+ yamlStruct, err := utils.ExtractYamlParameters(t.FilePath)
+ yamlFile, err := ioutil.ReadFile(t.FilePath)
+ if err != nil {
+ return nil, pkgerrors.Wrap(err, "Failed to get the resources..")
+ }
+ n := yamlStruct.Metadata.Name + SEPARATOR + yamlStruct.Kind
+
+ resources = append(resources, resource{name: n, filecontent: yamlFile})
+
+ log.Info(":: Added resource into resource-order ::", log.Fields{"ResourceName": n})
+ }
+ return resources, nil
+}
+
+func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource, resourceOrder []string) error {
+
+ for _, resource := range resources {
+ resourceOrder = append(resourceOrder, resource.name)
+ _, err := ct.AddResource(ch, resource.name, resource.filecontent)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add resource failure ::", log.Fields{"Resource": resource.name, "Error": cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding resource ::%s to AppContext", resource.name)
+ }
+ _, err = ct.AddInstruction(ch, "resource", "order", resourceOrder)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add instruction failure ::", log.Fields{"Resource": resource.name, "Error": cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding instruction for resource ::%s to AppContext", resource.name)
+ }
+ }
+ return nil
+}
+
+// addClustersToAppContext adds the cluster details and saves them into etcd
+func addClustersToAppContext(l gpic.ClusterList, ct appcontext.AppContext, appHandle interface{}, resources []resource) error {
+ mc := l.MandatoryClusters
+ gc := l.ClusterGroups
+
+ for _, c := range mc {
+ p := c.ProviderName
+ n := c.ClusterName
+ var resourceOrder []string
+ clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add cluster failure ::", log.Fields{"cluster-provider": p, "cluster-name": n, "Error": cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+
+ err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+ }
+
+ for _, eachGrp := range gc {
+ oc := eachGrp.OptionalClusters
+ gn := eachGrp.GroupNumber
+
+ for _, eachCluster := range oc {
+ p := eachCluster.ProviderName
+ n := eachCluster.ClusterName
+
+ var resourceOrder []string
+ clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n)
+
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add cluster failure ::", log.Fields{"cluster-provider": p, "cluster-name": n, "GroupName": gn, "Error": cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+
+ err = ct.AddClusterMetaGrp(clusterhandle, gn)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add cluster failure ::", log.Fields{"cluster-provider": p, "cluster-name": n, "GroupName": gn, "Error": cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+
+ err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s, name::%s and groupName:: %s) to AppContext", p, n, gn)
+ }
+ }
+ }
+ return nil
+}
+
+/*
+verifyResources method is just to check if the resource handles are correctly saved.
+*/
+func verifyResources(l gpic.ClusterList, ct appcontext.AppContext, resources []resource, appName string) error {
+
+ for _, cg := range l.ClusterGroups {
+ gn := cg.GroupNumber
+ oc := cg.OptionalClusters
+ for _, eachCluster := range oc {
+ p := eachCluster.ProviderName
+ n := eachCluster.ClusterName
+ cn := p + SEPARATOR + n
+
+ for _, res := range resources {
+ rh, err := ct.GetResourceHandle(appName, cn, res.name)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error getting resource handle for resource :: %s, app :: %s, cluster :: %s, groupName :: %s", res.name, appName, cn, gn)
+ }
+ log.Info(":: GetResourceHandle ::", log.Fields{"ResourceHandler": rh, "appName": appName, "Cluster": cn, "Resource": res.name})
+ }
+ }
+ grpMap, err := ct.GetClusterGroupMap(appName)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error getting GetGroupMap for app:: %s, groupName :: %s", appName, gn)
+ }
+ log.Info(":: GetGroupMapResults ::", log.Fields{"GroupMap": grpMap})
+ }
+
+ for _, mc := range l.MandatoryClusters {
+ p := mc.ProviderName
+ n := mc.ClusterName
+ cn := p + SEPARATOR + n
+ for _, res := range resources {
+ rh, err := ct.GetResourceHandle(appName, cn, res.name)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error getting resource handle for resource :: %s, app :: %s, cluster :: %s", res.name, appName, cn)
+ }
+ log.Info(":: GetResourceHandle ::", log.Fields{"ResourceHandler": rh, "appName": appName, "Cluster": cn, "Resource": res.name})
+ }
+ }
+ return nil
+}
diff --git a/src/orchestrator/pkg/rtcontext/rtcontext.go b/src/orchestrator/pkg/rtcontext/rtcontext.go
index 5610ea58..432c5d87 100644
--- a/src/orchestrator/pkg/rtcontext/rtcontext.go
+++ b/src/orchestrator/pkg/rtcontext/rtcontext.go
@@ -48,6 +48,7 @@ type Rtcontext interface {
RtcGetValue(handle interface{}, value interface{}) error
RtcUpdateValue(handle interface{}, value interface{}) error
RtcGetMeta() (interface{}, error)
+ RtcAddOneLevel(pl interface{}, level string, value interface{}) (interface{}, error)
}
//Intialize context by assiging a new id
@@ -176,6 +177,30 @@ func (rtc *RunTimeContext) RtcAddLevel(handle interface{}, level string, value s
return (interface{})(key), nil
}
+// RtcAddOneLevel adds one more level to the existing context prefix. It takes in the previous context level as an interface, the new level to be appended as a string, and the value to be saved (of any type).
+// It returns the new handle and nil if there is no error.
+func (rtc *RunTimeContext) RtcAddOneLevel(pl interface{}, level string, value interface{}) (interface{}, error) {
+ str := fmt.Sprintf("%v", pl)
+ sid := fmt.Sprintf("%v", rtc.cid)
+ if !strings.HasPrefix(str, sid) {
+ return nil, pkgerrors.Errorf("Not a valid run time context handle")
+ }
+
+ if level == "" {
+ return nil, pkgerrors.Errorf("Not a valid run time context level")
+ }
+ if value == "" {
+ return nil, pkgerrors.Errorf("Not a valid run time context level value")
+ }
+
+ key := str + level + "/"
+ err := contextdb.Db.Put(key, value)
+ if err != nil {
+ return nil, pkgerrors.Errorf("Error adding run time context level: %s", err.Error())
+ }
+ return (interface{})(key), nil
+}
+
// Add a resource under the given level and return new handle
func (rtc *RunTimeContext) RtcAddResource(handle interface{}, resname string, value interface{}) (interface{}, error) {
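
As a quick illustration of what RtcAddOneLevel produces, a minimal sketch of its prefix check and key composition. This is not part of the patch; the handle value is made up, and the real function also persists the value via contextdb.Db.Put.

    package main

    import (
        "fmt"
        "strings"
    )

    // rtcAddOneLevelKey mirrors only the validation and key composition of RtcAddOneLevel;
    // persistence to the context database is omitted here.
    func rtcAddOneLevelKey(cid, handle, level string) (string, error) {
        if !strings.HasPrefix(handle, cid) {
            return "", fmt.Errorf("not a valid run time context handle")
        }
        if level == "" {
            return "", fmt.Errorf("not a valid run time context level")
        }
        return handle + level + "/", nil
    }

    func main() {
        cid := "/context/9876543210/"
        clusterHandle := cid + "app/app1/cluster/cluster_provider1+clusterName3/"
        key, err := rtcAddOneLevelKey(cid, clusterHandle, "!@#metaGrp")
        fmt.Println(key, err)
        // /context/9876543210/app/app1/cluster/cluster_provider1+clusterName3/!@#metaGrp/ <nil>
    }
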