-rwxr-xr-x  kud/tests/plugin_collection_v2.sh                                                   67
-rwxr-xr-x  kud/tests/plugin_ncm_v2.sh                                                         196
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml          2
-rw-r--r--  kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml   2
-rw-r--r--  src/orchestrator/pkg/appcontext/appcontext.go                                        13
-rw-r--r--  src/orchestrator/pkg/gpic/gpic.go                                                    12
-rw-r--r--  src/orchestrator/pkg/module/instantiation.go                                        248
-rw-r--r--  src/orchestrator/utils/utils.go                                                     100
8 files changed, 591 insertions(+), 49 deletions(-)
diff --git a/kud/tests/plugin_collection_v2.sh b/kud/tests/plugin_collection_v2.sh
index 09dec5c4..5ebed6ad 100755
--- a/kud/tests/plugin_collection_v2.sh
+++ b/kud/tests/plugin_collection_v2.sh
@@ -33,6 +33,7 @@ if [ ${1:+1} ]; then
fi
base_url=${base_url:-"http://localhost:9015/v2"}
+
kubeconfig_path="$HOME/.kube/config"
csar_id=cb009bfe-bbee-11e8-9766-525400435678
@@ -69,12 +70,12 @@ appIntentNameForApp1="appIntentForApp1"
appIntentForApp1Desc="AppIntentForApp1Desc"
appIntentNameForApp2="appIntentForApp2"
appIntentForApp2Desc="AppIntentForApp2Desc"
-providerName1="aws"
-providerName2="azure"
-clusterName1="edge1"
-clusterName2="edge2"
-clusterLabelName1="east-us1"
-clusterLabelName2="east-us2"
+providerName1="cluster_provider1"
+providerName2="cluster_provider2"
+clusterName1="clusterName1"
+clusterName2="clusterName2"
+clusterLabelName1="clusterLabel1"
+clusterLabelName2="clusterLabel2"
deploymentIntentGroupName="test_deployment_intent_group"
deploymentIntentGroupNameDesc="test_deployment_intent_group_desc"
@@ -93,7 +94,53 @@ cloud_region_owner="localhost"
install_deps
populate_CSAR_composite_app_helm "$csar_id"
-# BEGIN: Register project API
+# BEGIN :: Delete statements are issued so that we clean up the 'orchestrator' collection
+# and freshly populate the documents. This also serves as a direct test
+# for all our DELETE APIs and an indirect test for all GET APIs.
+
+
+print_msg "Deleting intentToBeAddedinDeploymentIntentGroup"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}/intents/${intentToBeAddedinDeploymentIntentGroup}"
+
+print_msg "Deleting ${deploymentIntentGroupName}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/deployment-intent-groups/${deploymentIntentGroupName}"
+
+print_msg "Deleting ${appIntentNameForApp2}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}/app-intents/${appIntentNameForApp2}"
+
+print_msg "Deleting ${appIntentNameForApp1}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}/app-intents/${appIntentNameForApp1}"
+
+print_msg "Deleting ${genericPlacementIntentName}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/generic-placement-intents/${genericPlacementIntentName}"
+
+print_msg "Deleting ${sub_composite_profile_name2}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}/profiles/${sub_composite_profile_name2}"
+
+print_msg "Deleting ${sub_composite_profile_name1}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}/profiles/${sub_composite_profile_name1}"
+
+print_msg "Deleting ${main_composite_profile_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/composite-profiles/${main_composite_profile_name}"
+
+print_msg "Deleting ${app2_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/apps/${app2_name}"
+
+print_msg "Deleting ${app1_name}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}/apps/${app1_name}"
+
+print_msg "Deleting ${composite_app_name}/${composite_app_version}"
+delete_resource "${base_url}/projects/${project_name}/composite-apps/${composite_app_name}/${composite_app_version}"
+
+print_msg "Deleting ${project_name}"
+delete_resource "${base_url}/projects/${project_name}"
+
+# END :: Delete statements were issued so that we clean up the db
+# and freshly populate the documents. This also serves as a direct test
+# for all our DELETE APIs and an indirect test for all GET APIs.
+
+
+# BEGIN: Register project
print_msg "Registering project"
payload="$(cat <<EOF
{
@@ -107,9 +154,9 @@ payload="$(cat <<EOF
EOF
)"
call_api -d "${payload}" "${base_url}/projects"
-# END: Register project API
+# END: Register project
-# BEGIN: Register composite-app API
+# BEGIN: Register composite-app
print_msg "Registering composite-app"
payload="$(cat <<EOF
{
@@ -126,7 +173,7 @@ payload="$(cat <<EOF
EOF
)"
call_api -d "${payload}" "${base_url}/projects/${project_name}/composite-apps"
-# END: Register composite-app API
+# END: Register composite-app
diff --git a/kud/tests/plugin_ncm_v2.sh b/kud/tests/plugin_ncm_v2.sh
new file mode 100755
index 00000000..b7d791f3
--- /dev/null
+++ b/kud/tests/plugin_ncm_v2.sh
@@ -0,0 +1,196 @@
+# /*
+# * Copyright 2020 Intel Corporation, Inc
+# *
+# * Licensed under the Apache License, Version 2.0 (the "License");
+# * you may not use this file except in compliance with the License.
+# * You may obtain a copy of the License at
+# *
+# * http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+set -o errexit
+set -o nounset
+set -o pipefail
+#set -o xtrace
+
+source _common_test.sh
+source _functions.sh
+source _common.sh
+
+base_url=${base_url:-"http://localhost:9016/v2"}
+
+kubeconfig_path="$HOME/.kube/config"
+
+cluster_provider_name1="cluster_provider1"
+cluster_provider_name2="cluster_provider2"
+cluster_provider_desc1="cluster_provider1_Desc"
+cluster_provider_desc2="cluster_provider2_Desc"
+userData1="user1"
+userData2="user2"
+
+clusterName1="clusterName1"
+cluster_desc1="cluster_desc1"
+clusterName2="clusterName2"
+cluster_desc2="cluster_desc2"
+# clusterName3 and clusterName4 will be added with clusterLabel1 and clusterLabel2
+# under cluster_provider1 and cluster_provider2 respectively
+clusterName3="clusterName3"
+cluster_desc3="cluster_desc3"
+clusterName4="clusterName4"
+cluster_desc4="cluster_desc4"
+
+clusterLabel1="clusterLabel1"
+clusterLabel2="clusterLabel2"
+
+# BEGIN :: Delete statements are issued so that we clean up the 'cluster' collection
+# and freshly populate the documents. This also serves as a direct test
+# for all our DELETE APIs and an indirect test for all GET APIs.
+print_msg "Deleting the clusterLabel1 and clusterLabel2, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels/${clusterLabel1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels/${clusterLabel2}"
+# The statements above delete clusterLabel1 and clusterLabel2, which are linked to clusterName3 and clusterName4
+
+print_msg "Deleting the cluster1, cluster2, cluster3, cluster4 if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName2}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}"
+
+print_msg "Deleting the cluster-providers, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}"
+
+# END :: Delete statements were issued so that we clean up the 'cluster' collection
+# and freshly populate the documents. This also serves as a direct test
+# for all our DELETE APIs and an indirect test for all GET APIs.
+
+# BEGIN: Register cluster_provider_name1 and cluster_provider_name2
+print_msg "Deleting the cluster-providers, if they were existing"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name1}"
+delete_resource "${base_url}/cluster-providers/${cluster_provider_name2}"
+
+print_msg "Registering cluster_provider_name1"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${cluster_provider_name1}",
+ "description": "${cluster_provider_desc1}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers"
+
+print_msg "Registering cluster_provider_name2"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${cluster_provider_name2}",
+ "description": "${cluster_provider_desc2}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers"
+# END: Register cluster_provider_name1 and cluster_provider_name2
+
+# BEGIN : Register cluster1, cluster2, cluster3 and cluster4
+print_msg "Registering cluster1"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName1}",
+ "description": "${cluster_desc1}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name1}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster2"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName2}",
+ "description": "${cluster_desc2}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster3"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName3}",
+ "description": "${cluster_desc3}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name1}/clusters" >/dev/null #massive output
+
+
+print_msg "Registering cluster4"
+payload="$(cat <<EOF
+{
+ "metadata": {
+ "name": "${clusterName4}",
+ "description": "${cluster_desc4}",
+ "userData1": "${userData1}",
+ "userData2": "${userData2}"
+ }
+}
+EOF
+)"
+call_api -F "metadata=$payload" \
+ -F "file=@$kubeconfig_path" \
+ "${base_url}/cluster-providers/${cluster_provider_name2}/clusters" >/dev/null #massive output
+
+# END : Register cluster1, cluster2, cluster3 and cluster4
+
+
+# BEGIN: adding labels to cluster3 and cluster4
+print_msg "Adding label to cluster3"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel1}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name1}/clusters/${clusterName3}/labels"
+
+print_msg "Adding label to cluster2"
+payload="$(cat <<EOF
+{
+ "label-name" : "${clusterLabel2}"
+}
+EOF
+)"
+call_api -d "${payload}" "${base_url}/cluster-providers/${cluster_provider_name2}/clusters/${clusterName4}/labels"
+
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
index dfb932d8..a3c69c31 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/role.yaml
@@ -1,7 +1,5 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
metadata:
name: {{ template "prometheus.fullname" . }}-prometheus
labels:
diff --git a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
index 04932ee1..a721cd42 100644
--- a/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
+++ b/kud/tests/vnfs/comp-app/collection/app2/helm/prometheus/templates/rolebinding.yaml
@@ -1,7 +1,5 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
metadata:
name: {{ template "prometheus.fullname" . }}-prometheus
labels:
diff --git a/src/orchestrator/pkg/appcontext/appcontext.go b/src/orchestrator/pkg/appcontext/appcontext.go
index d92b1d11..8f7841ac 100644
--- a/src/orchestrator/pkg/appcontext/appcontext.go
+++ b/src/orchestrator/pkg/appcontext/appcontext.go
@@ -22,6 +22,8 @@ import (
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/rtcontext"
pkgerrors "github.com/pkg/errors"
+ //"log"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
)
type AppContext struct {
@@ -81,6 +83,7 @@ func (ac *AppContext) AddApp(handle interface{}, appname string) (interface{}, e
if err != nil {
return nil, err
}
+ log.Info(":: Added app handle ::", log.Fields{"AppHandle":h})
return h, nil
}
@@ -93,7 +96,7 @@ func (ac *AppContext) DeleteApp(handle interface{}) error {
return nil
}
-//Returns the hanlde for a given app
+//Returns the handle for a given app
func (ac *AppContext) GetAppHandle(appname string) (interface{}, error) {
if appname == "" {
return nil, pkgerrors.Errorf("Not a valid run time context app name")
@@ -123,6 +126,7 @@ func (ac *AppContext) AddCluster(handle interface{}, clustername string) (interf
if err != nil {
return nil, err
}
+ log.Info(":: Added cluster handle ::", log.Fields{"ClusterHandler":h})
return h, nil
}
@@ -193,11 +197,13 @@ func (ac *AppContext) GetClusterNames(appname string) ([]string, error) {
}
//Add resource under app and cluster
-func (ac *AppContext) AddResource(handle interface{}, resname string, value interface{}) (interface{}, error) {
+func (ac *AppContext) AddResource(handle interface{}, resname string, value []byte) (interface{}, error) {
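+ // value carries the raw resource content, e.g. the rendered yaml manifest as a []byte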
h, err := ac.rtc.RtcAddResource(handle, resname, value)
if err != nil {
return nil, err
}
+ log.Info(":: Added resource handle ::", log.Fields{"ResourceHandler":h})
+
return h, nil
}
@@ -238,7 +244,7 @@ func (ac *AppContext) GetResourceHandle(appname string, clustername string, resn
}
//Update the resource value using the given handle
-func (ac *AppContext) UpdateResourceValue(handle interface{}, value interface{}) error {
+func (ac *AppContext) UpdateResourceValue(handle interface{}, value []byte) error {
return ac.rtc.RtcUpdateValue(handle, value)
}
@@ -254,6 +260,7 @@ func (ac *AppContext) AddInstruction(handle interface{}, level string, insttype
if err != nil {
return nil, err
}
+ log.Info(":: Added instruction handle ::", log.Fields{"InstructionHandler":h})
return h, nil
}
diff --git a/src/orchestrator/pkg/gpic/gpic.go b/src/orchestrator/pkg/gpic/gpic.go
index f02e5352..256d3b41 100644
--- a/src/orchestrator/pkg/gpic/gpic.go
+++ b/src/orchestrator/pkg/gpic/gpic.go
@@ -22,14 +22,14 @@ package gpic
*/
import (
- "log"
ncmmodule "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
pkgerrors "github.com/pkg/errors"
+ "log"
)
// Clusters has 1 field - a list of ClusterNames
type Clusters struct {
- ClustersWithName []ClusterWithName
+ ClustersWithName []ClusterWithName
}
// ClusterWithName has two fields - ProviderName and ClusterName
@@ -82,7 +82,7 @@ func intentResolverHelper(pn, cn, cln string, clustersWithName []ClusterWithName
for _, eachClusterName := range clusterNamesList {
eachClusterWithPN := ClusterWithName{pn, eachClusterName}
clustersWithName = append(clustersWithName, eachClusterWithPN)
- log.Printf("Added Cluster: %s ", cln)
+ log.Printf("Added Cluster :: %s through its label: %s ", eachClusterName, cln)
}
}
return clustersWithName, nil
@@ -95,13 +95,13 @@ func IntentResolver(intent IntentStruc) (Clusters, error) {
for _, eachAllOf := range intent.AllOfArray {
clustersWithName, err = intentResolverHelper(eachAllOf.ProviderName, eachAllOf.ClusterName, eachAllOf.ClusterLabelName, clustersWithName)
- if err!=nil {
+ if err != nil {
return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
if len(eachAllOf.AnyOfArray) > 0 {
for _, eachAnyOf := range eachAllOf.AnyOfArray {
clustersWithName, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, clustersWithName)
- if err!=nil {
+ if err != nil {
return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
}
@@ -110,7 +110,7 @@ func IntentResolver(intent IntentStruc) (Clusters, error) {
if len(intent.AnyOfArray) > 0 {
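+ // Resolve the stand-alone anyOf intents; each entry may name a cluster directly or select clusters through a cluster label.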
for _, eachAnyOf := range intent.AnyOfArray {
clustersWithName, err = intentResolverHelper(eachAnyOf.ProviderName, eachAnyOf.ClusterName, eachAnyOf.ClusterLabelName, clustersWithName)
- if err!=nil {
+ if err != nil {
return Clusters{}, pkgerrors.Wrap(err, "intentResolverHelper error")
}
}
diff --git a/src/orchestrator/pkg/module/instantiation.go b/src/orchestrator/pkg/module/instantiation.go
index 56021547..58706ef6 100644
--- a/src/orchestrator/pkg/module/instantiation.go
+++ b/src/orchestrator/pkg/module/instantiation.go
@@ -17,15 +17,17 @@
package module
import (
+ "encoding/base64"
"fmt"
-
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
gpic "github.com/onap/multicloud-k8s/src/orchestrator/pkg/gpic"
-
- "encoding/base64"
-
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
+ "github.com/onap/multicloud-k8s/src/orchestrator/utils"
"github.com/onap/multicloud-k8s/src/orchestrator/utils/helm"
pkgerrors "github.com/pkg/errors"
- "log"
+ "io/ioutil"
+ //"log"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
)
// ManifestFileName is the name given to the manifest file in the profile package
@@ -34,10 +36,29 @@ const ManifestFileName = "manifest.yaml"
// GenericPlacementIntentName denotes the generic placement intent name
const GenericPlacementIntentName = "generic-placement-intent"
+// SEPARATOR is used while creating cluster names to store in etcd
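+// e.g. a cluster is stored as "<provider-name>+<cluster-name>", such as "cluster_provider1+clusterName1"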
+const SEPARATOR = "+"
+
// InstantiationClient implements the InstantiationManager
type InstantiationClient struct {
- storeName string
- tagMetaData string
+ db InstantiationClientDbInfo
+}
+
+/*
+InstantiationKey is used for storing the contextid in mongodb.
+It consists of:
+GenericPlacementIntentName,
+ProjectName,
+CompositeAppName,
+CompositeAppVersion,
+DeploymentIntentGroup
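+
+For example (illustrative values only):
+{IntentName: "gpint", Project: "testProject", CompositeApp: "testCompositeApp",
+ Version: "v1", DeploymentIntentGroup: "test_deployment_intent_group"}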
+*/
+type InstantiationKey struct {
+ IntentName string
+ Project string
+ CompositeApp string
+ Version string
+ DeploymentIntentGroup string
}
// InstantiationManager is an interface which exposes the
@@ -47,11 +68,19 @@ type InstantiationManager interface {
Instantiate(p string, ca string, v string, di string) error
}
+// InstantiationClientDbInfo consists of storeName and tagContext
+type InstantiationClientDbInfo struct {
+ storeName string // name of the mongodb collection to use for InstantiationClient documents
+ tagContext string // attribute key name for context object in App Context
+}
+
// NewInstantiationClient returns an instance of InstantiationClient
func NewInstantiationClient() *InstantiationClient {
return &InstantiationClient{
- storeName: "orchestrator",
- tagMetaData: "instantiation",
+ db: InstantiationClientDbInfo{
+ storeName: "orchestrator",
+ tagContext: "contextid",
+ },
}
}
@@ -70,10 +99,10 @@ func getOverrideValuesByAppName(ov []OverrideValues, a string) map[string]string
}
/*
-FindGenericPlacementIntent takes in projectName, CompositeAppName, CompositeAppVersion, DeploymentIntentName
+findGenericPlacementIntent takes in projectName, CompositeAppName, CompositeAppVersion, DeploymentIntentName
and returns the name of the genericPlacementIntent. Returns an empty string if not found.
*/
-func FindGenericPlacementIntent(p, ca, v, di string) (string, error) {
+func findGenericPlacementIntent(p, ca, v, di string) (string, error) {
var gi string
var found bool
iList, err := NewIntentClient().GetAllIntents(p, ca, v, di)
@@ -82,7 +111,7 @@ func FindGenericPlacementIntent(p, ca, v, di string) (string, error) {
}
for _, eachMap := range iList.ListOfIntents {
if gi, found := eachMap[GenericPlacementIntentName]; found {
- log.Printf("::Name of the generic-placement-intent:: %s", gi)
+ log.Info(":: Name of the generic-placement-intent ::", log.Fields{"GenPlmtIntent":gi})
return gi, err
}
}
@@ -97,7 +126,8 @@ func FindGenericPlacementIntent(p, ca, v, di string) (string, error) {
//It takes in arguments - appName, project, compositeAppName, releaseName, compositeProfileName, array of override values
func GetSortedTemplateForApp(appName, p, ca, v, rName, cp string, overrideValues []OverrideValues) ([]helm.KubernetesResourceTemplate, error) {
- log.Println("Processing App.. ", appName)
+
+ log.Info(":: Processing App ::", log.Fields{"appName":appName})
var sortedTemplates []helm.KubernetesResourceTemplate
@@ -109,7 +139,8 @@ func GetSortedTemplateForApp(appName, p, ca, v, rName, cp string, overrideValues
if err != nil {
return sortedTemplates, pkgerrors.Wrap(err, "Fail to convert to byte array")
}
- log.Println("Got the app content..")
+
+ log.Info(":: Got the app content.. ::", log.Fields{"appName":appName})
appPC, err := NewAppProfileClient().GetAppProfileContentByApp(p, ca, v, cp, appName)
if err != nil {
@@ -120,7 +151,7 @@ func GetSortedTemplateForApp(appName, p, ca, v, rName, cp string, overrideValues
return sortedTemplates, pkgerrors.Wrap(err, "Fail to convert to byte array")
}
- log.Println("Got the app Profile content ...")
+ log.Info(":: Got the app Profile content .. ::", log.Fields{"appName":appName})
overrideValuesOfApp := getOverrideValuesByAppName(overrideValues, appName)
//Convert override values from map to array of strings of the following format
@@ -137,12 +168,111 @@ func GetSortedTemplateForApp(appName, p, ca, v, rName, cp string, overrideValues
appProfileContent, overrideValuesOfAppStr,
appName)
- log.Printf("The len of the sortedTemplates :: %d", len(sortedTemplates))
+ log.Info(":: Total no. of sorted templates ::", log.Fields{"len(sortedTemplates):":len(sortedTemplates)})
return sortedTemplates, err
}
-// Instantiate methods takes in project
+// resource consists of the name of a resource and its file content
+type resource struct {
+ name string
+ filecontent []byte
+}
+
+// getResources takes in the sorted templates and outputs the resources,
+// each of which consists of a name (metadata name + kind) and its file content
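+// For example, a Deployment named "prometheus-server" would yield the resource name "prometheus-server+Deployment" (illustrative).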
+func getResources(st []helm.KubernetesResourceTemplate) ([]resource, error) {
+ var resources []resource
+ for _, t := range st {
+ yamlStruct, err := utils.ExtractYamlParameters(t.FilePath)
+ if err != nil {
+ return nil, pkgerrors.Wrap(err, "Failed to extract the yaml parameters")
+ }
+ yamlFile, err := ioutil.ReadFile(t.FilePath)
+ if err != nil {
+ return nil, pkgerrors.Wrap(err, "Failed to read the resource file")
+ }
+ n := yamlStruct.Metadata.Name + SEPARATOR + yamlStruct.Kind
+
+ resources = append(resources, resource{name: n, filecontent: yamlFile})
+
+ log.Info(":: Added resource into resource-order ::", log.Fields{"ResourceName":n})
+ }
+ return resources, nil
+}
+
+func addResourcesToCluster(ct appcontext.AppContext, ch interface{}, resources []resource, resourceOrder []string) error {
+ for _, resource := range resources {
+
+ resourceOrder = append(resourceOrder, resource.name)
+ _, err := ct.AddResource(ch, resource.name, resource.filecontent)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add resource failure ::", log.Fields{"Resource":resource.name, "Error":cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding resource ::%s to AppContext", resource.name)
+ }
+ _, err = ct.AddInstruction(ch, "resource", "order", resourceOrder)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add instruction failure ::", log.Fields{"Resource":resource.name, "Error":cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding instruction for resource ::%s to AppContext", resource.name)
+ }
+ }
+ return nil
+}
+
+func addClustersToAppContext(l gpic.Clusters, ct appcontext.AppContext, appHandle interface{}, resources []resource) error {
+ for _, c := range l.ClustersWithName {
+ p := c.ProviderName
+ n := c.ClusterName
+ var resourceOrder []string
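+ // The cluster handle is keyed by "<provider>+<cluster>", e.g. "cluster_provider1+clusterName3"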
+ clusterhandle, err := ct.AddCluster(appHandle, p+SEPARATOR+n)
+ if err != nil {
+ cleanuperr := ct.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext after add cluster failure ::", log.Fields{"cluster-provider":p, "cluster-name":n, "Error":cleanuperr.Error})
+ }
+ return pkgerrors.Wrapf(err, "Error adding Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+
+ err = addResourcesToCluster(ct, clusterhandle, resources, resourceOrder)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error adding Resources to Cluster(provider::%s and name::%s) to AppContext", p, n)
+ }
+ }
+ return nil
+}
+
+// verifyResources checks whether the resource handles were saved correctly.
+func verifyResources(l gpic.Clusters, ct appcontext.AppContext, resources []resource, appName string) error {
+ for _, c := range l.ClustersWithName {
+ p := c.ProviderName
+ n := c.ClusterName
+ cn := p + SEPARATOR + n
+ for _, res := range resources {
+
+ rh, err := ct.GetResourceHandle(appName, cn, res.name)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error getting resoure handle for resource :: %s, app:: %s, cluster :: %s", appName, res.name, cn)
+ }
+ log.Info(":: GetResourceHandle ::", log.Fields{"ResourceHandler":rh, "appName":appName, "Cluster": cn, "Resource":res.name})
+
+ }
+
+ }
+
+ return nil
+}
+
+/*
+Instantiate takes in projectName, compositeAppName, compositeAppVersion and
+DeploymentIntentName. This method is responsible for template resolution, intent
+resolution, and for creating and saving the context into etcd.
+*/
func (c InstantiationClient) Instantiate(p string, ca string, v string, di string) error {
dIGrp, err := NewDeploymentIntentGroupClient().GetDeploymentIntentGroup(di, p, ca, v)
@@ -153,36 +283,102 @@ func (c InstantiationClient) Instantiate(p string, ca string, v string, di strin
overrideValues := dIGrp.Spec.OverrideValuesObj
cp := dIGrp.Spec.Profile
- gIntent, err := FindGenericPlacementIntent(p, ca, v, di)
+ gIntent, err := findGenericPlacementIntent(p, ca, v, di)
if err != nil {
return err
}
- log.Printf("The name of the GenPlacIntent:: %s", gIntent)
- log.Printf("dIGrp :: %s, releaseName :: %s and cp :: %s \n", dIGrp.MetaData.Name, rName, cp)
+ log.Info(":: The name of the GenPlacIntent ::", log.Fields{"GenPlmtIntent":gIntent})
+ log.Info(":: DeploymentIntentGroup, ReleaseName, CompositeProfile ::", log.Fields{"dIGrp":dIGrp.MetaData.Name, "releaseName":rName, "cp":cp})
+
allApps, err := NewAppClient().GetApps(p, ca, v)
if err != nil {
return pkgerrors.Wrap(err, "Not finding the apps")
}
+
+ // Make an app context for the compositeApp
+ context := appcontext.AppContext{}
+ ctxval, err := context.InitAppContext()
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error creating AppContext CompositeApp")
+ }
+ compositeHandle, err := context.CreateCompositeApp()
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error creating AppContext")
+ }
+
+ var appOrder []string
+
+ // Add each app, along with its clusters and resources, to the composite app via the appContext
for _, eachApp := range allApps {
+ appOrder = append(appOrder, eachApp.Metadata.Name)
sortedTemplates, err := GetSortedTemplateForApp(eachApp.Metadata.Name, p, ca, v, rName, cp, overrideValues)
+
if err != nil {
return pkgerrors.Wrap(err, "Unable to get the sorted templates for app")
}
- log.Printf("Resolved all the templates for app :: %s under the compositeApp...", eachApp.Metadata.Name)
- log.Printf("sortedTemplates :: %v ", sortedTemplates)
+
+ log.Info(":: Resolved all the templates ::", log.Fields{"appName":eachApp.Metadata.Name, "SortedTemplate":sortedTemplates})
+
+ resources, err := getResources(sortedTemplates)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Unable to get the resources for app :: %s", eachApp.Metadata.Name)
+ }
specData, err := NewAppIntentClient().GetAllIntentsByApp(eachApp.Metadata.Name, p, ca, v, gIntent)
if err != nil {
return pkgerrors.Wrap(err, "Unable to get the intents for app")
}
- listOfClusters,err := gpic.IntentResolver(specData.Intent)
- if err!=nil {
+ listOfClusters, err := gpic.IntentResolver(specData.Intent)
+ if err != nil {
return pkgerrors.Wrap(err, "Unable to get the intents resolved for app")
}
- log.Printf("::listOfClusters:: %v", listOfClusters)
+ log.Info(":: listOfClusters ::", log.Fields{"listOfClusters":listOfClusters})
+
+ //BEGIN: storing into etcd
+ // Add an app to the app context
+ apphandle, err := context.AddApp(compositeHandle, eachApp.Metadata.Name)
+ if err != nil {
+ cleanuperr := context.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Info(":: Error Cleaning up AppContext compositeApp failure ::", log.Fields{"Error":cleanuperr.Error(), "AppName":eachApp.Metadata.Name})
+ }
+ return pkgerrors.Wrap(err, "Error adding App to AppContext")
+ }
+ err = addClustersToAppContext(listOfClusters, context, apphandle, resources)
+ if err != nil {
+ log.Info(":: Error while adding cluster and resources to app ::", log.Fields{"Error":err.Error(), "AppName":eachApp.Metadata.Name})
+ }
+ err = verifyResources(listOfClusters, context, resources, eachApp.Metadata.Name)
+ if err != nil {
+ log.Info(":: Error while verifying resources in app ::", log.Fields{"Error":err.Error(), "AppName":eachApp.Metadata.Name})
+ }
+
+ }
+ context.AddInstruction(compositeHandle, "app", "order", appOrder)
+ //END: storing into etcd
+
+ // BEGIN:: save the context in the orchestrator db record
+ key := InstantiationKey{
+ IntentName: gIntent,
+ Project: p,
+ CompositeApp: ca,
+ Version: v,
+ DeploymentIntentGroup: di,
+ }
+
+ err = db.DBconn.Insert(c.db.storeName, key, nil, c.db.tagContext, ctxval)
+ if err != nil {
+ cleanuperr := context.DeleteCompositeApp()
+ if cleanuperr != nil {
+
+ log.Info(":: Error Cleaning up AppContext while saving context in the db for GPIntent ::", log.Fields{"Error":cleanuperr.Error(), "GPIntent":gIntent, "DeploymentIntentGroup":di, "CompositeApp":ca, "CompositeAppVersion":v, "Project":p})
+ }
+ return pkgerrors.Wrap(err, "Error adding AppContext to DB")
}
- log.Printf("Done with instantiation...")
+ // END:: save the context in the orchestrator db record
+
+ log.Info(":: Done with instantiation... ::", log.Fields{"CompositeAppName":ca})
return err
}
diff --git a/src/orchestrator/utils/utils.go b/src/orchestrator/utils/utils.go
index 13c78ba4..22ce903b 100644
--- a/src/orchestrator/utils/utils.go
+++ b/src/orchestrator/utils/utils.go
@@ -19,15 +19,105 @@ package utils
import (
"archive/tar"
"compress/gzip"
+ "strings"
+
"io"
"io/ioutil"
+ "log"
"os"
"path"
"path/filepath"
pkgerrors "github.com/pkg/errors"
+ yaml "gopkg.in/yaml.v3"
)
+// ListYamlStruct is applied when the kind is list
+type ListYamlStruct struct {
+ APIVersion string `yaml:"apiVersion,omitempty"`
+ Kind string `yaml:"kind,omitempty"`
+ Items []YamlStruct `yaml:"items,omitempty"`
+}
+
+// YamlStruct represents the common parameters in a manifest file.
+// Please add more parameters over time, as and when they are required.
+type YamlStruct struct {
+ APIVersion string `yaml:"apiVersion,omitempty"`
+ Kind string `yaml:"kind,omitempty"`
+ Metadata struct {
+ Name string `yaml:"name,omitempty"`
+ Namespace string `yaml:"namespace,omitempty"`
+ Labels struct {
+ RouterDeisIoRoutable string `yaml:"router.deis.io/routable,omitempty"`
+ } `yaml:"labels"`
+ Annotations struct {
+ RouterDeisIoDomains string `yaml:"router.deis.io/domains,omitempty"`
+ } `yaml:"annotations,omitempty"`
+ } `yaml:"metadata,omitempty"`
+ Spec struct {
+ Type string `yaml:"type,omitempty"`
+ Selector struct {
+ App string `yaml:"app,omitempty"`
+ } `yaml:"selector,omitempty"`
+ Ports []struct {
+ Name string `yaml:"name,omitempty"`
+ Port int `yaml:"port,omitempty"`
+ NodePort int `yaml:"nodePort,omitempty"`
+ } `yaml:"ports"`
+ } `yaml:"spec"`
+}
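+
+// For illustration, a minimal manifest fragment that maps onto YamlStruct
+// (the names and values below are hypothetical):
+//
+//  apiVersion: v1
+//  kind: Service
+//  metadata:
+//    name: my-service
+//    namespace: default
+//  spec:
+//    type: ClusterIP
+//    ports:
+//    - name: http
+//      port: 80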
+
+func (y YamlStruct) isValid() bool {
+ if y.APIVersion == "" {
+ log.Printf("apiVersion is missing in manifest file")
+ return false
+ }
+ if y.Kind == "" {
+ log.Printf("kind is missing in manifest file")
+ return false
+ }
+ if y.Metadata.Name == "" {
+ log.Printf("metadata.name is missing in manifest file")
+ return false
+ }
+ return true
+}
+
+// ExtractYamlParameters takes in the absolute path of a manifest file
+// and returns the corresponding YamlStruct
+func ExtractYamlParameters(f string) (YamlStruct, error) {
+ filename, _ := filepath.Abs(f)
+ yamlFile, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return YamlStruct{}, pkgerrors.Wrapf(err, "Cannot read yaml file :: %s", filename)
+ }
+
+ var yamlStruct YamlStruct
+
+ err = yaml.Unmarshal(yamlFile, &yamlStruct)
+ if err != nil {
+ return YamlStruct{}, pkgerrors.Wrapf(err, "Cannot unmarshal yaml file :: %s", filename)
+ }
+
+ /* Special-case handling when the kind is "List".
+ When the kind is "List" and the metadata name is empty,
+ we set the metadata name to the file name. For example,
+ if the filename is "/tmp/helm-tmpl-240995533/prometheus/templates/serviceaccount.yaml-0",
+ we set the metadata name to "serviceaccount.yaml-0".
+ A "List" usually contains a list of items which may not carry a top-level metadata name of their own.
+ */
+ if yamlStruct.Kind == "List" && yamlStruct.Metadata.Name == "" {
+ li := strings.LastIndex(filename, "/")
+ fn := string(filename[li+1:])
+ yamlStruct.Metadata.Name = fn
+ log.Printf("Setting the metadata name as :: %s", fn)
+ }
+ if yamlStruct.isValid() {
+ log.Printf("YAML parameters for file ::%s \n %v", f, yamlStruct)
+ return yamlStruct, nil
+ }
+ log.Printf("YAML file ::%s has errors", f)
+ return YamlStruct{}, pkgerrors.Errorf("Cant extract parameters from yaml file :: %s", filename)
+
+}
+
//ExtractTarBall provides functionality to extract a tar.gz file
//into a temporary location for later use.
//It returns the path to the new location
@@ -114,3 +204,13 @@ func EnsureDirectory(f string) error {
}
return os.MkdirAll(base, 0755)
}
+
+// Example usage of ExtractYamlParameters:
+//
+//  yamlStruct, err := ExtractYamlParameters("./test.yaml")
+//  if err != nil {
+//      log.Print(err)
+//  }
+//  fmt.Printf("%s+%s", yamlStruct.Metadata.Name, yamlStruct.Kind)
+//  fmt.Printf("%v", yamlStruct)