Diffstat (limited to 'src')
-rw-r--r--  src/clm/pkg/cluster/cluster.go                            9
-rw-r--r--  src/monitor/deploy/operator.yaml                          2
-rw-r--r--  src/orchestrator/api/projecthandler.go                    4
-rw-r--r--  src/orchestrator/api/projecthandler_test.go               2
-rw-r--r--  src/orchestrator/pkg/appcontext/appcontext.go            28
-rw-r--r--  src/orchestrator/pkg/module/deployment_intent_groups.go   9
-rw-r--r--  src/orchestrator/pkg/state/state_helper.go               29
-rw-r--r--  src/rsync/go.mod                                          1
-rw-r--r--  src/rsync/go.sum                                          4
-rw-r--r--  src/rsync/pkg/client/client.go                            2
-rw-r--r--  src/rsync/pkg/context/context.go                        381
-rw-r--r--  src/tools/emcoctl/Readme.md                              23
-rw-r--r--  src/tools/emcoctl/cmd/apply.go                            7
-rw-r--r--  src/tools/emcoctl/cmd/config.go                          14
-rw-r--r--  src/tools/emcoctl/cmd/delete.go                          20
-rw-r--r--  src/tools/emcoctl/cmd/get.go                             17
-rw-r--r--  src/tools/emcoctl/cmd/getall.go                          39
-rw-r--r--  src/tools/emcoctl/cmd/root.go                             7
-rw-r--r--  src/tools/emcoctl/cmd/utils.go                          101
-rw-r--r--  src/tools/emcoctl/examples/emco-cfg.yaml                  8
-rw-r--r--  src/tools/emcoctl/examples/vfw.yaml                     408
21 files changed, 966 insertions, 149 deletions
diff --git a/src/clm/pkg/cluster/cluster.go b/src/clm/pkg/cluster/cluster.go
index 26a9d6df..fb8768d6 100644
--- a/src/clm/pkg/cluster/cluster.go
+++ b/src/clm/pkg/cluster/cluster.go
@@ -19,6 +19,7 @@ package cluster
import (
"time"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
@@ -423,6 +424,14 @@ func (v *ClusterClient) DeleteCluster(provider, name string) error {
// remove the app contexts associated with this cluster
if stateVal == state.StateEnum.Terminated {
+ // Verify that the appcontext has completed terminating
+ ctxid := state.GetLastContextIdFromStateInfo(s)
+ acStatus, err := state.GetAppContextStatus(ctxid)
+ if err == nil &&
+ !(acStatus.Status == appcontext.AppContextStatusEnum.Terminated || acStatus.Status == appcontext.AppContextStatusEnum.TerminateFailed) {
+ return pkgerrors.Errorf("Network intents for cluster have not completed terminating " + name)
+ }
+
for _, id := range state.GetContextIdsFromStateInfo(s) {
context, err := state.GetAppContextFromId(id)
if err != nil {
diff --git a/src/monitor/deploy/operator.yaml b/src/monitor/deploy/operator.yaml
index 93e4522c..80020d7a 100644
--- a/src/monitor/deploy/operator.yaml
+++ b/src/monitor/deploy/operator.yaml
@@ -18,7 +18,7 @@ spec:
containers:
- name: monitor
# Replace this with the built image name
- image: ewmduck/monitor:latest
+ image: emcov2/monitor:latest
command:
- /opt/monitor/monitor
imagePullPolicy: IfNotPresent
diff --git a/src/orchestrator/api/projecthandler.go b/src/orchestrator/api/projecthandler.go
index 6b512804..f130d283 100644
--- a/src/orchestrator/api/projecthandler.go
+++ b/src/orchestrator/api/projecthandler.go
@@ -126,7 +126,7 @@ func (h projectHandler) getHandler(w http.ResponseWriter, r *http.Request) {
projects, err := h.client.GetAllProjects()
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ http.Error(w, err.Error(), http.StatusNotFound)
return
}
@@ -147,7 +147,7 @@ func (h projectHandler) getHandler(w http.ResponseWriter, r *http.Request) {
ret, err := h.client.GetProject(name)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ http.Error(w, err.Error(), http.StatusNotFound)
return
}
diff --git a/src/orchestrator/api/projecthandler_test.go b/src/orchestrator/api/projecthandler_test.go
index 6810099f..ef5fdf07 100644
--- a/src/orchestrator/api/projecthandler_test.go
+++ b/src/orchestrator/api/projecthandler_test.go
@@ -273,7 +273,7 @@ func TestProjectGetHandler(t *testing.T) {
},
{
label: "Get Non-Exiting Project",
- expectedCode: http.StatusInternalServerError,
+ expectedCode: http.StatusNotFound,
name: "nonexistingproject",
projectClient: &mockProjectManager{
Items: []moduleLib.Project{},
diff --git a/src/orchestrator/pkg/appcontext/appcontext.go b/src/orchestrator/pkg/appcontext/appcontext.go
index db2ba432..5d757940 100644
--- a/src/orchestrator/pkg/appcontext/appcontext.go
+++ b/src/orchestrator/pkg/appcontext/appcontext.go
@@ -37,30 +37,30 @@ type AppContext struct {
// AppContextStatus represents the current status of the appcontext
// Instantiating - instantiate has been invoked and is still in progress
// Instantiated - instantiate has completed
-// PreTerminate - terminate has been invoked when in Instantiating status - need to clean up first
// Terminating - terminate has been invoked and is still in progress
// Terminated - terminate has completed
-// Failed - the instantiate or terminate action has failed
+// InstantiateFailed - the instantiate action has failed
+// TerminateFailed - the terminate action has failed
type AppContextStatus struct {
Status StatusValue
}
type StatusValue string
type statuses struct {
- Instantiating StatusValue
- Instantiated StatusValue
- PreTerminate StatusValue
- Terminating StatusValue
- Terminated StatusValue
- Failed StatusValue
+ Instantiating StatusValue
+ Instantiated StatusValue
+ Terminating StatusValue
+ Terminated StatusValue
+ InstantiateFailed StatusValue
+ TerminateFailed StatusValue
}
var AppContextStatusEnum = &statuses{
- Instantiating: "Instantiating",
- Instantiated: "Instantiated",
- PreTerminate: "PreTerminate",
- Terminating: "Terminating",
- Terminated: "Terminated",
- Failed: "Failed",
+ Instantiating: "Instantiating",
+ Instantiated: "Instantiated",
+ Terminating: "Terminating",
+ Terminated: "Terminated",
+ InstantiateFailed: "InstantiateFailed",
+ TerminateFailed: "TerminateFailed",
}
// CompositeAppMeta consists of projectName, CompositeAppName,
diff --git a/src/orchestrator/pkg/module/deployment_intent_groups.go b/src/orchestrator/pkg/module/deployment_intent_groups.go
index f9829853..dec6391f 100644
--- a/src/orchestrator/pkg/module/deployment_intent_groups.go
+++ b/src/orchestrator/pkg/module/deployment_intent_groups.go
@@ -21,6 +21,7 @@ import (
"reflect"
"time"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/state"
@@ -271,6 +272,14 @@ func (c *DeploymentIntentGroupClient) DeleteDeploymentIntentGroup(di string, p s
// remove the app contexts associated with this Deployment Intent Group
if stateVal == state.StateEnum.Terminated {
+ // Verify that the appcontext has completed terminating
+ ctxid := state.GetLastContextIdFromStateInfo(s)
+ acStatus, err := state.GetAppContextStatus(ctxid)
+ if err == nil &&
+ !(acStatus.Status == appcontext.AppContextStatusEnum.Terminated || acStatus.Status == appcontext.AppContextStatusEnum.TerminateFailed) {
+ return pkgerrors.Errorf("DeploymentIntentGroup has not completed terminating: " + di)
+ }
+
for _, id := range state.GetContextIdsFromStateInfo(s) {
context, err := state.GetAppContextFromId(id)
if err != nil {
diff --git a/src/orchestrator/pkg/state/state_helper.go b/src/orchestrator/pkg/state/state_helper.go
index 9d59fb75..1f926f8f 100644
--- a/src/orchestrator/pkg/state/state_helper.go
+++ b/src/orchestrator/pkg/state/state_helper.go
@@ -17,6 +17,8 @@
package state
import (
+ "encoding/json"
+
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
pkgerrors "github.com/pkg/errors"
)
@@ -69,3 +71,30 @@ func GetContextIdsFromStateInfo(s StateInfo) []string {
return ids
}
+
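+// GetAppContextStatus returns the AppContextStatus stored at the composite app level of the appcontext identified by ctxid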
+func GetAppContextStatus(ctxid string) (appcontext.AppContextStatus, error) {
+
+ ac, err := GetAppContextFromId(ctxid)
+ if err != nil {
+ return appcontext.AppContextStatus{}, err
+ }
+
+ h, err := ac.GetCompositeAppHandle()
+ if err != nil {
+ return appcontext.AppContextStatus{}, err
+ }
+ sh, err := ac.GetLevelHandle(h, "status")
+ if err != nil {
+ return appcontext.AppContextStatus{}, err
+ }
+ s, err := ac.GetValue(sh)
+ if err != nil {
+ return appcontext.AppContextStatus{}, err
+ }
+ acStatus := appcontext.AppContextStatus{}
+ js, _ := json.Marshal(s)
+ json.Unmarshal(js, &acStatus)
+
+ return acStatus, nil
+
+}
diff --git a/src/rsync/go.mod b/src/rsync/go.mod
index 0fd2c787..b5f5c93e 100644
--- a/src/rsync/go.mod
+++ b/src/rsync/go.mod
@@ -27,6 +27,7 @@ require (
replace (
github.com/onap/multicloud-k8s/src/clm => ../clm
github.com/onap/multicloud-k8s/src/monitor => ../monitor
+ github.com/onap/multicloud-k8s/src/orchestrator => ../orchestrator
k8s.io/api => k8s.io/api v0.17.3
k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.3
k8s.io/apimachinery => k8s.io/apimachinery v0.17.3
diff --git a/src/rsync/go.sum b/src/rsync/go.sum
index 00637891..95895d07 100644
--- a/src/rsync/go.sum
+++ b/src/rsync/go.sum
@@ -1216,10 +1216,14 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
+github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
diff --git a/src/rsync/pkg/client/client.go b/src/rsync/pkg/client/client.go
index a489b951..5920dea5 100644
--- a/src/rsync/pkg/client/client.go
+++ b/src/rsync/pkg/client/client.go
@@ -25,7 +25,7 @@ import (
// DefaultValidation default action to validate. If `true` all resources by
// default will be validated.
-const DefaultValidation = true
+const DefaultValidation = false
// Client is a kubernetes client, like `kubectl`
type Client struct {
diff --git a/src/rsync/pkg/context/context.go b/src/rsync/pkg/context/context.go
index f77482e6..4b886ec7 100644
--- a/src/rsync/pkg/context/context.go
+++ b/src/rsync/pkg/context/context.go
@@ -21,6 +21,7 @@ import (
"encoding/json"
"fmt"
"strings"
+ "sync"
"time"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
@@ -36,7 +37,9 @@ import (
)
type CompositeAppContext struct {
- cid interface{}
+ cid interface{}
+ chans []chan bool
+ mutex sync.Mutex
}
func getRes(ac appcontext.AppContext, name string, app string, cluster string) ([]byte, interface{}, error) {
@@ -144,26 +147,150 @@ func instantiateResource(ac appcontext.AppContext, c *kubeclient.Client, name st
return nil
}
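+// updateResourceStatus sets resState on every resource in the 'resorder' list for the given app and cluster, skipping resources that have already reached a terminal status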
+func updateResourceStatus(ac appcontext.AppContext, resState resourcestatus.ResourceStatus, app string, cluster string, aov map[string][]string) error {
+
+ for _, res := range aov["resorder"] {
+
+ rh, err := ac.GetResourceHandle(app, cluster, res)
+ if err != nil {
+ return err
+ }
+ sh, err := ac.GetLevelHandle(rh, "status")
+ if err != nil {
+ return err
+ }
+
+ s, err := ac.GetValue(sh)
+ if err != nil {
+ return err
+ }
+ rStatus := resourcestatus.ResourceStatus{}
+ js, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ err = json.Unmarshal(js, &rStatus)
+ if err != nil {
+ return err
+ }
+ // no need to update a status that has reached a 'done' status
+ if rStatus.Status == resourcestatus.RsyncStatusEnum.Deleted ||
+ rStatus.Status == resourcestatus.RsyncStatusEnum.Applied ||
+ rStatus.Status == resourcestatus.RsyncStatusEnum.Failed {
+ continue
+ }
+
+ err = ac.UpdateStatusValue(sh, resState)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+
+}
+
+// return true if all resources have reached a 'done' status - e.g. Applied, Deleted or Failed
+func allResourcesDone(ac appcontext.AppContext, app string, cluster string, aov map[string][]string) bool {
+
+ for _, res := range aov["resorder"] {
+
+ rh, err := ac.GetResourceHandle(app, cluster, res)
+ if err != nil {
+ return false
+ }
+ sh, err := ac.GetLevelHandle(rh, "status")
+ if err != nil {
+ return false
+ }
+
+ s, err := ac.GetValue(sh)
+ if err != nil {
+ return false
+ }
+ rStatus := resourcestatus.ResourceStatus{}
+ js, err := json.Marshal(s)
+ if err != nil {
+ return false
+ }
+ err = json.Unmarshal(js, &rStatus)
+ if err != nil {
+ return false
+ }
+ if rStatus.Status != resourcestatus.RsyncStatusEnum.Deleted &&
+ rStatus.Status != resourcestatus.RsyncStatusEnum.Applied &&
+ rStatus.Status != resourcestatus.RsyncStatusEnum.Failed {
+ return false
+ }
+ }
+
+ return true
+
+}
+
// Wait for 2 secs
const waitTime = 2
-func waitForClusterReady(c *kubeclient.Client, cluster string) error {
+func waitForClusterReady(instca *CompositeAppContext, ac appcontext.AppContext, c *kubeclient.Client, appname string, cluster string, aov map[string][]string) error {
+
+ forceDone := false
+ resStateUpdated := false
+ ch := addChan(instca)
+
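+ // probe reachability in a separate goroutine so the select below can also react to terminate signals and timeouts while a check is in flight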
+ rch := make(chan error, 1)
+ checkReachable := func() {
+ err := c.IsReachable()
+ rch <- err
+ }
+
+ go checkReachable()
+Loop:
for {
- if err := c.IsReachable(); err != nil {
- // TODO: Add more realistic error checking
- // TODO: Add Incremental wait logic here
- time.Sleep(waitTime * time.Second)
- } else {
+ select {
+ case rerr := <-rch:
+ if rerr == nil {
+ break Loop
+ } else {
+ logutils.Info("Cluster is not reachable - keep trying::", logutils.Fields{"cluster": cluster})
+ go checkReachable()
+ }
+ case <-ch:
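+ // a terminate request was signaled - mark this group's resources as Failed and stop retrying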
+ statusFailed := resourcestatus.ResourceStatus{
+ Status: resourcestatus.RsyncStatusEnum.Failed,
+ }
+ err := updateResourceStatus(ac, statusFailed, appname, cluster, aov)
+ if err != nil {
+ deleteChan(instca, ch)
+ return err
+ }
+ forceDone = true
+ break Loop
+ case <-time.After(waitTime * time.Second):
+ // on first timeout - cluster is apparently not reachable, update resources in
+ // this group to 'Retrying'
+ if !resStateUpdated {
+ statusRetrying := resourcestatus.ResourceStatus{
+ Status: resourcestatus.RsyncStatusEnum.Retrying,
+ }
+ err := updateResourceStatus(ac, statusRetrying, appname, cluster, aov)
+ if err != nil {
+ deleteChan(instca, ch)
+ return err
+ }
+ resStateUpdated = true
+ }
break
}
}
- logutils.Info("Cluster is reachable::", logutils.Fields{
- "cluster": cluster,
- })
+
+ deleteChan(instca, ch)
+ if forceDone {
+ return pkgerrors.Errorf("Termination of rsync cluster retry: " + cluster)
+ }
return nil
}
-// initializeResourceStatus sets the initial status of every resource appropriately based on the state of the AppContext
+// initializeAppContextStatus sets the initial status of every resource appropriately based on the state of the AppContext
func initializeAppContextStatus(ac appcontext.AppContext, acStatus appcontext.AppContextStatus) error {
h, err := ac.GetCompositeAppHandle()
if err != nil {
@@ -320,12 +447,18 @@ func updateEndingAppContextStatus(ac appcontext.AppContext, handle interface{},
js, _ := json.Marshal(s)
json.Unmarshal(js, &acStatus)
- if failure {
- acStatus.Status = appcontext.AppContextStatusEnum.Failed
- } else if acStatus.Status == appcontext.AppContextStatusEnum.Instantiating {
- acStatus.Status = appcontext.AppContextStatusEnum.Instantiated
+ if acStatus.Status == appcontext.AppContextStatusEnum.Instantiating {
+ if failure {
+ acStatus.Status = appcontext.AppContextStatusEnum.InstantiateFailed
+ } else {
+ acStatus.Status = appcontext.AppContextStatusEnum.Instantiated
+ }
} else if acStatus.Status == appcontext.AppContextStatusEnum.Terminating {
- acStatus.Status = appcontext.AppContextStatusEnum.Terminated
+ if failure {
+ acStatus.Status = appcontext.AppContextStatusEnum.TerminateFailed
+ } else {
+ acStatus.Status = appcontext.AppContextStatusEnum.Terminated
+ }
} else {
return pkgerrors.Errorf("Invalid AppContextStatus %v", acStatus)
}
@@ -337,20 +470,197 @@ func updateEndingAppContextStatus(ac appcontext.AppContext, handle interface{},
return nil
}
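+// getAppContextStatus reads the 'status' value stored at the composite app handle and unmarshals it into an AppContextStatus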
+func getAppContextStatus(ac appcontext.AppContext) (*appcontext.AppContextStatus, error) {
+
+ h, err := ac.GetCompositeAppHandle()
+ if err != nil {
+ return nil, err
+ }
+ sh, err := ac.GetLevelHandle(h, "status")
+ if err != nil {
+ return nil, err
+ }
+ s, err := ac.GetValue(sh)
+ if err != nil {
+ return nil, err
+ }
+ acStatus := appcontext.AppContextStatus{}
+ js, _ := json.Marshal(s)
+ json.Unmarshal(js, &acStatus)
+
+ return &acStatus, nil
+
+}
+
type fn func(ac appcontext.AppContext, client *kubeclient.Client, res string, app string, cluster string, label string) error
type statusfn func(client *kubeclient.Client, app string, cluster string, label string) error
-func applyFnComApp(cid interface{}, acStatus appcontext.AppContextStatus, f fn, sfn statusfn, breakonError bool) error {
- con := connector.Init(cid)
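+// addChan creates a signaling channel and registers it with the CompositeAppContext; the retry watcher uses these channels to tell retry loops to exit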
+func addChan(instca *CompositeAppContext) chan bool {
+
+ instca.mutex.Lock()
+ c := make(chan bool)
+ instca.chans = append(instca.chans, c)
+ instca.mutex.Unlock()
+
+ return c
+}
+
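+// deleteChan unregisters the given channel from the CompositeAppContext's channel list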
+func deleteChan(instca *CompositeAppContext, c chan bool) error {
+
+ var i int
+ instca.mutex.Lock()
+ for i = 0; i < len(instca.chans); i++ {
+ if instca.chans[i] == c {
+ break
+ }
+ }
+
+ if i == len(instca.chans) {
+ instca.mutex.Unlock()
+ return pkgerrors.Errorf("Given channel was not found:")
+ }
+ instca.chans[i] = instca.chans[len(instca.chans)-1]
+ instca.chans = instca.chans[:len(instca.chans)-1]
+ instca.mutex.Unlock()
+
+ return nil
+}
+
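+// waitForDone blocks until the appcontext reaches the Instantiated or InstantiateFailed status; terminate uses it to wait for in-progress instantiate retries to finish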
+func waitForDone(ac appcontext.AppContext) {
+ count := 0
+ for {
+ time.Sleep(1 * time.Second)
+ count++
+ if count == 60*60 {
+ logutils.Info("Wait for done watcher running..", logutils.Fields{})
+ count = 0
+ }
+ acStatus, err := getAppContextStatus(ac)
+ if err != nil {
+ // if the status cannot be read, stop waiting rather than dereference a nil result
+ return
+ }
+ if acStatus.Status == appcontext.AppContextStatusEnum.Instantiated ||
+ acStatus.Status == appcontext.AppContextStatusEnum.InstantiateFailed {
+ return
+ }
+ }
+}
+
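+// kickoffRetryWatcher starts a watcher thread that polls the appcontext status and stop flag every second; it signals all registered retry channels when the stop flag is set and exits once the appcontext reaches a terminal status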
+func kickoffRetryWatcher(instca *CompositeAppContext, ac appcontext.AppContext, acStatus appcontext.AppContextStatus, wg *errgroup.Group) {
+
+ wg.Go(func() error {
+
+ var count int
+
+ count = 0
+ for {
+ time.Sleep(1 * time.Second)
+ count++
+ if count == 60*60 {
+ logutils.Info("Retry watcher running..", logutils.Fields{})
+ count = 0
+ }
+
+ cStatus, err := getAppContextStatus(ac)
+ if err != nil {
+ logutils.Error("Failed to get the app context status", logutils.Fields{
+ "error": err,
+ })
+ return err
+ }
+ flag, err := getAppContextFlag(ac)
+ if err != nil {
+ logutils.Error("Failed to get the stop flag", logutils.Fields{
+ "error": err,
+ })
+ return err
+ } else {
+ if flag {
+ instca.mutex.Lock()
+ for i := 0; i < len(instca.chans); i++ {
+ instca.chans[i] <- true
+ logutils.Info("kickoffRetryWatcher - send an exit message", logutils.Fields{})
+ }
+ instca.mutex.Unlock()
+ break
+ }
+ }
+ if acStatus.Status == appcontext.AppContextStatusEnum.Instantiating {
+ if cStatus.Status == appcontext.AppContextStatusEnum.Instantiated ||
+ cStatus.Status == appcontext.AppContextStatusEnum.InstantiateFailed {
+ break
+ }
+ } else {
+ if cStatus.Status == appcontext.AppContextStatusEnum.Terminated ||
+ cStatus.Status == appcontext.AppContextStatusEnum.TerminateFailed {
+ break
+ }
+ }
+
+ }
+ return nil
+ })
+
+}
+
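+// getAppContextFlag reads the 'stopflag' value from the composite app handle, returning false (with any lookup error) if the handle is not present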
+func getAppContextFlag(ac appcontext.AppContext) (bool, error) {
+ h, err := ac.GetCompositeAppHandle()
+ if err != nil {
+ return false, err
+ }
+ sh, err := ac.GetLevelHandle(h, "stopflag")
+ if sh == nil {
+ return false, err
+ } else {
+ v, err := ac.GetValue(sh)
+ if err != nil {
+ return false, err
+ } else {
+ return v.(bool), nil
+ }
+ }
+}
+
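+// updateAppContextFlag creates or updates the 'stopflag' value on the appcontext identified by cid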
+func updateAppContextFlag(cid interface{}, sf bool) error {
+ ac := appcontext.AppContext{}
+ _, err := ac.LoadAppContext(cid)
+ if err != nil {
+ return err
+ }
+ hc, err := ac.GetCompositeAppHandle()
+ if err != nil {
+ return err
+ }
+ sh, err := ac.GetLevelHandle(hc, "stopflag")
+ if sh == nil {
+ _, err = ac.AddLevelValue(hc, "stopflag", sf)
+ } else {
+ err = ac.UpdateValue(sh, sf)
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func applyFnComApp(instca *CompositeAppContext, acStatus appcontext.AppContextStatus, f fn, sfn statusfn, breakonError bool) error {
+ con := connector.Init(instca.cid)
//Cleanup
defer con.RemoveClient()
ac := appcontext.AppContext{}
- h, err := ac.LoadAppContext(cid)
+ h, err := ac.LoadAppContext(instca.cid)
if err != nil {
return err
}
+ // if terminating, wait for all retrying instantiate threads to exit
+ if acStatus.Status == appcontext.AppContextStatusEnum.Terminating {
+ waitForDone(ac)
+ err := updateAppContextFlag(instca.cid, false)
+ if err != nil {
+ return err
+ }
+ }
+
// initialize appcontext status
err = initializeAppContextStatus(ac, acStatus)
if err != nil {
@@ -375,6 +685,8 @@ func applyFnComApp(cid interface{}, acStatus appcontext.AppContextStatus, f fn,
})
id, _ := ac.GetCompositeAppHandle()
g, _ := errgroup.WithContext(context.Background())
+ wg, _ := errgroup.WithContext(context.Background())
+ kickoffRetryWatcher(instca, ac, acStatus, wg)
// Iterate over all the subapps
for _, app := range appList["apporder"] {
appName := app
@@ -414,8 +726,13 @@ func applyFnComApp(cid interface{}, acStatus appcontext.AppContextStatus, f fn,
json.Unmarshal([]byte(resorder.(string)), &aov)
// Keep retrying for reachability
for {
+ done := allResourcesDone(ac, appName, cluster, aov)
+ if done {
+ break
+ }
+
// Wait for cluster to be reachable
- err = waitForClusterReady(c, cluster)
+ err := waitForClusterReady(instca, ac, c, appName, cluster, aov)
if err != nil {
// TODO: Add error handling
return err
@@ -479,19 +796,39 @@ func applyFnComApp(cid interface{}, acStatus appcontext.AppContextStatus, f fn,
logutils.Error("Encountered error updating AppContext status", logutils.Fields{"error": err})
return err
}
+ if err := wg.Wait(); err != nil {
+ logutils.Error("Encountered error in watcher thread", logutils.Fields{"error": err})
+ return err
+ }
return nil
}
// InstantiateComApp Instantiate Apps in Composite App
func (instca *CompositeAppContext) InstantiateComApp(cid interface{}) error {
- go applyFnComApp(cid, appcontext.AppContextStatus{Status: appcontext.AppContextStatusEnum.Instantiating},
+ instca.cid = cid
+ instca.chans = []chan bool{}
+ instca.mutex = sync.Mutex{}
+ err := updateAppContextFlag(cid, false)
+ if err != nil {
+ logutils.Error("Encountered error updating AppContext flag", logutils.Fields{"error": err})
+ return err
+ }
+ go applyFnComApp(instca, appcontext.AppContextStatus{Status: appcontext.AppContextStatusEnum.Instantiating},
instantiateResource, addStatusTracker, true)
return nil
}
// TerminateComApp Terminates Apps in Composite App
func (instca *CompositeAppContext) TerminateComApp(cid interface{}) error {
- go applyFnComApp(cid, appcontext.AppContextStatus{Status: appcontext.AppContextStatusEnum.Terminating},
+ instca.cid = cid
+ instca.chans = []chan bool{}
+ instca.mutex = sync.Mutex{}
+ err := updateAppContextFlag(cid, true)
+ if err != nil {
+ logutils.Error("Encountered error updating AppContext flag", logutils.Fields{"error": err})
+ return err
+ }
+ go applyFnComApp(instca, appcontext.AppContextStatus{Status: appcontext.AppContextStatusEnum.Terminating},
terminateResource, deleteStatusTracker, false)
return nil
}
diff --git a/src/tools/emcoctl/Readme.md b/src/tools/emcoctl/Readme.md
index bf07e563..ecbcf4de 100644
--- a/src/tools/emcoctl/Readme.md
+++ b/src/tools/emcoctl/Readme.md
@@ -54,20 +54,33 @@ This command will apply the resources in the file. The user is responsible to en
`$ emcoctl apply -f filename.yaml`
+For applying resources that don't have a JSON body, the anchor can be provided as an argument
+
+`$ emcoctl apply <anchor>`
+
+`$ emcoctl apply projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group/instantiate`
+
+
2. Get Emco Resources
-Get the resources in the input file. This command will use the metadata name to get the resource.
+Get the resources in the input file. This command uses the metadata name of each resource in the file to get information about that resource.
`$ emcoctl get -f filename.yaml`
+For getting information about a single resource, the anchor can be provided as an argument
+
+`$ emcoctl get <anchor>`
+
+`$ emcoctl get projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group`
+
3. Delete Emco Resources
-Delete resources in the file. The emcoctl will start deleting resources in the reverse order than given in the file to maintain hierarchy. This command will use the metadata name to delete the resource.
+Delete resources in the file. emcoctl deletes the resources in the reverse order from that given in the file to maintain hierarchy. This command uses the metadata name of each resource in the file to delete that resource.
`$ emcoctl delete -f filename.yaml`
-4. Get all Emco Resources
+For deleting a single resource, the anchor can be provided as an argument
-Get all for the resources in the file.
+`$ emcoctl delete <anchor>`
-`$ emcoctl getall -f filename.yaml`
+`$ emcoctl delete projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group`
diff --git a/src/tools/emcoctl/cmd/apply.go b/src/tools/emcoctl/cmd/apply.go
index f451a614..cf494bc4 100644
--- a/src/tools/emcoctl/cmd/apply.go
+++ b/src/tools/emcoctl/cmd/apply.go
@@ -24,9 +24,8 @@ import (
// applyCmd represents the apply command
var applyCmd = &cobra.Command{
Use: "apply",
- Short: "apply(Post) the resources from input file or url(with body) from command line",
+ Short: "apply(Post) the resources from input file or url(without body) from command line",
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("apply called")
c := NewRestClient()
if len(inputFiles) > 0 {
resources := readResources()
@@ -46,7 +45,6 @@ var applyCmd = &cobra.Command{
}
}
} else if len(args) >= 1 {
- fmt.Println(args[0])
c.RestClientPost(args[0], []byte{})
} else {
fmt.Println("Error: No args ")
@@ -55,8 +53,7 @@ var applyCmd = &cobra.Command{
}
func init() {
- fmt.Println("INIT ")
rootCmd.AddCommand(applyCmd)
applyCmd.Flags().StringSliceVarP(&inputFiles, "filename", "f", []string{}, "Filename of the input file")
- applyCmd.Flags().StringSliceVarP(&valuesFiles, "values", "v", []string{}, "Values to go with the file")
+ //applyCmd.Flags().StringSliceVarP(&valuesFiles, "values", "v", []string{}, "Values to go with the file")
}
diff --git a/src/tools/emcoctl/cmd/config.go b/src/tools/emcoctl/cmd/config.go
index c5e44660..8af1cc28 100644
--- a/src/tools/emcoctl/cmd/config.go
+++ b/src/tools/emcoctl/cmd/config.go
@@ -39,6 +39,20 @@ const urlVersion string = "v2"
const urlPrefix string = "http://"
var Configurations EmcoConfigurations
+// SetDefaultConfiguration sets the default configuration when no configuration file is found
+func SetDefaultConfiguration() {
+ Configurations.Orchestrator.Host = "localhost"
+ Configurations.Orchestrator.Port = 9015
+ Configurations.Clm.Host = "localhost"
+ Configurations.Clm.Port = 9061
+ Configurations.Ncm.Host = "localhost"
+ Configurations.Ncm.Port = 9031
+ Configurations.Dcm.Host = "localhost"
+ Configurations.Dcm.Port = 0
+ Configurations.OvnAction.Host = "localhost"
+ Configurations.OvnAction.Port = 9051
+}
+
// GetOrchestratorURL Url for Orchestrator
func GetOrchestratorURL() string {
if Configurations.Orchestrator.Host == "" || Configurations.Orchestrator.Port == 0 {
diff --git a/src/tools/emcoctl/cmd/delete.go b/src/tools/emcoctl/cmd/delete.go
index d6dbfe34..faa52b5b 100644
--- a/src/tools/emcoctl/cmd/delete.go
+++ b/src/tools/emcoctl/cmd/delete.go
@@ -1,5 +1,5 @@
/*
-Copyright © 2020 Intel Corp
+Copyright © 2020 Intel Corp
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -24,9 +24,8 @@ import (
// deleteCmd represents the delete command
var deleteCmd = &cobra.Command{
Use: "delete",
- Short: "Delete resources in input file or commandline",
+ Short: "Delete the resources from input file or url from command line",
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("delete called")
c := NewRestClient()
if len(inputFiles) > 0 {
resources := readResources()
@@ -35,23 +34,14 @@ var deleteCmd = &cobra.Command{
c.RestClientDelete(res.anchor, res.body)
}
} else if len(args) >= 1 {
- fmt.Println(args[0])
- c.RestClientDelete(args[0], nil)
+ c.RestClientDeleteAnchor(args[0])
+ } else {
+ fmt.Println("Error: No args ")
}
},
}
func init() {
rootCmd.AddCommand(deleteCmd)
-
- // Here you will define your flags and configuration settings.
-
- // Cobra supports Persistent Flags which will work for this command
- // and all subcommands, e.g.:
- // deleteCmd.PersistentFlags().String("foo", "", "A help for foo")
-
- // Cobra supports local flags which will only run when this command
- // is called directly, e.g.:
- // deleteCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
deleteCmd.Flags().StringSliceVarP(&inputFiles, "filename", "f", []string{}, "Filename of the input file")
}
diff --git a/src/tools/emcoctl/cmd/get.go b/src/tools/emcoctl/cmd/get.go
index 2cc96dc4..124ceec7 100644
--- a/src/tools/emcoctl/cmd/get.go
+++ b/src/tools/emcoctl/cmd/get.go
@@ -24,17 +24,24 @@ import (
// getCmd represents the get command
var getCmd = &cobra.Command{
Use: "get",
- Short: "Get the resource(s) based on the URL",
+ Short: "Get the resources from input file or url from command line",
Run: func(cmd *cobra.Command, args []string) {
- fmt.Println("get called")
c := NewRestClient()
- if len(args) >= 1 {
- fmt.Println(args[0])
- c.RestClientGet(args[0])
+ if len(inputFiles) > 0 {
+ resources := readResources()
+ for _, res := range resources {
+ c.RestClientGet(res.anchor, res.body)
+ }
+ } else if len(args) >= 1 {
+ c.RestClientGetAnchor(args[0])
+ } else {
+ fmt.Println("Error: No args ")
}
},
}
func init() {
rootCmd.AddCommand(getCmd)
+ getCmd.Flags().StringSliceVarP(&inputFiles, "filename", "f", []string{}, "Filename of the input file")
}
diff --git a/src/tools/emcoctl/cmd/getall.go b/src/tools/emcoctl/cmd/getall.go
deleted file mode 100644
index 329b2582..00000000
--- a/src/tools/emcoctl/cmd/getall.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright © 2020 Intel Corp
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package cmd
-
-import (
- "github.com/spf13/cobra"
-)
-
-// getallCmd represents the getall command
-var getallCmd = &cobra.Command{
- Use: "getall",
- Short: "Get all resources in the file provided",
- Run: func(cmd *cobra.Command, args []string) {
- resources := readResources()
- c := NewRestClient()
- for _, res := range resources {
- c.RestClientGetAll(res.anchor)
- }
- },
-}
-
-func init() {
- rootCmd.AddCommand(getallCmd)
- // Here you will define your flags and configuration settings.
- getallCmd.Flags().StringSliceVarP(&inputFiles, "filename", "f", []string{}, "Filename of the input file")
-}
diff --git a/src/tools/emcoctl/cmd/root.go b/src/tools/emcoctl/cmd/root.go
index 4c1ac19f..7648606a 100644
--- a/src/tools/emcoctl/cmd/root.go
+++ b/src/tools/emcoctl/cmd/root.go
@@ -38,7 +38,6 @@ var rootCmd = &cobra.Command{
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if err := rootCmd.Execute(); err != nil {
- fmt.Println("Test")
fmt.Println(err)
os.Exit(1)
}
@@ -55,7 +54,7 @@ func init() {
// initConfig reads in config file and ENV variables if set.
func initConfig() {
- cfgFile = "emco-cfg.yaml"
+
if cfgFile != "" {
// Use config file from the flag.
viper.SetConfigFile(cfgFile)
@@ -66,7 +65,6 @@ func initConfig() {
fmt.Println(err)
os.Exit(1)
}
- fmt.Println(home)
// Search config in home directory with name ".emco" (without extension).
viper.AddConfigPath(home)
viper.SetConfigName(".emco")
@@ -81,5 +79,8 @@ func initConfig() {
if err != nil {
fmt.Printf("Unable to decode into struct, %v", err)
}
+ } else {
+ fmt.Println("Warning: No Configuration File found. Using defaults")
+ SetDefaultConfiguration()
}
}
\ No newline at end of file
diff --git a/src/tools/emcoctl/cmd/utils.go b/src/tools/emcoctl/cmd/utils.go
index 34063eee..62b33755 100644
--- a/src/tools/emcoctl/cmd/utils.go
+++ b/src/tools/emcoctl/cmd/utils.go
@@ -179,12 +179,30 @@ func (r RestyClient) RestClientMultipartPost(anchor string, body []byte, file st
}
return pkgerrors.Errorf("Server Multipart Post Error")
}
-// RestClientGetAll returns all resource in the input file
-func (r RestyClient) RestClientGetAll(anchor string) error {
+
+// RestClientGetAnchor gets the resource at the given anchor
+func (r RestyClient) RestClientGetAnchor(anchor string) error {
url, err := GetURL(anchor)
if err != nil {
return err
}
+ s := strings.Split(anchor, "/")
+ if len(s) >= 3 {
+ a := s[len(s)-2]
+ // Determine if multipart
+ if a == "apps" || a == "profiles" || a == "clusters" {
+ // Supports only getting metadata
+ resp, err := r.client.R().
+ SetHeader("Accept", "application/json").
+ Get(url)
+ if err != nil {
+ fmt.Println(err)
+ return err
+ }
+ fmt.Println("URL:", anchor, "Response Code:", resp.StatusCode(), "Response:", resp)
+ return nil
+ }
+ }
resp, err := r.client.R().
Get(url)
if err != nil {
@@ -194,34 +212,58 @@ func (r RestyClient) RestClientGetAll(anchor string) error {
fmt.Println("URL:", anchor, "Response Code:", resp.StatusCode(), "Response:", resp)
return nil
}
+
// RestClientGet gets resource
-func (r RestyClient) RestClientGet(anchor string) error {
+func (r RestyClient) RestClientGet(anchor string, body []byte) error {
+ if anchor == "" {
+ return pkgerrors.Errorf("Anchor can't be empty")
+ }
s := strings.Split(anchor, "/")
- a := s[len(s)-2]
- // Determine if multipart
- if a == "apps" || a == "profiles" || a == "clusters" {
- url, err := GetURL(anchor)
- if err != nil {
- return err
- }
- // Supports only getting metadata
- resp, err := r.client.R().
- SetHeader("Accept", "application/json").
- Get(url)
- if err != nil {
- fmt.Println(err)
- return err
+ a := s[len(s)-1]
+ if a == "instantiate" || a == "apply" || a == "approve" || a == "terminate" {
+ // No get for these
+ return nil
+ }
+ var e emcoBody
+ err := json.Unmarshal(body, &e)
+ if err != nil {
+ fmt.Println(err)
+ return err
+ }
+ if e.Meta.Name != "" {
+ name := e.Meta.Name
+ anchor = anchor + "/" + name
+ if a == "composite-apps" {
+ var cav emcoCompositeAppSpec
+ err := mapstructure.Decode(e.Spec, &cav)
+ if err != nil {
+ fmt.Println("mapstruct error")
+ return err
+ }
+ anchor = anchor + "/" + cav.Version
}
- fmt.Println("URL:", anchor, "Response Code:", resp.StatusCode(), "Response:", resp)
- } else {
- r.RestClientGetAll(anchor)
+ } else if e.Label != "" {
+ anchor = anchor + "/" + e.Label
}
+ return r.RestClientGetAnchor(anchor)
+}
+// RestClientDeleteAnchor deletes the resource at the given anchor
+func (r RestyClient) RestClientDeleteAnchor(anchor string) error {
+ url, err := GetURL(anchor)
+ if err != nil {
+ return err
+ }
+ resp, err := r.client.R().Delete(url)
+ if err != nil {
+ fmt.Println(err)
+ return err
+ }
+ fmt.Println("URL:", anchor, "Response Code:", resp.StatusCode(), "Response:", resp)
return nil
}
// RestClientDelete calls rest delete command
func (r RestyClient) RestClientDelete(anchor string, body []byte) error {
- var url string
s := strings.Split(anchor, "/")
a := s[len(s)-1]
@@ -229,13 +271,11 @@ func (r RestyClient) RestClientDelete(anchor string, body []byte) error {
// Change instantiate to terminate
s[len(s)-1] = "terminate"
anchor = strings.Join(s[:], "/")
- fmt.Println("URL:", anchor)
return r.RestClientPost(anchor, []byte{})
} else if a == "apply" {
// Change apply to terminate
s[len(s)-1] = "terminate"
anchor = strings.Join(s[:], "/")
- fmt.Println("URL:", anchor)
return r.RestClientPost(anchor, []byte{})
} else if a == "approve" || a == "status" {
// Approve and status don't have delete
@@ -261,19 +301,10 @@ func (r RestyClient) RestClientDelete(anchor string, body []byte) error {
}
anchor = anchor + "/" + cav.Version
}
+ } else if e.Label != "" {
+ anchor = anchor + "/" + e.Label
}
- url, err = GetURL(anchor)
- if err != nil {
- return err
- }
- resp, err := r.client.R().
- Delete(url)
- if err != nil {
- fmt.Println(err)
- return err
- }
- fmt.Println("URL:", anchor, "Response Code:", resp.StatusCode())
- return nil
+ return r.RestClientDeleteAnchor(anchor)
}
// GetURL reads the configuration file to get URL
func GetURL(anchor string) (string, error) {
diff --git a/src/tools/emcoctl/examples/emco-cfg.yaml b/src/tools/emcoctl/examples/emco-cfg.yaml
index f2790654..c1703def 100644
--- a/src/tools/emcoctl/examples/emco-cfg.yaml
+++ b/src/tools/emcoctl/examples/emco-cfg.yaml
@@ -3,4 +3,10 @@
port: 9015
clm:
host: localhost
- port: 9061
\ No newline at end of file
+ port: 9061
+ ncm:
+ host: localhost
+ port: 9031
+ ovnaction:
+ host: localhost
+ port: 9051
\ No newline at end of file
diff --git a/src/tools/emcoctl/examples/vfw.yaml b/src/tools/emcoctl/examples/vfw.yaml
new file mode 100644
index 00000000..b5df4834
--- /dev/null
+++ b/src/tools/emcoctl/examples/vfw.yaml
@@ -0,0 +1,408 @@
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: rsync
+spec:
+ host: "192.168.121.6"
+ port: 30546
+---
+
+#creating controller entries
+version: emco/v2
+resourceContext:
+ anchor: controllers
+metadata :
+ name: ovnaction
+spec:
+ host: "192.168.121.6"
+ port: 32259
+ type: "action"
+ priority: 1
+
+---
+
+#creating cluster provider
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers
+metadata :
+ name: vfw-cluster-provider
+
+---
+#creating cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters
+metadata :
+ name: edge01
+file:
+ kubeconfig
+
+---
+#Add label cluster
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters/edge01/labels
+label-name: LabelA
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters/edge01/networks
+metadata:
+ name: emco-private-net
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 10.10.20.0/24
+ name: subnet1
+ gateway: 10.10.20.1/24
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters/edge01/networks
+metadata:
+ name: emco-unprotected-net
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 192.168.10.0/24
+ name: subnet1
+ gateway: 192.168.10.1/24
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters/edge01/networks
+metadata:
+ name: protected-private-net
+spec:
+ cniType: ovn4nfv
+ ipv4Subnets:
+ - subnet: 192.168.20.0/24
+ name: subnet1
+ gateway: 192.168.20.1/24
+
+---
+version: emco/v2
+resourceContext:
+ anchor: cluster-providers/vfw-cluster-provider/clusters/edge01/apply
+
+---
+#create project
+version: emco/v2
+resourceContext:
+ anchor: projects
+metadata :
+ name: testvfw
+
+---
+#creating collection composite app entry
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps
+metadata :
+ name: compositevfw
+spec:
+ version: v1
+
+---
+#adding packetgen app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/apps
+metadata :
+ name: packetgen
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/packetgen.tar.gz
+
+---
+#adding firewall app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/apps
+metadata :
+ name: firewall
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/firewall.tar.gz
+
+---
+#adding sink app to the composite app
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/apps
+metadata :
+ name: sink
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/sink.tar.gz
+
+---
+#creating collection composite profile entry
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/composite-profiles
+metadata :
+ name: vfw_composite-profile
+
+---
+#adding packetgen app profile to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/composite-profiles/vfw_composite-profile/profiles
+metadata :
+ name: packetgen-profile
+spec:
+ app-name: packetgen
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
+
+---
+#adding firewall app profiles to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/composite-profiles/vfw_composite-profile/profiles
+metadata :
+ name: firewall-profile
+spec:
+ app-name: firewall
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
+
+---
+#adding sink app profile to the composite profile
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/composite-profiles/vfw_composite-profile/profiles
+metadata :
+ name: sink-profile
+spec:
+ app-name: sink
+file:
+ /home/vagrant/multicloud-k8s/kud/demo/composite-firewall/profile.tar.gz
+
+---
+#create the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/generic-placement-intents
+metadata :
+ name: fw-placement-intent
+spec:
+ logical-cloud: NA
+
+---
+#add the packetgen app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/generic-placement-intents/fw-placement-intent/app-intents
+metadata:
+ name: packetgen-placement-intent
+spec:
+ app-name: packetgen
+ intent:
+ allOf:
+ - provider-name: vfw-cluster-provider
+ cluster-label-name: LabelA
+---
+#add the firewall app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/generic-placement-intents/fw-placement-intent/app-intents
+metadata:
+ name: firewall-placement-intent
+spec:
+ app-name: firewall
+ intent:
+ allOf:
+ - provider-name: vfw-cluster-provider
+ cluster-label-name: LabelA
+
+---
+#add the sink app placement intent to the generic placement intent
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/generic-placement-intents/fw-placement-intent/app-intents
+metadata:
+ name: sink-placement-intent
+spec:
+ app-name: sink
+ intent:
+ allOf:
+ - provider-name: vfw-cluster-provider
+ cluster-label-name: LabelA
+
+---
+#creating network controller intent
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent
+metadata :
+ name: vfw_ovnaction_intent
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents
+metadata :
+ name: packetgen_workload_intent
+spec:
+ application-name: packetgen
+ workload-resource: r1-packetgen
+ type: Deployment
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents
+metadata :
+ name: firewall_workload_intent
+spec:
+ application-name: firewall
+ workload-resource: r1-firewall
+ type: Deployment
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents
+metadata :
+ name: sink_workload_intent
+spec:
+ application-name: sink
+ workload-resource: r1-sink
+ type: Deployment
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/packetgen_workload_intent/interfaces
+metadata :
+ name: packetgen_unprotected_if
+spec:
+ interface: eth1
+ name: unprotected-private-net
+ defaultGateway: "false"
+ ipAddress: 192.168.10.2
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/packetgen_workload_intent/interfaces
+metadata :
+ name: packetgen_emco_if
+spec:
+ interface: eth2
+ name: emco-private-net
+ defaultGateway: "false"
+ ipAddress: 10.10.20.2
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/firewall_workload_intent/interfaces
+metadata :
+ name: firewall_emco_if
+spec:
+ interface: eth3
+ name: emco-private-net
+ defaultGateway: "false"
+ ipAddress: 10.10.20.3
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/firewall_workload_intent/interfaces
+metadata :
+ name: firewall_unprotected_if
+spec:
+ interface: eth1
+ name: unprotected-private-net
+ defaultGateway: "false"
+ ipAddress: 192.168.10.3
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/firewall_workload_intent/interfaces
+metadata :
+ name: firewall_protected_if
+spec:
+ interface: eth2
+ name: protected-private-net
+ defaultGateway: "false"
+ ipAddress: 192.168.20.2
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/sink_workload_intent/interfaces
+metadata :
+ name: sink_protected_if
+spec:
+ interface: eth1
+ name: protected-private-net
+ defaultGateway: "false"
+ ipAddress: 192.168.20.3
+
+---
+#
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/network-controller-intent/vfw_ovnaction_intent/workload-intents/sink_workload_intent/interfaces
+metadata :
+ name: sink_emco_if
+spec:
+ interface: eth2
+ name: emco-private-net
+ defaultGateway: "false"
+ ipAddress: 10.10.20.4
+
+---
+#create deployment intent group
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups
+metadata :
+ name: vfw_deployment_intent_group
+spec:
+ profile: vfw_composite-profile
+ version: r1
+ override-values:
+ - app-name: packetgen
+ values:
+ ".Values.service.ports.nodePort": '30888'
+ - app-name: firewall
+ values:
+ ".Values.global.dcaeCollectorIp": 1.2.3.4
+ ".Values.global.dcaeCollectorPort": '8888'
+ - app-name: sink
+ values:
+ ".Values.service.ports.nodePort": '30677'
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group/intents
+metadata :
+ name: fw-deployment-intent
+spec:
+ intent:
+ genericPlacementIntent: fw-placement-intent
+ ovnaction: vfw_ovnaction_intent
+
+---
+version: emco/v2
+resourceContext:
+ anchor: projects/testvfw/composite-apps/compositevfw/v1/deployment-intent-groups/vfw_deployment_intent_group/approve