author     Eric Multanen <eric.w.multanen@intel.com>   2020-05-28 17:25:30 -0700
committer  Eric Multanen <eric.w.multanen@intel.com>   2020-06-02 14:00:07 -0700
commit     a6e2a3a65a6e4f3f1d964578ae4e60c6f9dd1184 (patch)
tree       ab7fa1157f9b98840b0482592c886da61e62cc6d /src/ncm
parent     ad7782cbf83c11f152a6457f3808a4da99a1ae56 (diff)
Reorganize ncm packages to align with architecture
Reorganize the ncm packages to delineate:
- cluster provider and cluster management
- virtual and provider network intent management
- intent scheduler
- internal ovn4k8s network controller

Overall, no code changes, just moving things around.

Issue-ID: MULTICLOUD-1029
Signed-off-by: Eric Multanen <eric.w.multanen@intel.com>
Change-Id: I3d43c7e4eb6f285b51c0385ba18626d3511a14f5
Diffstat (limited to 'src/ncm')
-rw-r--r--  src/ncm/api/api.go                                48
-rw-r--r--  src/ncm/api/clusterhandler.go                     48
-rw-r--r--  src/ncm/api/clusterhandler_test.go               263
-rw-r--r--  src/ncm/api/networkhandler.go                     15
-rw-r--r--  src/ncm/api/providernethandler.go                 23
-rw-r--r--  src/ncm/api/schedulerhandler.go                   63
-rw-r--r--  src/ncm/internal/grpc/rsyncclient.go (renamed from src/ncm/pkg/grpc/rsyncclient.go)     0
-rw-r--r--  src/ncm/internal/ovncontroller/ovncontroller.go  135
-rw-r--r--  src/ncm/pkg/cluster/cluster.go (renamed from src/ncm/pkg/module/cluster.go)           312
-rw-r--r--  src/ncm/pkg/module/module.go                      20
-rw-r--r--  src/ncm/pkg/module/types/module_definitions.go    24
-rw-r--r--  src/ncm/pkg/networkintents/network.go (renamed from src/ncm/pkg/module/network.go)     32
-rw-r--r--  src/ncm/pkg/networkintents/providernet.go (renamed from src/ncm/pkg/module/providernet.go)  34
-rw-r--r--  src/ncm/pkg/networkintents/types/types.go         90
-rw-r--r--  src/ncm/pkg/scheduler/scheduler.go               220
15 files changed, 804 insertions, 523 deletions
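
For orientation, the reorganization splits the former catch-all pkg/module import into per-concern packages. The following is a hypothetical consumer file, not part of this commit; the constructors and types referenced are the ones visible in the hunks below.

// Hypothetical orientation snippet, not part of the commit: the former
// single pkg/module import is split into per-concern packages.
package example

import (
	"github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"        // cluster providers, clusters, labels, kv pairs
	"github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents" // virtual and provider network intents
	"github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"      // apply/terminate of network intents
)

// Reference the imports; these identifiers appear in the diff below.
var (
	_ = cluster.NewClusterClient
	_ = networkintents.NewNetworkClient
	_ *scheduler.SchedulerClient
)
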
diff --git a/src/ncm/api/api.go b/src/ncm/api/api.go
index 2b9c1b17..a304516c 100644
--- a/src/ncm/api/api.go
+++ b/src/ncm/api/api.go
@@ -20,33 +20,43 @@ import (
"reflect"
"github.com/gorilla/mux"
- moduleLib "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"
)
-var moduleClient *moduleLib.Client
+var moduleClient *module.Client
// For the given client and testClient, if the testClient is not null and
// implements the client manager interface corresponding to client, then
// return the testClient, otherwise return the client.
func setClient(client, testClient interface{}) interface{} {
switch cl := client.(type) {
- case *moduleLib.ClusterClient:
- if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*moduleLib.ClusterManager)(nil)).Elem()) {
- c, ok := testClient.(moduleLib.ClusterManager)
+ case *cluster.ClusterClient:
+ if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*cluster.ClusterManager)(nil)).Elem()) {
+ c, ok := testClient.(cluster.ClusterManager)
if ok {
return c
}
}
- case *moduleLib.NetworkClient:
- if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*moduleLib.NetworkManager)(nil)).Elem()) {
- c, ok := testClient.(moduleLib.NetworkManager)
+ case *networkintents.NetworkClient:
+ if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*networkintents.NetworkManager)(nil)).Elem()) {
+ c, ok := testClient.(networkintents.NetworkManager)
if ok {
return c
}
}
- case *moduleLib.ProviderNetClient:
- if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*moduleLib.ProviderNetManager)(nil)).Elem()) {
- c, ok := testClient.(moduleLib.ProviderNetManager)
+ case *networkintents.ProviderNetClient:
+ if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*networkintents.ProviderNetManager)(nil)).Elem()) {
+ c, ok := testClient.(networkintents.ProviderNetManager)
+ if ok {
+ return c
+ }
+ }
+ case *scheduler.SchedulerClient:
+ if testClient != nil && reflect.TypeOf(testClient).Implements(reflect.TypeOf((*scheduler.SchedulerManager)(nil)).Elem()) {
+ c, ok := testClient.(scheduler.SchedulerManager)
if ok {
return c
}
@@ -61,12 +71,12 @@ func setClient(client, testClient interface{}) interface{} {
// testClient parameter allows unit testing for a given client
func NewRouter(testClient interface{}) *mux.Router {
- moduleClient = moduleLib.NewClient()
+ moduleClient = module.NewClient()
router := mux.NewRouter().PathPrefix("/v2").Subrouter()
clusterHandler := clusterHandler{
- client: setClient(moduleClient.Cluster, testClient).(moduleLib.ClusterManager),
+ client: setClient(moduleClient.Cluster, testClient).(cluster.ClusterManager),
}
router.HandleFunc("/cluster-providers", clusterHandler.createClusterProviderHandler).Methods("POST")
router.HandleFunc("/cluster-providers", clusterHandler.getClusterProviderHandler).Methods("GET")
@@ -77,8 +87,6 @@ func NewRouter(testClient interface{}) *mux.Router {
router.HandleFunc("/cluster-providers/{provider-name}/clusters", clusterHandler.getClusterHandler).Queries("label", "{label}")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{name}", clusterHandler.getClusterHandler).Methods("GET")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{name}", clusterHandler.deleteClusterHandler).Methods("DELETE")
- router.HandleFunc("/cluster-providers/{provider-name}/clusters/{name}/apply", clusterHandler.applyClusterHandler).Methods("POST")
- router.HandleFunc("/cluster-providers/{provider-name}/clusters/{name}/terminate", clusterHandler.terminateClusterHandler).Methods("POST")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/labels", clusterHandler.createClusterLabelHandler).Methods("POST")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/labels", clusterHandler.getClusterLabelHandler).Methods("GET")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/labels/{label}", clusterHandler.getClusterLabelHandler).Methods("GET")
@@ -89,7 +97,7 @@ func NewRouter(testClient interface{}) *mux.Router {
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/kv-pairs/{kvpair}", clusterHandler.deleteClusterKvPairsHandler).Methods("DELETE")
networkHandler := networkHandler{
- client: setClient(moduleClient.Network, testClient).(moduleLib.NetworkManager),
+ client: setClient(moduleClient.Network, testClient).(networkintents.NetworkManager),
}
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/networks", networkHandler.createNetworkHandler).Methods("POST")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/networks", networkHandler.getNetworkHandler).Methods("GET")
@@ -98,7 +106,7 @@ func NewRouter(testClient interface{}) *mux.Router {
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/networks/{name}", networkHandler.deleteNetworkHandler).Methods("DELETE")
providernetHandler := providernetHandler{
- client: setClient(moduleClient.ProviderNet, testClient).(moduleLib.ProviderNetManager),
+ client: setClient(moduleClient.ProviderNet, testClient).(networkintents.ProviderNetManager),
}
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/provider-networks", providernetHandler.createProviderNetHandler).Methods("POST")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/provider-networks", providernetHandler.getProviderNetHandler).Methods("GET")
@@ -106,5 +114,11 @@ func NewRouter(testClient interface{}) *mux.Router {
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/provider-networks/{name}", providernetHandler.getProviderNetHandler).Methods("GET")
router.HandleFunc("/cluster-providers/{provider-name}/clusters/{cluster-name}/provider-networks/{name}", providernetHandler.deleteProviderNetHandler).Methods("DELETE")
+ schedulerHandler := schedulerHandler{
+ client: setClient(moduleClient.Scheduler, testClient).(scheduler.SchedulerManager),
+ }
+ router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/apply", schedulerHandler.applySchedulerHandler).Methods("POST")
+ router.HandleFunc("/cluster-providers/{cluster-provider}/clusters/{cluster}/terminate", schedulerHandler.terminateSchedulerHandler).Methods("POST")
+
return router
}
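
The setClient helper in the hunk above is what lets unit tests substitute a mock for any one backend client while the others stay real. Below is a minimal sketch of exercising the new scheduler case, assuming scheduler.SchedulerManager declares only the two methods the handlers call; the mock type and function names are illustrative and would live in the api package's test files, where gorilla/mux is already imported.

// fakeScheduler is an illustrative mock; if it satisfies scheduler.SchedulerManager,
// setClient routes scheduler requests to it and leaves the other handlers on real clients.
type fakeScheduler struct{ err error }

func (f *fakeScheduler) ApplyNetworkIntents(provider, name string) error     { return f.err }
func (f *fakeScheduler) TerminateNetworkIntents(provider, name string) error { return f.err }

func newSchedulerTestRouter() *mux.Router {
	// Only the scheduler handler is mocked; the rest use the real module clients.
	return NewRouter(&fakeScheduler{})
}
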
diff --git a/src/ncm/api/clusterhandler.go b/src/ncm/api/clusterhandler.go
index 78453aa8..08040408 100644
--- a/src/ncm/api/clusterhandler.go
+++ b/src/ncm/api/clusterhandler.go
@@ -27,7 +27,7 @@ import (
"net/http"
"net/textproto"
- moduleLib "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ clusterPkg "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
"github.com/gorilla/mux"
)
@@ -37,12 +37,12 @@ import (
type clusterHandler struct {
// Interface that implements Cluster operations
// We will set this variable with a mock interface for testing
- client moduleLib.ClusterManager
+ client clusterPkg.ClusterManager
}
// Create handles creation of the ClusterProvider entry in the database
func (h clusterHandler) createClusterProviderHandler(w http.ResponseWriter, r *http.Request) {
- var p moduleLib.ClusterProvider
+ var p clusterPkg.ClusterProvider
err := json.NewDecoder(r.Body).Decode(&p)
@@ -125,8 +125,8 @@ func (h clusterHandler) deleteClusterProviderHandler(w http.ResponseWriter, r *h
func (h clusterHandler) createClusterHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
provider := vars["provider-name"]
- var p moduleLib.Cluster
- var q moduleLib.ClusterContent
+ var p clusterPkg.Cluster
+ var q clusterPkg.ClusterContent
// Implemenation using multipart form
// Review and enable/remove at a later date
@@ -213,7 +213,7 @@ func (h clusterHandler) getClusterHandler(w http.ResponseWriter, r *http.Request
// handle the get all clusters case - return a list of only the json parts
if len(name) == 0 {
- var retList []moduleLib.Cluster
+ var retList []clusterPkg.Cluster
ret, err := h.client.GetClusters(provider)
if err != nil {
@@ -222,7 +222,7 @@ func (h clusterHandler) getClusterHandler(w http.ResponseWriter, r *http.Request
}
for _, cl := range ret {
- retList = append(retList, moduleLib.Cluster{Metadata: cl.Metadata})
+ retList = append(retList, clusterPkg.Cluster{Metadata: cl.Metadata})
}
w.Header().Set("Content-Type", "application/json")
@@ -324,42 +324,12 @@ func (h clusterHandler) deleteClusterHandler(w http.ResponseWriter, r *http.Requ
w.WriteHeader(http.StatusNoContent)
}
-// apply network intents associated with the cluster
-func (h clusterHandler) applyClusterHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- provider := vars["provider-name"]
- name := vars["name"]
-
- err := h.client.ApplyNetworkIntents(provider, name)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- w.WriteHeader(http.StatusNoContent)
-}
-
-// terminate network intents associated with the cluster
-func (h clusterHandler) terminateClusterHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- provider := vars["provider-name"]
- name := vars["name"]
-
- err := h.client.TerminateNetworkIntents(provider, name)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- w.WriteHeader(http.StatusNoContent)
-}
-
// Create handles creation of the ClusterLabel entry in the database
func (h clusterHandler) createClusterLabelHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
provider := vars["provider-name"]
cluster := vars["cluster-name"]
- var p moduleLib.ClusterLabel
+ var p clusterPkg.ClusterLabel
err := json.NewDecoder(r.Body).Decode(&p)
@@ -439,7 +409,7 @@ func (h clusterHandler) createClusterKvPairsHandler(w http.ResponseWriter, r *ht
vars := mux.Vars(r)
provider := vars["provider-name"]
cluster := vars["cluster-name"]
- var p moduleLib.ClusterKvPairs
+ var p clusterPkg.ClusterKvPairs
err := json.NewDecoder(r.Body).Decode(&p)
diff --git a/src/ncm/api/clusterhandler_test.go b/src/ncm/api/clusterhandler_test.go
index b32df527..a9290ad9 100644
--- a/src/ncm/api/clusterhandler_test.go
+++ b/src/ncm/api/clusterhandler_test.go
@@ -27,8 +27,9 @@ import (
"reflect"
"testing"
- moduleLib "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ types "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
pkgerrors "github.com/pkg/errors"
)
@@ -39,35 +40,35 @@ import (
type mockClusterManager struct {
// Items and err will be used to customize each test
// via a localized instantiation of mockClusterManager
- ClusterProviderItems []moduleLib.ClusterProvider
- ClusterItems []moduleLib.Cluster
- ClusterContentItems []moduleLib.ClusterContent
+ ClusterProviderItems []cluster.ClusterProvider
+ ClusterItems []cluster.Cluster
+ ClusterContentItems []cluster.ClusterContent
ClusterContextItems []appcontext.AppContext
- ClusterLabelItems []moduleLib.ClusterLabel
- ClusterKvPairsItems []moduleLib.ClusterKvPairs
+ ClusterLabelItems []cluster.ClusterLabel
+ ClusterKvPairsItems []cluster.ClusterKvPairs
ClusterList []string
Err error
}
-func (m *mockClusterManager) CreateClusterProvider(inp moduleLib.ClusterProvider) (moduleLib.ClusterProvider, error) {
+func (m *mockClusterManager) CreateClusterProvider(inp cluster.ClusterProvider) (cluster.ClusterProvider, error) {
if m.Err != nil {
- return moduleLib.ClusterProvider{}, m.Err
+ return cluster.ClusterProvider{}, m.Err
}
return m.ClusterProviderItems[0], nil
}
-func (m *mockClusterManager) GetClusterProvider(name string) (moduleLib.ClusterProvider, error) {
+func (m *mockClusterManager) GetClusterProvider(name string) (cluster.ClusterProvider, error) {
if m.Err != nil {
- return moduleLib.ClusterProvider{}, m.Err
+ return cluster.ClusterProvider{}, m.Err
}
return m.ClusterProviderItems[0], nil
}
-func (m *mockClusterManager) GetClusterProviders() ([]moduleLib.ClusterProvider, error) {
+func (m *mockClusterManager) GetClusterProviders() ([]cluster.ClusterProvider, error) {
if m.Err != nil {
- return []moduleLib.ClusterProvider{}, m.Err
+ return []cluster.ClusterProvider{}, m.Err
}
return m.ClusterProviderItems, nil
@@ -77,25 +78,25 @@ func (m *mockClusterManager) DeleteClusterProvider(name string) error {
return m.Err
}
-func (m *mockClusterManager) CreateCluster(provider string, inp moduleLib.Cluster, inq moduleLib.ClusterContent) (moduleLib.Cluster, error) {
+func (m *mockClusterManager) CreateCluster(provider string, inp cluster.Cluster, inq cluster.ClusterContent) (cluster.Cluster, error) {
if m.Err != nil {
- return moduleLib.Cluster{}, m.Err
+ return cluster.Cluster{}, m.Err
}
return m.ClusterItems[0], nil
}
-func (m *mockClusterManager) GetCluster(provider, name string) (moduleLib.Cluster, error) {
+func (m *mockClusterManager) GetCluster(provider, name string) (cluster.Cluster, error) {
if m.Err != nil {
- return moduleLib.Cluster{}, m.Err
+ return cluster.Cluster{}, m.Err
}
return m.ClusterItems[0], nil
}
-func (m *mockClusterManager) GetClusterContent(provider, name string) (moduleLib.ClusterContent, error) {
+func (m *mockClusterManager) GetClusterContent(provider, name string) (cluster.ClusterContent, error) {
if m.Err != nil {
- return moduleLib.ClusterContent{}, m.Err
+ return cluster.ClusterContent{}, m.Err
}
return m.ClusterContentItems[0], nil
@@ -109,9 +110,9 @@ func (m *mockClusterManager) GetClusterContext(provider, name string) (appcontex
return m.ClusterContextItems[0], nil
}
-func (m *mockClusterManager) GetClusters(provider string) ([]moduleLib.Cluster, error) {
+func (m *mockClusterManager) GetClusters(provider string) ([]cluster.Cluster, error) {
if m.Err != nil {
- return []moduleLib.Cluster{}, m.Err
+ return []cluster.Cluster{}, m.Err
}
return m.ClusterItems, nil
@@ -129,67 +130,59 @@ func (m *mockClusterManager) DeleteCluster(provider, name string) error {
return m.Err
}
-func (m *mockClusterManager) ApplyNetworkIntents(provider, name string) error {
- return m.Err
-}
-
-func (m *mockClusterManager) TerminateNetworkIntents(provider, name string) error {
- return m.Err
-}
-
-func (m *mockClusterManager) CreateClusterLabel(provider, cluster string, inp moduleLib.ClusterLabel) (moduleLib.ClusterLabel, error) {
+func (m *mockClusterManager) CreateClusterLabel(provider, clusterName string, inp cluster.ClusterLabel) (cluster.ClusterLabel, error) {
if m.Err != nil {
- return moduleLib.ClusterLabel{}, m.Err
+ return cluster.ClusterLabel{}, m.Err
}
return m.ClusterLabelItems[0], nil
}
-func (m *mockClusterManager) GetClusterLabel(provider, cluster, label string) (moduleLib.ClusterLabel, error) {
+func (m *mockClusterManager) GetClusterLabel(provider, clusterName, label string) (cluster.ClusterLabel, error) {
if m.Err != nil {
- return moduleLib.ClusterLabel{}, m.Err
+ return cluster.ClusterLabel{}, m.Err
}
return m.ClusterLabelItems[0], nil
}
-func (m *mockClusterManager) GetClusterLabels(provider, cluster string) ([]moduleLib.ClusterLabel, error) {
+func (m *mockClusterManager) GetClusterLabels(provider, clusterName string) ([]cluster.ClusterLabel, error) {
if m.Err != nil {
- return []moduleLib.ClusterLabel{}, m.Err
+ return []cluster.ClusterLabel{}, m.Err
}
return m.ClusterLabelItems, nil
}
-func (m *mockClusterManager) DeleteClusterLabel(provider, cluster, label string) error {
+func (m *mockClusterManager) DeleteClusterLabel(provider, clusterName, label string) error {
return m.Err
}
-func (m *mockClusterManager) CreateClusterKvPairs(provider, cluster string, inp moduleLib.ClusterKvPairs) (moduleLib.ClusterKvPairs, error) {
+func (m *mockClusterManager) CreateClusterKvPairs(provider, clusterName string, inp cluster.ClusterKvPairs) (cluster.ClusterKvPairs, error) {
if m.Err != nil {
- return moduleLib.ClusterKvPairs{}, m.Err
+ return cluster.ClusterKvPairs{}, m.Err
}
return m.ClusterKvPairsItems[0], nil
}
-func (m *mockClusterManager) GetClusterKvPairs(provider, cluster, kvpair string) (moduleLib.ClusterKvPairs, error) {
+func (m *mockClusterManager) GetClusterKvPairs(provider, clusterName, kvpair string) (cluster.ClusterKvPairs, error) {
if m.Err != nil {
- return moduleLib.ClusterKvPairs{}, m.Err
+ return cluster.ClusterKvPairs{}, m.Err
}
return m.ClusterKvPairsItems[0], nil
}
-func (m *mockClusterManager) GetAllClusterKvPairs(provider, cluster string) ([]moduleLib.ClusterKvPairs, error) {
+func (m *mockClusterManager) GetAllClusterKvPairs(provider, clusterName string) ([]cluster.ClusterKvPairs, error) {
if m.Err != nil {
- return []moduleLib.ClusterKvPairs{}, m.Err
+ return []cluster.ClusterKvPairs{}, m.Err
}
return m.ClusterKvPairsItems, nil
}
-func (m *mockClusterManager) DeleteClusterKvPairs(provider, cluster, kvpair string) error {
+func (m *mockClusterManager) DeleteClusterKvPairs(provider, clusterName, kvpair string) error {
return m.Err
}
@@ -197,7 +190,7 @@ func TestClusterProviderCreateHandler(t *testing.T) {
testCases := []struct {
label string
reader io.Reader
- expected moduleLib.ClusterProvider
+ expected cluster.ClusterProvider
expectedCode int
clusterClient *mockClusterManager
}{
@@ -217,8 +210,8 @@ func TestClusterProviderCreateHandler(t *testing.T) {
"userData2": "some user data 2"
}
}`)),
- expected: moduleLib.ClusterProvider{
- Metadata: moduleLib.Metadata{
+ expected: cluster.ClusterProvider{
+ Metadata: types.Metadata{
Name: "clusterProviderTest",
Description: "testClusterProvider",
UserData1: "some user data 1",
@@ -227,9 +220,9 @@ func TestClusterProviderCreateHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterProviderItems: []moduleLib.ClusterProvider{
+ ClusterProviderItems: []cluster.ClusterProvider{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "clusterProviderTest",
Description: "testClusterProvider",
UserData1: "some user data 1",
@@ -265,7 +258,7 @@ func TestClusterProviderCreateHandler(t *testing.T) {
//Check returned body only if statusCreated
if resp.StatusCode == http.StatusCreated {
- got := moduleLib.ClusterProvider{}
+ got := cluster.ClusterProvider{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -281,7 +274,7 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
testCases := []struct {
label string
- expected []moduleLib.ClusterProvider
+ expected []cluster.ClusterProvider
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -289,9 +282,9 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
{
label: "Get Cluster Provider",
expectedCode: http.StatusOK,
- expected: []moduleLib.ClusterProvider{
+ expected: []cluster.ClusterProvider{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testClusterProvider1",
Description: "testClusterProvider 1 description",
UserData1: "some user data 1",
@@ -299,7 +292,7 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testClusterProvider2",
Description: "testClusterProvider 2 description",
UserData1: "some user data A",
@@ -309,9 +302,9 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterProviderItems: []moduleLib.ClusterProvider{
+ ClusterProviderItems: []cluster.ClusterProvider{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testClusterProvider1",
Description: "testClusterProvider 1 description",
UserData1: "some user data 1",
@@ -319,7 +312,7 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testClusterProvider2",
Description: "testClusterProvider 2 description",
UserData1: "some user data A",
@@ -343,7 +336,7 @@ func TestClusterProviderGetAllHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := []moduleLib.ClusterProvider{}
+ got := []cluster.ClusterProvider{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -359,7 +352,7 @@ func TestClusterProviderGetHandler(t *testing.T) {
testCases := []struct {
label string
- expected moduleLib.ClusterProvider
+ expected cluster.ClusterProvider
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -367,8 +360,8 @@ func TestClusterProviderGetHandler(t *testing.T) {
{
label: "Get Cluster Provider",
expectedCode: http.StatusOK,
- expected: moduleLib.ClusterProvider{
- Metadata: moduleLib.Metadata{
+ expected: cluster.ClusterProvider{
+ Metadata: types.Metadata{
Name: "testClusterProvider",
Description: "testClusterProvider description",
UserData1: "some user data 1",
@@ -378,9 +371,9 @@ func TestClusterProviderGetHandler(t *testing.T) {
name: "testClusterProvider",
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterProviderItems: []moduleLib.ClusterProvider{
+ ClusterProviderItems: []cluster.ClusterProvider{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testClusterProvider",
Description: "testClusterProvider description",
UserData1: "some user data 1",
@@ -395,7 +388,7 @@ func TestClusterProviderGetHandler(t *testing.T) {
expectedCode: http.StatusInternalServerError,
name: "nonexistingclusterprovider",
clusterClient: &mockClusterManager{
- ClusterProviderItems: []moduleLib.ClusterProvider{},
+ ClusterProviderItems: []cluster.ClusterProvider{},
Err: pkgerrors.New("Internal Error"),
},
},
@@ -413,7 +406,7 @@ func TestClusterProviderGetHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := moduleLib.ClusterProvider{}
+ got := cluster.ClusterProvider{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -468,7 +461,7 @@ func TestClusterCreateHandler(t *testing.T) {
label string
metadata string
kubeconfig string
- expected moduleLib.Cluster
+ expected cluster.Cluster
expectedCode int
clusterClient *mockClusterManager
}{
@@ -494,8 +487,8 @@ of a file attached
to the creation
of clusterTest
`,
- expected: moduleLib.Cluster{
- Metadata: moduleLib.Metadata{
+ expected: cluster.Cluster{
+ Metadata: types.Metadata{
Name: "clusterTest",
Description: "testCluster",
UserData1: "some user data 1",
@@ -504,9 +497,9 @@ of clusterTest
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterProviderItems: []moduleLib.ClusterProvider{
+ ClusterProviderItems: []cluster.ClusterProvider{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "clusterProvider1",
Description: "ClusterProvider 1 description",
UserData1: "some user data 1",
@@ -514,9 +507,9 @@ of clusterTest
},
},
},
- ClusterItems: []moduleLib.Cluster{
+ ClusterItems: []cluster.Cluster{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "clusterTest",
Description: "testCluster",
UserData1: "some user data 1",
@@ -524,7 +517,7 @@ of clusterTest
},
},
},
- ClusterContentItems: []moduleLib.ClusterContent{
+ ClusterContentItems: []cluster.ClusterContent{
{
Kubeconfig: "dGVzdCBjb250ZW50cwpvZiBhIGZpbGUgYXR0YWNoZWQKdG8gdGhlIGNyZWF0aW9uCm9mIGNsdXN0ZXJUZXN0Cg==",
},
@@ -574,7 +567,7 @@ of clusterTest
//Check returned body only if statusCreated
if resp.StatusCode == http.StatusCreated {
- got := moduleLib.Cluster{}
+ got := cluster.Cluster{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -590,7 +583,7 @@ func TestClusterGetAllHandler(t *testing.T) {
testCases := []struct {
label string
- expected []moduleLib.Cluster
+ expected []cluster.Cluster
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -598,9 +591,9 @@ func TestClusterGetAllHandler(t *testing.T) {
{
label: "Get Clusters",
expectedCode: http.StatusOK,
- expected: []moduleLib.Cluster{
+ expected: []cluster.Cluster{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster1",
Description: "testCluster 1 description",
UserData1: "some user data 1",
@@ -608,7 +601,7 @@ func TestClusterGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster2",
Description: "testCluster 2 description",
UserData1: "some user data A",
@@ -618,9 +611,9 @@ func TestClusterGetAllHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterItems: []moduleLib.Cluster{
+ ClusterItems: []cluster.Cluster{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster1",
Description: "testCluster 1 description",
UserData1: "some user data 1",
@@ -628,7 +621,7 @@ func TestClusterGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster2",
Description: "testCluster 2 description",
UserData1: "some user data A",
@@ -636,7 +629,7 @@ func TestClusterGetAllHandler(t *testing.T) {
},
},
},
- ClusterContentItems: []moduleLib.ClusterContent{
+ ClusterContentItems: []cluster.ClusterContent{
// content here doesn't matter - just needs to be present
{
Kubeconfig: "dGVzdCBjb250ZW50cwpvZiBhIGZpbGUgYXR0YWNoZWQKdG8gdGhlIGNyZWF0aW9uCm9mIGNsdXN0ZXJUZXN0Cg==",
@@ -661,7 +654,7 @@ func TestClusterGetAllHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := []moduleLib.Cluster{}
+ got := []cluster.Cluster{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -677,7 +670,7 @@ func TestClusterGetHandler(t *testing.T) {
testCases := []struct {
label string
- expected moduleLib.Cluster
+ expected cluster.Cluster
name, version string
accept string
expectedCode int
@@ -687,8 +680,8 @@ func TestClusterGetHandler(t *testing.T) {
label: "Get Cluster with Accept: application/json",
accept: "application/json",
expectedCode: http.StatusOK,
- expected: moduleLib.Cluster{
- Metadata: moduleLib.Metadata{
+ expected: cluster.Cluster{
+ Metadata: types.Metadata{
Name: "testCluster",
Description: "testCluster description",
UserData1: "some user data 1",
@@ -698,9 +691,9 @@ func TestClusterGetHandler(t *testing.T) {
name: "testCluster",
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterItems: []moduleLib.Cluster{
+ ClusterItems: []cluster.Cluster{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster",
Description: "testCluster description",
UserData1: "some user data 1",
@@ -708,7 +701,7 @@ func TestClusterGetHandler(t *testing.T) {
},
},
},
- ClusterContentItems: []moduleLib.ClusterContent{
+ ClusterContentItems: []cluster.ClusterContent{
{
Kubeconfig: "dGVzdCBjb250ZW50cwpvZiBhIGZpbGUgYXR0YWNoZWQKdG8gdGhlIGNyZWF0aW9uCm9mIGNsdXN0ZXJUZXN0Cg==",
},
@@ -721,7 +714,7 @@ func TestClusterGetHandler(t *testing.T) {
expectedCode: http.StatusInternalServerError,
name: "nonexistingcluster",
clusterClient: &mockClusterManager{
- ClusterItems: []moduleLib.Cluster{},
+ ClusterItems: []cluster.Cluster{},
Err: pkgerrors.New("Internal Error"),
},
},
@@ -742,7 +735,7 @@ func TestClusterGetHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := moduleLib.Cluster{}
+ got := cluster.Cluster{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -776,9 +769,9 @@ of clusterTest
name: "testCluster",
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterItems: []moduleLib.Cluster{
+ ClusterItems: []cluster.Cluster{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "testCluster",
Description: "testCluster description",
UserData1: "some user data 1",
@@ -786,7 +779,7 @@ of clusterTest
},
},
},
- ClusterContentItems: []moduleLib.ClusterContent{
+ ClusterContentItems: []cluster.ClusterContent{
{
Kubeconfig: "dGVzdCBjb250ZW50cwpvZiBhIGZpbGUgYXR0YWNoZWQKdG8gdGhlIGNyZWF0aW9uCm9mIGNsdXN0ZXJUZXN0Cg==",
},
@@ -799,7 +792,7 @@ of clusterTest
expectedCode: http.StatusInternalServerError,
name: "nonexistingcluster",
clusterClient: &mockClusterManager{
- ClusterItems: []moduleLib.Cluster{},
+ ClusterItems: []cluster.Cluster{},
Err: pkgerrors.New("Internal Error"),
},
},
@@ -875,7 +868,7 @@ func TestClusterLabelCreateHandler(t *testing.T) {
testCases := []struct {
label string
reader io.Reader
- expected moduleLib.ClusterLabel
+ expected cluster.ClusterLabel
expectedCode int
clusterClient *mockClusterManager
}{
@@ -890,12 +883,12 @@ func TestClusterLabelCreateHandler(t *testing.T) {
reader: bytes.NewBuffer([]byte(`{
"label-name": "test-label"
}`)),
- expected: moduleLib.ClusterLabel{
+ expected: cluster.ClusterLabel{
LabelName: "test-label",
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterLabelItems: []moduleLib.ClusterLabel{
+ ClusterLabelItems: []cluster.ClusterLabel{
{
LabelName: "test-label",
},
@@ -916,7 +909,7 @@ func TestClusterLabelCreateHandler(t *testing.T) {
//Check returned body only if statusCreated
if resp.StatusCode == http.StatusCreated {
- got := moduleLib.ClusterLabel{}
+ got := cluster.ClusterLabel{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -932,7 +925,7 @@ func TestClusterLabelsGetHandler(t *testing.T) {
testCases := []struct {
label string
- expected []moduleLib.ClusterLabel
+ expected []cluster.ClusterLabel
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -940,7 +933,7 @@ func TestClusterLabelsGetHandler(t *testing.T) {
{
label: "Get Cluster Labels",
expectedCode: http.StatusOK,
- expected: []moduleLib.ClusterLabel{
+ expected: []cluster.ClusterLabel{
{
LabelName: "test-label1",
},
@@ -953,7 +946,7 @@ func TestClusterLabelsGetHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterLabelItems: []moduleLib.ClusterLabel{
+ ClusterLabelItems: []cluster.ClusterLabel{
{
LabelName: "test-label1",
},
@@ -980,7 +973,7 @@ func TestClusterLabelsGetHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := []moduleLib.ClusterLabel{}
+ got := []cluster.ClusterLabel{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -996,7 +989,7 @@ func TestClusterLabelGetHandler(t *testing.T) {
testCases := []struct {
label string
- expected moduleLib.ClusterLabel
+ expected cluster.ClusterLabel
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -1004,13 +997,13 @@ func TestClusterLabelGetHandler(t *testing.T) {
{
label: "Get Cluster Label",
expectedCode: http.StatusOK,
- expected: moduleLib.ClusterLabel{
+ expected: cluster.ClusterLabel{
LabelName: "testlabel",
},
name: "testlabel",
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterLabelItems: []moduleLib.ClusterLabel{
+ ClusterLabelItems: []cluster.ClusterLabel{
{
LabelName: "testlabel",
},
@@ -1022,7 +1015,7 @@ func TestClusterLabelGetHandler(t *testing.T) {
expectedCode: http.StatusInternalServerError,
name: "nonexistingclusterlabel",
clusterClient: &mockClusterManager{
- ClusterLabelItems: []moduleLib.ClusterLabel{},
+ ClusterLabelItems: []cluster.ClusterLabel{},
Err: pkgerrors.New("Internal Error"),
},
},
@@ -1040,7 +1033,7 @@ func TestClusterLabelGetHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := moduleLib.ClusterLabel{}
+ got := cluster.ClusterLabel{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -1094,7 +1087,7 @@ func TestClusterKvPairsCreateHandler(t *testing.T) {
testCases := []struct {
label string
reader io.Reader
- expected moduleLib.ClusterKvPairs
+ expected cluster.ClusterKvPairs
expectedCode int
clusterClient *mockClusterManager
}{
@@ -1124,14 +1117,14 @@ func TestClusterKvPairsCreateHandler(t *testing.T) {
]
}
}`)),
- expected: moduleLib.ClusterKvPairs{
- Metadata: moduleLib.Metadata{
+ expected: cluster.ClusterKvPairs{
+ Metadata: types.Metadata{
Name: "ClusterKvPair1",
Description: "test cluster kv pairs",
UserData1: "some user data 1",
UserData2: "some user data 2",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"key1": "value1",
@@ -1144,15 +1137,15 @@ func TestClusterKvPairsCreateHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterKvPairsItems: []moduleLib.ClusterKvPairs{
+ ClusterKvPairsItems: []cluster.ClusterKvPairs{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair1",
Description: "test cluster kv pairs",
UserData1: "some user data 1",
UserData2: "some user data 2",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"key1": "value1",
@@ -1180,7 +1173,7 @@ func TestClusterKvPairsCreateHandler(t *testing.T) {
//Check returned body only if statusCreated
if resp.StatusCode == http.StatusCreated {
- got := moduleLib.ClusterKvPairs{}
+ got := cluster.ClusterKvPairs{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -1196,7 +1189,7 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
testCases := []struct {
label string
- expected []moduleLib.ClusterKvPairs
+ expected []cluster.ClusterKvPairs
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -1204,15 +1197,15 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
{
label: "Get Cluster KvPairs",
expectedCode: http.StatusOK,
- expected: []moduleLib.ClusterKvPairs{
+ expected: []cluster.ClusterKvPairs{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair1",
Description: "test cluster kv pairs",
UserData1: "some user data 1",
UserData2: "some user data 2",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"key1": "value1",
@@ -1224,13 +1217,13 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair2",
Description: "test cluster kv pairs",
UserData1: "some user data A",
UserData2: "some user data B",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"keyA": "valueA",
@@ -1244,15 +1237,15 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
},
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterKvPairsItems: []moduleLib.ClusterKvPairs{
+ ClusterKvPairsItems: []cluster.ClusterKvPairs{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair1",
Description: "test cluster kv pairs",
UserData1: "some user data 1",
UserData2: "some user data 2",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"key1": "value1",
@@ -1264,13 +1257,13 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
},
},
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair2",
Description: "test cluster kv pairs",
UserData1: "some user data A",
UserData2: "some user data B",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"keyA": "valueA",
@@ -1298,7 +1291,7 @@ func TestClusterKvPairsGetAllHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := []moduleLib.ClusterKvPairs{}
+ got := []cluster.ClusterKvPairs{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
@@ -1314,7 +1307,7 @@ func TestClusterKvPairsGetHandler(t *testing.T) {
testCases := []struct {
label string
- expected moduleLib.ClusterKvPairs
+ expected cluster.ClusterKvPairs
name, version string
expectedCode int
clusterClient *mockClusterManager
@@ -1322,14 +1315,14 @@ func TestClusterKvPairsGetHandler(t *testing.T) {
{
label: "Get Cluster KV Pairs",
expectedCode: http.StatusOK,
- expected: moduleLib.ClusterKvPairs{
- Metadata: moduleLib.Metadata{
+ expected: cluster.ClusterKvPairs{
+ Metadata: types.Metadata{
Name: "ClusterKvPair2",
Description: "test cluster kv pairs",
UserData1: "some user data A",
UserData2: "some user data B",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"keyA": "valueA",
@@ -1343,15 +1336,15 @@ func TestClusterKvPairsGetHandler(t *testing.T) {
name: "ClusterKvPair2",
clusterClient: &mockClusterManager{
//Items that will be returned by the mocked Client
- ClusterKvPairsItems: []moduleLib.ClusterKvPairs{
+ ClusterKvPairsItems: []cluster.ClusterKvPairs{
{
- Metadata: moduleLib.Metadata{
+ Metadata: types.Metadata{
Name: "ClusterKvPair2",
Description: "test cluster kv pairs",
UserData1: "some user data A",
UserData2: "some user data B",
},
- Spec: moduleLib.ClusterKvSpec{
+ Spec: cluster.ClusterKvSpec{
Kv: []map[string]interface{}{
{
"keyA": "valueA",
@@ -1370,7 +1363,7 @@ func TestClusterKvPairsGetHandler(t *testing.T) {
expectedCode: http.StatusInternalServerError,
name: "nonexistingclusterkvpairs",
clusterClient: &mockClusterManager{
- ClusterKvPairsItems: []moduleLib.ClusterKvPairs{},
+ ClusterKvPairsItems: []cluster.ClusterKvPairs{},
Err: pkgerrors.New("Internal Error"),
},
},
@@ -1388,7 +1381,7 @@ func TestClusterKvPairsGetHandler(t *testing.T) {
//Check returned body only if statusOK
if resp.StatusCode == http.StatusOK {
- got := moduleLib.ClusterKvPairs{}
+ got := cluster.ClusterKvPairs{}
json.NewDecoder(resp.Body).Decode(&got)
if reflect.DeepEqual(testCase.expected, got) == false {
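
One visible effect in the tests above is that the Metadata type now comes from the orchestrator's shared module types package rather than ncm's own module package. A small sketch of constructing a cluster object with the shared type; the import path and field names are taken from the test hunks, while the values are illustrative.

package example

import (
	"github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
	types "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
)

// newExampleProvider builds a ClusterProvider using the shared Metadata type.
func newExampleProvider() cluster.ClusterProvider {
	return cluster.ClusterProvider{
		Metadata: types.Metadata{
			Name:        "clusterProviderTest",
			Description: "testClusterProvider",
			UserData1:   "some user data 1",
			UserData2:   "some user data 2",
		},
	}
}
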
diff --git a/src/ncm/api/networkhandler.go b/src/ncm/api/networkhandler.go
index 01d077a7..ed266697 100644
--- a/src/ncm/api/networkhandler.go
+++ b/src/ncm/api/networkhandler.go
@@ -22,7 +22,8 @@ import (
"io"
"net/http"
- moduleLib "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ netintents "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
pkgerrors "github.com/pkg/errors"
@@ -34,11 +35,11 @@ import (
type networkHandler struct {
// Interface that implements Cluster operations
// We will set this variable with a mock interface for testing
- client moduleLib.NetworkManager
+ client netintents.NetworkManager
}
// Check for valid format of input parameters
-func validateNetworkInputs(p moduleLib.Network) error {
+func validateNetworkInputs(p netintents.Network) error {
// validate name
errs := validation.IsValidName(p.Metadata.Name)
if len(errs) > 0 {
@@ -47,7 +48,7 @@ func validateNetworkInputs(p moduleLib.Network) error {
// validate cni type
found := false
- for _, val := range moduleLib.CNI_TYPES {
+ for _, val := range nettypes.CNI_TYPES {
if p.Spec.CniType == val {
found = true
break
@@ -59,7 +60,7 @@ func validateNetworkInputs(p moduleLib.Network) error {
subnets := p.Spec.Ipv4Subnets
for _, subnet := range subnets {
- err := moduleLib.ValidateSubnet(subnet)
+ err := nettypes.ValidateSubnet(subnet)
if err != nil {
return pkgerrors.Wrap(err, "invalid subnet")
}
@@ -69,7 +70,7 @@ func validateNetworkInputs(p moduleLib.Network) error {
// Create handles creation of the Network entry in the database
func (h networkHandler) createNetworkHandler(w http.ResponseWriter, r *http.Request) {
- var p moduleLib.Network
+ var p netintents.Network
vars := mux.Vars(r)
clusterProvider := vars["provider-name"]
cluster := vars["cluster-name"]
@@ -114,7 +115,7 @@ func (h networkHandler) createNetworkHandler(w http.ResponseWriter, r *http.Requ
// Put handles creation/update of the Network entry in the database
func (h networkHandler) putNetworkHandler(w http.ResponseWriter, r *http.Request) {
- var p moduleLib.Network
+ var p netintents.Network
vars := mux.Vars(r)
clusterProvider := vars["provider-name"]
cluster := vars["cluster-name"]
diff --git a/src/ncm/api/providernethandler.go b/src/ncm/api/providernethandler.go
index 23310c31..66a41a4d 100644
--- a/src/ncm/api/providernethandler.go
+++ b/src/ncm/api/providernethandler.go
@@ -23,7 +23,8 @@ import (
"net/http"
"strings"
- moduleLib "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
+ netintents "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
pkgerrors "github.com/pkg/errors"
@@ -35,11 +36,11 @@ import (
type providernetHandler struct {
// Interface that implements Cluster operations
// We will set this variable with a mock interface for testing
- client moduleLib.ProviderNetManager
+ client netintents.ProviderNetManager
}
// Check for valid format of input parameters
-func validateProviderNetInputs(p moduleLib.ProviderNet) error {
+func validateProviderNetInputs(p netintents.ProviderNet) error {
// validate name
errs := validation.IsValidName(p.Metadata.Name)
if len(errs) > 0 {
@@ -48,7 +49,7 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// validate cni type
found := false
- for _, val := range moduleLib.CNI_TYPES {
+ for _, val := range nettypes.CNI_TYPES {
if p.Spec.CniType == val {
found = true
break
@@ -60,7 +61,7 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// validate the provider network type
found = false
- for _, val := range moduleLib.PROVIDER_NET_TYPES {
+ for _, val := range nettypes.PROVIDER_NET_TYPES {
if strings.ToUpper(p.Spec.ProviderNetType) == val {
found = true
break
@@ -73,7 +74,7 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// validate the subnets
subnets := p.Spec.Ipv4Subnets
for _, subnet := range subnets {
- err := moduleLib.ValidateSubnet(subnet)
+ err := nettypes.ValidateSubnet(subnet)
if err != nil {
return pkgerrors.Wrap(err, "invalid subnet")
}
@@ -88,10 +89,10 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// validate the VLAN Node Selector value
expectLabels := false
found = false
- for _, val := range moduleLib.VLAN_NODE_SELECTORS {
+ for _, val := range nettypes.VLAN_NODE_SELECTORS {
if strings.ToLower(p.Spec.Vlan.VlanNodeSelector) == val {
found = true
- if val == moduleLib.VLAN_NODE_SPECIFIC {
+ if val == nettypes.VLAN_NODE_SPECIFIC {
expectLabels = true
}
break
@@ -114,7 +115,7 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// Need at least one label if node selector value was "specific"
// (if selector is "any" - don't care if labels were supplied or not
if expectLabels && !gotLabels {
- return pkgerrors.Errorf("Node Labels required for VlAN node selector \"%v\"", moduleLib.VLAN_NODE_SPECIFIC)
+ return pkgerrors.Errorf("Node Labels required for VlAN node selector \"%v\"", nettypes.VLAN_NODE_SPECIFIC)
}
return nil
@@ -122,7 +123,7 @@ func validateProviderNetInputs(p moduleLib.ProviderNet) error {
// Create handles creation of the ProviderNet entry in the database
func (h providernetHandler) createProviderNetHandler(w http.ResponseWriter, r *http.Request) {
- var p moduleLib.ProviderNet
+ var p netintents.ProviderNet
vars := mux.Vars(r)
clusterProvider := vars["provider-name"]
cluster := vars["cluster-name"]
@@ -167,7 +168,7 @@ func (h providernetHandler) createProviderNetHandler(w http.ResponseWriter, r *h
// Put handles creation/update of the ProviderNet entry in the database
func (h providernetHandler) putProviderNetHandler(w http.ResponseWriter, r *http.Request) {
- var p moduleLib.ProviderNet
+ var p netintents.ProviderNet
vars := mux.Vars(r)
clusterProvider := vars["provider-name"]
cluster := vars["cluster-name"]
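
Both validateNetworkInputs and validateProviderNetInputs above repeat the same pattern: check that a field value is one of an allowed list (nettypes.CNI_TYPES, nettypes.PROVIDER_NET_TYPES, nettypes.VLAN_NODE_SELECTORS), with node labels additionally required when the VLAN node selector equals nettypes.VLAN_NODE_SPECIFIC. A minimal sketch of that membership check, assuming the exported lists are string slices as the range loops suggest; the helper name is illustrative and not part of the commit.

// isAllowed reports whether val is one of the allowed values, mirroring the
// loops used in the validators above.
func isAllowed(val string, allowed []string) bool {
	for _, a := range allowed {
		if val == a {
			return true
		}
	}
	return false
}

// e.g. isAllowed(strings.ToUpper(p.Spec.ProviderNetType), nettypes.PROVIDER_NET_TYPES)
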
diff --git a/src/ncm/api/schedulerhandler.go b/src/ncm/api/schedulerhandler.go
new file mode 100644
index 00000000..d07d132d
--- /dev/null
+++ b/src/ncm/api/schedulerhandler.go
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package api
+
+import (
+ "net/http"
+
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"
+
+ "github.com/gorilla/mux"
+)
+
+// Used to store backend implementations objects
+// Also simplifies mocking for unit testing purposes
+type schedulerHandler struct {
+ // Interface that implements scheduler operations
+ // We will set this variable with a mock interface for testing
+ client scheduler.SchedulerManager
+}
+
+// applySchedulerHandler handles requests to apply network intents for a cluster
+func (h schedulerHandler) applySchedulerHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ provider := vars["cluster-provider"]
+ cluster := vars["cluster"]
+
+ err := h.client.ApplyNetworkIntents(provider, cluster)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
+
+// terminateSchedulerHandler handles requests to terminate network intents for a cluster
+func (h schedulerHandler) terminateSchedulerHandler(w http.ResponseWriter, r *http.Request) {
+ vars := mux.Vars(r)
+ provider := vars["cluster-provider"]
+ cluster := vars["cluster"]
+
+ err := h.client.TerminateNetworkIntents(provider, cluster)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusNoContent)
+}
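
With this change, the apply and terminate operations formerly hung off the cluster handler are reached through the scheduler routes registered in api.go. A hedged sketch of invoking them from a client, using only the /v2 path shape shown above; the host, port, and resource names are placeholders, not values from the commit.

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder endpoint; only the path shape comes from the routes in api.go.
	url := "http://localhost:9016/v2/cluster-providers/provider1/clusters/cluster1/apply"
	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // the handler writes 204 No Content on success
}
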
diff --git a/src/ncm/pkg/grpc/rsyncclient.go b/src/ncm/internal/grpc/rsyncclient.go
index 5eb870a7..5eb870a7 100644
--- a/src/ncm/pkg/grpc/rsyncclient.go
+++ b/src/ncm/internal/grpc/rsyncclient.go
diff --git a/src/ncm/internal/ovncontroller/ovncontroller.go b/src/ncm/internal/ovncontroller/ovncontroller.go
new file mode 100644
index 00000000..125ad6c7
--- /dev/null
+++ b/src/ncm/internal/ovncontroller/ovncontroller.go
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ovncontroller
+
+import (
+ "encoding/json"
+
+ netintents "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
+ appcontext "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "gopkg.in/yaml.v2"
+
+ pkgerrors "github.com/pkg/errors"
+)
+
+// Apply loads the AppContext identified by ctxVal, queries the network and
+// provider network intents defined for the given cluster provider and cluster,
+// and adds the resulting CR resources and ordering instructions to the context
+func Apply(ctxVal interface{}, clusterProvider, cluster string) error {
+ type resource struct {
+ name string
+ value string
+ }
+ var resources []resource
+
+ var ac appcontext.AppContext
+ _, err := ac.LoadAppContext(ctxVal)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error getting AppContext with Id: %v for %v/%v", ctxVal, clusterProvider, cluster)
+ }
+
+ // Find all Network Intents for this cluster
+ networkIntents, err := netintents.NewNetworkClient().GetNetworks(clusterProvider, cluster)
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error finding Network Intents")
+ }
+ for _, intent := range networkIntents {
+ var crNetwork = netintents.CrNetwork{
+ ApiVersion: netintents.NETWORK_APIVERSION,
+ Kind: netintents.NETWORK_KIND,
+ }
+ crNetwork.Network = intent
+ // Produce the yaml CR document for each intent
+ y, err := yaml.Marshal(&crNetwork)
+ if err != nil {
+ log.Info("Error marshalling network intent to yaml", log.Fields{
+ "error": err,
+ "intent": intent,
+ })
+ continue
+ }
+ resources = append(resources, resource{
+ name: intent.Metadata.Name + nettypes.SEPARATOR + netintents.NETWORK_KIND,
+ value: string(y),
+ })
+ }
+
+ // Find all Provider Network Intents for this cluster
+ providerNetworkIntents, err := netintents.NewProviderNetClient().GetProviderNets(clusterProvider, cluster)
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error finding Provider Network Intents")
+ }
+ for _, intent := range providerNetworkIntents {
+ var crProviderNet = netintents.CrProviderNet{
+ ApiVersion: netintents.PROVIDER_NETWORK_APIVERSION,
+ Kind: netintents.PROVIDER_NETWORK_KIND,
+ }
+ crProviderNet.ProviderNet = intent
+ // Produce the yaml CR document for each intent
+ y, err := yaml.Marshal(&crProviderNet)
+ if err != nil {
+ log.Info("Error marshalling provider network intent to yaml", log.Fields{
+ "error": err,
+ "intent": intent,
+ })
+ continue
+ }
+ resources = append(resources, resource{
+ name: intent.Metadata.Name + nettypes.SEPARATOR + netintents.PROVIDER_NETWORK_KIND,
+ value: string(y),
+ })
+ }
+
+ if len(resources) == 0 {
+ return nil
+ }
+
+ clusterhandle, _ := ac.GetClusterHandle(nettypes.CONTEXT_CLUSTER_APP, clusterProvider+nettypes.SEPARATOR+cluster)
+
+ var orderinstr struct {
+ Resorder []string `json:"resorder"`
+ }
+ var depinstr struct {
+ Resdep map[string]string `json:"resdependency"`
+ }
+ resdep := make(map[string]string)
+ for _, resource := range resources {
+ orderinstr.Resorder = append(orderinstr.Resorder, resource.name)
+ resdep[resource.name] = "go"
+ _, err = ac.AddResource(clusterhandle, resource.name, resource.value)
+ if err != nil {
+ cleanuperr := ac.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error cleaning AppContext after add resource failure", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ "resource": resource.name,
+ })
+ }
+ return pkgerrors.Wrap(err, "Error adding Resource to AppContext")
+ }
+ }
+ jresord, _ := json.Marshal(orderinstr)
+ depinstr.Resdep = resdep
+ jresdep, _ := json.Marshal(depinstr)
+ _, err = ac.AddInstruction(clusterhandle, "resource", "order", string(jresord))
+ _, err = ac.AddInstruction(clusterhandle, "resource", "dependency", string(jresdep))
+
+ return nil
+}
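
For reference, the resource-level instructions that Apply attaches to the cluster handle marshal to JSON shaped like the sketch below. The resource names and the separator are illustrative; only the "resorder" and "resdependency" keys and the "go" dependency value come from the code above.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the anonymous instruction structs in ovncontroller.Apply.
	order := struct {
		Resorder []string `json:"resorder"`
	}{Resorder: []string{"net1+Network", "pnet1+ProviderNetwork"}} // names and separator illustrative

	dep := struct {
		Resdep map[string]string `json:"resdependency"`
	}{Resdep: map[string]string{"net1+Network": "go", "pnet1+ProviderNetwork": "go"}}

	jo, _ := json.Marshal(order)
	jd, _ := json.Marshal(dep)
	fmt.Println(string(jo)) // {"resorder":["net1+Network","pnet1+ProviderNetwork"]}
	fmt.Println(string(jd)) // {"resdependency":{"net1+Network":"go","pnet1+ProviderNetwork":"go"}}
}
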
diff --git a/src/ncm/pkg/module/cluster.go b/src/ncm/pkg/cluster/cluster.go
index 5d1f577f..f656688c 100644
--- a/src/ncm/pkg/module/cluster.go
+++ b/src/ncm/pkg/cluster/cluster.go
@@ -14,31 +14,24 @@
* limitations under the License.
*/
-package module
+package cluster
import (
- "context"
- "encoding/json"
- "time"
-
- "github.com/onap/multicloud-k8s/src/ncm/pkg/grpc"
+ ncmtypes "github.com/onap/multicloud-k8s/src/ncm/pkg/module/types"
appcontext "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
- log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
- "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/rpc"
- installpb "github.com/onap/multicloud-k8s/src/rsync/pkg/grpc/installapp"
- "gopkg.in/yaml.v2"
+ mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
pkgerrors "github.com/pkg/errors"
)
// ClusterProvider contains the parameters needed for ClusterProviders
type ClusterProvider struct {
- Metadata Metadata `json:"metadata"`
+ Metadata mtypes.Metadata `json:"metadata"`
}
type Cluster struct {
- Metadata Metadata `json:"metadata"`
+ Metadata mtypes.Metadata `json:"metadata"`
}
type ClusterContent struct {
@@ -50,8 +43,8 @@ type ClusterLabel struct {
}
type ClusterKvPairs struct {
- Metadata Metadata `json:"metadata"`
- Spec ClusterKvSpec `json:"spec"`
+ Metadata mtypes.Metadata `json:"metadata"`
+ Spec ClusterKvSpec `json:"spec"`
}
type ClusterKvSpec struct {
@@ -106,8 +99,6 @@ type ClusterManager interface {
GetClusters(provider string) ([]Cluster, error)
GetClustersWithLabel(provider, label string) ([]string, error)
DeleteCluster(provider, name string) error
- ApplyNetworkIntents(provider, name string) error
- TerminateNetworkIntents(provider, name string) error
CreateClusterLabel(provider, cluster string, pr ClusterLabel) (ClusterLabel, error)
GetClusterLabel(provider, cluster, label string) (ClusterLabel, error)
GetClusterLabels(provider, cluster string) ([]ClusterLabel, error)
@@ -121,18 +112,18 @@ type ClusterManager interface {
// ClusterClient implements the Manager
// It will also be used to maintain some localized state
type ClusterClient struct {
- db ClientDbInfo
+ db ncmtypes.ClientDbInfo
}
// NewClusterClient returns an instance of the ClusterClient
// which implements the Manager
func NewClusterClient() *ClusterClient {
return &ClusterClient{
- db: ClientDbInfo{
- storeName: "cluster",
- tagMeta: "clustermetadata",
- tagContent: "clustercontent",
- tagContext: "clustercontext",
+ db: ncmtypes.ClientDbInfo{
+ StoreName: "cluster",
+ TagMeta: "clustermetadata",
+ TagContent: "clustercontent",
+ TagContext: "clustercontext",
},
}
}
@@ -151,7 +142,7 @@ func (v *ClusterClient) CreateClusterProvider(p ClusterProvider) (ClusterProvide
return ClusterProvider{}, pkgerrors.New("ClusterProvider already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return ClusterProvider{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -167,7 +158,7 @@ func (v *ClusterClient) GetClusterProvider(name string) (ClusterProvider, error)
ClusterProviderName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return ClusterProvider{}, pkgerrors.Wrap(err, "Get ClusterProvider")
}
@@ -194,7 +185,7 @@ func (v *ClusterClient) GetClusterProviders() ([]ClusterProvider, error) {
}
var resp []ClusterProvider
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []ClusterProvider{}, pkgerrors.Wrap(err, "Get ClusterProviders")
}
@@ -219,7 +210,7 @@ func (v *ClusterClient) DeleteClusterProvider(name string) error {
ClusterProviderName: name,
}
- err := db.DBconn.Remove(v.db.storeName, key)
+ err := db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete ClusterProvider Entry;")
}
@@ -248,11 +239,11 @@ func (v *ClusterClient) CreateCluster(provider string, p Cluster, q ClusterConte
return Cluster{}, pkgerrors.New("Cluster already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return Cluster{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagContent, q)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagContent, q)
if err != nil {
return Cluster{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -268,7 +259,7 @@ func (v *ClusterClient) GetCluster(provider, name string) (Cluster, error) {
ClusterName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return Cluster{}, pkgerrors.Wrap(err, "Get Cluster")
}
@@ -294,7 +285,7 @@ func (v *ClusterClient) GetClusterContent(provider, name string) (ClusterContent
ClusterName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagContent)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagContent)
if err != nil {
return ClusterContent{}, pkgerrors.Wrap(err, "Get Cluster Content")
}
@@ -320,7 +311,7 @@ func (v *ClusterClient) GetClusterContext(provider, name string) (appcontext.App
ClusterName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagContext)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagContext)
if err != nil {
return appcontext.AppContext{}, pkgerrors.Wrap(err, "Get Cluster Context")
}
@@ -347,7 +338,7 @@ func (v *ClusterClient) GetClusters(provider string) ([]Cluster, error) {
ClusterName: "",
}
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []Cluster{}, pkgerrors.Wrap(err, "Get Clusters")
}
@@ -375,7 +366,7 @@ func (v *ClusterClient) GetClustersWithLabel(provider, label string) ([]string,
ClusterLabelName: label,
}
- values, err := db.DBconn.Find(v.db.storeName, key, "cluster")
+ values, err := db.DBconn.Find(v.db.StoreName, key, "cluster")
if err != nil {
return []string{}, pkgerrors.Wrap(err, "Get Clusters by label")
}
@@ -401,7 +392,7 @@ func (v *ClusterClient) DeleteCluster(provider, name string) error {
return pkgerrors.Errorf("Cannot delete cluster until context is deleted: %v, %v", provider, name)
}
- err = db.DBconn.Remove(v.db.storeName, key)
+ err = db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete Cluster Entry;")
}
@@ -409,243 +400,6 @@ func (v *ClusterClient) DeleteCluster(provider, name string) error {
return nil
}
-// Apply Network Intents associated with a cluster
-func (v *ClusterClient) ApplyNetworkIntents(provider, name string) error {
-
- _, err := v.GetClusterContext(provider, name)
- if err == nil {
- return pkgerrors.Errorf("Cluster network intents have already been applied: %v, %v", provider, name)
- }
-
- type resource struct {
- name string
- value string
- }
-
- var resources []resource
-
- // Find all Network Intents for this cluster
- networkIntents, err := NewNetworkClient().GetNetworks(provider, name)
- if err != nil {
- return pkgerrors.Wrap(err, "Error finding Network Intents")
- }
- for _, intent := range networkIntents {
- var crNetwork = CrNetwork{
- ApiVersion: NETWORK_APIVERSION,
- Kind: NETWORK_KIND,
- }
- crNetwork.Network = intent
- // Produce the yaml CR document for each intent
- y, err := yaml.Marshal(&crNetwork)
- if err != nil {
- log.Info("Error marshalling network intent to yaml", log.Fields{
- "error": err,
- "intent": intent,
- })
- continue
- }
- resources = append(resources, resource{
- name: intent.Metadata.Name + SEPARATOR + NETWORK_KIND,
- value: string(y),
- })
- }
-
- // Find all Provider Network Intents for this cluster
- providerNetworkIntents, err := NewProviderNetClient().GetProviderNets(provider, name)
- if err != nil {
- return pkgerrors.Wrap(err, "Error finding Provider Network Intents")
- }
- for _, intent := range providerNetworkIntents {
- var crProviderNet = CrProviderNet{
- ApiVersion: PROVIDER_NETWORK_APIVERSION,
- Kind: PROVIDER_NETWORK_KIND,
- }
- crProviderNet.ProviderNet = intent
- // Produce the yaml CR document for each intent
- y, err := yaml.Marshal(&crProviderNet)
- if err != nil {
- log.Info("Error marshalling provider network intent to yaml", log.Fields{
- "error": err,
- "intent": intent,
- })
- continue
- }
- resources = append(resources, resource{
- name: intent.Metadata.Name + SEPARATOR + PROVIDER_NETWORK_KIND,
- value: string(y),
- })
- }
-
- if len(resources) == 0 {
- return nil
- }
-
- // Make an app context for the network intent resources
- ac := appcontext.AppContext{}
- ctxVal, err := ac.InitAppContext()
- if err != nil {
- return pkgerrors.Wrap(err, "Error creating AppContext")
- }
- handle, err := ac.CreateCompositeApp()
- if err != nil {
- return pkgerrors.Wrap(err, "Error creating AppContext CompositeApp")
- }
-
- // Add an app (fixed value) to the app context
- apphandle, err := ac.AddApp(handle, CONTEXT_CLUSTER_APP)
- if err != nil {
- cleanuperr := ac.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Warn("Error cleaning AppContext CompositeApp create failure", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- })
- }
- return pkgerrors.Wrap(err, "Error adding App to AppContext")
- }
-
- // Add an app order instruction
- appinstr := struct {
- Apporder []string `json:"apporder"`
- }{
- []string{CONTEXT_CLUSTER_APP},
- }
- jinstr, _ := json.Marshal(appinstr)
-
- appdepinstr := struct {
- Appdep map[string]string `json:"appdependency"`
- }{
- map[string]string{CONTEXT_CLUSTER_APP: "go"},
- }
- jdep, _ := json.Marshal(appdepinstr)
-
- _, err = ac.AddInstruction(handle, "app", "order", string(jinstr))
- _, err = ac.AddInstruction(handle, "app", "dependency", string(jdep))
-
- // Add a cluster to the app
- clusterhandle, err := ac.AddCluster(apphandle, provider+SEPARATOR+name)
- if err != nil {
- cleanuperr := ac.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Warn("Error cleaning AppContext after add cluster failure", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- })
- }
- return pkgerrors.Wrap(err, "Error adding Cluster to AppContext")
- }
-
- // add the resources to the app context
-
- var orderinstr struct {
- Resorder []string `json:"resorder"`
- }
- var depinstr struct {
- Resdep map[string]string `json:"resdependency"`
- }
- resdep := make(map[string]string)
- for _, resource := range resources {
- orderinstr.Resorder = append(orderinstr.Resorder, resource.name)
- resdep[resource.name] = "go"
- _, err = ac.AddResource(clusterhandle, resource.name, resource.value)
- if err != nil {
- cleanuperr := ac.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Warn("Error cleaning AppContext after add resource failure", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- "resource": resource.name,
- })
- }
- return pkgerrors.Wrap(err, "Error adding Resource to AppContext")
- }
- }
- jresord, _ := json.Marshal(orderinstr)
- depinstr.Resdep = resdep
- jresdep, _ := json.Marshal(depinstr)
- _, err = ac.AddInstruction(clusterhandle, "resource", "order", string(jresord))
- _, err = ac.AddInstruction(clusterhandle, "resource", "dependency", string(jresdep))
-
- // save the context in the cluster db record
- key := ClusterKey{
- ClusterProviderName: provider,
- ClusterName: name,
- }
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagContext, ctxVal)
- if err != nil {
- cleanuperr := ac.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Warn("Error cleaning AppContext after DB insert failure", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- })
- }
- return pkgerrors.Wrap(err, "Error adding AppContext to DB")
- }
-
- // call resource synchronizer to instantiate the CRs in the cluster
- conn := rpc.GetRpcConn(grpc.RsyncName)
- if conn == nil {
- grpc.InitRsyncClient()
- conn = rpc.GetRpcConn(grpc.RsyncName)
- }
-
- var rpcClient installpb.InstallappClient
- var installRes *installpb.InstallAppResponse
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
-
- if conn != nil {
- rpcClient = installpb.NewInstallappClient(conn)
- installReq := new(installpb.InstallAppRequest)
- installReq.AppContext = ctxVal.(string)
- installRes, err = rpcClient.InstallApp(ctx, installReq)
- if err == nil {
- log.Info("Response from InstappApp GRPC call", log.Fields{
- "Succeeded": installRes.AppContextInstalled,
- "Message": installRes.AppContextInstallMessage,
- })
- }
- } else {
- return pkgerrors.Errorf("InstallApp Failed - Could not get InstallAppClient: %v", grpc.RsyncName)
- }
-
- return nil
-}
-
-// Terminate Network Intents associated with a cluster
-func (v *ClusterClient) TerminateNetworkIntents(provider, name string) error {
- context, err := v.GetClusterContext(provider, name)
- if err != nil {
- return pkgerrors.Wrapf(err, "Error finding AppContext for cluster: %v, %v", provider, name)
- }
-
- // TODO: call resource synchronizer to terminate the CRs in the cluster
-
- // remove the app context
- cleanuperr := context.DeleteCompositeApp()
- if cleanuperr != nil {
- log.Warn("Error deleted AppContext", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- })
- }
-
- // remove the app context field from the cluster db record
- key := ClusterKey{
- ClusterProviderName: provider,
- ClusterName: name,
- }
- err = db.DBconn.RemoveTag(v.db.storeName, key, v.db.tagContext)
- if err != nil {
- log.Warn("Error removing AppContext from Cluster document", log.Fields{
- "cluster-provider": provider,
- "cluster": name,
- })
- }
- return nil
-}
-
// CreateClusterLabel - create a new Cluster Label mongo document for a cluster-provider/cluster
func (v *ClusterClient) CreateClusterLabel(provider string, cluster string, p ClusterLabel) (ClusterLabel, error) {
//Construct key and tag to select the entry
@@ -667,7 +421,7 @@ func (v *ClusterClient) CreateClusterLabel(provider string, cluster string, p Cl
return ClusterLabel{}, pkgerrors.New("Cluster Label already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return ClusterLabel{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -684,7 +438,7 @@ func (v *ClusterClient) GetClusterLabel(provider, cluster, label string) (Cluste
ClusterLabelName: label,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return ClusterLabel{}, pkgerrors.Wrap(err, "Get Cluster")
}
@@ -711,7 +465,7 @@ func (v *ClusterClient) GetClusterLabels(provider, cluster string) ([]ClusterLab
ClusterLabelName: "",
}
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []ClusterLabel{}, pkgerrors.Wrap(err, "Get Cluster Labels")
}
@@ -739,7 +493,7 @@ func (v *ClusterClient) DeleteClusterLabel(provider, cluster, label string) erro
ClusterLabelName: label,
}
- err := db.DBconn.Remove(v.db.storeName, key)
+ err := db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete ClusterLabel Entry;")
}
@@ -767,7 +521,7 @@ func (v *ClusterClient) CreateClusterKvPairs(provider string, cluster string, p
return ClusterKvPairs{}, pkgerrors.New("Cluster KV Pair already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return ClusterKvPairs{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -784,7 +538,7 @@ func (v *ClusterClient) GetClusterKvPairs(provider, cluster, kvpair string) (Clu
ClusterKvPairsName: kvpair,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return ClusterKvPairs{}, pkgerrors.Wrap(err, "Get Cluster")
}
@@ -811,7 +565,7 @@ func (v *ClusterClient) GetAllClusterKvPairs(provider, cluster string) ([]Cluste
ClusterKvPairsName: "",
}
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []ClusterKvPairs{}, pkgerrors.Wrap(err, "Get Cluster KV Pairs")
}
@@ -839,7 +593,7 @@ func (v *ClusterClient) DeleteClusterKvPairs(provider, cluster, kvpair string) e
ClusterKvPairsName: kvpair,
}
- err := db.DBconn.Remove(v.db.storeName, key)
+ err := db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete ClusterKvPairs Entry;")
}
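
The cluster management client now lives in its own package. A minimal, hedged sketch of how it is consumed from outside that package after the move ("provider1" and "edge" are placeholder names, and the orchestrator db connection is assumed to be initialized by the ncm service):

    package main

    import (
        "fmt"

        "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
    )

    func main() {
        c := cluster.NewClusterClient()

        // Placeholder provider and label names, for illustration only.
        names, err := c.GetClustersWithLabel("provider1", "edge")
        if err != nil {
            fmt.Println("label query failed:", err)
            return
        }
        for _, name := range names {
            fmt.Println("cluster with label:", name)
        }
    }
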
diff --git a/src/ncm/pkg/module/module.go b/src/ncm/pkg/module/module.go
index e3ebcccc..10b241f6 100644
--- a/src/ncm/pkg/module/module.go
+++ b/src/ncm/pkg/module/module.go
@@ -16,20 +16,28 @@
package module
+import (
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents"
+ "github.com/onap/multicloud-k8s/src/ncm/pkg/scheduler"
+)
+
// Client for using the services in the ncm
type Client struct {
- Cluster *ClusterClient
- Network *NetworkClient
- ProviderNet *ProviderNetClient
+ Cluster *cluster.ClusterClient
+ Network *networkintents.NetworkClient
+ ProviderNet *networkintents.ProviderNetClient
+ Scheduler *scheduler.SchedulerClient
// Add Clients for API's here
}
// NewClient creates a new client for using the services
func NewClient() *Client {
c := &Client{}
- c.Cluster = NewClusterClient()
- c.Network = NewNetworkClient()
- c.ProviderNet = NewProviderNetClient()
+ c.Cluster = cluster.NewClusterClient()
+ c.Network = networkintents.NewNetworkClient()
+ c.ProviderNet = networkintents.NewProviderNetClient()
+ c.Scheduler = scheduler.NewSchedulerClient()
// Add Client API handlers here
return c
}
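
With the scheduler split out, callers reach all of the ncm clients through the single module client. A minimal sketch of that usage, assuming the provider and cluster names are placeholders and that db.DBconn has already been initialized by the service entry point:

    package main

    import (
        "fmt"

        "github.com/onap/multicloud-k8s/src/ncm/pkg/module"
    )

    func main() {
        // One module client fans out to the cluster, network intent and scheduler clients.
        c := module.NewClient()

        // "provider1" and "edge01" are placeholder names, for illustration only.
        nets, err := c.Network.GetNetworks("provider1", "edge01")
        if err != nil {
            fmt.Println("could not list network intents:", err)
            return
        }
        fmt.Printf("found %d network intents\n", len(nets))

        // Scheduling is now a separate client rather than a method on the cluster client.
        if err := c.Scheduler.ApplyNetworkIntents("provider1", "edge01"); err != nil {
            fmt.Println("apply failed:", err)
        }
    }
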
diff --git a/src/ncm/pkg/module/types/module_definitions.go b/src/ncm/pkg/module/types/module_definitions.go
new file mode 100644
index 00000000..0dd657ac
--- /dev/null
+++ b/src/ncm/pkg/module/types/module_definitions.go
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package types
+
+// TODO - should move to common module types location - e.g. in orchestrator
+type ClientDbInfo struct {
+ StoreName string // name of the mongodb collection to use for client documents
+ TagMeta string // attribute key name for the json data of a client document
+ TagContent string // attribute key name for the file data of a client document
+ TagContext string // attribute key name for context object in App Context
+}
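
Since the ClientDbInfo fields are now exported, the cluster, networkintents and scheduler packages populate the struct themselves. A hypothetical client following the same pattern (the example package name and tag value are illustrative only, not part of this change):

    package example

    import (
        ncmtypes "github.com/onap/multicloud-k8s/src/ncm/pkg/module/types"
    )

    // ExampleClient mirrors the pattern used by ClusterClient, NetworkClient,
    // ProviderNetClient and SchedulerClient in this reorganization.
    type ExampleClient struct {
        db ncmtypes.ClientDbInfo
    }

    func NewExampleClient() *ExampleClient {
        return &ExampleClient{
            db: ncmtypes.ClientDbInfo{
                StoreName: "cluster",         // mongodb collection shared by the ncm clients
                TagMeta:   "examplemetadata", // attribute key for this client's documents
            },
        }
    }

Database calls then reference the exported fields directly, e.g. db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta), exactly as in the hunks above.
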
diff --git a/src/ncm/pkg/module/network.go b/src/ncm/pkg/networkintents/network.go
index e753905e..e8480e0f 100644
--- a/src/ncm/pkg/module/network.go
+++ b/src/ncm/pkg/networkintents/network.go
@@ -14,23 +14,27 @@
* limitations under the License.
*/
-package module
+package networkintents
import (
+ clusterPkg "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
+ ncmtypes "github.com/onap/multicloud-k8s/src/ncm/pkg/module/types"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
+ mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
pkgerrors "github.com/pkg/errors"
)
// Network contains the parameters needed for dynamic networks
type Network struct {
- Metadata Metadata `json:"metadata" yaml:"metadata"`
- Spec NetworkSpec `json:"spec" yaml:"spec"`
+ Metadata mtypes.Metadata `json:"metadata" yaml:"metadata"`
+ Spec NetworkSpec `json:"spec" yaml:"spec"`
}
type NetworkSpec struct {
- CniType string `json:"cniType" yaml:"cniType"`
- Ipv4Subnets []Ipv4Subnet `json:"ipv4Subnets" yaml:"ipv4Subnets"`
+ CniType string `json:"cniType" yaml:"cniType"`
+ Ipv4Subnets []nettypes.Ipv4Subnet `json:"ipv4Subnets" yaml:"ipv4Subnets"`
}
// NetworkKey is the key structure that is used in the database
@@ -61,16 +65,16 @@ type NetworkManager interface {
// NetworkClient implements the Manager
// It will also be used to maintain some localized state
type NetworkClient struct {
- db ClientDbInfo
+ db ncmtypes.ClientDbInfo
}
// NewNetworkClient returns an instance of the NetworkClient
// which implements the Manager
func NewNetworkClient() *NetworkClient {
return &NetworkClient{
- db: ClientDbInfo{
- storeName: "cluster",
- tagMeta: "networkmetadata",
+ db: ncmtypes.ClientDbInfo{
+ StoreName: "cluster",
+ TagMeta: "networkmetadata",
},
}
}
@@ -86,7 +90,7 @@ func (v *NetworkClient) CreateNetwork(p Network, clusterProvider, cluster string
}
//Check if cluster exists
- _, err := NewClusterClient().GetCluster(clusterProvider, cluster)
+ _, err := clusterPkg.NewClusterClient().GetCluster(clusterProvider, cluster)
if err != nil {
return Network{}, pkgerrors.New("Unable to find the cluster")
}
@@ -97,7 +101,7 @@ func (v *NetworkClient) CreateNetwork(p Network, clusterProvider, cluster string
return Network{}, pkgerrors.New("Network already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return Network{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -115,7 +119,7 @@ func (v *NetworkClient) GetNetwork(name, clusterProvider, cluster string) (Netwo
NetworkName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return Network{}, pkgerrors.Wrap(err, "Get Network")
}
@@ -144,7 +148,7 @@ func (v *NetworkClient) GetNetworks(clusterProvider, cluster string) ([]Network,
}
var resp []Network
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []Network{}, pkgerrors.Wrap(err, "Get Networks")
}
@@ -171,7 +175,7 @@ func (v *NetworkClient) DeleteNetwork(name, clusterProvider, cluster string) err
NetworkName: name,
}
- err := db.DBconn.Remove(v.db.storeName, key)
+ err := db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete Network Entry;")
}
diff --git a/src/ncm/pkg/module/providernet.go b/src/ncm/pkg/networkintents/providernet.go
index a1cbe940..0eb763c1 100644
--- a/src/ncm/pkg/module/providernet.go
+++ b/src/ncm/pkg/networkintents/providernet.go
@@ -14,25 +14,29 @@
* limitations under the License.
*/
-package module
+package networkintents
import (
+ clusterPkg "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
+ ncmtypes "github.com/onap/multicloud-k8s/src/ncm/pkg/module/types"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
"github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
+ mtypes "github.com/onap/multicloud-k8s/src/orchestrator/pkg/module/types"
pkgerrors "github.com/pkg/errors"
)
// ProviderNet contains the parameters needed for dynamic networks
type ProviderNet struct {
- Metadata Metadata `json:"metadata"`
+ Metadata mtypes.Metadata `json:"metadata"`
Spec ProviderNetSpec `json:"spec"`
}
type ProviderNetSpec struct {
- CniType string `json:"cniType" yaml:"cniType"`
- Ipv4Subnets []Ipv4Subnet `json:"ipv4Subnets" yaml:"ipv4Subnets"`
- ProviderNetType string `json:"providerNetType" yaml:"providerNetType"`
- Vlan Vlan `json:"vlan" yaml:"vlan"`
+ CniType string `json:"cniType" yaml:"cniType"`
+ Ipv4Subnets []nettypes.Ipv4Subnet `json:"ipv4Subnets" yaml:"ipv4Subnets"`
+ ProviderNetType string `json:"providerNetType" yaml:"providerNetType"`
+ Vlan nettypes.Vlan `json:"vlan" yaml:"vlan"`
}
// structure for the Network Custom Resource
@@ -63,16 +67,16 @@ type ProviderNetManager interface {
// ProviderNetClient implements the Manager
// It will also be used to maintain some localized state
type ProviderNetClient struct {
- db ClientDbInfo
+ db ncmtypes.ClientDbInfo
}
// NewProviderNetClient returns an instance of the ProviderNetClient
// which implements the Manager
func NewProviderNetClient() *ProviderNetClient {
return &ProviderNetClient{
- db: ClientDbInfo{
- storeName: "cluster",
- tagMeta: "networkmetadata",
+ db: ncmtypes.ClientDbInfo{
+ StoreName: "cluster",
+ TagMeta: "networkmetadata",
},
}
}
@@ -88,7 +92,7 @@ func (v *ProviderNetClient) CreateProviderNet(p ProviderNet, clusterProvider, cl
}
//Check if cluster exists
- _, err := NewClusterClient().GetCluster(clusterProvider, cluster)
+ _, err := clusterPkg.NewClusterClient().GetCluster(clusterProvider, cluster)
if err != nil {
return ProviderNet{}, pkgerrors.New("Unable to find the cluster")
}
@@ -99,7 +103,7 @@ func (v *ProviderNetClient) CreateProviderNet(p ProviderNet, clusterProvider, cl
return ProviderNet{}, pkgerrors.New("ProviderNet already exists")
}
- err = db.DBconn.Insert(v.db.storeName, key, nil, v.db.tagMeta, p)
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagMeta, p)
if err != nil {
return ProviderNet{}, pkgerrors.Wrap(err, "Creating DB Entry")
}
@@ -117,7 +121,7 @@ func (v *ProviderNetClient) GetProviderNet(name, clusterProvider, cluster string
ProviderNetName: name,
}
- value, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ value, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return ProviderNet{}, pkgerrors.Wrap(err, "Get ProviderNet")
}
@@ -146,7 +150,7 @@ func (v *ProviderNetClient) GetProviderNets(clusterProvider, cluster string) ([]
}
var resp []ProviderNet
- values, err := db.DBconn.Find(v.db.storeName, key, v.db.tagMeta)
+ values, err := db.DBconn.Find(v.db.StoreName, key, v.db.TagMeta)
if err != nil {
return []ProviderNet{}, pkgerrors.Wrap(err, "Get ProviderNets")
}
@@ -173,7 +177,7 @@ func (v *ProviderNetClient) DeleteProviderNet(name, clusterProvider, cluster str
ProviderNetName: name,
}
- err := db.DBconn.Remove(v.db.storeName, key)
+ err := db.DBconn.Remove(v.db.StoreName, key)
if err != nil {
return pkgerrors.Wrap(err, "Delete ProviderNet Entry;")
}
diff --git a/src/ncm/pkg/networkintents/types/types.go b/src/ncm/pkg/networkintents/types/types.go
new file mode 100644
index 00000000..e6fea72e
--- /dev/null
+++ b/src/ncm/pkg/networkintents/types/types.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package types
+
+import (
+ "strings"
+
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/validation"
+ pkgerrors "github.com/pkg/errors"
+)
+
+const VLAN_PROVIDER_NET_TYPE_VLAN string = "VLAN"
+const VLAN_PROVIDER_NET_TYPE_DIRECT string = "DIRECT"
+
+const SEPARATOR = "+"
+const CONTEXT_CLUSTER_APP = "network-intents"
+
+var PROVIDER_NET_TYPES = [...]string{VLAN_PROVIDER_NET_TYPE_VLAN, VLAN_PROVIDER_NET_TYPE_DIRECT}
+
+const CNI_TYPE_OVN4NFV string = "ovn4nfv"
+
+var CNI_TYPES = [...]string{CNI_TYPE_OVN4NFV}
+
+type Ipv4Subnet struct {
+ Subnet string `json:"subnet" yaml:"subnet"` // CIDR notation, e.g. 172.16.33.0/24
+ Name string `json:"name" yaml:"name"`
+ Gateway string `json:"gateway" yaml:"gateway"` // IPv4 gateway address in CIDR form, e.g. 172.16.33.1/24
+ Exclude string `json:"excludeIps" yaml:"excludeIps"` // space separated list of single IPs or ranges e.g. "172.16.33.2 172.16.33.5..172.16.33.10"
+}
+
+const VLAN_NODE_ANY = "any"
+const VLAN_NODE_SPECIFIC = "specific"
+
+var VLAN_NODE_SELECTORS = [...]string{VLAN_NODE_ANY, VLAN_NODE_SPECIFIC}
+
+type Vlan struct {
+ VlanId string `json:"vlanID" yaml:"vlanId"`
+ ProviderInterfaceName string `json:"providerInterfaceName" yaml:"providerInterfaceName"`
+ LogicalInterfaceName string `json:"logicalInterfaceName" yaml:"logicalInterfaceName"`
+ VlanNodeSelector string `json:"vlanNodeSelector" yaml:"vlanNodeSelector"`
+ NodeLabelList []string `json:"nodeLabelList" yaml:"nodeLabelList"`
+}
+
+// Check for valid format of an Ipv4Subnet
+func ValidateSubnet(sub Ipv4Subnet) error {
+ // verify subnet is in valid cidr format
+ err := validation.IsIpv4Cidr(sub.Subnet)
+ if err != nil {
+ return pkgerrors.Wrap(err, "invalid subnet")
+ }
+
+ // just a size check on the subnet name (it may end up used as an interface name) - system dependent
+ errs := validation.IsValidName(sub.Name)
+ if len(errs) > 0 {
+ return pkgerrors.Errorf("Invalid subnet name=[%v], errors: %v", sub.Name, errs)
+ }
+
+ // verify gateway is in valid cidr format
+ if len(sub.Gateway) > 0 {
+ err = validation.IsIpv4Cidr(sub.Gateway)
+ if err != nil {
+ return pkgerrors.Wrap(err, "invalid gateway")
+ }
+ }
+
+ // verify excludeIps is composed of space separated ipv4 addresses and
+ // ipv4 address ranges separated by '..'
+ for _, value := range strings.Fields(sub.Exclude) {
+ for _, ip := range strings.SplitN(value, "..", 2) {
+ err = validation.IsIpv4(ip)
+ if err != nil {
+ return pkgerrors.Errorf("invalid ipv4 exclude list %v", sub.Exclude)
+ }
+ }
+ }
+ return nil
+}
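
A short usage sketch of ValidateSubnet with the field formats described above (the values are assumptions chosen to match the comments, not data from this change):

    package main

    import (
        "fmt"

        nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
    )

    func main() {
        sub := nettypes.Ipv4Subnet{
            Subnet:  "172.16.33.0/24", // CIDR notation
            Name:    "subnet1",
            Gateway: "172.16.33.1/24", // gateway is also given in CIDR form
            Exclude: "172.16.33.2 172.16.33.5..172.16.33.10", // single IPs and '..' ranges
        }
        if err := nettypes.ValidateSubnet(sub); err != nil {
            fmt.Println("invalid subnet:", err)
            return
        }
        fmt.Println("subnet is valid")
    }
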
diff --git a/src/ncm/pkg/scheduler/scheduler.go b/src/ncm/pkg/scheduler/scheduler.go
new file mode 100644
index 00000000..e72085b7
--- /dev/null
+++ b/src/ncm/pkg/scheduler/scheduler.go
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2020 Intel Corporation, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package scheduler
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ "github.com/onap/multicloud-k8s/src/ncm/internal/grpc"
+ oc "github.com/onap/multicloud-k8s/src/ncm/internal/ovncontroller"
+ clusterPkg "github.com/onap/multicloud-k8s/src/ncm/pkg/cluster"
+ ncmtypes "github.com/onap/multicloud-k8s/src/ncm/pkg/module/types"
+ nettypes "github.com/onap/multicloud-k8s/src/ncm/pkg/networkintents/types"
+ appcontext "github.com/onap/multicloud-k8s/src/orchestrator/pkg/appcontext"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/db"
+ log "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/logutils"
+ "github.com/onap/multicloud-k8s/src/orchestrator/pkg/infra/rpc"
+ installpb "github.com/onap/multicloud-k8s/src/rsync/pkg/grpc/installapp"
+
+ pkgerrors "github.com/pkg/errors"
+)
+
+// SchedulerManager is an interface that exposes the network intent scheduling functionality
+type SchedulerManager interface {
+ ApplyNetworkIntents(clusterProvider, cluster string) error
+ TerminateNetworkIntents(clusterProvider, cluster string) error
+}
+
+// SchedulerClient implements the Manager
+// It will also be used to maintain some localized state
+type SchedulerClient struct {
+ db ncmtypes.ClientDbInfo
+}
+
+// NewSchedulerClient returns an instance of the SchedulerClient
+// which implements the Manager
+func NewSchedulerClient() *SchedulerClient {
+ return &SchedulerClient{
+ db: ncmtypes.ClientDbInfo{
+ StoreName: "cluster",
+ TagMeta: "clustermetadata",
+ TagContent: "clustercontent",
+ TagContext: "clustercontext",
+ },
+ }
+}
+
+// Apply Network Intents associated with a cluster
+func (v *SchedulerClient) ApplyNetworkIntents(clusterProvider, cluster string) error {
+
+ _, err := clusterPkg.NewClusterClient().GetClusterContext(clusterProvider, cluster)
+ if err == nil {
+ return pkgerrors.Errorf("Cluster network intents have already been applied: %v, %v", clusterProvider, cluster)
+ }
+
+ // Make an app context for the network intent resources
+ ac := appcontext.AppContext{}
+ ctxVal, err := ac.InitAppContext()
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error creating AppContext")
+ }
+ handle, err := ac.CreateCompositeApp()
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error creating AppContext CompositeApp")
+ }
+
+ // Add an app (fixed value) to the app context
+ apphandle, err := ac.AddApp(handle, nettypes.CONTEXT_CLUSTER_APP)
+ if err != nil {
+ cleanuperr := ac.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error cleaning AppContext CompositeApp create failure", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+ return pkgerrors.Wrap(err, "Error adding App to AppContext")
+ }
+
+ // Add an app order instruction
+ appinstr := struct {
+ Apporder []string `json:"apporder"`
+ }{
+ []string{nettypes.CONTEXT_CLUSTER_APP},
+ }
+ jinstr, _ := json.Marshal(appinstr)
+
+ appdepinstr := struct {
+ Appdep map[string]string `json:"appdependency"`
+ }{
+ map[string]string{nettypes.CONTEXT_CLUSTER_APP: "go"},
+ }
+ jdep, _ := json.Marshal(appdepinstr)
+
+ _, err = ac.AddInstruction(handle, "app", "order", string(jinstr))
+ _, err = ac.AddInstruction(handle, "app", "dependency", string(jdep))
+
+ // Add a cluster to the app
+ _, err = ac.AddCluster(apphandle, clusterProvider+nettypes.SEPARATOR+cluster)
+ if err != nil {
+ cleanuperr := ac.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error cleaning AppContext after add cluster failure", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+ return pkgerrors.Wrap(err, "Error adding Cluster to AppContext")
+ }
+
+ // Pass the context to the appropriate controller (just the default ovncontroller for now).
+ // For an internal controller, pass the appcontext, cluster provider and cluster names in directly.
+ // External controllers will be given the appcontext id and will have to reconstruct
+ // their own context.
+ err = oc.Apply(ctxVal, clusterProvider, cluster)
+ if err != nil {
+ cleanuperr := ac.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error cleaning AppContext after controller failure", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+ return pkgerrors.Wrap(err, "Error applying network intents to AppContext")
+ }
+
+ // save the context in the cluster db record
+ key := clusterPkg.ClusterKey{
+ ClusterProviderName: clusterProvider,
+ ClusterName: cluster,
+ }
+ err = db.DBconn.Insert(v.db.StoreName, key, nil, v.db.TagContext, ctxVal)
+ if err != nil {
+ cleanuperr := ac.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error cleaning AppContext after DB insert failure", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+ return pkgerrors.Wrap(err, "Error adding AppContext to DB")
+ }
+
+ // call resource synchronizer to instantiate the CRs in the cluster
+ conn := rpc.GetRpcConn(grpc.RsyncName)
+ if conn == nil {
+ grpc.InitRsyncClient()
+ conn = rpc.GetRpcConn(grpc.RsyncName)
+ }
+
+ var rpcClient installpb.InstallappClient
+ var installRes *installpb.InstallAppResponse
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ if conn != nil {
+ rpcClient = installpb.NewInstallappClient(conn)
+ installReq := new(installpb.InstallAppRequest)
+ installReq.AppContext = ctxVal.(string)
+ installRes, err = rpcClient.InstallApp(ctx, installReq)
+ if err == nil {
+ log.Info("Response from InstappApp GRPC call", log.Fields{
+ "Succeeded": installRes.AppContextInstalled,
+ "Message": installRes.AppContextInstallMessage,
+ })
+ }
+ } else {
+ return pkgerrors.Errorf("InstallApp Failed - Could not get InstallAppClient: %v", grpc.RsyncName)
+ }
+
+ return nil
+}
+
+// Terminate Network Intents associated with a cluster
+func (v *SchedulerClient) TerminateNetworkIntents(clusterProvider, cluster string) error {
+ context, err := clusterPkg.NewClusterClient().GetClusterContext(clusterProvider, cluster)
+ if err != nil {
+ return pkgerrors.Wrapf(err, "Error finding AppContext for cluster: %v, %v", clusterProvider, cluster)
+ }
+
+ // TODO: call resource synchronizer to terminate the CRs in the cluster
+
+ // remove the app context
+ cleanuperr := context.DeleteCompositeApp()
+ if cleanuperr != nil {
+ log.Warn("Error deleted AppContext", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+
+ // remove the app context field from the cluster db record
+ key := clusterPkg.ClusterKey{
+ ClusterProviderName: clusterProvider,
+ ClusterName: cluster,
+ }
+ err = db.DBconn.RemoveTag(v.db.StoreName, key, v.db.TagContext)
+ if err != nil {
+ log.Warn("Error removing AppContext from Cluster document", log.Fields{
+ "cluster-provider": clusterProvider,
+ "cluster": cluster,
+ })
+ }
+ return nil
+}
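
For reference, the app order and dependency instructions that ApplyNetworkIntents attaches to the composite app handle are small JSON documents. A standalone sketch of what they marshal to, using the fixed CONTEXT_CLUSTER_APP value from the types package:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        const contextClusterApp = "network-intents" // nettypes.CONTEXT_CLUSTER_APP

        appinstr := struct {
            Apporder []string `json:"apporder"`
        }{[]string{contextClusterApp}}

        appdepinstr := struct {
            Appdep map[string]string `json:"appdependency"`
        }{map[string]string{contextClusterApp: "go"}}

        jinstr, _ := json.Marshal(appinstr)
        jdep, _ := json.Marshal(appdepinstr)

        fmt.Println(string(jinstr)) // {"apporder":["network-intents"]}
        fmt.Println(string(jdep))   // {"appdependency":{"network-intents":"go"}}
    }
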