aboutsummaryrefslogtreecommitdiffstats
path: root/src/k8splugin/internal
diff options
context:
space:
mode:
Diffstat (limited to 'src/k8splugin/internal')
-rw-r--r--src/k8splugin/internal/app/client.go39
-rw-r--r--src/k8splugin/internal/app/instance.go15
-rw-r--r--src/k8splugin/internal/healthcheck/healthcheck.go261
-rw-r--r--src/k8splugin/internal/healthcheck/hooks.go145
-rw-r--r--src/k8splugin/internal/healthcheck/kubeclient.go61
-rw-r--r--src/k8splugin/internal/healthcheck/stream.go75
-rw-r--r--src/k8splugin/internal/helm/helm.go253
-rw-r--r--src/k8splugin/internal/helm/helm_test.go67
-rw-r--r--src/k8splugin/internal/helm/types.go22
-rw-r--r--src/k8splugin/internal/rb/profile.go6
10 files changed, 460 insertions, 484 deletions
diff --git a/src/k8splugin/internal/app/client.go b/src/k8splugin/internal/app/client.go
index 85fefe69..00fd8e97 100644
--- a/src/k8splugin/internal/app/client.go
+++ b/src/k8splugin/internal/app/client.go
@@ -16,6 +16,7 @@ limitations under the License.
package app
import (
+ "context"
"io/ioutil"
"os"
"strings"
@@ -33,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/disk"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
@@ -67,7 +67,7 @@ func (k *KubernetesClient) getPodsByLabel(namespace string) ([]ResourceStatus, e
listOpts := metav1.ListOptions{
LabelSelector: config.GetConfiguration().KubernetesLabelName + "=" + k.instanceID,
}
- podList, err := client.List(listOpts)
+ podList, err := client.List(context.TODO(), listOpts)
if err != nil {
return nil, pkgerrors.Wrap(err, "Retrieving PodList from cluster")
}
@@ -111,9 +111,9 @@ func (k *KubernetesClient) queryResources(apiVersion, kind, labelSelector, names
var unstrList *unstructured.UnstructuredList
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
- unstrList, err = dynClient.Resource(gvr).Namespace(namespace).List(opts)
+ unstrList, err = dynClient.Resource(gvr).Namespace(namespace).List(context.TODO(), opts)
case meta.RESTScopeNameRoot:
- unstrList, err = dynClient.Resource(gvr).List(opts)
+ unstrList, err = dynClient.Resource(gvr).List(context.TODO(), opts)
default:
return nil, pkgerrors.New("Got an unknown RESTScopeName for mapping: " + gvk.String())
}
@@ -128,8 +128,8 @@ func (k *KubernetesClient) queryResources(apiVersion, kind, labelSelector, names
return resp, nil
}
-// getResourcesStatus yields status of given generic resource
-func (k *KubernetesClient) getResourceStatus(res helm.KubernetesResource, namespace string) (ResourceStatus, error) {
+// GetResourcesStatus yields status of given generic resource
+func (k *KubernetesClient) GetResourceStatus(res helm.KubernetesResource, namespace string) (ResourceStatus, error) {
dynClient := k.GetDynamicClient()
mapper := k.GetMapper()
mapping, err := mapper.RESTMapping(schema.GroupKind{
@@ -146,9 +146,9 @@ func (k *KubernetesClient) getResourceStatus(res helm.KubernetesResource, namesp
var unstruct *unstructured.Unstructured
switch mapping.Scope.Name() {
case meta.RESTScopeNameNamespace:
- unstruct, err = dynClient.Resource(gvr).Namespace(namespace).Get(res.Name, opts)
+ unstruct, err = dynClient.Resource(gvr).Namespace(namespace).Get(context.TODO(), res.Name, opts)
case meta.RESTScopeNameRoot:
- unstruct, err = dynClient.Resource(gvr).Get(res.Name, opts)
+ unstruct, err = dynClient.Resource(gvr).Get(context.TODO(), res.Name, opts)
default:
return ResourceStatus{}, pkgerrors.New("Got an unknown RESTSCopeName for mapping: " + res.GVK.String())
}
@@ -276,7 +276,7 @@ func (k *KubernetesClient) ensureNamespace(namespace string) error {
return nil
}
-func (k *KubernetesClient) createKind(resTempl helm.KubernetesResourceTemplate,
+func (k *KubernetesClient) CreateKind(resTempl helm.KubernetesResourceTemplate,
namespace string) (helm.KubernetesResource, error) {
if _, err := os.Stat(resTempl.FilePath); os.IsNotExist(err) {
@@ -360,7 +360,7 @@ func (k *KubernetesClient) createResources(sortedTemplates []helm.KubernetesReso
var createdResources []helm.KubernetesResource
for _, resTempl := range sortedTemplates {
- resCreated, err := k.createKind(resTempl, namespace)
+ resCreated, err := k.CreateKind(resTempl, namespace)
if err != nil {
return nil, pkgerrors.Wrapf(err, "Error creating kind: %+v", resTempl.GVK)
}
@@ -390,7 +390,7 @@ func (k *KubernetesClient) updateResources(sortedTemplates []helm.KubernetesReso
return updatedResources, nil
}
-func (k *KubernetesClient) deleteKind(resource helm.KubernetesResource, namespace string) error {
+func (k *KubernetesClient) DeleteKind(resource helm.KubernetesResource, namespace string) error {
log.Warn("Deleting Resource", log.Fields{
"gvk": resource.GVK,
"resource": resource.Name,
@@ -412,7 +412,7 @@ func (k *KubernetesClient) deleteKind(resource helm.KubernetesResource, namespac
func (k *KubernetesClient) deleteResources(resources []helm.KubernetesResource, namespace string) error {
//TODO: Investigate if deletion should be in a particular order
for _, res := range resources {
- err := k.deleteKind(res, namespace)
+ err := k.DeleteKind(res, namespace)
if err != nil {
return pkgerrors.Wrap(err, "Deleting resources")
}
@@ -443,18 +443,3 @@ func (k *KubernetesClient) GetStandardClient() kubernetes.Interface {
func (k *KubernetesClient) GetInstanceID() string {
return k.instanceID
}
-
-//Following set of methods are implemented so that KubernetesClient
-//implements genericclioptions.RESTClientGetter interface
-func (k *KubernetesClient) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
- return k.discoverClient, nil
-}
-func (k *KubernetesClient) ToRESTMapper() (meta.RESTMapper, error) {
- return k.GetMapper(), nil
-}
-func (k *KubernetesClient) ToRawKubeConfigLoader() clientcmd.ClientConfig {
- return k.rawConfig
-}
-func (k *KubernetesClient) ToRESTConfig() (*rest.Config, error) {
- return k.restConfig, nil
-}
diff --git a/src/k8splugin/internal/app/instance.go b/src/k8splugin/internal/app/instance.go
index c3d3d207..c1ec35b6 100644
--- a/src/k8splugin/internal/app/instance.go
+++ b/src/k8splugin/internal/app/instance.go
@@ -19,16 +19,13 @@ package app
import (
"encoding/json"
- "log"
- "strings"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
- protorelease "k8s.io/helm/pkg/proto/hapi/release"
-
"github.com/onap/multicloud-k8s/src/k8splugin/internal/db"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/namegenerator"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/rb"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "log"
+ "strings"
pkgerrors "github.com/pkg/errors"
)
@@ -52,7 +49,7 @@ type InstanceResponse struct {
Namespace string `json:"namespace"`
ReleaseName string `json:"release-name"`
Resources []helm.KubernetesResource `json:"resources"`
- Hooks []*protorelease.Hook `json:"-"`
+ Hooks []*helm.Hook `json:"-"`
}
// InstanceMiniResponse contains the response from instantiation
@@ -264,7 +261,7 @@ func (v *InstanceClient) Query(id, apiVersion, kind, name, labels string) (Insta
Name: name,
GVK: schema.FromAPIVersionAndKind(apiVersion, kind),
}
- res, err := k8sClient.getResourceStatus(resIdentifier, resResp.Namespace)
+ res, err := k8sClient.GetResourceStatus(resIdentifier, resResp.Namespace)
if err != nil {
return InstanceStatus{}, pkgerrors.Wrap(err, "Querying Resource")
}
@@ -323,7 +320,7 @@ Main:
continue Main //Don't double check pods if someone decided to define pod explicitly in helm chart
}
}
- status, err := k8sClient.getResourceStatus(resource, resResp.Namespace)
+ status, err := k8sClient.GetResourceStatus(resource, resResp.Namespace)
if err != nil {
cumulatedErrorMsg = append(cumulatedErrorMsg, err.Error())
} else {
diff --git a/src/k8splugin/internal/healthcheck/healthcheck.go b/src/k8splugin/internal/healthcheck/healthcheck.go
index cf1d39bd..70f5bec2 100644
--- a/src/k8splugin/internal/healthcheck/healthcheck.go
+++ b/src/k8splugin/internal/healthcheck/healthcheck.go
@@ -16,26 +16,20 @@ package healthcheck
import (
"encoding/json"
"strings"
- "time"
+ "sync"
- protorelease "k8s.io/helm/pkg/proto/hapi/release"
- "k8s.io/helm/pkg/releasetesting"
+ "helm.sh/helm/v3/pkg/release"
+ "helm.sh/helm/v3/pkg/time"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/app"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/db"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
log "github.com/onap/multicloud-k8s/src/k8splugin/internal/logutils"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/namegenerator"
pkgerrors "github.com/pkg/errors"
)
-var (
- HcS_UNKNOWN string = protorelease.TestRun_Status_name[int32(protorelease.TestRun_UNKNOWN)]
- HcS_RUNNING string = protorelease.TestRun_Status_name[int32(protorelease.TestRun_RUNNING)]
- HcS_SUCCESS string = protorelease.TestRun_Status_name[int32(protorelease.TestRun_SUCCESS)]
- HcS_FAILURE string = protorelease.TestRun_Status_name[int32(protorelease.TestRun_FAILURE)]
-)
-
// InstanceHCManager interface exposes instance Healthcheck fuctionalities
type InstanceHCManager interface {
Create(instanceId string) (InstanceMiniHCStatus, error)
@@ -69,24 +63,30 @@ type InstanceHCClient struct {
// InstanceHCStatus holds healthcheck status
type InstanceHCStatus struct {
- InstanceId string `json:"instance-id"`
- HealthcheckId string `json:"healthcheck-id"`
- Status string `json:"status"`
- Info string `json:"info"`
- releasetesting.TestSuite `json:"test-suite"`
+ InstanceId string `json:"instance-id"`
+ HealthcheckId string `json:"healthcheck-id"`
+ Status release.HookPhase `json:"status"`
+ TestSuite *TestSuite `json:"test-suite"` //TODO could be merged with current struct
+}
+
+// TestSuite is a structure to hold compatibility with pre helm3 output
+type TestSuite struct {
+ StartedAt time.Time
+ CompletedAt time.Time
+ Results []*HookStatus
}
// InstanceMiniHCStatus holds only healthcheck summary
type InstanceMiniHCStatus struct {
- HealthcheckId string `json:"healthcheck-id"`
- Status string `json:"status"`
+ HealthcheckId string `json:"healthcheck-id"`
+ Status release.HookPhase `json:"status"`
}
// InstanceHCOverview holds Healthcheck-related data
type InstanceHCOverview struct {
InstanceId string `json:"instance-id"`
HCSummary []InstanceMiniHCStatus `json:"healthcheck-summary"`
- Hooks []*protorelease.Hook `json:"hooks"`
+ Hooks []*helm.Hook `json:"hooks"`
}
func NewHCClient() *InstanceHCClient {
@@ -101,6 +101,8 @@ func instanceMiniHCStatusFromStatus(ihcs InstanceHCStatus) InstanceMiniHCStatus
}
func (ihc InstanceHCClient) Create(instanceId string) (InstanceMiniHCStatus, error) {
+ //TODO Handle hook delete policies
+
//Generate ID
id := namegenerator.Generate()
@@ -111,9 +113,8 @@ func (ihc InstanceHCClient) Create(instanceId string) (InstanceMiniHCStatus, err
return InstanceMiniHCStatus{}, pkgerrors.Wrap(err, "Getting instance")
}
- //Prepare Environment, Request and Release structs
- //TODO In future could derive params from request
- client, err := NewKubeClient(instanceId, instance.Request.CloudRegion)
+ k8sClient := app.KubernetesClient{}
+ err = k8sClient.Init(instance.Request.CloudRegion, instanceId)
if err != nil {
return InstanceMiniHCStatus{}, pkgerrors.Wrap(err, "Preparing KubeClient")
}
@@ -121,91 +122,146 @@ func (ihc InstanceHCClient) Create(instanceId string) (InstanceMiniHCStatus, err
InstanceId: instanceId,
HealthcheckId: id,
}
- env := &releasetesting.Environment{
- Namespace: instance.Namespace,
- KubeClient: client,
- Parallel: false,
- Stream: NewStream(key, ihc.storeName, ihc.tagInst),
- }
- release := protorelease.Release{
- Name: instance.ReleaseName,
- Hooks: instance.Hooks,
- }
- //Define HC
- testSuite, err := releasetesting.NewTestSuite(&release)
- if err != nil {
- log.Error("Error creating TestSuite", log.Fields{
- "Release": release,
- })
- return InstanceMiniHCStatus{}, pkgerrors.Wrap(err, "Creating TestSuite")
+ //Filter out only relevant hooks
+ hooks := hookPairs{}
+ for _, hook := range instance.Hooks {
+ for _, hookEvent := range hook.Hook.Events {
+ if hookEvent == release.HookTest { //Helm3 no longer supports test-failure
+ hooks = append(hooks, hookPair{
+ Definition: hook,
+ Status: &HookStatus{
+ Name: hook.Hook.Name,
+ },
+ })
+ }
+ }
}
//Save state
+ testSuite := TestSuite{
+ StartedAt: time.Now(),
+ Results: hooks.statuses(),
+ }
ihcs := InstanceHCStatus{
- TestSuite: *testSuite,
- HealthcheckId: id,
InstanceId: instanceId,
- Status: HcS_RUNNING,
+ HealthcheckId: id,
+ Status: release.HookPhaseRunning,
+ TestSuite: &testSuite,
+ }
+
+ for _, h := range hooks {
+ h.Status.StartedAt = time.Now()
+ kr, err := k8sClient.CreateKind(h.Definition.KRT, instance.Namespace)
+ if err != nil {
+ // Set status fields
+ h.Status.Status = release.HookPhaseFailed
+ h.Status.CompletedAt = time.Now()
+ testSuite.CompletedAt = time.Now()
+ ihcs.Status = release.HookPhaseFailed
+ retErr := "Starting hook " + h.Status.Name
+
+ // Dump to DB
+ err = db.DBconn.Create(ihc.storeName, key, ihc.tagInst, ihcs)
+ if err != nil {
+ retErr = retErr + " and couldn't save to DB"
+ }
+
+ return instanceMiniHCStatusFromStatus(ihcs),
+ pkgerrors.Wrap(err, retErr)
+ } else {
+ h.Status.Status = release.HookPhaseRunning
+ h.Status.KR = kr
+ }
}
err = db.DBconn.Create(ihc.storeName, key, ihc.tagInst, ihcs)
if err != nil {
return instanceMiniHCStatusFromStatus(ihcs),
pkgerrors.Wrap(err, "Creating Instance DB Entry")
}
-
- // Run HC async
- // If testSuite doesn't fail immediately, we let it continue in background
- errC := make(chan error, 1)
- timeout := make(chan bool, 1)
- // Stream handles updates of testSuite run so we don't need to care
- var RunAsync func() = func() {
- err := ihcs.TestSuite.Run(env)
- if err != nil {
- log.Error("Error running TestSuite", log.Fields{
- "TestSuite": ihcs.TestSuite,
- "Environment": env,
- "Error": err.Error(),
- })
- ihcs.Status = HcS_FAILURE
- ihcs.Info = ihcs.Info + "\n" + err.Error()
- }
- // Send to channel before db update as it can be slow
- errC <- err
- // Download latest Status/Info
- resp, err := ihc.Get(ihcs.InstanceId, ihcs.HealthcheckId)
- if err != nil {
- log.Error("Error querying Healthcheck status", log.Fields{"error": err})
- return
+ log.Info("Successfully initialized Healthcheck resources", log.Fields{
+ "InstanceId": instanceId,
+ "HealthcheckId": id,
+ })
+ go func() {
+ var wg sync.WaitGroup
+ update := make(chan bool) //True - hook finished, False - all hooks finished
+ for _, h := range hooks {
+ wg.Add(1)
+ go func(hookStatus *HookStatus) {
+ //TODO Handle errors here better in future, for now it's ok
+ hookStatus.Status, _ = getHookState(*hookStatus, k8sClient, instance.Namespace)
+ hookStatus.CompletedAt = time.Now()
+ log.Info("Hook finished", log.Fields{
+ "HealthcheckId": id,
+ "InstanceId": instanceId,
+ "Hook": hookStatus.Name,
+ "Status": hookStatus.Status,
+ })
+ update <- true
+ wg.Done()
+ return
+ }(h.Status)
}
- ihcs.Status = resp.Status
- ihcs.Info = resp.Info
- // Update DB
- err = db.DBconn.Update(ihc.storeName, key, ihc.tagInst, ihcs)
- if err != nil {
- log.Error("Error saving Testsuite failure in DB", log.Fields{
- "InstanceHCStatus": ihcs,
- "Error": err,
+ go func() {
+ wg.Wait()
+ log.Info("All hooks finished", log.Fields{
+ "HealthcheckId": id,
+ "InstanceId": instanceId,
})
+ update <- false
+ return
+ }()
+ for {
+ select {
+ case b := <-update:
+ log.Info("Healthcheck update", log.Fields{
+ "HealthcheckId": id,
+ "InstanceId": instanceId,
+ "Reason": map[bool]string{true: "Hook finished", false: "All hooks finished"}[b],
+ })
+ if b { //Some hook finished - need to update DB
+ err = db.DBconn.Update(ihc.storeName, key, ihc.tagInst, ihcs)
+ if err != nil {
+ log.Error("Couldn't update database", log.Fields{
+ "Store": ihc.storeName,
+ "Key": key,
+ "Payload": ihcs,
+ })
+ }
+ } else { //All hooks finished - need to terminate goroutine
+ testSuite.CompletedAt = time.Now()
+ //If everything's fine, final result is OK
+ finalResult := release.HookPhaseSucceeded
+ //If at least single hook wasn't determined - it's Unknown
+ for _, h := range hooks {
+ if h.Status.Status == release.HookPhaseUnknown {
+ finalResult = release.HookPhaseUnknown
+ break
+ }
+ }
+ //Unless at least one hook failed, then we've failed
+ for _, h := range hooks {
+ if h.Status.Status == release.HookPhaseFailed {
+ finalResult = release.HookPhaseFailed
+ break
+ }
+ }
+ ihcs.Status = finalResult
+ err = db.DBconn.Update(ihc.storeName, key, ihc.tagInst, ihcs)
+ if err != nil {
+ log.Error("Couldn't update database", log.Fields{
+ "Store": ihc.storeName,
+ "Key": key,
+ "Payload": ihcs,
+ })
+ }
+ return
+ }
+ }
}
- }
- go func() {
- //Time is hardcoded but the only impact is that typically http response is sent after 2s
- //Could be considered shorter in future
- time.Sleep(2 * time.Second)
- timeout <- true
}()
- go RunAsync()
- select {
- case err := <-errC:
- if err == nil {
- return instanceMiniHCStatusFromStatus(ihcs), nil
- } else {
- return instanceMiniHCStatusFromStatus(ihcs), err
- }
- case <-timeout:
- return instanceMiniHCStatusFromStatus(ihcs), nil
- }
+ return instanceMiniHCStatusFromStatus(ihcs), nil
}
func (ihc InstanceHCClient) Get(instanceId, healthcheckId string) (InstanceHCStatus, error) {
@@ -227,25 +283,32 @@ func (ihc InstanceHCClient) Get(instanceId, healthcheckId string) (InstanceHCSta
}
func (ihc InstanceHCClient) Delete(instanceId, healthcheckId string) error {
+ key := HealthcheckKey{instanceId, healthcheckId}
v := app.NewInstanceClient()
instance, err := v.Get(instanceId)
if err != nil {
return pkgerrors.Wrap(err, "Getting instance")
}
- client, err := NewKubeClient(instanceId, instance.Request.CloudRegion)
+ ihcs, err := ihc.Get(instanceId, healthcheckId)
+ if err != nil {
+ return pkgerrors.Wrap(err, "Error querying Healthcheck status")
+ }
+ k8sClient := app.KubernetesClient{}
+ err = k8sClient.Init(instance.Request.CloudRegion, instanceId)
if err != nil {
return pkgerrors.Wrap(err, "Preparing KubeClient")
}
- env := &releasetesting.Environment{
- Namespace: instance.Namespace,
- KubeClient: client,
+ cumulatedErr := ""
+ for _, hook := range ihcs.TestSuite.Results {
+ err = k8sClient.DeleteKind(hook.KR, instance.Namespace)
+ //FIXME handle "missing resource" error as not error - hook may be already deleted
+ if err != nil {
+ cumulatedErr += err.Error() + "\n"
+ }
}
- ihcs, err := ihc.Get(instanceId, healthcheckId)
- if err != nil {
- return pkgerrors.Wrap(err, "Error querying Healthcheck status")
+ if cumulatedErr != "" {
+ return pkgerrors.New("Removing hooks failed with errors:\n" + cumulatedErr)
}
- env.DeleteTestPods(ihcs.TestSuite.TestManifests)
- key := HealthcheckKey{instanceId, healthcheckId}
err = db.DBconn.Delete(ihc.storeName, key, ihc.tagInst)
if err != nil {
return pkgerrors.Wrap(err, "Removing Healthcheck in DB")
diff --git a/src/k8splugin/internal/healthcheck/hooks.go b/src/k8splugin/internal/healthcheck/hooks.go
new file mode 100644
index 00000000..0b1be9ac
--- /dev/null
+++ b/src/k8splugin/internal/healthcheck/hooks.go
@@ -0,0 +1,145 @@
+/*
+Copyright © 2021 Samsung Electronics
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package healthcheck
+
+import (
+ "time"
+
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/app"
+ "github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
+ log "github.com/onap/multicloud-k8s/src/k8splugin/internal/logutils"
+
+ "helm.sh/helm/v3/pkg/release"
+ helmtime "helm.sh/helm/v3/pkg/time"
+
+ batch "k8s.io/api/batch/v1"
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+
+ pkgerrors "github.com/pkg/errors"
+)
+
+type HookStatus struct {
+ StartedAt helmtime.Time `json:"started_at"`
+ CompletedAt helmtime.Time `json:"completed_at"`
+ Status release.HookPhase `json:"status"`
+ Name string `json:"name"`
+ KR helm.KubernetesResource `json:"-"`
+}
+
+// Helper type to combine Hook definition with Status
+type hookPair struct {
+ Definition *helm.Hook
+ Status *HookStatus
+}
+
+// Wraper type to implement helper method for extraction
+type hookPairs []hookPair
+
+// Helper function to retrieve slice of Statuses from slice of hookPairs
+func (hps hookPairs) statuses() (hsps []*HookStatus) {
+ for _, hp := range hps {
+ hsps = append(hsps, hp.Status)
+ }
+ return
+}
+
+// TODO Optimize by using k8s.io/client-go/tools/cache.NewListWatchFromClient just like
+// in helm.sh/helm/v3/pkg/kube/client.go -> watchUntilReady()
+func getHookState(hookStatus HookStatus, k8sClient app.KubernetesClient, namespace string) (release.HookPhase, error) {
+ // Initial check of Hook Resource type
+ switch hookStatus.KR.GVK.Kind {
+ case "Job", "Pod":
+ default:
+ //We don't know how to check state of such resource
+ return release.HookPhaseUnknown, nil
+ }
+
+ for {
+ res, err := k8sClient.GetResourceStatus(hookStatus.KR, namespace)
+ if err != nil {
+ log.Error("Unable to check Resource Status", log.Fields{
+ "Resource": hookStatus.KR,
+ "Namespace": namespace,
+ "Error": err,
+ })
+ return release.HookPhaseUnknown,
+ pkgerrors.Wrap(err, "Unable to check Resource Status")
+ }
+
+ var parsedRes runtime.Object
+ switch hookStatus.KR.GVK.Kind {
+ case "Job":
+ parsedRes = new(batch.Job)
+ case "Pod":
+ parsedRes = new(v1.Pod)
+ }
+ err = runtime.DefaultUnstructuredConverter.FromUnstructured(res.Status.Object, parsedRes)
+ if err != nil {
+ log.Error("Couldn't convert Response into runtime object", log.Fields{
+ "Response": res.Status.Object,
+ "Error": err,
+ })
+ return release.HookPhaseUnknown,
+ pkgerrors.Wrap(err, "Couldn't conver Response into runtime object")
+ }
+
+ var tempState release.HookPhase
+ switch hookStatus.KR.GVK.Kind {
+ case "Job":
+ tempState = parseJobState(parsedRes)
+ case "Pod":
+ tempState = parsePodState(parsedRes)
+ }
+ if tempState != release.HookPhaseRunning {
+ return tempState, nil
+ }
+ //TODO should be changed to "Watching" of resource as pointed earlier
+ time.Sleep(5 * time.Second)
+ }
+}
+
+// Based on kube/client.go -> waitForJob()
+func parseJobState(obj runtime.Object) (state release.HookPhase) {
+ job, ok := obj.(*batch.Job)
+ if !ok {
+ //Something went wrong, and we don't want to parse such resource again
+ return release.HookPhaseUnknown
+ }
+ for _, c := range job.Status.Conditions {
+ if c.Type == batch.JobComplete && c.Status == "True" {
+ return release.HookPhaseSucceeded
+ } else if c.Type == batch.JobFailed && c.Status == "True" {
+ return release.HookPhaseFailed
+ }
+ }
+ return release.HookPhaseRunning
+}
+
+// Based on kube/client.go -> waitForPodSuccess()
+func parsePodState(obj runtime.Object) (state release.HookPhase) {
+ pod, ok := obj.(*v1.Pod)
+ if !ok {
+ return release.HookPhaseUnknown
+ }
+
+ switch pod.Status.Phase {
+ case v1.PodSucceeded:
+ return release.HookPhaseSucceeded
+ case v1.PodFailed:
+ return release.HookPhaseFailed
+ default:
+ return release.HookPhaseRunning
+ }
+}
diff --git a/src/k8splugin/internal/healthcheck/kubeclient.go b/src/k8splugin/internal/healthcheck/kubeclient.go
deleted file mode 100644
index 2a168a78..00000000
--- a/src/k8splugin/internal/healthcheck/kubeclient.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright © 2021 Samsung Electronics
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package healthcheck
-
-import (
- "k8s.io/helm/pkg/kube"
- "k8s.io/helm/pkg/tiller/environment"
-
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/app"
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/config"
-
- pkgerrors "github.com/pkg/errors"
-)
-
-//implements environment.KubeClient but overrides it so that
-//custom labels can be injected into created resources
-//using internal k8sClient
-type KubeClientImpl struct {
- environment.KubeClient
- labels map[string]string
- k app.KubernetesClient
-}
-
-var _ environment.KubeClient = KubeClientImpl{}
-
-func NewKubeClient(instanceId, cloudRegion string) (*KubeClientImpl, error) {
- k8sClient := app.KubernetesClient{}
- err := k8sClient.Init(cloudRegion, instanceId)
- if err != nil {
- return nil, pkgerrors.Wrap(err, "Initializing k8sClient")
- }
- return &KubeClientImpl{
- labels: map[string]string{
- config.GetConfiguration().KubernetesLabelName: instanceId,
- },
- KubeClient: kube.New(&k8sClient),
- k: k8sClient,
- }, nil
-}
-
-/* FIXME
-// Need to correct this later and provide override of Create method to use our k8sClient
-// So that healthcheck hook resources would be labeled with vf-module data just like currently
-// every k8splugin-managed resource is
-
-//Create function is overrided to label test resources with custom labels
-func (kci *KubeClientImpl) Create(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
- return nil
-}
-*/
diff --git a/src/k8splugin/internal/healthcheck/stream.go b/src/k8splugin/internal/healthcheck/stream.go
deleted file mode 100644
index d7c6e654..00000000
--- a/src/k8splugin/internal/healthcheck/stream.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-Copyright © 2021 Samsung Electronics
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package healthcheck
-
-import (
- "google.golang.org/grpc"
- "k8s.io/helm/pkg/proto/hapi/release"
- "k8s.io/helm/pkg/proto/hapi/services"
-
- "github.com/onap/multicloud-k8s/src/k8splugin/internal/db"
- log "github.com/onap/multicloud-k8s/src/k8splugin/internal/logutils"
-
- pkgerrors "github.com/pkg/errors"
-)
-
-//implements basic stream implementation that logs progress
-//and updates state in DB
-type StreamImpl struct {
- Key HealthcheckKey
- StoreName string
- Tag string
- grpc.ServerStream //only to satisfy interface needs, it's not used
-}
-
-var _ services.ReleaseService_RunReleaseTestServer = StreamImpl{}
-
-func NewStream(key HealthcheckKey, store, tag string) *StreamImpl {
- s := StreamImpl{
- Key: key,
- StoreName: store,
- Tag: tag,
- }
- return &s
-}
-
-func (si StreamImpl) Send(m *services.TestReleaseResponse) error {
- log.Info("Stream message", log.Fields{"msg": m})
-
- DBResp, err := db.DBconn.Read(si.StoreName, si.Key, si.Tag)
- if err != nil || DBResp == nil {
- return pkgerrors.Wrap(err, "Error retrieving Healthcheck data")
- }
-
- resp := InstanceHCStatus{}
- err = db.DBconn.Unmarshal(DBResp, &resp)
- if err != nil {
- return pkgerrors.Wrap(err, "Unmarshaling Healthcheck Value")
- }
-
- resp.Status = release.TestRun_Status_name[int32(m.Status)]
- if m.Msg != "" {
- if resp.Info == "" {
- resp.Info = m.Msg
- } else {
- resp.Info = resp.Info + "\n" + m.Msg
- }
- }
-
- err = db.DBconn.Update(si.StoreName, si.Key, si.Tag, resp)
- if err != nil {
- return pkgerrors.Wrap(err, "Updating Healthcheck")
- }
- return nil
-}
diff --git a/src/k8splugin/internal/helm/helm.go b/src/k8splugin/internal/helm/helm.go
index 31047eb6..3c25ac8c 100644
--- a/src/k8splugin/internal/helm/helm.go
+++ b/src/k8splugin/internal/helm/helm.go
@@ -20,31 +20,24 @@ package helm
import (
"fmt"
"io/ioutil"
- "k8s.io/helm/pkg/strvals"
"os"
"path/filepath"
"regexp"
- "sort"
- "strconv"
"strings"
utils "github.com/onap/multicloud-k8s/src/k8splugin/internal"
- "github.com/ghodss/yaml"
pkgerrors "github.com/pkg/errors"
+ "helm.sh/helm/v3/pkg/action"
+ "helm.sh/helm/v3/pkg/chart/loader"
+ "helm.sh/helm/v3/pkg/cli"
+ helmOptions "helm.sh/helm/v3/pkg/cli/values"
+ "helm.sh/helm/v3/pkg/getter"
+ "helm.sh/helm/v3/pkg/releaseutil"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/validation"
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
- "k8s.io/helm/pkg/chartutil"
- "k8s.io/helm/pkg/hooks"
- "k8s.io/helm/pkg/manifest"
- "k8s.io/helm/pkg/proto/hapi/chart"
- protorelease "k8s.io/helm/pkg/proto/hapi/release"
- "k8s.io/helm/pkg/releaseutil"
- "k8s.io/helm/pkg/renderutil"
- "k8s.io/helm/pkg/tiller"
- "k8s.io/helm/pkg/timeconv"
)
// Template is the interface for all helm templating commands
@@ -55,7 +48,7 @@ type Template interface {
GenerateKubernetesArtifacts(
chartPath string,
valueFiles []string,
- values []string) (map[string][]string, error)
+ values []string) ([]KubernetesResourceTemplate, []*Hook, error)
}
// TemplateClient implements the Template interface
@@ -79,130 +72,30 @@ func NewTemplateClient(k8sversion, namespace, releasename string) *TemplateClien
}
}
-// Define hooks that are honored by k8splugin
-var honoredEvents = map[string]protorelease.Hook_Event{
- hooks.ReleaseTestSuccess: protorelease.Hook_RELEASE_TEST_SUCCESS,
- hooks.ReleaseTestFailure: protorelease.Hook_RELEASE_TEST_FAILURE,
-}
-
// Combines valueFiles and values into a single values stream.
// values takes precedence over valueFiles
-func (h *TemplateClient) processValues(valueFiles []string, values []string) ([]byte, error) {
- base := map[string]interface{}{}
-
- //Values files that are used for overriding the chart
- for _, filePath := range valueFiles {
- currentMap := map[string]interface{}{}
-
- var bytes []byte
- var err error
- if strings.TrimSpace(filePath) == "-" {
- bytes, err = ioutil.ReadAll(os.Stdin)
- } else {
- bytes, err = ioutil.ReadFile(filePath)
- }
-
- if err != nil {
- return []byte{}, err
- }
-
- if err := yaml.Unmarshal(bytes, &currentMap); err != nil {
- return []byte{}, fmt.Errorf("failed to parse %s: %s", filePath, err)
- }
- // Merge with the previous map
- base = h.mergeValues(base, currentMap)
- }
-
- //User specified value. Similar to ones provided by -x
- for _, value := range values {
- if err := strvals.ParseInto(value, base); err != nil {
- return []byte{}, fmt.Errorf("failed parsing --set data: %s", err)
- }
- }
-
- return yaml.Marshal(base)
-}
-
-func (h *TemplateClient) mergeValues(dest map[string]interface{}, src map[string]interface{}) map[string]interface{} {
- for k, v := range src {
- // If the key doesn't exist already, then just set the key to that value
- if _, exists := dest[k]; !exists {
- dest[k] = v
- continue
- }
- nextMap, ok := v.(map[string]interface{})
- // If it isn't another map, overwrite the value
- if !ok {
- dest[k] = v
- continue
- }
- // Edge case: If the key exists in the destination, but isn't a map
- destMap, isMap := dest[k].(map[string]interface{})
- // If the source map has a map for this key, prefer it
- if !isMap {
- dest[k] = v
- continue
- }
- // If we got to this point, it is a map in both, so merge them
- dest[k] = h.mergeValues(destMap, nextMap)
- }
- return dest
-}
-
-// Checks whether resource is a hook and if it is, returns hook struct
-//Logic is based on private method
-//file *manifestFile) sort(result *result) error
-//of helm/pkg/tiller package
-func isHook(path, resource string) (*protorelease.Hook, error) {
-
- var entry releaseutil.SimpleHead
- err := yaml.Unmarshal([]byte(resource), &entry)
+func (h *TemplateClient) processValues(valueFiles []string, values []string) (map[string]interface{}, error) {
+ settings := cli.New()
+ providers := getter.All(settings)
+ options := helmOptions.Options{
+ ValueFiles: valueFiles,
+ Values: values,
+ }
+ base, err := options.MergeValues(providers)
if err != nil {
- return nil, pkgerrors.Wrap(err, "Loading resource to YAML")
- }
- //If resource has no metadata it can't be a hook
- if entry.Metadata == nil ||
- entry.Metadata.Annotations == nil ||
- len(entry.Metadata.Annotations) == 0 {
- return nil, nil
- }
- //Determine hook weight
- hookWeight, err := strconv.Atoi(entry.Metadata.Annotations[hooks.HookWeightAnno])
- if err != nil {
- hookWeight = 0
- }
- //Prepare hook obj
- resultHook := &protorelease.Hook{
- Name: entry.Metadata.Name,
- Kind: entry.Kind,
- Path: path,
- Manifest: resource,
- Events: []protorelease.Hook_Event{},
- Weight: int32(hookWeight),
- DeletePolicies: []protorelease.Hook_DeletePolicy{},
+ return nil, err
}
- //Determine hook's events
- hookTypes, ok := entry.Metadata.Annotations[hooks.HookAnno]
- if !ok {
- return resultHook, nil
- }
- for _, hookType := range strings.Split(hookTypes, ",") {
- hookType = strings.ToLower(strings.TrimSpace(hookType))
- e, ok := honoredEvents[hookType]
- if ok {
- resultHook.Events = append(resultHook.Events, e)
- }
- }
- return resultHook, nil
+
+ return base, nil
}
// GenerateKubernetesArtifacts a mapping of type to fully evaluated helm template
func (h *TemplateClient) GenerateKubernetesArtifacts(inputPath string, valueFiles []string,
- values []string) ([]KubernetesResourceTemplate, []*protorelease.Hook, error) {
+ values []string) ([]KubernetesResourceTemplate, []*Hook, error) {
var outputDir, chartPath, namespace, releaseName string
var retData []KubernetesResourceTemplate
- var hookList []*protorelease.Hook
+ var hookList []*Hook
releaseName = h.releaseName
namespace = h.kubeNameSpace
@@ -231,106 +124,88 @@ func (h *TemplateClient) GenerateKubernetesArtifacts(inputPath string, valueFile
if err != nil {
return retData, hookList, err
}
- config := &chart.Config{Raw: string(rawVals), Values: map[string]*chart.Value{}}
if msgs := validation.IsDNS1123Label(releaseName); releaseName != "" && len(msgs) > 0 {
return retData, hookList, fmt.Errorf("release name %s is not a valid DNS label: %s", releaseName, strings.Join(msgs, ";"))
}
- // Check chart requirements to make sure all dependencies are present in /charts
- c, err := chartutil.Load(chartPath)
+ // Initialize the install client
+ client := action.NewInstall(&action.Configuration{})
+ client.DryRun = true
+ client.ClientOnly = true
+ client.ReleaseName = releaseName
+ client.IncludeCRDs = true
+ client.DisableHooks = true //to ensure no duplicates in case of defined pre/post install hooks
+
+ // Check chart dependencies to make sure all are present in /charts
+ chartRequested, err := loader.Load(chartPath)
if err != nil {
- return retData, hookList, pkgerrors.Errorf("Got error: %s", err.Error())
+ return retData, hookList, err
}
- renderOpts := renderutil.Options{
- ReleaseOptions: chartutil.ReleaseOptions{
- Name: releaseName,
- IsInstall: true,
- IsUpgrade: false,
- Time: timeconv.Now(),
- Namespace: namespace,
- },
- KubeVersion: h.kubeVersion,
+ if chartRequested.Metadata.Type != "" && chartRequested.Metadata.Type != "application" {
+ return retData, hookList, fmt.Errorf(
+ "chart %q has an unsupported type and is not installable: %q",
+ chartRequested.Metadata.Name,
+ chartRequested.Metadata.Type,
+ )
}
- renderedTemplates, err := renderutil.Render(c, config, renderOpts)
+ client.Namespace = namespace
+ release, err := client.Run(chartRequested, rawVals)
if err != nil {
return retData, hookList, err
}
-
- newRenderedTemplates := make(map[string]string)
-
- //Some manifests can contain multiple yaml documents
- //This step is splitting them up into multiple files
- //Each file contains only a single k8s kind
- for k, v := range renderedTemplates {
- //Splits into manifest-0, manifest-1 etc
- if filepath.Base(k) == "NOTES.txt" {
- continue
- }
- rmap := releaseutil.SplitManifests(v)
-
- // Iterating over map can yield different order at times
- // so first we'll sort keys
- sortedKeys := make([]string, len(rmap))
- for k1, _ := range rmap {
- sortedKeys = append(sortedKeys, k1)
- }
- // This makes empty files have the lowest indices
- sort.Strings(sortedKeys)
-
- for k1, v1 := range sortedKeys {
- key := fmt.Sprintf("%s-%d", k, k1)
- newRenderedTemplates[key] = rmap[v1]
- }
+	// SplitManifests returns integer-sortable keys so that manifests are
+	// output in the same order as the input when sorted by `BySplitManifestsOrder`.
+ rmap := releaseutil.SplitManifests(release.Manifest)
+ // We won't get any meaningful hooks from here
+ _, m, err := releaseutil.SortManifests(rmap, nil, releaseutil.InstallOrder)
+ if err != nil {
+ return retData, hookList, err
}
-
- listManifests := manifest.SplitManifests(newRenderedTemplates)
- var manifestsToRender []manifest.Manifest
- //render all manifests in the chart
- manifestsToRender = listManifests
- for _, m := range tiller.SortByKind(manifestsToRender) {
- data := m.Content
- b := filepath.Base(m.Name)
+ for _, k := range m {
+ data := k.Content
+ b := filepath.Base(k.Name)
if b == "NOTES.txt" {
continue
}
if strings.HasPrefix(b, "_") {
continue
}
-
// blank template after execution
if h.emptyRegex.MatchString(data) {
continue
}
-
- mfilePath := filepath.Join(outputDir, m.Name)
+ mfilePath := filepath.Join(outputDir, k.Name)
utils.EnsureDirectory(mfilePath)
- err = ioutil.WriteFile(mfilePath, []byte(data), 0666)
+ err = ioutil.WriteFile(mfilePath, []byte(k.Content), 0600)
if err != nil {
return retData, hookList, err
}
-
- hook, _ := isHook(mfilePath, data)
- // if hook is not nil, then append it to hooks list and continue
- // if it's not, disregard error
- if hook != nil {
- hookList = append(hookList, hook)
- continue
- }
-
gvk, err := getGroupVersionKind(data)
if err != nil {
return retData, hookList, err
}
-
kres := KubernetesResourceTemplate{
GVK: gvk,
FilePath: mfilePath,
}
retData = append(retData, kres)
}
+ for _, h := range release.Hooks {
+ hFilePath := filepath.Join(outputDir, h.Name)
+ utils.EnsureDirectory(hFilePath)
+ err = ioutil.WriteFile(hFilePath, []byte(h.Manifest), 0600)
+ if err != nil {
+ return retData, hookList, err
+ }
+ gvk, err := getGroupVersionKind(h.Manifest)
+ if err != nil {
+ return retData, hookList, err
+ }
+ hookList = append(hookList, &Hook{*h, KubernetesResourceTemplate{gvk, hFilePath}})
+ }
return retData, hookList, nil
}
diff --git a/src/k8splugin/internal/helm/helm_test.go b/src/k8splugin/internal/helm/helm_test.go
index 358577ea..29d446fa 100644
--- a/src/k8splugin/internal/helm/helm_test.go
+++ b/src/k8splugin/internal/helm/helm_test.go
@@ -20,6 +20,7 @@ package helm
import (
"crypto/sha256"
"fmt"
+ "gopkg.in/yaml.v2"
"io/ioutil"
"path/filepath"
"strings"
@@ -45,7 +46,7 @@ func TestProcessValues(t *testing.T) {
filepath.Join(profileDir, "override_values.yaml"),
},
//Hash of a combined values.yaml file that is expected
- expectedHash: "c18a70f426933de3c051c996dc34fd537d0131b2d13a2112a2ecff674db6c2f9",
+ expectedHash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
expectedError: "",
},
{
@@ -58,7 +59,7 @@ func TestProcessValues(t *testing.T) {
"service.externalPort=82",
},
//Hash of a combined values.yaml file that is expected
- expectedHash: "028a3521fc9f8777ea7e67a6de0c51f2c875b88ca91734999657f0ca924ddb7a",
+ expectedHash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
expectedError: "",
},
{
@@ -73,7 +74,7 @@ func TestProcessValues(t *testing.T) {
"service.externalPort=82",
},
//Hash of a combined values.yaml file that is expected
- expectedHash: "516fab4ab7b76ba2ff35a97c2a79b74302543f532857b945f2fe25e717e755be",
+ expectedHash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
expectedError: "",
},
{
@@ -83,7 +84,7 @@ func TestProcessValues(t *testing.T) {
"servers[0].port=80",
},
expectedError: "",
- expectedHash: "50d9401b003f65c1ccfd1c5155106fff88c8201ab8b7d66bd6ffa4fe2883bead",
+ expectedHash: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
},
}
@@ -102,11 +103,11 @@ func TestProcessValues(t *testing.T) {
}
} else {
//Compute the hash of returned data and compare
- h.Write(out)
gotHash := fmt.Sprintf("%x", h.Sum(nil))
h.Reset()
if gotHash != testCase.expectedHash {
- t.Fatalf("Got unexpected hash '%s' of values.yaml:\n%s", gotHash, out)
+ mout, _ := yaml.Marshal(&out)
+ t.Fatalf("Got unexpected hash '%s' of values.yaml:\n%v", gotHash, string(mout))
}
}
})
@@ -133,9 +134,9 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
values: []string{},
//sha256 hash of the evaluated templates in each chart
expectedHashMap: map[string]string{
- "testchart2/templates/service.yaml": "fdd6a2b6795486f0dd1d8c44379afb5ffe4072c09f9cf6594738e8ded4dd872d",
- "subcharta/templates/service.yaml": "570389588fffdb7193ab265888d781f3d751f3a40362533344f9aa7bb93a8bb0",
- "subchartb/templates/service.yaml": "5654e03d922e8ec49649b4bbda9dfc9e643b3b7c9c18b602cc7e26fd36a39c2a",
+ "manifest-0": "fcc1083ace82b633e3a0a687d50f532c07e1212b7a42b2c178b65e5768fffcfe",
+ "manifest-2": "eefeac6ff5430a16a32ae3974857cbe5ff516a1a68566e5edcddd410d60397c0",
+ "manifest-1": "b88aa963ee3afb9676e9930519d7caa103df1251da48a9351ab4ac0c5730d2af",
},
expectedError: "",
},
@@ -150,9 +151,9 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
},
//sha256 hash of the evaluated templates in each chart
expectedHashMap: map[string]string{
- "testchart2/templates/service.yaml": "2bb96e791ecb6a3404bc5de3f6c4182aed881630269e2aa6766df38b0f852724",
- "subcharta/templates/service.yaml": "570389588fffdb7193ab265888d781f3d751f3a40362533344f9aa7bb93a8bb0",
- "subchartb/templates/service.yaml": "5654e03d922e8ec49649b4bbda9dfc9e643b3b7c9c18b602cc7e26fd36a39c2a",
+ "manifest-0": "fcc1083ace82b633e3a0a687d50f532c07e1212b7a42b2c178b65e5768fffcfe",
+ "manifest-2": "03ae530e49071d005be78f581b7c06c59119f91f572b28c0c0c06ced8e37bf6e",
+ "manifest-1": "b88aa963ee3afb9676e9930519d7caa103df1251da48a9351ab4ac0c5730d2af",
},
expectedError: "",
},
@@ -164,8 +165,8 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
"goingEmpty=false",
},
expectedHashMap: map[string]string{
- "testchart3/templates/multi.yaml-2": "e24cbbefac2c2f700880b8fd041838f2dd48bbc1e099e7c1d2485ae7feb3da0d",
- "testchart3/templates/multi.yaml-3": "592a8e5b2c35b8469aa45703a835bc00657bfe36b51eb08427a46e7d22fb1525",
+ "manifest-0": "666e8d114981a4b5d13fb799be060aa57e0e48904bba4a410f87a2e827a57ddb",
+ "manifest-2": "6a5af22538c273b9d4a3156e3b6bb538c655041eae31e93db21a9e178f73ecf0",
},
expectedError: "",
},
@@ -177,9 +178,9 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
"goingEmpty=true",
},
expectedHashMap: map[string]string{
- "testchart3/templates/multi.yaml-3": "e24cbbefac2c2f700880b8fd041838f2dd48bbc1e099e7c1d2485ae7feb3da0d",
- "testchart3/templates/multi.yaml-4": "0bea01e65148584609ede5000c024241ba1c35b440b32ec0a4f7013015715bfe",
- "testchart3/templates/multi.yaml-5": "6a5af22538c273b9d4a3156e3b6bb538c655041eae31e93db21a9e178f73ecf0",
+ "manifest-0": "666e8d114981a4b5d13fb799be060aa57e0e48904bba4a410f87a2e827a57ddb",
+ "manifest-1": "8613e7e7cc0186516b13be37ec7fc321ff89e3abaed0a841773a4eba2d77ce2a",
+ "manifest-2": "3543ae9563fe62ce4a7446d72e1cd23140d8cc5495f0221430d70e94845c1408",
},
expectedError: "",
},
@@ -190,7 +191,8 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
values: []string{},
expectedError: "",
expectedHashMap: map[string]string{
- "mockv3/templates/deployment.yaml": "259a027a4957e7428eb1d2e774fa1afaa62449521853f8b2916887040bae2ca4",
+ "manifest-0": "94975ff704b9cc00a7988fe7fc865665495655ec2584d3e9de2f7e5294c7eb0d",
+ "dummy-test": "b50bb5f818fe0be332f09401104ae9cea59442e2dabe1a16b4ce21b753177a80",
},
},
}
@@ -200,7 +202,7 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
for _, testCase := range testCases {
t.Run(testCase.label, func(t *testing.T) {
tc := NewTemplateClient("1.12.3", "testnamespace", "testreleasename")
- out, _, err := tc.GenerateKubernetesArtifacts(testCase.chartPath, testCase.valueFiles,
+ out, hooks, err := tc.GenerateKubernetesArtifacts(testCase.chartPath, testCase.valueFiles,
testCase.values)
if err != nil {
if testCase.expectedError == "" {
@@ -209,13 +211,38 @@ func TestGenerateKubernetesArtifacts(t *testing.T) {
if strings.Contains(err.Error(), testCase.expectedError) == false {
t.Fatalf("Got unexpected error message %s", err)
}
+ } else if len(testCase.expectedHashMap) != len(out)+len(hooks) {
+ t.Fatalf("Mismatch of expected files (%d) and returned resources (%d)",
+ len(testCase.expectedHashMap), len(out)+len(hooks))
} else {
//Compute the hash of returned data and compare
for _, v := range out {
f := v.FilePath
data, err := ioutil.ReadFile(f)
if err != nil {
- t.Errorf("Unable to read file %s", v)
+ t.Fatalf("Unable to read file %s", v)
+ }
+ h.Write(data)
+ gotHash := fmt.Sprintf("%x", h.Sum(nil))
+ h.Reset()
+
+ //Find the right hash from expectedHashMap
+ expectedHash := ""
+ for k1, v1 := range testCase.expectedHashMap {
+ if strings.Contains(f, k1) == true {
+ expectedHash = v1
+ break
+ }
+ }
+ if gotHash != expectedHash {
+ t.Fatalf("Got unexpected hash for %s: '%s'; expected: '%s'", f, gotHash, expectedHash)
+ }
+ }
+ for _, v := range hooks {
+ f := v.KRT.FilePath
+ data, err := ioutil.ReadFile(f)
+ if err != nil {
+ t.Fatalf("Unable to read file %+v", v)
}
h.Write(data)
gotHash := fmt.Sprintf("%x", h.Sum(nil))
diff --git a/src/k8splugin/internal/helm/types.go b/src/k8splugin/internal/helm/types.go
index 2c8badb8..9e066bbc 100644
--- a/src/k8splugin/internal/helm/types.go
+++ b/src/k8splugin/internal/helm/types.go
@@ -1,5 +1,6 @@
/*
* Copyright 2018 Intel Corporation, Inc
+ * Copyright © 2021 Samsung Electronics
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,9 @@
package helm
import (
+ "encoding/json"
+
+ "helm.sh/helm/v3/pkg/release"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -39,3 +43,21 @@ type KubernetesResource struct {
// Name of resource in Kubernetes
Name string
}
+
+// Hook is internal container for Helm Hook Definition
+type Hook struct {
+ Hook release.Hook
+ KRT KubernetesResourceTemplate
+}
+
+// Custom Marshal implementation to satisfy external interface
+func (h Hook) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&struct {
+ Name string `json:"name"`
+ Kind string `json:"kind"`
+		Path     string              `json:"path"`
+		Manifest string              `json:"manifest"`
+ Events []release.HookEvent `json:"events"`
+ }{h.Hook.Name, h.Hook.Kind, h.Hook.Path,
+ h.Hook.Manifest, h.Hook.Events})
+}
diff --git a/src/k8splugin/internal/rb/profile.go b/src/k8splugin/internal/rb/profile.go
index df4d2e5c..3db6c40f 100644
--- a/src/k8splugin/internal/rb/profile.go
+++ b/src/k8splugin/internal/rb/profile.go
@@ -24,8 +24,6 @@ import (
"log"
"path/filepath"
- protorelease "k8s.io/helm/pkg/proto/hapi/release"
-
"github.com/onap/multicloud-k8s/src/k8splugin/internal/db"
"github.com/onap/multicloud-k8s/src/k8splugin/internal/helm"
@@ -273,10 +271,10 @@ func (v *ProfileClient) Download(rbName, rbVersion, prName string) ([]byte, erro
//Resolve returns the path where the helm chart merged with
//configuration overrides resides and final ReleaseName picked for instantiation
func (v *ProfileClient) Resolve(rbName string, rbVersion string,
- profileName string, values []string, overrideReleaseName string) ([]helm.KubernetesResourceTemplate, []*protorelease.Hook, string, error) {
+ profileName string, values []string, overrideReleaseName string) ([]helm.KubernetesResourceTemplate, []*helm.Hook, string, error) {
var sortedTemplates []helm.KubernetesResourceTemplate
- var hookList []*protorelease.Hook
+ var hookList []*helm.Hook
var finalReleaseName string
//Download and process the profile first