aboutsummaryrefslogtreecommitdiffstats
path: root/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg
diff options
context:
space:
mode:
Diffstat (limited to 'vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg')
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/handler/handler.go96
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/pipeline/pipeline.go130
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/sample-rest-requests/createWriter.json11
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfs-config.go37
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfsUtils.go33
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/kafka-config.go55
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/logutils.go32
-rw-r--r--vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/readJson.go28
8 files changed, 422 insertions, 0 deletions
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/handler/handler.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/handler/handler.go
new file mode 100644
index 00000000..65021b4a
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/handler/handler.go
@@ -0,0 +1,96 @@
+package handler
+
+
+import (
+ "fmt"
+ "net/http"
+ "io/ioutil"
+ "encoding/json"
+ "github.com/gorilla/mux"
+
+ guuid "github.com/google/uuid"
+ pipeline "hdfs-writer/pkg/pipeline"
+ utils "hdfs-writer/pkg/utils"
+)
+
+
+var slogger = utils.GetLoggerInstance()
+// ChannelMap is the global map to store writerNames as key and channels as values.
+var ChannelMap =make(map[string]chan struct{})
+
+
+// This is a sample test request handler
+func testFunc(w http.ResponseWriter, r *http.Request){
+ slogger.Info("Invoking testFunc ...")
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintln(w,"HTTP Test successful ")
+}
+
+// CreateRouter builds and returns the HTTP router exposing the test,
+// create-writer and delete-writer endpoints of this service.
+func CreateRouter() http.Handler{
+	r := mux.NewRouter().StrictSlash(true)
+	slogger.Info("Created router ...")
+	r.HandleFunc("/test", testFunc).Methods("GET")
+	r.HandleFunc("/createWriter", createWriter).Methods("POST")
+	r.HandleFunc("/deleteWriter/{writerName}", deleteWriter).Methods("DELETE")
+	return r
+}
+
+
+// createWriter creates a writer pipeline from the JSON request body and
+// registers its control channel in ChannelMap under a generated writer name.
+// Malformed input now yields 400 Bad Request; the old code called
+// slogger.Fatalf (process exit) and used unchecked type assertions that
+// panicked the server on a bad payload.
+func createWriter(w http.ResponseWriter, r *http.Request){
+	reqBody, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		slogger.Errorf("Unable to read request body :: %s", err.Error())
+		http.Error(w, "Unable to read request body", http.StatusBadRequest)
+		return
+	}
+	slogger.Info(string(reqBody))
+	var results map[string]interface{}
+	if err = json.Unmarshal(reqBody, &results); err != nil || len(results) == 0 {
+		slogger.Errorf("Unable to read from the config json file, unable to create configObject map")
+		http.Error(w, "Invalid writer config", http.StatusBadRequest)
+		return
+	}
+	writerStr := "writer"
+	writer, ok := results[writerStr].(map[string]interface{})
+	if !ok {
+		http.Error(w, "Missing 'writer' object in request", http.StatusBadRequest)
+		return
+	}
+	kafkaConfigMapObj, kafkaOk := writer["kafkaConfig"].(map[string]interface{})
+	hdfsConfigObj, hdfsOk := writer["hdfsConfig"].(map[string]interface{})
+	if !kafkaOk || !hdfsOk {
+		http.Error(w, "Missing 'kafkaConfig' or 'hdfsConfig' in request", http.StatusBadRequest)
+		return
+	}
+
+	kc := utils.SetKafkaParametersByObjectMap(kafkaConfigMapObj)
+	hc := utils.SetHdfsParametersByObjectMap(hdfsConfigObj)
+
+	//populate the channelMap
+	pipelineChan := make(chan struct{})
+	slogger.Infof("Channel created by post :: %v", pipelineChan)
+	uuid := guuid.New().String()
+	slogger.Infof(":: Storing writerName and channel in ChannelMap :: ")
+	// Suffix the writer name with the tail of a UUID so each pipeline is unique.
+	writerName := writerStr + "-" + uuid[len(uuid)-4:]
+	slogger.Infof("::writerName:: %s ", writerName)
+	ChannelMap[writerName] = pipelineChan
+
+	// invoke the go routine to build the pipeline
+	go pipeline.BuildWriterPipeline(kc, hc, writerName, ChannelMap[writerName])
+	successMessage := fmt.Sprintf("Created the writer ::%s", writerName)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintln(w, successMessage)
+}
+
+
+// deleteWriter deletes a given writer pipeline
+func deleteWriter(w http.ResponseWriter, r *http.Request){
+ vars := mux.Vars(r)
+ writerName := vars["writerName"]
+ if _, keyExists := ChannelMap[writerName]; keyExists{
+ slogger.Infof("::Writer to be closed:: %s",writerName)
+ toBeClosedChannel := ChannelMap[writerName]
+ close(toBeClosedChannel)
+ // deleting the channel from ChannelMap after closure to
+ // avoid closing the closed channel
+ delete(ChannelMap, writerName)
+
+ w.WriteHeader(http.StatusOK)
+ deleteMessage := fmt.Sprintf("Deleted writer :: %s",writerName)
+ fmt.Fprintln(w,deleteMessage)
+
+ }else{
+ notFoundMessage := fmt.Sprintf("Could not find writer :: %s",writerName)
+ fmt.Fprintln(w,notFoundMessage)
+ }
+
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/pipeline/pipeline.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/pipeline/pipeline.go
new file mode 100644
index 00000000..c5dbd3cd
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/pipeline/pipeline.go
@@ -0,0 +1,130 @@
+package pipeline
+
+import (
+ "fmt"
+ "os"
+ "github.com/colinmarc/hdfs"
+ "github.com/confluentinc/confluent-kafka-go/kafka"
+ utils "hdfs-writer/pkg/utils"
+
+)
+
+// BuildWriterPipeline consumes messages from the configured kafka topic and
+// appends each one, newline terminated, to an HDFS file named after the
+// topic ("/<topic>"). It runs until sigchan is closed (writer deletion) or
+// kafka reports that all brokers are down.
+func BuildWriterPipeline(k utils.KafkaConfig, h utils.HdfsConfig, writerName string, sigchan chan struct{}) {
+	slogger := utils.GetLoggerInstance()
+	topics := []string{k.GetTopic()}
+	// Single HDFS path derived from the topic; used for Stat, create and append.
+	hdfsFilePath := "/" + k.GetTopic()
+
+	c, err := kafka.NewConsumer(&kafka.ConfigMap{
+		"bootstrap.servers":     k.GetBroker(),
+		"broker.address.family": "v4",
+		"group.id":              k.GetGroup(),
+		"session.timeout.ms":    6000,
+		"auto.offset.reset":     "earliest"})
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to create consumer: %s\n", err)
+		os.Exit(1)
+	}
+	fmt.Printf("Created Consumer %v\n", c)
+	// BUG FIX: the subscription error was previously ignored.
+	if err = c.SubscribeTopics(topics, nil); err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to subscribe to topics: %s\n", err)
+		os.Exit(1)
+	}
+
+	run := true
+	setUpPipeline := false
+
+	var hdfsFileWriter *hdfs.FileWriter
+	var hdfsFileWriterError error
+	// HDFS CLIENT CREATION
+	client := utils.CreateHdfsClient(h.GetHdfsURL())
+
+	for run {
+		select {
+		case sig := <-sigchan:
+			client.Close()
+			if hdfsFileWriter != nil {
+				cleanup(hdfsFileWriter)
+			}
+			slogger.Infof("\nCaught signal %v: terminating the go-routine of writer :: %s\n", sig, writerName)
+			run = false
+		default:
+			ev := c.Poll(100)
+			if ev == nil {
+				continue
+			}
+			//:: BEGIN : Switch between different types of messages that come out of kafka
+			switch e := ev.(type) {
+			case *kafka.Message:
+				slogger.Infof("::: Message on %s\n%s\n", e.TopicPartition, e.Value)
+				dataStr := string(e.Value)
+				slogger.Infof("byte array ::: %s", []byte(dataStr))
+				_, fileInfoError := client.Stat(hdfsFilePath)
+				// create the file if it doesnt exist already
+				if fileInfoError != nil {
+					slogger.Infof("Error::: %s", fileInfoError)
+					slogger.Infof("Creating file::: %s", hdfsFilePath)
+					hdfsFileWriterError = client.CreateEmptyFile(hdfsFilePath)
+					if hdfsFileWriterError != nil {
+						slogger.Infof("Creation of empty file ::: %s failed\n Error:: %s",
+							hdfsFilePath, hdfsFileWriterError.Error())
+						panic(fmt.Sprintf("Creation of empty file ::: %s failed", k.GetTopic()))
+					}
+					_ = client.Chmod(hdfsFilePath, 0777)
+				}
+				newDataStr := dataStr + "\n"
+				// BUG FIX: the original appended to "/"+fileInfo.Name(), but when
+				// the file had just been created fileInfo was nil (Stat had failed)
+				// and dereferencing it panicked. Append to the topic path instead.
+				hdfsFileWriter, hdfsFileWriterError = client.Append(hdfsFilePath)
+				if hdfsFileWriterError != nil || hdfsFileWriter == nil {
+					if hdfsFileWriter == nil {
+						slogger.Infof("hdfsFileWriter is NULL !!")
+					}
+					slogger.Infof(":::Appending to file : %s failed:::\nError occured:::%s\n",
+						hdfsFilePath, hdfsFileWriterError)
+					panic(fmt.Sprintf("Appending to file : %s failed", k.GetTopic()))
+				}
+				bytesWritten, writeErr := hdfsFileWriter.Write([]byte(newDataStr))
+				if bytesWritten > 0 && writeErr == nil {
+					slogger.Infof("::: Wrote %s to HDFS:::", newDataStr)
+					slogger.Infof("::: Wrote %d bytes to HDFS:::", bytesWritten)
+
+					if !setUpPipeline {
+						slogger.Infof(" The pipeline with topic: %s and hdfs url %s is setup,"+
+							"watching for more messages.. ", k.GetTopic(), h.GetHdfsURL())
+						setUpPipeline = true
+					}
+				} else {
+					// BUG FIX: Info does not take a format string; use Infof.
+					slogger.Infof("::: Unable to write to HDFS\n :::Error:: %s", writeErr)
+				}
+				hdfsFileWriter.Close()
+				// Clear the handle so the signal path does not close it twice.
+				hdfsFileWriter = nil
+
+			case kafka.Error:
+				// Errors should generally be considered
+				// informational, the client will try to
+				// automatically recover.
+				// But in this example we choose to terminate
+				// the application if all brokers are down.
+				fmt.Fprintf(os.Stderr, "%% Error: %v: %v\n", e.Code(), e)
+				if e.Code() == kafka.ErrAllBrokersDown {
+					run = false
+				}
+
+			default:
+				fmt.Printf("Ignored %v\n", e)
+			} //:: END : Switch between different types of messages that come out of kafka
+		} // END: select channel
+	} // END : infinite loop
+
+	fmt.Printf("Closing the consumer")
+	// BUG FIX: release the kafka consumer; the original leaked it.
+	c.Close()
+}
+
+func cleanup(h *hdfs.FileWriter){
+ if h!=nil{
+ err := h.Close()
+ if err!=nil{
+ fmt.Printf(":::Error occured while closing the hdfs writer::: \n%s", err.Error())
+ }
+ }
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/sample-rest-requests/createWriter.json b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/sample-rest-requests/createWriter.json
new file mode 100644
index 00000000..9a41d91b
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/sample-rest-requests/createWriter.json
@@ -0,0 +1,11 @@
+{"writer": {
+ "kafkaConfig": {
+ "broker": "kafka-cluster-kafka-bootstrap:9092",
+ "group": "grp1",
+ "topic": "newTopic9"
+ },
+ "hdfsConfig": {
+ "hdfs_url": "hdfs1-namenode:8020"
+ }
+}
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfs-config.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfs-config.go
new file mode 100644
index 00000000..ac33bc6a
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfs-config.go
@@ -0,0 +1,37 @@
+package utils
+
+import (
+ "os"
+)
+
+// SetHdfsParametersByObjectMap sets the value of the hdfs config parameters
+// from the given decoded-JSON map and returns the HdfsConfig object.
+// A missing or non-string "hdfs_url" no longer panics (the old unchecked
+// type assertion did); it is logged and left as the empty string.
+func SetHdfsParametersByObjectMap(m map[string]interface{}) HdfsConfig{
+	hc := HdfsConfig{}
+	if url, ok := m["hdfs_url"].(string); ok {
+		hc.hdfsURL = url
+	} else {
+		GetLoggerInstance().Errorf("hdfs_url missing or not a string in config map")
+	}
+	return hc
+}
+
+// SetHdfsParametersByEnvVariables builds an HdfsConfig from the HDFS_URL
+// environment variable and logs the value it picked up.
+func SetHdfsParametersByEnvVariables() HdfsConfig {
+	slogger := GetLoggerInstance()
+	hc := HdfsConfig{}
+	hc.hdfsURL = os.Getenv("HDFS_URL")
+	slogger.Infof("::hdfsURL:: %s", hc.hdfsURL)
+	return hc
+}
+
+// HdfsConfig contains hdfs related config items
+type HdfsConfig struct {
+	// hdfsURL is the namenode address, e.g. "hdfs1-namenode:8020"
+	// (see sample-rest-requests/createWriter.json).
+	hdfsURL string
+}
+
+// GetHdfsURL returns HdfsURL
+func (h HdfsConfig) GetHdfsURL() string {
+	return h.hdfsURL
+}
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfsUtils.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfsUtils.go
new file mode 100644
index 00000000..1a93a5ad
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/hdfsUtils.go
@@ -0,0 +1,33 @@
+package utils
+
+import (
+ "fmt"
+ "github.com/colinmarc/hdfs"
+ //"sync"
+ //"go.uber.org/zap"
+)
+
+
+//var clientOnce sync.Once
+//var hdfsClient *hdfs.Client
+//var slogger *zap.SugaredLogger
+
+
+//GetHdfsClientInstance returns a singleton hdfsClient instance
+// func GetHdfsClientInstance(hdfsURL string) (*hdfs.Client){
+// clientOnce.Do(func(){
+// hdfsClient = createHdfsClient(hdfsURL)
+// })
+// return hdfsClient
+// }
+
+//CreateHdfsClient creates a hdfs client and returns hdfs client
+func CreateHdfsClient(hdfsURL string) (*hdfs.Client){
+ slogger := GetLoggerInstance()
+ hdfsClient, hdfsConnectError := hdfs.New(hdfsURL)
+ if hdfsConnectError !=nil {
+ slogger.Fatalf(":::Error in create hdfsClient::: %v", hdfsConnectError)
+ fmt.Printf("::Unable to initialize hdfsURL, check logs")
+ }
+ return hdfsClient
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/kafka-config.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/kafka-config.go
new file mode 100644
index 00000000..080bfd4b
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/kafka-config.go
@@ -0,0 +1,55 @@
+package utils
+
+
+import (
+ "os"
+)
+
+// SetKafkaParametersByObjectMap sets the value of the kafka parameters
+// and sets the KafkaConfig object
+func SetKafkaParametersByObjectMap(m map[string]interface{}) KafkaConfig {
+ kc := KafkaConfig{}
+ kc.broker = m["broker"].(string)
+ kc.group = m["group"].(string)
+ kc.topic = m["topic"].(string)
+
+ return kc
+}
+
+// SetKafkaParametersByEnvVariables builds a KafkaConfig from the BROKER,
+// GROUP and TOPIC environment variables, logging each value it picked up.
+func SetKafkaParametersByEnvVariables() KafkaConfig {
+	slogger := GetLoggerInstance()
+
+	kc := KafkaConfig{}
+	kc.broker = os.Getenv("BROKER")
+	kc.group = os.Getenv("GROUP")
+	kc.topic = os.Getenv("TOPIC")
+
+	slogger.Infof("::broker:: %s", kc.broker)
+	slogger.Infof("::group:: %s", kc.group)
+	slogger.Infof("::topic:: %s", kc.topic)
+
+	return kc
+}
+
+// KafkaConfig contains all the config parameters needed for kafka. This can be extended over time
+type KafkaConfig struct {
+	// broker is the bootstrap server address, e.g. "kafka-cluster-kafka-bootstrap:9092"
+	// (see sample-rest-requests/createWriter.json).
+	broker string
+	// group is the kafka consumer group id used by the writer pipeline.
+	group string
+	// topic is the kafka topic consumed; it also names the HDFS target file.
+	topic string
+}
+
+// GetBroker returns kafka broker configured
+func (k KafkaConfig) GetBroker() string {
+	return k.broker
+}
+
+// GetGroup returns kafka group configured
+func (k KafkaConfig) GetGroup() string {
+	return k.group
+}
+
+// GetTopic returns kafka topic configured
+func (k KafkaConfig) GetTopic() string {
+	return k.topic
+}
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/logutils.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/logutils.go
new file mode 100644
index 00000000..0f72e718
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/logutils.go
@@ -0,0 +1,32 @@
+package utils
+
+import (
+ "go.uber.org/zap"
+ "fmt"
+ "sync"
+)
+
+
+
+// logOnce guards the one-time construction of the shared logger.
+var logOnce sync.Once
+// logger is the process-wide SugaredLogger shared by all packages.
+var logger *zap.SugaredLogger
+
+//GetLoggerInstance returns a singleton instance of logger
+func GetLoggerInstance() (*zap.SugaredLogger){
+	logOnce.Do(func(){
+		logger = createLogger()
+	})
+	return logger
+}
+
+
+//createLogger returns a SugaredLogger, sugaredLogger can be directly used to generate logs
+func createLogger() (*zap.SugaredLogger){
+ logger, err := zap.NewDevelopment()
+ if err != nil {
+ fmt.Printf("can't initialize zap logger: %v", err)
+ }
+ defer logger.Sync()
+ slogger := logger.Sugar()
+ return slogger
+} \ No newline at end of file
diff --git a/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/readJson.go b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/readJson.go
new file mode 100644
index 00000000..bfab64e6
--- /dev/null
+++ b/vnfs/DAaaS/microservices/GoApps/src/go-hdfs-writer/pkg/utils/readJson.go
@@ -0,0 +1,28 @@
+package utils
+
+import (
+ "os"
+ "io/ioutil"
+)
+
+
+//ReadJSON reads the content of a given file and returns it as a string;
+// used for small config files only. Returns "" when the file cannot be
+// opened or read (the old code kept going after an open failure and
+// silently ignored the read error).
+func ReadJSON(path string) string {
+	slogger := GetLoggerInstance()
+	jsonFile, err := os.Open(path)
+	if err != nil {
+		slogger.Errorf("Unable to open file: %s", path)
+		slogger.Errorf("Error::: %s", err)
+		return ""
+	}
+	defer jsonFile.Close()
+	slogger.Infof("Successfully opened config.json")
+
+	byteValue, err := ioutil.ReadAll(jsonFile)
+	if err != nil {
+		slogger.Errorf("Unable to read file: %s", path)
+		slogger.Errorf("Error::: %s", err)
+		return ""
+	}
+	return string(byteValue)
+}
+