diff options
author | chenxdu <chenxdu@cisco.com> | 2018-09-19 16:42:15 +0200 |
---|---|---|
committer | Donald Hunter <donaldh@cisco.com> | 2018-10-01 10:23:50 +0100 |
commit | fd1060d4c176272f312fb23495ff8cdbebc121ae (patch) | |
tree | 2f24090ec71e47e69bd392918198745d0c8406e8 /pnda-ztt-app/src/main/scala/com/cisco/ztt/KafkaOutput.scala | |
parent | a789d153737a991c14c7be03ae9044563573e4d2 (diff) |
PNDA Telemetry app for virtual firewall use case
The telemetry app ingests virtual firewall VES events into
HDFS and the timeseries datastore to support further analytics.
Change-Id: I3a0920d4b416c1c165311ab9ff0fc31d8c96499f
Signed-off-by: chenxdu <chenxdu@cisco.com>
Issue-ID: DCAEGEN2-632
Signed-off-by: Donald Hunter <donaldh@cisco.com>
Diffstat (limited to 'pnda-ztt-app/src/main/scala/com/cisco/ztt/KafkaOutput.scala')
-rw-r--r-- | pnda-ztt-app/src/main/scala/com/cisco/ztt/KafkaOutput.scala | 70 |
1 files changed, 70 insertions, 0 deletions
/*
 * Copyright (c) 2018 Cisco Systems. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.cisco.ztt

import java.io.StringWriter

import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.log4j.Logger
import org.apache.spark.streaming.dstream.DStream

import com.cisco.pnda.model.DataPlatformEvent
import com.cisco.pnda.model.DataPlatformEventCodec
import com.cisco.pnda.model.StaticHelpers
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.github.benfradet.spark.kafka.writer.dStreamToKafkaWriter

/**
 * Writes a stream of [[Payload]] records to Kafka as Avro-encoded
 * DataPlatformEvents, one record per payload, on the payload's publish topic.
 */
class KafkaOutput extends Serializable {

  /** Per-JVM logger; @transient lazy so it is re-created on each executor
    * instead of being serialized with the closure. */
  object Holder extends Serializable {
    @transient lazy val logger = Logger.getLogger(getClass.getName)
  }

  /** Per-JVM singletons for the expensive, loop-invariant pieces of the
    * record transform. The original code built a new ObjectMapper, re-read
    * the Avro schema resource, and constructed a new codec for EVERY message;
    * the @transient lazy val pattern (same as Holder.logger) creates each of
    * them once per executor instead. */
  object Codecs extends Serializable {
    // Jackson mapper with Scala module, for serializing payload datapoints.
    @transient lazy val mapper: ObjectMapper = {
      val m = new ObjectMapper()
      m.registerModule(DefaultScalaModule)
      m
    }
    // Avro codec built from the bundled dataplatform-raw schema.
    @transient lazy val codec: DataPlatformEventCodec = {
      val avroSchemaString = StaticHelpers.loadResourceFile("dataplatform-raw.avsc")
      new DataPlatformEventCodec(avroSchemaString)
    }
  }

  /**
   * Publish every payload in the stream to Kafka.
   *
   * Broker list is read from the `kafka.brokers` application property.
   * Each payload's datapoint is serialized to JSON, wrapped in a
   * DataPlatformEvent, Avro-encoded, and sent to `payload.publishTopic`
   * with a null key.
   *
   * @param output stream of payloads to publish
   */
  def writeToKafka(output: DStream[Payload]) = {

    val props = AppConfig.loadProperties()
    val producerConfig = Map(
      "bootstrap.servers" -> props.getProperty("kafka.brokers"),
      "key.serializer" -> classOf[StringSerializer].getName,
      "value.serializer" -> classOf[ByteArraySerializer].getName
    )
    output.writeToKafka(
      producerConfig,
      payload => {
        // writeValueAsString replaces the original StringWriter round-trip;
        // same JSON output, no intermediate writer object.
        val json = Codecs.mapper.writeValueAsString(payload.datapoint)
        val event = new DataPlatformEvent(
          payload.publishSrc, payload.timestamp, payload.hostIp, json)
        new ProducerRecord[String, Array[Byte]](
          payload.publishTopic, Codecs.codec.encode(event))
      }
    )
  }
}
\ No newline at end of file |