Diffstat (limited to 'cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties')
-rw-r--r--  cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties | 70
1 file changed, 70 insertions(+), 0 deletions(-)
diff --git a/cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties b/cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties
new file mode 100644
index 0000000..7dfb879
--- /dev/null
+++ b/cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties
@@ -0,0 +1,70 @@
+auto.leader.rebalance.enable=true
+kafka.ganglia.metrics.port=8671
+socket.send.buffer.bytes=102400
+message.max.bytes=1000000
+num.network.threads=3
+log.segment.bytes=1073741824
+kafka.ganglia.metrics.host=localhost
+leader.imbalance.per.broker.percentage=10
+replica.lag.time.max.ms=10000
+authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
+num.io.threads=8
+offsets.retention.minutes=86400000
+fetch.purgatory.purge.interval.requests=10000
+offsets.topic.compression.codec=0
+replica.lag.max.messages=4000
+num.recovery.threads.per.data.dir=1
+log.dirs=/grid/0/kafka-logs
+log.retention.bytes=-1
+producer.purgatory.purge.interval.requests=10000
+log.flush.scheduler.interval.ms=3000
+default.replication.factor=1
+replica.high.watermark.checkpoint.interval.ms=5000
+zookeeper.connect=TODO_REPLACE_HOSTNAME:2181
+controlled.shutdown.retry.backoff.ms=5000
+kafka.timeline.metrics.host=TODO_REPLACE_HOSTNAME
+kafka.ganglia.metrics.group=kafka
+kafka.timeline.metrics.reporter.sendInterval=5900
+num.partitions=1
+offsets.topic.segment.bytes=104857600
+controller.socket.timeout.ms=30000
+queued.max.requests=500
+broker.id=0
+offset.metadata.max.bytes=4096
+kafka.timeline.metrics.reporter.enabled=true
+controlled.shutdown.max.retries=3
+min.insync.replicas=1
+offsets.commit.required.acks=-1
+replica.fetch.wait.max.ms=500
+controlled.shutdown.enable=true
+log.roll.hours=168
+log.cleanup.interval.mins=10
+replica.socket.receive.buffer.bytes=65536
+kafka.ganglia.metrics.reporter.enabled=true
+offsets.topic.num.partitions=50
+delete.topic.enable=false
+offsets.load.buffer.size=5242880
+num.replica.fetchers=1
+socket.request.max.bytes=104857600
+kafka.timeline.metrics.maxRowCacheSize=10000
+kafka.timeline.metrics.port=6188
+compression.type=producer
+zookeeper.sync.time.ms=2000
+zookeeper.connection.timeout.ms=6000
+socket.receive.buffer.bytes=102400
+controller.message.queue.size=10
+offsets.commit.timeout.ms=5000
+offsets.topic.replication.factor=3
+offsets.retention.check.interval.ms=600000
+log.flush.interval.ms=3000
+replica.fetch.max.bytes=1048576
+kafka.metrics.reporters=org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter
+listeners=PLAINTEXT://TODO_REPLACE_HOSTNAME:6667
+replica.fetch.min.bytes=1
+replica.socket.timeout.ms=30000
+zookeeper.session.timeout.ms=30000
+auto.create.topics.enable=true
+log.index.size.max.bytes=10485760
+leader.imbalance.check.interval.seconds=300
+log.index.interval.bytes=4096
+log.retention.hours=168
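
Note: this server.properties is a template; the TODO_REPLACE_HOSTNAME placeholders (in zookeeper.connect, kafka.timeline.metrics.host, and listeners) are presumably substituted with the real broker hostname when the cluster config is rendered at install time. Below is a minimal sketch of such a substitution step in Python; the script name render_kafka_config.py and its command-line interface are hypothetical and not part of this repository.

    #!/usr/bin/env python
    # render_kafka_config.py -- hypothetical helper, not part of this repo.
    # Replaces every TODO_REPLACE_HOSTNAME placeholder in the server.properties
    # template with the broker hostname given on the command line.
    import sys

    def render(template_path, output_path, hostname):
        with open(template_path) as src:
            text = src.read()
        # All placeholders in this template refer to the same host.
        text = text.replace("TODO_REPLACE_HOSTNAME", hostname)
        with open(output_path, "w") as dst:
            dst.write(text)

    if __name__ == "__main__":
        template, output, host = sys.argv[1:4]
        render(template, output, host)

Example invocation (paths and hostname are illustrative only):

    python render_kafka_config.py server.properties server.properties.rendered broker1.example.com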