aboutsummaryrefslogtreecommitdiffstats
path: root/cdap3vm/config/hadoop-cluster-config-template/kafka/server.properties
blob: 7dfb8797b6d949e595ffeefb5bc82f33f39d9403 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
auto.leader.rebalance.enable=true
kafka.ganglia.metrics.port=8671
socket.send.buffer.bytes=102400
message.max.bytes=1000000
num.network.threads=3
log.segment.bytes=1073741824
kafka.ganglia.metrics.host=localhost
leader.imbalance.per.broker.percentage=10
replica.lag.time.max.ms=10000
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
num.io.threads=8
# NOTE(review): 86400000 *minutes* is ~164 years; this looks like a milliseconds
# value (86400000 ms = 24 h) pasted into a minutes-keyed property. Kafka's
# default is 1440 (24 h) — confirm the intended retention before deploying.
offsets.retention.minutes=86400000
fetch.purgatory.purge.interval.requests=10000
offsets.topic.compression.codec=0
replica.lag.max.messages=4000
num.recovery.threads.per.data.dir=1
log.dirs=/grid/0/kafka-logs
log.retention.bytes=-1
producer.purgatory.purge.interval.requests=10000
log.flush.scheduler.interval.ms=3000
default.replication.factor=1
replica.high.watermark.checkpoint.interval.ms=5000
zookeeper.connect=TODO_REPLACE_HOSTNAME:2181
controlled.shutdown.retry.backoff.ms=5000
kafka.timeline.metrics.host=TODO_REPLACE_HOSTNAME
kafka.ganglia.metrics.group=kafka
kafka.timeline.metrics.reporter.sendInterval=5900
num.partitions=1
offsets.topic.segment.bytes=104857600
controller.socket.timeout.ms=30000
queued.max.requests=500
broker.id=0
offset.metadata.max.bytes=4096
kafka.timeline.metrics.reporter.enabled=true
controlled.shutdown.max.retries=3
min.insync.replicas=1
offsets.commit.required.acks=-1
replica.fetch.wait.max.ms=500
controlled.shutdown.enable=true
log.roll.hours=168
# NOTE(review): log.cleanup.interval.mins is the legacy (pre-0.8.2) property
# name; newer brokers use log.retention.check.interval.ms instead and may
# ignore this key — verify against the deployed Kafka version.
log.cleanup.interval.mins=10
replica.socket.receive.buffer.bytes=65536
kafka.ganglia.metrics.reporter.enabled=true
offsets.topic.num.partitions=50
delete.topic.enable=false
offsets.load.buffer.size=5242880
num.replica.fetchers=1
socket.request.max.bytes=104857600
kafka.timeline.metrics.maxRowCacheSize=10000
kafka.timeline.metrics.port=6188
compression.type=producer
zookeeper.sync.time.ms=2000
zookeeper.connection.timeout.ms=6000
socket.receive.buffer.bytes=102400
controller.message.queue.size=10
offsets.commit.timeout.ms=5000
offsets.topic.replication.factor=3
offsets.retention.check.interval.ms=600000
log.flush.interval.ms=3000
replica.fetch.max.bytes=1048576
kafka.metrics.reporters=org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter
listeners=PLAINTEXT://TODO_REPLACE_HOSTNAME:6667
replica.fetch.min.bytes=1
replica.socket.timeout.ms=30000
zookeeper.session.timeout.ms=30000
auto.create.topics.enable=true
log.index.size.max.bytes=10485760
leader.imbalance.check.interval.seconds=300
log.index.interval.bytes=4096
log.retention.hours=168