Skip to content

Commit 085d412

Browse files
fadlinurhasanbentol
authored and committed
Configure Kafka so it still works if a single box dies. (#12)
* Add min.insync.replicas config condition Co-authored-by: bentol <[email protected]> * berks update Co-authored-by: bentol <[email protected]> * set min insync replicas as attribute Co-authored-by: bentol <[email protected]> * berks update Co-authored-by: bentol <[email protected]> * update config attribute Co-authored-by: bentol <[email protected]> * berks update Co-authored-by: bentol <[email protected]> Co-authored-by: bentol <[email protected]>
1 parent 6d59ed7 commit 085d412

File tree

5 files changed

+27
-27
lines changed

5 files changed

+27
-27
lines changed

attributes/default.rb

+12-11
Original file line numberDiff line numberDiff line change
@@ -49,23 +49,24 @@
4949

5050
# Kafka configuration, default provided by Kafka project
5151
default[cookbook_name]['kafka']['port'] = 9092
52-
default[cookbook_name]['kafka']['max_replication_factor'] = 3
5352
default[cookbook_name]['kafka']['config'] = {
5453
'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}",
5554
'broker.id' => -1,
56-
'port' => node[cookbook_name]['kafka']['port'],
57-
'num.network.threads' => 3,
58-
'num.io.threads' => 8,
59-
'socket.send.buffer.bytes' => 102_400,
60-
'socket.receive.buffer.bytes' => 102_400,
61-
'socket.request.max.bytes' => 104_857_600,
55+
'log.cleaner.enable' => false,
6256
'log.dirs' => '/var/lib/kafka',
63-
'num.partitions' => 1,
64-
'num.recovery.threads.per.data.dir' => 1,
57+
'log.retention.check.interval.ms' => 300_000,
6558
'log.retention.hours' => 168,
6659
'log.segment.bytes' => 1_073_741_824,
67-
'log.retention.check.interval.ms' => 300_000,
68-
'log.cleaner.enable' => false,
60+
'min.insync.replicas' => 2,
61+
'num.io.threads' => 8,
62+
'num.network.threads' => 3,
63+
'num.partitions' => 1,
64+
'num.recovery.threads.per.data.dir' => 1,
65+
'offsets.topic.replication.factor' => 3,
66+
'port' => node[cookbook_name]['kafka']['port'],
67+
'socket.receive.buffer.bytes' => 102_400,
68+
'socket.request.max.bytes' => 104_857_600,
69+
'socket.send.buffer.bytes' => 102_400,
6970
'zookeeper.connect' => 'localhost:2181',
7071
'zookeeper.connection.timeout.ms' => 15_000,
7172
'zookeeper.session.timeout.ms' => 15_000

cookbooks/kafka/attributes/default.rb

+12-11
Original file line numberDiff line numberDiff line change
@@ -49,23 +49,24 @@
4949

5050
# Kafka configuration, default provided by Kafka project
5151
default[cookbook_name]['kafka']['port'] = 9092
52-
default[cookbook_name]['kafka']['max_replication_factor'] = 3
5352
default[cookbook_name]['kafka']['config'] = {
5453
'advertised.listeners' => "PLAINTEXT://#{node['ipaddress']}:#{node[cookbook_name]['kafka']['port']}",
5554
'broker.id' => -1,
56-
'port' => node[cookbook_name]['kafka']['port'],
57-
'num.network.threads' => 3,
58-
'num.io.threads' => 8,
59-
'socket.send.buffer.bytes' => 102_400,
60-
'socket.receive.buffer.bytes' => 102_400,
61-
'socket.request.max.bytes' => 104_857_600,
55+
'log.cleaner.enable' => false,
6256
'log.dirs' => '/var/lib/kafka',
63-
'num.partitions' => 1,
64-
'num.recovery.threads.per.data.dir' => 1,
57+
'log.retention.check.interval.ms' => 300_000,
6558
'log.retention.hours' => 168,
6659
'log.segment.bytes' => 1_073_741_824,
67-
'log.retention.check.interval.ms' => 300_000,
68-
'log.cleaner.enable' => false,
60+
'min.insync.replicas' => 2,
61+
'num.io.threads' => 8,
62+
'num.network.threads' => 3,
63+
'num.partitions' => 1,
64+
'num.recovery.threads.per.data.dir' => 1,
65+
'offsets.topic.replication.factor' => 3,
66+
'port' => node[cookbook_name]['kafka']['port'],
67+
'socket.receive.buffer.bytes' => 102_400,
68+
'socket.request.max.bytes' => 104_857_600,
69+
'socket.send.buffer.bytes' => 102_400,
6970
'zookeeper.connect' => 'localhost:2181',
7071
'zookeeper.connection.timeout.ms' => 15_000,
7172
'zookeeper.session.timeout.ms' => 15_000

cookbooks/kafka/recipes/kafka_config.rb

+1-2
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,7 @@
4747
kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count']
4848
if kafka_hosts_count < 3
4949
config['offsets.topic.replication.factor'] = kafka_hosts_count
50-
else
51-
config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor']
50+
config['min.insync.replicas'] = 1
5251
end
5352

5453
# Write configurations

cookbooks/kafka/recipes/kafka_consul_register.rb

+1-1
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
{
2929
"id": "#{node['hostname']}-hc-payload",
3030
"name": "kafka",
31-
"args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep open"],
31+
"args": ["/bin/bash", "-c", "nc -vz #{node['ipaddress']} #{node[cookbook_name]['kafka']['port']} 2>&1 | grep 'open\\\|succeeded'"],
3232
"interval": "10s",
3333
"timeout": "1s"
3434
}

recipes/kafka_config.rb

+1-2
Original file line numberDiff line numberDiff line change
@@ -47,8 +47,7 @@
4747
kafka_hosts_count = node[cookbook_name]['kafka']['hosts_count']
4848
if kafka_hosts_count < 3
4949
config['offsets.topic.replication.factor'] = kafka_hosts_count
50-
else
51-
config['offsets.topic.replication.factor'] = node[cookbook_name]['kafka']['max_replication_factor']
50+
config['min.insync.replicas'] = 1
5251
end
5352

5453
# Write configurations

0 commit comments

Comments (0)