Commit 25b1554

Slight db adjustment for snap sync perf (#7919)
1 parent: 053af58

File tree (3 files changed: +13 -4 lines changed)
  src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs
  src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs
  src/Nethermind/Nethermind.Network/P2P/Subprotocols/Snap/SnapProtocolHandler.cs

src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs (+9)

@@ -223,6 +223,15 @@ public class DbConfig : IDbConfig
 "block_based_table_factory.block_size=32000;" +
 
 "block_based_table_factory.filter_policy=bloomfilter:15;" +
+
+// Note: This causes write batch to not be atomic. A concurrent read may read item on start of batch, but not end of batch.
+// With state, this is fine as writes are done in parallel batch and therefore, not atomic, and the read goes
+// through triestore first anyway.
+"unordered_write=true;" +
+
+// Default is 1 MB.
+"max_write_batch_group_size_bytes=4000000;" +
+
 "";
 public string? StateDbAdditionalRocksDbOptions { get; set; }
 }
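
The two options added above are appended to the state DB's RocksDB option string, a semicolon-separated list of key=value pairs: unordered_write=true trades per-batch atomicity for write throughput, and max_write_batch_group_size_bytes raises the write batch group size from the 1 MB default noted in the diff to 4,000,000 bytes (about 4 MB). The short C# sketch below only illustrates that key=value shape; the Parse helper is an assumption for demonstration and is not the parser RocksDB or Nethermind actually uses.

    using System;
    using System.Collections.Generic;
    using System.Linq;

    // Illustrative only: shows the "key=value;" shape of the options appended above.
    static class RocksDbOptionStringExample
    {
        static Dictionary<string, string> Parse(string options) =>
            options.Split(';', StringSplitOptions.RemoveEmptyEntries)
                   .Select(pair => pair.Split('=', 2))
                   .ToDictionary(kv => kv[0].Trim(), kv => kv[1].Trim());

        static void Main()
        {
            Dictionary<string, string> parsed = Parse(
                "unordered_write=true;" +
                "max_write_batch_group_size_bytes=4000000;");

            // unordered_write=true: concurrent readers may observe part of a batch.
            // max_write_batch_group_size_bytes=4000000: ~4 MB vs the 1 MB default.
            foreach ((string key, string value) in parsed)
                Console.WriteLine($"{key} = {value}");
        }
    }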

src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs (+3 -3)

@@ -1045,7 +1045,7 @@ internal class RocksDbWriteBatch : IWriteBatch
 /// we writes the batch in smaller batches. This removes atomicity so its only turned on when NoWAL flag is on.
 /// It does not work as well as just turning on unordered_write, but Snapshot and Iterator can still works.
 /// </summary>
-private const int MaxWritesOnNoWal = 128;
+private const int MaxWritesOnNoWal = 256;
 private int _writeCount;
 
 public RocksDbWriteBatch(DbOnTheRocks dbOnTheRocks)
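
The XML doc in this hunk describes the surrounding mechanism: when the NoWAL flag is set, the batch is committed in sub-batches of MaxWritesOnNoWal writes, giving up atomicity, and this commit doubles that chunk from 128 to 256. The sketch below is not Nethermind's RocksDbWriteBatch; it only mirrors that counting pattern under assumed names (everything except MaxWritesOnNoWal and _writeCount is invented) and stores pending writes in a plain list instead of a native RocksDB batch.

    using System;
    using System.Collections.Generic;

    // Illustrative sketch of committing a no-WAL batch in fixed-size sub-batches.
    class ChunkedWriteBatch
    {
        private const int MaxWritesOnNoWal = 256;   // 256 after this commit (was 128)
        private int _writeCount;
        private readonly List<(byte[] Key, byte[] Value)> _pending = new();

        public void Set(byte[] key, byte[] value)
        {
            _pending.Add((key, value));
            if (++_writeCount % MaxWritesOnNoWal == 0)
                CommitSubBatch();   // partial commit: concurrent readers may already see these keys
        }

        public void Finish() => CommitSubBatch();   // commit whatever is left at the end

        private void CommitSubBatch()
        {
            if (_pending.Count == 0) return;
            // A real implementation would hand the pending writes to RocksDB here.
            Console.WriteLine($"committing sub-batch of {_pending.Count} writes");
            _pending.Clear();
        }

        static void Main()
        {
            var batch = new ChunkedWriteBatch();
            for (int i = 0; i < 600; i++)
                batch.Set(BitConverter.GetBytes(i), new byte[] { 1 });
            batch.Finish();   // prints two sub-batches of 256 and a tail of 88
        }
    }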
@@ -1435,13 +1435,13 @@ private IDictionary<string, string> GetHeavyWriteOptions(ulong l0SizeTarget)
 // Make buffer (probably) smaller so that it does not take too much memory to have many of them.
 // More buffer means more parallel flush, but each read have to go through all buffer one by one much like l0
 // but no io, only cpu.
-// bufferSize*maxBufferNumber = 128MB, which is the max memory used, which tend to be the case as its now
+// bufferSize*maxBufferNumber = 16MB*Core count, which is the max memory used, which tend to be the case as its now
 // stalled by compaction instead of flush.
 // The buffer is not compressed unlike l0File, so to account for it, its size need to be slightly larger.
 ulong targetFileSize = (ulong)16.MiB();
 ulong bufferSize = (ulong)(targetFileSize / _perTableDbConfig.CompressibilityHint);
 ulong l0FileSize = targetFileSize * (ulong)_minWriteBufferToMerge;
-ulong maxBufferNumber = 8;
+ulong maxBufferNumber = (ulong)Environment.ProcessorCount;
 
 // Guide recommend to have l0 and l1 to be the same size. They have to be compacted together so if l1 is larger,
 // the extra size in l1 is basically extra rewrites. If l0 is larger... then I don't know why not. Even so, it seems to
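
The second hunk swaps a fixed memtable count of 8 for one write buffer per logical core, and the updated comment restates the budget as bufferSize * maxBufferNumber = 16 MB * core count. A small back-of-the-envelope check follows; it assumes CompressibilityHint is close to 1 so that bufferSize stays near the 16 MiB target file size, which is only an assumption since the real value comes from _perTableDbConfig.

    using System;

    // Rough arithmetic behind the comment change above (not Nethermind code).
    class HeavyWriteBudgetExample
    {
        static void Main()
        {
            ulong targetFileSize = 16UL * 1024 * 1024;        // 16 MiB, as in the diff
            ulong bufferSize = targetFileSize;                 // assumes CompressibilityHint ~= 1
            ulong oldMax = 8;                                  // fixed count before this commit
            ulong newMax = (ulong)Environment.ProcessorCount;  // one buffer per logical core after

            Console.WriteLine($"old budget: {bufferSize * oldMax / (1024 * 1024)} MiB");               // 128 MiB
            Console.WriteLine($"new budget: {bufferSize * newMax / (1024 * 1024)} MiB ({newMax} cores)"); // 16 MiB * cores
        }
    }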

src/Nethermind/Nethermind.Network/P2P/Subprotocols/Snap/SnapProtocolHandler.cs (+1 -1)

@@ -28,7 +28,7 @@ namespace Nethermind.Network.P2P.Subprotocols.Snap
 public class SnapProtocolHandler : ZeroProtocolHandlerBase, ISnapSyncPeer
 {
 public static TimeSpan LowerLatencyThreshold = TimeSpan.FromMilliseconds(2000);
-public static TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3000);
+public static TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3500);
 private static readonly TrieNodesMessage EmptyTrieNodesMessage = new TrieNodesMessage(ArrayPoolList<byte[]>.Empty());
 
 private readonly LatencyBasedRequestSizer _requestSizer = new(
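
LowerLatencyThreshold and UpperLatencyThreshold feed the LatencyBasedRequestSizer constructed just below; raising the upper bound from 3000 ms to 3500 ms makes the sizer slower to shrink request sizes for laggy peers. The sizer itself is not part of this diff, so the following sketch is only an assumed illustration of how two such thresholds could drive a grow/shrink policy; the field names and size limits are invented.

    using System;

    // Assumed illustration of a latency-threshold request sizer, not Nethermind's implementation.
    class LatencyBasedRequestSizerSketch
    {
        private static readonly TimeSpan LowerLatencyThreshold = TimeSpan.FromMilliseconds(2000);
        private static readonly TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3500); // was 3000 ms

        private int _requestSize = 8;   // hypothetical starting request size
        private const int MinSize = 1;
        private const int MaxSize = 128;

        public int CurrentSize => _requestSize;

        public void Adjust(TimeSpan responseLatency)
        {
            if (responseLatency < LowerLatencyThreshold)
                _requestSize = Math.Min(_requestSize * 2, MaxSize);   // peer is fast: ask for more
            else if (responseLatency > UpperLatencyThreshold)
                _requestSize = Math.Max(_requestSize / 2, MinSize);   // peer is slow: ask for less
            // between the thresholds: keep the current size
        }

        static void Main()
        {
            var sizer = new LatencyBasedRequestSizerSketch();
            sizer.Adjust(TimeSpan.FromMilliseconds(1500));   // fast: 8 -> 16
            sizer.Adjust(TimeSpan.FromMilliseconds(3200));   // stays 16; would have shrunk under the old 3000 ms bound
            sizer.Adjust(TimeSpan.FromMilliseconds(4000));   // slow: 16 -> 8
            Console.WriteLine(sizer.CurrentSize);
        }
    }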
