Commit

Slight db adjustment for snap sync perf (#7919)
asdacap authored Dec 16, 2024
1 parent 053af58 commit 25b1554
Showing 3 changed files with 13 additions and 4 deletions.
9 changes: 9 additions & 0 deletions src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs
@@ -223,6 +223,15 @@ public class DbConfig : IDbConfig
"block_based_table_factory.block_size=32000;" +

"block_based_table_factory.filter_policy=bloomfilter:15;" +

// Note: This causes write batch to not be atomic. A concurrent read may read item on start of batch, but not end of batch.
// With state, this is fine as writes are done in parallel batch and therefore, not atomic, and the read goes
// through triestore first anyway.
"unordered_write=true;" +

// Default is 1 MB.
"max_write_batch_group_size_bytes=4000000;" +

"";
public string? StateDbAdditionalRocksDbOptions { get; set; }
}
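
For readers unfamiliar with the format: each entry above is a plain name=value pair joined with semicolons, the same shape accepted by the StateDbAdditionalRocksDbOptions override at the end of the hunk. A small illustrative sketch follows; the option names and values are the ones added by this commit, while the surrounding program is hypothetical.

using System;

// Illustrative only: compose the two options this commit adds and print their
// name/value structure. In Nethermind they end up in the RocksDB options string for the state DB.
string options =
    "unordered_write=true;" +                      // trades batch atomicity for write throughput
    "max_write_batch_group_size_bytes=4000000;";   // ~4 MB write groups (RocksDB default is 1 MB)

foreach (string entry in options.Split(';', StringSplitOptions.RemoveEmptyEntries))
{
    string[] kv = entry.Split('=', 2);
    Console.WriteLine($"{kv[0],-38} = {kv[1]}");
}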
6 changes: 3 additions & 3 deletions src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs
@@ -1045,7 +1045,7 @@ internal class RocksDbWriteBatch : IWriteBatch
/// we write the batch in smaller batches. This removes atomicity, so it is only turned on when the NoWAL flag is on.
/// It does not work as well as just turning on unordered_write, but Snapshot and Iterator still work.
/// </summary>
-private const int MaxWritesOnNoWal = 128;
+private const int MaxWritesOnNoWal = 256;
private int _writeCount;

public RocksDbWriteBatch(DbOnTheRocks dbOnTheRocks)
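
The doc comment above describes the mechanism: with the WAL disabled, the logical batch is written out in chunks of at most MaxWritesOnNoWal writes rather than atomically at the end. Below is a minimal sketch of that idea, using hypothetical types and a hypothetical flush delegate rather than the real RocksDbWriteBatch internals.

using System;
using System.Collections.Generic;

// Illustrative sketch only: split one logical batch into smaller physical batches
// every MaxWritesOnNoWal writes. Not the actual Nethermind implementation.
class ChunkedWriteBatch
{
    private const int MaxWritesOnNoWal = 256;
    private readonly List<KeyValuePair<byte[], byte[]>> _pending = new();
    private readonly Action<IReadOnlyList<KeyValuePair<byte[], byte[]>>> _writeToDb;

    public ChunkedWriteBatch(Action<IReadOnlyList<KeyValuePair<byte[], byte[]>>> writeToDb)
        => _writeToDb = writeToDb;

    public void Set(byte[] key, byte[] value)
    {
        _pending.Add(new KeyValuePair<byte[], byte[]>(key, value));
        // Flushing mid-batch gives up atomicity: a concurrent reader may see only a prefix
        // of the logical batch, which is why this is only done when the WAL is disabled.
        if (_pending.Count >= MaxWritesOnNoWal) Flush();
    }

    public void Flush()
    {
        if (_pending.Count == 0) return;
        _writeToDb(_pending);
        _pending.Clear();
    }
}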
@@ -1435,13 +1435,13 @@ private IDictionary<string, string> GetHeavyWriteOptions(ulong l0SizeTarget)
// Make the buffer (probably) smaller so that it does not take too much memory to have many of them.
// More buffers mean more parallel flushes, but each read has to go through every buffer one by one, much like l0,
// but with no io, only cpu.
-// bufferSize*maxBufferNumber = 128MB, which is the max memory used, which tends to be the case as it is now
+// bufferSize*maxBufferNumber = 16MB*Core count, which is the max memory used, which tends to be the case as it is now
// stalled by compaction instead of flush.
// The buffer is not compressed, unlike the l0File, so to account for that, its size needs to be slightly larger.
ulong targetFileSize = (ulong)16.MiB();
ulong bufferSize = (ulong)(targetFileSize / _perTableDbConfig.CompressibilityHint);
ulong l0FileSize = targetFileSize * (ulong)_minWriteBufferToMerge;
-ulong maxBufferNumber = 8;
+ulong maxBufferNumber = (ulong)Environment.ProcessorCount;

// The guide recommends having l0 and l1 be the same size. They have to be compacted together, so if l1 is larger,
// the extra size in l1 is basically extra rewrites. If l0 is larger... then I don't know why not. Even so, it seems to
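
To make the arithmetic in the comments above concrete, here is a small hypothetical sketch; the compressibility hint value and the core count are assumptions for illustration, not values taken from the commit.

using System;

// Rough re-derivation of the heavy-write buffer sizing described above.
double compressibilityHint = 0.9;                                  // hypothetical per-table value
ulong targetFileSize = 16UL * 1024 * 1024;                         // 16 MiB, as in the diff
ulong bufferSize = (ulong)(targetFileSize / compressibilityHint);  // slightly larger than 16 MiB
ulong maxBufferNumber = (ulong)Environment.ProcessorCount;         // was a fixed 8 before this commit

// Upper bound on memory held in write buffers: roughly 16 MiB per core, instead of the
// old fixed ~128 MB (16 MiB * 8).
ulong maxBufferMemory = bufferSize * maxBufferNumber;
Console.WriteLine($"~{maxBufferMemory / (1024.0 * 1024.0):F0} MiB across {maxBufferNumber} buffers");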
@@ -28,7 +28,7 @@ namespace Nethermind.Network.P2P.Subprotocols.Snap
public class SnapProtocolHandler : ZeroProtocolHandlerBase, ISnapSyncPeer
{
public static TimeSpan LowerLatencyThreshold = TimeSpan.FromMilliseconds(2000);
-public static TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3000);
+public static TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3500);
private static readonly TrieNodesMessage EmptyTrieNodesMessage = new TrieNodesMessage(ArrayPoolList<byte[]>.Empty());

private readonly LatencyBasedRequestSizer _requestSizer = new(
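
The two thresholds above feed the LatencyBasedRequestSizer constructed below them. The sketch that follows only illustrates the general idea (grow requests while peers respond under the lower threshold, shrink them once responses exceed the upper one); the adjustment policy, starting size, and bounds are assumptions, not Nethermind's actual implementation.

using System;

// Illustrative latency-based request sizing. The threshold values match the diff above;
// everything else (double/halve policy, min/max bounds) is hypothetical.
class SimpleRequestSizer
{
    private static readonly TimeSpan LowerLatencyThreshold = TimeSpan.FromMilliseconds(2000);
    private static readonly TimeSpan UpperLatencyThreshold = TimeSpan.FromMilliseconds(3500);

    private const int MinSize = 1;
    private const int MaxSize = 128;   // hypothetical bound
    private int _requestSize = 8;      // hypothetical starting size

    public int CurrentSize => _requestSize;

    // Call after each response with the measured round-trip time.
    public void Adjust(TimeSpan latency)
    {
        if (latency < LowerLatencyThreshold)
            _requestSize = Math.Min(_requestSize * 2, MaxSize);  // peer is fast: ask for more
        else if (latency > UpperLatencyThreshold)
            _requestSize = Math.Max(_requestSize / 2, MinSize);  // peer is slow: back off
        // Between the thresholds, keep the current size.
    }
}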
