Skip to content

Commit 018b6cc

Browse files
cchudant and antiyro authored
fix(db): fix number of files in db, startup hang, ram issues and flushing issues (#379)
Co-authored-by: antiyro <[email protected]>
1 parent e140ab3 commit 018b6cc

File tree

5 files changed

+89
-67
lines changed

5 files changed

+89
-67
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
## Next release
44

5+
- fix(db): fix number of files in db, startup hang, ram issues and flushing issues
56
- fix: FeePayment conversion
67
- fix(block_production): get l2-to-l1 messages recursively from the call tree
78
- refactor: replace starknet-rs BlockId with types-rs BlockId and remove redundant mp_block::BlockId

crates/client/db/src/lib.rs

Lines changed: 7 additions & 64 deletions
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,8 @@ use db_metrics::DbMetrics;
88
use mp_chain_config::ChainConfig;
99
use mp_utils::service::Service;
1010
use rocksdb::backup::{BackupEngine, BackupEngineOptions};
11-
use rocksdb::{
12-
BoundColumnFamily, ColumnFamilyDescriptor, DBCompressionType, DBWithThreadMode, Env, FlushOptions, MultiThreaded,
13-
Options, SliceTransform,
14-
};
11+
use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, DBWithThreadMode, Env, FlushOptions, MultiThreaded};
12+
use rocksdb_options::rocksdb_global_options;
1513
use starknet_types_core::hash::{Pedersen, Poseidon, StarkHash};
1614
use std::path::{Path, PathBuf};
1715
use std::sync::{Arc, Mutex};
@@ -28,6 +26,7 @@ pub mod db_metrics;
2826
pub mod devnet_db;
2927
mod error;
3028
pub mod l1_db;
29+
mod rocksdb_options;
3130
pub mod storage_updates;
3231
pub mod tests;
3332

@@ -38,39 +37,8 @@ pub type WriteBatchWithTransaction = rocksdb::WriteBatchWithTransaction<false>;
3837

3938
const DB_UPDATES_BATCH_SIZE: usize = 1024;
4039

41-
#[allow(clippy::identity_op)] // allow 1 * MiB
42-
#[allow(non_upper_case_globals)] // allow KiB/MiB/GiB names
43-
pub fn open_rocksdb(path: &Path, create: bool) -> Result<Arc<DB>> {
44-
const KiB: usize = 1024;
45-
const MiB: usize = 1024 * KiB;
46-
const GiB: usize = 1024 * MiB;
47-
48-
let mut opts = Options::default();
49-
opts.set_report_bg_io_stats(true);
50-
opts.set_use_fsync(false);
51-
opts.create_if_missing(create);
52-
opts.create_missing_column_families(true);
53-
opts.set_keep_log_file_num(1);
54-
opts.optimize_level_style_compaction(4 * GiB);
55-
opts.set_compression_type(DBCompressionType::Zstd);
56-
let cores = std::thread::available_parallelism().map(|e| e.get() as i32).unwrap_or(1);
57-
opts.increase_parallelism(cores);
58-
59-
opts.set_atomic_flush(true);
60-
opts.set_manual_wal_flush(true);
61-
opts.set_max_subcompactions(cores as _);
62-
63-
opts.set_max_log_file_size(1 * MiB);
64-
opts.set_max_open_files(512); // 512 is the value used by substrate for reference
65-
opts.set_keep_log_file_num(3);
66-
opts.set_log_level(rocksdb::LogLevel::Warn);
67-
68-
let mut env = Env::new().context("Creating rocksdb env")?;
69-
// env.set_high_priority_background_threads(cores); // flushes
70-
env.set_low_priority_background_threads(cores); // compaction
71-
72-
opts.set_env(&env);
73-
40+
pub fn open_rocksdb(path: &Path) -> Result<Arc<DB>> {
41+
let opts = rocksdb_global_options()?;
7442
tracing::debug!("opening db at {:?}", path.display());
7543
let db = DB::open_cf_descriptors(
7644
&opts,
@@ -265,31 +233,6 @@ impl Column {
265233
Devnet => "devnet",
266234
}
267235
}
268-
269-
/// Per column rocksdb options, like memory budget, compaction profiles, block sizes for hdd/sdd
270-
/// etc. TODO: add basic sensible defaults
271-
pub(crate) fn rocksdb_options(&self) -> Options {
272-
let mut opts = Options::default();
273-
match self {
274-
Column::ContractStorage => {
275-
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(
276-
contract_db::CONTRACT_STORAGE_PREFIX_EXTRACTOR,
277-
));
278-
}
279-
Column::ContractToClassHashes => {
280-
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(
281-
contract_db::CONTRACT_CLASS_HASH_PREFIX_EXTRACTOR,
282-
));
283-
}
284-
Column::ContractToNonces => {
285-
opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(
286-
contract_db::CONTRACT_NONCES_PREFIX_EXTRACTOR,
287-
));
288-
}
289-
_ => {}
290-
}
291-
opts
292-
}
293236
}
294237

295238
pub trait DatabaseExt {
@@ -386,7 +329,7 @@ impl MadaraBackend {
386329
let temp_dir = tempfile::TempDir::with_prefix("madara-test").unwrap();
387330
Arc::new(Self {
388331
backup_handle: None,
389-
db: open_rocksdb(temp_dir.as_ref(), true).unwrap(),
332+
db: open_rocksdb(temp_dir.as_ref()).unwrap(),
390333
last_flush_time: Default::default(),
391334
chain_config,
392335
db_metrics: DbMetrics::register().unwrap(),
@@ -425,7 +368,7 @@ impl MadaraBackend {
425368
None
426369
};
427370

428-
let db = open_rocksdb(&db_path, true)?;
371+
let db = open_rocksdb(&db_path)?;
429372

430373
let backend = Arc::new(Self {
431374
db_metrics: DbMetrics::register().context("Registering db metrics")?,
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
#![allow(clippy::identity_op)] // allow 1 * MiB
2+
#![allow(non_upper_case_globals)] // allow KiB/MiB/GiB names
3+
4+
use crate::{contract_db, Column};
5+
use anyhow::{Context, Result};
6+
use rocksdb::{DBCompressionType, Env, Options, SliceTransform};
7+
8+
const KiB: usize = 1024;
9+
const MiB: usize = 1024 * KiB;
10+
const GiB: usize = 1024 * MiB;
11+
12+
pub fn rocksdb_global_options() -> Result<Options> {
13+
let mut options = Options::default();
14+
options.create_if_missing(true);
15+
options.create_missing_column_families(true);
16+
let cores = std::thread::available_parallelism().map(|e| e.get() as i32).unwrap_or(1);
17+
options.increase_parallelism(cores);
18+
options.set_max_background_jobs(cores);
19+
20+
options.set_atomic_flush(true);
21+
options.set_max_subcompactions(cores as _);
22+
23+
options.set_max_log_file_size(10 * MiB);
24+
options.set_max_open_files(2048);
25+
options.set_keep_log_file_num(3);
26+
options.set_log_level(rocksdb::LogLevel::Warn);
27+
28+
let mut env = Env::new().context("Creating rocksdb env")?;
29+
// env.set_high_priority_background_threads(cores); // flushes
30+
env.set_low_priority_background_threads(cores); // compaction
31+
32+
options.set_env(&env);
33+
34+
Ok(options)
35+
}
36+
37+
impl Column {
38+
/// Per column rocksdb options, like memory budget, compaction profiles, block sizes for hdd/sdd
39+
/// etc.
40+
pub(crate) fn rocksdb_options(&self) -> Options {
41+
let mut options = Options::default();
42+
43+
match self {
44+
Column::ContractStorage => {
45+
options.set_prefix_extractor(SliceTransform::create_fixed_prefix(
46+
contract_db::CONTRACT_STORAGE_PREFIX_EXTRACTOR,
47+
));
48+
}
49+
Column::ContractToClassHashes => {
50+
options.set_prefix_extractor(SliceTransform::create_fixed_prefix(
51+
contract_db::CONTRACT_CLASS_HASH_PREFIX_EXTRACTOR,
52+
));
53+
}
54+
Column::ContractToNonces => {
55+
options.set_prefix_extractor(SliceTransform::create_fixed_prefix(
56+
contract_db::CONTRACT_NONCES_PREFIX_EXTRACTOR,
57+
));
58+
}
59+
_ => {}
60+
}
61+
62+
options.set_compression_type(DBCompressionType::Zstd);
63+
match self {
64+
Column::BlockNToBlockInfo | Column::BlockNToBlockInner => {
65+
options.optimize_universal_style_compaction(1 * GiB);
66+
}
67+
_ => {
68+
options.optimize_universal_style_compaction(100 * MiB);
69+
}
70+
}
71+
options
72+
}
73+
}

crates/node/src/cli/chain_config_overrides.rs

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ use mp_chain_config::{
1515
};
1616
use mp_utils::parsers::parse_key_value_yaml;
1717
use mp_utils::serde::{deserialize_duration, deserialize_private_key, serialize_duration};
18+
use url::Url;
1819

1920
/// Override chain config parameters.
2021
/// Format: "--chain-config-override chain_id=SN_MADARA,chain_name=MADARA,block_time=1500ms,bouncer_config.block_max_capacity.n_steps=100000000"
@@ -28,6 +29,8 @@ pub struct ChainConfigOverrideParams {
2829
pub struct ChainConfigOverridesInner {
2930
pub chain_name: String,
3031
pub chain_id: ChainId,
32+
pub feeder_gateway_url: Url,
33+
pub gateway_url: Url,
3134
pub native_fee_token_address: ContractAddress,
3235
pub parent_fee_token_address: ContractAddress,
3336
#[serde(deserialize_with = "deserialize_starknet_version", serialize_with = "serialize_starknet_version")]
@@ -66,6 +69,8 @@ impl ChainConfigOverrideParams {
6669
eth_core_contract_address: chain_config.eth_core_contract_address,
6770
eth_gps_statement_verifier: chain_config.eth_gps_statement_verifier,
6871
private_key: chain_config.private_key,
72+
feeder_gateway_url: chain_config.feeder_gateway_url,
73+
gateway_url: chain_config.gateway_url,
6974
})
7075
.context("Failed to convert ChainConfig to Value")?;
7176

@@ -101,8 +106,8 @@ impl ChainConfigOverrideParams {
101106
Ok(ChainConfig {
102107
chain_name: chain_config_overrides.chain_name,
103108
chain_id: chain_config_overrides.chain_id,
104-
feeder_gateway_url: chain_config.feeder_gateway_url,
105-
gateway_url: chain_config.gateway_url,
109+
feeder_gateway_url: chain_config_overrides.feeder_gateway_url,
110+
gateway_url: chain_config_overrides.gateway_url,
106111
native_fee_token_address: chain_config_overrides.native_fee_token_address,
107112
parent_fee_token_address: chain_config_overrides.parent_fee_token_address,
108113
latest_protocol_version: chain_config_overrides.latest_protocol_version,

crates/node/src/service/sync.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ impl SyncService {
3232
) -> anyhow::Result<Self> {
3333
let fetch_config = config.block_fetch_config(chain_config.chain_id.clone(), chain_config.clone());
3434

35-
tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str());
35+
tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str());
3636

3737
Ok(Self {
3838
db_backend: Arc::clone(db.backend()),

0 commit comments

Comments
 (0)