diff --git a/README.md b/README.md
index 688ee021..8232d216 100644
--- a/README.md
+++ b/README.md
@@ -83,16 +83,19 @@ if you have any questions, please feel free to ask us for help and you can find
* the doc about [queryDoc](https://docs.db3.network/functions/queryDoc.html)
-# Try Our Cloud Sandbox
+# Try the Testnet
-* [Console](https://console.cloud.db3.network/console/home):https://console.cloud.db3.network/console/home
-* Data Rollup Node: https://rollup.cloud.db3.network
-* Data Index Node: https://index.cloud.db3.network
-You can connect to the Data Rollup Node and Data Index Node with db3.js
+| Public Chains | Testnet | Mainnet |
+|----------|:-------------:|:------:|
+| zksync | data rollup node: `https://zksync.rollup.testnet.db3.network`<br/>data index node: `https://zksync.index.testnet.db3.network` | :soon: |
+| scroll | data rollup node: `https://scroll.rollup.testnet.db3.network`<br/>data index node: `https://scroll.index.testnet.db3.network` | :soon: |
+
+You can connect to the Data Rollup Node and the Data Index Node with db3.js (see the example below).
-Note: the cloud sandbox is just for testing and unstable
+Note: the testnet is for testing only and may be unstable.
+
# How it works
The DB3 Network has two roles:
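Regarding the testnet section above, here is a minimal db3.js connection sketch. It assumes the `createFromPrivateKey`, `createClient`, and `syncAccountNonce` helpers described in the db3.js documentation that the README links to; their exact signatures are an assumption here and may differ between db3.js releases.

```typescript
// Minimal sketch (not official sample code): connect db3.js to the zksync
// testnet endpoints from the table above. Helper names follow the db3.js
// docs; verify the exact signatures at https://docs.db3.network.
import { createFromPrivateKey, createClient, syncAccountNonce } from 'db3.js'

async function main() {
  // use a throwaway test key; never commit a real private key
  const account = createFromPrivateKey('0x<your-test-private-key>')

  // first URL: Data Rollup Node, second URL: Data Index Node
  const client = createClient(
    'https://zksync.rollup.testnet.db3.network',
    'https://zksync.index.testnet.db3.network',
    account
  )

  // sync the account nonce before sending mutations or queries
  await syncAccountNonce(client)
  console.log('connected to the zksync testnet data network')
}

main().catch(console.error)
```

To target the scroll testnet instead, swap in the scroll URLs from the table; the document APIs linked earlier in the README (for example queryDoc) are then issued through this client.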
diff --git a/src/node/src/command.rs b/src/node/src/command.rs
index e170ba37..5f6c2274 100644
--- a/src/node/src/command.rs
+++ b/src/node/src/command.rs
@@ -104,6 +104,9 @@ pub enum DB3Command {
/// this is just for upgrade the node
#[clap(long, default_value = "100000")]
doc_id_start: i64,
+        /// use the legacy (pre-EIP-1559) transaction format
+ #[clap(long, default_value = "false")]
+ use_legacy_tx: bool,
},
/// Start the data index node
@@ -241,6 +244,7 @@ impl DB3Command {
key_root_path,
admin_addr,
doc_id_start,
+ use_legacy_tx,
} => {
let log_level = if verbose {
LevelFilter::DEBUG
@@ -261,6 +265,7 @@ impl DB3Command {
key_root_path.as_str(),
admin_addr.as_str(),
doc_id_start,
+ use_legacy_tx,
)
.await;
let running = Arc::new(AtomicBool::new(true));
@@ -479,11 +484,13 @@ impl DB3Command {
key_root_path: &str,
admin_addr: &str,
doc_start_id: i64,
+ use_legacy_tx: bool,
) {
let listen_addr = format!("{bind_host}:{listening_port}");
let rollup_config = RollupExecutorConfig {
temp_data_path: rollup_data_path.to_string(),
key_root_path: key_root_path.to_string(),
+ use_legacy_tx,
};
let store_config = MutationStoreConfig {
diff --git a/src/node/src/node_test_base.rs b/src/node/src/node_test_base.rs
index f96ba496..b5d97743 100644
--- a/src/node/src/node_test_base.rs
+++ b/src/node/src/node_test_base.rs
@@ -59,6 +59,7 @@ pub mod tests {
let rollup_config = RollupExecutorConfig {
temp_data_path: format!("{real_path}/rollup_data_path"),
key_root_path: key_root_path.to_string(),
+ use_legacy_tx: false,
};
if let Err(_e) = std::fs::create_dir_all(rollup_config.temp_data_path.as_str()) {
println!("create dir error");
diff --git a/src/node/src/recover.rs b/src/node/src/recover.rs
index 7c25cf17..cf032dc2 100644
--- a/src/node/src/recover.rs
+++ b/src/node/src/recover.rs
@@ -69,7 +69,8 @@ impl Recover {
format!("0x{}", hex::encode(wallet.address().as_bytes()))
);
let meta_store = Arc::new(
- MetaStoreClient::new(contract_addr.as_str(), evm_node_url.as_str(), wallet).await?,
+ MetaStoreClient::new(contract_addr.as_str(), evm_node_url.as_str(), wallet, false)
+ .await?,
);
let ar_fs_config = ArFileSystemConfig {
arweave_url: ar_node_url,
diff --git a/src/node/src/rollup_executor.rs b/src/node/src/rollup_executor.rs
index 878e6cb9..24f97a03 100644
--- a/src/node/src/rollup_executor.rs
+++ b/src/node/src/rollup_executor.rs
@@ -36,6 +36,7 @@ use tracing::{info, warn}; // Workaround to use prinltn! for logs.
pub struct RollupExecutorConfig {
pub temp_data_path: String,
pub key_root_path: String,
+ pub use_legacy_tx: bool,
}
pub struct RollupExecutor {
@@ -71,8 +72,13 @@ impl RollupExecutor {
let wallet = system_store.get_evm_wallet(c.chain_id)?;
let min_rollup_size = c.min_rollup_size;
let meta_store = ArcSwapOption::from(Some(Arc::new(
- MetaStoreClient::new(c.contract_addr.as_str(), c.evm_node_url.as_str(), wallet)
- .await?,
+ MetaStoreClient::new(
+ c.contract_addr.as_str(),
+ c.evm_node_url.as_str(),
+ wallet,
+ config.use_legacy_tx,
+ )
+ .await?,
)));
let ar_fs_config = ArFileSystemConfig {
arweave_url: c.ar_node_url.clone(),
@@ -134,8 +140,13 @@ impl RollupExecutor {
self.rollup_max_interval
.store(c.rollup_max_interval, Ordering::Relaxed);
let meta_store = Some(Arc::new(
- MetaStoreClient::new(c.contract_addr.as_str(), c.evm_node_url.as_str(), wallet)
- .await?,
+ MetaStoreClient::new(
+ c.contract_addr.as_str(),
+ c.evm_node_url.as_str(),
+ wallet,
+ self.config.use_legacy_tx,
+ )
+ .await?,
));
self.min_gc_round_offset
.store(c.min_gc_offset, Ordering::Relaxed);
@@ -303,10 +314,12 @@ impl RollupExecutor {
network_id,
)
.await?;
+
let (evm_cost, tx_hash) = meta_store
.update_rollup_step(id.as_str(), network_id)
.await?;
let tx_str = format!("0x{}", hex::encode(tx_hash.as_bytes()));
+
info!("the process rollup done with num mutations {num_rows}, raw data size {memory_size}, compress data size {size} and processed time {} id {} ar cost {} and evm tx {} and cost {}", now.elapsed().as_secs(),
id.as_str(), reward,
tx_str.as_str(),
diff --git a/src/node/src/storage_node_light_impl.rs b/src/node/src/storage_node_light_impl.rs
index 35b5bbef..6a1b770f 100644
--- a/src/node/src/storage_node_light_impl.rs
+++ b/src/node/src/storage_node_light_impl.rs
@@ -718,6 +718,7 @@ mod tests {
let rollup_config = RollupExecutorConfig {
temp_data_path: format!("{real_path}/data_path"),
key_root_path: format!("{real_path}/keys"),
+ use_legacy_tx: false,
};
let system_store_config = SystemStoreConfig {
diff --git a/src/storage/src/meta_store_client.rs b/src/storage/src/meta_store_client.rs
index f6657b42..83d47b0a 100644
--- a/src/storage/src/meta_store_client.rs
+++ b/src/storage/src/meta_store_client.rs
@@ -34,13 +34,19 @@ abigen!(Events, "abi/Events.json");
pub struct MetaStoreClient {
address: Address,
    client: Arc<SignerMiddleware<Arc<Provider<Ws>>, LocalWallet>>,
+ use_legacy_tx: bool,
}
unsafe impl Sync for MetaStoreClient {}
unsafe impl Send for MetaStoreClient {}
impl MetaStoreClient {
-    pub async fn new(contract_addr: &str, rpc_url: &str, wallet: LocalWallet) -> Result<Self> {
+ pub async fn new(
+ contract_addr: &str,
+ rpc_url: &str,
+ wallet: LocalWallet,
+ use_legacy_tx: bool,
+    ) -> Result<Self> {
let address = contract_addr
            .parse::<Address>()
.map_err(|_| DB3Error::InvalidAddress)?;
@@ -51,7 +57,11 @@ impl MetaStoreClient {
let provider_arc = Arc::new(provider);
let signable_client = SignerMiddleware::new(provider_arc, wallet);
let client = Arc::new(signable_client);
- Ok(Self { address, client })
+ Ok(Self {
+ address,
+ client,
+ use_legacy_tx,
+ })
}
pub async fn register_data_network(
@@ -74,11 +84,21 @@ impl MetaStoreClient {
empty_index_addresses,
desc,
);
- tx.send()
- .await
- .map_err(|e| DB3Error::StoreEventError(format!("fail to register data network {e}")))?;
+ match self.use_legacy_tx {
+ true => {
+ tx.legacy().send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!("fail to register data network {e}"))
+ })?;
+ }
+ false => {
+ tx.send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!("fail to register data network {e}"))
+ })?;
+ }
+ }
Ok(())
}
+
pub async fn create_database(&self, network: u64, desc: &str) -> Result<(U256, TxHash)> {
let store = DB3MetaStore::new(self.address, self.client.clone());
let desc_bytes = desc.as_bytes();
@@ -88,12 +108,25 @@ impl MetaStoreClient {
let mut desc_bytes32: [u8; 32] = Default::default();
desc_bytes32[..desc_bytes.len()].clone_from_slice(desc_bytes);
let tx = store.create_doc_database(network.into(), desc_bytes32);
- let pending_tx = tx.send().await.map_err(|e| {
- DB3Error::StoreEventError(format!(
- "fail to send create doc database request with error {e}"
- ))
- })?;
- let tx_hash = pending_tx.tx_hash();
+ let tx_hash = match self.use_legacy_tx {
+ true => {
+ let tx = tx.legacy();
+ let pending_tx = tx.send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!(
+ "fail to send create doc database request with error {e}"
+ ))
+ })?;
+ pending_tx.tx_hash()
+ }
+ false => {
+ let pending_tx = tx.send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!(
+ "fail to send create doc database request with error {e}"
+ ))
+ })?;
+ pending_tx.tx_hash()
+ }
+ };
let mut count_down: i32 = 5;
loop {
if count_down <= 0 {
@@ -159,11 +192,25 @@ impl MetaStoreClient {
ar_tx, network
);
let tx = store.update_rollup_steps(network_id, ar_tx_binary);
- //TODO set gas limit
- let pending_tx = tx.send().await.map_err(|e| {
- DB3Error::StoreEventError(format!("fail to send update rollup request with error {e}"))
- })?;
- let tx_hash = pending_tx.tx_hash();
+ let tx_hash = match self.use_legacy_tx {
+ true => {
+ let tx = tx.legacy();
+ let pending_tx = tx.send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!(
+                        "fail to send update rollup request with error {e}"
+ ))
+ })?;
+ pending_tx.tx_hash()
+ }
+ false => {
+ let pending_tx = tx.send().await.map_err(|e| {
+ DB3Error::StoreEventError(format!(
+                        "fail to send update rollup request with error {e}"
+ ))
+ })?;
+ pending_tx.tx_hash()
+ }
+ };
info!("update rollup step done! tx hash: {}", tx_hash);
let mut count_down: i32 = 5;
loop {
@@ -195,6 +242,7 @@ mod tests {
use super::*;
use fastcrypto::encoding::{Base64, Encoding};
use tokio::time::{sleep, Duration as TokioDuration};
+
#[tokio::test]
async fn register_no1_data_network() {
let data = hex::decode("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80")
@@ -206,7 +254,7 @@ mod tests {
let contract_addr = "0x5fbdb2315678afecb367f032d93f642f64180aa3";
let rpc_url = "ws://127.0.0.1:8545";
sleep(TokioDuration::from_millis(10 * 1000)).await;
- let client = MetaStoreClient::new(contract_addr, rpc_url, wallet)
+ let client = MetaStoreClient::new(contract_addr, rpc_url, wallet, false)
.await
.unwrap();
let result = client
@@ -226,7 +274,7 @@ mod tests {
let rollup_node_address = wallet.address();
let contract_addr = "0x5FbDB2315678afecb367f032d93F642f64180aa3";
let rpc_url = "ws://127.0.0.1:8545";
- let client = MetaStoreClient::new(contract_addr, rpc_url, wallet)
+ let client = MetaStoreClient::new(contract_addr, rpc_url, wallet, false)
.await
.unwrap();
let result = client
diff --git a/thirdparty/data-manager b/thirdparty/data-manager
index 273b7918..c67f0775 160000
--- a/thirdparty/data-manager
+++ b/thirdparty/data-manager
@@ -1 +1 @@
-Subproject commit 273b7918f1ff17fbf4b76ffed4a63828ec8eabfd
+Subproject commit c67f0775e39b1f0aaadfc69d2273210d62f125b1