diff --git a/Cargo.toml b/Cargo.toml index 3e269959..727238f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,8 @@ repository = "https://github.com/noirhq/noir.git" [workspace] resolver = "2" members = [ + "client/consensus", + "client/consensus/pow", "core-primitives", "frame/babel", "frame/cosmos", @@ -23,7 +25,10 @@ members = [ "frame/multimap", "frame/solana", "frame/solana/runtime-api", + "frame/wtema", "primitives/babel", + "primitives/consensus", # dummy + "primitives/consensus/pow", "primitives/cosmos", "primitives/ethereum", "primitives/multimap", @@ -56,6 +61,7 @@ members = [ "vendor/solana/rpc-client-api", ] default-members = [ + "client/*", "core-primitives", "frame/*", "primitives/*", @@ -68,6 +74,7 @@ useless_conversion = "allow" [workspace.dependencies] ark-bn254 = { version = "0.4.0", default-features = false, features = ["curve"] } assert_matches = "1.5.0" +async-trait = "0.1" base64 = { version = "0.22", default-features = false } bech32 = { version = "0.11", default-features = false } bincode = { package = "solana-bincode", git = "https://github.com/noirhq/solana-sdk", branch = "v2.0", default-features = false } @@ -96,6 +103,7 @@ enum-iterator = "1.5.0" env_logger = "0.9" ethereum = { version = "0.15.0", default-features = false } futures = "0.3" +futures-timer = "3.0" getrandom = { version = "0.2", default-features = false } hex = { version = "0.4.3", default-features = false } hex-literal = "0.4" @@ -116,6 +124,7 @@ num-traits = { version = "0.2", default-features = false } num_enum = { version = "0.7", default-features = false } parity-scale-codec = { version = "3.6", default-features = false } parity-wasm = { version = "0.45.0", default-features = false } +parking_lot = "0.12" paste = "1.0" percentage = "0.1.0" rand = { version = "0.8.5", default-features = false } @@ -148,6 +157,8 @@ wasmi-validation = { version = "0.5.0", default-features = false } wat = "1.0" # substrate +sc-client-api = { version = "37.0.0" } +sc-consensus = { 
version = "0.44.0" } frame-support = { version = "38.2.0", default-features = false } frame-system = { version = "38.0.0", default-features = false } pallet-assets = { version = "40.0.0", default-features = false } @@ -158,11 +169,15 @@ pallet-transaction-payment = { version = "38.0.2", default-features = false } sc-transaction-pool-api = { version = "37.0.0" } sp-api = { version = "34.0.0", default-features = false } sp-arithmetic = { version = "26.0.0", default-features = false } +sp-block-builder = { version = "34.0.0", default-features = false } sp-blockchain = { version = "37.0.1" } +sp-consensus = { version = "0.40.0", default-features = false } sp-core = { version = "34.0.0", default-features = false } +sp-inherents = { version = "34.0.0", default-features = false } sp-io = { version = "38.0.0", default-features = false } sp-keyring = { version = "39.0.0", default-features = false } sp-runtime = { version = "39.0.5", default-features = false } +substrate-prometheus-endpoint = { version = "0.17.0", default-features = false } # frontier fp-evm = { git = "https://github.com/noirhq/frontier", branch = "crates.io/stable2409", default-features = false } @@ -179,9 +194,12 @@ precompile-utils = { git = "https://github.com/noirhq/frontier", branch = "crate cosmos-rpc = { path = "frame/cosmos/rpc", default-features = false } cosmos-runtime-api = { path = "frame/cosmos/runtime-api", default-features = false } frame-babel = { path = "frame/babel", default-features = false } +nc-consensus = { path = "client/consensus" } +nc-consensus-pow = { path = "client/consensus/pow" } noir-core-primitives = { path = "core-primitives", default-features = false } noir-runtime-common = { path = "runtime/common", default-features = false } np-babel = { path = "primitives/babel", default-features = false } +np-consensus-pow = { path = "primitives/consensus/pow", default-features = false } np-cosmos = { path = "primitives/cosmos", default-features = false } np-ethereum = { path = 
"primitives/ethereum", default-features = false } np-multimap = { path = "primitives/multimap", default-features = false } @@ -199,6 +217,7 @@ pallet-cosmos-x-wasm = { path = "frame/cosmos/x/wasm", default-features = false pallet-cosmos-x-wasm-types = { path = "frame/cosmos/x/wasm/types", default-features = false } pallet-multimap = { path = "frame/multimap", default-features = false } pallet-solana = { path = "frame/solana", default-features = false } +pallet-wtema = { path = "frame/wtema", default-features = false } # vendor composable-support = { path = "vendor/composable/composable-support", default-features = false } diff --git a/client/consensus/Cargo.toml b/client/consensus/Cargo.toml new file mode 100644 index 00000000..356ed668 --- /dev/null +++ b/client/consensus/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "nc-consensus" +description = "Noir common types for consensus" +license = "Apache-2.0" +authors = { workspace = true } +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = false + +[dependencies] +async-trait = { workspace = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml new file mode 100644 index 00000000..96b63aa9 --- /dev/null +++ b/client/consensus/pow/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "nc-consensus-pow" +description = "Noir PoW consensus algorithm" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = { workspace = true } +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +futures-timer = { workspace = true } +log = { workspace = true, default-features = true } +parity-scale-codec = { workspace = true, default-features = true, features = ["derive"] } +parking_lot = { 
workspace = true, default-features = true } +thiserror = { workspace = true, default-features = true } + +nc-consensus = { workspace = true } +np-consensus-pow = { workspace = true, default-features = true } +sc-client-api = { workspace = true } +sc-consensus = { workspace = true } +sp-api = { workspace = true } +sp-block-builder = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-consensus = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } +sp-inherents = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +substrate-prometheus-endpoint = { workspace = true, default-features = true } diff --git a/client/consensus/pow/README.md b/client/consensus/pow/README.md new file mode 100644 index 00000000..8f2f68a7 --- /dev/null +++ b/client/consensus/pow/README.md @@ -0,0 +1,29 @@ +Proof of work consensus for Substrate. + +To use this engine, you need to have a struct that implements +`PowAlgorithm`. After that, pass an instance of the struct, along +with other necessary client references to `import_queue` to setup +the queue. + +This library also comes with an async mining worker, which can be +started via the `start_mining_worker` function. It returns a worker +handle together with a future. The future must be polled. Through +the worker handle, you can pull the metadata needed to start the +mining process via `MiningWorker::metadata`, and then do the actual +mining on a standalone thread. Finally, when a seal is found, call +`MiningWorker::submit` to build the block. + +The auxiliary storage for PoW engine only stores the total difficulty. +For other storage requirements for particular PoW algorithm (such as +the actual difficulty for each particular block), you can take a client +reference in your `PowAlgorithm` implementation, and use a separate prefix +for the auxiliary storage. 
It is also possible to just use the runtime +as the storage, but it is not recommended as it won't work well with light +clients. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 + + +## Release + +Polkadot SDK stable2409 diff --git a/client/consensus/pow/src/aux_schema.rs b/client/consensus/pow/src/aux_schema.rs new file mode 100644 index 00000000..52c0eca3 --- /dev/null +++ b/client/consensus/pow/src/aux_schema.rs @@ -0,0 +1,58 @@ +// This file is part of Noir. + +// Copyright (C) Haderech Pte. Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use np_consensus_pow::BlockWeight; +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::AuxStore; +use sp_blockchain::{Error, Result}; + +pub fn block_weight_key(block_hash: H) -> Vec { + (b"block_weight", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> Result> { + match backend.get_aux(key)? 
{ + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .map_err(|e| Error::Backend(format!("PoW DB is corrupted: {}", e))) + .map(Some), + } +} + +pub fn load_block_weight(backend: &B, block_hash: &H) -> Result> +where + B: AuxStore, + H: Encode, + W: Decode + Default, +{ + load_decode(backend, &block_weight_key(block_hash)[..]).map(|v| v.unwrap_or_default()) +} + +pub(crate) fn write_block_weight( + block_hash: H, + block_weight: BlockWeight, + write_aux: F, +) -> R +where + H: Encode, + W: Encode, + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| write_aux(&[(key, s)])) +} diff --git a/client/consensus/pow/src/digests.rs b/client/consensus/pow/src/digests.rs new file mode 100644 index 00000000..b9e164d9 --- /dev/null +++ b/client/consensus/pow/src/digests.rs @@ -0,0 +1,63 @@ +// This file is part of Noir. + +// Copyright (C) Haderech Pte. Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use nc_consensus::PreDigestProvider; +use np_consensus_pow::POW_ENGINE_ID; +use parity_scale_codec::{Decode, Encode}; +use sp_blockchain::Result; +use sp_runtime::DigestItem; +use std::ops::Deref; + +/// Generic pre-digest for PoW consensus engine. 
+#[derive(Clone, Debug, Decode, Encode)] +pub struct PreDigest { + author: AccountId, + inner: Inner, +} + +impl PreDigest { + pub fn new(author: AccountId, inner: Inner) -> Self { + Self { author, inner } + } + + pub fn author(&self) -> &AccountId { + &self.author + } + + pub fn into_inner(self) -> Inner { + self.inner + } +} + +impl Deref for PreDigest { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +#[async_trait::async_trait] +impl PreDigestProvider for PreDigest +where + AccountId: Encode + Send + Sync, +{ + async fn pre_digest(&self, _best_hash: &[u8]) -> Result> { + Ok(vec![DigestItem::PreRuntime(POW_ENGINE_ID, self.author.encode())]) + } +} diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs new file mode 100644 index 00000000..032b5283 --- /dev/null +++ b/client/consensus/pow/src/lib.rs @@ -0,0 +1,706 @@ +// This file is part of Noir. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Noir Proof of work consensus. +//! +//! To use this engine, you can need to have a struct that implements +//! [`PowAlgorithm`]. After that, pass an instance of the struct, along +//! with other necessary client references to [`import_queue`] to setup +//! the queue. +//! +//! 
This library also comes with an async mining worker, which can be +//! started via the [`start_mining_worker`] function. It returns a worker +//! handle together with a future. The future must be pulled. Through +//! the worker handle, you can pull the metadata needed to start the +//! mining process via [`MiningHandle::metadata`], and then do the actual +//! mining on a standalone thread. Finally, when a seal is found, call +//! [`MiningHandle::submit`] to build the block. +//! +//! The auxiliary storage for PoW engine only stores the total difficulty. +//! For other storage requirements for particular PoW algorithm (such as +//! the actual difficulty for each particular blocks), you can take a client +//! reference in your [`PowAlgorithm`] implementation, and use a separate prefix +//! for the auxiliary storage. It is also possible to just use the runtime +//! as the storage, but it is not recommended as it won't work well with light +//! clients. + +mod aux_schema; +mod digests; +mod worker; + +pub use aux_schema::*; +pub use digests::*; +pub use worker::*; + +use futures::{Future, StreamExt}; +use log::*; +use nc_consensus::PreDigestProvider; +use np_consensus_pow::{Seal, POW_ENGINE_ID}; +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; +use sc_consensus::{ + BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, + ForkChoiceStrategy, ImportResult, Verifier, +}; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::HeaderBackend; +use sp_consensus::{ + BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, +}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; +use sp_runtime::{ + generic::{BlockId, Digest, DigestItem}, + traits::{Block as BlockT, Header as HeaderT, Saturating}, +}; +use std::{cmp::Ordering, marker::PhantomData, sync::Arc, time::Duration}; +use 
substrate_prometheus_endpoint::Registry; + +const LOG_TARGET: &str = "pow"; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("Header uses the wrong engine: {}", String::from_utf8_lossy(.0))] + WrongEngine([u8; 4]), + #[error("Header {0:?} has a bad seal")] + HeaderBadSeal(B::Hash), + #[error("Header {0:?} is unsealed")] + HeaderUnsealed(B::Hash), + #[error("Preliminary verification failed")] + PreliminaryVerificationFailed, + #[error("Creating inherents failed: {0}")] + CreateInherents(sp_inherents::Error), + #[error("Checking inherents failed: {0}")] + CheckInherents(sp_inherents::Error), + #[error("Checking inherents unhandled error: {}", String::from_utf8_lossy(.0))] + CheckInherentsUnhandled(sp_inherents::InherentIdentifier), + #[error("Multiple pre-runtime digests")] + MultiplePreRuntimeDigests, + #[error(transparent)] + Client(sp_blockchain::Error), + #[error(transparent)] + Codec(parity_scale_codec::Error), + #[error("{0}")] + Other(String), +} + +impl From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +impl From> for ConsensusError { + fn from(error: Error) -> ConsensusError { + ConsensusError::ClientImport(error.to_string()) + } +} + +/// Intermediate value passed to block importer. +#[derive(Encode, Decode, Clone, Debug, Default)] +pub struct PowIntermediate { + /// Difficulty of the block, if known. + pub difficulty: Option, +} + +/// Intermediate key for PoW engine. +pub static INTERMEDIATE_KEY: &[u8] = b"pow1"; + +/// Algorithm used for proof of work. +pub trait PowAlgorithm { + /// Difficulty for the algorithm. + type Difficulty: Saturating + Default + Encode + Decode + Ord + Clone + Copy; + + /// Get the next block's difficulty. + /// + /// This function will be called twice during the import process, so the implementation + /// should be properly cached. 
+ fn difficulty(&self, parent: B::Hash) -> Result>; + + /// Verify that the seal is valid against given pre hash when parent block is not yet imported. + /// + /// None means that preliminary verify is not available for this algorithm. + fn preliminary_verify( + &self, + _pre_hash: &B::Hash, + _seal: &Seal, + ) -> Result, Error> { + Ok(None) + } + + /// Break a fork choice tie. + /// + /// By default this chooses the earliest block seen. Using uniform tie + /// breaking algorithms will help to protect against selfish mining. + /// + /// Returns if the new seal should be considered best block. + fn break_tie(&self, _own_seal: &Seal, _new_seal: &Seal) -> bool { + false + } + + /// Verify that the difficulty is valid against given seal. + /// + /// This function should return the actual difficulty of the block. + /// None means that the seal is invalid. + fn verify( + &self, + parent: &BlockId, + pre_hash: &B::Hash, + pre_digest: Option<&[u8]>, + seal: &Seal, + difficulty: Self::Difficulty, + ) -> Result, Error>; + + /// Calculates the difficulty of the given header. + fn calculate_difficulty(&self, _header: &B::Header) -> Result> { + unimplemented!() + } +} + +/// A block importer for PoW. 
+pub struct PowBlockImport { + inner: I, + client: Arc, + select_chain: SC, + algorithm: A, + _marker: PhantomData, +} + +impl Clone for PowBlockImport { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + client: self.client.clone(), + select_chain: self.select_chain.clone(), + algorithm: self.algorithm.clone(), + _marker: Default::default(), + } + } +} + +impl PowBlockImport +where + B: BlockT, + I: BlockImport + Send + Sync, + I::Error: Into, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, + C::Api: BlockBuilderApi, + A: PowAlgorithm, +{ + /// Create a new block import suitable to be used in PoW + pub fn new(inner: I, client: Arc, select_chain: SC, algorithm: A) -> Self { + Self { inner, client, select_chain, algorithm, _marker: Default::default() } + } +} + +#[async_trait::async_trait] +impl BlockImport for PowBlockImport +where + B: BlockT, + I: BlockImport + Send + Sync, + I::Error: Into, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + BlockOf, + C::Api: BlockBuilderApi, + SC: SelectChain, + A: PowAlgorithm + Send + Sync, + A::Difficulty: Send + 'static, +{ + type Error = ConsensusError; + + async fn check_block(&self, block: BlockCheckParams) -> Result { + self.inner.check_block(block).await.map_err(Into::into) + } + + async fn import_block( + &self, + mut block: BlockImportParams, + ) -> Result { + let best_header = self + .select_chain + .best_chain() + .await + .map_err(|e| format!("Fetch best chain failed via select chain: {}", e)) + .map_err(ConsensusError::ChainLookup)?; + let best_hash = best_header.hash(); + let parent_hash = *block.header.parent_hash(); + + let best_weight = + aux_schema::load_block_weight(&*self.client, &best_hash).map_err(Error::Client::)?; + let mut block_weight = aux_schema::load_block_weight(&*self.client, &parent_hash) + .map_err(Error::Client::)?; + + let inner_seal = fetch_seal::(block.post_digests.last(), block.header.hash())?; + + let intermediate = + 
block.remove_intermediate::>(INTERMEDIATE_KEY)?; + + let target_difficulty = match intermediate.difficulty { + Some(difficulty) => difficulty, + None => self.algorithm.difficulty(parent_hash)?, + }; + + let pre_hash = block.header.hash(); + let pre_digest = find_pre_digest::(&block.header)?; + let difficulty = match self.algorithm.verify( + &BlockId::hash(parent_hash), + &pre_hash, + pre_digest.as_ref().map(|v| &v[..]), + &inner_seal, + target_difficulty, + )? { + Some(difficulty) => difficulty, + None => return Err(Error::::HeaderBadSeal(block.post_hash()).into()), + }; + + block_weight += difficulty; + + aux_schema::write_block_weight(block.post_hash(), block_weight, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))); + }); + if block.fork_choice.is_none() { + block.fork_choice = + Some(ForkChoiceStrategy::Custom(match block_weight.cmp(&best_weight) { + Ordering::Less => false, + Ordering::Greater => true, + Ordering::Equal => + if block.header.number() < best_header.number() { + true + } else { + match block.origin { + BlockOrigin::Own => true, + _ => { + let best_inner_seal = fetch_seal::( + best_header.digest().logs.last(), + best_hash, + )?; + + self.algorithm.break_tie(&best_inner_seal, &inner_seal) + }, + } + }, + })); + } + + self.inner.import_block(block).await.map_err(Into::into) + } +} + +/// A verifier for PoW blocks. 
+pub struct PowVerifier { + client: Arc, + algorithm: A, + create_inherent_data_providers: CIDP, + _marker: PhantomData, +} + +impl PowVerifier +where + B: BlockT, + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, + A: PowAlgorithm, + CIDP: CreateInherentDataProviders, +{ + pub fn new(client: Arc, algorithm: A, create_inherent_data_providers: CIDP) -> Self { + Self { client, algorithm, create_inherent_data_providers, _marker: Default::default() } + } + + fn check_header(&self, mut header: B::Header) -> Result<(B::Header, DigestItem), Error> { + let hash = header.hash(); + + let (seal, inner_seal) = match header.digest_mut().pop() { + Some(DigestItem::Seal(id, seal)) => + if id == POW_ENGINE_ID { + (DigestItem::Seal(id, seal.clone()), seal) + } else { + return Err(Error::WrongEngine(id)) + }, + _ => return Err(Error::HeaderUnsealed(hash)), + }; + + let pre_hash = header.hash(); + + if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { + return Err(Error::PreliminaryVerificationFailed) + } + + Ok((header, seal)) + } + + async fn check_inherents( + &self, + block: B, + at_hash: B::Hash, + create_inherent_data_providers: CIDP::InherentDataProviders, + ) -> Result<(), Error> { + let inherent_data = create_inherent_data_providers + .create_inherent_data() + .await + .map_err(Error::::CreateInherents)?; + + let inherent_res = self + .client + .runtime_api() + .check_inherents(at_hash, block, inherent_data) + .map_err(|e| Error::Client(e.into()))?; + + if !inherent_res.ok() { + for (identifier, error) in inherent_res.into_errors() { + match create_inherent_data_providers.try_handle_error(&identifier, &error).await { + Some(res) => res.map_err(Error::CheckInherents)?, + None => return Err(Error::CheckInherentsUnhandled(identifier)), + } + } + } + + Ok(()) + } +} + +#[async_trait::async_trait] +impl Verifier for PowVerifier +where + B: BlockT, + C: ProvideRuntimeApi + Send + Sync, + C::Api: BlockBuilderApi, + A: PowAlgorithm + Send + Sync, + 
A::Difficulty: 'static + Send, + CIDP: CreateInherentDataProviders + Send + Sync, +{ + async fn verify( + &self, + mut block: BlockImportParams, + ) -> Result, String> { + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await + .map_err(|e| Error::::Client(ConsensusError::from(e).into()))?; + + let (checked_header, seal) = self.check_header(block.header)?; + + if let Some(inner_body) = block.body.take() { + let check_block = B::new(checked_header.clone(), inner_body); + + if !block.state_action.skip_execution_checks() { + self.check_inherents( + check_block.clone(), + parent_hash, + create_inherent_data_providers, + ) + .await?; + } + + block.body = Some(check_block.deconstruct().1); + } + + let intermediate = PowIntermediate:: { difficulty: None }; + block.header = checked_header; + block.post_digests.push(seal); + block.insert_intermediate(INTERMEDIATE_KEY, intermediate); + block.post_hash = Some(hash); + + Ok(block) + } +} + +/// The PoW import queue type. +pub type PowImportQueue = BasicQueue; + +/// Parameters passed to [`import_queue`]. +pub struct ImportQueueParams<'a, B, I, C, A, CIDP, S> { + /// The block import to use. + pub block_import: I, + /// The justification import. + pub justification_import: Option>, + /// The client to interact with the chain. + pub client: Arc, + /// PoW algorithm. + pub algorithm: A, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// The spawner to spawn background tasks. + pub spawner: &'a S, + /// The prometheus registry. + pub registry: Option<&'a Registry>, +} + +/// Import queue for PoW engine. 
+pub fn import_queue( + ImportQueueParams { + block_import, + justification_import, + client, + algorithm, + create_inherent_data_providers, + spawner, + registry, + }: ImportQueueParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + I: BlockImport + Send + Sync + 'static, + C: ProvideRuntimeApi + Send + Sync + 'static, + C::Api: BlockBuilderApi, + A: PowAlgorithm + Clone + Send + Sync + 'static, + A::Difficulty: Send, + CIDP: CreateInherentDataProviders + Send + 'static, + S: sp_core::traits::SpawnEssentialNamed, +{ + let verifier = PowVerifier::new(client, algorithm, create_inherent_data_providers); + + Ok(BasicQueue::new(verifier, Box::new(block_import), justification_import, spawner, registry)) +} + +/// Parameters used to start a mining worker. +pub struct PowParams { + /// The client to interact with the chain. + pub client: Arc, + /// A select chain implementation to select the best block. + pub select_chain: SC, + /// The block import. + pub block_import: I, + /// PoW algorithm. + pub algorithm: A, + /// The proposer factory to build proposer instances. + pub proposer_factory: E, + /// The sync oracle that can give us the current sync status. + pub sync_oracle: SO, + /// Hook into the sync module to control the justification sync process. + pub justification_sync_link: L, + /// Something that can create the inherent data providers. + pub create_inherent_data_providers: CIDP, + /// Pre-runtime digest to be inserted into blocks. + pub pre_digest_provider: PP, + /// Timeout for importing a block. + pub timeout: Duration, + /// Maximum time allowed for building a block. + pub build_time: Duration, +} + +/// Start the pow worker. This function provides the necessary helper functions that can +/// be used to implement a miner. However, it does not do the CPU-intensive mining itself. 
+#[allow(clippy::type_complexity)] +pub fn start_pow( + PowParams { + client, + select_chain, + block_import, + algorithm, + mut proposer_factory, + sync_oracle, + justification_sync_link, + create_inherent_data_providers, + pre_digest_provider, + timeout, + build_time, + }: PowParams, +) -> ( + PowWorker>::Proof, I>, + impl Future, +) +where + Block: BlockT, + C: BlockchainEvents + 'static, + S: SelectChain + 'static, + I: BlockImport + Send + Sync + 'static, + Algorithm: PowAlgorithm + Clone, + Algorithm::Difficulty: Send + 'static, + E: Environment + Send + Sync + 'static, + E::Error: std::fmt::Debug, + E::Proposer: Proposer, + SO: SyncOracle + Clone + Send + Sync + 'static, + L: sc_consensus::JustificationSyncLink, + CIDP: CreateInherentDataProviders, + PP: PreDigestProvider, +{ + let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); + let worker = PowWorker::new(algorithm.clone(), block_import, justification_sync_link); + let worker_ret = worker.clone(); + + let task = async move { + loop { + if timer.next().await.is_none() { + break + } + + if sync_oracle.is_major_syncing() { + debug!(target: LOG_TARGET, "Skipping proposal due to sync."); + worker.on_major_syncing(); + continue + } + + let best_header = match select_chain.best_chain().await { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to pull new block for authoring. \ + Select best chain error: {}", + err + ); + continue + }, + }; + let best_hash = best_header.hash(); + + // The worker is locked for the duration of the whole proposing period. Within this + // period, the mining target is outdated and useless anyway. + + let difficulty = match algorithm.difficulty(best_hash) { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. 
\ + Fetch difficulty failed: {}", + err, + ); + continue + }, + }; + + let inherent_data_providers = match create_inherent_data_providers + .create_inherent_data_providers(best_hash, ()) + .await + { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating inherent data providers failed: {}", + err, + ); + continue + }, + }; + + let inherent_data = match inherent_data_providers.create_inherent_data().await { + Ok(r) => r, + Err(e) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating inherent data failed: {}", + e, + ); + continue + }, + }; + + let mut inherent_digest = Digest::default(); + let mut pre_digest = None; + + match pre_digest_provider.pre_digest(best_hash.as_ref()).await { + Ok(items) => + for item in items { + if let DigestItem::PreRuntime(POW_ENGINE_ID, ref data) = item { + pre_digest = Some(data.clone()); + } + inherent_digest.push(item); + }, + Err(e) => { + warn!( + target: LOG_TARGET, + "Invalid pre-runtime digest: {}", + e, + ); + continue + }, + } + + let proposer = match proposer_factory.init(&best_header).await { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating proposer failed: {:?}", + err, + ); + continue + }, + }; + + let proposal = + match proposer.propose(inherent_data, inherent_digest, build_time, None).await { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating proposal failed: {}", + err, + ); + continue + }, + }; + + let build = MiningBuild:: { + metadata: MiningMetadata { + best_hash, + pre_hash: proposal.block.header().hash(), + pre_digest, + difficulty, + }, + proposal, + }; + + worker.on_build(build); + } + }; + + (worker_ret, task) +} + +/// Find PoW pre-runtime. 
+fn find_pre_digest(header: &B::Header) -> Result>, Error> { + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: LOG_TARGET, "Checking log {:?}, looking for pre runtime digest", log); + match (log, pre_digest.is_some()) { + (DigestItem::PreRuntime(POW_ENGINE_ID, _), true) => + return Err(Error::MultiplePreRuntimeDigests), + (DigestItem::PreRuntime(POW_ENGINE_ID, v), false) => { + pre_digest = Some(v.clone()); + }, + (_, _) => trace!(target: LOG_TARGET, "Ignoring digest not meant for us"), + } + } + + Ok(pre_digest) +} + +/// Fetch PoW seal. +fn fetch_seal(digest: Option<&DigestItem>, hash: B::Hash) -> Result, Error> { + match digest { + Some(DigestItem::Seal(id, seal)) => + if id == &POW_ENGINE_ID { + Ok(seal.clone()) + } else { + Err(Error::::WrongEngine(*id)) + }, + _ => Err(Error::::HeaderUnsealed(hash)), + } +} diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs new file mode 100644 index 00000000..fec94d76 --- /dev/null +++ b/client/consensus/pow/src/worker.rs @@ -0,0 +1,281 @@ +// This file is part of Noir. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use futures::{ + prelude::*, + task::{Context, Poll}, +}; +use futures_timer::Delay; +use log::*; +use parking_lot::RwLock; +use sc_client_api::ImportNotifications; +use sc_consensus::{BlockImport, BlockImportParams, StateAction, StorageChanges}; +use sp_consensus::{BlockOrigin, Proposal}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header}, + DigestItem, +}; +use std::{ + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; + +use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, LOG_TARGET, POW_ENGINE_ID}; + +/// Mining metadata. This is the information needed to start an actual mining loop. +#[derive(Clone, Eq, PartialEq)] +pub struct MiningMetadata { + /// Currently known best hash which the pre-hash is built on. + pub best_hash: H, + /// Mining pre-hash. + pub pre_hash: H, + /// Pre-runtime digest item. + pub pre_digest: Option>, + /// Mining target difficulty. + pub difficulty: D, +} + +/// A build of mining, containing the metadata and the block proposal. +pub struct MiningBuild, Proof> { + /// Mining metadata. + pub metadata: MiningMetadata, + /// Mining proposal. + pub proposal: Proposal, +} + +/// Version of the mining worker. +#[derive(Eq, PartialEq, Clone, Copy)] +pub struct Version(usize); + +/// PoW worker that exposes structs to query the current mining build and submit mined blocks. 
+pub struct PowWorker< + Block: BlockT, + Algorithm: PowAlgorithm, + L: sc_consensus::JustificationSyncLink, + Proof, + I: BlockImport, +> { + version: Arc, + algorithm: Arc, + justification_sync_link: Arc, + build: Arc>>>, + block_import: Arc, +} + +impl PowWorker +where + Block: BlockT, + Algorithm: PowAlgorithm, + Algorithm::Difficulty: 'static + Send, + L: sc_consensus::JustificationSyncLink, + I: BlockImport, +{ + fn increment_version(&self) { + self.version.fetch_add(1, Ordering::SeqCst); + } + + pub fn new(algorithm: Algorithm, block_import: I, justification_sync_link: L) -> Self { + Self { + version: Arc::new(AtomicUsize::new(0)), + algorithm: Arc::new(algorithm), + justification_sync_link: Arc::new(justification_sync_link), + build: Arc::new(RwLock::new(None)), + block_import: Arc::new(block_import), + } + } + + pub fn on_major_syncing(&self) { + let mut build = self.build.write(); + *build = None; + self.increment_version(); + } + + pub fn on_build(&self, value: MiningBuild) { + let mut build = self.build.write(); + *build = Some(value); + self.increment_version(); + } + + /// Get the version of the mining worker. + /// + /// This returns type `Version` which can only compare equality. If `Version` is unchanged, then + /// it can be certain that `best_hash` and `metadata` were not changed. + pub fn version(&self) -> Version { + Version(self.version.load(Ordering::SeqCst)) + } + + /// Get the current best hash. `None` if the worker has just started or the client is doing + /// major syncing. + pub fn best_hash(&self) -> Option { + self.build.read().as_ref().map(|b| b.metadata.best_hash) + } + + /// Get a copy of the current mining metadata, if available. + pub fn metadata(&self) -> Option> { + self.build.read().as_ref().map(|b| b.metadata.clone()) + } + + /// Submit a mined seal. The seal will be validated again. Returns true if the submission is + /// successful. 
+ pub async fn submit(&self, seal: Seal) -> bool { + if let Some(metadata) = self.metadata() { + match self.algorithm.verify( + &BlockId::Hash(metadata.best_hash), + &metadata.pre_hash, + metadata.pre_digest.as_ref().map(|v| &v[..]), + &seal, + metadata.difficulty, + ) { + Ok(Some(_)) => (), + Ok(None) => { + warn!(target: LOG_TARGET, "Unable to import mined block: seal is invalid",); + return false + }, + Err(err) => { + warn!(target: LOG_TARGET, "Unable to import mined block: {}", err,); + return false + }, + } + } else { + warn!(target: LOG_TARGET, "Unable to import mined block: metadata does not exist",); + return false + } + + let build = if let Some(build) = { + let mut build = self.build.write(); + let value = build.take(); + if value.is_some() { + self.increment_version(); + } + value + } { + build + } else { + warn!(target: LOG_TARGET, "Unable to import mined block: build does not exist",); + return false + }; + + let seal = DigestItem::Seal(POW_ENGINE_ID, seal); + let (header, body) = build.proposal.block.deconstruct(); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(seal); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); + + let intermediate = PowIntermediate:: { + difficulty: Some(build.metadata.difficulty), + }; + import_block.insert_intermediate(INTERMEDIATE_KEY, intermediate); + + let header = import_block.post_header(); + + match self.block_import.import_block(import_block).await { + Ok(res) => { + res.handle_justification( + &header.hash(), + *header.number(), + &self.justification_sync_link, + ); + + info!( + target: LOG_TARGET, + "✅ Successfully mined block on top of: {}", build.metadata.best_hash + ); + true + }, + Err(err) => { + warn!(target: LOG_TARGET, "Unable to import mined block: {}", err,); + false + }, + } + } +} + +impl Clone for PowWorker +where + Block: BlockT, + Algorithm: 
PowAlgorithm, + L: sc_consensus::JustificationSyncLink, + I: BlockImport, +{ + fn clone(&self) -> Self { + Self { + version: self.version.clone(), + algorithm: self.algorithm.clone(), + justification_sync_link: self.justification_sync_link.clone(), + build: self.build.clone(), + block_import: self.block_import.clone(), + } + } +} + +/// A stream that waits for a block import or timeout. +pub struct UntilImportedOrTimeout { + import_notifications: ImportNotifications, + timeout: Duration, + inner_delay: Option, +} + +impl UntilImportedOrTimeout { + /// Create a new stream using the given import notification and timeout duration. + pub fn new(import_notifications: ImportNotifications, timeout: Duration) -> Self { + Self { import_notifications, timeout, inner_delay: None } + } +} + +impl Stream for UntilImportedOrTimeout { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut fire = false; + + loop { + match Stream::poll_next(Pin::new(&mut self.import_notifications), cx) { + Poll::Pending => break, + Poll::Ready(Some(_)) => { + fire = true; + }, + Poll::Ready(None) => return Poll::Ready(None), + } + } + + let timeout = self.timeout; + let inner_delay = self.inner_delay.get_or_insert_with(|| Delay::new(timeout)); + + match Future::poll(Pin::new(inner_delay), cx) { + Poll::Pending => (), + Poll::Ready(()) => { + fire = true; + }, + } + + if fire { + self.inner_delay = None; + Poll::Ready(Some(())) + } else { + Poll::Pending + } + } +} diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs new file mode 100644 index 00000000..e9b43afb --- /dev/null +++ b/client/consensus/src/lib.rs @@ -0,0 +1,44 @@ +// This file is part of Noir. + +// Copyright (C) Haderech Pte. Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_blockchain::Result; +use sp_runtime::DigestItem; +use std::sync::Arc; + +/// A trait that provides multiple pre-runtime digests for different consensus engines. +#[async_trait::async_trait] +pub trait PreDigestProvider { + /// Returns a set of pre-runtime digests. + async fn pre_digest(&self, best_hash: &[u8]) -> Result>; +} + +#[async_trait::async_trait] +impl PreDigestProvider for Arc +where + T: PreDigestProvider + Send + Sync, +{ + async fn pre_digest(&self, best_hash: &[u8]) -> Result> { + self.as_ref().pre_digest(best_hash).await + } +} + +#[async_trait::async_trait] +impl PreDigestProvider for () { + async fn pre_digest(&self, _best_hash: &[u8]) -> Result> { + Ok(vec![]) + } +} diff --git a/frame/wtema/Cargo.toml b/frame/wtema/Cargo.toml new file mode 100644 index 00000000..46df2eb3 --- /dev/null +++ b/frame/wtema/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-wtema" +description = "FRAME wtema for difficulty adjustment algorithm" +license = "GPL-3.0-or-later" +authors = { workspace = true } +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = false + +[dependencies] +frame-support = { workspace = true } +frame-system = { workspace = true } +pallet-timestamp = { workspace = true } +parity-scale-codec = { workspace = true } +scale-info = { workspace = true } +sp-core = { workspace = true } + +[features] +default = ["std"] +std = [ + "frame-support/std", + "frame-system/std", + "pallet-timestamp/std", + "parity-scale-codec/std", + "scale-info/std", + "sp-core/std", +] 
+try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", + "pallet-timestamp/try-runtime", +] diff --git a/frame/wtema/src/lib.rs b/frame/wtema/src/lib.rs new file mode 100644 index 00000000..387ef555 --- /dev/null +++ b/frame/wtema/src/lib.rs @@ -0,0 +1,153 @@ +// This file is part of Noir. + +// Copyright (C) Haderech Pte. Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! WTEMA difficulty adjustment algorithm. +//! +//! +//! +//! ```text +//! target = prior_target * (1 + t/T/N - 1/N) +//! where +//! N = smoothing constant aka filter +//! t = prior block solvetime +//! T = desired average block time +//! ``` + +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +use core::{fmt::Debug, marker::PhantomData}; +use frame_support::{ + sp_runtime, + traits::{Get, OnTimestampSet}, +}; +use parity_scale_codec::FullCodec; +use sp_core::U256; +use sp_runtime::traits::{One, SaturatedConversion, UniqueSaturatedFrom, UniqueSaturatedInto}; + +/// Helper type to calculate the minimum difficulty. 
+pub struct MinDifficulty(PhantomData); +impl Get for MinDifficulty +where + T: Config, + T::Moment: UniqueSaturatedInto, +{ + fn get() -> T::Difficulty { + let filter = T::Moment::from(T::Filter::get()); + let target_block_time = T::TargetBlockTime::get(); + let minimum_period = T::MinimumPeriod::get(); + + ((filter * target_block_time - T::Moment::one()) / (target_block_time - minimum_period)) + .saturated_into() + } +} + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config + pallet_timestamp::Config { + /// Difficulty for cryptographic puzzles in PoW consensus. + type Difficulty: FullCodec + + Default + + Copy + + MaybeSerializeDeserialize + + Debug + + MaxEncodedLen + + TypeInfo + + UniqueSaturatedFrom + + Into + + PartialOrd; + + /// Desired block time in milliseconds. + #[pallet::constant] + type TargetBlockTime: Get; + + /// Smoothing constant for difficulty adjustment. + #[pallet::constant] + type Filter: Get; + + /// Minimum difficulty to be adjusted according to block time changes. + /// + /// If the difficulty drops below the minimum difficulty, it stops adjusting because of + /// rounding errors. + #[pallet::constant] + type MinDifficulty: Get; + } + + /// Target difficulty for the next block. + #[pallet::storage] + #[pallet::getter(fn difficulty)] + pub type Difficulty = StorageValue<_, T::Difficulty, ValueQuery>; + + /// Timestamp of the last block. 
+ #[pallet::storage] + #[pallet::getter(fn last_timestamp)] + pub type LastTimestamp = StorageValue<_, T::Moment, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub initial_difficulty: T::Difficulty, + } + + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { initial_difficulty: T::Difficulty::saturated_from(U256::from(10000)) } + } + } + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + assert!(self.initial_difficulty.into() != U256::from(0)); + Difficulty::::put(self.initial_difficulty); + } + } +} + +impl OnTimestampSet for Pallet +where + T::Moment: Into, +{ + fn on_timestamp_set(now: T::Moment) { + let block_time = match frame_system::Pallet::::block_number() { + n if n <= One::one() => T::TargetBlockTime::get(), + _ => now - LastTimestamp::::get(), + }; + let desired_block_time = T::TargetBlockTime::get().into(); + let prior_target = U256::max_value() / Difficulty::::get().into(); + let filter = T::Filter::get(); + + let target = (prior_target / (desired_block_time * filter)) + .saturating_mul(desired_block_time * filter + block_time - desired_block_time); + let mut difficulty = T::Difficulty::saturated_from(U256::max_value() / target); + + if difficulty < T::MinDifficulty::get() { + difficulty = T::MinDifficulty::get(); + } + + Difficulty::::put(difficulty); + LastTimestamp::::put(now); + } +} diff --git a/primitives/consensus/Cargo.toml b/primitives/consensus/Cargo.toml new file mode 100644 index 00000000..dad63ead --- /dev/null +++ b/primitives/consensus/Cargo.toml @@ -0,0 +1,4 @@ +[package] +name = "np-consensus" +edition = { workspace = true } +publish = false diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml new file mode 100644 index 00000000..891bc33d --- /dev/null +++ b/primitives/consensus/pow/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "np-consensus-pow" +description = "Noir primitive types for PoW consensus" 
+license = "Apache-2.0" +authors = { workspace = true } +version = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +publish = false + +[dependencies] +parity-scale-codec = { workspace = true, features = ["derive"] } +sp-api = { workspace = true } +sp-arithmetic = { workspace = true } +sp-core = { workspace = true } +sp-runtime = { workspace = true } + +[features] +default = ["std"] +std = [ + "parity-scale-codec/std", + "sp-api/std", + "sp-arithmetic/std", + "sp-core/std", + "sp-runtime/std", +] diff --git a/primitives/consensus/pow/src/lib.rs b/primitives/consensus/pow/src/lib.rs new file mode 100644 index 00000000..5f5dca3a --- /dev/null +++ b/primitives/consensus/pow/src/lib.rs @@ -0,0 +1,92 @@ +// This file is part of Noir. + +// Copyright (C) Haderech Pte. Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Noir primitive types for PoW consensus. + +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +use alloc::vec::Vec; +use core::ops::AddAssign; +use parity_scale_codec::{Decode, Encode}; +use sp_api::decl_runtime_apis; +use sp_arithmetic::traits::{Bounded, SaturatedConversion, Saturating, UniqueSaturatedFrom}; +use sp_core::U256; +use sp_runtime::ConsensusEngineId; + +/// `ConsensusEngineId` for PoW. +pub const POW_ENGINE_ID: ConsensusEngineId = *b"pow_"; + +/// Seal for PoW. 
+pub type Seal = Vec; + +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Decode, Encode)] +pub struct BlockWeight(Weight); + +impl AddAssign for BlockWeight +where + Weight: Copy + Saturating, +{ + fn add_assign(&mut self, other: Weight) { + let ret = self.0.saturating_add(other); + *self = Self(ret); + } +} + +/// Checks if a hash fits the given difficulty. +pub fn check_hash(hash: &Hash, difficulty: Difficulty) -> bool +where + Hash: AsRef<[u8]>, + Difficulty: Into, +{ + let hash = U256::from(hash.as_ref()); + let (_, overflowed) = hash.overflowing_mul(difficulty.into()); + + !overflowed +} + +/// Returns a difficulty for the given hash. +pub fn difficulty(hash: &Hash) -> Difficulty +where + Hash: AsRef<[u8]>, + Difficulty: Bounded + UniqueSaturatedFrom, +{ + let is_zero = hash.as_ref().iter().all(|&x| x == 0); + + if !is_zero { + Difficulty::saturated_from(U256::max_value() / U256::from(hash.as_ref())) + } else { + Difficulty::max_value() + } +} + +decl_runtime_apis! { + /// API necessary for timestamp-based difficulty adjustment algorithms. + pub trait TimestampApi { + /// Return the timestamp in the current block. + fn timestamp() -> Moment; + } + + /// API for those chains that put their difficulty adjustment algorithm directly + /// onto runtime. Note that while putting difficulty adjustment algorithm to + /// runtime is safe, putting the PoW algorithm on runtime is not. + pub trait DifficultyApi { + /// Return the target difficulty of the next block. + fn difficulty() -> Difficulty; + } +} diff --git a/primitives/consensus/src/lib.rs b/primitives/consensus/src/lib.rs new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/primitives/consensus/src/lib.rs @@ -0,0 +1 @@ +