Commit d3a8537

Merge pull request #1072 from subspace/unify-piece-receiving-protocol
networking: Unite requests for storage types.
2 parents a5e4e17 + 5558034 commit d3a8537

File tree

8 files changed, +78 -160 lines


crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/dsn.rs

Lines changed: 44 additions & 40 deletions
@@ -12,11 +12,10 @@ use subspace_networking::libp2p::identity::Keypair;
 use subspace_networking::libp2p::kad::record::Key;
 use subspace_networking::libp2p::kad::ProviderRecord;
 use subspace_networking::libp2p::multihash::Multihash;
-use subspace_networking::utils::multihash::MultihashCode;
 use subspace_networking::{
     create, peer_id, BootstrappedNetworkingParameters, Config, Node, NodeRunner,
     ParityDbProviderStorage, PieceByHashRequest, PieceByHashRequestHandler, PieceByHashResponse,
-    PieceKey, ToMultihash,
+    ToMultihash,
 };
 use tokio::runtime::Handle;
 use tracing::{debug, info, trace, warn};
@@ -86,9 +85,16 @@ pub(super) async fn configure_dsn(
         networking_parameters_registry: BootstrappedNetworkingParameters::new(bootstrap_nodes)
             .boxed(),
         request_response_protocols: vec![PieceByHashRequestHandler::create(move |req| {
-            let result = match req.key {
-                PieceKey::ArchivalStorage(piece_index_hash) => {
-                    debug!(key=?req.key, "Archival storage piece request received.");
+            let result = {
+                debug!(piece_index_hash = ?req.piece_index_hash, "Piece request received. Trying cache...");
+                let multihash = req.piece_index_hash.to_multihash();
+
+                let piece_from_cache = piece_storage.get(&multihash.into());
+
+                if piece_from_cache.is_some() {
+                    piece_from_cache
+                } else {
+                    debug!(piece_index_hash = ?req.piece_index_hash, "No piece in the cache. Trying archival storage...");
 
                     let (mut reader, piece_details) = {
                         let readers_and_pieces = match weak_readers_and_pieces.upgrade() {
@@ -103,23 +109,26 @@
                             Some(readers_and_pieces) => readers_and_pieces,
                             None => {
                                 debug!(
-                                    ?piece_index_hash,
+                                    ?req.piece_index_hash,
                                     "Readers and pieces are not initialized yet"
                                 );
                                 return None;
                             }
                         };
-                        let piece_details =
-                            match readers_and_pieces.pieces.get(&piece_index_hash).copied() {
-                                Some(piece_details) => piece_details,
-                                None => {
-                                    trace!(
-                                        ?piece_index_hash,
-                                        "Piece is not stored in any of the local plots"
-                                    );
-                                    return None;
-                                }
-                            };
+                        let piece_details = match readers_and_pieces
+                            .pieces
+                            .get(&req.piece_index_hash)
+                            .copied()
+                        {
+                            Some(piece_details) => piece_details,
+                            None => {
+                                trace!(
+                                    ?req.piece_index_hash,
+                                    "Piece is not stored in any of the local plots"
+                                );
+                                return None;
+                            }
+                        };
                         let reader = readers_and_pieces
                             .readers
                             .get(piece_details.plot_offset)
@@ -136,11 +145,6 @@ pub(super) async fn configure_dsn(
                         )
                     })
                 }
-                PieceKey::Cache(piece_index_hash) => {
-                    debug!(key=?req.key, "Cache piece request received.");
-
-                    piece_storage.get(&piece_index_hash.to_multihash().into())
-                }
             };
 
             Some(PieceByHashResponse { piece: result })
@@ -171,7 +175,6 @@ impl<PS: PieceStorage> FarmerProviderRecordProcessor<PS> {
     //TODO: consider introducing get-piece helper
     async fn get_piece(&self, piece_index_hash: PieceIndexHash) -> Option<Piece> {
         let multihash = piece_index_hash.to_multihash();
-        let piece_key = PieceKey::Cache(piece_index_hash);
 
         let get_providers_result = self.node.get_providers(multihash).await;
 
@@ -180,29 +183,34 @@ impl<PS: PieceStorage> FarmerProviderRecordProcessor<PS> {
                 while let Some(provider_id) = get_providers_stream.next().await {
                     trace!(?multihash, %provider_id, "get_providers returned an item");
 
+                    if provider_id == self.node.id() {
+                        trace!(?multihash, %provider_id, "Attempted to get a piece from itself.");
+                        continue;
+                    }
+
                     let request_result = self
                         .node
-                        .send_generic_request(provider_id, PieceByHashRequest { key: piece_key })
+                        .send_generic_request(provider_id, PieceByHashRequest { piece_index_hash })
                        .await;
 
                     match request_result {
                         Ok(PieceByHashResponse { piece: Some(piece) }) => {
-                            trace!(%provider_id, ?multihash, ?piece_key, "Piece request succeeded.");
+                            trace!(%provider_id, ?multihash, ?piece_index_hash, "Piece request succeeded.");
                             return Some(piece);
                         }
                         Ok(PieceByHashResponse { piece: None }) => {
-                            debug!(%provider_id, ?multihash, ?piece_key, "Piece request returned empty piece.");
+                            debug!(%provider_id, ?multihash, ?piece_index_hash, "Piece request returned empty piece.");
                         }
                         Err(error) => {
-                            warn!(%provider_id, ?multihash, ?piece_key, ?error, "Piece request failed.");
+                            warn!(%provider_id, ?multihash, ?piece_index_hash, ?error, "Piece request failed.");
                         }
                     }
                 }
             }
             Err(err) => {
                 warn!(
                     ?multihash,
-                    ?piece_key,
+                    ?piece_index_hash,
                     ?err,
                     "get_providers returned an error"
                 );
@@ -244,21 +252,17 @@ impl<PS: PieceStorage> FarmerProviderRecordProcessor<PS> {
             return;
         }
 
-        if multihash.code() == u64::from(MultihashCode::PieceIndex) {
-            trace!(key=?rec.key, "Starting processing provider record...");
+        trace!(key=?rec.key, "Starting processing provider record...");
 
-            if self.piece_storage.should_include_in_storage(&rec.key) {
-                let piece_index_hash: Blake2b256Hash = multihash.digest()[..BLAKE2B_256_HASH_SIZE]
-                    .try_into()
-                    .expect("Multihash should be known 32 bytes size.");
+        if self.piece_storage.should_include_in_storage(&rec.key) {
+            let piece_index_hash: Blake2b256Hash = multihash.digest()[..BLAKE2B_256_HASH_SIZE]
+                .try_into()
+                .expect("Multihash should be known 32 bytes size.");
 
-                if let Some(piece) = self.get_piece(piece_index_hash.into()).await {
-                    self.piece_storage.add_piece(rec.key.clone(), piece);
-                    self.announce_piece(multihash).await;
-                }
+            if let Some(piece) = self.get_piece(piece_index_hash.into()).await {
+                self.piece_storage.add_piece(rec.key.clone(), piece);
+                self.announce_piece(multihash).await;
             }
-        } else {
-            trace!(key=?rec.key, "Processing of the provider record cancelled.");
         }
     }
 }
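Note: the handler above now serves both storage types from one request. The following condensed sketch only illustrates that lookup order; `cache_get` and `plot_get` are hypothetical stand-ins for the piece cache and the plot-reader path shown in the diff, not real APIs.

use subspace_core_primitives::{Piece, PieceIndexHash};
use subspace_networking::libp2p::kad::record::Key;
use subspace_networking::{PieceByHashRequest, PieceByHashResponse, ToMultihash};

// Condensed sketch of the unified handler flow (illustration only).
fn handle_piece_request(
    req: &PieceByHashRequest,
    cache_get: impl Fn(&Key) -> Option<Piece>,
    plot_get: impl Fn(&PieceIndexHash) -> Option<Piece>,
) -> Option<PieceByHashResponse> {
    // Single request key: the piece index hash, converted to a multihash for the cache.
    let cache_key: Key = req.piece_index_hash.to_multihash().into();

    // Cache first, then fall back to archival storage (local plots), as in the handler above.
    let piece = cache_get(&cache_key).or_else(|| plot_get(&req.piece_index_hash));

    Some(PieceByHashResponse { piece })
}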

crates/subspace-farmer/src/bin/subspace-farmer/commands/farm/farmer_provider_storage.rs

Lines changed: 10 additions & 14 deletions
@@ -5,7 +5,6 @@ use std::sync::Arc;
 use subspace_networking::libp2p::kad::record::Key;
 use subspace_networking::libp2p::kad::ProviderRecord;
 use subspace_networking::libp2p::PeerId;
-use subspace_networking::utils::multihash::MultihashCode;
 use subspace_networking::{deconstruct_record_key, ProviderStorage, ToMultihash};
 
 pub(crate) struct FarmerProviderStorage<PersistentProviderStorage> {
@@ -44,21 +43,18 @@
     fn providers(&self, key: &Key) -> Vec<ProviderRecord> {
         let mut provider_records = self.persistent_provider_storage.providers(key);
 
-        let (piece_index_hash, multihash_code) = deconstruct_record_key(key);
+        let (piece_index_hash, _) = deconstruct_record_key(key);
 
-        if multihash_code == MultihashCode::Sector
-            && self
-                .readers_and_pieces
-                .lock()
-                .as_ref()
-                .expect("Should be populated at this point.")
-                .pieces
-                .contains_key(&piece_index_hash)
+        if self
+            .readers_and_pieces
+            .lock()
+            .as_ref()
+            .expect("Should be populated at this point.")
+            .pieces
+            .contains_key(&piece_index_hash)
         {
             provider_records.push(ProviderRecord {
-                key: piece_index_hash
-                    .to_multihash_by_code(MultihashCode::Sector)
-                    .into(),
+                key: piece_index_hash.to_multihash().into(),
                 provider: self.local_peer_id,
                 expires: None,
                 addresses: Vec::new(), // TODO: add address hints
@@ -77,7 +73,7 @@
             .keys()
             .map(|hash| {
                 ProviderRecord {
-                    key: hash.to_multihash_by_code(MultihashCode::Sector).into(),
+                    key: hash.to_multihash().into(),
                     provider: self.local_peer_id,
                     expires: None,
                     addresses: Vec::new(), // TODO: add address hints
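The practical effect of dropping `MultihashCode::Sector` here is that the key a farmer advertises for a plotted piece is the same key a requester derives from the piece index hash. A minimal illustration, using only the types and conversions already present in this file (not part of the merged change):

use subspace_core_primitives::PieceIndexHash;
use subspace_networking::libp2p::kad::record::Key;
use subspace_networking::ToMultihash;

// Cache records and plot-backed provider records now share one key derivation,
// mirroring the `to_multihash().into()` calls above.
fn provider_record_key(piece_index_hash: &PieceIndexHash) -> Key {
    piece_index_hash.to_multihash().into()
}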

crates/subspace-farmer/src/single_disk_plot/piece_publisher.rs

Lines changed: 1 addition & 3 deletions
@@ -7,7 +7,6 @@ use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use std::time::Duration;
 use subspace_core_primitives::{PieceIndex, PieceIndexHash};
-use subspace_networking::utils::multihash::MultihashCode;
 use subspace_networking::{Node, ToMultihash};
 use tokio::time::error::Elapsed;
 use tokio::time::timeout;
@@ -104,8 +103,7 @@ impl PieceSectorPublisher {
     ) -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
         self.check_cancellation()?;
 
-        let key =
-            PieceIndexHash::from_index(piece_index).to_multihash_by_code(MultihashCode::Sector);
+        let key = PieceIndexHash::from_index(piece_index).to_multihash();
 
         let result = self.dsn_node.start_announcing(key).await;
 
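Announcing a plotted piece therefore uses the same default multihash as every other piece key. A sketch of that call path, based on the `start_announcing` usage above (error handling and cancellation checks omitted; illustrative, not the merged code):

use subspace_core_primitives::{PieceIndex, PieceIndexHash};
use subspace_networking::{Node, ToMultihash};

// Announce a plotted piece under the unified key (no MultihashCode::Sector).
async fn announce_piece(node: &Node, piece_index: PieceIndex) {
    let key = PieceIndexHash::from_index(piece_index).to_multihash();
    // `start_announcing` is the same Node call used by PieceSectorPublisher above;
    // the result is ignored in this sketch.
    let _ = node.start_announcing(key).await;
}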
crates/subspace-networking/src/lib.rs

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ pub use request_handlers::peer_info::{
     PeerInfo, PeerInfoRequest, PeerInfoRequestHandler, PeerInfoResponse, PeerSyncStatus,
 };
 pub use request_handlers::piece_by_key::{
-    PieceByHashRequest, PieceByHashRequestHandler, PieceByHashResponse, PieceKey,
+    PieceByHashRequest, PieceByHashRequestHandler, PieceByHashResponse,
 };
 pub use request_handlers::pieces_by_range::{
     PiecesByRangeRequest, PiecesByRangeRequestHandler, PiecesByRangeResponse, PiecesToPlot,

crates/subspace-networking/src/request_handlers/piece_by_key.rs

Lines changed: 2 additions & 10 deletions
@@ -7,19 +7,11 @@ use crate::request_handlers::generic_request_handler::{GenericRequest, GenericRe
 use parity_scale_codec::{Decode, Encode};
 use subspace_core_primitives::{Piece, PieceIndexHash};
 
-//TODO: rename all module names if we keep this enum
-#[derive(Debug, Clone, Eq, PartialEq, Copy, Encode, Decode)]
-pub enum PieceKey {
-    Cache(PieceIndexHash),
-    ArchivalStorage(PieceIndexHash),
-}
-
 /// Piece-by-hash protocol request.
 #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode)]
 pub struct PieceByHashRequest {
-    //TODO: rename if we keep the enum
-    /// Piece index hash
-    pub key: PieceKey,
+    /// Request key - piece index hash
+    pub piece_index_hash: PieceIndexHash,
 }
 
 impl GenericRequest for PieceByHashRequest {
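On the wire the request now carries the piece index hash directly, with no `PieceKey` variant to match on. A sketch of the requesting side, mirroring the `send_generic_request` call in the farmer code above (single provider, no retries; illustrative only):

use subspace_core_primitives::{Piece, PieceIndexHash};
use subspace_networking::libp2p::PeerId;
use subspace_networking::{Node, PieceByHashRequest, PieceByHashResponse};

// Ask one provider for a piece using the unified request type.
async fn request_piece(
    node: &Node,
    provider_id: PeerId,
    piece_index_hash: PieceIndexHash,
) -> Option<Piece> {
    match node
        .send_generic_request(provider_id, PieceByHashRequest { piece_index_hash })
        .await
    {
        // An Ok response may still carry no piece if the provider has neither
        // a cached nor a plotted copy.
        Ok(PieceByHashResponse { piece }) => piece,
        Err(_) => None,
    }
}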

crates/subspace-networking/src/utils/multihash.rs

Lines changed: 4 additions & 17 deletions
@@ -1,6 +1,6 @@
 use libp2p::multihash::Multihash;
 use std::error::Error;
-use subspace_core_primitives::{Blake2b256Hash, PieceIndexHash};
+use subspace_core_primitives::PieceIndexHash;
 
 /// Start of Subspace Network multicodec namespace (+1000 to distinguish from future stable values):
 /// https://github.com/multiformats/multicodec/blob/master/table.csv
@@ -9,9 +9,7 @@ const SUBSPACE_MULTICODEC_NAMESPACE_START: u64 = 0xb39910 + 1000;
 #[derive(Debug, Clone, PartialEq)]
 #[repr(u64)]
 pub enum MultihashCode {
-    Piece = SUBSPACE_MULTICODEC_NAMESPACE_START,
-    PieceIndex = SUBSPACE_MULTICODEC_NAMESPACE_START + 1,
-    Sector = SUBSPACE_MULTICODEC_NAMESPACE_START + 2,
+    PieceIndexHash = SUBSPACE_MULTICODEC_NAMESPACE_START,
 }
 
 impl From<MultihashCode> for u64 {
@@ -25,9 +23,7 @@ impl TryFrom<u64> for MultihashCode {
 
     fn try_from(value: u64) -> Result<Self, Self::Error> {
         match value {
-            x if x == MultihashCode::Piece as u64 => Ok(MultihashCode::Piece),
-            x if x == MultihashCode::PieceIndex as u64 => Ok(MultihashCode::PieceIndex),
-            x if x == MultihashCode::Sector as u64 => Ok(MultihashCode::Sector),
+            x if x == MultihashCode::PieceIndexHash as u64 => Ok(MultihashCode::PieceIndexHash),
             _ => Err("Unexpected multihash code".into()),
         }
     }
@@ -39,23 +35,14 @@ pub fn create_multihash_by_piece_index(piece_index: u64) -> Multihash {
     piece_index_hash.to_multihash()
 }
 
-pub fn create_multihash_by_piece(records_root: &Blake2b256Hash, piece_index: u64) -> Multihash {
-    let piece_index_bytes = piece_index.to_le_bytes();
-    let mut input = Vec::with_capacity(records_root.len() + piece_index_bytes.len());
-    input.extend_from_slice(records_root);
-    input.extend_from_slice(&piece_index_bytes);
-    Multihash::wrap(u64::from(MultihashCode::Piece), &input)
-        .expect("Input never exceeds allocated size; qed")
-}
-
 pub trait ToMultihash {
     fn to_multihash(&self) -> Multihash;
     fn to_multihash_by_code(&self, code: MultihashCode) -> Multihash;
 }
 
 impl ToMultihash for PieceIndexHash {
     fn to_multihash(&self) -> Multihash {
-        self.to_multihash_by_code(MultihashCode::PieceIndex)
+        self.to_multihash_by_code(MultihashCode::PieceIndexHash)
     }
 
     fn to_multihash_by_code(&self, code: MultihashCode) -> Multihash {
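With the `Piece` and `Sector` codes removed, every piece-related DHT key is produced with the single `PieceIndexHash` code, which is why guards like the old `multihash.code() == u64::from(MultihashCode::PieceIndex)` check disappear elsewhere in this commit. A small sketch of the invariant callers can now rely on (illustrative only, using the public items shown in this file):

use subspace_networking::utils::multihash::{create_multihash_by_piece_index, MultihashCode};

// Every piece key carries the one remaining Subspace multihash code.
fn uses_unified_code(piece_index: u64) -> bool {
    let multihash = create_multihash_by_piece_index(piece_index);
    multihash.code() == u64::from(MultihashCode::PieceIndexHash)
}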
