@@ -7,9 +7,9 @@ use crate::plot::{PieceOffset, Plot};
 use arc_swap::ArcSwapOption;
 use databases::{CommitmentDatabases, CreateDbEntryResult, DbEntry};
 use event_listener_primitives::{Bag, HandlerId};
+use parity_db::Db;
 use parking_lot::Mutex;
 use rayon::prelude::*;
-use rocksdb::{WriteBatch, DB};
 use std::path::PathBuf;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
@@ -28,11 +28,13 @@ const TAGS_WRITE_BATCH_SIZE: usize = 16 * 1024 * 1024 / (TAG_SIZE + PIECE_OFFSET
 #[derive(Debug, Error)]
 pub enum CommitmentError {
     #[error("Metadata DB error: {0}")]
-    MetadataDb(rocksdb::Error),
+    MetadataDb(parity_db::Error),
     #[error("Commitment DB error: {0}")]
-    CommitmentDb(rocksdb::Error),
+    CommitmentDb(parity_db::Error),
     #[error("Plot error: {0}")]
     Plot(io::Error),
+    #[error("Migration error: {0}")]
+    Migrate(io::Error),
 }
 
 #[derive(Debug, Copy, Clone)]
@@ -130,9 +132,23 @@ impl Commitments {
         self.inner.current.swap(current);
         self.inner.next.swap(next);
 
-        let db_path = self.inner.base_directory.join(hex::encode(salt));
+        let options = parity_db::Options {
+            path: self.inner.base_directory.join(hex::encode(salt)),
+            columns: vec![parity_db::ColumnOptions {
+                preimage: false,
+                btree_index: true,
+                uniform: false,
+                ref_counted: false,
+                compression: parity_db::CompressionType::NoCompression,
+                compression_threshold: 4096,
+            }],
+            sync_wal: true,
+            sync_data: true,
+            stats: false,
+            salt: None,
+        };
         db_entry.lock().replace(Arc::new(
-            DB::open_default(db_path).map_err(CommitmentError::CommitmentDb)?,
+            Db::open_or_create(&options).map_err(CommitmentError::CommitmentDb)?,
         ));
     }
 
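For reference, a minimal standalone sketch of opening a database configured like the one above. The path is arbitrary, and the field set simply mirrors what this patch constructs (it may differ in other parity-db versions); `btree_index: true` is the option that makes the ordered iteration and `seek()` used further down in this patch possible.

    use parity_db::{ColumnOptions, CompressionType, Db, Options};
    use std::path::PathBuf;

    // Open (or create) a single-column commitments database. The column is
    // BTree-indexed so entries can later be scanned in tag order.
    fn open_commitments_db(path: PathBuf) -> parity_db::Result<Db> {
        Db::open_or_create(&Options {
            path,
            columns: vec![ColumnOptions {
                preimage: false,
                btree_index: true,
                uniform: false,
                ref_counted: false,
                compression: CompressionType::NoCompression,
                compression_threshold: 4096,
            }],
            sync_wal: true,
            sync_data: true,
            stats: false,
            salt: None,
        })
    }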
@@ -173,11 +189,12 @@ impl Commitments {
             if tags_with_offset.len() == tags_with_offset.capacity() {
                 tags_with_offset.sort_by(|(tag_a, _), (tag_b, _)| tag_a.cmp(tag_b));
 
-                let mut batch = WriteBatch::default();
-                for (tag, offset) in &tags_with_offset {
-                    batch.put(tag, offset);
-                }
-                db.write(batch).map_err(CommitmentError::CommitmentDb)?;
+                db.commit(
+                    tags_with_offset
+                        .iter()
+                        .map(|(tag, offset)| (0, tag, Some(offset.to_vec()))),
+                )
+                .map_err(CommitmentError::CommitmentDb)?;
 
                 tags_with_offset.clear();
             }
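The `db.commit()` calls in this patch stand in for rocksdb's `WriteBatch`: one commit applies a set of changes together, where each item is `(column, key, Option<value>)`, with `Some` inserting or overwriting and `None` deleting. A hedged sketch with made-up tags and offsets (column 0, 8-byte big-endian tag keys and little-endian offset values, matching the encoding used elsewhere in this file):

    // Atomically apply one insert and one delete in a single commit.
    // Values are Vec<u8>; a None value removes the key if it exists.
    fn flush_example(db: &parity_db::Db) -> parity_db::Result<()> {
        let tag_a = 1u64.to_be_bytes();
        let tag_b = 2u64.to_be_bytes();
        db.commit(vec![
            (0u8, tag_a.to_vec(), Some(10u64.to_le_bytes().to_vec())), // insert/overwrite
            (0u8, tag_b.to_vec(), None),                               // delete
        ])
    }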
@@ -195,11 +212,12 @@ impl Commitments {
         if let Some(db) = db_guard.as_ref() {
             tags_with_offset.sort_by(|(tag_a, _), (tag_b, _)| tag_a.cmp(tag_b));
 
-            let mut batch = WriteBatch::default();
-            for (tag, offset) in &tags_with_offset {
-                batch.put(tag, offset);
-            }
-            db.write(batch).map_err(CommitmentError::CommitmentDb)?;
+            db.commit(
+                tags_with_offset
+                    .iter()
+                    .map(|(tag, offset)| (0, tag, Some(offset.to_vec()))),
+            )
+            .map_err(CommitmentError::CommitmentDb)?;
         }
     }
 
@@ -240,12 +258,12 @@ impl Commitments {
         let db_guard = db_entry.lock();
 
         if let Some(db) = db_guard.as_ref() {
-            let mut batch = WriteBatch::default();
-            for piece in pieces {
-                let tag = create_tag(piece, salt);
-                batch.delete(tag);
-            }
-            db.write(batch).map_err(CommitmentError::CommitmentDb)?;
+            db.commit(
+                pieces
+                    .iter()
+                    .map(|piece| (0, create_tag(piece, salt), None)),
+            )
+            .map_err(CommitmentError::CommitmentDb)?;
         }
     }
 
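A related sketch of the deletion path above: committing `None` for a key removes it, so a point lookup afterwards should come back empty. `Db::get` is assumed here as parity-db's point-lookup call (it is not part of this diff); column 0 and the tag are hypothetical.

    // Remove a single commitment by tag and sanity-check that it is gone.
    fn remove_commitment(db: &parity_db::Db, tag: [u8; 8]) -> parity_db::Result<()> {
        db.commit(std::iter::once((0u8, tag.to_vec(), None)))?;
        debug_assert!(db.get(0, &tag)?.is_none()); // point lookup on column 0
        Ok(())
    }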
@@ -276,11 +294,12 @@ impl Commitments {
 
             tags_with_offset.sort_by(|(tag_a, _), (tag_b, _)| tag_a.cmp(tag_b));
 
-            let mut batch = WriteBatch::default();
-            for (tag, piece_offset) in tags_with_offset {
-                batch.put(tag, piece_offset.to_le_bytes());
-            }
-            db.write(batch).map_err(CommitmentError::CommitmentDb)?;
+            db.commit(
+                tags_with_offset
+                    .into_iter()
+                    .map(|(tag, offset)| (0, tag, Some(offset.to_le_bytes().to_vec()))),
+            )
+            .map_err(CommitmentError::CommitmentDb)?;
         };
     }
 
@@ -315,12 +334,17 @@ impl Commitments {
                 return Vec::new();
             }
         };
-        let iter = db.raw_iterator();
+        let iter = match db.iter(0) {
+            Ok(iter) => iter,
+            Err(_) => {
+                return Vec::new();
+            }
+        };
 
         // Take the best out of 10 solutions
         let mut solutions = SolutionIterator::new(iter, target, range)
-            .take(limit)
-            .collect::<Vec<_>>();
+            .map(|iter| iter.take(limit).collect::<Vec<_>>())
+            .unwrap_or_default();
         let target = u64::from_be_bytes(target);
         solutions.sort_by_key(|(tag, _)| {
             let tag = u64::from_be_bytes(*tag);
@@ -374,7 +398,7 @@ enum SolutionIteratorState {
 }
 
 pub(crate) struct SolutionIterator<'a> {
-    iter: rocksdb::DBRawIterator<'a>,
+    iter: parity_db::BTreeIterator<'a>,
     state: SolutionIteratorState,
     /// Lower bound of solution range
     lower: u64,
@@ -383,7 +407,11 @@ pub(crate) struct SolutionIterator<'a> {
 }
 
 impl<'a> SolutionIterator<'a> {
-    pub fn new(mut iter: rocksdb::DBRawIterator<'a>, target: Tag, range: u64) -> Self {
+    pub fn new(
+        mut iter: parity_db::BTreeIterator<'a>,
+        target: Tag,
+        range: u64,
+    ) -> parity_db::Result<Self> {
         let (lower, is_lower_overflowed) = u64::from_be_bytes(target).overflowing_sub(range / 2);
         let (upper, is_upper_overflowed) = u64::from_be_bytes(target).overflowing_add(range / 2);
 
@@ -394,29 +422,27 @@ impl<'a> SolutionIterator<'a> {
         );
 
         let state = if is_lower_overflowed || is_upper_overflowed {
-            iter.seek_to_first();
+            iter.seek_to_first()?;
             SolutionIteratorState::OverflowStart
         } else {
-            iter.seek(lower.to_be_bytes());
+            iter.seek(&lower.to_be_bytes())?;
             SolutionIteratorState::NoOverflow
         };
-        Self {
+        Ok(Self {
             iter,
             state,
             lower,
             upper,
-        }
+        })
     }
 
     fn next_entry(&mut self) -> Option<(Tag, PieceOffset)> {
-        self.iter
-            .key()
-            .map(|tag| tag.try_into().unwrap())
-            .map(|tag| {
-                let offset = u64::from_le_bytes(self.iter.value().unwrap().try_into().unwrap());
-                self.iter.next();
-                (tag, offset)
-            })
+        self.iter.next().ok().flatten().map(|(tag, offset)| {
+            (
+                tag.try_into().unwrap(),
+                u64::from_le_bytes(offset.try_into().unwrap()),
+            )
+        })
     }
 }
 
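On the read side, `db.iter(0)` above yields a `BTreeIterator`: `seek()` positions the cursor and each `next()` call returns `Ok(Some((key, value)))` in ascending key order until the column is exhausted, which is what the range lookup in `SolutionIterator` relies on. A hedged sketch of a plain forward scan from a starting tag (column 0 assumed, keys and values kept as raw byte vectors):

    // Collect all entries whose keys are >= `start`, in ascending key order.
    fn scan_from(db: &parity_db::Db, start: [u8; 8]) -> parity_db::Result<Vec<(Vec<u8>, Vec<u8>)>> {
        let mut iter = db.iter(0)?;
        iter.seek(&start)?; // position at the first key >= start
        let mut entries = Vec::new();
        // next() here is the inherent method returning Result<Option<_>>,
        // so errors surface explicitly, as the patched next_entry above handles.
        while let Some((key, value)) = iter.next()? {
            entries.push((key, value));
        }
        Ok(entries)
    }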
@@ -433,7 +459,7 @@ impl<'a> Iterator for SolutionIterator<'a> {
                 .filter(|(tag, _)| u64::from_be_bytes(*tag) <= self.upper)
                 .or_else(|| {
                     self.state = SolutionIteratorState::OverflowEnd;
-                    self.iter.seek(self.lower.to_be_bytes());
+                    self.iter.seek(&self.lower.to_be_bytes()).ok()?;
                     self.next()
                 }),
             SolutionIteratorState::OverflowEnd => self.next_entry(),