Skip to content

Commit 93b1e55

Browse files
committed
Fix tests and improve algo to collect spillable pages
1 parent affbc77 commit 93b1e55

File tree

2 files changed

+55
-22
lines changed

2 files changed

+55
-22
lines changed

core/storage/btree.rs

Lines changed: 8 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -8006,8 +8006,7 @@ mod tests {
80068006
let cursor = BTreeCursor::new_table(pager.clone(), page_idx, num_columns);
80078007
let page = match cursor.read_page(page_idx).unwrap() {
80088008
IOResult::IO(IOCompletions::Single(c)) => {
8009-
pager.io.wait_for_completion(c);
8010-
// TODO
8009+
pager.io.wait_for_completion(c).unwrap();
80118010
return validate_btree(pager.clone(), page_idx);
80128011
}
80138012
IOResult::Done((page, _c)) => {
@@ -8034,14 +8033,14 @@ mod tests {
80348033
left_child_page, ..
80358034
}) => {
80368035
match cursor.read_page(left_child_page as i64).unwrap() {
8037-
IOResult::Done((child_page, c)) => {
8036+
IOResult::Done((child_page, _c)) => {
80388037
while child_page.is_locked() {
80398038
pager.io.step().unwrap();
80408039
}
80418040
child_pages.push(child_page);
80428041
}
80438042
IOResult::IO(IOCompletions::Single(c)) => {
8044-
pager.io.wait_for_completion(c);
8043+
pager.io.wait_for_completion(c).unwrap();
80458044
}
80468045
}
80478046
if left_child_page == page.get().id as u32 {
@@ -9692,7 +9691,7 @@ mod tests {
96929691
run_until_done(
96939692
|| {
96949693
fill_cell_payload(
9695-
&PinGuard::new(page),
9694+
&PinGuard::new(page.clone()),
96969695
Some(i as i64),
96979696
&mut payload,
96989697
cell_idx,
@@ -9774,7 +9773,7 @@ mod tests {
97749773
run_until_done(
97759774
|| {
97769775
fill_cell_payload(
9777-
&PinGuard::new(page),
9776+
&PinGuard::new(page.clone()),
97789777
Some(i),
97799778
&mut payload,
97809779
cell_idx,
@@ -10147,7 +10146,7 @@ mod tests {
1014710146
run_until_done(
1014810147
|| {
1014910148
fill_cell_payload(
10150-
&PinGuard::new(page),
10149+
&PinGuard::new(page.clone()),
1015110150
Some(0),
1015210151
&mut payload,
1015310152
0,
@@ -10233,7 +10232,7 @@ mod tests {
1023310232
run_until_done(
1023410233
|| {
1023510234
fill_cell_payload(
10236-
&PinGuard::new(page),
10235+
&PinGuard::new(page.clone()),
1023710236
Some(0),
1023810237
&mut payload,
1023910238
0,
@@ -10639,7 +10638,7 @@ mod tests {
1063910638
run_until_done(
1064010639
|| {
1064110640
fill_cell_payload(
10642-
&PinGuard::new(page),
10641+
&PinGuard::new(page.clone()),
1064310642
Some(cell_idx as i64),
1064410643
&mut payload,
1064510644
cell_idx as usize,

core/storage/page_cache.rs

Lines changed: 47 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,9 @@ pub struct PageCacheKey(usize);
1818

1919
const CLEAR: u8 = 0;
2020
const REF_MAX: u8 = 3;
21-
const HOT_PAGE_THRESHOLD: u64 = 5;
21+
22+
/// semi-arbitrary heuristic to determine pages which are frequently touched.
23+
const HOT_PAGE_THRESHOLD: u64 = 6;
2224

2325
/// An entry in the page cache.
2426
///
@@ -186,6 +188,9 @@ impl PageCache {
186188
}
187189

188190
#[inline]
191+
/// Returns true if the page qualifies to be spilled to disk.
192+
///
193+
/// Does not enforce any hot-page policy; callers can layer that separately.
189194
fn can_spill_page(p: &PageRef) -> bool {
190195
p.is_dirty()
191196
&& !p.is_pinned()
@@ -198,30 +203,51 @@ impl PageCache {
198203
.as_ref()
199204
.is_some_and(|c| c.overflow_cells.is_empty())
200205
&& p.get().id.ne(&DatabaseHeader::PAGE_ID) // never spill page 1
201-
// dirty_gen is increased every time `mark_dirty` is called on a PageRef, don't spill hot pages
202-
&& p.dirty_gen() <= HOT_PAGE_THRESHOLD
203206
}
204207

205-
/// Collect candidates for spilling to disk when cache under pressure
208+
/// Collect candidates for spilling to disk when cache under pressure.
206209
pub fn compute_spill_candidates(&mut self) -> Vec<PinGuard> {
207210
const SPILL_BATCH: usize = 128;
208211
if self.len() == 0 || self.clock_hand.is_null() {
209212
return Vec::new();
210213
}
211-
let mut out = Vec::with_capacity(SPILL_BATCH);
214+
// Collect all spillable pages and sort by a composite key:
215+
// cold first (dirty_gen <= HOT_PAGE_THRESHOLD), then lower ref_bit,
216+
// then lower dirty_gen, then lower page id. This keeps the coldest,
217+
// safest pages at the front while still allowing hot pages to be chosen
218+
// when we are fully saturated.
219+
let mut candidates: Vec<(bool, u8, u64, usize, PinGuard)> = Vec::new();
212220
let start = self.clock_hand;
213221
let mut ptr = start;
214222
loop {
215223
let entry = unsafe { &mut *ptr };
216224
let page = &entry.page;
217-
// On the first pass, only spill if ref_bit is CLEAR, so that we can ensure we spill the coldest pages.
218-
// For the second pass, we can be more lenient and accept pages with ref_bit < REF_MAX.
219-
if Self::can_spill_page(page) {
220-
out.push(PinGuard::new(page.clone()));
221-
if out.len() >= SPILL_BATCH {
225+
if !Self::can_spill_page(page) {
226+
let mut cur = unsafe { self.queue.cursor_mut_from_ptr(ptr) };
227+
cur.move_next();
228+
if let Some(next) = cur.get() {
229+
ptr = next as *const _ as *mut PageCacheEntry;
230+
} else if let Some(front) = self.queue.front_mut().get() {
231+
ptr = front as *const _ as *mut PageCacheEntry;
232+
} else {
233+
break;
234+
}
235+
if ptr == start {
222236
break;
223237
}
238+
continue;
224239
}
240+
241+
let gen = page.dirty_gen();
242+
let is_hot = gen > HOT_PAGE_THRESHOLD;
243+
let meta = (
244+
is_hot,
245+
entry.ref_bit,
246+
gen,
247+
page.get().id,
248+
PinGuard::new(page.clone()),
249+
);
250+
candidates.push(meta);
225251
let mut cur = unsafe { self.queue.cursor_mut_from_ptr(ptr) };
226252
cur.move_next();
227253
if let Some(next) = cur.get() {
@@ -235,7 +261,15 @@ impl PageCache {
235261
break;
236262
}
237263
}
238-
out
264+
candidates.sort_by_key(|(is_hot, ref_bit, dirty_gen, id, _)| {
265+
(*is_hot, *ref_bit, *dirty_gen, *id)
266+
});
267+
268+
candidates
269+
.into_iter()
270+
.take(SPILL_BATCH)
271+
.map(|(_, _, _, _, pg)| pg)
272+
.collect()
239273
}
240274

241275
#[inline]
@@ -796,7 +830,7 @@ mod tests {
796830

797831
// Inserting same page instance should return KeyExists error
798832
let result = cache.insert(key1, page1_v2.clone());
799-
assert_eq!(result, Err(CacheError::KeyExists));
833+
assert!(matches!(result, Err(CacheError::KeyExists)));
800834
assert_eq!(cache.len(), 1);
801835

802836
// Verify the page is still accessible
@@ -957,7 +991,7 @@ mod tests {
957991
let page3 = page_with_content(3);
958992
let result = cache.insert(key3, page3);
959993

960-
assert_eq!(result, Err(CacheError::Full));
994+
assert!(matches!(result, Err(CacheError::Full)));
961995
assert_eq!(cache.len(), 2);
962996
cache.verify_cache_integrity();
963997
}

0 commit comments

Comments
 (0)