Commit 9f21fb0

Fix tests and improve algo to collect spillable pages
1 parent b08e469

2 files changed: +53 −19 lines

core/storage/btree.rs
Lines changed: 6 additions & 6 deletions

@@ -8034,7 +8034,7 @@ mod tests {
                 left_child_page, ..
             }) => {
                 match cursor.read_page(left_child_page as i64).unwrap() {
-                    IOResult::Done((child_page, c)) => {
+                    IOResult::Done((child_page, _c)) => {
                         while child_page.is_locked() {
                             pager.io.step().unwrap();
                         }
@@ -9692,7 +9692,7 @@ mod tests {
         run_until_done(
             || {
                 fill_cell_payload(
-                    &PinGuard::new(page),
+                    &PinGuard::new(page.clone()),
                     Some(i as i64),
                     &mut payload,
                     cell_idx,
@@ -9774,7 +9774,7 @@ mod tests {
         run_until_done(
             || {
                 fill_cell_payload(
-                    &PinGuard::new(page),
+                    &PinGuard::new(page.clone()),
                     Some(i),
                     &mut payload,
                     cell_idx,
@@ -10147,7 +10147,7 @@ mod tests {
         run_until_done(
             || {
                 fill_cell_payload(
-                    &PinGuard::new(page),
+                    &PinGuard::new(page.clone()),
                     Some(0),
                     &mut payload,
                     0,
@@ -10233,7 +10233,7 @@ mod tests {
         run_until_done(
             || {
                 fill_cell_payload(
-                    &PinGuard::new(page),
+                    &PinGuard::new(page.clone()),
                     Some(0),
                     &mut payload,
                     0,
@@ -10639,7 +10639,7 @@ mod tests {
         run_until_done(
             || {
                 fill_cell_payload(
-                    &PinGuard::new(page),
+                    &PinGuard::new(page.clone()),
                     Some(cell_idx as i64),
                     &mut payload,
                     cell_idx as usize,
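
The btree.rs test changes are mechanical: fill_cell_payload now receives a PinGuard built from page.clone() rather than page, so each retry of the run_until_done closure can construct a fresh guard without moving the test's original page binding. A minimal sketch of the ownership issue, using hypothetical stand-ins for PageRef/PinGuard (the real types live in core/storage and differ in detail):

    use std::sync::Arc;

    // Hypothetical stand-ins: the crate's real PageRef/PinGuard live in
    // core/storage and differ in detail.
    struct Page {
        id: usize,
    }

    type PageRef = Arc<Page>;

    struct PinGuard(PageRef);

    impl PinGuard {
        // Takes the handle by value: without a clone at the call site, the
        // caller's binding would be moved and unusable on the next retry.
        fn new(page: PageRef) -> Self {
            PinGuard(page)
        }
    }

    fn main() {
        let page: PageRef = Arc::new(Page { id: 42 });
        for attempt in 0..3 {
            // page.clone() bumps the Arc refcount; `page` stays live for the
            // next iteration, mirroring the fill_cell_payload fix.
            let guard = PinGuard::new(page.clone());
            println!("attempt {attempt}: pinned page {}", guard.0.id);
        }
    }

Assuming the handle is Arc-backed as sketched, the clone is a refcount bump, so the fix costs nothing meaningful per retry.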

core/storage/page_cache.rs
Lines changed: 47 additions & 13 deletions

@@ -18,7 +18,9 @@ pub struct PageCacheKey(usize);
 
 const CLEAR: u8 = 0;
 const REF_MAX: u8 = 3;
-const HOT_PAGE_THRESHOLD: u64 = 5;
+
+/// semi-arbitrary heuristic to determine pages which are frequently touched.
+const HOT_PAGE_THRESHOLD: u64 = 6;
 
 /// An entry in the page cache.
 ///
@@ -186,6 +188,9 @@ impl PageCache {
     }
 
     #[inline]
+    /// Returns true if the page qualifies to be spilled to disk.
+    ///
+    /// Does not enforce any hot-page policy; callers can layer that separately.
     fn can_spill_page(p: &PageRef) -> bool {
         p.is_dirty()
             && !p.is_pinned()
@@ -198,30 +203,51 @@ impl PageCache {
             .as_ref()
             .is_some_and(|c| c.overflow_cells.is_empty())
             && p.get().id.ne(&DatabaseHeader::PAGE_ID) // never spill page 1
-            // dirty_gen is increased every time `mark_dirty` is called on a PageRef, dont spill hot pages
-            && p.dirty_gen() <= HOT_PAGE_THRESHOLD
     }
 
-    /// Collect candidates for spilling to disk when cache under pressure
+    /// Collect candidates for spilling to disk when cache under pressure.
     pub fn compute_spill_candidates(&mut self) -> Vec<PinGuard> {
         const SPILL_BATCH: usize = 128;
         if self.len() == 0 || self.clock_hand.is_null() {
             return Vec::new();
         }
-        let mut out = Vec::with_capacity(SPILL_BATCH);
+        // Collect all spillable pages and sort by a composite key:
+        // cold first (dirty_gen <= HOT_PAGE_THRESHOLD), then lower ref_bit,
+        // then lower dirty_gen, then lower page id. This keeps the coldest,
+        // safest pages at the front while still allowing hot pages to be chosen
+        // when we are fully saturated.
+        let mut candidates: Vec<(bool, u8, u64, usize, PinGuard)> = Vec::new();
         let start = self.clock_hand;
         let mut ptr = start;
         loop {
             let entry = unsafe { &mut *ptr };
             let page = &entry.page;
-            // On the first pass, only spill if ref_bit is CLEAR, we that we can ensure spilling the coldest pages.
-            // For the second pass, we can be more lenient and accept pages with ref_bit < REF_MAX.
-            if Self::can_spill_page(page) {
-                out.push(PinGuard::new(page.clone()));
-                if out.len() >= SPILL_BATCH {
+            if !Self::can_spill_page(page) {
+                let mut cur = unsafe { self.queue.cursor_mut_from_ptr(ptr) };
+                cur.move_next();
+                if let Some(next) = cur.get() {
+                    ptr = next as *const _ as *mut PageCacheEntry;
+                } else if let Some(front) = self.queue.front_mut().get() {
+                    ptr = front as *const _ as *mut PageCacheEntry;
+                } else {
+                    break;
+                }
+                if ptr == start {
                     break;
                 }
+                continue;
             }
+
+            let gen = page.dirty_gen();
+            let is_hot = gen > HOT_PAGE_THRESHOLD;
+            let meta = (
+                is_hot,
+                entry.ref_bit,
+                gen,
+                page.get().id,
+                PinGuard::new(page.clone()),
+            );
+            candidates.push(meta);
             let mut cur = unsafe { self.queue.cursor_mut_from_ptr(ptr) };
             cur.move_next();
             if let Some(next) = cur.get() {
@@ -235,7 +261,15 @@ impl PageCache {
                 break;
             }
         }
-        out
+        candidates.sort_by_key(|(is_hot, ref_bit, dirty_gen, id, _)| {
+            (*is_hot, *ref_bit, *dirty_gen, *id)
+        });
+
+        candidates
+            .into_iter()
+            .take(SPILL_BATCH)
+            .map(|(_, _, _, _, pg)| pg)
+            .collect()
     }
 
     #[inline]
@@ -796,7 +830,7 @@ mod tests {
 
         // Inserting same page instance should return KeyExists error
         let result = cache.insert(key1, page1_v2.clone());
-        assert_eq!(result, Err(CacheError::KeyExists));
+        assert!(matches!(result, Err(CacheError::KeyExists)));
         assert_eq!(cache.len(), 1);
 
         // Verify the page is still accessible
@@ -957,7 +991,7 @@ mod tests {
         let page3 = page_with_content(3);
         let result = cache.insert(key3, page3);
 
-        assert_eq!(result, Err(CacheError::Full));
+        assert!(matches!(result, Err(CacheError::Full)));
         assert_eq!(cache.len(), 2);
         cache.verify_cache_integrity();
     }
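
Two things are worth noting about the page_cache.rs change. The assertion rewrites from assert_eq! to assert!(matches!(..)) keep the tests compiling even if CacheError does not implement PartialEq; the diff doesn't show the enum, so that motivation is inferred. More substantively, the hot-page filter moves out of can_spill_page and into the ranking: hot pages are demoted to the back of the ordering rather than excluded outright, so a fully saturated cache can still fill a spill batch. A self-contained sketch of that ranking, using a simplified Candidate stand-in rather than the crate's real PageCacheEntry/PinGuard types:

    /// Simplified stand-in for a cache entry; the real code ranks
    /// (is_hot, ref_bit, dirty_gen, page_id) tuples built from PageCacheEntry.
    struct Candidate {
        id: usize,
        ref_bit: u8,
        dirty_gen: u64,
    }

    const HOT_PAGE_THRESHOLD: u64 = 6;
    const SPILL_BATCH: usize = 128;

    fn rank_spill_candidates(mut cands: Vec<Candidate>) -> Vec<Candidate> {
        // Cold pages (dirty_gen <= threshold) sort before hot ones because
        // `false < true`; ties fall through to ref_bit, then dirty_gen, then id.
        cands.sort_by_key(|c| (c.dirty_gen > HOT_PAGE_THRESHOLD, c.ref_bit, c.dirty_gen, c.id));
        cands.truncate(SPILL_BATCH);
        cands
    }

    fn main() {
        let ranked = rank_spill_candidates(vec![
            Candidate { id: 7, ref_bit: 0, dirty_gen: 9 }, // hot: sorts last
            Candidate { id: 3, ref_bit: 1, dirty_gen: 2 }, // cold but recently referenced
            Candidate { id: 5, ref_bit: 0, dirty_gen: 4 }, // coldest: sorts first
        ]);
        assert_eq!(ranked.iter().map(|c| c.id).collect::<Vec<_>>(), vec![5, 3, 7]);
    }

Because false sorts before true, the is_hot flag dominates the composite key and the remaining fields only break ties, which matches the intent described in the diff's comments.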
