/// Value of a cleared clock-algorithm reference bit.
const CLEAR: u8 = 0;
/// Upper bound for an entry's reference bit (saturates on repeated touches).
const REF_MAX: u8 = 3;

/// Semi-arbitrary heuristic to determine pages which are frequently touched.
/// A page whose `dirty_gen` exceeds this is treated as "hot" and is sorted to
/// the back of the spill-candidate list (see `compute_spill_candidates`).
const HOT_PAGE_THRESHOLD: u64 = 6;
2325/// An entry in the page cache.
2426///
@@ -186,6 +188,9 @@ impl PageCache {
186188 }
187189
188190 #[ inline]
191+ /// Returns true if the page qualifies to be spilled to disk.
192+ ///
193+ /// Does not enforce any hot-page policy; callers can layer that separately.
189194 fn can_spill_page ( p : & PageRef ) -> bool {
190195 p. is_dirty ( )
191196 && !p. is_pinned ( )
@@ -198,30 +203,51 @@ impl PageCache {
198203 . as_ref ( )
199204 . is_some_and ( |c| c. overflow_cells . is_empty ( ) )
200205 && p. get ( ) . id . ne ( & DatabaseHeader :: PAGE_ID ) // never spill page 1
201- // dirty_gen is increased every time `mark_dirty` is called on a PageRef, dont spill hot pages
202- && p. dirty_gen ( ) <= HOT_PAGE_THRESHOLD
203206 }
204207
205- /// Collect candidates for spilling to disk when cache under pressure
208+ /// Collect candidates for spilling to disk when cache under pressure.
206209 pub fn compute_spill_candidates ( & mut self ) -> Vec < PinGuard > {
207210 const SPILL_BATCH : usize = 128 ;
208211 if self . len ( ) == 0 || self . clock_hand . is_null ( ) {
209212 return Vec :: new ( ) ;
210213 }
211- let mut out = Vec :: with_capacity ( SPILL_BATCH ) ;
214+ // Collect all spillable pages and sort by a composite key:
215+ // cold first (dirty_gen <= HOT_PAGE_THRESHOLD), then lower ref_bit,
216+ // then lower dirty_gen, then lower page id. This keeps the coldest,
217+ // safest pages at the front while still allowing hot pages to be chosen
218+ // when we are fully saturated.
219+ let mut candidates: Vec < ( bool , u8 , u64 , usize , PinGuard ) > = Vec :: new ( ) ;
212220 let start = self . clock_hand ;
213221 let mut ptr = start;
214222 loop {
215223 let entry = unsafe { & mut * ptr } ;
216224 let page = & entry. page ;
217- // On the first pass, only spill if ref_bit is CLEAR, we that we can ensure spilling the coldest pages.
218- // For the second pass, we can be more lenient and accept pages with ref_bit < REF_MAX.
219- if Self :: can_spill_page ( page) {
220- out. push ( PinGuard :: new ( page. clone ( ) ) ) ;
221- if out. len ( ) >= SPILL_BATCH {
225+ if !Self :: can_spill_page ( page) {
226+ let mut cur = unsafe { self . queue . cursor_mut_from_ptr ( ptr) } ;
227+ cur. move_next ( ) ;
228+ if let Some ( next) = cur. get ( ) {
229+ ptr = next as * const _ as * mut PageCacheEntry ;
230+ } else if let Some ( front) = self . queue . front_mut ( ) . get ( ) {
231+ ptr = front as * const _ as * mut PageCacheEntry ;
232+ } else {
233+ break ;
234+ }
235+ if ptr == start {
222236 break ;
223237 }
238+ continue ;
224239 }
240+
241+ let gen = page. dirty_gen ( ) ;
242+ let is_hot = gen > HOT_PAGE_THRESHOLD ;
243+ let meta = (
244+ is_hot,
245+ entry. ref_bit ,
246+ gen,
247+ page. get ( ) . id ,
248+ PinGuard :: new ( page. clone ( ) ) ,
249+ ) ;
250+ candidates. push ( meta) ;
225251 let mut cur = unsafe { self . queue . cursor_mut_from_ptr ( ptr) } ;
226252 cur. move_next ( ) ;
227253 if let Some ( next) = cur. get ( ) {
@@ -235,7 +261,15 @@ impl PageCache {
235261 break ;
236262 }
237263 }
238- out
264+ candidates. sort_by_key ( |( is_hot, ref_bit, dirty_gen, id, _) | {
265+ ( * is_hot, * ref_bit, * dirty_gen, * id)
266+ } ) ;
267+
268+ candidates
269+ . into_iter ( )
270+ . take ( SPILL_BATCH )
271+ . map ( |( _, _, _, _, pg) | pg)
272+ . collect ( )
239273 }
240274
241275 #[ inline]
@@ -796,7 +830,7 @@ mod tests {
796830
797831 // Inserting same page instance should return KeyExists error
798832 let result = cache. insert ( key1, page1_v2. clone ( ) ) ;
799- assert_eq ! ( result, Err ( CacheError :: KeyExists ) ) ;
833+ assert ! ( matches! ( result, Err ( CacheError :: KeyExists ) ) ) ;
800834 assert_eq ! ( cache. len( ) , 1 ) ;
801835
802836 // Verify the page is still accessible
@@ -957,7 +991,7 @@ mod tests {
957991 let page3 = page_with_content ( 3 ) ;
958992 let result = cache. insert ( key3, page3) ;
959993
960- assert_eq ! ( result, Err ( CacheError :: Full ) ) ;
994+ assert ! ( matches! ( result, Err ( CacheError :: Full ) ) ) ;
961995 assert_eq ! ( cache. len( ) , 2 ) ;
962996 cache. verify_cache_integrity ( ) ;
963997 }
0 commit comments