Commit 164ee5c

fyin1 authored and akpm00 committed
filemap: add filemap_map_folio_range()
filemap_map_folio_range() maps a partial or full folio. Compared to the original filemap_map_pages(), it updates the refcount once per folio instead of once per page, which gives a minor performance improvement for large folios.

With a will-it-scale.page_fault3-like app (the file write fault test changed to a read fault test; submitted to will-it-scale upstream at [1]), this showed a 2% performance gain on a 48C/96T Cascade Lake test box with 96 processes running against xfs.

[1]: antonblanchard/will-it-scale#37

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Yin Fengwei <[email protected]>
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent: 8c9b4a1
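To see the effect the commit message describes, here is a minimal userspace sketch of the refcounting change (illustrative only: toy_folio, map_per_page() and map_batched() are made-up names, not kernel code). The old path did one atomic increment per mapped page; the new path counts locally and issues a single folio_ref_add()-style atomic add per folio.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for a folio; only the refcount matters here. */
struct toy_folio {
	atomic_int refcount;
};

/* Old scheme: one atomic RMW per mapped page. */
static void map_per_page(struct toy_folio *folio, unsigned int nr_pages)
{
	for (unsigned int i = 0; i < nr_pages; i++)
		atomic_fetch_add(&folio->refcount, 1);
}

/* New scheme: count locally, then one atomic RMW per folio. */
static void map_batched(struct toy_folio *folio, unsigned int nr_pages)
{
	unsigned int ref_count = 0;

	for (unsigned int i = 0; i < nr_pages; i++)
		ref_count++;	/* the real code also skips poisoned pages and busy PTEs */
	atomic_fetch_add(&folio->refcount, ref_count);
}

int main(void)
{
	struct toy_folio folio = { .refcount = 1 };

	map_per_page(&folio, 16);	/* 16 atomic operations */
	map_batched(&folio, 16);	/* 1 atomic operation */
	printf("refcount = %d\n", atomic_load(&folio.refcount));	/* prints 33 */
	return 0;
}

With a 16-page folio, fault-around now touches the refcount cache line once instead of 16 times, which is roughly the saving behind the 2% gain reported above.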

File tree: 1 file changed (+55, -54 lines)


mm/filemap.c

Lines changed: 55 additions & 54 deletions
@@ -2168,16 +2168,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
 }
 EXPORT_SYMBOL(filemap_get_folios);
 
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
-	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-		return false;
-	if (index >= max)
-		return false;
-	return index < folio_next_index(folio) - 1;
-}
-
 /**
  * filemap_get_folios_contig - Get a batch of contiguous folios
  * @mapping:	The address_space to search
@@ -3436,10 +3426,10 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 	return false;
 }
 
-static struct folio *next_uptodate_page(struct folio *folio,
-				       struct address_space *mapping,
-				       struct xa_state *xas, pgoff_t end_pgoff)
+static struct folio *next_uptodate_folio(struct xa_state *xas,
+		struct address_space *mapping, pgoff_t end_pgoff)
 {
+	struct folio *folio = xas_next_entry(xas, end_pgoff);
 	unsigned long max_idx;
 
 	do {
@@ -3477,20 +3467,51 @@ static struct folio *next_uptodate_page(struct folio *folio,
 	return NULL;
 }
 
-static inline struct folio *first_map_page(struct address_space *mapping,
-					   struct xa_state *xas,
-					   pgoff_t end_pgoff)
+/*
+ * Map page range [start_page, start_page + nr_pages) of folio.
+ * start_page is gotten from start by folio_page(folio, start)
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+			struct folio *folio, unsigned long start,
+			unsigned long addr, unsigned int nr_pages)
 {
-	return next_uptodate_page(xas_find(xas, end_pgoff),
-				  mapping, xas, end_pgoff);
-}
+	vm_fault_t ret = 0;
+	struct vm_area_struct *vma = vmf->vma;
+	struct file *file = vma->vm_file;
+	struct page *page = folio_page(folio, start);
+	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+	unsigned int ref_count = 0, count = 0;
 
-static inline struct folio *next_map_page(struct address_space *mapping,
-					  struct xa_state *xas,
-					  pgoff_t end_pgoff)
-{
-	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
-				  mapping, xas, end_pgoff);
+	do {
+		if (PageHWPoison(page))
+			continue;
+
+		if (mmap_miss > 0)
+			mmap_miss--;
+
+		/*
+		 * NOTE: If there're PTE markers, we'll leave them to be
+		 * handled in the specific fault path, and it'll prohibit the
+		 * fault-around logic.
+		 */
+		if (!pte_none(*vmf->pte))
+			continue;
+
+		if (vmf->address == addr)
+			ret = VM_FAULT_NOPAGE;
+
+		ref_count++;
+		do_set_pte(vmf, page, addr);
+		update_mmu_cache(vma, addr, vmf->pte);
+	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+	/* Restore the vmf->pte */
+	vmf->pte -= nr_pages;
+
+	folio_ref_add(folio, ref_count);
+	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
 }
 
 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
@@ -3503,12 +3524,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
-	struct page *page;
-	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
+	int nr_pages = 0;
 
 	rcu_read_lock();
-	folio = first_map_page(mapping, &xas, end_pgoff);
+	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
 	if (!folio)
 		goto out;
 
@@ -3525,17 +3545,13 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		goto out;
 	}
 	do {
-again:
-		page = folio_file_page(folio, xas.xa_index);
-		if (PageHWPoison(page))
-			goto unlock;
-
-		if (mmap_miss > 0)
-			mmap_miss--;
+		unsigned long end;
 
 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		vmf->pte += xas.xa_index - last_pgoff;
 		last_pgoff = xas.xa_index;
+		end = folio->index + folio_nr_pages(folio) - 1;
+		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
 		/*
 		 * NOTE: If there're PTE markers, we'll leave them to be
@@ -3545,32 +3561,17 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		if (!pte_none(ptep_get(vmf->pte)))
 			goto unlock;
 
-		/* We're about to handle the fault */
-		if (vmf->address == addr)
-			ret = VM_FAULT_NOPAGE;
+		ret |= filemap_map_folio_range(vmf, folio,
+				xas.xa_index - folio->index, addr, nr_pages);
 
-		do_set_pte(vmf, page, addr);
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, addr, vmf->pte);
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			folio_ref_inc(folio);
-			goto again;
-		}
-		folio_unlock(folio);
-		continue;
 unlock:
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			goto again;
-		}
 		folio_unlock(folio);
 		folio_put(folio);
-	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+		folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+	} while (folio);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
-	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 	return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
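As a sanity check on the range arithmetic introduced in filemap_map_pages() above, here is a small userspace sketch (the MIN macro and the sample numbers are illustrative, not part of the patch). With a 16-page folio starting at page-cache index 32, a fault-around window ending at pgoff 40, and the XArray cursor at index 34, the call maps 7 pages, i.e. pages [2, 9) of the folio:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long folio_index = 32;	/* folio->index: first pgoff of the folio */
	unsigned long folio_nr = 16;	/* folio_nr_pages(folio) */
	unsigned long xa_index = 34;	/* xas.xa_index: current walk position */
	unsigned long end_pgoff = 40;	/* last pgoff of the fault-around window */

	/* mirrors: end = folio->index + folio_nr_pages(folio) - 1; */
	unsigned long end = folio_index + folio_nr - 1;
	/* mirrors: nr_pages = min(end, end_pgoff) - xas.xa_index + 1; */
	unsigned long nr_pages = MIN(end, end_pgoff) - xa_index + 1;
	/* mirrors the 'start' argument: xas.xa_index - folio->index */
	unsigned long start = xa_index - folio_index;

	/* prints: map pages [2, 9) of the folio (7 pages) */
	printf("map pages [%lu, %lu) of the folio (%lu pages)\n",
	       start, start + nr_pages, nr_pages);
	return 0;
}

Note how min(end, end_pgoff) clips the range to whichever ends first, the folio or the fault-around window, so filemap_map_folio_range() never walks past either.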
