Commit fda5b38

fyin1 authored and xen0n committed
filemap: Add filemap_map_folio_range()
filemap_map_folio_range() maps a partial or full folio. Compared to the original filemap_map_pages(), it updates the refcount once per folio instead of once per page, which gives a minor performance improvement for large folios.

With a will-it-scale.page_fault3-like app (the file write fault test changed to a read fault test; an attempt to upstream it to will-it-scale is at [1]), this gives a 2% performance gain on a 48C/96T Cascade Lake test box with 96 processes running against xfs.

[1]: antonblanchard/will-it-scale#37

Signed-off-by: Yin Fengwei <[email protected]>
Signed-off-by: Matthew Wilcox (Oracle) <[email protected]>
1 parent 70cafe0 commit fda5b38
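
To illustrate the batching the commit message describes, here is a minimal, self-contained C sketch (not kernel code; the names map_folio_range_sketch and folio_refcount are invented for illustration): the mapping loop counts references locally and folds them into the folio's refcount with a single atomic add, rather than taking one atomic reference per page.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for a folio's reference count (illustrative only). */
static atomic_int folio_refcount = 1;

/* Map nr_pages pages of one folio, taking all the references with a single
 * atomic add instead of one atomic increment per page. */
static void map_folio_range_sketch(unsigned int nr_pages)
{
	unsigned int ref_count = 0;

	for (unsigned int i = 0; i < nr_pages; i++) {
		/* ... a real implementation would install the PTE for page i here ... */
		ref_count++;	/* local counter, no atomic operation yet */
	}

	atomic_fetch_add(&folio_refcount, ref_count);	/* one update per folio */
}

int main(void)
{
	map_folio_range_sketch(16);	/* e.g. a 64KiB folio of 4KiB pages */
	printf("refcount = %d\n", atomic_load(&folio_refcount));
	return 0;
}

The real function in the diff below additionally skips hardware-poisoned pages and already-populated PTEs, so the final folio_ref_add() covers only the pages that were actually mapped.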

File tree

1 file changed (+54, -44 lines)


mm/filemap.c

Lines changed: 54 additions & 44 deletions
@@ -2202,16 +2202,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
 }
 EXPORT_SYMBOL(filemap_get_folios);
 
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
-	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-		return false;
-	if (index >= max)
-		return false;
-	return index < folio->index + folio_nr_pages(folio) - 1;
-}
-
 /**
  * filemap_get_folios_contig - Get a batch of contiguous folios
  * @mapping:	The address_space to search
@@ -3483,6 +3473,53 @@ static inline struct folio *next_map_page(struct address_space *mapping,
 				  mapping, xas, end_pgoff);
 }
 
+/*
+ * Map page range [start_page, start_page + nr_pages) of folio.
+ * start_page is gotten from start by folio_page(folio, start)
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+			struct folio *folio, unsigned long start,
+			unsigned long addr, unsigned int nr_pages)
+{
+	vm_fault_t ret = 0;
+	struct vm_area_struct *vma = vmf->vma;
+	struct file *file = vma->vm_file;
+	struct page *page = folio_page(folio, start);
+	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+	unsigned int ref_count = 0, count = 0;
+
+	do {
+		if (PageHWPoison(page))
+			continue;
+
+		if (mmap_miss > 0)
+			mmap_miss--;
+
+		/*
+		 * NOTE: If there're PTE markers, we'll leave them to be
+		 * handled in the specific fault path, and it'll prohibit the
+		 * fault-around logic.
+		 */
+		if (!pte_none(*vmf->pte))
+			continue;
+
+		if (vmf->address == addr)
+			ret = VM_FAULT_NOPAGE;
+
+		ref_count++;
+		do_set_pte(vmf, page, addr);
+		update_mmu_cache(vma, addr, vmf->pte);
+	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+	/* Restore the vmf->pte */
+	vmf->pte -= nr_pages;
+
+	folio_ref_add(folio, ref_count);
+	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
+}
+
 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 			     pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
@@ -3493,9 +3530,9 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
-	struct page *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
+	int nr_pages = 0;
 
 	rcu_read_lock();
 	folio = first_map_page(mapping, &xas, end_pgoff);
@@ -3510,45 +3547,18 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
-again:
-		page = folio_file_page(folio, xas.xa_index);
-		if (PageHWPoison(page))
-			goto unlock;
-
-		if (mmap_miss > 0)
-			mmap_miss--;
+		unsigned long end;
 
 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		vmf->pte += xas.xa_index - last_pgoff;
 		last_pgoff = xas.xa_index;
+		end = folio->index + folio_nr_pages(folio) - 1;
+		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
-		/*
-		 * NOTE: If there're PTE markers, we'll leave them to be
-		 * handled in the specific fault path, and it'll prohibit the
-		 * fault-around logic.
-		 */
-		if (!pte_none(*vmf->pte))
-			goto unlock;
+		ret |= filemap_map_folio_range(vmf, folio,
+				xas.xa_index - folio->index, addr, nr_pages);
+		xas.xa_index += nr_pages;
 
-		/* We're about to handle the fault */
-		if (vmf->address == addr)
-			ret = VM_FAULT_NOPAGE;
-
-		do_set_pte(vmf, page, addr);
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, addr, vmf->pte);
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			folio_ref_inc(folio);
-			goto again;
-		}
-		folio_unlock(folio);
-		continue;
-unlock:
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			goto again;
-		}
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
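
For readers following the reworked filemap_map_pages() loop, the nr_pages computation simply clamps the folio's last page index to the end of the fault-around window. A small standalone C sketch with made-up index values (plain unsigned longs standing in for pgoff_t; min_ul() stands in for the kernel's min()):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* made-up example values */
	unsigned long folio_index = 64;		/* first page index of the folio */
	unsigned long folio_nr_pages = 16;	/* pages in the folio */
	unsigned long xa_index = 70;		/* current index in the XArray walk */
	unsigned long end_pgoff = 75;		/* last index of the fault-around window */

	/* same arithmetic as the reworked loop */
	unsigned long end = folio_index + folio_nr_pages - 1;		/* 79 */
	unsigned long nr_pages = min_ul(end, end_pgoff) - xa_index + 1;	/* 75 - 70 + 1 = 6 */

	printf("map %lu pages starting at folio offset %lu\n",
	       nr_pages, xa_index - folio_index);
	return 0;
}

With those numbers the loop would call filemap_map_folio_range(vmf, folio, 6, addr, 6): six pages, starting at page offset 6 within the folio, handled with one call and one refcount update.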

0 commit comments
