Merge 5f029be65d ("mm: khugepaged: fix the arguments order in khugepaged_collapse_file trace point") into android15-6.6-lts

Steps on the way to 6.6.59

Change-Id: I2565078655dd6ea7e7f820632289e0be42ee290d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2024-12-05 08:55:19 +00:00
commit 4c172ac049
2 changed files with 56 additions and 65 deletions
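The trace point fix folded in here addresses an argument-order mismatch: the old TP_PROTO (and the call in collapse_file()) used the order (index, is_shmem, addr, ...) while TP_ARGS forwarded (index, addr, is_shmem, ...). Because bool and unsigned long convert implicitly, the swap compiled silently and only corrupted the recorded values. A minimal userspace sketch of that failure mode (plain C, not the kernel TRACE_EVENT machinery; record_collapse() and emit_collapse() are hypothetical stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the trace probe: declared order is (addr, is_shmem). */
    static void record_collapse(unsigned long addr, bool is_shmem)
    {
            printf("addr=0x%lx is_shmem=%d\n", addr, is_shmem);
    }

    /*
     * Stand-in for a call site that forwards the two values in the
     * opposite order, mirroring the old TP_PROTO/TP_ARGS mismatch.
     * bool and unsigned long convert into each other implicitly, so
     * this compiles without complaint at default warning levels.
     */
    static void emit_collapse(unsigned long addr, bool is_shmem)
    {
            record_collapse(is_shmem, addr);        /* swapped */
    }

    int main(void)
    {
            emit_collapse(0x7f0000400000UL, true);
            /*
             * Prints "addr=0x1 is_shmem=1": the real address is lost
             * and the recorded flag merely says the address was
             * non-zero, which is what the corrected argument order
             * prevents.
             */
            return 0;
    }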

diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h

@@ -207,10 +207,10 @@ TRACE_EVENT(mm_khugepaged_scan_file,
 );
 
 TRACE_EVENT(mm_khugepaged_collapse_file,
-	TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
-		 bool is_shmem, unsigned long addr, struct file *file,
+	TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
+		 unsigned long addr, bool is_shmem, struct file *file,
 		 int nr, int result),
-	TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
+	TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
 	TP_STRUCT__entry(
 		__field(struct mm_struct *, mm)
 		__field(unsigned long, hpfn)
@@ -224,7 +224,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
 
 	TP_fast_assign(
 		__entry->mm = mm;
-		__entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
+		__entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
 		__entry->index = index;
 		__entry->addr = addr;
 		__entry->is_shmem = is_shmem;
@@ -233,7 +233,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
 		__entry->result = result;
 	),
 
-	TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
+	TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s",
		  __entry->mm,
		  __entry->hpfn,
		  __entry->index,

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c

@@ -886,20 +886,6 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
-				      nodemask_t *nmask)
-{
-	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
-
-	if (unlikely(!*folio)) {
-		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-		return false;
-	}
-
-	count_vm_event(THP_COLLAPSE_ALLOC);
-	return true;
-}
-
 /*
  * If mmap_lock temporarily dropped, revalidate vma
  * before taking mmap_lock.
@@ -1054,7 +1040,7 @@ out:
 	return result;
 }
 
-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
 			      struct collapse_control *cc)
 {
 	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
@@ -1062,20 +1048,23 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
-	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
-		*hpage = NULL;
+	folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+	if (!folio) {
+		*foliop = NULL;
+		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 	}
 
+	count_vm_event(THP_COLLAPSE_ALLOC);
 	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
 		folio_put(folio);
-		*hpage = NULL;
+		*foliop = NULL;
 		return SCAN_CGROUP_CHARGE_FAIL;
 	}
 	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
 
-	*hpage = folio_page(folio, 0);
+	*foliop = folio;
 	return SCAN_SUCCEED;
 }
@@ -1104,7 +1093,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	 */
 	mmap_read_unlock(mm);
 
-	result = alloc_charge_hpage(&hpage, mm, cc);
+	result = alloc_charge_folio(&folio, mm, cc);
+	hpage = &folio->page;
 	if (result != SCAN_SUCCEED)
 		goto out_nolock;
 
@@ -1210,7 +1200,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (unlikely(result != SCAN_SUCCEED))
 		goto out_up_write;
 
-	folio = page_folio(hpage);
 	/*
 	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
 	 * copy_huge_page writes become visible before the set_pmd_at()
@@ -1791,29 +1780,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			 struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
-	struct page *hpage;
 	struct page *page;
-	struct page *tmp;
-	struct folio *folio;
+	struct page *tmp, *dst;
+	struct folio *folio, *new_folio;
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
 	int nr_none = 0, result = SCAN_SUCCEED;
 	bool is_shmem = shmem_file(file);
-	int nr = 0;
 
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
-	result = alloc_charge_hpage(&hpage, mm, cc);
+	result = alloc_charge_folio(&new_folio, mm, cc);
 	if (result != SCAN_SUCCEED)
 		goto out;
 
-	__SetPageLocked(hpage);
+	__folio_set_locked(new_folio);
 	if (is_shmem)
-		__SetPageSwapBacked(hpage);
-	hpage->index = start;
-	hpage->mapping = mapping;
+		__folio_set_swapbacked(new_folio);
+	new_folio->index = start;
+	new_folio->mapping = mapping;
 
 	/*
 	 * Ensure we have slots for all the pages in the range. This is
@@ -2046,20 +2033,24 @@ xa_unlocked:
 	 * The old pages are locked, so they won't change anymore.
 	 */
 	index = start;
+	dst = folio_page(new_folio, 0);
 	list_for_each_entry(page, &pagelist, lru) {
 		while (index < page->index) {
-			clear_highpage(hpage + (index % HPAGE_PMD_NR));
+			clear_highpage(dst);
 			index++;
+			dst++;
 		}
-		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
+		if (copy_mc_highpage(dst, page) > 0) {
 			result = SCAN_COPY_MC;
 			goto rollback;
 		}
 		index++;
+		dst++;
 	}
 	while (index < end) {
-		clear_highpage(hpage + (index % HPAGE_PMD_NR));
+		clear_highpage(dst);
 		index++;
+		dst++;
 	}
 
 	if (nr_none) {
@@ -2087,16 +2078,17 @@ xa_unlocked:
 		}
 
 		/*
-		 * If userspace observed a missing page in a VMA with a MODE_MISSING
-		 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
-		 * page. If so, we need to roll back to avoid suppressing such an
-		 * event. Since wp/minor userfaultfds don't give userspace any
-		 * guarantees that the kernel doesn't fill a missing page with a zero
-		 * page, so they don't matter here.
+		 * If userspace observed a missing page in a VMA with
+		 * a MODE_MISSING userfaultfd, then it might expect a
+		 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
+		 * roll back to avoid suppressing such an event. Since
+		 * wp/minor userfaultfds don't give userspace any
+		 * guarantees that the kernel doesn't fill a missing
+		 * page with a zero page, so they don't matter here.
 		 *
-		 * Any userfaultfds registered after this point will not be able to
-		 * observe any missing pages due to the previously inserted retry
-		 * entries.
+		 * Any userfaultfds registered after this point will
+		 * not be able to observe any missing pages due to the
+		 * previously inserted retry entries.
 		 */
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
 			if (userfaultfd_missing(vma)) {
@@ -2121,33 +2113,32 @@ immap_locked:
 		xas_lock_irq(&xas);
 	}
 
-	folio = page_folio(hpage);
-	nr = folio_nr_pages(folio);
 	if (is_shmem)
-		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
+		__lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
 	else
-		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
+		__lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
 
 	if (nr_none) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
+		__lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
 		/* nr_none is always 0 for non-shmem. */
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
+		__lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
 	}
 
 	/*
-	 * Mark hpage as uptodate before inserting it into the page cache so
-	 * that it isn't mistaken for an fallocated but unwritten page.
+	 * Mark new_folio as uptodate before inserting it into the
+	 * page cache so that it isn't mistaken for an fallocated but
+	 * unwritten page.
 	 */
-	folio_mark_uptodate(folio);
-	folio_ref_add(folio, HPAGE_PMD_NR - 1);
+	folio_mark_uptodate(new_folio);
+	folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
 	if (is_shmem)
-		folio_mark_dirty(folio);
-	folio_add_lru(folio);
+		folio_mark_dirty(new_folio);
+	folio_add_lru(new_folio);
 
 	/* Join all the small entries into a single multi-index entry. */
 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, folio);
+	xas_store(&xas, new_folio);
 	WARN_ON_ONCE(xas_error(&xas));
 	xas_unlock_irq(&xas);
@@ -2158,7 +2149,7 @@ immap_locked:
 	retract_page_tables(mapping, start);
 	if (cc && !cc->is_khugepaged)
 		result = SCAN_PTE_MAPPED_HUGEPAGE;
-	folio_unlock(folio);
+	folio_unlock(new_folio);
 
 	/*
 	 * The collapse has succeeded, so free the old pages.
@@ -2203,13 +2194,13 @@ rollback:
 		smp_mb();
 	}
 
-	hpage->mapping = NULL;
+	new_folio->mapping = NULL;
 
-	unlock_page(hpage);
-	put_page(hpage);
+	folio_unlock(new_folio);
+	folio_put(new_folio);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
+	trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
 	return result;
 }