mirror of
https://github.com/nxp-imx/linux-imx.git
synced 2025-07-06 17:35:20 +02:00
khugepaged: convert alloc_charge_hpage to alloc_charge_folio
[ Upstream commit d5ab50b941
] Both callers want to deal with a folio, so return a folio from this function. Link: https://lkml.kernel.org/r/20240403171838.1445826-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Stable-dep-of: 37f0b47c51
("mm: khugepaged: fix the arguments order in khugepaged_collapse_file trace point") Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
281a0312ce
commit
03e36dba00
|
@ -1041,7 +1041,7 @@ out:
|
|||
return result;
|
||||
}
|
||||
|
||||
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
|
||||
static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
|
||||
struct collapse_control *cc)
|
||||
{
|
||||
gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
|
||||
|
@ -1051,7 +1051,7 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
|
|||
|
||||
folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
|
||||
if (!folio) {
|
||||
*hpage = NULL;
|
||||
*foliop = NULL;
|
||||
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
|
||||
return SCAN_ALLOC_HUGE_PAGE_FAIL;
|
||||
}
|
||||
|
@ -1059,13 +1059,13 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
|
|||
count_vm_event(THP_COLLAPSE_ALLOC);
|
||||
if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
|
||||
folio_put(folio);
|
||||
*hpage = NULL;
|
||||
*foliop = NULL;
|
||||
return SCAN_CGROUP_CHARGE_FAIL;
|
||||
}
|
||||
|
||||
count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
|
||||
|
||||
*hpage = folio_page(folio, 0);
|
||||
*foliop = folio;
|
||||
return SCAN_SUCCEED;
|
||||
}
|
||||
|
||||
|
@ -1094,7 +1094,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
|
|||
*/
|
||||
mmap_read_unlock(mm);
|
||||
|
||||
result = alloc_charge_hpage(&hpage, mm, cc);
|
||||
result = alloc_charge_folio(&folio, mm, cc);
|
||||
hpage = &folio->page;
|
||||
if (result != SCAN_SUCCEED)
|
||||
goto out_nolock;
|
||||
|
||||
|
@ -1197,7 +1198,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
|
|||
if (unlikely(result != SCAN_SUCCEED))
|
||||
goto out_up_write;
|
||||
|
||||
folio = page_folio(hpage);
|
||||
/*
|
||||
* The smp_wmb() inside __folio_mark_uptodate() ensures the
|
||||
* copy_huge_page writes become visible before the set_pmd_at()
|
||||
|
@ -1786,7 +1786,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
|
|||
struct page *hpage;
|
||||
struct page *page;
|
||||
struct page *tmp;
|
||||
struct folio *folio;
|
||||
struct folio *folio, *new_folio;
|
||||
pgoff_t index = 0, end = start + HPAGE_PMD_NR;
|
||||
LIST_HEAD(pagelist);
|
||||
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
|
||||
|
@ -1797,7 +1797,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
|
|||
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
|
||||
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
|
||||
|
||||
result = alloc_charge_hpage(&hpage, mm, cc);
|
||||
result = alloc_charge_folio(&new_folio, mm, cc);
|
||||
hpage = &new_folio->page;
|
||||
if (result != SCAN_SUCCEED)
|
||||
goto out;
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user