UPSTREAM: mm: convert collapse_huge_page() to use a folio

Replace three calls to compound_head() with one.

Link: https://lkml.kernel.org/r/20231211162214.2146080-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 5432726848)
Bug: 313807618
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Change-Id: I0d5049d53b0ca4413b69123b99205772f4e1342a
This commit is contained in:
Matthew Wilcox (Oracle) 2023-12-11 16:22:13 +00:00 committed by Kalesh Singh
parent 297debad09
commit cfdfb5e043

View File

@@ -1087,6 +1087,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, _pmd; pmd_t *pmd, _pmd;
pte_t *pte; pte_t *pte;
pgtable_t pgtable; pgtable_t pgtable;
struct folio *folio;
struct page *hpage; struct page *hpage;
spinlock_t *pmd_ptl, *pte_ptl; spinlock_t *pmd_ptl, *pte_ptl;
int result = SCAN_FAIL; int result = SCAN_FAIL;
@@ -1209,13 +1210,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (unlikely(result != SCAN_SUCCEED)) if (unlikely(result != SCAN_SUCCEED))
goto out_up_write; goto out_up_write;
folio = page_folio(hpage);
/* /*
* spin_lock() below is not the equivalent of smp_wmb(), but * The smp_wmb() inside __folio_mark_uptodate() ensures the
* the smp_wmb() inside __SetPageUptodate() can be reused to * copy_huge_page writes become visible before the set_pmd_at()
* avoid the copy_huge_page writes to become visible after * write.
* the set_pmd_at() write.
*/ */
__SetPageUptodate(hpage); __folio_mark_uptodate(folio);
pgtable = pmd_pgtable(_pmd); pgtable = pmd_pgtable(_pmd);
_pmd = mk_huge_pmd(hpage, vma->vm_page_prot); _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
@@ -1223,8 +1224,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
spin_lock(pmd_ptl); spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd)); BUG_ON(!pmd_none(*pmd));
page_add_new_anon_rmap(hpage, vma, address); folio_add_new_anon_rmap(folio, vma, address);
lru_cache_add_inactive_or_unevictable(hpage, vma); folio_add_lru_vma(folio, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable); pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd); set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd); update_mmu_cache_pmd(vma, address, pmd);