Mirror of https://github.com/nxp-imx/linux-imx.git (synced 2025-07-07 18:05:21 +02:00)
mm/numa: no task_numa_fault() call if PMD is changed
commit fd8c35a929 upstream.

When handling a numa page fault, task_numa_fault() should be called by a process that restores the page table of the faulted folio, to avoid duplicated stats counting. Commit c5b5a3dd2c ("mm: thp: refactor NUMA fault handling") restructured do_huge_pmd_numa_page() and did not avoid the task_numa_fault() call in the second page table check after a numa migration failure. Fix it by making all !pmd_same() cases return immediately.

This issue can cause task_numa_fault() to be called more often than necessary and lead to unexpected numa balancing results (it is hard to tell whether the issue will cause a positive or negative performance impact, due to the duplicated numa fault counting).

Link: https://lkml.kernel.org/r/20240809145906.1513458-3-ziy@nvidia.com
Fixes: c5b5a3dd2c ("mm: thp: refactor NUMA fault handling")
Reported-by: "Huang, Ying" <ying.huang@intel.com>
Closes: https://lore.kernel.org/linux-mm/87zfqfw0yw.fsf@yhuang6-desk2.ccr.corp.intel.com/
Signed-off-by: Zi Yan <ziy@nvidia.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Yang Shi <shy828301@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent bb121128fd
commit c789a78151
mm/huge_memory.c
@@ -1504,7 +1504,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
 	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
 		spin_unlock(vmf->ptl);
-		goto out;
+		return 0;
 	}
 
 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1548,23 +1548,16 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	if (migrated) {
 		flags |= TNF_MIGRATED;
 		page_nid = target_nid;
-	} else {
-		flags |= TNF_MIGRATE_FAIL;
-		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
-		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
-			spin_unlock(vmf->ptl);
-			goto out;
-		}
-		goto out_map;
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
+		return 0;
 	}
 
-out:
-	if (page_nid != NUMA_NO_NODE)
-		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
-				flags);
-
-	return 0;
-
+	flags |= TNF_MIGRATE_FAIL;
+	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
+		spin_unlock(vmf->ptl);
+		return 0;
+	}
 out_map:
 	/* Restore the PMD */
 	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
@@ -1574,7 +1567,10 @@ out_map:
 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
 	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 	spin_unlock(vmf->ptl);
-	goto out;
+
+	if (page_nid != NUMA_NO_NODE)
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
+	return 0;
 }
 
 /*