mirror of git://git.yoctoproject.org/linux-yocto.git
mm: simplify thp_vma_allowable_order
Combine the three boolean arguments into one flags argument for
readability.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent dc6e0ae5b1
commit e0ffb29bc5
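At a glance, the calling convention before and after this change (both calls excerpted from the alloc_anon_folio() hunk in mm/memory.c below):

	/* Before: three positional booleans (smaps, in_pf, enforce_sysfs). */
	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
					  BIT(PMD_ORDER) - 1);

	/* After: one self-describing tva_flags word. */
	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);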
fs/proc/task_mmu.c
@@ -871,8 +871,8 @@ static int show_smap(struct seq_file *m, void *v)
 	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible: %8u\n",
-		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
-					      true, THP_ORDERS_ALL));
+		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
+			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
 
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
include/linux/huge_mm.h
@@ -81,8 +81,12 @@ extern struct kobj_attribute shmem_enabled_attr;
  */
 #define THP_ORDERS_ALL	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
 
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
-	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
+#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
+#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */
+
+#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 #define HPAGE_PMD_SHIFT PMD_SHIFT
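For illustration only, a minimal userspace sketch of the bit-flags idiom the new TVA_* constants enable; decode() and main() are hypothetical stand-ins, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	#define TVA_SMAPS		(1 << 0)
	#define TVA_IN_PF		(1 << 1)
	#define TVA_ENFORCE_SYSFS	(1 << 2)

	/* Mirrors how __thp_vma_allowable_orders() (see the mm/huge_memory.c
	 * hunk below) unpacks the flags word into the former booleans. */
	static void decode(unsigned long tva_flags)
	{
		bool smaps = tva_flags & TVA_SMAPS;
		bool in_pf = tva_flags & TVA_IN_PF;
		bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;

		printf("smaps=%d in_pf=%d enforce_sysfs=%d\n",
		       smaps, in_pf, enforce_sysfs);
	}

	int main(void)
	{
		decode(TVA_IN_PF | TVA_ENFORCE_SYSFS);	/* page-fault path */
		decode(TVA_SMAPS | TVA_ENFORCE_SYSFS);	/* smaps path */
		decode(0);				/* madvise_collapse() */
		return 0;
	}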
@@ -218,17 +222,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
 }
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags, bool smaps,
-					 bool in_pf, bool enforce_sysfs,
+					 unsigned long vm_flags,
+					 unsigned long tva_flags,
 					 unsigned long orders);
 
 /**
  * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
  * @vma: the vm area to check
  * @vm_flags: use these vm_flags instead of vma->vm_flags
- * @smaps: whether answer will be used for smaps file
- * @in_pf: whether answer will be used by page fault handler
- * @enforce_sysfs: whether sysfs config should be taken into account
+ * @tva_flags: Which TVA flags to honour
  * @orders: bitfield of all orders to consider
  *
  * Calculates the intersection of the requested hugepage orders and the allowed
@@ -241,12 +243,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
  */
 static inline
 unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-				       unsigned long vm_flags, bool smaps,
-				       bool in_pf, bool enforce_sysfs,
+				       unsigned long vm_flags,
+				       unsigned long tva_flags,
 				       unsigned long orders)
 {
 	/* Optimization to check if required orders are enabled early. */
-	if (enforce_sysfs && vma_is_anonymous(vma)) {
+	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
 		unsigned long mask = READ_ONCE(huge_anon_orders_always);
 
 		if (vm_flags & VM_HUGEPAGE)
@@ -260,8 +262,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 			return 0;
 	}
 
-	return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
-					  enforce_sysfs, orders);
+	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
 }
 
 enum mthp_stat_item {
@@ -428,8 +429,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
 }
 
 static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-					unsigned long vm_flags, bool smaps,
-					bool in_pf, bool enforce_sysfs,
+					unsigned long vm_flags,
+					unsigned long tva_flags,
 					unsigned long orders)
 {
 	return 0;
mm/huge_memory.c
@@ -81,10 +81,13 @@ unsigned long huge_anon_orders_madvise __read_mostly;
 unsigned long huge_anon_orders_inherit __read_mostly;
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags, bool smaps,
-					 bool in_pf, bool enforce_sysfs,
+					 unsigned long vm_flags,
+					 unsigned long tva_flags,
 					 unsigned long orders)
 {
+	bool smaps = tva_flags & TVA_SMAPS;
+	bool in_pf = tva_flags & TVA_IN_PF;
+	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
 	/* Check the intersection of requested and supported orders. */
 	orders &= vma_is_anonymous(vma) ?
 			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
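The flags word is unpacked into the three former booleans exactly once, at the top of __thp_vma_allowable_orders() above; the rest of the implementation is untouched, so the readability win at every call site costs only three bitwise tests at the single decode point.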
mm/khugepaged.c
@@ -453,7 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    hugepage_flags_enabled()) {
-		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+		if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
 					    PMD_ORDER))
 			__khugepaged_enter(vma->vm_mm);
 	}
@@ -900,6 +900,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 				   struct collapse_control *cc)
 {
 	struct vm_area_struct *vma;
+	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
 
 	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
 		return SCAN_ANY_PROCESS;
@@ -910,8 +911,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
 		return SCAN_ADDRESS_RANGE;
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-				     cc->is_khugepaged, PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
 		return SCAN_VMA_CHECK;
 	/*
 	 * Anon VMA expected, the address may be unmapped then
@@ -1501,8 +1501,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
 	 * analogously elide sysfs THP settings here.
 	 */
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
 		return SCAN_VMA_CHECK;
 
 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2363,8 +2362,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			progress++;
 			break;
 		}
-		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-					     true, PMD_ORDER)) {
+		if (!thp_vma_allowable_order(vma, vma->vm_flags,
+					     TVA_ENFORCE_SYSFS, PMD_ORDER)) {
 skip:
 			progress++;
 			continue;
@@ -2701,8 +2700,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
 	*prev = vma;
 
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
 		return -EINVAL;
 
 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
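Taken together, the khugepaged.c call sites make the intent of each flag choice legible: khugepaged-driven paths pass TVA_ENFORCE_SYSFS, hugepage_vma_revalidate() derives its flags from cc->is_khugepaged, and the MADV_COLLAPSE paths pass 0 because, as the comment above notes, they deliberately elide the sysfs settings.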
mm/memory.c | 10 ++++++----
@@ -4334,8 +4334,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	 * for this vma. Then filter out the orders that can't be allocated over
 	 * the faulting address and still be fully contained in the vma.
 	 */
-	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
-					  BIT(PMD_ORDER) - 1);
+	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 
 	if (!orders)
@@ -5438,7 +5438,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 retry_pud:
 	if (pud_none(*vmf.pud) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+	    thp_vma_allowable_order(vma, vm_flags,
+				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -5472,7 +5473,8 @@ retry_pud:
 		goto retry_pud;
 
 	if (pmd_none(*vmf.pmd) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+	    thp_vma_allowable_order(vma, vm_flags,
+				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;