mm: mass constification of folio/page pointers
Now that page_pgoff() takes const pointers, we can constify the pointers to a lot of functions.

Link: https://lkml.kernel.org/r/20241005200121.3231142-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 68158bfa3d
parent 713da0b33b
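The whole patch follows one rule of C const-correctness: a function can take a const pointer only after every helper it hands that pointer to has been constified, so the change ripples upward from page_pgoff() (constified just before this patch, per the commit message) to callers such as collect_procs() and the rmap walk. A minimal standalone sketch of that rule, with illustrative names rather than the kernel's API:

    /* Illustrative only: constification has to land in the callee first. */
    #include <stddef.h>

    struct page { size_t index; };

    /* The callee now only reads, and promises not to modify, the page... */
    static size_t page_index(const struct page *p)
    {
            return p->index;
    }

    /* ...so a caller that merely forwards the pointer can take const too.
     * Against a non-const callee, this call would draw a compiler
     * diagnostic for discarding the qualifier. */
    static size_t page_offset_bytes(const struct page *p, size_t page_size)
    {
            return page_index(p) * page_size;
    }

    int main(void)
    {
            struct page p = { .index = 3 };
            return page_offset_bytes(&p, 4096) == 12288 ? 0 : 1; /* 3 * 4096 */
    }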
include/linux/ksm.h

@@ -90,7 +90,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
 		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
 
@@ -122,8 +122,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-static inline void collect_procs_ksm(struct folio *folio, struct page *page,
-		struct list_head *to_kill, int force_early)
+static inline void collect_procs_ksm(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 }
 
include/linux/rmap.h

@@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 	unlink_anon_vmas(next);
 }
 
-struct anon_vma *folio_get_anon_vma(struct folio *folio);
+struct anon_vma *folio_get_anon_vma(const struct folio *folio);
 
 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
@@ -194,8 +194,8 @@ enum rmap_level {
 	RMAP_LEVEL_PMD,
 };
 
-static inline void __folio_rmap_sanity_checks(struct folio *folio,
-		struct page *page, int nr_pages, enum rmap_level level)
+static inline void __folio_rmap_sanity_checks(const struct folio *folio,
+		const struct page *page, int nr_pages, enum rmap_level level)
 {
 	/* hugetlb folios are handled separately. */
 	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -771,14 +771,14 @@ struct rmap_walk_control {
 	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
 	int (*done)(struct folio *folio);
-	struct anon_vma *(*anon_lock)(struct folio *folio,
+	struct anon_vma *(*anon_lock)(const struct folio *folio,
 					struct rmap_walk_control *rwc);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
 
 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
 					struct rmap_walk_control *rwc);
 
 #else /* !CONFIG_MMU */
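One subtlety in the rmap.h hunks above: anon_lock is a function-pointer member, so const-qualifying its folio parameter changes the member's type, and every implementation assigned to it (rmap_walk_anon_lock() and folio_lock_anon_vma_read(), both updated further down in this commit) must adopt the matching signature or the assignment stops type-checking. A standalone sketch of that constraint, with hypothetical names:

    /* Illustrative only: a const-qualified callback parameter is part of
     * the function-pointer type, so implementations must match exactly. */
    struct folio;

    struct walk_control {
            /* the callback now promises not to modify the folio */
            void *(*anon_lock)(const struct folio *folio);
    };

    /* Taking a plain 'struct folio *' here instead would make the
     * initializer below an incompatible-pointer-type error. */
    static void *my_anon_lock(const struct folio *folio)
    {
            (void)folio; /* unused in this sketch */
            return NULL;
    }

    static struct walk_control wc = { .anon_lock = my_anon_lock };

    int main(void)
    {
            return wc.anon_lock(0) == 0 ? 0 : 1;
    }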
mm/internal.h

@@ -1117,10 +1117,11 @@ void ClearPageHWPoisonTakenOff(struct page *page);
 bool take_page_off_buddy(struct page *page);
 bool put_page_back_buddy(struct page *page);
 struct task_struct *task_early_kill(struct task_struct *tsk, int force_early);
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma, struct list_head *to_kill,
 		unsigned long ksm_addr);
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+unsigned long page_mapped_in_vma(const struct page *page,
+		struct vm_area_struct *vma);
 
 #else
 static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu)
mm/ksm.c (5 changes)

@@ -1051,7 +1051,8 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
 	return err;
 }
 
-static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
+static inline
+struct ksm_stable_node *folio_stable_node(const struct folio *folio)
 {
 	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
 }
@@ -3067,7 +3068,7 @@ again:
 /*
  * Collect processes when the error hit an ksm page.
  */
-void collect_procs_ksm(struct folio *folio, struct page *page,
+void collect_procs_ksm(const struct folio *folio, const struct page *page,
 		struct list_head *to_kill, int force_early)
 {
 	struct ksm_stable_node *stable_node;
mm/memory-failure.c

@@ -445,7 +445,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma,
  * Schedule a process for later kill.
  * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
  */
-static void __add_to_kill(struct task_struct *tsk, struct page *p,
+static void __add_to_kill(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma, struct list_head *to_kill,
 		unsigned long addr)
 {
@@ -461,7 +461,7 @@ static void __add_to_kill(struct task_struct *tsk, const struct page *p,
 	if (is_zone_device_page(p))
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	else
-		tk->size_shift = page_shift(compound_head(p));
+		tk->size_shift = folio_shift(page_folio(p));
 
 	/*
 	 * Send SIGKILL if "tk->addr == -EFAULT". Also, as
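The one change above that is more than adding const: page_shift(compound_head(p)) becomes folio_shift(page_folio(p)). Both resolve the head page of the allocation and compute its mapping shift; the folio spelling is the modern equivalent and composes with the const pointer this patch introduces. A standalone sketch of the arithmetic, using a simplified stand-in type (the kernel's folio_shift() is PAGE_SHIFT + folio_order()):

    #define PAGE_SHIFT 12 /* 4 KiB base pages; architecture-dependent */

    /* simplified stand-in: a folio is a head page plus an allocation order */
    struct folio { unsigned int order; };

    /* mirrors the kernel definition: PAGE_SHIFT + folio_order(folio) */
    static unsigned int folio_shift(const struct folio *folio)
    {
            return PAGE_SHIFT + folio->order;
    }

    int main(void)
    {
            struct folio f = { .order = 2 }; /* an order-2, 16 KiB folio */
            /* bytes mapped at this size: 1UL << shift */
            return (1UL << folio_shift(&f)) == 16384 ? 0 : 1;
    }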
@@ -486,7 +486,7 @@ static void __add_to_kill(struct task_struct *tsk, const struct page *p,
 	list_add_tail(&tk->nd, to_kill);
 }
 
-static void add_to_kill_anon_file(struct task_struct *tsk, struct page *p,
+static void add_to_kill_anon_file(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma, struct list_head *to_kill,
 		unsigned long addr)
 {
@@ -509,7 +509,7 @@ static bool task_in_to_kill_list(struct list_head *to_kill,
 	return false;
 }
 
-void add_to_kill_ksm(struct task_struct *tsk, struct page *p,
+void add_to_kill_ksm(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma, struct list_head *to_kill,
 		unsigned long addr)
 {
@@ -606,8 +606,9 @@ struct task_struct *task_early_kill(struct task_struct *tsk, int force_early)
 /*
  * Collect processes when the error hit an anonymous page.
  */
-static void collect_procs_anon(struct folio *folio, struct page *page,
-		struct list_head *to_kill, int force_early)
+static void collect_procs_anon(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 	struct task_struct *tsk;
 	struct anon_vma *av;
@@ -643,8 +644,9 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 /*
  * Collect processes when the error hit a file mapped page.
  */
-static void collect_procs_file(struct folio *folio, struct page *page,
-		struct list_head *to_kill, int force_early)
+static void collect_procs_file(const struct folio *folio,
+		const struct page *page, struct list_head *to_kill,
+		int force_early)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
@@ -680,7 +682,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 }
 
 #ifdef CONFIG_FS_DAX
-static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
+static void add_to_kill_fsdax(struct task_struct *tsk, const struct page *p,
 		struct vm_area_struct *vma,
 		struct list_head *to_kill, pgoff_t pgoff)
 {
@@ -691,7 +693,7 @@ static void add_to_kill_fsdax(struct task_struct *tsk, struct page *p,
 /*
  * Collect processes when the error hit a fsdax page.
  */
-static void collect_procs_fsdax(struct page *page,
+static void collect_procs_fsdax(const struct page *page,
 		struct address_space *mapping, pgoff_t pgoff,
 		struct list_head *to_kill, bool pre_remove)
 {
@@ -725,7 +727,7 @@ static void collect_procs_fsdax(struct page *page,
 /*
  * Collect the processes who have the corrupted page mapped to kill.
  */
-static void collect_procs(struct folio *folio, struct page *page,
+static void collect_procs(const struct folio *folio, const struct page *page,
 		struct list_head *tokill, int force_early)
 {
 	if (!folio->mapping)
mm/page_vma_mapped.c

@@ -337,9 +337,10 @@ next_pte:
  * outside the VMA or not present, returns -EFAULT.
  * Only valid for normal file or anonymous VMAs.
  */
-unsigned long page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+		struct vm_area_struct *vma)
 {
-	struct folio *folio = page_folio(page);
+	const struct folio *folio = page_folio(page);
 	struct page_vma_mapped_walk pvmw = {
 		.pfn = page_to_pfn(page),
 		.nr_pages = 1,
mm/rmap.c (11 changes)
@@ -496,7 +496,7 @@ void __init anon_vma_init(void)
  * concurrently without folio lock protection). See folio_lock_anon_vma_read()
  * which has already covered that, and comment above remap_pages().
  */
-struct anon_vma *folio_get_anon_vma(struct folio *folio)
+struct anon_vma *folio_get_anon_vma(const struct folio *folio)
 {
 	struct anon_vma *anon_vma = NULL;
 	unsigned long anon_mapping;
@@ -540,7 +540,7 @@ out:
  * reference like with folio_get_anon_vma() and then block on the mutex
  * on !rwc->try_lock case.
  */
-struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
+struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
 					struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma = NULL;
@@ -1271,8 +1271,9 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma,
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct folio *folio, struct page *page,
-		struct vm_area_struct *vma, unsigned long address)
+static void __page_check_anon_rmap(const struct folio *folio,
+		const struct page *page, struct vm_area_struct *vma,
+		unsigned long address)
 {
 	/*
 	 * The page's anon-rmap details (mapping and index) are guaranteed to
@@ -2569,7 +2570,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	anon_vma_free(root);
 }
 
-static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
+static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio,
 					struct rmap_walk_control *rwc)
 {
 	struct anon_vma *anon_vma;