mm/ptdump: split effective_prot() into level specific callbacks
The last argument to effective_prot() is a u64, which assumes that the value returned by pxd_val() is 64 bits wide at every page table level. pxd_val() is very platform specific and its type should not be assumed in generic MM code. Split effective_prot() into individual page table level specific callbacks which accept the corresponding pxd_t argument instead; the subscribing platform (only x86) then derives pxd_val() from the entries as required and proceeds as before.

Link: https://lkml.kernel.org/r/20250407053113.746295-3-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 08978fc3b0 (parent e064e7384f)
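For orientation before the diff, here is a minimal sketch of how a platform could subscribe to the reworked interface. Only the struct ptdump_state callback signatures, struct ptdump_range and ptdump_walk_pgd() are taken from the kernel sources touched below; everything prefixed arch_ (the prot_levels bookkeeping, the empty note_page_* stubs, the range table) is a hypothetical placeholder, not code from this commit.

/* Hypothetical arch-side dumper wired to the new per-level callbacks. */
#include <linux/container_of.h>
#include <linux/mm.h>
#include <linux/ptdump.h>

struct arch_ptdump_state {
	struct ptdump_state ptdump;	/* generic walker state and callbacks */
	u64 prot_levels[5];		/* per-level effective protection bits */
};

/*
 * The callbacks now receive the native page table entry type; only the
 * architecture decides how to extract the raw bits from it via pte_val(),
 * pmd_val() and friends.
 */
static void arch_effective_prot_pte(struct ptdump_state *st, pte_t pte)
{
	struct arch_ptdump_state *as =
		container_of(st, struct arch_ptdump_state, ptdump);

	as->prot_levels[4] = pte_val(pte);	/* placeholder bookkeeping */
}

static void arch_effective_prot_pmd(struct ptdump_state *st, pmd_t pmd)
{
	struct arch_ptdump_state *as =
		container_of(st, struct arch_ptdump_state, ptdump);

	as->prot_levels[3] = pmd_val(pmd);	/* placeholder bookkeeping */
}

/* The pud/p4d/pgd variants would follow the same pattern. */

/* Empty stubs: mm/ptdump.c invokes the note_page_* callbacks unconditionally. */
static void arch_note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte) { }
static void arch_note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd) { }
static void arch_note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud) { }
static void arch_note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { }
static void arch_note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd) { }
static void arch_note_page_flush(struct ptdump_state *st) { }

static const struct ptdump_range arch_ptdump_ranges[] = {
	{PAGE_OFFSET, ~0UL},
	{0, 0}			/* sentinel terminating the walk */
};

static void arch_dump_kernel_pagetables(void)
{
	struct arch_ptdump_state st = {
		.ptdump = {
			.note_page_pte = arch_note_page_pte,
			.note_page_pmd = arch_note_page_pmd,
			.note_page_pud = arch_note_page_pud,
			.note_page_p4d = arch_note_page_p4d,
			.note_page_pgd = arch_note_page_pgd,
			.note_page_flush = arch_note_page_flush,
			.effective_prot_pte = arch_effective_prot_pte,
			.effective_prot_pmd = arch_effective_prot_pmd,
			/* .effective_prot_pud/_p4d/_pgd may stay NULL */
			.range = arch_ptdump_ranges,
		},
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
}

The point of the change is visible in the callback types: generic MM code no longer has to pretend that every pxd_val() fits in a u64; only the architecture callbacks touch the raw entry values.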

arch/x86/mm/dump_pagetables.c

@@ -266,6 +266,32 @@ static void effective_prot(struct ptdump_state *pt_st, int level, u64 val)
 	st->prot_levels[level] = effective;
 }
+
+static void effective_prot_pte(struct ptdump_state *st, pte_t pte)
+{
+	effective_prot(st, 4, pte_val(pte));
+}
+
+static void effective_prot_pmd(struct ptdump_state *st, pmd_t pmd)
+{
+	effective_prot(st, 3, pmd_val(pmd));
+}
+
+static void effective_prot_pud(struct ptdump_state *st, pud_t pud)
+{
+	effective_prot(st, 2, pud_val(pud));
+}
+
+static void effective_prot_p4d(struct ptdump_state *st, p4d_t p4d)
+{
+	effective_prot(st, 1, p4d_val(p4d));
+}
+
+static void effective_prot_pgd(struct ptdump_state *st, pgd_t pgd)
+{
+	effective_prot(st, 0, pgd_val(pgd));
+}
+
 
 /*
  * This function gets called on a break in a continuous series
  * of PTE entries; the next one is different so we need to

@@ -416,7 +442,11 @@ bool ptdump_walk_pgd_level_core(struct seq_file *m,
 			.note_page_p4d = note_page_p4d,
 			.note_page_pgd = note_page_pgd,
 			.note_page_flush = note_page_flush,
-			.effective_prot = effective_prot,
+			.effective_prot_pte = effective_prot_pte,
+			.effective_prot_pmd = effective_prot_pmd,
+			.effective_prot_pud = effective_prot_pud,
+			.effective_prot_p4d = effective_prot_p4d,
+			.effective_prot_pgd = effective_prot_pgd,
 			.range = ptdump_ranges
 		},
 		.level = -1,

include/linux/ptdump.h

@@ -17,7 +17,11 @@ struct ptdump_state {
 	void (*note_page_p4d)(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
 	void (*note_page_pgd)(struct ptdump_state *st, unsigned long addr, pgd_t pgd);
 	void (*note_page_flush)(struct ptdump_state *st);
-	void (*effective_prot)(struct ptdump_state *st, int level, u64 val);
+	void (*effective_prot_pte)(struct ptdump_state *st, pte_t pte);
+	void (*effective_prot_pmd)(struct ptdump_state *st, pmd_t pmd);
+	void (*effective_prot_pud)(struct ptdump_state *st, pud_t pud);
+	void (*effective_prot_p4d)(struct ptdump_state *st, p4d_t p4d);
+	void (*effective_prot_pgd)(struct ptdump_state *st, pgd_t pgd);
 	const struct ptdump_range *range;
 };

mm/ptdump.c (20 changed lines)

@@ -38,8 +38,8 @@ static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,
 		return note_kasan_page_table(walk, addr);
 #endif
 
-	if (st->effective_prot)
-		st->effective_prot(st, 0, pgd_val(val));
+	if (st->effective_prot_pgd)
+		st->effective_prot_pgd(st, val);
 
 	if (pgd_leaf(val)) {
 		st->note_page_pgd(st, addr, val);

@@ -61,8 +61,8 @@ static int ptdump_p4d_entry(p4d_t *p4d, unsigned long addr,
 		return note_kasan_page_table(walk, addr);
 #endif
 
-	if (st->effective_prot)
-		st->effective_prot(st, 1, p4d_val(val));
+	if (st->effective_prot_p4d)
+		st->effective_prot_p4d(st, val);
 
 	if (p4d_leaf(val)) {
 		st->note_page_p4d(st, addr, val);

@@ -84,8 +84,8 @@ static int ptdump_pud_entry(pud_t *pud, unsigned long addr,
 		return note_kasan_page_table(walk, addr);
 #endif
 
-	if (st->effective_prot)
-		st->effective_prot(st, 2, pud_val(val));
+	if (st->effective_prot_pud)
+		st->effective_prot_pud(st, val);
 
 	if (pud_leaf(val)) {
 		st->note_page_pud(st, addr, val);

@@ -106,8 +106,8 @@ static int ptdump_pmd_entry(pmd_t *pmd, unsigned long addr,
 		return note_kasan_page_table(walk, addr);
 #endif
 
-	if (st->effective_prot)
-		st->effective_prot(st, 3, pmd_val(val));
+	if (st->effective_prot_pmd)
+		st->effective_prot_pmd(st, val);
 	if (pmd_leaf(val)) {
 		st->note_page_pmd(st, addr, val);
 		walk->action = ACTION_CONTINUE;

@@ -122,8 +122,8 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
 	struct ptdump_state *st = walk->private;
 	pte_t val = ptep_get_lockless(pte);
 
-	if (st->effective_prot)
-		st->effective_prot(st, 4, pte_val(val));
+	if (st->effective_prot_pte)
+		st->effective_prot_pte(st, val);
 
 	st->note_page_pte(st, addr, val);
 