We already have a generic implementation of alloc/free up to P4D level, as
well as pgd_free(). Let's finish the work and add a generic PGD-level
alloc helper too.
Unlike at lower levels, almost all architectures need some specific magic
at PGD level (typically initialising PGD entries), so introducing a
generic pgd_alloc() isn't worth it. Instead we introduce two new helpers,
__pgd_alloc() and __pgd_free(), and make use of them in the arch-specific
pgd_alloc() and pgd_free() wherever possible. To accommodate as many
architectures as possible, __pgd_alloc() takes a page allocation order.
Because pagetable_alloc() allocates zeroed pages, explicit zeroing in
pgd_alloc() becomes redundant and we can get rid of it. Some trivial
implementations of pgd_free() also become unnecessary once __pgd_alloc()
is used; remove them.
Another small improvement is consistent accounting of PGD pages by using
GFP_PGTABLE_{USER,KERNEL} as appropriate.
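
As a minimal sketch (not necessarily the exact upstream code), the two
helpers can be pictured along these lines, using the pagetable_alloc()
and GFP_PGTABLE_{USER,KERNEL} APIs referenced above; both GFP flavours
include __GFP_ZERO, which is where the implicit zeroing comes from:

static inline pgd_t *__pgd_alloc(struct mm_struct *mm, unsigned int order)
{
        gfp_t gfp = GFP_PGTABLE_USER;
        struct ptdesc *ptdesc;

        /* Kernel page tables (init_mm) are not accounted to a memcg */
        if (mm == &init_mm)
                gfp = GFP_PGTABLE_KERNEL;

        /* GFP_PGTABLE_* includes __GFP_ZERO, so the PGD comes back zeroed */
        ptdesc = pagetable_alloc(gfp, order);
        if (!ptdesc)
                return NULL;

        return ptdesc_address(ptdesc);
}

static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pagetable_free(virt_to_ptdesc(pgd));
}

With helpers of this shape, an arch-specific pgd_alloc() reduces to
__pgd_alloc(mm, 0) plus whatever PGD-level initialisation the
architecture needs, as the LoongArch file below illustrates.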
Not all PGD allocations can be handled by the generic helpers. In
particular, multiple architectures allocate PGDs from a kmem_cache, and
those PGDs may not be page-sized.
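
For instance, an architecture whose PGD is smaller than a page might
allocate it along these lines (pgd_cache here is a hypothetical slab
cache, shown only to illustrate why a page-order-based helper cannot
cover this case):

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /* Sub-page PGD: comes from a slab cache, not the page allocator */
        return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}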
Link: https://lkml.kernel.org/r/20250103184415.2744423-6-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Look up the struct page backing a direct-mapped-window (DMW) address */
struct page *dmw_virt_to_page(unsigned long kaddr)
{
        return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);

/* Look up the struct page for a TLB-mapped address via the kernel page table */
struct page *tlb_virt_to_page(unsigned long kaddr)
{
        return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *init, *ret;

        ret = __pgd_alloc(mm, 0);
        if (ret) {
                init = pgd_offset(&init_mm, 0UL);
                /* Point every entry at the appropriate invalid table... */
                pgd_init(ret);
                /* ...then overwrite the kernel half with init_mm's entries */
                memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }

        return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);

void pgd_init(void *addr)
{
        unsigned long *p, *end;
        unsigned long entry;

        /* An empty PGD entry points at the invalid table one level down */
#if !defined(__PAGETABLE_PUD_FOLDED)
        entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
        entry = (unsigned long)invalid_pmd_table;
#else
        entry = (unsigned long)invalid_pte_table;
#endif

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PGD;

        /* Manually unrolled: eight entries per iteration */
        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p[4] = entry;
                p += 8;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);

#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(void *addr)
{
        unsigned long *p, *end;
        unsigned long pagetable = (unsigned long)invalid_pte_table;

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PMD;

        do {
                p[0] = pagetable;
                p[1] = pagetable;
                p[2] = pagetable;
                p[3] = pagetable;
                p[4] = pagetable;
                p += 8;
                p[-3] = pagetable;
                p[-2] = pagetable;
                p[-1] = pagetable;
        } while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_init(void *addr)
{
        unsigned long *p, *end;
        unsigned long pagetable = (unsigned long)invalid_pmd_table;

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PUD;

        do {
                p[0] = pagetable;
                p[1] = pagetable;
                p[2] = pagetable;
                p[3] = pagetable;
                p[4] = pagetable;
                p += 8;
                p[-3] = pagetable;
                p[-2] = pagetable;
                p[-1] = pagetable;
        } while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif

/* Empty kernel PTEs are preset with _PAGE_GLOBAL rather than zero */
void kernel_pte_init(void *addr)
{
        unsigned long *p, *end;

        p = (unsigned long *)addr;
        end = p + PTRS_PER_PTE;

        do {
                p[0] = _PAGE_GLOBAL;
                p[1] = _PAGE_GLOBAL;
                p[2] = _PAGE_GLOBAL;
                p[3] = _PAGE_GLOBAL;
                p[4] = _PAGE_GLOBAL;
                p += 8;
                p[-3] = _PAGE_GLOBAL;
                p[-2] = _PAGE_GLOBAL;
                p[-1] = _PAGE_GLOBAL;
        } while (p != end);
}

pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
        pmd_t pmd;

        pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);

        return pmd;
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        WRITE_ONCE(*pmdp, pmd);
        flush_tlb_all();
}

void __init pagetable_init(void)
{
        /* Initialize the entire pgd. */
        pgd_init(swapper_pg_dir);
        pgd_init(invalid_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
        pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_init(invalid_pmd_table);
#endif
}