mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
mm/memory_hotplug: move debug_pagealloc_map_pages() into online_pages_range()
In the near future, we want to have a single way to hand over PageOffline pages to the buddy, whereby they could have: (a) Never been exposed to the buddy before: kept PageOffline when onlining the memory block. (b) Been allocated from the buddy, for example using alloc_contig_range() to then be set PageOffline. Let's start by making generic_online_page()->__free_pages_core() less special compared to ordinary page freeing (e.g., free_contig_range()), and perform the debug_pagealloc_map_pages() call unconditionally, even when the online callback might decide to keep the pages offline. All pages are already initialized with PageOffline, so nobody touches them either way. Link: https://lkml.kernel.org/r/20241203102050.223318-1-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Acked-by: Oscar Salvador <osalvador@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
bef5418d1f
commit
dd467f92db
|
@@ -650,6 +650,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
|
|||
* this and the first chunk to online will be pageblock_nr_pages.
|
||||
*/
|
||||
for (pfn = start_pfn; pfn < end_pfn;) {
|
||||
struct page *page = pfn_to_page(pfn);
|
||||
int order;
|
||||
|
||||
/*
|
||||
|
@@ -664,7 +665,14 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
|
|||
else
|
||||
order = MAX_PAGE_ORDER;
|
||||
|
||||
(*online_page_callback)(pfn_to_page(pfn), order);
|
||||
/*
|
||||
* Exposing the page to the buddy by freeing can cause
|
||||
* issues with debug_pagealloc enabled: some archs don't
|
||||
* like double-unmappings. So treat them like any pages that
|
||||
* were allocated from the buddy.
|
||||
*/
|
||||
debug_pagealloc_map_pages(page, 1 << order);
|
||||
(*online_page_callback)(page, order);
|
||||
pfn += (1UL << order);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1295,12 +1295,6 @@ void __meminit __free_pages_core(struct page *page, unsigned int order,
|
|||
set_page_count(p, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Freeing the page with debug_pagealloc enabled will try to
|
||||
* unmap it; some archs don't like double-unmappings, so
|
||||
* map it first.
|
||||
*/
|
||||
debug_pagealloc_map_pages(page, nr_pages);
|
||||
adjust_managed_page_count(page, nr_pages);
|
||||
} else {
|
||||
for (loop = 0; loop < nr_pages; loop++, p++) {
|
||||
|
|
Loading…
Reference in New Issue
Block a user