mm: revert "mm/gup: clear the LRU flag of a page before adding to LRU batch"

[ Upstream commit afb99e9f500485160f34b8cad6d3763ada3e80e8 ]

This reverts commit 33dfe9204f29: now that
collect_longterm_unpinnable_folios() is checking ref_count instead of lru,
and mlock/munlock do not participate in the revised LRU flag clearing,
those changes are misleading, and enlarge the window during which
mlock/munlock may miss an mlock_count update.
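
For context (background, not part of this patch): the mlock/munlock batch handlers bail out when they cannot clear the LRU flag themselves, so a folio whose flag was already cleared at batch-add time is simply skipped and its mlock_count left stale for longer. A rough sketch of that bail-out, modelled on __mlock_folio() in mm/mlock.c (paraphrased, not quoted):

/* Sketch only: stands in for __mlock_folio()/__munlock_folio(). */
static struct lruvec *mlock_one_folio(struct folio *folio, struct lruvec *lruvec)
{
	/*
	 * If someone else already cleared PG_lru -- e.g. the folio is parked
	 * in a per-CPU LRU batch under the reverted scheme -- there is
	 * nothing more to do here, and the mlock_count update is missed.
	 */
	if (!folio_test_clear_lru(folio))
		return lruvec;

	/* ... otherwise relock the lruvec, adjust mlock_count, move lists ... */
	return lruvec;
}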

It is possible (I'd hesitate to claim probable) that the greater
likelihood of missed mlock_count updates would explain the "Realtime
threads delayed due to kcompactd0" observed on 6.12 in the Link below.  If
that is the case, this reversion will help; but a complete solution needs
also a further patch, beyond the scope of this series.

Included some 80-column cleanup around folio_batch_add_and_move().

The role of folio_test_clear_lru() (before taking per-memcg lru_lock) is
questionable since 6.13 removed mem_cgroup_move_account() etc; but perhaps
there are still some races which need it - not examined here.
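
For reference, the restored loop in folio_batch_move_lru() (condensed from the first hunk below; folio_set_lru() is pre-existing context, not touched by this patch) shows where that clear sits relative to the lock:

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* LRU flag is cleared *before* the per-memcg lru_lock is taken */
		if (move_fn != lru_add && !folio_test_clear_lru(folio))
			continue;

		/* takes this folio's lruvec->lru_lock, irq-safe */
		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
		move_fn(lruvec, folio);

		folio_set_lru(folio);
	}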

Link: https://lore.kernel.org/linux-mm/DU0PR01MB10385345F7153F334100981888259A@DU0PR01MB10385.eurprd01.prod.exchangelabs.com/
Link: https://lkml.kernel.org/r/05905d7b-ed14-68b1-79d8-bdec30367eba@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Keir Fraser <keirf@google.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Li Zhe <lizhe.67@bytedance.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Shivank Garg <shivankg@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Xu <weixugc@google.com>
Cc: Will Deacon <will@kernel.org>
Cc: yangge <yangge1116@126.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[ Resolved conflicts in applying the revert to this tree ]
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -195,6 +195,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 	for (i = 0; i < folio_batch_count(fbatch); i++) {
 		struct folio *folio = fbatch->folios[i];
 
+		/* block memcg migration while the folio moves between lru */
+		if (move_fn != lru_add && !folio_test_clear_lru(folio))
+			continue;
+
 		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
@@ -207,14 +211,10 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 }
 
 static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
-		struct folio *folio, move_fn_t move_fn,
-		bool on_lru, bool disable_irq)
+		struct folio *folio, move_fn_t move_fn, bool disable_irq)
 {
 	unsigned long flags;
 
-	if (on_lru && !folio_test_clear_lru(folio))
-		return;
-
 	folio_get(folio);
 
 	if (disable_irq)
@@ -222,8 +222,8 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 	else
 		local_lock(&cpu_fbatches.lock);
 
-	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
-	    lru_cache_disabled())
+	if (!folio_batch_add(this_cpu_ptr(fbatch), folio) ||
+	    folio_test_large(folio) || lru_cache_disabled())
 		folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
 
 	if (disable_irq)
@@ -232,13 +232,13 @@ static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
 		local_unlock(&cpu_fbatches.lock);
 }
 
-#define folio_batch_add_and_move(folio, op, on_lru)				\
+#define folio_batch_add_and_move(folio, op)					\
 	__folio_batch_add_and_move(						\
 		&cpu_fbatches.op,						\
 		folio,								\
 		op,								\
-		on_lru,								\
-		offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq)	\
+		offsetof(struct cpu_fbatches, op) >=				\
+			offsetof(struct cpu_fbatches, lock_irq)			\
 	)
 
 static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
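
For readers unfamiliar with the macro being reshuffled above: the offsetof() comparison is how folio_batch_add_and_move() decides, at compile time, whether a given per-CPU batch needs the IRQ-disabling lock, since batches declared at or after cpu_fbatches.lock_irq take the irq-safe path. A minimal standalone illustration of that pattern (made-up struct and member names, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Toy layout mirroring the idea: members declared after `lock_irq`
 * are the ones that may also be filled from interrupt context. */
struct demo_batches {
	int lock;		/* stands in for local_lock_t lock */
	int add;		/* batch filled only in task context */
	int lock_irq;		/* stands in for local_lock_t lock_irq */
	int move_tail;		/* batch also filled from IRQ context */
};

/* Compile-time choice of locking flavour, as in folio_batch_add_and_move(). */
#define NEED_IRQ_LOCK(member) \
	(offsetof(struct demo_batches, member) >= \
	 offsetof(struct demo_batches, lock_irq))

int main(void)
{
	printf("add:       irq lock? %d\n", (int)NEED_IRQ_LOCK(add));	    /* 0 */
	printf("move_tail: irq lock? %d\n", (int)NEED_IRQ_LOCK(move_tail)); /* 1 */
	return 0;
}
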
@@ -262,10 +262,10 @@ static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
 void folio_rotate_reclaimable(struct folio *folio)
 {
 	if (folio_test_locked(folio) || folio_test_dirty(folio) ||
-	    folio_test_unevictable(folio))
+	    folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_move_tail, true);
+	folio_batch_add_and_move(folio, lru_move_tail);
 }
 
 void lru_note_cost(struct lruvec *lruvec, bool file,
@@ -354,10 +354,11 @@ static void folio_activate_drain(int cpu)
 
 void folio_activate(struct folio *folio)
 {
-	if (folio_test_active(folio) || folio_test_unevictable(folio))
+	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
+	    !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_activate, true);
+	folio_batch_add_and_move(folio, lru_activate);
 }
 
 #else
@@ -510,7 +511,7 @@ void folio_add_lru(struct folio *folio)
 	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
 		folio_set_active(folio);
 
-	folio_batch_add_and_move(folio, lru_add, false);
+	folio_batch_add_and_move(folio, lru_add);
 }
 EXPORT_SYMBOL(folio_add_lru);
 
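
One asymmetry worth noting: lru_add is the only batch whose folios are not yet on an LRU list, which is why folio_add_lru() gains no !folio_test_lru() check in this revert while every other caller does, and why the restored move_fn != lru_add test in folio_batch_move_lru() encodes the same exception at drain time. Side by side (condensed from hunks in this patch, not new code):

	/* folio_add_lru(): brand-new folio, not yet on any LRU list */
	folio_batch_add_and_move(folio, lru_add);

	/* folio_activate() and the other movers: only batch folios still on the LRU */
	if (folio_test_active(folio) || folio_test_unevictable(folio) ||
	    !folio_test_lru(folio))
		return;
	folio_batch_add_and_move(folio, lru_activate);
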
@@ -685,10 +686,10 @@ void lru_add_drain_cpu(int cpu)
 void deactivate_file_folio(struct folio *folio)
 {
 	/* Deactivating an unevictable folio will not accelerate reclaim */
-	if (folio_test_unevictable(folio))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate_file, true);
+	folio_batch_add_and_move(folio, lru_deactivate_file);
 }
 
 /*
@@ -701,10 +702,11 @@ void deactivate_file_folio(struct folio *folio)
  */
 void folio_deactivate(struct folio *folio)
 {
-	if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+	if (folio_test_unevictable(folio) || !folio_test_lru(folio) ||
+	    !(folio_test_active(folio) || lru_gen_enabled()))
 		return;
 
-	folio_batch_add_and_move(folio, lru_deactivate, true);
+	folio_batch_add_and_move(folio, lru_deactivate);
 }
 
 /**
@@ -717,10 +719,11 @@ void folio_deactivate(struct folio *folio)
 void folio_mark_lazyfree(struct folio *folio)
 {
 	if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+	    !folio_test_lru(folio) ||
 	    folio_test_swapcache(folio) || folio_test_unevictable(folio))
 		return;
 
-	folio_batch_add_and_move(folio, lru_lazyfree, true);
+	folio_batch_add_and_move(folio, lru_lazyfree);
 }
 
 void lru_add_drain(void)