mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
mm: swap: move nr_swap_pages counter decrement from folio_alloc_swap() to swap_range_alloc()
commit 4f78252da8
upstream. Patch series "Some random fixes and cleanups to swapfile". Patches 0-3 are some random fixes. Patch 4 is a cleanup. More details can be found in the respective patches. This patch (of 4): When folio_alloc_swap() encounters a failure in either mem_cgroup_try_charge_swap() or add_to_swap_cache(), the nr_swap_pages counter is not decremented for the allocated entry. However, the following put_swap_folio() will increase the nr_swap_pages counter in an unpaired way and lead to an imbalance. Move the nr_swap_pages decrement from folio_alloc_swap() to swap_range_alloc() to pair the nr_swap_pages counting. Link: https://lkml.kernel.org/r/20250522122554.12209-1-shikemeng@huaweicloud.com Link: https://lkml.kernel.org/r/20250522122554.12209-2-shikemeng@huaweicloud.com Fixes: 0ff67f990b
("mm, swap: remove swap slot cache") Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Reviewed-by: Kairui Song <kasong@tencent.com> Reviewed-by: Baoquan He <bhe@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
815c528b13
commit
795fead6bf
|
@@ -1115,6 +1115,7 @@ static void swap_range_alloc(struct swap_info_struct *si,
 		if (vm_swap_full())
 			schedule_work(&si->reclaim_work);
 	}
+	atomic_long_sub(nr_entries, &nr_swap_pages);
 }

 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
@@ -1313,7 +1314,6 @@ int folio_alloc_swap(struct folio *folio, gfp_t gfp)
 	if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
 		goto out_free;

-	atomic_long_sub(size, &nr_swap_pages);
 	return 0;

 out_free:
|
Loading…
Reference in New Issue
Block a user