BACKPORT: mm: memcg: move vmstats structs definition above flushing code
The following patch will make use of those structs in the flushing code,
so move their definitions (and a few other dependencies) a little bit up
to reduce the diff noise in the following patch.
No functional change intended.
Link: https://lkml.kernel.org/r/20231129032154.3710765-3-yosryahmed@google.com
Change-Id: I823689b286e7336dd61e2468af02b00e488bfd25
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Tested-by: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Greg Thelen <gthelen@google.com>
Cc: Ivan Babrou <ivan@cloudflare.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutny <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Wei Xu <weixugc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit e0bf1dc859)
[TJ: ZSWPWB, THP_SWPOUT, THP_SWPOUT_FALLBACK are not in our
memcg_vm_event_stat list on this branch]
Bug: 322544714
Signed-off-by: T.J. Mercier <tjmercier@google.com>
This commit is contained in:
parent 01d12c8b2c
commit 84b2003d45

mm/memcontrol.c | 142
@@ -572,6 +572,77 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
 	return mz;
 }
 
+/* Subset of vm_event_item to report for memcg event stats */
+static const unsigned int memcg_vm_event_stat[] = {
+	PGPGIN,
+	PGPGOUT,
+	PGSCAN_KSWAPD,
+	PGSCAN_DIRECT,
+	PGSCAN_KHUGEPAGED,
+	PGSTEAL_KSWAPD,
+	PGSTEAL_DIRECT,
+	PGSTEAL_KHUGEPAGED,
+	PGFAULT,
+	PGMAJFAULT,
+	PGREFILL,
+	PGACTIVATE,
+	PGDEACTIVATE,
+	PGLAZYFREE,
+	PGLAZYFREED,
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	ZSWPIN,
+	ZSWPOUT,
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	THP_FAULT_ALLOC,
+	THP_COLLAPSE_ALLOC,
+#endif
+};
+
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
+static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+	int i;
+
+	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+}
+
+static inline int memcg_events_index(enum vm_event_item idx)
+{
+	return mem_cgroup_events_index[idx] - 1;
+}
+
+struct memcg_vmstats_percpu {
+	/* Local (CPU and cgroup) page state & events */
+	long state[MEMCG_NR_STAT];
+	unsigned long events[NR_MEMCG_EVENTS];
+
+	/* Delta calculation for lockless upward propagation */
+	long state_prev[MEMCG_NR_STAT];
+	unsigned long events_prev[NR_MEMCG_EVENTS];
+
+	/* Cgroup1: threshold notifications & softlimit tree updates */
+	unsigned long nr_page_events;
+	unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct memcg_vmstats {
+	/* Aggregated (CPU and subtree) page state & events */
+	long state[MEMCG_NR_STAT];
+	unsigned long events[NR_MEMCG_EVENTS];
+
+	/* Non-hierarchical (CPU aggregated) page state & events */
+	long state_local[MEMCG_NR_STAT];
+	unsigned long events_local[NR_MEMCG_EVENTS];
+
+	/* Pending child counts during tree propagation */
+	long state_pending[MEMCG_NR_STAT];
+	unsigned long events_pending[NR_MEMCG_EVENTS];
+};
+
 /*
  * memcg and lruvec stats flushing
  *
@@ -683,77 +754,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
-/* Subset of vm_event_item to report for memcg event stats */
-static const unsigned int memcg_vm_event_stat[] = {
-	PGPGIN,
-	PGPGOUT,
-	PGSCAN_KSWAPD,
-	PGSCAN_DIRECT,
-	PGSCAN_KHUGEPAGED,
-	PGSTEAL_KSWAPD,
-	PGSTEAL_DIRECT,
-	PGSTEAL_KHUGEPAGED,
-	PGFAULT,
-	PGMAJFAULT,
-	PGREFILL,
-	PGACTIVATE,
-	PGDEACTIVATE,
-	PGLAZYFREE,
-	PGLAZYFREED,
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
-	ZSWPIN,
-	ZSWPOUT,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	THP_FAULT_ALLOC,
-	THP_COLLAPSE_ALLOC,
-#endif
-};
-
-#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
-static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
-
-static void init_memcg_events(void)
-{
-	int i;
-
-	for (i = 0; i < NR_MEMCG_EVENTS; ++i)
-		mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
-}
-
-static inline int memcg_events_index(enum vm_event_item idx)
-{
-	return mem_cgroup_events_index[idx] - 1;
-}
-
-struct memcg_vmstats_percpu {
-	/* Local (CPU and cgroup) page state & events */
-	long state[MEMCG_NR_STAT];
-	unsigned long events[NR_MEMCG_EVENTS];
-
-	/* Delta calculation for lockless upward propagation */
-	long state_prev[MEMCG_NR_STAT];
-	unsigned long events_prev[NR_MEMCG_EVENTS];
-
-	/* Cgroup1: threshold notifications & softlimit tree updates */
-	unsigned long nr_page_events;
-	unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
-	/* Aggregated (CPU and subtree) page state & events */
-	long state[MEMCG_NR_STAT];
-	unsigned long events[NR_MEMCG_EVENTS];
-
-	/* Non-hierarchical (CPU aggregated) page state & events */
-	long state_local[MEMCG_NR_STAT];
-	unsigned long events_local[NR_MEMCG_EVENTS];
-
-	/* Pending child counts during tree propagation */
-	long state_pending[MEMCG_NR_STAT];
-	unsigned long events_pending[NR_MEMCG_EVENTS];
-};
-
 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
 	long x = READ_ONCE(memcg->vmstats->state[idx]);
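The moved helpers implement an index-plus-one sentinel: memcg tracks only the vm_event_item entries listed in memcg_vm_event_stat, init_memcg_events() records each tracked item's position plus one in the zero-initialized mem_cgroup_events_index[], so memcg_events_index() returns -1 for untracked events and callers can check for a negative index before touching the compact events[NR_MEMCG_EVENTS] arrays. Below is a minimal stand-alone C sketch of that scheme; the reduced event list, the *_D names, and the main() driver are hypothetical, for illustration only and not part of the patch.

/* Sketch of the index-plus-one sentinel used by memcg_events_index().
 * The event set here is a hypothetical, reduced subset. */
#include <stdio.h>

enum vm_event_item_demo { PGPGIN_D, PGPGOUT_D, PGFAULT_D, PGMAJFAULT_D, NR_VM_EVENT_ITEMS_D };

static const unsigned int demo_event_stat[] = { PGPGIN_D, PGFAULT_D };
#define NR_DEMO_EVENTS (sizeof(demo_event_stat) / sizeof(demo_event_stat[0]))

/* File-scope static, so it is zero-initialized: 0 means "not tracked". */
static int demo_events_index[NR_VM_EVENT_ITEMS_D];

static void init_demo_events(void)
{
	unsigned int i;

	for (i = 0; i < NR_DEMO_EVENTS; ++i)
		demo_events_index[demo_event_stat[i]] = i + 1;	/* store index + 1 */
}

static int demo_events_index_of(enum vm_event_item_demo idx)
{
	return demo_events_index[idx] - 1;	/* -1 signals an untracked event */
}

int main(void)
{
	init_demo_events();
	printf("PGFAULT -> %d\n", demo_events_index_of(PGFAULT_D));	/* 1: second tracked slot */
	printf("PGPGOUT -> %d\n", demo_events_index_of(PGPGOUT_D));	/* -1: not in the tracked subset */
	return 0;
}

This is why moving the structs above the flushing code is purely mechanical: the events[] arrays stay sized by NR_MEMCG_EVENTS, and the lookup contract (negative means untracked) is unchanged.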