From: Johannes Weiner <hannes@cmpxchg.org>
Subject: [PATCH 7/8] mm: memcontrol: consolidate lruvec stat flushing
Date: Fri, 5 Feb 2021
There are two functions that flush the per-cpu data of an lruvec into
the rest of the cgroup tree: one when the cgroup is being freed, the
other when a CPU disappears during hotplug. They differ only in
whether all CPUs or just one are collected; the rest of the flushing
code is the same. Merge them into one function and share the common
code.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
mm/memcontrol.c | 74 +++++++++++++++++++------------------------------
1 file changed, 28 insertions(+), 46 deletions(-)
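
For anyone who wants to poke at the pattern outside the kernel, below
is a minimal userspace model of what the consolidated helper does;
struct pn, flush_lruvec_state() and the array sizes are simplified
stand-ins for illustration, not the kernel's actual types. The point
is the shape both call sites now share: snapshot-and-clear one CPU's
deltas, then add them at every level up the parent chain. The free
path loops over the online CPUs, the hotplug path passes just the
dead one. Builds with cc -std=c11 and prints child=5 parent=5.

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS  4
#define NR_NODES 2
#define NR_STATS 3

/* simplified stand-in for struct mem_cgroup_per_node */
struct pn {
	struct pn *parent;                 /* same node in the parent cgroup */
	long pcpu[NR_CPUS][NR_STATS];      /* stand-in for lruvec_stat_cpu */
	atomic_long lruvec_stat[NR_STATS]; /* hierarchical totals */
};

struct memcg {
	struct pn nodeinfo[NR_NODES];
};

/* stand-in for memcg_flush_lruvec_page_state(memcg, cpu) */
static void flush_lruvec_state(struct memcg *memcg, int cpu)
{
	for (int nid = 0; nid < NR_NODES; nid++) {
		struct pn *pn = &memcg->nodeinfo[nid];
		long stat[NR_STATS];

		/* snapshot and clear this CPU's deltas... */
		for (int i = 0; i < NR_STATS; i++) {
			stat[i] = pn->pcpu[cpu][i];
			pn->pcpu[cpu][i] = 0;
		}
		/* ...then add them at this level and every ancestor */
		do {
			for (int i = 0; i < NR_STATS; i++)
				atomic_fetch_add(&pn->lruvec_stat[i], stat[i]);
		} while ((pn = pn->parent));
	}
}

int main(void)
{
	struct memcg parent = {0}, child = {0};

	for (int nid = 0; nid < NR_NODES; nid++)
		child.nodeinfo[nid].parent = &parent.nodeinfo[nid];

	child.nodeinfo[0].pcpu[1][2] = 5;  /* pending delta on CPU 1 */

	/* free path: flush all CPUs; hotplug would pass just one */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		flush_lruvec_state(&child, cpu);

	printf("child=%ld parent=%ld\n",
	       atomic_load(&child.nodeinfo[0].lruvec_stat[2]),
	       atomic_load(&parent.nodeinfo[0].lruvec_stat[2]));
	return 0;
}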

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5dc0bd53b64a..490357945f2c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2410,39 +2410,39 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 	mutex_unlock(&percpu_charge_mutex);
 }
 
-static int memcg_hotplug_cpu_dead(unsigned int cpu)
+static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu)
 {
-	struct memcg_stock_pcp *stock;
-	struct mem_cgroup *memcg;
-
-	stock = &per_cpu(memcg_stock, cpu);
-	drain_stock(stock);
+	int nid;
 
-	for_each_mem_cgroup(memcg) {
+	for_each_node(nid) {
+		struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
+		unsigned long stat[NR_VM_NODE_STAT_ITEMS];
+		struct batched_lruvec_stat *lstatc;
 		int i;
 
+		lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
-			int nid;
+			stat[i] = lstatc->count[i];
+			lstatc->count[i] = 0;
+		}
 
-			for_each_node(nid) {
-				struct batched_lruvec_stat *lstatc;
-				struct mem_cgroup_per_node *pn;
-				long x;
+		do {
+			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+				atomic_long_add(stat[i], &pn->lruvec_stat[i]);
+		} while ((pn = parent_nodeinfo(pn, nid)));
+	}
+}
 
-				pn = memcg->nodeinfo[nid];
-				lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
+static int memcg_hotplug_cpu_dead(unsigned int cpu)
+{
+	struct memcg_stock_pcp *stock;
+	struct mem_cgroup *memcg;
 
-				x = lstatc->count[i];
-				lstatc->count[i] = 0;
+	stock = &per_cpu(memcg_stock, cpu);
+	drain_stock(stock);
 
-				if (x) {
-					do {
-						atomic_long_add(x, &pn->lruvec_stat[i]);
-					} while ((pn = parent_nodeinfo(pn, nid)));
-				}
-			}
-		}
-	}
+	for_each_mem_cgroup(memcg)
+		memcg_flush_lruvec_page_state(memcg, cpu);
 
 	return 0;
 }
@@ -3636,27 +3636,6 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 	}
 }
 
-static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg)
-{
-	int node;
-
-	for_each_node(node) {
-		struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
-		unsigned long stat[NR_VM_NODE_STAT_ITEMS] = { 0 };
-		struct mem_cgroup_per_node *pi;
-		int cpu, i;
-
-		for_each_online_cpu(cpu)
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-				stat[i] += per_cpu(
-					pn->lruvec_stat_cpu->count[i], cpu);
-
-		for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
-			for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
-				atomic_long_add(stat[i], &pi->lruvec_stat[i]);
-	}
-}
-
 #ifdef CONFIG_MEMCG_KMEM
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
@@ -5192,12 +5171,15 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
+	int cpu;
+
 	memcg_wb_domain_exit(memcg);
 	/*
	 * Flush percpu lruvec stats to guarantee the value
	 * correctness on parent's and all ancestor levels.
	 */
-	memcg_flush_lruvec_page_state(memcg);
+	for_each_online_cpu(cpu)
+		memcg_flush_lruvec_page_state(memcg, cpu);
 	__mem_cgroup_free(memcg);
 }

--
2.30.0