From: Chris Down <chris@chrisdown.name>
Date: Thu, 12 Mar 2020
Subject: [PATCH 3/6] mm, memcg: Prevent memory.low load/store tearing
memory.low can be set concurrently with reads, which may cause the wrong
value to be propagated: for example, propagate_protected_usage() reads
c->low twice, once in the branch condition and once in the comparison
against usage, so a racing write could be observed as two different
values. Annotate the accesses with READ_ONCE()/WRITE_ONCE() and read the
value only once.
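
As a userspace illustration of why the annotations matter (a hedged
sketch, not kernel code: the READ_ONCE()/WRITE_ONCE() definitions below
only approximate the kernel's volatile-cast macros, and the
reader/writer threads are invented for the example), a plain long that
one thread stores to while another loads from is exactly the pattern
being annotated here:

#include <pthread.h>
#include <stdio.h>

/*
 * Minimal stand-ins for the kernel macros: the volatile cast forces the
 * compiler to emit exactly one full-width load or store, so it cannot
 * tear, refetch or fuse the access. For a naturally aligned long, that
 * single access is atomic on the architectures Linux supports.
 */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

static unsigned long low;	/* plays the role of counter->low */

static void *writer(void *arg)
{
	for (unsigned long i = 0; i < 1000000; i++)
		WRITE_ONCE(low, i);	/* cf. page_counter_set_low() */
	return arg;
}

static void *reader(void *arg)
{
	unsigned long snap = 0;

	for (int i = 0; i < 1000000; i++)
		snap = READ_ONCE(low);	/* cf. propagate_protected_usage() */
	printf("last snapshot: %lu\n", snap);
	return arg;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

With plain accesses instead, the compiler is allowed to split the load
or store into smaller pieces or to read `low` more than once, either of
which can expose a half-written value to the reader.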

Signed-off-by: Chris Down <chris@chrisdown.name>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: linux-mm@kvack.org
Cc: cgroups@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: kernel-team@fb.com
---
 mm/memcontrol.c   | 4 ++--
 mm/page_counter.c | 9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index aca2964ea494..c85a304fa4a1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6262,7 +6262,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 		return MEMCG_PROT_NONE;
 
 	emin = memcg->memory.min;
-	elow = memcg->memory.low;
+	elow = READ_ONCE(memcg->memory.low);
 
 	parent = parent_mem_cgroup(memcg);
 	/* No parent means a non-hierarchical mode on v1 memcg */
@@ -6291,7 +6291,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 	if (elow && parent_elow) {
 		unsigned long low_usage, siblings_low_usage;
 
-		low_usage = min(usage, memcg->memory.low);
+		low_usage = min(usage, READ_ONCE(memcg->memory.low));
 		siblings_low_usage = atomic_long_read(
 			&parent->memory.children_low_usage);
 
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 50184929b61f..18b7f779f2e2 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -17,6 +17,7 @@ static void propagate_protected_usage(struct page_counter *c,
 				      unsigned long usage)
 {
 	unsigned long protected, old_protected;
+	unsigned long low;
 	long delta;
 
 	if (!c->parent)
@@ -34,8 +35,10 @@ static void propagate_protected_usage(struct page_counter *c,
 		atomic_long_add(delta, &c->parent->children_min_usage);
 	}
 
-	if (c->low || atomic_long_read(&c->low_usage)) {
-		if (usage <= c->low)
+	low = READ_ONCE(c->low);
+
+	if (low || atomic_long_read(&c->low_usage)) {
+		if (usage <= low)
 			protected = usage;
 		else
 			protected = 0;
@@ -231,7 +234,7 @@ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
 {
 	struct page_counter *c;
 
-	counter->low = nr_pages;
+	WRITE_ONCE(counter->low, nr_pages);
 
 	for (c = counter; c; c = c->parent)
 		propagate_protected_usage(c, atomic_long_read(&c->usage));
--
2.25.1