Date: Mon, 2 Mar 2009
From: David Rientjes <rientjes@google.com>
Subject: [patch 2/2] slub: enforce cpuset restrictions for cpu slabs
Slab allocations should respect cpuset hardwall restrictions.
Otherwise, tasks in one cpuset can fill slabs allocated on mems assigned
to a disjoint cpuset.

When an allocation is attempted from a cpu slab that resides on a node
not allowed by the task's cpuset, an appropriate partial slab or a new
slab is allocated instead.
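
After this patch, node_match() reads as below (assembled from the second
hunk; the comments are annotations for this changelog, not part of the
patch itself):

static inline int node_match(struct kmem_cache_cpu *c, int node, gfp_t gfpflags)
{
#ifdef CONFIG_NUMA
	/* An explicit node request the cpu slab cannot satisfy fails here. */
	if (node != -1 && c->node != node)
		return 0;
#endif
	/* The cpu slab's node must also pass the cpuset hardwall check. */
	return cpuset_node_allowed_hardwall(c->node, gfpflags);
}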

If an allocation is intended for a particular node that the task cannot
access because of its cpuset, an allowed partial slab is used instead of
failing the allocation.
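
Concretely, get_partial() now falls back to an allowed node before
scanning for a partial slab.  Assembled from the first hunk, with the
unchanged remainder of the function elided and comments added as
annotations:

static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int searchnode = (node == -1) ? numa_node_id() : node;

	/* If the preferred node is not allowed, spread to an allowed node. */
	if (!cpuset_node_allowed_hardwall(searchnode, flags))
		searchnode = cpuset_mem_spread_node();
	page = get_partial_node(get_node(s, searchnode));
	if (page || (flags & __GFP_THISNODE))
		return page;
	...
}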

Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: David Rientjes <rientjes@google.com>
---
 mm/slub.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1353,6 +1353,8 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int searchnode = (node == -1) ? numa_node_id() : node;
 
+	if (!cpuset_node_allowed_hardwall(searchnode, flags))
+		searchnode = cpuset_mem_spread_node();
 	page = get_partial_node(get_node(s, searchnode));
 	if (page || (flags & __GFP_THISNODE))
 		return page;
@@ -1477,13 +1479,13 @@ static void flush_all(struct kmem_cache *s)
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
  */
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct kmem_cache_cpu *c, int node, gfp_t gfpflags)
 {
 #ifdef CONFIG_NUMA
 	if (node != -1 && c->node != node)
 		return 0;
 #endif
-	return 1;
+	return cpuset_node_allowed_hardwall(c->node, gfpflags);
 }
 
 /*
@@ -1517,7 +1519,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto new_slab;
 
 	slab_lock(c->page);
-	if (unlikely(!node_match(c, node)))
+	if (unlikely(!node_match(c, node, gfpflags)))
 		goto another_slab;
 
 	stat(c, ALLOC_REFILL);
@@ -1604,7 +1606,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
-	if (unlikely(!c->freelist || !node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node, gfpflags)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 

\
 
 \ /
  Last update: 2009-03-03 05:53    [from the cache]
©2003-2011 Jasper Spaans