Date: Fri, 9 Jul 2010
From: Christoph Lameter <cl@linux-foundation.org>
Subject: [S+Q2 13/19] slub: Extract hooks for memory checkers from hotpaths
Extract the code that memory checkers and other verification tools use from
the hotpaths into their own functions. This makes it easier to add new
checkers and reduces the clutter in the hotpaths.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>

---
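
(Not part of the patch, for context: the "produce no code at all" property
relies on each checker's header providing empty static inline stubs when that
checker is configured out. A minimal sketch, assuming a 2.6.35-era
include/linux/kmemleak.h; with CONFIG_DEBUG_KMEMLEAK=n the header supplies
roughly:

static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                            int min_count, unsigned long flags,
                                            gfp_t gfp)
{
}

With every call in slab_post_alloc_hook() reduced to such a stub, the hook
itself becomes an empty inline that the compiler drops from the hotpath.)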
mm/slub.c | 49 ++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 38 insertions(+), 11 deletions(-)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2010-07-07 09:14:45.000000000 -0500
+++ linux-2.6/mm/slub.c 2010-07-07 10:38:17.000000000 -0500
@@ -793,6 +793,37 @@ static void trace(struct kmem_cache *s,
 }
 
 /*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks should all produce no code at all.
+ */
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+{
+        lockdep_trace_alloc(flags);
+        might_sleep_if(flags & __GFP_WAIT);
+
+        return should_failslab(s->objsize, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+{
+        kmemcheck_slab_alloc(s, flags, object, s->objsize);
+        kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+        kmemleak_free_recursive(x, s->flags);
+}
+
+static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
+{
+        kmemcheck_slab_free(s, object, s->objsize);
+        debug_check_no_locks_freed(object, s->objsize);
+        if (!(s->flags & SLAB_DEBUG_OBJECTS))
+                debug_check_no_obj_freed(object, s->objsize);
+}
+
+/*
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache_node *n, struct page *page)
@@ -1704,10 +1735,7 @@ static __always_inline void *slab_alloc(
 
         gfpflags &= gfp_allowed_mask;
 
-        lockdep_trace_alloc(gfpflags);
-        might_sleep_if(gfpflags & __GFP_WAIT);
-
-        if (should_failslab(s->objsize, gfpflags, s->flags))
+        if (slab_pre_alloc_hook(s, gfpflags))
                 return NULL;
 
         local_irq_save(flags);
@@ -1726,8 +1754,7 @@ static __always_inline void *slab_alloc(
         if (unlikely(gfpflags & __GFP_ZERO) && object)
                 memset(object, 0, s->objsize);
 
-        kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
-        kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
+        slab_post_alloc_hook(s, gfpflags, object);
 
         return object;
 }
@@ -1856,13 +1883,13 @@ static __always_inline void slab_free(st
         struct kmem_cache_cpu *c;
         unsigned long flags;
 
-        kmemleak_free_recursive(x, s->flags);
+        slab_free_hook(s, x);
+
         local_irq_save(flags);
         c = __this_cpu_ptr(s->cpu_slab);
-        kmemcheck_slab_free(s, object, s->objsize);
-        debug_check_no_locks_freed(object, s->objsize);
-        if (!(s->flags & SLAB_DEBUG_OBJECTS))
-                debug_check_no_obj_freed(object, s->objsize);
+
+        slab_free_hook_irq(s, x);
+
         if (likely(page == c->page && c->node >= 0)) {
                 set_freepointer(s, object, c->freelist);
                 c->freelist = object;
