Subject: [PATCH 3/4] mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>
From: David Woodhouse <David.Woodhouse@intel.com>
Date: Wed, 19 May 2010

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
---
 include/linux/slub_def.h |    8 ++++++++
 mm/slub.c                |    8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0249d41..55695c8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {

 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
diff --git a/mm/slub.c b/mm/slub.c
index d2a54fe..c874c3e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -157,14 +157,6 @@
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
 		SLAB_CACHE_DMA | SLAB_NOTRACK)

-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT 16
 #define OO_MASK ((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
--
1.6.6.1
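
For context (not part of the patch itself): with these fallback definitions
living in <linux/slub_def.h>, an architecture that needs a stricter minimum
alignment, for example so that kmalloc() buffers handed to non-coherent DMA
engines never share a cache line with unrelated data, can define the macros
in its own headers and the #ifndef guards above will leave that choice
alone. A minimal sketch of such an override, assuming a hypothetical
<asm/cache.h> with 32-byte cache lines; the header guard, path and values
are illustrative only:

	#ifndef __ASM_CACHE_H
	#define __ASM_CACHE_H

	#define L1_CACHE_SHIFT	5	/* 32-byte cache lines (example value) */
	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

	/*
	 * kmalloc() buffers may be used for DMA, so keep them cache-line
	 * aligned; ordinary slab objects stay at 8-byte alignment.
	 */
	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
	#define ARCH_SLAB_MINALIGN	8

	#endif /* __ASM_CACHE_H */

Where no such definition exists, the new #ifndef blocks fall back to
__alignof__(unsigned long long), matching the defaults previously supplied
in mm/slub.c.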