Subject: Re: [PATCH] ARCH_SLAB_MINALIGN for 2.6.10-rc3

Hi Manfred,

On Sun, Dec 12, 2004 at 11:48:02AM +0100, Manfred Spraul wrote:
> Sorry for the late reply; attached is my proposal:
> I've added the ARCH_SLAB_MINALIGN flag, together with some documentation
> and a small restructuring.
> What do you think?
>
Looks fine to me; I just tested it on sh64 and it works OK.

> #ifndef CONFIG_DEBUG_SLAB
> #define ARCH_SLAB_MINALIGN 8
> #endif
>
Right now ARCH_KMALLOC_MINALIGN is set unconditionally on sh64, so with
the #ifndef CONFIG_DEBUG_SLAB wrapper it seems we lose some debug
features there. However, even with ARCH_SLAB_MINALIGN set to a non-zero
value, redzoning and slab poisoning still seem to work to some extent.
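Just to illustrate the difference: the unconditional variant, matching
how ARCH_KMALLOC_MINALIGN is handled on sh64 today, amounts to a
one-line define in the arch headers, along these lines (only a sketch
of the idea, not the actual sh64 code, and the value is illustrative):

/* Hypothetical arch header snippet: define the minimum slab object
 * alignment unconditionally, so that CONFIG_DEBUG_SLAB kernels keep
 * the forced alignment as well. */
#define ARCH_SLAB_MINALIGN 8

Having the debug features even partially working with such a define in
place has already proven useful here.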

For instance, I've been using the patch below, and it helped pin down a
rather irritating bug in the sh64 switch_to().

Is there any reason not to tie redzoning to ARCH_SLAB_MINALIGN (and set
it to BYTES_PER_WORD by default)? As far as I can tell, the only thing
BYTES_PER_WORD is needed for in the redzoning case is determining where
to place the second redzone marker. I can see why this would be
problematic with dynamic slab alignment, but when the alignment is
fixed at compile time, there shouldn't be anything prohibiting a
non-BYTES_PER_WORD value.
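To make the placement concrete, here is a quick userspace sketch, not
kernel code, of where the debug fields land once everything is scaled
by ARCH_SLAB_MINALIGN; the 40-byte object size and the value 8 are made
up for illustration, and the offsets mirror dbg_redzone1(),
dbg_redzone2() and dbg_userword() from the patch below:

#include <stdio.h>

#define ARCH_SLAB_MINALIGN 8

int main(void)
{
	/* Hypothetical SLAB_RED_ZONE|SLAB_STORE_USER cache with a 40-byte
	 * object; dbghead and objsize grow exactly as in the patch below. */
	unsigned int dbghead = ARCH_SLAB_MINALIGN;	/* front redzone */
	unsigned int objsize = dbghead + 40 + 2*ARCH_SLAB_MINALIGN;

	printf("redzone1 at offset %u\n", dbghead - ARCH_SLAB_MINALIGN);   /* 0  */
	printf("object   at offset %u\n", dbghead);                        /* 8  */
	printf("redzone2 at offset %u\n", objsize - 2*ARCH_SLAB_MINALIGN); /* 48 */
	printf("caller   at offset %u\n", objsize - 1*ARCH_SLAB_MINALIGN); /* 56 */
	return 0;
}

Every field stays on an ARCH_SLAB_MINALIGN boundary, whereas with
BYTES_PER_WORD the two trailing words can land on offsets that violate
the forced alignment.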

We can live with the unaligned accesses in the CONFIG_DEBUG_SLAB case,
but it would still be nice to at least have some partial redzoning and
poisoning with a forced alignment.

--- linux-2.6.10-rc3/mm/slab.c	2004-12-05 19:05:39.000000000 +0200
+++ linux-sh64-2.6.10-rc3/mm/slab.c	2004-12-08 16:56:25.000000000 +0200
@@ -135,6 +135,10 @@
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
 
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN BYTES_PER_WORD
+#endif
+
 /* Legal flag mask for kmem_cache_create(). */
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
@@ -404,14 +408,16 @@
 
 /* memory layout of objects:
  * 0		: objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
- *		the end of an object is aligned with the end of the real
- *		allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
- *		redzone word.
+ * 0 .. cachep->dbghead - ARCH_SLAB_MINALIGN - 1: padding. This ensures that
+ *		the end of an object is aligned with the end of the real
+ *		allocation. Catches writes behind the end of the allocation.
+ * cachep->dbghead - ARCH_SLAB_MINALIGN .. cachep->dbghead - 1:
+ *		redzone word.
  * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->objsize - 2* ARCH_SLAB_MINALIGN:
+ *		redzone word [ARCH_SLAB_MINALIGN long]
+ * cachep->objsize - 1* ARCH_SLAB_MINALIGN:
+ *		last caller address [ARCH_SLAB_MINALIGN long]
  */
 static int obj_dbghead(kmem_cache_t *cachep)
 {
@@ -426,21 +432,21 @@
 static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
-	return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+	return (unsigned long*) (objp+obj_dbghead(cachep)-ARCH_SLAB_MINALIGN);
 }
 
 static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD);
-	return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD);
+		return (unsigned long*) (objp+cachep->objsize-2*ARCH_SLAB_MINALIGN);
+	return (unsigned long*) (objp+cachep->objsize-ARCH_SLAB_MINALIGN);
 }
 
 static void **dbg_userword(kmem_cache_t *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void**)(objp+cachep->objsize-BYTES_PER_WORD);
+	return (void**)(objp+cachep->objsize-ARCH_SLAB_MINALIGN);
 }
 
 #else
@@ -1204,7 +1210,7 @@
 	 * above the next power of two: caches with object sizes just above a
 	 * power of two have a significant amount of internal fragmentation.
 	 */
-	if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD)))
+	if ((size < 4096 || fls(size-1) == fls(size-1+3*ARCH_SLAB_MINALIGN)))
 		flags |= SLAB_RED_ZONE|SLAB_STORE_USER;
 	if (!(flags & SLAB_DESTROY_BY_RCU))
 		flags |= SLAB_POISON;
@@ -1237,7 +1243,7 @@
 			while (size <= align/2)
 				align /= 2;
 		} else {
-			align = BYTES_PER_WORD;
+			align = ARCH_SLAB_MINALIGN;
 		}
 	}
 
@@ -1255,25 +1261,25 @@
 		size += (BYTES_PER_WORD-1);
 		size &= ~(BYTES_PER_WORD-1);
 	}
-	
+
 #if DEBUG
 	cachep->reallen = size;
 
 	if (flags & SLAB_RED_ZONE) {
 		/* redzoning only works with word aligned caches */
-		align = BYTES_PER_WORD;
+		align = ARCH_SLAB_MINALIGN;
 
 		/* add space for red zone words */
-		cachep->dbghead += BYTES_PER_WORD;
-		size += 2*BYTES_PER_WORD;
+		cachep->dbghead += ARCH_SLAB_MINALIGN;
+		size += 2*ARCH_SLAB_MINALIGN;
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires word alignment and
 		 * one word storage behind the end of the real
 		 * object.
 		 */
-		align = BYTES_PER_WORD;
-		size += BYTES_PER_WORD;
+		align = ARCH_SLAB_MINALIGN;
+		size += ARCH_SLAB_MINALIGN;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size > 128 && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
@@ -2292,7 +2298,7 @@
 {
 	unsigned long addr = (unsigned long) ptr;
 	unsigned long min_addr = PAGE_OFFSET;
-	unsigned long align_mask = BYTES_PER_WORD-1;
+	unsigned long align_mask = ARCH_SLAB_MINALIGN-1;
 	unsigned long size = cachep->objsize;
 	struct page *page;