Subject: [patch 33/41] cpu alloc: Remove slub fields
Remove the fields in kmem_cache_cpu that were used to cache data from
kmem_cache back when the two structures lived in different cachelines. The
cacheline that holds the per cpu array pointer now also holds these values,
so the size of kmem_cache_cpu can be cut almost in half.
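
For reference, a sketch of kmem_cache_cpu as it reads after this patch,
assuming the fields visible in the slub_def.h hunk below make up the whole
structure:

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};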

The get_freepointer() and set_freepointer() functions, which were previously
intended only for the slow path, are now also useful on the hot path since
accessing the offset field no longer touches an additional cacheline. As a
result the freepointer of an object is set in a consistent way throughout
SLUB.
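
A minimal sketch of the two helpers: get_freepointer() appears verbatim in
the mm/slub.c hunk below, and set_freepointer() is assumed to be the
symmetric store at the same offset:

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	/* Read the pointer to the next free object stored inside this object. */
	return *(void **)(object + s->offset);
}

/* Assumed counterpart: store the next-free pointer into the object. */
static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}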

Signed-off-by: Christoph Lameter <clameter@sgi.com>
---
 include/linux/slub_def.h |    3 --
 mm/slub.c                |   48 +++++++++++++++--------------------------------
 2 files changed, 16 insertions(+), 35 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h 2008-05-28 00:00:27.000000000 -0700
+++ linux-2.6/include/linux/slub_def.h 2008-05-28 00:00:33.000000000 -0700
@@ -36,8 +36,6 @@
void **freelist; /* Pointer to first free per cpu object */
struct page *page; /* The slab from which we are allocating */
int node; /* The node of the page (or -1 for debug) */
- unsigned int offset; /* Freepointer offset (in word units) */
- unsigned int objsize; /* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c 2008-05-28 00:00:27.000000000 -0700
+++ linux-2.6/mm/slub.c 2008-05-28 00:00:33.000000000 -0700
@@ -276,13 +276,6 @@
return 1;
}

-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
@@ -1447,10 +1440,10 @@

/* Retrieve object from cpu_freelist */
object = c->freelist;
- c->freelist = c->freelist[c->offset];
+ c->freelist = get_freepointer(s, c->freelist);

/* And put onto the regular freelist */
- object[c->offset] = page->freelist;
+ set_freepointer(s, object, page->freelist);
page->freelist = object;
page->inuse--;
}
@@ -1555,7 +1548,7 @@
if (unlikely(SlabDebug(c->page)))
goto debug;

- c->freelist = object[c->offset];
+ c->freelist = get_freepointer(s, object);
c->page->inuse = c->page->objects;
c->page->freelist = NULL;
c->node = page_to_nid(c->page);
@@ -1599,7 +1592,7 @@
goto another_slab;

c->page->inuse++;
- c->page->freelist = object[c->offset];
+ c->page->freelist = get_freepointer(s, object);
c->node = -1;
goto unlock_out;
}
@@ -1629,7 +1622,7 @@
object = __slab_alloc(s, gfpflags, node, addr, c);

else {
- c->freelist = object[c->offset];
+ c->freelist = get_freepointer(s, object);
stat(c, ALLOC_FASTPATH);
}
local_irq_restore(flags);
@@ -1663,7 +1656,7 @@
* handling required then we can return immediately.
*/
static void __slab_free(struct kmem_cache *s, struct page *page,
- void *x, void *addr, unsigned int offset)
+ void *x, void *addr)
{
void *prior;
void **object = (void *)x;
@@ -1677,7 +1670,8 @@
goto debug;

checks_ok:
- prior = object[offset] = page->freelist;
+ prior = page->freelist;
+ set_freepointer(s, object, prior);
page->freelist = object;
page->inuse--;

@@ -1741,15 +1735,15 @@

local_irq_save(flags);
c = __THIS_CPU(s->cpu_slab);
- debug_check_no_locks_freed(object, c->objsize);
+ debug_check_no_locks_freed(object, s->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
if (likely(page == c->page && c->node >= 0)) {
- object[c->offset] = c->freelist;
+ set_freepointer(s, object, c->freelist);
c->freelist = object;
stat(c, FREE_FASTPATH);
} else
- __slab_free(s, page, x, addr, c->offset);
+ __slab_free(s, page, x, addr);

local_irq_restore(flags);
}
@@ -1936,8 +1930,6 @@
c->page = NULL;
c->freelist = NULL;
c->node = 0;
- c->offset = s->offset / sizeof(void *);
- c->objsize = s->objsize;
#ifdef CONFIG_SLUB_STATS
memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
#endif
@@ -2993,8 +2985,6 @@
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
- int cpu;
-
s->refcount++;
/*
* Adjust the object sizes so that we clear
@@ -3002,13 +2992,6 @@
*/
s->objsize = max(s->objsize, (int)size);

- /*
- * And then we need to update the object size in the
- * per cpu structures
- */
- for_each_online_cpu(cpu)
- CPU_PTR(s->cpu_slab, cpu)->objsize = s->objsize;
-
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
up_write(&slub_lock);

--

