Subject: [Patch 001/002] extract kmem_cache_shrink
Make kmem_cache_shrink_node() available as a callback routine for the memory
hotplug notifier. This just extracts the per-node part of kmem_cache_shrink().
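For context, the intended caller is the MEM_GOING_OFFLINE path of the memory
hotplug notifier (patch 002/002, not shown here). A rough sketch of how such a
callback could use the new interface is below; the callback name and the
memory_notify argument are assumptions for illustration only, while
kmem_cache_shrink_node(), slub_lock and slab_caches come from mm/slub.c:

	static int slab_mem_going_offline_callback(void *arg)
	{
		struct memory_notify *marg = arg;
		int offline_node = marg->status_change_nid;
		struct kmem_cache *s;

		if (offline_node < 0)
			/* No node-specific memory is going away. */
			return 0;

		down_read(&slub_lock);
		list_for_each_entry(s, &slab_caches, list)
			/* Drop empty slabs on the node before it goes away. */
			kmem_cache_shrink_node(s, offline_node);
		up_read(&slub_lock);

		return 0;
	}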

Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>

---
mm/slub.c | 111 ++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 61 insertions(+), 50 deletions(-)

Index: current/mm/slub.c
===================================================================
--- current.orig/mm/slub.c 2007-10-11 20:30:45.000000000 +0900
+++ current/mm/slub.c 2007-10-11 21:58:47.000000000 +0900
@@ -2626,6 +2626,56 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static inline void __kmem_cache_shrink_node(struct kmem_cache *s, int node,
+					struct list_head *slabs_by_inuse)
+{
+	struct kmem_cache_node *n;
+	int i;
+	struct page *page;
+	struct page *t;
+	unsigned long flags;
+
+	n = get_node(s, node);
+
+	if (!n->nr_partial)
+		return;
+
+	for (i = 0; i < s->objects; i++)
+		INIT_LIST_HEAD(slabs_by_inuse + i);
+
+	spin_lock_irqsave(&n->list_lock, flags);
+
+	/*
+	 * Build lists indexed by the items in use in each slab.
+	 *
+	 * Note that concurrent frees may occur while we hold the
+	 * list_lock. page->inuse here is the upper limit.
+	 */
+	list_for_each_entry_safe(page, t, &n->partial, lru) {
+		if (!page->inuse && slab_trylock(page)) {
+			/*
+			 * Must hold slab lock here because slab_free
+			 * may have freed the last object and be
+			 * waiting to release the slab.
+			 */
+			list_del(&page->lru);
+			n->nr_partial--;
+			slab_unlock(page);
+			discard_slab(s, page);
+		} else
+			list_move(&page->lru, slabs_by_inuse + page->inuse);
+	}
+
+	/*
+	 * Rebuild the partial list with the slabs filled up most
+	 * first and the least used slabs at the end.
+	 */
+	for (i = s->objects - 1; i >= 0; i--)
+		list_splice(slabs_by_inuse + i, n->partial.prev);
+
+	spin_unlock_irqrestore(&n->list_lock, flags);
+}
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -2636,68 +2686,29 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int kmem_cache_shrink(struct kmem_cache *s)
+int kmem_cache_shrink_node(struct kmem_cache *s, int node)
 {
-	int node;
-	int i;
-	struct kmem_cache_node *n;
-	struct page *page;
-	struct page *t;
 	struct list_head *slabs_by_inuse =
 		kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
-	unsigned long flags;
 
 	if (!slabs_by_inuse)
 		return -ENOMEM;
 
 	flush_all(s);
-	for_each_node_state(node, N_NORMAL_MEMORY) {
-		n = get_node(s, node);
-
-		if (!n->nr_partial)
-			continue;
-
-		for (i = 0; i < s->objects; i++)
-			INIT_LIST_HEAD(slabs_by_inuse + i);
-
-		spin_lock_irqsave(&n->list_lock, flags);
-
-		/*
-		 * Build lists indexed by the items in use in each slab.
-		 *
-		 * Note that concurrent frees may occur while we hold the
-		 * list_lock. page->inuse here is the upper limit.
-		 */
-		list_for_each_entry_safe(page, t, &n->partial, lru) {
-			if (!page->inuse && slab_trylock(page)) {
-				/*
-				 * Must hold slab lock here because slab_free
-				 * may have freed the last object and be
-				 * waiting to release the slab.
-				 */
-				list_del(&page->lru);
-				n->nr_partial--;
-				slab_unlock(page);
-				discard_slab(s, page);
-			} else {
-				list_move(&page->lru,
-					slabs_by_inuse + page->inuse);
-			}
-		}
-
-		/*
-		 * Rebuild the partial list with the slabs filled up most
-		 * first and the least used slabs at the end.
-		 */
-		for (i = s->objects - 1; i >= 0; i--)
-			list_splice(slabs_by_inuse + i, n->partial.prev);
-
-		spin_unlock_irqrestore(&n->list_lock, flags);
-	}
+	if (node >= 0)
+		__kmem_cache_shrink_node(s, node, slabs_by_inuse);
+	else
+		for_each_node_state(node, N_NORMAL_MEMORY)
+			__kmem_cache_shrink_node(s, node, slabs_by_inuse);
 
 	kfree(slabs_by_inuse);
 	return 0;
 }
+
+int kmem_cache_shrink(struct kmem_cache *s)
+{
+	return kmem_cache_shrink_node(s, -1);
+}
 EXPORT_SYMBOL(kmem_cache_shrink);
 
 /********************************************************************
--
Yasunori Goto


