Subject: Re: [Bug #13319] Page allocation failures with b43 and p54usb
Hi Mel,

On Mon, 2009-06-08 at 15:12 +0100, Mel Gorman wrote:
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 65ffda5..b5acf18 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -1565,6 +1565,8 @@ new_slab:
> >  		c->page = new;
> >  		goto load_freelist;
> >  	}
> > +	printk(KERN_WARNING "SLUB: unable to satisfy allocation for cache %s (size=%d, node=%d, gfp=%x)\n",
> > +		s->name, s->size, node, gfpflags);
>
> size could be almost anything here for a casual reader. You are
> outputting the size of the object plus its metadata, so the name should
> reflect that. I think it would be better to output objsize= and the
> object size without the metadata overhead. What do you think?
>
> In addition, include how many objects there are per-slab and include what
> the order is being passed to the page allocator when allocating new slabs.
> Would that be enough to determine if fallback-to-smaller orders occurred?
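
(For reference, allocate_slab() already tries the cache's default order
first and falls back to the minimum order when the higher-order page
allocation fails. Roughly, simplified from memory rather than quoted
verbatim:)

	static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
	{
		struct kmem_cache_order_objects oo = s->oo;
		struct page *page;

		flags |= s->allocflags;

		/* Try the default (higher) order, but let it fail quietly... */
		page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY,
						node, oo);
		if (unlikely(!page)) {
			/* ...so we can retry with the minimum order. */
			oo = s->min;
			page = alloc_slab_page(flags, node, oo);
			if (!page)
				return NULL;
		}

		/* ... remainder sets up page->objects etc. ... */
		return page;
	}

So reporting oo_order(s->oo) and oo_order(s->min) alongside the per-node
counts should make it visible whether that fallback path is involved.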

So how about something like this then?

Pekka

diff --git a/mm/slub.c b/mm/slub.c
index 65ffda5..a03dbe8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1484,6 +1484,58 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 	return 1;
 }
 
+static int count_free(struct page *page)
+{
+	return page->objects - page->inuse;
+}
+
+static unsigned long count_partial(struct kmem_cache_node *n,
+					int (*get_count)(struct page *))
+{
+	unsigned long flags;
+	unsigned long x = 0;
+	struct page *page;
+
+	spin_lock_irqsave(&n->list_lock, flags);
+	list_for_each_entry(page, &n->partial, lru)
+		x += get_count(page);
+	spin_unlock_irqrestore(&n->list_lock, flags);
+	return x;
+}
+
+static noinline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+{
+	int node;
+
+	printk(KERN_WARNING
+		"SLUB: Unable to allocate memory on node %d (gfp=%x)\n",
+		nid, gfpflags);
+	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
+		"default order: %d, min order: %d\n", s->name, s->objsize,
+		s->size, oo_order(s->oo), oo_order(s->min));
+
+	for_each_online_node(node) {
+		struct kmem_cache_node *n = get_node(s, node);
+		unsigned long nr_partials;
+		unsigned long nr_slabs;
+		unsigned long nr_objs;
+		unsigned long nr_free;
+
+		if (!n)
+			continue;
+
+		nr_partials = n->nr_partial;
+		nr_slabs = atomic_long_read(&n->nr_slabs);
+		nr_objs = atomic_long_read(&n->total_objects);
+		nr_free = count_partial(n, count_free);
+
+		printk(KERN_WARNING
+			"  node %d: partials: %ld, slabs: %ld, objs: %ld, free: %ld\n",
+			node, nr_partials, nr_slabs, nr_objs, nr_free);
+	}
+}
+
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
@@ -1565,6 +1617,7 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
+	slab_out_of_memory(s, gfpflags, node);
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -3318,20 +3371,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #ifdef CONFIG_SLUB_DEBUG
-static unsigned long count_partial(struct kmem_cache_node *n,
-					int (*get_count)(struct page *))
-{
-	unsigned long flags;
-	unsigned long x = 0;
-	struct page *page;
-
-	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->partial, lru)
-		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
-	return x;
-}
-
 static int count_inuse(struct page *page)
 {
 	return page->inuse;
@@ -3342,11 +3381,6 @@ static int count_total(struct page *page)
 	return page->objects;
 }
 
-static int count_free(struct page *page)
-{
-	return page->objects - page->inuse;
-}
-
 static int validate_slab(struct kmem_cache *s, struct page *page,
 						unsigned long *map)
 {
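
With the patch applied, a failure would then be reported along these
lines (numbers made up for illustration, e.g. for an 8 KB GFP_ATOMIC
allocation like the p54usb one):

	SLUB: Unable to allocate memory on node -1 (gfp=20)
	  cache: kmalloc-8192, object size: 8192, buffer size: 8192, default order: 3, min order: 1
	  node 0: partials: 3, slabs: 12, objs: 46, free: 2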


