    Subject: Re: [PATCH] [3/4] SLAB: Separate node initialization into separate function
    On Wed, 3 Feb 2010, Andi Kleen wrote:

    >
    > No functional changes.
    >
    > Needed for next patch.
    >
    > Signed-off-by: Andi Kleen <ak@linux.intel.com>
    >
    > ---
    > mm/slab.c | 34 +++++++++++++++++++++-------------
    > 1 file changed, 21 insertions(+), 13 deletions(-)
    >
    > Index: linux-2.6.33-rc3-ak/mm/slab.c
    > ===================================================================
    > --- linux-2.6.33-rc3-ak.orig/mm/slab.c
    > +++ linux-2.6.33-rc3-ak/mm/slab.c
    > @@ -1171,19 +1171,9 @@ free_array_cache:
    > }
    > }
    >
    > -static int __cpuinit cpuup_prepare(long cpu)
    > +static int slab_node_prepare(int node)
    > {
    > struct kmem_cache *cachep;
    > - struct kmem_list3 *l3 = NULL;
    > - int node = cpu_to_node(cpu);
    > - const int memsize = sizeof(struct kmem_list3);
    > -
    > - /*
    > - * We need to do this right in the beginning since
    > - * alloc_arraycache's are going to use this list.
    > - * kmalloc_node allows us to add the slab to the right
    > - * kmem_list3 and not this cpu's kmem_list3
    > - */
    >
    > list_for_each_entry(cachep, &cache_chain, next) {
    > /*

    As Christoph mentioned, this patch is out of order with the previous one
    in the series: slab_node_prepare() is called in that previous patch from
    a memory hotplug callback without cache_chain_mutex held (currently the
    cpu hotplug callback takes the mutex before calling cpuup_prepare()). So
    slab_node_prepare() should document that it requires the mutex, and the
    memory hotplug callback in the previous patch should take it.
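
    Something like the following, perhaps (untested sketch; the callback
    name slab_memory_callback and its MEM_GOING_ONLINE handling are my
    guess at what the previous patch adds, not quoted from it):

	static int slab_node_prepare(int node)
	{
		struct kmem_cache *cachep;

		/* Walking cache_chain is only safe under cache_chain_mutex. */
		BUG_ON(!mutex_is_locked(&cache_chain_mutex));

		list_for_each_entry(cachep, &cache_chain, next) {
			/* ... per-node l3 allocation as in this patch ... */
		}
		return 0;
	}

	/* In the previous patch's memory hotplug callback: */
	static int slab_memory_callback(struct notifier_block *self,
					unsigned long action, void *arg)
	{
		struct memory_notify *mnb = arg;
		int ret = 0;

		switch (action) {
		case MEM_GOING_ONLINE:
			mutex_lock(&cache_chain_mutex);
			ret = slab_node_prepare(mnb->status_change_nid);
			mutex_unlock(&cache_chain_mutex);
			break;
		default:
			break;
		}
		/* slab_node_prepare() returns -1 on allocation failure. */
		if (ret < 0)
			return notifier_from_errno(-ENOMEM);
		return NOTIFY_OK;
	}

    That way the locking requirement is explicit and identical on both the
    cpu and memory hotplug paths.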

    > @@ -1192,9 +1182,10 @@ static int __cpuinit cpuup_prepare(long
    > * node has not already allocated this
    > */
    > if (!cachep->nodelists[node]) {
    > - l3 = kmalloc_node(memsize, GFP_KERNEL, node);
    > + struct kmem_list3 *l3;
    > + l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
    > if (!l3)
    > - goto bad;
    > + return -1;
    > kmem_list3_init(l3);
    > l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
    > ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
    > @@ -1213,6 +1204,23 @@ static int __cpuinit cpuup_prepare(long
    > cachep->batchcount + cachep->num;
    > spin_unlock_irq(&cachep->nodelists[node]->list_lock);
    > }
    > + return 0;
    > +}
    > +
    > +static int __cpuinit cpuup_prepare(long cpu)
    > +{
    > + struct kmem_cache *cachep;
    > + struct kmem_list3 *l3 = NULL;
    > + int node = cpu_to_node(cpu);
    > +
    > + /*
    > + * We need to do this right in the beginning since
    > + * alloc_arraycache's are going to use this list.
    > + * kmalloc_node allows us to add the slab to the right
    > + * kmem_list3 and not this cpu's kmem_list3
    > + */
    > + if (slab_node_prepare(node) < 0)
    > + goto bad;
    >
    > /*
    > * Now we can go ahead with allocating the shared arrays and

