Subject: Re: [PATCH] stack and rcu interaction bug in smp_call_function_mask()
On Sunday 10 August 2008 16:24, Nick Piggin wrote:

> I'd suggest something like the attached (untested) patch as the simple
> fix for now.
>
> I expect the benefits from the less synchronized, multiple-in-flight-data
> global queue will still outweigh the costs of dynamic allocations. But
> if worst comes to worst, we can just go back to a globally synchronous,
> one-at-a-time implementation, though that would be pretty sad!

It just needed a little fix, and it appears to boot now. I think it does
the right thing...
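
To make the lifetime problem concrete for anyone reading along, here is a
simplified userspace analogue of what goes wrong, not the kernel code; the
queue, struct, and function names are all illustrative:

/*
 * Simplified userspace analogue of the lifetime bug (not the kernel
 * code; all names are illustrative).  A producer publishes a node that
 * lives on its own stack, spins until the consumer marks it done, then
 * returns and lets the stack frame be reused.  The consumer, however,
 * may still dereference the node after setting "done" -- exactly the
 * window the quiesce step in the patch closes.
 */
#include <pthread.h>
#include <stdio.h>

struct csd {
	struct csd *next;	/* linkage on the shared queue */
	void (*func)(void *);
	void *info;
	volatile int done;	/* producer spins on this */
};

static struct csd *volatile queue;	/* one queue shared by all "CPUs" */
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void noop(void *unused)
{
}

static void *consumer(void *unused)
{
	for (;;) {
		struct csd *c;

		pthread_mutex_lock(&qlock);
		c = queue;
		if (c)
			queue = c->next;
		pthread_mutex_unlock(&qlock);
		if (!c)
			continue;

		c->func(c->info);
		c->done = 1;
		/*
		 * RACE: any touch of *c from here on, such as the read
		 * below, can hit a stack frame the producer has
		 * already reused.
		 */
		(void)c->next;
	}
	return NULL;
}

/* Synchronous call with on-stack data, like the CSD_FLAG_WAIT path. */
static void call_sync(void (*func)(void *), void *info)
{
	struct csd c = { .func = func, .info = info, .done = 0 };

	pthread_mutex_lock(&qlock);
	c.next = queue;
	queue = &c;
	pthread_mutex_unlock(&qlock);

	while (!c.done)
		;	/* wait, then let the stack frame die */
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, consumer, NULL);
	for (i = 0; i < 100000; i++)
		call_sync(noop, NULL);
	puts("survived; the race is timing-dependent and may not fire");
	return 0;
}
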
Index: linux-2.6/kernel/smp.c
===================================================================
--- linux-2.6.orig/kernel/smp.c
+++ linux-2.6/kernel/smp.c
@@ -260,6 +260,41 @@ void __smp_call_function_single(int cpu,
 	generic_exec_single(cpu, data);
 }
 
+/* Dummy function */
+static void quiesce_dummy(void *unused)
+{
+}
+
+/*
+ * Ensure stack based data used in call function mask is safe to free.
+ *
+ * This is needed by smp_call_function_mask when using on-stack data, because
+ * a single call function queue is shared by all CPUs, and any CPU may pick up
+ * the data item on the queue at any time before it is deleted. So we need to
+ * ensure that all CPUs have transitioned through a quiescent state after
+ * this call.
+ *
+ * This is a very slow function, implemented by sending synchronous IPIs to
+ * all possible CPUs. For this reason, we have to alloc data rather than use
+ * stack based data even in the case of synchronous calls. The stack based
+ * data is then just used for deadlock/oom fallback which will be very rare.
+ *
+ * If a faster scheme can be made, we could go back to preferring stack based
+ * data -- the data allocation/free is non-zero cost.
+ */
+static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
+{
+	struct call_single_data data;
+	int cpu;
+
+	data.func = quiesce_dummy;
+	data.info = NULL;
+	data.flags = CSD_FLAG_WAIT;
+
+	for_each_cpu_mask(cpu, mask)
+		generic_exec_single(cpu, &data);
+}
+
 /**
  * smp_call_function_mask(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on.
@@ -285,6 +320,7 @@ int smp_call_function_mask(cpumask_t mas
 	cpumask_t allbutself;
 	unsigned long flags;
 	int cpu, num_cpus;
+	int slowpath = 0;
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -306,15 +342,16 @@ int smp_call_function_mask(cpumask_t mas
 		return smp_call_function_single(cpu, func, info, wait);
 	}
 
-	if (!wait) {
-		data = kmalloc(sizeof(*data), GFP_ATOMIC);
-		if (data)
-			data->csd.flags = CSD_FLAG_ALLOC;
-	}
-	if (!data) {
+	data = kmalloc(sizeof(*data), GFP_ATOMIC);
+	if (data) {
+		data->csd.flags = CSD_FLAG_ALLOC;
+		if (wait)
+			data->csd.flags |= CSD_FLAG_WAIT;
+	} else {
 		data = &d;
 		data->csd.flags = CSD_FLAG_WAIT;
 		wait = 1;
+		slowpath = 1;
 	}
 
 	spin_lock_init(&data->lock);
@@ -331,8 +368,11 @@ int smp_call_function_mask(cpumask_t mas
 	arch_send_call_function_ipi(mask);
 
 	/* optionally wait for the CPUs to complete */
-	if (wait)
+	if (wait) {
 		csd_flag_wait(&data->csd);
+		if (unlikely(slowpath))
+			smp_call_function_mask_quiesce_stack(allbutself);
+	}
 
 	return 0;
 }
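
For reference, the sort of synchronous caller this slowpath has to keep
safe looks something like the following (a hypothetical caller, for
illustration only, not part of the patch):

/* Hypothetical caller, for illustration only. */
static void drain_local_counters(void *unused)
{
	/* runs on each target CPU, in IPI context */
}

static void drain_counters(cpumask_t mask)
{
	/*
	 * wait == 1: we rely on not returning until no CPU can still
	 * touch the call data.  If the kmalloc in
	 * smp_call_function_mask() fails, that data is on our stack,
	 * which is why the quiesce step above matters.
	 */
	smp_call_function_mask(mask, drain_local_counters, NULL, 1);
}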