Subject: [34-longterm 228/260] x86, kexec: Make sure to stop all CPUs before exiting the kernel
From: Alok Kataria <akataria@vmware.com>

commit 76fac077db6b34e2c6383a7b4f3f4f7b7d06d8ce upstream.

x86 smp_ops now has a new op, stop_other_cpus, which takes a "wait"
parameter. This allows the caller to specify whether it wants to wait
until all the CPUs have processed the stop IPI. Waiting is required
specifically for the kexec case, where all the CPUs must be stopped
before the new kernel is started. We now wait for the CPUs to stop in
all cases except for panic/kdump, where we expect things to be broken
and are doing our best to make them work anyway.

This patch fixes a legitimate regression introduced during 2.6.30 by
commit 4ef702c10b5df18ab04921fc252c26421d4d6c75.

Signed-off-by: Alok N Kataria <akataria@vmware.com>
LKML-Reference: <1286833028.1372.20.camel@ank32.eng.vmware.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
---
 arch/x86/include/asm/smp.h |    9 +++++++--
 arch/x86/kernel/reboot.c   |    2 +-
 arch/x86/kernel/smp.c      |   15 +++++++++------
 arch/x86/xen/enlighten.c   |    2 +-
 arch/x86/xen/smp.c         |    6 +++---
 5 files changed, 21 insertions(+), 13 deletions(-)
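
[Note below the cut line, not for application.] A minimal userspace sketch of the
wait/timeout semantics added by this patch, in case the short-circuit in the while
condition is not obvious at first glance. The model_* helpers, the fake CPU countdown
and the 400000-microsecond acknowledge interval are invented purely for illustration;
only the shape of the loop mirrors native_stop_other_cpus() in the diff below.

/* build: gcc -Wall -o stop-model stop-model.c  (userspace model, not kernel code) */
#include <stdio.h>

#define USEC_PER_SEC	1000000UL

static unsigned long cpus_left = 3;	/* other CPUs that have not yet stopped */

/* stand-in for num_online_cpus(): ourselves plus the stragglers */
static unsigned long model_num_online_cpus(void)
{
	return 1 + cpus_left;
}

/* stand-in for udelay(1): one CPU "acknowledges" every 400000 fake microseconds */
static void model_udelay(unsigned long *elapsed)
{
	if (++(*elapsed) % 400000 == 0 && cpus_left)
		cpus_left--;
}

/* same loop shape as the patched native_stop_other_cpus() */
static void model_stop_other_cpus(int wait)
{
	unsigned long timeout = USEC_PER_SEC;
	unsigned long elapsed = 0;

	/* with wait != 0 the timeout is never consulted; with wait == 0
	 * we give up after roughly one second */
	while (model_num_online_cpus() > 1 && (wait || timeout--))
		model_udelay(&elapsed);

	printf("wait=%d: %lu other CPU(s) still online after %lu us\n",
	       wait, model_num_online_cpus() - 1, elapsed);
}

int main(void)
{
	cpus_left = 3;
	model_stop_other_cpus(0);	/* smp_send_stop(): best effort, may time out */
	cpus_left = 3;
	model_stop_other_cpus(1);	/* stop_other_cpus(): kexec path, waits for all */
	return 0;
}

With wait=0 the sketch gives up after about a second with stragglers still "online";
with wait=1 it spins until every modelled CPU has acknowledged, which is the behaviour
the kexec path needs.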

diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4cfc908..4c2f63c 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -50,7 +50,7 @@ struct smp_ops {
 	void (*smp_prepare_cpus)(unsigned max_cpus);
 	void (*smp_cpus_done)(unsigned max_cpus);
 
-	void (*smp_send_stop)(void);
+	void (*stop_other_cpus)(int wait);
 	void (*smp_send_reschedule)(int cpu);
 
 	int (*cpu_up)(unsigned cpu);
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
 {
-	smp_ops.smp_send_stop();
+	smp_ops.stop_other_cpus(0);
+}
+
+static inline void stop_other_cpus(void)
+{
+	smp_ops.stop_other_cpus(1);
 }
 
 static inline void smp_prepare_boot_cpu(void)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 8e1aac8..ff8cc40 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -633,7 +633,7 @@ void native_machine_shutdown(void)
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
 	 */
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	lapic_shutdown();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d801210..513deac 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void)
 	irq_exit();
 }
 
-static void native_smp_send_stop(void)
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
-	unsigned long wait;
+	unsigned long timeout;
 
 	if (reboot_force)
 		return;
@@ -179,9 +179,12 @@ static void native_smp_send_stop(void)
 	if (num_online_cpus() > 1) {
 		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
-		/* Don't wait longer than a second */
-		wait = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && wait--)
+		/*
+		 * Don't wait longer than a second if the caller
+		 * didn't ask us to wait.
+		 */
+		timeout = USEC_PER_SEC;
+		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -227,7 +230,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.smp_send_stop		= native_smp_send_stop,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
 	.cpu_up			= native_cpu_up,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 27dff06..4d32089 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1001,7 +1001,7 @@ static void xen_reboot(int reason)
 	struct sched_shutdown r = { .reason = reason };
 
 #ifdef CONFIG_SMP
-	smp_send_stop();
+	stop_other_cpus();
 #endif
 
 	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index a29693f..d2dfbf5 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -398,9 +398,9 @@ static void stop_self(void *v)
 	BUG();
 }
 
-static void xen_smp_send_stop(void)
+static void xen_stop_other_cpus(int wait)
 {
-	smp_call_function(stop_self, NULL, 0);
+	smp_call_function(stop_self, NULL, wait);
 }
 
 static void xen_smp_send_reschedule(int cpu)
@@ -468,7 +468,7 @@ static const struct smp_ops xen_smp_ops __initdata = {
 	.cpu_disable = xen_cpu_disable,
 	.play_dead = xen_play_dead,
 
-	.smp_send_stop = xen_smp_send_stop,
+	.stop_other_cpus = xen_stop_other_cpus,
 	.smp_send_reschedule = xen_smp_send_reschedule,
 
 	.send_call_func_ipi = xen_smp_send_call_function_ipi,
--
1.7.3.3

