Date: 2009-03-31
From: Ingo Molnar
Subject: [GIT PULL] RCU updates for v2.6.30

Linus,

Please pull the latest rcu-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git rcu-for-linus

A quiet RCU cycle. No known regressions.

Thanks,

	Ingo

------------------>
Lai Jiangshan (1):
      rcu: rcu_barrier VS cpu_hotplug: Ensure callbacks in dead cpu are migrated to online cpu


 kernel/rcupdate.c |   44 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 44 insertions(+), 0 deletions(-)
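
For context, the window being closed here, paraphrased from the patch below rather than quoted from its changelog: rcu_barrier() completes once the barrier callbacks it queues on each online CPU have run, but a CPU that dies concurrently has its pending callbacks moved onto an online CPU's list behind that CPU's barrier callback, so rcu_barrier() could return while those migrated callbacks were still queued. As a rough timeline (illustrative comment, not code from the patch):

/*
 * CPU 0: rcu_barrier()              CPU 1: going offline
 * --------------------              --------------------
 * on_each_cpu(rcu_barrier_func)
 *   -> barrier callback queued on
 *      every *online* CPU
 *                                   CPU_DEAD: pending callbacks
 *                                   migrated to CPU 0, behind
 *                                   CPU 0's barrier callback
 * barrier callbacks run,
 * completion fires
 * rcu_barrier() returns             <- CPU 1's old callbacks are
 *                                      still queued, not yet run
 */

The fix queues one marker callback per RCU flavor (rcu, rcu_bh, rcu_sched) on the dying CPU at CPU_DYING; the markers are migrated along with everything else, and both _rcu_barrier() and the CPU_POST_DEAD path wait until all three have run.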

diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cae8a05..2c7b845 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,6 +122,8 @@ static void rcu_barrier_func(void *type)
 	}
 }
 
+static inline void wait_migrated_callbacks(void);
+
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
  * RCU callbacks of the specified type to complete.
@@ -147,6 +149,7 @@ static void _rcu_barrier(enum rcu_barrier type)
 		complete(&rcu_barrier_completion);
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
+	wait_migrated_callbacks();
 }
 
 /**
@@ -176,9 +179,50 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_migrate_type_count))
+		wake_up(&rcu_migrate_wq);
+}
+
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
+
+static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
+		unsigned long action, void *hcpu)
+{
+	if (action == CPU_DYING) {
+		/*
+		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func(),
+		 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
+		 *
+		 * These callbacks ensure _rcu_barrier() waits for all
+		 * RCU callbacks of the specified type to complete.
+		 */
+		atomic_set(&rcu_migrate_type_count, 3);
+		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+	} else if (action == CPU_POST_DEAD) {
+		/* rcu_migrate_head is protected by cpu_add_remove_lock */
+		wait_migrated_callbacks();
+	}
+
+	return NOTIFY_OK;
+}
+
 void __init rcu_init(void)
 {
 	__rcu_init();
+	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
 }
 
 void rcu_scheduler_starting(void)
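
The guarantee being tightened is the one module-unload paths rely on. A minimal sketch of such a caller, assuming a hypothetical module (foo, foo_free_rcu and the exit path below are illustrative names, not part of this patch):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-freed object, for illustration only. */
struct foo {
	struct rcu_head rcu;
	int data;
};

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

static int __init foo_init(void)
{
	return 0;
}

static void __exit foo_exit(void)
{
	/* ... unlink objects and queue foo_free_rcu() via call_rcu() ... */

	/*
	 * Must not return until every queued foo_free_rcu() has run,
	 * or a callback could fire after the module text is gone.
	 * With the patch above, this wait also covers callbacks that
	 * were migrated off a CPU which went offline concurrently.
	 */
	rcu_barrier();
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Before this change, nothing ordered rcu_barrier() against callbacks still in flight from a dead CPU; that is exactly the hole the hotplug notifier closes.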
