Date: 2009-02-09
From: Oleg Nesterov <oleg@redhat.com>
Subject: Re: [PATCH 2/3] workqueue: not allow recursion run_workqueue
On 02/06, Lai Jiangshan wrote:
>
> 1) lockdep will complain when run_workqueue() recurses.
> 2) The recursive implementation of run_workqueue() makes flush_workqueue()
>    inconsistent with its documentation, and it may hide deadlocks and
>    other bugs.
> 3) A recursive run_workqueue() will poison cwq->current_work, but
>    flush_work(), __cancel_work_timer(), etc. need a reliable
>    cwq->current_work.
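
To make the failure mode concrete, the pattern this patch outlaws is a
work function that flushes its own workqueue. A minimal sketch
(hypothetical names, not code from the patch):

	/* Hypothetical example of the now-forbidden pattern. */
	static struct workqueue_struct *my_wq;

	static void my_work_func(struct work_struct *work)
	{
		/*
		 * Before this patch: flush_cpu_workqueue() sees
		 * cwq->thread == current and recurses into run_workqueue(),
		 * clobbering cwq->current_work for every item it runs.
		 * After this patch: the WARN_ON() fires (and the thread
		 * would block waiting on its own barrier), so such callers
		 * must be fixed to not flush from work->func().
		 */
		flush_workqueue(my_wq);
	}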

I think this change is good. If we still have users that call flush
from work->func(), they should be fixed, imho.

And while I knew this recursive flush was bad, I didn't realize how
bad it was until Lai spelled it out. Thanks.

Acked-by: Oleg Nesterov <oleg@redhat.com>

> Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
> ---
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 2f44583..1129cde 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
>
>  	struct workqueue_struct *wq;
>  	struct task_struct *thread;
> -
> -	int run_depth;		/* Detect run_workqueue() recursion depth */
>  } ____cacheline_aligned;
>
>  /*
> @@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
>  static void run_workqueue(struct cpu_workqueue_struct *cwq)
>  {
>  	spin_lock_irq(&cwq->lock);
> -	cwq->run_depth++;
> -	if (cwq->run_depth > 3) {
> -		/* morton gets to eat his hat */
> -		printk("%s: recursion depth exceeded: %d\n",
> -			__func__, cwq->run_depth);
> -		dump_stack();
> -	}
>  	while (!list_empty(&cwq->worklist)) {
>  		struct work_struct *work = list_entry(cwq->worklist.next,
>  						struct work_struct, entry);
> @@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
>  		spin_lock_irq(&cwq->lock);
>  		cwq->current_work = NULL;
>  	}
> -	cwq->run_depth--;
>  	spin_unlock_irq(&cwq->lock);
>  }
>
> @@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
>
>  static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
>  {
> -	int active;
> +	int active = 0;
> +	struct wq_barrier barr;
>
> -	if (cwq->thread == current) {
> -		/*
> -		 * Probably keventd trying to flush its own queue. So simply run
> -		 * it by hand rather than deadlocking.
> -		 */
> -		run_workqueue(cwq);
> -		active = 1;
> -	} else {
> -		struct wq_barrier barr;
> +	WARN_ON(cwq->thread == current);
>
> -		active = 0;
> -		spin_lock_irq(&cwq->lock);
> -		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
> -			insert_wq_barrier(cwq, &barr, &cwq->worklist);
> -			active = 1;
> -		}
> -		spin_unlock_irq(&cwq->lock);
> -
> -		if (active)
> -			wait_for_completion(&barr.done);
> +	spin_lock_irq(&cwq->lock);
> +	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
> +		insert_wq_barrier(cwq, &barr, &cwq->worklist);
> +		active = 1;
>  	}
> +	spin_unlock_irq(&cwq->lock);
> +
> +	if (active)
> +		wait_for_completion(&barr.done);
>
>  	return active;
>  }
>
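
For context, the barrier machinery the new flush_cpu_workqueue() relies
on works roughly like this (a simplified paraphrase of the existing
kernel/workqueue.c helpers, not new code): insert_wq_barrier() queues a
dummy work item whose callback fires a completion, so the flusher sleeps
until everything queued before the barrier, and then the barrier itself,
has run in the workqueue thread.

	/* Simplified sketch of the existing barrier helpers (paraphrased). */
	struct wq_barrier {
		struct work_struct	work;
		struct completion	done;
	};

	static void wq_barrier_func(struct work_struct *work)
	{
		struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

		/* Runs in the workqueue thread: wake the flusher. */
		complete(&barr->done);
	}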


