Subject: Re: [PATCH v2] workqueue: fix UAF in pwq_unbound_release_workfn()
On Tue, Jul 13, 2021 at 1:12 AM Tejun Heo <tj@kernel.org> wrote:
>
> Hello, Yang.
>
> > +static void free_pwq(struct pool_workqueue *pwq)
> > +{
> > +	if (!pwq || --pwq->refcnt)
> > +		return;
> > +
> > +	put_unbound_pool(pwq->pool);
> > +	kmem_cache_free(pwq_cache, pwq);
> > +}
> > +
> > +static void free_wqattrs_ctx(struct apply_wqattrs_ctx *ctx)
> > +{
> > +	int node;
> > +
> > +	if (!ctx)
> > +		return;
> > +
> > +	for_each_node(node)
> > +		free_pwq(ctx->pwq_tbl[node]);
> > +	free_pwq(ctx->dfl_pwq);
> > +
> > +	free_workqueue_attrs(ctx->attrs);
> > +
> > +	kfree(ctx);
> > +}
>
> It bothers me that we're partially replicating the free path including pwq
> refcnting.

The replicated code can be reduced by merging
apply_wqattrs_cleanup() into apply_wqattrs_commit(); a rough sketch follows.
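
A minimal, untested sketch of that merge, assuming the v5.13-era bodies of
apply_wqattrs_commit() and apply_wqattrs_cleanup() (a prepare-failure path
would still need to free an uncommitted ctx on its own):

static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
	int node;

	/* all pwqs have been created successfully, let's install'em */
	mutex_lock(&ctx->wq->mutex);

	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);

	/* save the previous pwq and install the new one */
	for_each_node(node)
		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
							  ctx->pwq_tbl[node]);

	/* @dfl_pwq might not have been used, ensure it's linked */
	link_pwq(ctx->dfl_pwq);
	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);

	mutex_unlock(&ctx->wq->mutex);

	/* formerly apply_wqattrs_cleanup(): drop the replaced pwqs, free ctx */
	for_each_node(node)
		put_pwq_unlocked(ctx->pwq_tbl[node]);
	put_pwq_unlocked(ctx->dfl_pwq);

	free_workqueue_attrs(ctx->attrs);
	kfree(ctx);
}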

> Does something like the following work?

It works because it adds a flush_scheduled_work() in
alloc_and_link_pwqs(). But I don't think it works for
workqueue_apply_unbound_cpumask(), where apply_wqattrs_commit()
is not called on the failure path; see below.
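
For reference, the relevant part of workqueue_apply_unbound_cpumask()
looks roughly like this (trimmed from v5.13):

	list_for_each_entry(wq, &workqueues, list) {
		...
		ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
		if (!ctx) {
			ret = -ENOMEM;
			break;
		}
		list_add_tail(&ctx->list, &ctxs);
	}

	list_for_each_entry_safe(ctx, n, &ctxs, list) {
		if (!ret)
			apply_wqattrs_commit(ctx);
		apply_wqattrs_cleanup(ctx);
	}

When apply_wqattrs_prepare() fails, ret is set, apply_wqattrs_commit() is
skipped, and only apply_wqattrs_cleanup() runs; there is no
flush_scheduled_work() on that path.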

If we want to reuse the current apply_wqattrs_cleanup(), I would prefer
something like the following (untested):

@@ -3680,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
 
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * When @pwq is not linked, it doesn't hold any reference to @wq,
+	 * and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
 
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);

>
> diff --git a/kernel/workqueue.c b/kernel/workqueue.c
> index 104e3ef04e33..0c0ab363edeb 100644
> --- a/kernel/workqueue.c
> +++ b/kernel/workqueue.c
> @@ -3693,7 +3693,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
>  	 * If we're the last pwq going away, @wq is already dead and no one
>  	 * is gonna access it anymore. Schedule RCU free.
>  	 */
> -	if (is_last) {
> +	if (is_last && !list_empty(&wq->list)) {
>  		wq_unregister_lockdep(wq);
>  		call_rcu(&wq->rcu, rcu_free_wq);
>  	}
> @@ -4199,6 +4199,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
>  	}
>  	put_online_cpus();
>
> +	if (ret) {
> +		flush_scheduled_work();
> +	}
> +
>  	return ret;
>  }
>
> --
> tejun
