    Subject: Re: [syzbot] [kvm?] [net?] [virt?] general protection fault in vhost_work_queue

    On Wed, May 31, 2023 at 11:27:12AM -0500, Mike Christie wrote:
    >On 5/31/23 10:15 AM, Mike Christie wrote:
    >>>> rcu would work for your case and for what Jason had requested.
    >>> Yeah, so you already have some patches?
    >>>
    >>> Do you want to send it to solve this problem?
    >>>
    >> Yeah, I'll break them out and send them later today when I can retest
    >> rebased patches.
    >>
    >
    >Just one question: do you core vhost developers consider RCU more complex,
    >or switching to READ_ONCE/WRITE_ONCE? I am asking because, for this
    >immediate regression fix, we could just switch to the latter (as below)
    >to fix the crash, if we think that is simpler.
    >
    >I think RCU is just a little more complex/invasive because of the extra
    >synchronize_rcu() calls it needs.

    Yes, you may be right: in this case we just need READ_ONCE/WRITE_ONCE,
    since dev->worker is no longer a pointer.
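
    For comparison, the RCU alternative Mike mentions would keep dev->worker
    as a pointer and pay for a synchronize_rcu() on the teardown path. A
    rough sketch, only to illustrate the shape (it is not a patch from this
    thread, and it assumes dev->worker would gain an __rcu annotation):

	void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
	{
		struct vhost_worker *worker;

		rcu_read_lock();
		worker = rcu_dereference(dev->worker);
		if (worker && !test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
			llist_add(&work->node, &worker->work_list);
			wake_up_process(worker->vtsk->task);
		}
		rcu_read_unlock();
	}

	static void vhost_worker_free(struct vhost_dev *dev)
	{
		struct vhost_worker *worker =
			rcu_dereference_protected(dev->worker, true);

		if (!worker)
			return;

		RCU_INIT_POINTER(dev->worker, NULL);
		/* Wait for any in-flight vhost_work_queue() readers. */
		synchronize_rcu();
		WARN_ON(!llist_empty(&worker->work_list));
		vhost_task_stop(worker->vtsk);
		kfree(worker);
	}

    That synchronize_rcu() is the extra cost/invasiveness being weighed
    against the READ_ONCE/WRITE_ONCE version below.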

    >
    >
    >diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
    >index a92af08e7864..03fd47a22a73 100644
    >--- a/drivers/vhost/vhost.c
    >+++ b/drivers/vhost/vhost.c
    >@@ -235,7 +235,7 @@ void vhost_dev_flush(struct vhost_dev *dev)
    > {
    > 	struct vhost_flush_struct flush;
    >
    >-	if (dev->worker) {
    >+	if (READ_ONCE(dev->worker.vtsk)) {
    > 		init_completion(&flush.wait_event);
    > 		vhost_work_init(&flush.work, vhost_flush_work);
    >
    >@@ -247,7 +247,9 @@ EXPORT_SYMBOL_GPL(vhost_dev_flush);
    >
    > void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
    > {
    >-	if (!dev->worker)
    >+	struct vhost_task *vtsk = READ_ONCE(dev->worker.vtsk);
    >+
    >+	if (!vtsk)
    > 		return;
    >
    > 	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
    >@@ -255,8 +257,8 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
    > 		 * sure it was not in the list.
    > 		 * test_and_set_bit() implies a memory barrier.
    > 		 */
    >-		llist_add(&work->node, &dev->worker->work_list);
    >-		wake_up_process(dev->worker->vtsk->task);
    >+		llist_add(&work->node, &dev->worker.work_list);
    >+		wake_up_process(vtsk->task);
    > 	}
    > }
    > EXPORT_SYMBOL_GPL(vhost_work_queue);
    >@@ -264,7 +266,7 @@ EXPORT_SYMBOL_GPL(vhost_work_queue);
    > /* A lockless hint for busy polling code to exit the loop */
    > bool vhost_has_work(struct vhost_dev *dev)
    > {
    >-	return dev->worker && !llist_empty(&dev->worker->work_list);
    >+	return !llist_empty(&dev->worker.work_list);
    > }
    > EXPORT_SYMBOL_GPL(vhost_has_work);
    >
    >@@ -468,7 +470,7 @@ void vhost_dev_init(struct vhost_dev *dev,
    > 	dev->umem = NULL;
    > 	dev->iotlb = NULL;
    > 	dev->mm = NULL;
    >-	dev->worker = NULL;
    >+	memset(&dev->worker, 0, sizeof(dev->worker));
    > 	dev->iov_limit = iov_limit;
    > 	dev->weight = weight;
    > 	dev->byte_weight = byte_weight;
    >@@ -542,46 +544,38 @@ static void vhost_detach_mm(struct vhost_dev *dev)
    >
    > static void vhost_worker_free(struct vhost_dev *dev)
    > {
    >-	struct vhost_worker *worker = dev->worker;
    >+	struct vhost_task *vtsk = READ_ONCE(dev->worker.vtsk);
    >
    >-	if (!worker)
    >+	if (!vtsk)
    > 		return;
    >
    >-	dev->worker = NULL;
    >-	WARN_ON(!llist_empty(&worker->work_list));
    >-	vhost_task_stop(worker->vtsk);
    >-	kfree(worker);
    >+	vhost_task_stop(vtsk);
    >+	WARN_ON(!llist_empty(&dev->worker.work_list));
    >+	WRITE_ONCE(dev->worker.vtsk, NULL);

    The patch LGTM; I just wonder if we should zero the whole dev->worker
    here, but maybe we don't need to.
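
    Something like this hypothetical variant, if we did want to reset the
    rest of the embedded worker and not only vtsk:

	vhost_task_stop(vtsk);
	WARN_ON(!llist_empty(&dev->worker.work_list));
	WRITE_ONCE(dev->worker.vtsk, NULL);
	/* Hypothetical extra resets; create reinitializes these anyway. */
	init_llist_head(&dev->worker.work_list);
	dev->worker.kcov_handle = 0;

    But since vhost_worker_create() already reinitializes work_list and
    kcov_handle, it is probably not needed.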

    Thanks,
    Stefano

    > }
    >
    > static int vhost_worker_create(struct vhost_dev *dev)
    > {
    >-	struct vhost_worker *worker;
    > 	struct vhost_task *vtsk;
    > 	char name[TASK_COMM_LEN];
    > 	int ret;
    >
    >-	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
    >-	if (!worker)
    >-		return -ENOMEM;
    >-
    >-	dev->worker = worker;
    >-	worker->kcov_handle = kcov_common_handle();
    >-	init_llist_head(&worker->work_list);
    >+	dev->worker.kcov_handle = kcov_common_handle();
    >+	init_llist_head(&dev->worker.work_list);
    > 	snprintf(name, sizeof(name), "vhost-%d", current->pid);
    >
    >-	vtsk = vhost_task_create(vhost_worker, worker, name);
    >+	vtsk = vhost_task_create(vhost_worker, &dev->worker, name);
    > 	if (!vtsk) {
    > 		ret = -ENOMEM;
    > 		goto free_worker;
    > 	}
    >
    >-	worker->vtsk = vtsk;
    >+	WRITE_ONCE(dev->worker.vtsk, vtsk);
    > 	vhost_task_start(vtsk);
    > 	return 0;
    >
    > free_worker:
    >-	kfree(worker);
    >-	dev->worker = NULL;
    >+	WRITE_ONCE(dev->worker.vtsk, NULL);
    > 	return ret;
    > }
    >
    >diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
    >index 0308638cdeee..305ec8593d46 100644
    >--- a/drivers/vhost/vhost.h
    >+++ b/drivers/vhost/vhost.h
    >@@ -154,7 +154,7 @@ struct vhost_dev {
    > 	struct vhost_virtqueue **vqs;
    > 	int nvqs;
    > 	struct eventfd_ctx *log_ctx;
    >-	struct vhost_worker *worker;
    >+	struct vhost_worker worker;
    > 	struct vhost_iotlb *umem;
    > 	struct vhost_iotlb *iotlb;
    > 	spinlock_t iotlb_lock;
    >
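
    For anyone following along, my reading of why the plain loads crash,
    sketched informally (as I understand the syzbot report):

	/*
	 * Before the patch, dev->worker was a kfree()'d pointer:
	 *
	 *   CPU0: vhost_work_queue()       CPU1: vhost_worker_free()
	 *     if (!dev->worker)  // non-NULL
	 *                                    dev->worker = NULL;
	 *                                    kfree(worker);
	 *     dev->worker->work_list        // GPF: use-after-free
	 *
	 * Embedding struct vhost_worker in struct vhost_dev removes the
	 * kfree(), and READ_ONCE()/WRITE_ONCE() on vtsk makes the queue
	 * path load the task pointer exactly once instead of racing
	 * with the NULL store.
	 */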
