Subject: [PATCH 4.9 02/54] vsock/virtio: stop workers during the .remove()
    From: Stefano Garzarella <sgarzare@redhat.com>

    [ Upstream commit 17dd1367389cfe7f150790c83247b68e0c19d106 ]

    Before calling vdev->config->reset(vdev) we need to be sure that
    no one is accessing the device. For this reason, we add new variables
    to struct virtio_vsock that are used to stop the workers during the
    .remove().

    This patch also adds a few comments before vdev->config->reset(vdev)
    and vdev->config->del_vqs(vdev). (A standalone sketch of the stop-flag
    pattern appears after the patch below.)

    Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
    Suggested-by: Michael S. Tsirkin <mst@redhat.com>
    Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    net/vmw_vsock/virtio_transport.c | 46 +++++++++++++++++++++++++++++++-
    1 file changed, 45 insertions(+), 1 deletion(-)

    diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
    index 32ad7cfa5fa74..67aba63b5c96d 100644
    --- a/net/vmw_vsock/virtio_transport.c
    +++ b/net/vmw_vsock/virtio_transport.c
    @@ -39,6 +39,7 @@ struct virtio_vsock {
     	 * must be accessed with tx_lock held.
     	 */
     	struct mutex tx_lock;
    +	bool tx_run;
     
     	struct work_struct send_pkt_work;
     	spinlock_t send_pkt_list_lock;
    @@ -50,6 +51,7 @@ struct virtio_vsock {
     	 * must be accessed with rx_lock held.
     	 */
     	struct mutex rx_lock;
    +	bool rx_run;
     	int rx_buf_nr;
     	int rx_buf_max_nr;
     
    @@ -57,6 +59,7 @@ struct virtio_vsock {
     	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
     	 */
     	struct mutex event_lock;
    +	bool event_run;
     	struct virtio_vsock_event event_list[8];
     
     	u32 guest_cid;
    @@ -91,6 +94,9 @@ virtio_transport_send_pkt_work(struct work_struct *work)
     
     	mutex_lock(&vsock->tx_lock);
     
    +	if (!vsock->tx_run)
    +		goto out;
    +
     	vq = vsock->vqs[VSOCK_VQ_TX];
     
     	for (;;) {
    @@ -147,6 +153,7 @@ virtio_transport_send_pkt_work(struct work_struct *work)
     	if (added)
     		virtqueue_kick(vq);
     
    +out:
     	mutex_unlock(&vsock->tx_lock);
     
     	if (restart_rx)
    @@ -230,6 +237,10 @@ static void virtio_transport_tx_work(struct work_struct *work)
     
     	vq = vsock->vqs[VSOCK_VQ_TX];
     	mutex_lock(&vsock->tx_lock);
    +
    +	if (!vsock->tx_run)
    +		goto out;
    +
     	do {
     		struct virtio_vsock_pkt *pkt;
     		unsigned int len;
    @@ -240,6 +251,8 @@ static void virtio_transport_tx_work(struct work_struct *work)
     			added = true;
     		}
     	} while (!virtqueue_enable_cb(vq));
    +
    +out:
     	mutex_unlock(&vsock->tx_lock);
     
     	if (added)
    @@ -268,6 +281,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
     
     	mutex_lock(&vsock->rx_lock);
     
    +	if (!vsock->rx_run)
    +		goto out;
    +
     	do {
     		virtqueue_disable_cb(vq);
     		for (;;) {
    @@ -376,6 +392,9 @@ static void virtio_transport_event_work(struct work_struct *work)
     
     	mutex_lock(&vsock->event_lock);
     
    +	if (!vsock->event_run)
    +		goto out;
    +
     	do {
     		struct virtio_vsock_event *event;
     		unsigned int len;
    @@ -390,7 +409,7 @@ static void virtio_transport_event_work(struct work_struct *work)
     	} while (!virtqueue_enable_cb(vq));
     
     	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
    -
    +out:
     	mutex_unlock(&vsock->event_lock);
     }
     
    @@ -521,12 +540,18 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
     	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
     	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);
     
    +	mutex_lock(&vsock->tx_lock);
    +	vsock->tx_run = true;
    +	mutex_unlock(&vsock->tx_lock);
    +
     	mutex_lock(&vsock->rx_lock);
     	virtio_vsock_rx_fill(vsock);
    +	vsock->rx_run = true;
     	mutex_unlock(&vsock->rx_lock);
     
     	mutex_lock(&vsock->event_lock);
     	virtio_vsock_event_fill(vsock);
    +	vsock->event_run = true;
     	mutex_unlock(&vsock->event_lock);
     
     	vdev->priv = vsock;
    @@ -560,6 +585,24 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
     	/* Reset all connected sockets when the device disappear */
     	vsock_for_each_connected_socket(virtio_vsock_reset_sock);
     
    +	/* Stop all work handlers to make sure no one is accessing the device,
    +	 * so we can safely call vdev->config->reset().
    +	 */
    +	mutex_lock(&vsock->rx_lock);
    +	vsock->rx_run = false;
    +	mutex_unlock(&vsock->rx_lock);
    +
    +	mutex_lock(&vsock->tx_lock);
    +	vsock->tx_run = false;
    +	mutex_unlock(&vsock->tx_lock);
    +
    +	mutex_lock(&vsock->event_lock);
    +	vsock->event_run = false;
    +	mutex_unlock(&vsock->event_lock);
    +
    +	/* Flush all device writes and interrupts, device will not use any
    +	 * more buffers.
    +	 */
     	vdev->config->reset(vdev);
     
     	mutex_lock(&vsock->rx_lock);
    @@ -581,6 +624,7 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
     	}
     	spin_unlock_bh(&vsock->send_pkt_list_lock);
     
    +	/* Delete virtqueues and flush outstanding callbacks if any */
     	vdev->config->del_vqs(vdev);
     
     	mutex_unlock(&the_virtio_vsock_mutex);
    --
    2.25.1
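
    For readers new to this locking pattern, a minimal user-space sketch
    follows. It is not part of the patch: it re-expresses the stop-flag
    idea with pthreads, and every name in it (worker, dev_remove,
    fake_device_reset) is a hypothetical stand-in for the kernel
    primitives (the work handlers, struct mutex, and
    vdev->config->reset()).

    /*
     * Hypothetical user-space sketch of the stop-flag pattern above.
     * Only the "tx" flag is shown; the patch applies the same idiom to
     * the rx and event workers. Build with: cc sketch.c -lpthread
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool tx_run;			/* mirrors vsock->tx_run */

    /* Stand-in for vdev->config->reset(vdev). */
    static void fake_device_reset(void)
    {
    	printf("device reset: no worker can touch it now\n");
    }

    /* Mirrors virtio_transport_send_pkt_work(): check the flag under
     * the same lock the remover uses to clear it, and bail out early
     * when .remove() is in progress. */
    static void *worker(void *arg)
    {
    	(void)arg;
    	for (;;) {
    		pthread_mutex_lock(&tx_lock);
    		if (!tx_run) {
    			pthread_mutex_unlock(&tx_lock);
    			return NULL;	/* device is going away */
    		}
    		printf("worker: processing tx queue\n");
    		pthread_mutex_unlock(&tx_lock);
    		usleep(100 * 1000);
    	}
    }

    /* Mirrors virtio_vsock_remove(): clear the flag under the lock,
     * then reset the device. Once the flag is cleared and the mutex
     * dropped, any worker that next takes the lock sees tx_run ==
     * false and exits before touching the device. The join only keeps
     * this demo deterministic; the kernel relies on the mutex/flag
     * pair itself. */
    static void dev_remove(pthread_t t)
    {
    	pthread_mutex_lock(&tx_lock);
    	tx_run = false;
    	pthread_mutex_unlock(&tx_lock);

    	pthread_join(t, NULL);
    	fake_device_reset();
    }

    int main(void)
    {
    	pthread_t t;

    	/* Mirrors virtio_vsock_probe(): mark the worker runnable
    	 * before any work can run. */
    	pthread_mutex_lock(&tx_lock);
    	tx_run = true;
    	pthread_mutex_unlock(&tx_lock);

    	pthread_create(&t, NULL, worker, NULL);
    	usleep(300 * 1000);
    	dev_remove(t);
    	return 0;
    }

    The design choice worth noting: the workers are not cancelled from
    outside. They cooperatively observe the flag under the mutex, so
    after the remove path has held and released each lock, no handler
    that got past its check can still be inside a device-touching
    critical section, and the reset cannot race with queue processing.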

