Subject: [PATCH 5.10 483/717] io_uring: cancel only requests of current task
From: Pavel Begunkov <asml.silence@gmail.com>

[ Upstream commit df9923f96717d0aebb0a73adbcf6285fa79e38cb ]

io_uring_cancel_files() cancels all requests that match the given files,
regardless of task. There is no real need for that; cancel only requests
of the specified task. That also handles the SQPOLL case, as the caller
already changes task to the SQPOLL thread.
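
[Editor's note, not part of the patch: a minimal stand-alone C sketch of
the before/after inflight-list match that this change is about. The
struct definitions and the IO_WQ_WORK_FILES value below are simplified
stand-ins, not the real fs/io_uring.c types; only the shape of the two
predicates is taken from the hunks below.]

#include <stdbool.h>

#define IO_WQ_WORK_FILES	(1 << 4)	/* illustrative value only */

struct task_struct;			/* opaque, used by pointer only */
struct files_struct;			/* opaque, used by pointer only */

struct io_identity {
	struct files_struct *files;
};

struct io_wq_work {
	unsigned int flags;
	struct io_identity *identity;
};

struct io_kiocb {
	struct task_struct *task;	/* task that submitted the request */
	struct io_wq_work work;
};

/* Before: only the files are compared, whoever submitted the request. */
static bool io_req_skip_old(struct io_kiocb *req, struct files_struct *files)
{
	return files && (req->work.flags & IO_WQ_WORK_FILES) &&
	       req->work.identity->files != files;
}

/* After: the submitting task takes part in the match, per the diff below. */
static bool io_req_skip_new(struct io_kiocb *req, struct task_struct *task,
			    struct files_struct *files)
{
	return req->task == task &&
	       (req->work.flags & IO_WQ_WORK_FILES) &&
	       req->work.identity->files != files;
}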

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/io_uring.c | 23 +++++------------------
 1 file changed, 5 insertions(+), 18 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 86dac2b2e2763..0621f581943cd 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8421,14 +8421,6 @@ static int io_uring_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static bool io_wq_files_match(struct io_wq_work *work, void *data)
-{
-	struct files_struct *files = data;
-
-	return !files || ((work->flags & IO_WQ_WORK_FILES) &&
-				work->identity->files == files);
-}
-
 /*
  * Returns true if 'preq' is the link parent of 'req'
  */
@@ -8566,21 +8558,20 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
  * Returns true if we found and killed one or more files pinning requests
  */
 static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
+				  struct task_struct *task,
 				  struct files_struct *files)
 {
 	if (list_empty_careful(&ctx->inflight_list))
 		return false;
 
-	/* cancel all at once, should be faster than doing it one by one*/
-	io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
-
 	while (!list_empty_careful(&ctx->inflight_list)) {
 		struct io_kiocb *cancel_req = NULL, *req;
 		DEFINE_WAIT(wait);
 
 		spin_lock_irq(&ctx->inflight_lock);
 		list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-			if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
+			if (req->task == task &&
+			    (req->work.flags & IO_WQ_WORK_FILES) &&
 			    req->work.identity->files != files)
 				continue;
 			/* req is being completed, ignore */
@@ -8623,7 +8614,7 @@ static bool __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 {
 	bool ret;
 
-	ret = io_uring_cancel_files(ctx, files);
+	ret = io_uring_cancel_files(ctx, task, files);
 	if (!files) {
 		enum io_wq_cancel cret;
 
@@ -8662,11 +8653,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 		io_sq_thread_park(ctx->sq_data);
 	}
 
-	if (files)
-		io_cancel_defer_files(ctx, NULL, files);
-	else
-		io_cancel_defer_files(ctx, task, NULL);
-
+	io_cancel_defer_files(ctx, task, files);
 	io_cqring_overflow_flush(ctx, true, task, files);
 
 	while (__io_uring_cancel_task_requests(ctx, task, files)) {
-- 
2.27.0

