From: Marcelo Diop-Gonzalez <marcelo827@gmail.com>
Subject: [PATCH 5.10 070/199] io_uring: flush timeouts that should already have expired

[ Upstream commit f010505b78a4fa8d5b6480752566e7313fb5ca6e ]

Right now io_flush_timeouts() checks whether the current number of
events is equal to ->timeout.target_seq, but this will miss some
timeouts if more than one event has been added since the last time
they were flushed (possible in io_submit_flush_completions(), for
example). Fix it by recording the last sequence at which timeouts
were flushed, so that the number of events seen can be compared to
the number of events needed without overflow.

Signed-off-by: Marcelo Diop-Gonzalez <marcelo827@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 fs/io_uring.c | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 265aea2cd7bc8..2348104857000 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -353,6 +353,7 @@ struct io_ring_ctx {
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		atomic_t		cq_timeouts;
+		unsigned		cq_last_tm_flush;
 		unsigned long		cq_check_overflow;
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
@@ -1521,19 +1522,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5582,6 +5602,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
--
2.27.0
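
The 2^31-1 assumption in the comment above can be exercised the same
way: both subtractions are modulo 2^32, so the window check keeps
working across the point where the CQ tail wraps. Continuing the
hypothetical sketch from the commit message:

	/* last flush at 0xfffffffe; a target two events later wraps to
	 * 0, and a current seq three events later wraps to 1. */
	assert(timeout_expired(0x00000000u, 0xfffffffeu, 0x00000001u));

	/* A target further away than the events seen still waits. */
	assert(!timeout_expired(0x00000005u, 0xfffffffeu, 0x00000001u));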

