    From: Pavel Begunkov <asml.silence@gmail.com>
    Subject: [PATCH v3 2/6] io_uring: place io_submit_state into ctx
    Date: Fri, 31 Jan 2020
    io_submit_state is used only during submission and only while holding
    ctx->uring_lock, so only one instance is in use at a time. Move it into
    struct io_ring_ctx, so that it:
    - doesn't consume on-stack memory
    - persists across io_uring_enter() calls
    - is available without being passed through the call stack

    The last point is particularly useful for letting opcode handlers manage
    their resources themselves, as splice would. It also serves as a base for
    other hackish optimisations in the future.
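
    For illustration only, not part of this patch: a minimal stand-alone C
    sketch of the same pattern, with hypothetical names (submit_state,
    ring_ctx, handle_one) rather than the io_uring code. Helpers and
    handlers reach the per-context scratch state through the ctx pointer
    instead of taking an extra state argument.

    #include <stdio.h>

    /* hypothetical stand-in for struct io_submit_state */
    struct submit_state {
    	unsigned int free_reqs;
    	unsigned int ios_left;
    };

    /* hypothetical stand-in for struct io_ring_ctx */
    struct ring_ctx {
    	/* protected by the caller's submission lock */
    	struct submit_state submit_state;
    };

    /* helpers take only ctx; the state is derived from it */
    static void submit_start(struct ring_ctx *ctx, unsigned int max_ios)
    {
    	struct submit_state *state = &ctx->submit_state;

    	state->free_reqs = 0;
    	state->ios_left = max_ios;
    }

    /* an "opcode handler" reaches the state through ctx, no extra argument */
    static int handle_one(struct ring_ctx *ctx)
    {
    	struct submit_state *state = &ctx->submit_state;

    	if (!state->ios_left)
    		return -1;
    	state->ios_left--;
    	return 0;
    }

    int main(void)
    {
    	static struct ring_ctx ctx;

    	submit_start(&ctx, 2);
    	while (handle_one(&ctx) == 0)
    		;
    	printf("ios_left = %u\n", ctx.submit_state.ios_left); /* prints 0 */
    	return 0;
    }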

    Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
    ---
    fs/io_uring.c | 75 +++++++++++++++++++++++++++------------------------
    1 file changed, 40 insertions(+), 35 deletions(-)

    diff --git a/fs/io_uring.c b/fs/io_uring.c
    index 6f3998e6475a..6109969709ff 100644
    --- a/fs/io_uring.c
    +++ b/fs/io_uring.c
    @@ -197,6 +197,27 @@ struct fixed_file_data {
    struct completion done;
    };

    +#define IO_PLUG_THRESHOLD 2
    +#define IO_IOPOLL_BATCH 8
    +
    +struct io_submit_state {
    + /*
    + * io_kiocb alloc cache
    + */
    + void *reqs[IO_IOPOLL_BATCH];
    + unsigned int free_reqs;
    + unsigned int cur_req;
    +
    + /*
    + * File reference cache
    + */
    + struct file *file;
    + unsigned int fd;
    + unsigned int has_refs;
    + unsigned int used_refs;
    + unsigned int ios_left;
    +};
    +
    struct io_ring_ctx {
    struct {
    struct percpu_ref refs;
    @@ -310,6 +331,9 @@ struct io_ring_ctx {
    spinlock_t inflight_lock;
    struct list_head inflight_list;
    } ____cacheline_aligned_in_smp;
    +
    + /* protected by uring_lock */
    + struct io_submit_state submit_state;
    };

    /*
    @@ -575,27 +599,6 @@ struct io_kiocb {
    struct io_wq_work work;
    };

    -#define IO_PLUG_THRESHOLD 2
    -#define IO_IOPOLL_BATCH 8
    -
    -struct io_submit_state {
    - /*
    - * io_kiocb alloc cache
    - */
    - void *reqs[IO_IOPOLL_BATCH];
    - unsigned int free_reqs;
    - unsigned int cur_req;
    -
    - /*
    - * File reference cache
    - */
    - struct file *file;
    - unsigned int fd;
    - unsigned int has_refs;
    - unsigned int used_refs;
    - unsigned int ios_left;
    -};
    -
    struct io_op_def {
    /* needs req->io allocated for deferral/async */
    unsigned async_ctx : 1;
    @@ -1158,11 +1161,11 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
    return NULL;
    }

    -static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
    - struct io_submit_state *state)
    +static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
    {
    gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
    struct io_kiocb *req;
    + struct io_submit_state *state = &ctx->submit_state;

    if (!state->free_reqs) {
    size_t sz;
    @@ -4475,10 +4478,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
    return table->files[index & IORING_FILE_TABLE_MASK];;
    }

    -static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
    - const struct io_uring_sqe *sqe)
    +static int io_req_set_file(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    {
    struct io_ring_ctx *ctx = req->ctx;
    + struct io_submit_state *state = &ctx->submit_state;
    unsigned flags;
    int fd;

    @@ -4717,7 +4720,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
    IOSQE_IO_HARDLINK | IOSQE_ASYNC)

    static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
    - struct io_submit_state *state, struct io_kiocb **link)
    + struct io_kiocb **link)
    {
    const struct cred *old_creds = NULL;
    struct io_ring_ctx *ctx = req->ctx;
    @@ -4748,7 +4751,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
    req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
    IOSQE_ASYNC);

    - ret = io_req_set_file(state, req, sqe);
    + ret = io_req_set_file(req, sqe);
    if (unlikely(ret)) {
    err_req:
    io_cqring_add_event(req, ret);
    @@ -4823,8 +4826,10 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
    /*
    * Batched submission is done, ensure local IO is flushed out.
    */
    -static void io_submit_state_end(struct io_submit_state *state)
    +static void io_submit_end(struct io_ring_ctx *ctx)
    {
    + struct io_submit_state *state = &ctx->submit_state;
    +
    io_file_put(state);
    if (state->free_reqs)
    kmem_cache_free_bulk(req_cachep, state->free_reqs,
    @@ -4834,9 +4839,10 @@ static void io_submit_state_end(struct io_submit_state *state)
    /*
    * Start submission side cache.
    */
    -static void io_submit_state_start(struct io_submit_state *state,
    - unsigned int max_ios)
    +static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios)
    {
    + struct io_submit_state *state = &ctx->submit_state;
    +
    state->free_reqs = 0;
    state->file = NULL;
    state->ios_left = max_ios;
    @@ -4903,7 +4909,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
    struct mm_struct **mm, bool async)
    {
    struct blk_plug plug;
    - struct io_submit_state state;
    struct io_kiocb *link = NULL;
    int i, submitted = 0;
    bool mm_fault = false;
    @@ -4921,7 +4926,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
    if (!percpu_ref_tryget_many(&ctx->refs, nr))
    return -EAGAIN;

    - io_submit_state_start(&state, nr);
    + io_submit_start(ctx, nr);
    if (nr > IO_PLUG_THRESHOLD)
    blk_start_plug(&plug);

    @@ -4932,7 +4937,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
    const struct io_uring_sqe *sqe;
    struct io_kiocb *req;

    - req = io_get_req(ctx, &state);
    + req = io_get_req(ctx);
    if (unlikely(!req)) {
    if (!submitted)
    submitted = -EAGAIN;
    @@ -4965,7 +4970,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
    req->needs_fixed_file = async;
    trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
    true, async);
    - if (!io_submit_sqe(req, sqe, &state, &link))
    + if (!io_submit_sqe(req, sqe, &link))
    break;
    }

    @@ -4977,7 +4982,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
    if (link)
    io_queue_link_head(link);

    - io_submit_state_end(&state);
    + io_submit_end(ctx);
    if (nr > IO_PLUG_THRESHOLD)
    blk_finish_plug(&plug);

    --
    2.24.0