    Subject: [PATCH 5.11 341/775] RDMA/rtrs-clt: Set minimum limit when create QP
    From: Jack Wang <jinpu.wang@cloud.ionos.com>

    [ Upstream commit f47e4e3e71724f625958b0059f6c8ac5d44d27ef ]

    Currently rtrs uses coarse numbers (bigger in general) in create_qp,
    which leads the hardware to create more resources than needed and only
    wastes memory with no benefit.

    - SERVICE con:
      for max_send_wr/max_recv_wr, it's 2 * SERVICE_CON_QUEUE_DEPTH + 2

    - IO con:
      for max_send_wr/max_recv_wr, it's sess->queue_depth * 3 + 1
      (see the sketch below)
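    For reference only, not part of the patch: a minimal sketch of how the
    two rules above combine into the send/recv WR limits and the CQ size,
    assuming the kernel's u32/min_t helpers. calc_wr_limits() and the
    SERVICE_CON_QUEUE_DEPTH value below are illustrative placeholders, not
    taken from rtrs.

    #include <linux/types.h>
    #include <linux/minmax.h>

    #define SERVICE_CON_QUEUE_DEPTH 512    /* placeholder value */

    static void calc_wr_limits(bool service_con, u32 queue_depth,
                               u32 max_qp_wr, u32 *max_send_wr,
                               u32 *max_recv_wr, u32 *cq_size)
    {
            if (service_con) {
                    /* SERVICE con: QD * 2 + 2 for drain and heartbeat */
                    *max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
                    *max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
            } else {
                    /*
                     * IO con: QD * (REQ + RSP + FR REGS or INVS) + drain,
                     * clamped to what the device supports.
                     */
                    *max_send_wr = min_t(u32, max_qp_wr, queue_depth * 3 + 1);
                    *max_recv_wr = min_t(u32, max_qp_wr, queue_depth * 3 + 1);
            }
            /* the CQ must hold completions from both send and recv queues */
            *cq_size = *max_send_wr + *max_recv_wr;
    }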

    Fixes: 6a98d71daea1 ("RDMA/rtrs: client: main functionality")
    Link: https://lore.kernel.org/r/20201217141915.56989-6-jinpu.wang@cloud.ionos.com
    Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
    Reviewed-by: Md Haris Iqbal <haris.iqbal@cloud.ionos.com>
    Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    drivers/infiniband/ulp/rtrs/rtrs-clt.c | 19 ++++++++++++-------
    1 file changed, 12 insertions(+), 7 deletions(-)

    diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
    index 719254fc83a1c..b3fb5fb93815f 100644
    --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
    +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
    @@ -1511,7 +1511,7 @@ static void destroy_con(struct rtrs_clt_con *con)
     static int create_con_cq_qp(struct rtrs_clt_con *con)
     {
             struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
    -        u32 wr_queue_size;
    +        u32 max_send_wr, max_recv_wr, cq_size;
             int err, cq_vector;
             struct rtrs_msg_rkey_rsp *rsp;

    @@ -1523,7 +1523,8 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
                      * + 2 for drain and heartbeat
                      * in case qp gets into error state
                      */
    -                wr_queue_size = SERVICE_CON_QUEUE_DEPTH * 3 + 2;
    +                max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
    +                max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
                     /* We must be the first here */
                     if (WARN_ON(sess->s.dev))
                             return -EINVAL;
    @@ -1555,25 +1556,29 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)

                     /* Shared between connections */
                     sess->s.dev_ref++;
    -                wr_queue_size =
    +                max_send_wr =
                             min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
                                   /* QD * (REQ + RSP + FR REGS or INVS) + drain */
                                   sess->queue_depth * 3 + 1);
    +                max_recv_wr =
    +                        min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
    +                              sess->queue_depth * 3 + 1);
             }
             /* alloc iu to recv new rkey reply when server reports flags set */
             if (sess->flags == RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
    -                con->rsp_ius = rtrs_iu_alloc(wr_queue_size, sizeof(*rsp),
    +                con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
                                                  GFP_KERNEL, sess->s.dev->ib_dev,
                                                  DMA_FROM_DEVICE,
                                                  rtrs_clt_rdma_done);
                     if (!con->rsp_ius)
                             return -ENOMEM;
    -                con->queue_size = wr_queue_size;
    +                con->queue_size = max_recv_wr;
             }
    +        cq_size = max_send_wr + max_recv_wr;
             cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
             err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
    -                                cq_vector, wr_queue_size, wr_queue_size,
    -                                wr_queue_size, IB_POLL_SOFTIRQ);
    +                                cq_vector, cq_size, max_send_wr,
    +                                max_recv_wr, IB_POLL_SOFTIRQ);
             /*
              * In case of error we do not bother to clean previous allocations,
              * since destroy_con_cq_qp() must be called.
    --
    2.27.0

