Subject: [PATCH 4.14 017/118] iw_cxgb4: only call the cq comp_handler when the cq is armed
4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Steve Wise <swise@opengridcomputing.com>

commit cbb40fadd31c6bbc59104e58ac95c6ef492d038b upstream.

The ULP's completion handler should only be called if the CQ is
armed for notification.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
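For reference, t4_clear_cq_armed() is not part of this diff. Assuming it
matches the CQ flag helpers in drivers/infiniband/hw/cxgb4/t4.h, it is an
atomic test-and-clear of the CQ's armed bit, roughly:

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	/* Disarm the CQ; return nonzero only if it was armed, so the
	 * caller invokes the comp_handler at most once per arming. */
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}

With this gate, a completion notification fires only after the ULP has
re-armed the CQ via ib_req_notify_cq().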
 drivers/infiniband/hw/cxgb4/ev.c |  8 +++++---
 drivers/infiniband/hw/cxgb4/qp.c | 20 ++++++++++++--------
 2 files changed, 17 insertions(+), 11 deletions(-)

--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_de
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-	spin_lock_irqsave(&chp->comp_handler_lock, flag);
-	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&chp->cq)) {
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
+		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	}
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -817,10 +817,12 @@ static void complete_sq_drain_wr(struct
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&schp->lock, flag);
 
-	spin_lock_irqsave(&schp->comp_handler_lock, flag);
-	(*schp->ibcq.comp_handler)(&schp->ibcq,
-				   schp->ibcq.cq_context);
-	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&schp->cq)) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
+		(*schp->ibcq.comp_handler)(&schp->ibcq,
+					   schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -846,10 +848,12 @@ static void complete_rq_drain_wr(struct
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&rchp->lock, flag);
 
-	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
-				   rchp->ibcq.cq_context);
-	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&rchp->cq)) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+					   rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
 }
 
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
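
For context, the contract this change enforces: a ULP arms the CQ with
ib_req_notify_cq() and must re-arm it after every notification. A minimal
sketch of such a consumer, where process_one_wc() is a hypothetical
completion-processing helper:

static void ulp_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	do {
		/* Drain everything currently in the CQ. */
		while (ib_poll_cq(cq, 1, &wc) > 0)
			process_one_wc(&wc);
		/*
		 * Re-arm the CQ. A positive return with
		 * IB_CQ_REPORT_MISSED_EVENTS means completions may have
		 * arrived between the poll and the re-arm, so poll again.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}

With the fix above, the drain paths invoke this handler only while the CQ
is armed, matching the behavior of normal completion events.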
