Subject: [PATCH 4.18 159/168] crypto: chelsio - Fix memory corruption in DMA Mapped buffers.
4.18-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Harsh Jain <harsh@chelsio.com>

commit add92a817e60e308a419693413a38d9d1e663aff upstream.

Update the PCI Id in the "cpl_rx_phys_dsgl" header. If pci_chan_id and
tx_chan_id are not derived from the same queue, the H/W can send the request
completion indication before the DMA transfer has completed.

Herbert, it would be good if this fix could be merged into the stable tree.
For the 4.14 kernel, it requires some updates to avoid merge conflicts.

Cc: <stable@vger.kernel.org>
Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
drivers/crypto/chelsio/chcr_algo.c | 32 ++++++++++++++++++++++----------
drivers/crypto/chelsio/chcr_crypto.h | 2 ++
2 files changed, 24 insertions(+), 10 deletions(-)
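
For reviewers who want the id derivation at a glance: the stand-alone sketch
below mirrors the arithmetic added to chcr_device_init in this patch. The
struct and helper names here are simplified stand-ins, not the driver's own
code. The point is that pci_chan_id is derived from the same TX queue index
as tx_qidx, so the channel programmed into cpl_rx_phys_dsgl matches the
channel that carried the DMA.

	#include <stdio.h>

	/* Simplified stand-in for the per-context ids touched by this patch. */
	struct ctx_ids {
		unsigned char tx_chan_id;	/* channel used in FILL_ULPTX_CMD_DEST */
		unsigned char tx_qidx;		/* absolute TX queue index */
		unsigned char pci_chan_id;	/* channel SGE uses to reach the host */
	};

	/* Derive the ids the way the patched chcr_device_init does: the PCI
	 * channel comes from the same queue index as tx_qidx, so completions
	 * routed via cpl_rx_phys_dsgl use the channel that did the DMA. */
	static void derive_ids(struct ctx_ids *ctx, unsigned int id,
			       unsigned int txq_perchan, unsigned int chan)
	{
		ctx->tx_chan_id = chan;
		ctx->tx_qidx = chan * txq_perchan + id % txq_perchan;
		ctx->pci_chan_id = ctx->tx_qidx / txq_perchan;
	}

	int main(void)
	{
		struct ctx_ids ctx;

		derive_ids(&ctx, 5, 8, 1);
		printf("tx_chan_id=%u tx_qidx=%u pci_chan_id=%u\n",
		       (unsigned)ctx.tx_chan_id, (unsigned)ctx.tx_qidx,
		       (unsigned)ctx.pci_chan_id);
		return 0;
	}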

--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct
walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+ int pci_chan_id)
{
struct cpl_rx_phys_dsgl *phys_cpl;

@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
+ phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct ch
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
!!lcb, ctx->tx_qidx);

- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
qid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_
adap->vres.ncrypto_fc);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
- txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
+ ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
+ rxq_idx = ctx->tx_chan_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+ txq_idx = ctx->tx_chan_id * txq_perchan;
+ txq_idx += id % txq_perchan;
+ ctx->rx_qidx = rxq_idx;
+ ctx->tx_qidx = txq_idx;
+ /* Channel Id used by SGE to forward packet to Host.
+ * Same value should be used in cpl_fw6_pld RSS_CH field
+ * by FW. Driver programs PCI channel ID to be used in fw
+ * at the time of queue allocation with value "pi->tx_chan"
+ */
+ ctx->pci_chan_id = txq_idx / txq_perchan;
}
out:
return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_r
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
+ struct chcr_context *ctx = a_ctx(tfm);
u32 temp;

dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_r
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablk
unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;

dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablk
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;

- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_hash_src_ent(struct ahash_request *req,
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
struct chcr_dev *dev;
unsigned char tx_qidx;
unsigned char rx_qidx;
+ unsigned char tx_chan_id;
+ unsigned char pci_chan_id;
struct __crypto_ctx crypto_ctx[0];
};

