Subject: [PATCH 5.13 285/800] crypto: hisilicon/hpre - fix unmapping invalid dma address
From: Hui Tang <tanghui20@huawei.com>

[ Upstream commit 0b0553b701f830d820ba9026e5799c24e400a4b5 ]

Currently, an invalid DMA address may be unmapped when 'xx_data_clr_all' is
called in the error path, so check whether the DMA addresses in the sqe 'in'
and 'out' fields have actually been initialized before calling
'dma_free_coherent' or 'dma_unmap_single'.

Fixes: a9214b0b6ed2 ("crypto: hisilicon - fix the check on dma address")
Signed-off-by: Hui Tang <tanghui20@huawei.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
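
For illustration, here is a minimal standalone sketch of the pattern the diff
below applies (hypothetical demo_* names, not the hpre driver's own
structures): the descriptor's DMA fields start out as DMA_MAPPING_ERROR, and
the cleanup helper checks dma_mapping_error() so it only unmaps addresses
that were really set.

#include <linux/dma-mapping.h>
#include <linux/types.h>

struct demo_sqe {
	__le64 in;
	__le64 out;
};

/* Mark both DMA fields as "not mapped yet" when the request is built. */
static void demo_msg_init(struct demo_sqe *sqe)
{
	sqe->in = cpu_to_le64(DMA_MAPPING_ERROR);
	sqe->out = cpu_to_le64(DMA_MAPPING_ERROR);
}

/* Cleanup helper: only free a buffer whose address was actually mapped. */
static void demo_data_clr(struct device *dev, struct demo_sqe *sqe,
			  void *buf, size_t len)
{
	dma_addr_t dma = le64_to_cpu(sqe->in);

	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	dma_free_coherent(dev, len, buf, dma);
}

With this, an error path that never mapped the buffer leaves the sentinel in
place and the helper returns early instead of freeing a bogus address.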

diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index a380087c83f7..782ddffa5d90 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -298,6 +298,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t tmp;

tmp = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;

if (src) {
if (req->src)
@@ -307,6 +309,8 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
}

tmp = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, tmp)))
+ return;

if (req->dst) {
if (dst)
@@ -524,6 +528,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
}

+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
h_req->ctx = ctx;
@@ -1372,11 +1378,15 @@ static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t dma;

dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;

if (src && req->src)
dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;

if (req->dst)
dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
@@ -1431,6 +1441,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.ecdh = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->ecdh.dma_p);

msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
@@ -1667,11 +1679,15 @@ static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
dma_addr_t dma;

dma = le64_to_cpu(sqe->in);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;

if (src && req->src)
dma_free_coherent(dev, ctx->key_sz, req->src, dma);

dma = le64_to_cpu(sqe->out);
+ if (unlikely(dma_mapping_error(dev, dma)))
+ return;

if (req->dst)
dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
@@ -1722,6 +1738,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
h_req->areq.curve25519 = req;
msg = &h_req->req;
memset(msg, 0, sizeof(*msg));
+ msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
+ msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
msg->key = cpu_to_le64(ctx->curve25519.dma_p);

msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
--
2.30.2

