Subject: [PATCH 07/11] misc: fastrpc: Rework fastrpc_req_munmap
From: Abel Vesa <abel.vesa@linaro.org>

Move the lookup of the munmap request into fastrpc_req_munmap and pass
only the buf down to the lower-level fastrpc_req_munmap_impl. That way,
the error path in fastrpc_req_mmap can call fastrpc_req_munmap_impl
directly to free the buf without searching for the munmap request it
belongs to.
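
Not part of the patch, just for illustration: a stand-alone userspace
sketch of the resulting split, using simplified stand-in names
(struct user_ctx, req_munmap, req_munmap_impl) and no locking or DSP
messaging. It only shows the pattern: the entry point does the list
lookup, while the _impl helper operates on a buffer the caller already
holds, so an error path that has the buffer at hand can reuse the
helper without a second search.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	uint64_t raddr;
	uint64_t size;
};

struct user_ctx {
	struct buf *mmaps;	/* stand-in for the fl->mmaps list */
};

/* Lower level: unlink and free a buffer the caller has already resolved. */
static int req_munmap_impl(struct user_ctx *ctx, struct buf *buf)
{
	struct buf **pp;

	for (pp = &ctx->mmaps; *pp; pp = &(*pp)->next) {
		if (*pp == buf) {
			*pp = buf->next;
			free(buf);
			return 0;
		}
	}
	return -1;
}

/* Upper level: resolve the user request to a buffer, then delegate. */
static int req_munmap(struct user_ctx *ctx, uint64_t vaddrout, uint64_t size)
{
	struct buf *iter;

	for (iter = ctx->mmaps; iter; iter = iter->next) {
		if (iter->raddr == vaddrout && iter->size == size)
			return req_munmap_impl(ctx, iter);
	}

	fprintf(stderr, "mmap 0x%09llx [len 0x%08llx] not in list\n",
		(unsigned long long)vaddrout, (unsigned long long)size);
	return -1;
}

int main(void)
{
	struct user_ctx ctx = { .mmaps = NULL };
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return EXIT_FAILURE;

	b->raddr = 0x1000;
	b->size = 4096;
	b->next = ctx.mmaps;
	ctx.mmaps = b;

	/*
	 * A caller that already holds b (e.g. an mmap error path) could
	 * call req_munmap_impl(&ctx, b) directly; here we go through the
	 * lookup-based entry point instead.
	 */
	return req_munmap(&ctx, 0x1000, 4096) ? EXIT_FAILURE : EXIT_SUCCESS;
}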

Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
Co-developed-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
---
drivers/misc/fastrpc.c | 47 +++++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index b1e16bd56c6b..9a373fcb4ba0 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1633,30 +1633,14 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
return 0;
}

-static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
- struct fastrpc_req_munmap *req)
+static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
- struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;

- spin_lock(&fl->lock);
- list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
- if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
- buf = iter;
- break;
- }
- }
- spin_unlock(&fl->lock);
-
- if (!buf) {
- dev_err(dev, "mmap not in list\n");
- return -EINVAL;
- }
-
req_msg.pgid = fl->tgid;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
@@ -1682,12 +1666,29 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,

static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
+ struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_req_munmap req;
+ struct device *dev = fl->sctx->dev;

if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;

- return fastrpc_req_munmap_impl(fl, &req);
+ spin_lock(&fl->lock);
+ list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
+ if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
+ buf = iter;
+ break;
+ }
+ }
+ spin_unlock(&fl->lock);
+
+ if (!buf) {
+ dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
+ req.vaddrout, req.size);
+ return -EINVAL;
+ }
+
+ return fastrpc_req_munmap_impl(fl, buf);
}

static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
@@ -1696,7 +1697,6 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
struct fastrpc_buf *buf = NULL;
struct fastrpc_mmap_req_msg req_msg;
struct fastrpc_mmap_rsp_msg rsp_msg;
- struct fastrpc_req_munmap req_unmap;
struct fastrpc_phy_page pages;
struct fastrpc_req_mmap req;
struct device *dev = fl->sctx->dev;
@@ -1758,11 +1758,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
spin_unlock(&fl->lock);

if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
- /* unmap the memory and release the buffer */
- req_unmap.vaddrout = buf->raddr;
- req_unmap.size = buf->size;
- fastrpc_req_munmap_impl(fl, &req_unmap);
- return -EFAULT;
+ err = -EFAULT;
+ goto err_assign;
}

dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
@@ -1770,6 +1767,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)

return 0;

+err_assign:
+ fastrpc_req_munmap_impl(fl, buf);
err_invoke:
fastrpc_buf_free(buf);

--
2.25.1