From: Mateusz Polrola <mateuszx.potrola@intel.com>
Subject: [RFC PATCH 36/60] hyper_dmabuf: error handling when share_pages fails

When an error occurs while sharing pages, all pages that have already been
shared need to be un-shared and a proper error code has to be returned.

Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
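For reviewers, the unwind pattern added below boils down to the following
minimal sketch (not part of the patch; the grant_frames() helper, its
parameters and the frames[] array are made up for illustration, only the
gnttab_* calls are the existing Xen grant-table API):

#include <xen/grant_table.h>

/*
 * Grant 'nents' frames to 'domid'.  If the grant table runs out of
 * entries, end and free every reference handed out so far, then
 * propagate the error so the caller can report the failure.
 */
static int grant_frames(domid_t domid, unsigned long *frames,
			grant_ref_t *refs, int nents)
{
	int i;

	for (i = 0; i < nents; i++) {
		int ret = gnttab_grant_foreign_access(domid, frames[i],
						      1 /* read-only */);
		if (ret < 0) {
			/* undo every grant made before the failure */
			while (i--) {
				gnttab_end_foreign_access_ref(refs[i], 1);
				gnttab_free_grant_reference(refs[i]);
			}
			return ret;
		}
		refs[i] = ret;
	}
	return 0;
}

The same walk-back is performed separately for the 2nd- and 3rd-level
addressing pages in hyper_dmabuf_xen_share_pages() before the temporary
tables are freed and -ENOSPC is returned to hyper_dmabuf_export_remote().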
drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 6 ++-
.../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c | 50 ++++++++++++++++++++++
2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index f1581d5..375b664 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -31,7 +31,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
-#include <linux/uaccess.h>
+#include <asm/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/delay.h>
#include <linux/list.h>
@@ -242,6 +242,10 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
operands[6] = page_info->last_len;
operands[7] = ops->share_pages (page_info->pages, export_remote_attr->remote_domain,
page_info->nents, &sgt_info->refs_info);
+ if (operands[7] < 0) {
+ dev_err(hyper_dmabuf_private.device, "pages sharing failed\n");
+ goto fail_map_req;
+ }

/* driver/application specific private info, max 4x4 bytes */
operands[8] = export_remote_attr->private[0];
diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
index 1416a69..908eda8 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c
@@ -109,6 +109,16 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
lvl2_table[i] = gnttab_grant_foreign_access(domid,
pfn_to_mfn(page_to_pfn(pages[i])),
true /* read-only from remote domain */);
+ if (lvl2_table[i] == -ENOSPC) {
+ dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+
+ /* Unshare all already shared pages for lvl2 */
+ while(i--) {
+ gnttab_end_foreign_access_ref(lvl2_table[i], 0);
+ gnttab_free_grant_reference(lvl2_table[i]);
+ }
+ goto err_cleanup;
+ }
}

/* Share 2nd level addressing pages in readonly mode*/
@@ -116,6 +126,23 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
lvl3_table[i] = gnttab_grant_foreign_access(domid,
virt_to_mfn((unsigned long)lvl2_table+i*PAGE_SIZE ),
true);
+ if (lvl3_table[i] == -ENOSPC) {
+ dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+
+ /* Unshare all already shared pages for lvl3 */
+ while(i--) {
+ gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+ gnttab_free_grant_reference(lvl3_table[i]);
+ }
+
+ /* Unshare all pages for lvl2 */
+ while(nents--) {
+ gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+ gnttab_free_grant_reference(lvl2_table[nents]);
+ }
+
+ goto err_cleanup;
+ }
}

/* Share lvl3_table in readonly mode*/
@@ -123,6 +150,23 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
virt_to_mfn((unsigned long)lvl3_table),
true);

+ if (lvl3_gref == -ENOSPC) {
+ dev_err(hyper_dmabuf_private.device, "No more space left in grant table\n");
+
+ /* Unshare all pages for lvl3 */
+ while(i--) {
+ gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+ gnttab_free_grant_reference(lvl3_table[i]);
+ }
+
+ /* Unshare all pages for lvl2 */
+ while(nents--) {
+ gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+ gnttab_free_grant_reference(lvl2_table[nents]);
+ }
+
+ goto err_cleanup;
+ }

/* Store lvl3_table page to be freed later */
sh_pages_info->lvl3_table = lvl3_table;
@@ -136,6 +180,12 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,

dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return lvl3_gref;
+
+err_cleanup:
+ free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+ free_pages((unsigned long)lvl3_table, 1);
+
+ return -ENOSPC;
}

int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
--
2.7.4