Subject: [PATCH RESEND 05/11] ALSA: vsnd: Implement handling of shared buffers
From: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>

Implement shared buffer handling according to the
para-virtualized sound device protocol at xen/interface/io/sndif.h:
- manage buffer memory
- handle granted references
- handle page directories

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
---
sound/drivers/xen-front.c | 178 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 178 insertions(+)
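
For illustration only (this snippet is not part of the patch): a rough,
standalone sketch of the grant reference accounting performed by
sh_buf_alloc() below, assuming 4 KiB Xen pages, 4-byte grant references and
a directory header consisting of just the gref_dir_next_page field, i.e.
(4096 - 4) / 4 = 1023 buffer grefs per directory page. The 64 KiB buffer
size is an arbitrary example value.

#include <stdio.h>

#define PAGE_SZ        4096u                           /* XEN_PAGE_SIZE       */
#define GREF_SZ        4u                              /* sizeof(grant_ref_t) */
#define GREFS_PER_PAGE ((PAGE_SZ - GREF_SZ) / GREF_SZ) /* 1023 per dir page   */

int main(void)
{
	unsigned int buffer_size = 64 * 1024; /* example ALSA buffer size */
	unsigned int num_pages_vbuffer =
		(buffer_size + PAGE_SZ - 1) / PAGE_SZ;  /* DIV_ROUND_UP */
	unsigned int num_pages_dir =
		(num_pages_vbuffer + GREFS_PER_PAGE - 1) / GREFS_PER_PAGE;
	unsigned int num_grefs = num_pages_vbuffer + num_pages_dir;

	/* prints: 16 buffer pages, 1 directory page, 17 grant references */
	printf("%u buffer pages, %u directory page(s), %u grant references\n",
	       num_pages_vbuffer, num_pages_dir, num_grefs);
	return 0;
}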

diff --git a/sound/drivers/xen-front.c b/sound/drivers/xen-front.c
index a92459b2737e..04ebc15757f4 100644
--- a/sound/drivers/xen-front.c
+++ b/sound/drivers/xen-front.c
@@ -58,6 +58,14 @@ struct xdrv_evtchnl_info {
uint16_t resp_id;
};

+struct sh_buf_info {
+	int num_grefs;
+	grant_ref_t *grefs;
+	uint8_t *vdirectory;
+	uint8_t *vbuffer;
+	size_t vbuffer_sz;
+};
+
struct cfg_stream {
int unique_id;
char *xenstore_path;
@@ -825,6 +833,176 @@ static void xdrv_remove_internal(struct xdrv_info *drv_info)
xdrv_evtchnl_free_all(drv_info);
}

+static inline grant_ref_t sh_buf_get_dir_start(struct sh_buf_info *buf)
+{
+	if (!buf->grefs)
+		return GRANT_INVALID_REF;
+	return buf->grefs[0];
+}
+
+static inline void sh_buf_clear(struct sh_buf_info *buf)
+{
+	memset(buf, 0, sizeof(*buf));
+}
+
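+/*
+ * End foreign access for every granted page, then release the page
+ * directory and the data buffer and clear the buffer info for reuse.
+ */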
+static void sh_buf_free(struct sh_buf_info *buf)
+{
+	int i;
+
+	if (buf->grefs) {
+		for (i = 0; i < buf->num_grefs; i++)
+			if (buf->grefs[i] != GRANT_INVALID_REF)
+				gnttab_end_foreign_access(buf->grefs[i],
+						0, 0UL);
+		kfree(buf->grefs);
+	}
+	kfree(buf->vdirectory);
+	free_pages_exact(buf->vbuffer, buf->vbuffer_sz);
+	sh_buf_clear(buf);
+}
+
+/*
+ * number of grant references a page can hold with respect to the
+ * xensnd_page_directory header
+ */
+#define XENSND_NUM_GREFS_PER_PAGE ((XEN_PAGE_SIZE - \
+	offsetof(struct xensnd_page_directory, gref)) / \
+	sizeof(grant_ref_t))
+
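+/*
+ * Build the page directory: the first num_pages_dir entries of buf->grefs
+ * reference the directory pages themselves, the rest reference the data
+ * buffer pages. Each directory page carries up to XENSND_NUM_GREFS_PER_PAGE
+ * buffer grefs and links to the next directory page via gref_dir_next_page;
+ * the last page terminates the chain with GRANT_INVALID_REF.
+ */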
+static void sh_buf_fill_page_dir(struct sh_buf_info *buf, int num_pages_dir)
+{
+	struct xensnd_page_directory *page_dir;
+	unsigned char *ptr;
+	int i, cur_gref, grefs_left, to_copy;
+
+	ptr = buf->vdirectory;
+	grefs_left = buf->num_grefs - num_pages_dir;
+	/*
+	 * skip grant references at the beginning, they are for pages granted
+	 * for the page directory itself
+	 */
+	cur_gref = num_pages_dir;
+	for (i = 0; i < num_pages_dir; i++) {
+		page_dir = (struct xensnd_page_directory *)ptr;
+		if (grefs_left <= XENSND_NUM_GREFS_PER_PAGE) {
+			to_copy = grefs_left;
+			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+		} else {
+			to_copy = XENSND_NUM_GREFS_PER_PAGE;
+			page_dir->gref_dir_next_page = buf->grefs[i + 1];
+		}
+		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
+		       to_copy * sizeof(grant_ref_t));
+		ptr += XEN_PAGE_SIZE;
+		grefs_left -= to_copy;
+		cur_gref += to_copy;
+	}
+}
+
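+/*
+ * Grant the other end (the backend) access to the directory pages first,
+ * then to the data buffer pages; the resulting grant references are stored
+ * in buf->grefs in the same order.
+ */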
+static int sh_buf_grant_refs(struct xenbus_device *xb_dev,
+		struct sh_buf_info *buf,
+		int num_pages_dir, int num_pages_vbuffer, int num_grefs)
+{
+	grant_ref_t priv_gref_head;
+	int ret, i, j, cur_ref;
+	int otherend_id;
+
+	ret = gnttab_alloc_grant_references(num_grefs, &priv_gref_head);
+	if (ret)
+		return ret;
+
+	buf->num_grefs = num_grefs;
+	otherend_id = xb_dev->otherend_id;
+	j = 0;
+
+	for (i = 0; i < num_pages_dir; i++) {
+		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+		if (cur_ref < 0) {
+			ret = cur_ref;
+			goto fail;
+		}
+
+		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
+				xen_page_to_gfn(virt_to_page(buf->vdirectory +
+						XEN_PAGE_SIZE * i)), 0);
+		buf->grefs[j++] = cur_ref;
+	}
+
+	for (i = 0; i < num_pages_vbuffer; i++) {
+		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+		if (cur_ref < 0) {
+			ret = cur_ref;
+			goto fail;
+		}
+
+		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
+				xen_page_to_gfn(virt_to_page(buf->vbuffer +
+						XEN_PAGE_SIZE * i)), 0);
+		buf->grefs[j++] = cur_ref;
+	}
+
+	gnttab_free_grant_references(priv_gref_head);
+	sh_buf_fill_page_dir(buf, num_pages_dir);
+	return 0;
+
+fail:
+	gnttab_free_grant_references(priv_gref_head);
+	return ret;
+}
+
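+/*
+ * Allocate the grant reference array, the page directory pages and the
+ * shared data buffer itself.
+ */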
+static int sh_buf_alloc_int_buffers(struct sh_buf_info *buf,
+		int num_pages_dir, int num_pages_vbuffer, int num_grefs)
+{
+	buf->grefs = kcalloc(num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
+	if (!buf->grefs)
+		return -ENOMEM;
+
+	buf->vdirectory = kcalloc(num_pages_dir, XEN_PAGE_SIZE, GFP_KERNEL);
+	if (!buf->vdirectory)
+		goto fail;
+
+	buf->vbuffer_sz = num_pages_vbuffer * XEN_PAGE_SIZE;
+	buf->vbuffer = alloc_pages_exact(buf->vbuffer_sz, GFP_KERNEL);
+	if (!buf->vbuffer)
+		goto fail;
+	return 0;
+
+fail:
+	kfree(buf->grefs);
+	buf->grefs = NULL;
+	kfree(buf->vdirectory);
+	buf->vdirectory = NULL;
+	return -ENOMEM;
+}
+
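+/*
+ * Allocate a shared buffer for buffer_size bytes: work out how many data
+ * and directory pages are needed, allocate them, grant the backend access
+ * and fill the page directory.
+ */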
+static int sh_buf_alloc(struct xenbus_device *xb_dev,
+		struct sh_buf_info *buf, unsigned int buffer_size)
+{
+	int num_pages_vbuffer, num_pages_dir, num_grefs;
+	int ret;
+
+	sh_buf_clear(buf);
+
+	num_pages_vbuffer = DIV_ROUND_UP(buffer_size, XEN_PAGE_SIZE);
+	/* number of pages the page directory consumes itself */
+	num_pages_dir = DIV_ROUND_UP(num_pages_vbuffer,
+			XENSND_NUM_GREFS_PER_PAGE);
+	num_grefs = num_pages_vbuffer + num_pages_dir;
+
+	ret = sh_buf_alloc_int_buffers(buf, num_pages_dir,
+			num_pages_vbuffer, num_grefs);
+	if (ret < 0)
+		return ret;
+
+	ret = sh_buf_grant_refs(xb_dev, buf,
+			num_pages_dir, num_pages_vbuffer, num_grefs);
+	if (ret < 0)
+		return ret;
+
+	sh_buf_fill_page_dir(buf, num_pages_dir);
+	return 0;
+}
+
static int xdrv_be_on_initwait(struct xdrv_info *drv_info)
{
int stream_idx;
--
2.7.4