Subject: [PATCHv3 08/16] staging: vme_user: provide DMA functionality

This introduces a new DMA device that provides a single ioctl call offering
DMA read and write functionality to user space.

Signed-off-by: Dmitry Kalinkin <dmitry.kalinkin@gmail.com>
Cc: Igor Alekseev <igor.alekseev@itep.ru>
---
drivers/staging/vme/devices/vme_user.c | 201 ++++++++++++++++++++++++++++++++-
drivers/staging/vme/devices/vme_user.h | 11 ++
2 files changed, 209 insertions(+), 3 deletions(-)
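
For illustration only (not part of the patch), a minimal user-space sketch
of driving the new ioctl. It assumes the udev node shows up as
/dev/bus/vme/dma0, as created below, and it mirrors a few constants whose
assumed values are taken from the kernel's vme.h (which is not a uapi
header) at the time of this series:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vme_user.h"           /* struct vme_dma_op, VME_DO_DMA */

/* Assumed values, mirrored from drivers/vme/vme.h (not exported to uapi). */
#define VME_A32                 0x4     /* A32 address space */
#define VME_SCT                 0x1     /* single cycle transfers */
#define VME_USER                0x2000  /* non-privileged AM code */
#define VME_DATA                0x8000  /* data AM code */
#define VME_D32                 0x4     /* 32-bit data width */
#define VME_DMA_VME_TO_MEM      0x1     /* direction: VME -> memory */

int main(void)
{
        struct vme_dma_op op;
        void *buf;
        int fd, ret;

        fd = open("/dev/bus/vme/dma0", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        buf = malloc(0x1000);
        if (!buf)
                return 1;

        memset(&op, 0, sizeof(op));
        op.vme_addr = 0x20000000;       /* example VME start address */
        op.buf_vaddr = (uintptr_t)buf;  /* user buffer to land data in */
        op.count = 0x1000;              /* bytes to transfer */
        op.aspace = VME_A32;
        op.cycle = VME_SCT | VME_USER | VME_DATA;
        op.dwidth = VME_D32;
        op.dir = VME_DMA_VME_TO_MEM;    /* read from VME into memory */

        /*
         * On success the ioctl returns the number of bytes transferred,
         * which may be less than requested (clamped to VME_MAX_DMA_LEN).
         */
        ret = ioctl(fd, VME_DO_DMA, &op);
        if (ret < 0)
                perror("VME_DO_DMA");

        free(buf);
        close(fd);
        return ret < 0;
}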

diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index da828f4..e8a1ca6 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -79,15 +79,18 @@ static unsigned int bus_num;
  * We shall support 4 masters and 4 slaves with this driver.
  */
 #define VME_MAJOR       221     /* VME Major Device Number */
-#define VME_DEVS        9       /* Number of dev entries */
+#define VME_DEVS        10      /* Number of dev entries */
 
 #define MASTER_MINOR    0
 #define MASTER_MAX      3
 #define SLAVE_MINOR     4
 #define SLAVE_MAX       7
 #define CONTROL_MINOR   8
+#define DMA_MINOR       9
 
-#define PCI_BUF_SIZE  0x20000   /* Size of one slave image buffer */
+#define PCI_BUF_SIZE    0x20000 /* Size of one slave image buffer */
+
+#define VME_MAX_DMA_LEN 0x4000000       /* Maximal DMA transfer length */
 
 /*
  * Structure to handle image related parameters.
@@ -125,7 +128,7 @@ static const int type[VME_DEVS] = {     MASTER_MINOR,   MASTER_MINOR,
                                         MASTER_MINOR,   MASTER_MINOR,
                                         SLAVE_MINOR,    SLAVE_MINOR,
                                         SLAVE_MINOR,    SLAVE_MINOR,
-                                        CONTROL_MINOR
+                                        CONTROL_MINOR,  DMA_MINOR
                                 };
 
 
@@ -443,6 +446,168 @@ static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
         return -EINVAL;
 }

+static int vme_user_sg_to_dma_list(const struct vme_dma_op *dma_op,
+                                   struct sg_table *sgt,
+                                   int sg_count, struct vme_dma_list *dma_list)
+{
+        ssize_t pos = 0;
+        struct scatterlist *sg;
+        int i, ret;
+
+        for_each_sg(sgt->sgl, sg, sg_count, i) {
+                struct vme_dma_attr *pci_attr, *vme_attr, *src, *dest;
+                dma_addr_t hw_address = sg_dma_address(sg);
+                unsigned int hw_len = sg_dma_len(sg);
+
+                vme_attr = vme_dma_vme_attribute(dma_op->vme_addr + pos,
+                                                 dma_op->aspace,
+                                                 dma_op->cycle,
+                                                 dma_op->dwidth);
+                if (!vme_attr)
+                        return -ENOMEM;
+
+                pci_attr = vme_dma_pci_attribute(hw_address);
+                if (!pci_attr) {
+                        vme_dma_free_attribute(vme_attr);
+                        return -ENOMEM;
+                }
+
+                switch (dma_op->dir) {
+                case VME_DMA_MEM_TO_VME:
+                        src = pci_attr;
+                        dest = vme_attr;
+                        break;
+                case VME_DMA_VME_TO_MEM:
+                        src = vme_attr;
+                        dest = pci_attr;
+                        break;
+                }
+
+                ret = vme_dma_list_add(dma_list, src, dest, hw_len);
+
+                /*
+                 * XXX VME API doesn't mention whether we should keep
+                 * attributes around
+                 */
+                vme_dma_free_attribute(vme_attr);
+                vme_dma_free_attribute(pci_attr);
+
+                if (ret)
+                        return ret;
+
+                pos += hw_len;
+        }
+
+        return 0;
+}
+
+static enum dma_data_direction vme_dir_to_dma_dir(unsigned vme_dir)
+{
+        switch (vme_dir) {
+        case VME_DMA_VME_TO_MEM:
+                return DMA_FROM_DEVICE;
+        case VME_DMA_MEM_TO_VME:
+                return DMA_TO_DEVICE;
+        }
+
+        return DMA_NONE;
+}
+
+static ssize_t vme_user_dma_ioctl(unsigned int minor,
+                                  const struct vme_dma_op *dma_op)
+{
+        unsigned int offset = offset_in_page(dma_op->buf_vaddr);
+        unsigned long nr_pages;
+        enum dma_data_direction dir;
+        struct vme_dma_list *dma_list;
+        struct sg_table *sgt = NULL;
+        struct page **pages = NULL;
+        long got_pages;
+        ssize_t count;
+        int retval, sg_count;
+
+        /* Prevent WARN from dma_map_sg */
+        if (dma_op->count == 0)
+                return 0;
+
+        /*
+         * This is a voluntary limit to prevent a huge allocation for the
+         * pages array. VME_MAX_DMA_LEN is not a fundamental VME constraint.
+         */
+        count = min_t(size_t, dma_op->count, VME_MAX_DMA_LEN);
+        nr_pages = (offset + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+        dir = vme_dir_to_dma_dir(dma_op->dir);
+        if (dir == DMA_NONE)
+                return -EINVAL;
+
+        pages = kmalloc_array(nr_pages, sizeof(pages[0]), GFP_KERNEL);
+        if (!pages) {
+                retval = -ENOMEM;
+                goto free;
+        }
+
+        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+        if (!sgt) {
+                retval = -ENOMEM;
+                goto free;
+        }
+
+        dma_list = vme_new_dma_list(image[minor].resource);
+        if (!dma_list) {
+                retval = -ENOMEM;
+                goto free;
+        }
+
+        got_pages = get_user_pages_fast(dma_op->buf_vaddr, nr_pages,
+                                        dir == DMA_FROM_DEVICE, pages);
+        if (got_pages != nr_pages) {
+                pr_debug("Not all pages were pinned\n");
+                retval = (got_pages < 0) ? got_pages : -EFAULT;
+                goto release_pages;
+        }
+
+        retval = sg_alloc_table_from_pages(sgt, pages, nr_pages,
+                                           offset, count, GFP_KERNEL);
+        if (retval)
+                goto release_pages;
+
+        sg_count = dma_map_sg(vme_user_bridge->dev.parent,
+                              sgt->sgl, sgt->nents, dir);
+        if (!sg_count) {
+                pr_debug("DMA mapping error\n");
+                retval = -EFAULT;
+                goto free_sgt;
+        }
+
+        retval = vme_user_sg_to_dma_list(dma_op, sgt, sg_count, dma_list);
+        if (retval)
+                goto dma_unmap;
+
+        retval = vme_dma_list_exec(dma_list);
+
+dma_unmap:
+        dma_unmap_sg(vme_user_bridge->dev.parent, sgt->sgl, sgt->nents, dir);
+
+free_sgt:
+        sg_free_table(sgt);
+
+release_pages:
+        if (got_pages > 0)
+                release_pages(pages, got_pages, 0);
+
+        vme_dma_list_free(dma_list);
+
+free:
+        kfree(sgt);
+        kfree(pages);
+
+        if (retval)
+                return retval;
+
+        return count;
+}
+
 /*
  * The ioctls provided by the old VME access method (the one at vmelinux.org)
  * are most certainly wrong as they effectively push the register layout
@@ -459,6 +624,7 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
         struct vme_master master;
         struct vme_slave slave;
         struct vme_irq_id irq_req;
+        struct vme_dma_op dma_op;
         unsigned long copied;
         unsigned int minor = MINOR(inode->i_rdev);
         int retval;
@@ -569,6 +735,19 @@ static int vme_user_ioctl(struct inode *inode, struct file *file,
                         break;
                 }
                 break;
+        case DMA_MINOR:
+                switch (cmd) {
+                case VME_DO_DMA:
+                        copied = copy_from_user(&dma_op, argp,
+                                                sizeof(dma_op));
+                        if (copied != 0) {
+                                pr_warn("Partial copy from userspace\n");
+                                return -EFAULT;
+                        }
+
+                        return vme_user_dma_ioctl(minor, &dma_op);
+                }
+                break;
         }
 
         return -EINVAL;
@@ -842,6 +1021,15 @@ static int vme_user_probe(struct vme_dev *vdev)
                 }
         }
 
+        image[DMA_MINOR].resource = vme_dma_request(vme_user_bridge,
+                        VME_DMA_VME_TO_MEM | VME_DMA_MEM_TO_VME);
+        if (!image[DMA_MINOR].resource) {
+                dev_warn(&vdev->dev,
+                         "Unable to allocate dma resource\n");
+                err = -ENOMEM;
+                goto err_master;
+        }
+
         /* Create sysfs entries - on udev systems this creates the dev files */
         vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
         if (IS_ERR(vme_user_sysfs_class)) {
@@ -864,6 +1052,9 @@ static int vme_user_probe(struct vme_dev *vdev)
                 case SLAVE_MINOR:
                         name = "bus/vme/s%d";
                         break;
+                case DMA_MINOR:
+                        name = "bus/vme/dma0";
+                        break;
                 default:
                         err = -EINVAL;
                         goto err_sysfs;
@@ -888,6 +1079,8 @@ err_sysfs:
         }
         class_destroy(vme_user_sysfs_class);
 
+        vme_dma_free(image[DMA_MINOR].resource);
+
         /* Ensure counter set correctly to unalloc all master windows */
         i = MASTER_MAX + 1;
 err_master:
@@ -927,6 +1120,8 @@ static int vme_user_remove(struct vme_dev *dev)
         }
         class_destroy(vme_user_sysfs_class);
 
+        vme_dma_free(image[DMA_MINOR].resource);
+
         for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
                 kfree(image[i].kern_buf);
                 vme_master_free(image[i].resource);
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index b8cc7bc..252b1c9 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -48,11 +48,22 @@ struct vme_irq_id {
         __u8 statid;
 };
 
+struct vme_dma_op {
+        __u64 vme_addr;         /* Starting Address on the VMEbus */
+        __u64 buf_vaddr;        /* Pointer to userspace memory */
+        __u32 count;            /* Count of bytes to copy */
+        __u32 aspace;           /* Address Space */
+        __u32 cycle;            /* Cycle properties */
+        __u32 dwidth;           /* Data transfer width */
+        __u32 dir;              /* Transfer Direction */
+};
+
 #define VME_GET_SLAVE _IOR(VME_IOC_MAGIC, 1, struct vme_slave)
 #define VME_SET_SLAVE _IOW(VME_IOC_MAGIC, 2, struct vme_slave)
 #define VME_GET_MASTER _IOR(VME_IOC_MAGIC, 3, struct vme_master)
 #define VME_SET_MASTER _IOW(VME_IOC_MAGIC, 4, struct vme_master)
 #define VME_IRQ_GEN _IOW(VME_IOC_MAGIC, 5, struct vme_irq_id)
+#define VME_DO_DMA _IOW(VME_IOC_MAGIC, 7, struct vme_dma_op)
 
 #endif /* _VME_USER_H_ */

--
1.8.3.1

