Subject: [RFC/PATCH 2/4] dma-mapping: Add dma_remap() APIs
From: Laura Abbott <lauraa@codeaurora.org>

Some systems are memory constrained but need to load very large
firmware images. The firmware subsystem allows drivers to request
that this firmware be loaded from the filesystem, but it requires
the entire firmware to be loaded into kernel memory before it is
handed to the driver. This can lead to a situation where we map
the firmware twice: once to load it into kernel memory and once
to copy it into its final resting place.

This design creates needless memory pressure and delays loading
because we have to copy from kernel memory to somewhere else.
Let's add a couple of DMA APIs that allow us to map DMA buffers
into the CPU's address space in arbitrarily sized chunks. With
these APIs, we can allocate a DMA buffer with
DMA_ATTR_NO_KERNEL_MAPPING and move a small mapping window across
our large DMA buffer to load the firmware directly into the
buffer.
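
A minimal caller sketch of that usage pattern (not part of this
patch): the driver name load_fw_windowed(), the helper
fw_read_chunk() and the SZ_1M window size are hypothetical
placeholders; only dma_remap()/dma_unremap() and
DMA_ATTR_NO_KERNEL_MAPPING come from this series.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

/*
 * Stream a large firmware image into a DMA buffer that has no
 * permanent kernel mapping, one small window at a time.
 * fw_read_chunk() stands in for whatever fills the window from the
 * filesystem; it is not a real kernel helper.
 */
static int load_fw_windowed(struct device *dev, size_t fw_size)
{
	DEFINE_DMA_ATTRS(attrs);
	const size_t win_size = SZ_1M;	/* arbitrary window size */
	dma_addr_t handle;
	void *cpu_addr;
	unsigned long off;
	int ret = 0;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	cpu_addr = dma_alloc_attrs(dev, fw_size, &handle, GFP_KERNEL, &attrs);
	if (!cpu_addr)
		return -ENOMEM;

	for (off = 0; off < fw_size; off += win_size) {
		size_t len = min_t(size_t, win_size, fw_size - off);
		void *win;

		/* Map only this window into the CPU's address space. */
		win = dma_remap(dev, cpu_addr, handle, len, off, &attrs);
		if (!win) {
			ret = -ENOMEM;
			break;
		}
		ret = fw_read_chunk(win, off, len);	/* hypothetical */
		dma_unremap(dev, win, len, off, &attrs);
		if (ret)
			break;
	}

	/* On success the buffer stays allocated for the device to use. */
	if (ret)
		dma_free_attrs(dev, fw_size, cpu_addr, handle, &attrs);
	return ret;
}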

Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
[stephen.boyd@linaro.org: Add dma_attrs and offset to API, use
dma_common_contiguous_remap() instead of ioremap_page_range(),
support dma_remap() even when DMA_ATTR_NO_KERNEL_MAPPING isn't
specified, rewrite commit text]
Signed-off-by: Stephen Boyd <stephen.boyd@linaro.org>
---

TODO: Split off the arm64 part into its own patch?

arch/arm64/mm/dma-mapping.c | 39 +++++++++++++++++++++++++++++++++++++++
include/linux/dma-mapping.h | 35 +++++++++++++++++++++++++++++++++++
2 files changed, 74 insertions(+)

diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 06a593653f23..92a313a7309b 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -345,6 +345,43 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }

+static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size,
+			unsigned long offset, struct dma_attrs *attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle) + offset);
+	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+	void *ptr;
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+		offset &= ~PAGE_MASK;
+		size = PAGE_ALIGN(size + offset);
+
+		ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+					NULL);
+	} else {
+		ptr = cpu_addr;
+	}
+	if (!ptr)
+		return NULL;
+
+	return ptr + offset;
+}
+
+static void arm64_dma_unremap(struct device *dev, void *cpu_addr,
+			size_t size, unsigned long offset,
+			struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return;
+
+	offset &= ~PAGE_MASK;
+	cpu_addr -= offset;
+
+	vunmap(cpu_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -360,6 +397,8 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
 	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.remap = arm64_dma_remap,
+	.unremap = arm64_dma_unremap,
 };

 static int __init atomic_pool_init(void)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 75857cda38e9..d4ae45746d61 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -64,6 +64,12 @@ struct dma_map_ops {
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	int (*set_dma_mask)(struct device *dev, u64 mask);
+	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, unsigned long offset,
+			struct dma_attrs *attrs);
+	void (*unremap)(struct device *dev, void *cpu_addr,
+			size_t size, unsigned long offset,
+			struct dma_attrs *attrs);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 	u64 (*get_required_mask)(struct device *dev);
 #endif
@@ -465,6 +471,35 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 }
 #endif

+static inline void *dma_remap(struct device *dev, void *cpu_addr,
+		dma_addr_t dma_handle, size_t size, unsigned long offset,
+		struct dma_attrs *attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops)
+		return NULL;
+	if (!ops->remap)
+		return NULL;
+
+	return ops->remap(dev, cpu_addr, dma_handle, size, offset, attrs);
+}
+
+
+static inline void dma_unremap(struct device *dev, void *remapped_addr,
+		size_t size, unsigned long offset,
+		struct dma_attrs *attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops)
+		return;
+	if (!ops->unremap)
+		return;
+
+	ops->unremap(dev, remapped_addr, size, offset, attrs);
+}
+
 static inline u64 dma_get_mask(struct device *dev)
 {
 	if (dev && dev->dma_mask && *dev->dma_mask)
--
2.7.0.25.gfc10eb5