    From	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
    Subject	[PATCH -tip 1/5] add asm-generic/dma-mapping-common.h
    Date	Thu, 14 May 2009
    This header file provides generic definitions of the DMA mapping
    functions for architectures that use struct dma_map_ops.
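
    An architecture converted to struct dma_map_ops only needs to provide
    a get_dma_ops() helper and then include this header from its own
    asm/dma-mapping.h, as later patches in this series do for the
    architectures they convert. A minimal sketch (the "xyz" architecture
    and the global dma_ops pointer are placeholders, not part of this
    patch):

    	/* arch/xyz/include/asm/dma-mapping.h (sketch) */
    	extern struct dma_map_ops *dma_ops;

    	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
    	{
    		/* a real port may pick per-device ops, e.g. from dev->archdata */
    		return dma_ops;
    	}

    	#include <asm-generic/dma-mapping-common.h>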

    Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
    Acked-by: Joerg Roedel <joerg.roedel@amd.com>
    ---
    include/asm-generic/dma-mapping-common.h | 190 ++++++++++++++++++++++++++++++
    1 files changed, 190 insertions(+), 0 deletions(-)
    create mode 100644 include/asm-generic/dma-mapping-common.h

    diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
    new file mode 100644
    index 0000000..5406a60
    --- /dev/null
    +++ b/include/asm-generic/dma-mapping-common.h
    @@ -0,0 +1,190 @@
    +#ifndef _ASM_GENERIC_DMA_MAPPING_H
    +#define _ASM_GENERIC_DMA_MAPPING_H
    +
    +#include <linux/kmemcheck.h>
    +#include <linux/scatterlist.h>
    +#include <linux/dma-debug.h>
    +#include <linux/dma-attrs.h>
    +
    +static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
    +					       size_t size,
    +					       enum dma_data_direction dir,
    +					       struct dma_attrs *attrs)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +	dma_addr_t addr;
    +
    +	kmemcheck_mark_initialized(ptr, size);
    +	BUG_ON(!valid_dma_direction(dir));
    +	addr = ops->map_page(dev, virt_to_page(ptr),
    +			     (unsigned long)ptr & ~PAGE_MASK, size,
    +			     dir, attrs);
    +	debug_dma_map_page(dev, virt_to_page(ptr),
    +			   (unsigned long)ptr & ~PAGE_MASK, size,
    +			   dir, addr, true);
    +	return addr;
    +}
    +
    +static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
    +					  size_t size,
    +					  enum dma_data_direction dir,
    +					  struct dma_attrs *attrs)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->unmap_page)
    +		ops->unmap_page(dev, addr, size, dir, attrs);
    +	debug_dma_unmap_page(dev, addr, size, dir, true);
    +}
    +
    +static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
    +				   int nents, enum dma_data_direction dir,
    +				   struct dma_attrs *attrs)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +	int i, ents;
    +	struct scatterlist *s;
    +
    +	for_each_sg(sg, s, nents, i)
    +		kmemcheck_mark_initialized(sg_virt(s), s->length);
    +	BUG_ON(!valid_dma_direction(dir));
    +	ents = ops->map_sg(dev, sg, nents, dir, attrs);
    +	debug_dma_map_sg(dev, sg, nents, ents, dir);
    +
    +	return ents;
    +}
    +
    +static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
    +				      int nents, enum dma_data_direction dir,
    +				      struct dma_attrs *attrs)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	debug_dma_unmap_sg(dev, sg, nents, dir);
    +	if (ops->unmap_sg)
    +		ops->unmap_sg(dev, sg, nents, dir, attrs);
    +}
    +
    +static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
    +				      size_t offset, size_t size,
    +				      enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +	dma_addr_t addr;
    +
    +	kmemcheck_mark_initialized(page_address(page) + offset, size);
    +	BUG_ON(!valid_dma_direction(dir));
    +	addr = ops->map_page(dev, page, offset, size, dir, NULL);
    +	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
    +
    +	return addr;
    +}
    +
    +static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
    +				  size_t size, enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->unmap_page)
    +		ops->unmap_page(dev, addr, size, dir, NULL);
    +	debug_dma_unmap_page(dev, addr, size, dir, false);
    +}
    +
    +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
    +					   size_t size,
    +					   enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_single_for_cpu)
    +		ops->sync_single_for_cpu(dev, addr, size, dir);
    +	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
    +	flush_write_buffers();
    +}
    +
    +static inline void dma_sync_single_for_device(struct device *dev,
    +					      dma_addr_t addr, size_t size,
    +					      enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_single_for_device)
    +		ops->sync_single_for_device(dev, addr, size, dir);
    +	debug_dma_sync_single_for_device(dev, addr, size, dir);
    +	flush_write_buffers();
    +}
    +
    +static inline void dma_sync_single_range_for_cpu(struct device *dev,
    +						 dma_addr_t addr,
    +						 unsigned long offset,
    +						 size_t size,
    +						 enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_single_range_for_cpu) {
    +		ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
    +		debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
    +
    +		flush_write_buffers();
    +	} else
    +		dma_sync_single_for_cpu(dev, addr, size, dir);
    +}
    +
    +static inline void dma_sync_single_range_for_device(struct device *dev,
    +						    dma_addr_t addr,
    +						    unsigned long offset,
    +						    size_t size,
    +						    enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_single_range_for_device) {
    +		ops->sync_single_range_for_device(dev, addr, offset, size, dir);
    +		debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
    +
    +		flush_write_buffers();
    +	} else
    +		dma_sync_single_for_device(dev, addr, size, dir);
    +}
    +
    +static inline void
    +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
    +		    int nelems, enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_sg_for_cpu)
    +		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
    +	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
    +	flush_write_buffers();
    +}
    +
    +static inline void
    +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
    +		       int nelems, enum dma_data_direction dir)
    +{
    +	struct dma_map_ops *ops = get_dma_ops(dev);
    +
    +	BUG_ON(!valid_dma_direction(dir));
    +	if (ops->sync_sg_for_device)
    +		ops->sync_sg_for_device(dev, sg, nelems, dir);
    +	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
    +
    +	flush_write_buffers();
    +}
    +
    +#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
    +#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
    +#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
    +#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
    +
    +#endif
    --
    1.6.0.6
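
    For reference, drivers keep using the familiar wrappers, which the
    macros at the bottom of the header route to the _attrs variants with
    a NULL attrs argument. A typical streaming-DMA sequence (dev, buf and
    len are assumed to come from the driver) looks like:

    	dma_addr_t handle;

    	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    	/* ... hand "handle" to the device and run the transfer ... */
    	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);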

