From: Christoph Hellwig <>
Subject: [PATCH 21/24] iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP
Date: Mon, 20 May 2019 09:29:45 +0200
For entirely dma coherent architectures there is no requirement to ever
remap dma coherent allocations.  Move all the remap and pool code under
IS_ENABLED() checks and drop the Kconfig dependency.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
---
 drivers/iommu/Kconfig     |  1 -
 drivers/iommu/dma-iommu.c | 16 +++++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)
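As a quick illustration of the pattern this patch relies on, here is a
minimal sketch (not taken from the kernel tree; my_alloc(),
my_alloc_contiguous() and my_remap() are hypothetical helpers).
IS_ENABLED(CONFIG_FOO) expands to a compile-time 0 or 1, so the guarded
branch is still parsed and type-checked on every configuration, but the
compiler discards it as dead code when the option is disabled.  That is
what lets the hard Kconfig dependency go away:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/types.h>	/* bool, size_t */

static void *my_alloc(size_t size, bool coherent)
{
	/* hypothetical contiguous allocation */
	void *addr = my_alloc_contiguous(size);

	/*
	 * When CONFIG_DMA_REMAP is not set this condition folds to
	 * "0 && ...", so the remap path is compiled out entirely.
	 */
	if (IS_ENABLED(CONFIG_DMA_REMAP) && !coherent)
		return my_remap(addr, size);	/* hypothetical remap helper */

	return addr;
}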
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 130e94477b6d..e559e43c8ac2 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -96,7 +96,6 @@ config IOMMU_DMA
 	select IOMMU_IOVA
 	select IRQ_MSI_IOMMU
 	select NEED_SG_DMA_LENGTH
-	depends on DMA_DIRECT_REMAP
 
 config FSL_PAMU
 	bool "Freescale IOMMU support"
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 84150ca7b572..0aff220c4aed 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -942,10 +942,11 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	struct page *page = NULL, **pages = NULL;
 
 	/* Non-coherent atomic allocation? Easy */
-	if (dma_free_from_pool(cpu_addr, alloc_size))
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_free_from_pool(cpu_addr, alloc_size))
 		return;
 
-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		/*
 		 * If it the address is remapped, then it's either non-coherent
 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -989,7 +990,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (!coherent || PageHighMem(page)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
 		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1022,11 +1023,12 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
 	gfp |= __GFP_ZERO;
 
-	if (gfpflags_allow_blocking(gfp) &&
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
 
-	if (!gfpflags_allow_blocking(gfp) && !coherent)
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !gfpflags_allow_blocking(gfp) && !coherent)
 		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
 	else
 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
@@ -1058,7 +1060,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 		return -ENXIO;
 
-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = __iommu_dma_get_pages(cpu_addr);
 
 		if (pages)
@@ -1080,7 +1082,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	struct page *page;
 	int ret;
 
-	if (is_vmalloc_addr(cpu_addr)) {
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
 		struct page **pages = __iommu_dma_get_pages(cpu_addr);
 
 		if (pages) {
-- 
2.20.1