From: Christoph Hellwig <hch@lst.de>
Subject: [PATCH 15/24] iommu/dma: Merge the CMA and alloc_pages allocation paths
Date: 2019-05-20
Instead of having separate code paths for the non-blocking alloc_pages
and CMA allocations, merge them into one. There is a slight behavior
change here: we now try the page allocator if CMA fails. This matches
what dma-direct and other IOMMU drivers do, and will be needed to use
the dma-iommu code on architectures without DMA remapping later on.
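
For orientation, the merged allocation order in iommu_dma_alloc() can be
sketched as below. This is an illustrative condensation, not the literal
kernel code: the remap path, the IOMMU mapping step, and error unwinding
are omitted, and the alloc_backing_pages() wrapper is hypothetical; the
helpers it calls are the ones used in the diff.

/*
 * Hypothetical helper condensing the page-allocation order in
 * iommu_dma_alloc() after this patch.
 */
static struct page *alloc_backing_pages(struct device *dev, size_t size,
					gfp_t gfp)
{
	struct page *page = NULL;

	/* CMA may sleep, so it is only tried when blocking is allowed. */
	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size),
						 gfp & __GFP_NOWARN);

	/*
	 * New in this patch: fall back to the page allocator when CMA
	 * fails or was skipped, matching dma-direct behavior.
	 */
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	return page;
}

The non-blocking, non-coherent case still short-circuits into
dma_alloc_from_pool() before reaching this point, since an atomic
context cannot remap memory.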

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/iommu/dma-iommu.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index cffd30810d41..ee7dcf03c304 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -974,7 +974,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 	size_t iosize = size;
-	struct page *page;
+	struct page *page = NULL;
 	void *addr;
 
 	size = PAGE_ALIGN(size);
@@ -984,35 +984,26 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
 		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
 
-	if (!gfpflags_allow_blocking(gfp)) {
-		/*
-		 * In atomic context we can't remap anything, so we'll only
-		 * get the virtually contiguous buffer we need by way of a
-		 * physically contiguous allocation.
-		 */
-		if (coherent) {
-			page = alloc_pages(gfp, get_order(size));
-			addr = page ? page_address(page) : NULL;
-		} else {
-			addr = dma_alloc_from_pool(size, &page, gfp);
-		}
+	if (!gfpflags_allow_blocking(gfp) && !coherent) {
+		addr = dma_alloc_from_pool(size, &page, gfp);
 		if (!addr)
 			return NULL;
 
 		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
 					  ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
-			if (coherent)
-				__free_pages(page, get_order(size));
-			else
-				dma_free_from_pool(addr, size);
+			dma_free_from_pool(addr, size);
 			return NULL;
 		}
 		return addr;
 	}
 
-	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-					 get_order(size), gfp & __GFP_NOWARN);
+	if (gfpflags_allow_blocking(gfp))
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size),
+						 gfp & __GFP_NOWARN);
+	if (!page)
+		page = alloc_pages(gfp, get_order(size));
 	if (!page)
 		return NULL;
 
@@ -1038,7 +1029,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 out_unmap:
 	__iommu_dma_unmap(dev, *handle, iosize);
 out_free_pages:
-	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, get_order(size));
 	return NULL;
 }
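
A detail worth noting in the unified error path: dma_release_from_contiguous()
returns false when the page did not come from the device's CMA region, so the
single out_free_pages label can unwind both allocation sources without having
to record which allocator satisfied the request. A minimal sketch of the idiom
(the free_backing_pages() helper is hypothetical):

/*
 * Hypothetical helper showing the cleanup idiom: CMA gets first
 * refusal; if the page was not a CMA allocation, the release call
 * returns false and the page goes back to the page allocator.
 */
static void free_backing_pages(struct device *dev, struct page *page,
			       size_t size)
{
	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}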

--
2.20.1