Subject: [PATCH v4 9/9] iommu/vt-d: Use bounce buffer for untrusted devices
The Intel VT-d hardware uses paging for DMA remapping, so the
smallest remapping window it can provide is one page. Device
drivers may map buffers that do not fill a whole IOMMU page,
which leaves the device able to reach possibly unrelated memory
sharing that page; a malicious device could exploit this to
mount DMA attacks. To address this, the Intel IOMMU driver uses
bounce pages for buffers that don't fill whole IOMMU pages.
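
For illustration only (not part of this patch), below is a minimal
user-space sketch of the page-granularity check that motivates the
bounce path. buffer_needs_bounce() is a hypothetical helper; the
constants simply restate the 4 KiB VT-d page size. The real decision
and copying live in the iommu_bounce_*() helpers introduced earlier
in this series.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)

/*
 * Hypothetical helper, not from intel-iommu.c: a mapping needs to be
 * bounced when its start or end does not fall on an IOMMU page
 * boundary, because the hardware remaps memory only in whole-page
 * units and the unused remainder of a partially filled page would
 * stay reachable by the device.
 */
static bool buffer_needs_bounce(uint64_t paddr, size_t size)
{
	bool head_partial = paddr & (VTD_PAGE_SIZE - 1);
	bool tail_partial = (paddr + size) & (VTD_PAGE_SIZE - 1);

	return head_partial || tail_partial;
}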

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Xu Pengfei <pengfei.xu@intel.com>
Tested-by: Mika Westerberg <mika.westerberg@intel.com>
---
 drivers/iommu/intel-iommu.c | 128 ++++++++++++++++++++++++++++++++----
 1 file changed, 117 insertions(+), 11 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2f54734d1c43..4bf744a1c239 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -52,6 +52,7 @@
 #include <asm/irq_remapping.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
+#include <trace/events/intel_iommu.h>
 
 #include "irq_remapping.h"
 #include "intel-pasid.h"
@@ -3537,6 +3538,19 @@ __intel_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 	if (!iova_pfn)
 		goto error;
 
+	if (device_needs_bounce(dev)) {
+		dma_addr_t ret_addr;
+
+		ret_addr = iommu_bounce_map(dev, iova_pfn << PAGE_SHIFT,
+					    paddr, size, dir, attrs);
+		if (ret_addr == DMA_MAPPING_ERROR)
+			goto error;
+		trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT,
+					paddr, size);
+
+		return ret_addr;
+	}
+
 	/*
 	 * Check if DMAR supports zero-length reads on write only
 	 * mappings..
@@ -3620,14 +3634,28 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size,
 		start_pfn = mm_to_dma_pfn(iova_pfn);
 		last_pfn = start_pfn + nrpages - 1;
 
-		freelist = domain_unmap(domain, start_pfn, last_pfn);
+		if (device_needs_bounce(dev))
+			for_each_sg(sglist, sg, nelems, i) {
+				iommu_bounce_unmap(dev, sg_dma_address(sg),
+						   sg->length, dir, attrs);
+				trace_bounce_unmap_sg(dev, i, nelems,
+						      sg_dma_address(sg),
+						      sg_phys(sg), sg->length);
+			}
+		else
+			freelist = domain_unmap(domain, start_pfn, last_pfn);
 	} else {
 		iova_pfn = IOVA_PFN(dev_addr);
 		nrpages = aligned_nrpages(dev_addr, size);
 		start_pfn = mm_to_dma_pfn(iova_pfn);
 		last_pfn = start_pfn + nrpages - 1;
 
-		freelist = domain_unmap(domain, start_pfn, last_pfn);
+		if (device_needs_bounce(dev)) {
+			iommu_bounce_unmap(dev, dev_addr, size, dir, attrs);
+			trace_bounce_unmap_single(dev, dev_addr, size);
+		} else {
+			freelist = domain_unmap(domain, start_pfn, last_pfn);
+		}
 	}
 
 	if (dev_is_pci(dev))
@@ -3774,6 +3802,26 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 		prot |= DMA_PTE_WRITE;
 
 	start_vpfn = mm_to_dma_pfn(iova_pfn);
+	if (device_needs_bounce(dev)) {
+		for_each_sg(sglist, sg, nelems, i) {
+			dma_addr_t ret_addr;
+
+			ret_addr = iommu_bounce_map(dev,
+					start_vpfn << VTD_PAGE_SHIFT,
+					sg_phys(sg), sg->length, dir, attrs);
+			if (ret_addr == DMA_MAPPING_ERROR)
+				break;
+
+			trace_bounce_map_sg(dev, i, nelems, ret_addr,
+					    sg_phys(sg), sg->length);
+
+			sg->dma_address = ret_addr;
+			sg->dma_length = sg->length;
+			start_vpfn += aligned_nrpages(sg->offset, sg->length);
+		}
+
+		return i;
+	}
 
 	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
@@ -3787,16 +3835,74 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	return nelems;
 }
 
+static void
+intel_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+			  size_t size, enum dma_data_direction dir)
+{
+	if (!iommu_need_mapping(dev))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+	if (device_needs_bounce(dev))
+		iommu_bounce_sync(dev, addr, size, dir, SYNC_FOR_CPU);
+}
+
+static void
+intel_sync_single_for_device(struct device *dev, dma_addr_t addr,
+			     size_t size, enum dma_data_direction dir)
+{
+	if (!iommu_need_mapping(dev))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+
+	if (device_needs_bounce(dev))
+		iommu_bounce_sync(dev, addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+static void
+intel_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+		      int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (!iommu_need_mapping(dev))
+		dma_direct_sync_sg_for_cpu(dev, sglist, nelems, dir);
+
+	if (device_needs_bounce(dev))
+		for_each_sg(sglist, sg, nelems, i)
+			iommu_bounce_sync(dev, sg_dma_address(sg),
+					  sg_dma_len(sg), dir, SYNC_FOR_CPU);
+}
+
+static void
+intel_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+			 int nelems, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (!iommu_need_mapping(dev))
+		dma_direct_sync_sg_for_device(dev, sglist, nelems, dir);
+
+	if (device_needs_bounce(dev))
+		for_each_sg(sglist, sg, nelems, i)
+			iommu_bounce_sync(dev, sg_dma_address(sg),
+					  sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
+}
+
 static const struct dma_map_ops intel_dma_ops = {
-	.alloc = intel_alloc_coherent,
-	.free = intel_free_coherent,
-	.map_sg = intel_map_sg,
-	.unmap_sg = intel_unmap_sg,
-	.map_page = intel_map_page,
-	.unmap_page = intel_unmap_page,
-	.map_resource = intel_map_resource,
-	.unmap_resource = intel_unmap_resource,
-	.dma_supported = dma_direct_supported,
+	.alloc			= intel_alloc_coherent,
+	.free			= intel_free_coherent,
+	.map_sg			= intel_map_sg,
+	.unmap_sg		= intel_unmap_sg,
+	.map_page		= intel_map_page,
+	.unmap_page		= intel_unmap_page,
+	.sync_single_for_cpu	= intel_sync_single_for_cpu,
+	.sync_single_for_device	= intel_sync_single_for_device,
+	.sync_sg_for_cpu	= intel_sync_sg_for_cpu,
+	.sync_sg_for_device	= intel_sync_sg_for_device,
+	.map_resource		= intel_map_resource,
+	.unmap_resource		= intel_unmap_resource,
+	.dma_supported		= dma_direct_supported,
 };
 
 static inline int iommu_domain_cache_init(void)
--
2.17.1