    Subject: [PATCH 05/34] x86/amd-iommu: Add function to complete a tlb flush
    This patch adds a function to the AMD IOMMU driver which
    completes all queued commands on all IOMMUs to which a
    specific domain has devices attached. This is required by a
    later patch which implements per-domain flushing.
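
    The new helper walks all IOMMUs in the system and waits for
    command completion only on those which have devices of the
    given domain attached, as indicated by the domain's per-IOMMU
    device count. As a rough illustration of that pattern, here is
    a minimal user-space sketch; the mock_* names and the setup in
    main() are made up for this sketch and are not the driver's code:

    /*
     * Minimal user-space sketch of the idea (not the in-kernel code):
     * a domain records how many of its devices sit behind each IOMMU,
     * and a flush only needs to wait on IOMMUs with a non-zero count.
     * All mock_* names are illustrative stand-ins.
     */
    #include <stdio.h>

    #define MOCK_IOMMUS_PRESENT 4

    struct mock_iommu {
            int id;
    };

    struct mock_domain {
            /* per-IOMMU count of devices attached to this domain */
            int dev_iommu[MOCK_IOMMUS_PRESENT];
    };

    static struct mock_iommu mock_iommus[MOCK_IOMMUS_PRESENT] = {
            { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 },
    };

    /* stand-in for iommu_completion_wait() on a single IOMMU */
    static void mock_completion_wait(struct mock_iommu *iommu)
    {
            printf("waiting for completion on IOMMU %d\n", iommu->id);
    }

    /* per-domain variant: skip IOMMUs the domain has no devices behind */
    static void mock_flush_complete(struct mock_domain *domain)
    {
            int i;

            for (i = 0; i < MOCK_IOMMUS_PRESENT; ++i) {
                    if (!domain->dev_iommu[i])
                            continue;
                    mock_completion_wait(&mock_iommus[i]);
            }
    }

    int main(void)
    {
            /* a domain with devices behind IOMMUs 1 and 3 only */
            struct mock_domain d = { .dev_iommu = { 0, 2, 0, 1 } };

            mock_flush_complete(&d);        /* waits on IOMMUs 1 and 3 only */
            return 0;
    }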

    Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
    ---
    arch/x86/kernel/amd_iommu.c | 28 ++++++++++++++++++++++------
    1 files changed, 22 insertions(+), 6 deletions(-)

    diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
    index 8c38f00..8fa5cc3 100644
    --- a/arch/x86/kernel/amd_iommu.c
    +++ b/arch/x86/kernel/amd_iommu.c
    @@ -376,6 +376,22 @@ out:
             return 0;
     }

    +static void iommu_flush_complete(struct protection_domain *domain)
    +{
    +        int i;
    +
    +        for (i = 0; i < amd_iommus_present; ++i) {
    +                if (!domain->dev_iommu[i])
    +                        continue;
    +
    +                /*
    +                 * Devices of this domain are behind this IOMMU
    +                 * We need to wait for completion of all commands.
    +                 */
    +                iommu_completion_wait(amd_iommus[i]);
    +        }
    +}
    +
     /*
      * Command send function for invalidating a device table entry
      */
    @@ -1758,7 +1774,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
             if (addr == DMA_ERROR_CODE)
                     goto out;

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

     out:
             spin_unlock_irqrestore(&domain->lock, flags);
    @@ -1791,7 +1807,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,

             __unmap_single(iommu, domain->priv, dma_addr, size, dir);

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

             spin_unlock_irqrestore(&domain->lock, flags);
     }
    @@ -1863,7 +1879,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                             goto unmap;
             }

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

     out:
             spin_unlock_irqrestore(&domain->lock, flags);
    @@ -1914,7 +1930,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                     s->dma_address = s->dma_length = 0;
             }

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

             spin_unlock_irqrestore(&domain->lock, flags);
     }
    @@ -1969,7 +1985,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
                     goto out_free;
             }

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

             spin_unlock_irqrestore(&domain->lock, flags);

    @@ -2010,7 +2026,7 @@ static void free_coherent(struct device *dev, size_t size,

             __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);

    -        iommu_completion_wait(iommu);
    +        iommu_flush_complete(domain);

             spin_unlock_irqrestore(&domain->lock, flags);

    --
    1.6.5.3