Subject: [PATCH v3 5/6] VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
Make iommu_flush_iotlb_psi() and flush_unmaps() easier to read: in
iommu_flush_iotlb_psi(), fold the two fallback-to-DSI checks into a single
conditional with one return path; in flush_unmaps(), use an early continue
to reduce nesting.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
---
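Note for reviewers (not part of the commit): below is a minimal, standalone
sketch of the mask computation the patch keeps, showing when the code falls
back from a PSI to a DSI flush. It is userspace-only illustration, not kernel
code: roundup_pow_of_two() and ilog2() reimplement the kernel helpers of the
same names, and MAX_AMASK is a hypothetical stand-in for cap_max_amask_val().

/*
 * Sketch: mask = ilog2(roundup_pow_of_two(pages)); if the mask exceeds
 * the hardware limit, the driver falls back to a domain-selective flush.
 */
#include <stdio.h>

#define MAX_AMASK 6	/* hypothetical cap_max_amask_val() */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int v = 1;

	while (v < n)
		v <<= 1;
	return v;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int pages;

	for (pages = 1; pages <= 512; pages *= 4) {
		unsigned int mask = ilog2(roundup_pow_of_two(pages));

		printf("pages=%3u mask=%u -> %s flush\n", pages, mask,
		       mask > MAX_AMASK ? "DSI" : "PSI");
	}
	return 0;
}
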
drivers/pci/intel-iommu.c | 46 +++++++++++++++++++++-----------------------
1 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f4b7c79..5fdbed3 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -925,30 +925,27 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
u64 addr, unsigned int pages, int non_present_entry_flush)
{
- unsigned int mask;
+ int rc;
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));

BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);

- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-
/*
+ * Fallback to domain selective flush if no PSI support or the size is
+ * too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ rc = iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH,
+ non_present_entry_flush);
+ else
+ rc = iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH,
+ non_present_entry_flush);
+ return rc;
}

static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -2301,15 +2298,16 @@ static void flush_unmaps(void)
if (!iommu)
continue;

- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH, 0);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ __free_iova(&deferred_flush[i].domain[j]->iovad,
+ deferred_flush[i].iova[j]);
}
+ deferred_flush[i].next = 0;
}

list_size = 0;
--
1.6.1

