author		Zhen Lei <thunder.leizhen@huawei.com>	2017-09-21 16:52:45 +0100
committer	Joerg Roedel <jroedel@suse.de>	2017-09-27 17:09:57 +0200
commit		aa3ac9469c1850ed00741955b975c3a19029763a (patch)
tree		998e22c451f149ef7c2cd5284d1539cc9b33496f /drivers/iommu
parent		e60aa7b53845a261dd419652f12ab9f89e668843 (diff)
iommu/iova: Make dma_32bit_pfn implicit
Now that the cached node optimisation can apply to all allocations, the
couple of users which were playing tricks with dma_32bit_pfn in order to
benefit from it can stop doing so. Conversely, there is also no need for
all the other users to explicitly calculate a 'real' 32-bit PFN, when
init_iova_domain() can happily do that itself from the page granularity.
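
For illustration, the boundary that init_iova_domain() now derives for
itself (see the iova.c hunk below) is just a shift of the page
granularity. A minimal userspace sketch, assuming a hypothetical 4KiB
granule so that iova_shift() evaluates to 12:

#include <stdio.h>

int main(void)
{
	/* Assumption: 4KiB IOVA granule, i.e. iova_shift() == 12 */
	unsigned long shift = 12;
	unsigned long dma_32bit_pfn = 1UL << (32 - shift);

	/* prints 0x100000: the first PFN above the 32-bit space */
	printf("dma_32bit_pfn = 0x%lx\n", dma_32bit_pfn);
	return 0;
}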
CC: Thierry Reding <thierry.reding@gmail.com>
CC: Jonathan Hunter <jonathanh@nvidia.com>
CC: David Airlie <airlied@linux.ie>
CC: Sudeep Dutt <sudeep.dutt@intel.com>
CC: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Zhen Lei <thunder.leizhen@huawei.com>
Tested-by: Nate Watterson <nwatters@codeaurora.org>
[rm: use iova_shift(), rewrote commit message]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu.c	7
-rw-r--r--	drivers/iommu/dma-iommu.c	18
-rw-r--r--	drivers/iommu/intel-iommu.c	11
-rw-r--r--	drivers/iommu/iova.c	4
4 files changed, 8 insertions, 32 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 51f8215877f5..647ab7691aee 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -63,7 +63,6 @@
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 
 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
@@ -1788,8 +1787,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
 
 	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
 		goto free_dma_dom;
@@ -2696,8 +2694,7 @@ static int init_reserved_iova_ranges(void)
 	struct pci_dev *pdev = NULL;
 	struct iova *val;
 
-	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
 			  &reserved_rbtree_key);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 9d1cebe7f6cb..191be9c80a8a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		/* ...then finally give it a kicking to make sure it fits */
 		base_pfn = max_t(unsigned long, base_pfn,
 				domain->geometry.aperture_start >> order);
-		end_pfn = min_t(unsigned long, end_pfn,
-				domain->geometry.aperture_end >> order);
 	}
-	/*
-	 * PCI devices may have larger DMA masks, but still prefer allocating
-	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
-	 * apply to the typical platform device, so for those we may as well
-	 * leave the cache limit at the top of their range to save an rb_last()
-	 * traversal on every allocation.
-	 */
-	if (dev && dev_is_pci(dev))
-		end_pfn &= DMA_BIT_MASK(32) >> order;
 
 	/* start_pfn is always nonzero for an already-initialised domain */
 	if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 			pr_warn("Incompatible range for DMA domain\n");
 			return -EFAULT;
 		}
-		/*
-		 * If we have devices with different DMA masks, move the free
-		 * area cache limit down for the benefit of the smaller one.
-		 */
-		iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);
 
 		return 0;
 	}
 
-	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+	init_iova_domain(iovad, 1UL << order, base_pfn);
 	if (!dev)
 		return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6784a05dd6b2..ebb48353dd39 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -82,8 +82,6 @@
 #define IOVA_START_PFN		(1)
 
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
 
 /* page table handling */
 #define LEVEL_STRIDE		(9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 	unsigned long sagaw;
 	int err;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 
 	err = init_iova_flush_queue(&domain->iovad,
 				    iommu_flush_iova, iova_entry_free);
@@ -4897,8 +4893,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index c6f5a22f8d20..65032e60a5d1 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -37,7 +37,7 @@ static void fq_flush_timeout(unsigned long data);
 
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
-	unsigned long start_pfn, unsigned long pfn_32bit)
+	unsigned long start_pfn)
 {
 	/*
 	 * IOVA granularity will normally be equal to the smallest
@@ -52,7 +52,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->cached32_node = NULL;
 	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
-	iovad->dma_32bit_pfn = pfn_32bit + 1;
+	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->flush_cb = NULL;
 	iovad->fq = NULL;
 	init_iova_rcaches(iovad);
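
For reference, a self-contained sketch of the interface change (mock
types, not the kernel implementation; only the fields this patch
touches are modelled, and iova_shift() is mocked as log2 of the
granule):

#include <stdio.h>

/* pared-down stand-in for the kernel's struct iova_domain */
struct iova_domain {
	unsigned long granule;
	unsigned long start_pfn;
	unsigned long dma_32bit_pfn;
};

/* mock of iova_shift(): log2 of the granule */
static unsigned long iova_shift(struct iova_domain *iovad)
{
	unsigned long shift = 0;

	while ((1UL << shift) < iovad->granule)
		shift++;
	return shift;
}

/*
 * New-style init_iova_domain(): the pfn_32bit parameter is gone and
 * the 32-bit boundary is computed from the granularity, as in iova.c.
 */
static void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
			     unsigned long start_pfn)
{
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
}

int main(void)
{
	struct iova_domain iovad;

	/* callers now pass only the granule and the start PFN */
	init_iova_domain(&iovad, 4096, 1);
	printf("dma_32bit_pfn = 0x%lx\n", iovad.dma_32bit_pfn);
	return 0;
}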