From d900bd7366463fd96a907b2c212242e2b68b27d8 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Wed, 3 Oct 2012 18:57:10 +0000
Subject: powerpc/iommu: Fix multiple issues with IOMMU pools code

There are a number of issues in the recent IOMMU pools code:

- On a preempt kernel we might switch CPUs in the middle of building
  a scatter gather list. When this happens the handle hint passed in
  no longer falls within the local CPU's pool. Check for this and
  fall back to the pool hint.

- We were missing a spin_unlock/spin_lock in one spot where we
  switch pools.

- We need to provide locking around dart_tlb_invalidate_all and
  dart_tlb_invalidate_one now that the global lock is gone.

Reported-by: Alexander Graf
Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
CC: [v3.6]
---
 arch/powerpc/kernel/iommu.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b8..8226c6cb348a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	spin_lock_irqsave(&(pool->lock), flags);
 
 again:
-	if ((pass == 0) && handle && *handle)
+	if ((pass == 0) && handle && *handle &&
+	    (*handle >= pool->start) && (*handle < pool->end))
 		start = *handle;
 	else
 		start = pool->hint;
@@ -236,7 +237,9 @@ again:
 	 * but on second pass, start at 0 in pool 0.
 	 */
 	if ((start & mask) >= limit || pass > 0) {
+		spin_unlock(&(pool->lock));
 		pool = &(tbl->pools[0]);
+		spin_lock(&(pool->lock));
 		start = pool->start;
 	} else {
 		start &= mask;
--
cgit v1.2.1
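
Note: the hunks above cover only the iommu.c half of the fix; the third point in
the commit message (serialising dart_tlb_invalidate_all/dart_tlb_invalidate_one
once the global IOMMU lock is gone) lives in arch/powerpc/sysdev/dart_iommu.c,
which the "limited to 'arch/powerpc/kernel'" view hides. A minimal sketch of that
kind of serialisation, assuming a dedicated lock named dart_lock; the lock name
and placement are illustrative only, not taken from the patch:

	/* Sketch only: give the DART TLB flush its own lock now that callers
	 * no longer serialise on a single global IOMMU lock. dart_lock is an
	 * assumed name; the flush body stands in for the existing sequence. */
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(dart_lock);

	static void dart_tlb_invalidate_all(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&dart_lock, flags);
		/* ... existing DART register/TLB flush sequence ... */
		spin_unlock_irqrestore(&dart_lock, flags);
	}

dart_tlb_invalidate_one would take the same lock around its flush, so the two
paths cannot race with each other or with themselves on another CPU.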