-rw-r--r-- | arch/powerpc/kernel/iommu.c           | 17
-rw-r--r-- | arch/powerpc/mm/slb.c                 | 10
-rw-r--r-- | arch/powerpc/platforms/pseries/lpar.c |  1
-rw-r--r-- | include/asm-powerpc/mmu-hash64.h      |  1
4 files changed, 16 insertions, 13 deletions
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 2d0c9ef555e9..79a85d656871 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -278,6 +278,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
 	int outcount, incount, i;
+	unsigned int align;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -309,7 +310,12 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
 		npages = iommu_num_pages(vaddr, slen);
-		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		    (vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+		entry = iommu_range_alloc(tbl, npages, &handle,
+					  mask >> IOMMU_PAGE_SHIFT, align);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -572,7 +578,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 {
 	dma_addr_t dma_handle = DMA_ERROR_CODE;
 	unsigned long uaddr;
-	unsigned int npages;
+	unsigned int npages, align;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -580,8 +586,13 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
 	npages = iommu_num_pages(uaddr, size);
 
 	if (tbl) {
+		align = 0;
+		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+
 		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, 0);
+					 mask >> IOMMU_PAGE_SHIFT, align);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
 				printk(KERN_INFO "iommu_alloc failed, "
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a282bc212e80..50d7372bc2ce 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -82,14 +82,6 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-void slb_shadow_clear_all(void)
-{
-	int i;
-
-	for (i = 0; i < SLB_NUM_BOLTED; i++)
-		slb_shadow_clear(i);
-}
-
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
 					unsigned long entry)
@@ -300,6 +292,8 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	slb_shadow_clear(2);
+
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 34317aa148a8..9a455d46379d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -272,7 +272,6 @@ void vpa_init(int cpu)
 	 */
 	addr = __pa(&slb_shadow[cpu]);
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		slb_shadow_clear_all();
 		ret = register_slb_shadow(hwcpu, addr);
 		if (ret)
 			printk(KERN_ERR
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 951e2487aa69..82328dec2b52 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -286,7 +286,6 @@ extern void hpte_init_iSeries(void);
 extern void hpte_init_beat(void);
 extern void hpte_init_beat_v3(void);
 
-extern void slb_shadow_clear_all(void);
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
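
For readers working out what the new align argument buys in the iommu.c hunks above: on a kernel whose page size is larger than the IOMMU page size, the change asks iommu_range_alloc()/iommu_alloc() for an allocation aligned to a full kernel page whenever the buffer is itself page-aligned and at least a page long. The stand-alone sketch below is not part of the patch; PAGE_SHIFT = 16 and IOMMU_PAGE_SHIFT = 12 are assumed values (a 64K-page kernel driving a 4K-page IOMMU), and iommu_align_order() is a hypothetical helper used only to illustrate the arithmetic.

/*
 * Minimal user-space sketch (not kernel code) of the alignment rule the
 * patch introduces.  The shift values below are assumptions for a
 * 64K-page kernel using 4K IOMMU pages.
 */
#include <stdio.h>

#define PAGE_SHIFT        16                   /* assumed: 64K kernel pages */
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PAGE_MASK         (~(PAGE_SIZE - 1))
#define IOMMU_PAGE_SHIFT  12                   /* assumed: 4K IOMMU pages */

/* Alignment order (in IOMMU pages) that the patched code would request. */
static unsigned int iommu_align_order(unsigned long vaddr, unsigned long size)
{
	unsigned int align = 0;

	if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
	    (vaddr & ~PAGE_MASK) == 0)
		align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

	return align;
}

int main(void)
{
	/* Page-aligned 64K buffer: order 4, i.e. aligned to 16 IOMMU pages. */
	printf("order=%u (%lu IOMMU pages)\n",
	       iommu_align_order(0x10000, PAGE_SIZE),
	       1UL << iommu_align_order(0x10000, PAGE_SIZE));

	/* Small or unaligned buffer: order 0, the pre-patch behaviour. */
	printf("order=%u\n", iommu_align_order(0x10800, 4096));
	return 0;
}

With the assumed shift values, the first call reports order 4, so the mapping starts on a 64K boundary inside the IOMMU space, while small or unaligned buffers keep the old order-0 placement.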