Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--   arch/sparc/kernel/head_64.S        |  37
-rw-r--r--   arch/sparc/kernel/hvapi.c          |   1
-rw-r--r--   arch/sparc/kernel/iommu.c          |   8
-rw-r--r--   arch/sparc/kernel/iommu_common.h   |   1
-rw-r--r--   arch/sparc/kernel/jump_label.c     |  23
-rw-r--r--   arch/sparc/kernel/mdesc.c          |  46
-rw-r--r--   arch/sparc/kernel/pci_sun4v.c      | 418
-rw-r--r--   arch/sparc/kernel/pci_sun4v.h      |  21
-rw-r--r--   arch/sparc/kernel/pci_sun4v_asm.S  |  68
-rw-r--r--   arch/sparc/kernel/ptrace_64.c      |  24
-rw-r--r--   arch/sparc/kernel/signal_32.c      |   4
-rw-r--r--   arch/sparc/kernel/smp_64.c         |   8
12 files changed, 527 insertions, 132 deletions
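
The patch below wires sun4v IOMMU v2 (ATU) support into the PCI DMA path so that devices with DMA masks wider than 32 bits can use a 64-bit DVMA range. The recurring pattern in the pci_sun4v.c hunks is a table-selection step keyed to the DMA mask: requests that fit in 32 bits stay on the legacy IOMMU table, wider ones are routed to the new ATU table. A minimal standalone sketch of that selection, assuming simplified placeholder structures rather than the kernel's definitions (the !iommu->atu guard is added here only for illustration):

#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Simplified stand-ins for the kernel structures this patch touches;
 * illustrative placeholders, not the kernel's definitions. */
struct iommu_map_table { uint64_t table_map_base; };
struct atu   { struct iommu_map_table tbl; uint64_t dma_addr_mask; };
struct iommu { struct iommu_map_table tbl; struct atu *atu; };

/* Pick the translation table for a request: DMA masks that fit in 32 bits
 * stay on the legacy IOMMU, wider masks go to the ATU when the hypervisor
 * exposed one.  Mirrors the checks added to dma_4v_alloc_coherent() and
 * dma_4v_map_page() in the diff below. */
static struct iommu_map_table *select_tbl(struct iommu *iommu, uint64_t mask)
{
	if (mask <= DMA_BIT_MASK(32) || !iommu->atu)
		return &iommu->tbl;
	return &iommu->atu->tbl;
}
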
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index beba6c11554c..6aa3da152c20 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -926,48 +926,11 @@ tlb_type: .word 0 /* Must NOT end up in BSS */ EXPORT_SYMBOL(tlb_type) .section ".fixup",#alloc,#execinstr - .globl __ret_efault, __retl_efault, __ret_one, __retl_one -ENTRY(__ret_efault) - ret - restore %g0, -EFAULT, %o0 -ENDPROC(__ret_efault) -EXPORT_SYMBOL(__ret_efault) - ENTRY(__retl_efault) retl mov -EFAULT, %o0 ENDPROC(__retl_efault) -ENTRY(__retl_one) - retl - mov 1, %o0 -ENDPROC(__retl_one) - -ENTRY(__retl_one_fp) - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_fp) - -ENTRY(__ret_one_asi) - wr %g0, ASI_AIUS, %asi - ret - restore %g0, 1, %o0 -ENDPROC(__ret_one_asi) - -ENTRY(__retl_one_asi) - wr %g0, ASI_AIUS, %asi - retl - mov 1, %o0 -ENDPROC(__retl_one_asi) - -ENTRY(__retl_one_asi_fp) - wr %g0, ASI_AIUS, %asi - VISExitHalf - retl - mov 1, %o0 -ENDPROC(__retl_one_asi_fp) - ENTRY(__retl_o1) retl mov %o1, %o0 diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 662500fa555f..267731234ce8 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -39,6 +39,7 @@ static struct api_info api_table[] = { { .group = HV_GRP_SDIO, }, { .group = HV_GRP_SDIO_ERR, }, { .group = HV_GRP_REBOOT_DATA, }, + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, { .group = HV_GRP_FIRE_PERF, }, { .group = HV_GRP_N2_CPU, }, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 5c615abff030..852a3291db96 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask) struct iommu *iommu = dev->archdata.iommu; u64 dma_addr_mask = iommu->dma_addr_mask; - if (device_mask >= (1UL << 32UL)) - return 0; + if (device_mask > DMA_BIT_MASK(32)) { + if (iommu->atu) + dma_addr_mask = iommu->atu->dma_addr_mask; + else + return 0; + } if ((device_mask & dma_addr_mask) == dma_addr_mask) return 1; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index b40cec252905..828493329f68 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -13,7 +13,6 @@ #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/iommu-helper.h> -#include <linux/scatterlist.h> #include <asm/iommu.h> diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index 59bbeff55024..07933b9e9ce0 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -13,19 +13,30 @@ void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type) { - u32 val; u32 *insn = (u32 *) (unsigned long) entry->code; + u32 val; if (type == JUMP_LABEL_JMP) { s32 off = (s32)entry->target - (s32)entry->code; + bool use_v9_branch = false; + + BUG_ON(off & 3); #ifdef CONFIG_SPARC64 - /* ba,pt %xcc, . + (off << 2) */ - val = 0x10680000 | ((u32) off >> 2); -#else - /* ba . + (off << 2) */ - val = 0x10800000 | ((u32) off >> 2); + if (off <= 0xfffff && off >= -0x100000) + use_v9_branch = true; #endif + if (use_v9_branch) { + /* WDISP19 - target is . + immed << 2 */ + /* ba,pt %xcc, . + off */ + val = 0x10680000 | (((u32) off >> 2) & 0x7ffff); + } else { + /* WDISP22 - target is . + immed << 2 */ + BUG_ON(off > 0x7fffff); + BUG_ON(off < -0x800000); + /* ba . 
+ off */ + val = 0x10800000 | (((u32) off >> 2) & 0x3fffff); + } } else { val = 0x01000000; } diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 11228861d9b4..8a6982dfd733 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -645,13 +645,20 @@ static void __mark_core_id(struct mdesc_handle *hp, u64 node, cpu_data(*id).core_id = core_id; } -static void __mark_sock_id(struct mdesc_handle *hp, u64 node, - int sock_id) +static void __mark_max_cache_id(struct mdesc_handle *hp, u64 node, + int max_cache_id) { const u64 *id = mdesc_get_property(hp, node, "id", NULL); - if (*id < num_possible_cpus()) - cpu_data(*id).sock_id = sock_id; + if (*id < num_possible_cpus()) { + cpu_data(*id).max_cache_id = max_cache_id; + + /** + * On systems without explicit socket descriptions socket + * is max_cache_id + */ + cpu_data(*id).sock_id = max_cache_id; + } } static void mark_core_ids(struct mdesc_handle *hp, u64 mp, @@ -660,10 +667,11 @@ static void mark_core_ids(struct mdesc_handle *hp, u64 mp, find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10); } -static void mark_sock_ids(struct mdesc_handle *hp, u64 mp, - int sock_id) +static void mark_max_cache_ids(struct mdesc_handle *hp, u64 mp, + int max_cache_id) { - find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10); + find_back_node_value(hp, mp, "cpu", __mark_max_cache_id, + max_cache_id, 10); } static void set_core_ids(struct mdesc_handle *hp) @@ -694,14 +702,15 @@ static void set_core_ids(struct mdesc_handle *hp) } } -static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) +static int set_max_cache_ids_by_cache(struct mdesc_handle *hp, int level) { u64 mp; int idx = 1; int fnd = 0; - /* Identify unique sockets by looking for cpus backpointed to by - * shared level n caches. + /** + * Identify unique highest level of shared cache by looking for cpus + * backpointed to by shared level N caches. */ mdesc_for_each_node_by_name(hp, mp, "cache") { const u64 *cur_lvl; @@ -709,8 +718,7 @@ static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level) cur_lvl = mdesc_get_property(hp, mp, "level", NULL); if (*cur_lvl != level) continue; - - mark_sock_ids(hp, mp, idx); + mark_max_cache_ids(hp, mp, idx); idx++; fnd = 1; } @@ -745,15 +753,17 @@ static void set_sock_ids(struct mdesc_handle *hp) { u64 mp; - /* If machine description exposes sockets data use it. - * Otherwise fallback to use shared L3 or L2 caches. + /** + * Find the highest level of shared cache which pre-T7 is also + * the socket. 
*/ + if (!set_max_cache_ids_by_cache(hp, 3)) + set_max_cache_ids_by_cache(hp, 2); + + /* If machine description exposes sockets data use it.*/ mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets"); if (mp != MDESC_NODE_NULL) - return set_sock_ids_by_socket(hp, mp); - - if (!set_sock_ids_by_cache(hp, 3)) - set_sock_ids_by_cache(hp, 2); + set_sock_ids_by_socket(hp, mp); } static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index db57d8acdc01..06981cc716b6 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = { { .major = 1, .minor = 1 }, }; +static unsigned long vatu_major = 1; +static unsigned long vatu_minor = 1; + #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) struct iommu_batch { @@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns } /* Interrupts must be disabled. */ -static long iommu_batch_flush(struct iommu_batch *p) +static long iommu_batch_flush(struct iommu_batch *p, u64 mask) { struct pci_pbm_info *pbm = p->dev->archdata.host_controller; + u64 *pglist = p->pglist; + u64 index_count; unsigned long devhandle = pbm->devhandle; unsigned long prot = p->prot; unsigned long entry = p->entry; - u64 *pglist = p->pglist; unsigned long npages = p->npages; + unsigned long iotsb_num; + unsigned long ret; + long num; /* VPCI maj=1, min=[0,1] only supports read and write */ if (vpci_major < 2) prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); while (npages != 0) { - long num; - - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist)); - if (unlikely(num < 0)) { - if (printk_ratelimit()) - printk("iommu_batch_flush: IOMMU map of " - "[%08lx:%08llx:%lx:%lx:%lx] failed with " - "status %ld\n", - devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist), num); - return -1; + if (mask <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_map(devhandle, + HV_PCI_TSBID(0, entry), + npages, + prot, + __pa(pglist)); + if (unlikely(num < 0)) { + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, + HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist), + num); + return -1; + } + } else { + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; + ret = pci_sun4v_iotsb_map(devhandle, + iotsb_num, + index_count, + prot, + __pa(pglist), + &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, iotsb_num, + index_count, prot, + __pa(pglist), ret); + return -1; + } } - entry += num; npages -= num; pglist += num; @@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p) return 0; } -static inline void iommu_batch_new_entry(unsigned long entry) +static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); if (p->entry + p->npages == entry) return; if (p->entry != ~0UL) - iommu_batch_flush(p); + iommu_batch_flush(p, mask); p->entry = entry; } /* Interrupts must be disabled. 
*/ -static inline long iommu_batch_add(u64 phys_page) +static inline long iommu_batch_add(u64 phys_page, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); @@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page) p->pglist[p->npages++] = phys_page; if (p->npages == PGLIST_NENTS) - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); return 0; } /* Interrupts must be disabled. */ -static inline long iommu_batch_end(void) +static inline long iommu_batch_end(u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); BUG_ON(p->npages >= PGLIST_NENTS); - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); } static void *dma_4v_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { + u64 mask; unsigned long flags, order, first_page, npages, n; unsigned long prot = 0; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; struct page *page; void *ret; long entry; @@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, memset((char *)first_page, 0, PAGE_SIZE << order); iommu = dev->archdata.iommu; + atu = iommu->atu; + + mask = dev->coherent_dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto range_alloc_fail; - *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = (void *) first_page; first_page = __pa(first_page); @@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, entry); for (n = 0; n < npages; n++) { - long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); if (unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, return ret; iommu_map_fail: - iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); range_alloc_fail: free_pages(first_page, order); return NULL; } -static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, - unsigned long npages) +unsigned long dma_4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + struct pci_bus *bus_dev) +{ + struct pci_dev *pdev; + unsigned long err; + unsigned int bus; + unsigned int device; + unsigned int fun; + + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { + if (pdev->subordinate) { + /* No need to bind pci bridge */ + dma_4v_iotsb_bind(devhandle, iotsb_num, + pdev->subordinate); + } else { + bus = bus_dev->number; + device = PCI_SLOT(pdev->devfn); + fun = PCI_FUNC(pdev->devfn); + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, + HV_PCI_DEVICE_BUILD(bus, + device, + fun)); + + /* If bind fails for one device it is going to fail + * for rest of the devices because we are sharing + * IOTSB. So in case of failure simply return with + * error. 
+ */ + if (err) + return err; + } + } + + return 0; +} + +static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, + dma_addr_t dvma, unsigned long iotsb_num, + unsigned long entry, unsigned long npages) { - u32 devhandle = *(u32 *)demap_arg; unsigned long num, flags; + unsigned long ret; local_irq_save(flags); do { - num = pci_sun4v_iommu_demap(devhandle, - HV_PCI_TSBID(0, entry), - npages); - + if (dvma <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_demap(devhandle, + HV_PCI_TSBID(0, entry), + npages); + } else { + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, + entry, npages, &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", + ret); + } + } entry += num; npages -= num; } while (npages != 0); @@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long order, npages, entry; + unsigned long iotsb_num; u32 devhandle; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; - entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); + + if (dvma <= DMA_BIT_MASK(32)) { + tbl = &iommu->tbl; + iotsb_num = 0; /* we don't care for legacy iommu */ + } else { + tbl = &atu->tbl; + iotsb_num = atu->iotsb->iotsb_num; + } + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); order = get_order(size); if (order < 10) free_pages((unsigned long)cpu, order); @@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; - u32 bus_addr, ret; unsigned long prot; + dma_addr_t bus_addr, ret; long entry; iommu = dev->archdata.iommu; + atu = iommu->atu; if (unlikely(direction == DMA_NONE)) goto bad; @@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto bad; - bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = bus_addr | (oaddr & ~IO_PAGE_MASK); base_paddr = __pa(oaddr & IO_PAGE_MASK); prot = HV_PCI_MAP_ATTR_READ; @@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, iommu_batch_start(dev, prot, entry); for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { - long err = iommu_batch_add(base_paddr); + long err = iommu_batch_add(base_paddr, mask); if (unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -310,7 +414,7 @@ bad: return DMA_ERROR_CODE; iommu_map_fail: - 
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); return DMA_ERROR_CODE; } @@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long npages; + unsigned long iotsb_num; long entry; u32 devhandle; @@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; bus_addr &= IO_PAGE_MASK; - entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + + if (bus_addr <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); } static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, @@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, unsigned long seg_boundary_size; int outcount, incount, i; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long base_shift; long err; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; + atu = iommu->atu; + if (nelems == 0 || !iommu) return 0; @@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, max_seg_size = dma_get_max_seg_size(dev); seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT; - base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; + + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; + for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; @@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, /* Allocate iommu entries for that segment */ paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, + entry = iommu_tbl_range_alloc(dev, tbl, npages, &handle, (unsigned long)(-1), 0); /* Handle failure */ if (unlikely(entry == IOMMU_ERROR_CODE)) { - if (printk_ratelimit()) - printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" - " npages %lx\n", iommu, paddr, npages); + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", + tbl, paddr, npages); goto iommu_map_failed; } - iommu_batch_new_entry(entry); + iommu_batch_new_entry(entry, mask); /* Convert entry to a dma_addr_t */ - dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); dma_addr |= (s->offset & ~IO_PAGE_MASK); /* Insert into HW table */ paddr &= IO_PAGE_MASK; while (npages--) { - err = iommu_batch_add(paddr); + err = iommu_batch_add(paddr, mask); if (unlikely(err < 0L)) goto iommu_map_failed; paddr += IO_PAGE_SIZE; @@ -452,7 +580,7 @@ static int 
dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, dma_next = dma_addr + slen; } - err = iommu_batch_end(); + err = iommu_batch_end(mask); if (unlikely(err < 0L)) goto iommu_map_failed; @@ -475,7 +603,7 @@ iommu_map_failed: vaddr = s->dma_address & IO_PAGE_MASK; npages = iommu_num_pages(s->dma_address, s->dma_length, IO_PAGE_SIZE); - iommu_tbl_range_free(&iommu->tbl, vaddr, npages, + iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? XXX */ s->dma_address = DMA_ERROR_CODE; @@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, struct pci_pbm_info *pbm; struct scatterlist *sg; struct iommu *iommu; + struct atu *atu; unsigned long flags, entry; + unsigned long iotsb_num; u32 devhandle; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; local_irq_save(flags); @@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, dma_addr_t dma_handle = sg->dma_address; unsigned int len = sg->dma_length; unsigned long npages; - struct iommu_map_table *tbl = &iommu->tbl; + struct iommu_map_table *tbl; unsigned long shift = IO_PAGE_SHIFT; if (!len) break; npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); + + if (dma_handle <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } entry = ((dma_handle - tbl->table_map_base) >> shift); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, + entry, npages); + iommu_tbl_range_free(tbl, dma_handle, npages, IOMMU_ERROR_CODE); sg = sg_next(sg); } @@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, return cnt; } +static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + struct atu_iotsb *iotsb; + void *table; + u64 table_size; + u64 iotsb_num; + unsigned long order; + unsigned long err; + + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); + if (!iotsb) { + err = -ENOMEM; + goto out_err; + } + atu->iotsb = iotsb; + + /* calculate size of IOTSB */ + table_size = (atu->size / IO_PAGE_SIZE) * 8; + order = get_order(table_size); + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!table) { + err = -ENOMEM; + goto table_failed; + } + iotsb->table = table; + iotsb->ra = __pa(table); + iotsb->dvma_size = atu->size; + iotsb->dvma_base = atu->base; + iotsb->table_size = table_size; + iotsb->page_size = IO_PAGE_SIZE; + + /* configure and register IOTSB with HV */ + err = pci_sun4v_iotsb_conf(pbm->devhandle, + iotsb->ra, + iotsb->table_size, + iotsb->page_size, + iotsb->dvma_base, + &iotsb_num); + if (err) { + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); + goto iotsb_conf_failed; + } + iotsb->iotsb_num = iotsb_num; + + err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); + if (err) { + pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); + goto iotsb_conf_failed; + } + + return 0; + +iotsb_conf_failed: + free_pages((unsigned long)table, order); +table_failed: + kfree(iotsb); +out_err: + return err; +} + +static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + unsigned long err; + const u64 *ranges; + u64 map_size, num_iotte; + u64 dma_mask; + const u32 *page_size; + int len; + 
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", + &len); + if (!ranges) { + pr_err(PFX "No iommu-address-ranges\n"); + return -EINVAL; + } + + page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", + NULL); + if (!page_size) { + pr_err(PFX "No iommu-pagesizes\n"); + return -EINVAL; + } + + /* There are 4 iommu-address-ranges supported. Each range is pair of + * {base, size}. The ranges[0] and ranges[1] are 32bit address space + * while ranges[2] and ranges[3] are 64bit space. We want to use 64bit + * address ranges to support 64bit addressing. Because 'size' for + * address ranges[2] and ranges[3] are same we can select either of + * ranges[2] or ranges[3] for mapping. However due to 'size' is too + * large for OS to allocate IOTSB we are using fix size 32G + * (ATU_64_SPACE_SIZE) which is more than enough for all PCIe devices + * to share. + */ + atu->ranges = (struct atu_ranges *)ranges; + atu->base = atu->ranges[3].base; + atu->size = ATU_64_SPACE_SIZE; + + /* Create IOTSB */ + err = pci_sun4v_atu_alloc_iotsb(pbm); + if (err) { + pr_err(PFX "Error creating ATU IOTSB\n"); + return err; + } + + /* Create ATU iommu map. + * One bit represents one iotte in IOTSB table. + */ + dma_mask = (roundup_pow_of_two(atu->size) - 1UL); + num_iotte = atu->size / IO_PAGE_SIZE; + map_size = num_iotte / 8; + atu->tbl.table_map_base = atu->base; + atu->dma_addr_mask = dma_mask; + atu->tbl.map = kzalloc(map_size, GFP_KERNEL); + if (!atu->tbl.map) + return -ENOMEM; + + iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, + NULL, false /* no large_pool */, + 0 /* default npools */, + false /* want span boundary checking */); + + return 0; +} + static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; @@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm, pci_sun4v_scan_bus(pbm, &op->dev); + /* if atu_init fails its not complete failure. + * we can still continue using legacy iommu. + */ + if (pbm->iommu->atu) { + err = pci_sun4v_atu_init(pbm); + if (err) { + kfree(pbm->iommu->atu); + pbm->iommu->atu = NULL; + pr_err(PFX "ATU init failed, err=%d\n", err); + } + } + pbm->next = pci_pbm_root; pci_pbm_root = pbm; @@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op) struct pci_pbm_info *pbm; struct device_node *dp; struct iommu *iommu; + struct atu *atu; u32 devhandle; int i, err = -ENODEV; + static bool hv_atu = true; dp = op->dev.of_node; @@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op) pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", vpci_major, vpci_minor); + err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); + if (err) { + /* don't return an error if we fail to register the + * ATU group, but ATU hcalls won't be available. 
+ */ + hv_atu = false; + pr_err(PFX "Could not register hvapi ATU err=%d\n", + err); + } else { + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", + vatu_major, vatu_minor); + } + dma_ops = &sun4v_dma_ops; } @@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op) } pbm->iommu = iommu; + iommu->atu = NULL; + if (hv_atu) { + atu = kzalloc(sizeof(*atu), GFP_KERNEL); + if (!atu) + pr_err(PFX "Could not allocate atu\n"); + else + iommu->atu = atu; + } err = pci_sun4v_pbm_init(pbm, op, devhandle); if (err) @@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op) return 0; out_free_iommu: + kfree(iommu->atu); kfree(pbm->iommu); out_free_controller: diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h index 5642212390b2..22603a4e48bf 100644 --- a/arch/sparc/kernel/pci_sun4v.h +++ b/arch/sparc/kernel/pci_sun4v.h @@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, unsigned long msinum, unsigned long valid); +/* Sun4v HV IOMMU v2 APIs */ +unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, + unsigned long ra, + unsigned long table_size, + unsigned long page_size, + unsigned long dvma_base, + u64 *iotsb_num); +unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + unsigned int pci_device); +unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index_iottes, + unsigned long io_attributes, + unsigned long io_page_list_pa, + long *mapped); +unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index, + unsigned long iottes, + unsigned long *demapped); #endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S index e606d46c6815..578f09657916 100644 --- a/arch/sparc/kernel/pci_sun4v_asm.S +++ b/arch/sparc/kernel/pci_sun4v_asm.S @@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid) mov %o0, %o0 ENDPROC(pci_sun4v_msg_setvalid) + /* + * %o0: devhandle + * %o1: r_addr + * %o2: size + * %o3: pagesize + * %o4: virt + * %o5: &iotsb_num/&iotsb_handle + * + * returns %o0: status + * %o1: iotsb_num/iotsb_handle + */ +ENTRY(pci_sun4v_iotsb_conf) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_CONF, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_conf) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: pci_device + * + * returns %o0: status + */ +ENTRY(pci_sun4v_iotsb_bind) + mov HV_FAST_PCI_IOTSB_BIND, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(pci_sun4v_iotsb_bind) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: index_count + * %o3: iotte_attributes + * %o4: io_page_list_p + * %o5: &mapped + * + * returns %o0: status + * %o1: #mapped + */ +ENTRY(pci_sun4v_iotsb_map) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_MAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_map) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: iotsb_index + * %o3: #iottes + * %o4: &demapped + * + * returns %o0: status + * %o1: #demapped + */ +ENTRY(pci_sun4v_iotsb_demap) + mov HV_FAST_PCI_IOTSB_DEMAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(pci_sun4v_iotsb_demap) diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 9ddc4928a089..ac082dd8c67d 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr, if 
(copy_from_user(kbuf, (void __user *) uaddr, len)) return -EFAULT; } else { - int len2 = access_process_vm(target, uaddr, kbuf, len, 0); + int len2 = access_process_vm(target, uaddr, kbuf, len, + FOLL_FORCE); if (len2 != len) return -EFAULT; } @@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr, if (copy_to_user((void __user *) uaddr, kbuf, len)) return -EFAULT; } else { - int len2 = access_process_vm(target, uaddr, kbuf, len, 1); + int len2 = access_process_vm(target, uaddr, kbuf, len, + FOLL_FORCE | FOLL_WRITE); if (len2 != len) return -EFAULT; } @@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target, if (access_process_vm(target, (unsigned long) ®_window[pos], - k, sizeof(*k), 0) + k, sizeof(*k), + FOLL_FORCE) != sizeof(*k)) return -EFAULT; k++; @@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target, if (access_process_vm(target, (unsigned long) ®_window[pos], - ®, sizeof(reg), 0) + ®, sizeof(reg), + FOLL_FORCE) != sizeof(reg)) return -EFAULT; if (access_process_vm(target, (unsigned long) u, - ®, sizeof(reg), 1) + ®, sizeof(reg), + FOLL_FORCE | FOLL_WRITE) != sizeof(reg)) return -EFAULT; pos++; @@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target, (unsigned long) ®_window[pos], (void *) k, - sizeof(*k), 1) + sizeof(*k), + FOLL_FORCE | FOLL_WRITE) != sizeof(*k)) return -EFAULT; k++; @@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target, if (access_process_vm(target, (unsigned long) u, - ®, sizeof(reg), 0) + ®, sizeof(reg), + FOLL_FORCE) != sizeof(reg)) return -EFAULT; if (access_process_vm(target, (unsigned long) ®_window[pos], - ®, sizeof(reg), 1) + ®, sizeof(reg), + FOLL_FORCE | FOLL_WRITE) != sizeof(reg)) return -EFAULT; pos++; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index c3c12efe0bc0..9c0c8fd0b292 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv_and_exit; if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) @@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index d3035ba6cd31..8182f7caf5b1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -63,9 +63,13 @@ cpumask_t cpu_core_map[NR_CPUS] __read_mostly = cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; +cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = { + [0 ... 
NR_CPUS - 1] = CPU_MASK_NONE }; + EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_SYMBOL(cpu_core_map); EXPORT_SYMBOL(cpu_core_sib_map); +EXPORT_SYMBOL(cpu_core_sib_cache_map); static cpumask_t smp_commenced_mask; @@ -1265,6 +1269,10 @@ void smp_fill_in_sib_core_maps(void) unsigned int j; for_each_present_cpu(j) { + if (cpu_data(i).max_cache_id == + cpu_data(j).max_cache_id) + cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]); + if (cpu_data(i).sock_id == cpu_data(j).sock_id) cpumask_set_cpu(j, &cpu_core_sib_map[i]); } |
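
For orientation, the new hypervisor entry points declared in pci_sun4v.h are used in a fixed sequence: an IOTSB is configured, bound to a device, and only then are IOTTEs mapped and demapped against it. A condensed sketch of that order, assuming the usual kernel headers and with error handling trimmed (atu_setup_sketch, table_ra, and bdf are illustrative names, not symbols from the patch):

/* Condensed call order for the IOMMU v2 (ATU) interface; prototypes as
 * declared in arch/sparc/kernel/pci_sun4v.h in the diff above.  An
 * illustrative sketch, not code from the patch; assumes linux/errno.h,
 * asm/io.h (IO_PAGE_SIZE) and pci_sun4v.h are available. */
static int atu_setup_sketch(unsigned long devhandle, unsigned long table_ra,
			    unsigned long table_size, unsigned long dvma_base,
			    unsigned int bdf)
{
	u64 iotsb_num;

	/* 1. Register the guest-allocated IOTSB with the hypervisor. */
	if (pci_sun4v_iotsb_conf(devhandle, table_ra, table_size,
				 IO_PAGE_SIZE, dvma_base, &iotsb_num))
		return -EINVAL;

	/* 2. Bind the IOTSB to a device (bus/device/function). */
	if (pci_sun4v_iotsb_bind(devhandle, iotsb_num, bdf))
		return -EINVAL;

	/* 3. Mappings are then installed in batches with
	 *    pci_sun4v_iotsb_map() and torn down with
	 *    pci_sun4v_iotsb_demap(), as iommu_batch_flush() and
	 *    dma_4v_iommu_demap() do in the patch above. */
	return 0;
}
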