Diffstat (limited to 'arch/powerpc/kernel')
43 files changed, 877 insertions, 317 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index de91f3ae631e..94908af308d8 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -73,7 +73,7 @@ static struct aligninfo aligninfo[128] = {
 	{ 8, LD+F },		/* 00 0 1001: lfd */
 	{ 4, ST+F+S },		/* 00 0 1010: stfs */
 	{ 8, ST+F },		/* 00 0 1011: stfd */
-	INVALID,		/* 00 0 1100 */
+	{ 16, LD },		/* 00 0 1100: lq */
 	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
 	INVALID,		/* 00 0 1110 */
 	{ 8, ST },		/* 00 0 1111: std/stdu */
@@ -140,7 +140,7 @@ static struct aligninfo aligninfo[128] = {
 	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
 	{ 4, LD+SE },		/* 10 0 1101 lwa */
 	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
-	INVALID,		/* 10 0 1111 */
+	{ 16, ST },		/* 10 0 1111: stq */
 	INVALID,		/* 10 1 0000 */
 	INVALID,		/* 10 1 0001 */
 	INVALID,		/* 10 1 0010 */
@@ -385,8 +385,6 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
 	int i, ret, sw = 0;
 
-	if (!(flags & F))
-		return 0;
 	if (reg & 1)
 		return 0;	/* invalid form: FRS/FRT must be even */
 	if (flags & SW)
@@ -406,6 +404,34 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 	return 1;	/* exception handled and fixed up */
 }
 
+#ifdef CONFIG_PPC64
+static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
+			  unsigned int reg, unsigned int flags)
+{
+	char *ptr0 = (char *)&regs->gpr[reg];
+	char *ptr1 = (char *)&regs->gpr[reg+1];
+	int i, ret, sw = 0;
+
+	if (reg & 1)
+		return 0;	/* invalid form: GPR must be even */
+	if (flags & SW)
+		sw = 7;
+	ret = 0;
+	for (i = 0; i < 8; ++i) {
+		if (!(flags & ST)) {
+			ret |= __get_user(ptr0[i^sw], addr + i);
+			ret |= __get_user(ptr1[i^sw], addr + i + 8);
+		} else {
+			ret |= __put_user(ptr0[i^sw], addr + i);
+			ret |= __put_user(ptr1[i^sw], addr + i + 8);
+		}
+	}
+	if (ret)
+		return -EFAULT;
+	return 1;	/* exception handled and fixed up */
+}
+#endif /* CONFIG_PPC64 */
+
 #ifdef CONFIG_SPE
 
 static struct aligninfo spe_aligninfo[32] = {
@@ -914,10 +940,20 @@ int fix_alignment(struct pt_regs *regs)
 		flush_fp_to_thread(current);
 	}
 
-	/* Special case for 16-byte FP loads and stores */
-	if (nb == 16) {
-		PPC_WARN_ALIGNMENT(fp_pair, regs);
-		return emulate_fp_pair(addr, reg, flags);
+	if ((nb == 16)) {
+		if (flags & F) {
+			/* Special case for 16-byte FP loads and stores */
+			PPC_WARN_ALIGNMENT(fp_pair, regs);
+			return emulate_fp_pair(addr, reg, flags);
+		} else {
+#ifdef CONFIG_PPC64
+			/* Special case for 16-byte loads and stores */
+			PPC_WARN_ALIGNMENT(lq_stq, regs);
+			return emulate_lq_stq(regs, addr, reg, flags);
+#else
+			return 0;
+#endif
+		}
 	}
 
 	PPC_WARN_ALIGNMENT(unaligned, regs);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b5aacf72ae6f..dba8140ebc20 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -253,7 +253,7 @@ int main(void)
 	DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 	DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
-	DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3));
+	DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
 #endif /* CONFIG_PPC64 */
 
 	/* RTAS */
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 2912b8787aa4..40198d50b4c2 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -756,7 +756,10 @@ void cacheinfo_cpu_online(unsigned int cpu_id)
 		cacheinfo_sysfs_populate(cpu_id, cache);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
+/* functions needed to remove cache entry for cpu offline or suspend/resume */
+
+#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
+    defined(CONFIG_HOTPLUG_CPU)
 
 static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
 {
@@ -843,4 +846,4 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
 	if (cache)
 		cache_cpu_clear(cache, cpu_id);
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 37d1bb002aa9..1557e7c2c7e1 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -56,7 +56,6 @@ _GLOBAL(__setup_cpu_power8)
 	li	r0,0
 	mtspr	SPRN_LPID,r0
 	mfspr	r3,SPRN_LPCR
-	oris	r3, r3, LPCR_AIL_3@h
 	bl	__init_LPCR
 	bl	__init_HFSCR
 	bl	__init_tlb_power8
@@ -75,7 +74,6 @@ _GLOBAL(__restore_cpu_power8)
 	li	r0,0
 	mtspr	SPRN_LPID,r0
 	mfspr	r3,SPRN_LPCR
-	oris	r3, r3, LPCR_AIL_3@h
 	bl	__init_LPCR
 	bl	__init_HFSCR
 	bl	__init_tlb_power8
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6c8dd5da4de5..c1faade6506d 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -510,7 +510,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004b0000,
 		.cpu_name		= "POWER8E (raw)",
-		.cpu_features		= CPU_FTRS_POWER8,
+		.cpu_features		= CPU_FTRS_POWER8E,
 		.cpu_user_features	= COMMON_USER_POWER8,
 		.cpu_user_features2	= COMMON_USER2_POWER8,
 		.mmu_features		= MMU_FTRS_POWER8,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 11c1d069d920..7a13f378ca2c 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 			size_t csize, unsigned long offset, int userbuf)
 {
 	void *vaddr;
+	phys_addr_t paddr;
 
 	if (!csize)
 		return 0;
 
 	csize = min_t(size_t, csize, PAGE_SIZE);
+	paddr = pfn << PAGE_SHIFT;
 
-	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
-		vaddr = __va(pfn << PAGE_SHIFT);
+	if (memblock_is_region_memory(paddr, csize)) {
+		vaddr = __va(paddr);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 	} else {
-		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+		vaddr = __ioremap(paddr, PAGE_SIZE, 0);
 		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
 		iounmap(vaddr);
 	}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	if (ppc_md.dma_set_mask)
-		return ppc_md.dma_set_mask(dev, dma_mask);
 	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
 		return dma_ops->set_dma_mask(dev, dma_mask);
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 	*dev->dma_mask = dma_mask;
 	return 0;
 }
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (ppc_md.dma_set_mask)
+		return ppc_md.dma_set_mask(dev, dma_mask);
+	return __dma_set_mask(dev, dma_mask);
+}
 EXPORT_SYMBOL(dma_set_mask);
 
 u64 dma_get_required_mask(struct device *dev)
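
Note on the dma.c hunk above: the patch splits mask setting into a generic helper, __dma_set_mask(), and a wrapper, dma_set_mask(), that gives the platform's optional ppc_md.dma_set_mask hook first refusal before falling back to the generic path, presumably so code that should bypass the platform override can call the helper directly. A minimal standalone sketch of that override-then-fallback shape follows; the struct definitions and the -5 error value are simplified stand-ins for illustration, not the kernel's real definitions:

	/*
	 * Standalone sketch of the override-then-fallback pattern the
	 * dma.c hunk introduces. Types here are illustrative stand-ins.
	 */
	#include <stdio.h>

	typedef unsigned long long u64;

	struct device {
		u64 *dma_mask;
	};

	struct machdep_calls {
		/* Optional platform hook; NULL when there is no override. */
		int (*dma_set_mask)(struct device *dev, u64 dma_mask);
	};

	static struct machdep_calls ppc_md;	/* no override installed here */

	static int dma_supported(struct device *dev, u64 mask)
	{
		return 1;	/* accept every mask in this sketch */
	}

	/* Generic implementation: validate, then store the mask. */
	static int __dma_set_mask(struct device *dev, u64 dma_mask)
	{
		if (!dev->dma_mask || !dma_supported(dev, dma_mask))
			return -5;	/* stand-in for -EIO */
		*dev->dma_mask = dma_mask;
		return 0;
	}

	/* Wrapper: give the platform hook first refusal, then fall back. */
	static int dma_set_mask(struct device *dev, u64 dma_mask)
	{
		if (ppc_md.dma_set_mask)
			return ppc_md.dma_set_mask(dev, dma_mask);
		return __dma_set_mask(dev, dma_mask);
	}

	int main(void)
	{
		u64 mask = 0;
		struct device dev = { &mask };

		/* No platform hook installed, so this takes the generic path. */
		printf("rc=%d mask=%llx\n",
		       dma_set_mask(&dev, 0xffffffffULL), mask);
		return 0;
	}
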
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 148db72a8c43..e7b76a6bf150 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
 #include <linux/rbtree.h>
+#include <linux/reboot.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -89,7 +90,7 @@
 /* Platform dependent EEH operations */
 struct eeh_ops *eeh_ops = NULL;
 
-int eeh_subsystem_enabled;
+bool eeh_subsystem_enabled = false;
 EXPORT_SYMBOL(eeh_subsystem_enabled);
 
 /*
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
 
 	eeh_stats.total_mmio_ffs++;
 
-	if (!eeh_subsystem_enabled)
+	if (!eeh_enabled())
 		return 0;
 
 	if (!edev) {
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
 	return -EEXIST;
 }
 
+static int eeh_reboot_notifier(struct notifier_block *nb,
+			       unsigned long action, void *unused)
+{
+	eeh_set_enable(false);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_reboot_nb = {
+	.notifier_call = eeh_reboot_notifier,
+};
+
 /**
  * eeh_init - EEH initialization
  *
@@ -778,6 +790,14 @@ int eeh_init(void)
 	if (machine_is(powernv) && cnt++ <= 0)
 		return ret;
 
+	/* Register reboot notifier */
+	ret = register_reboot_notifier(&eeh_reboot_nb);
+	if (ret) {
+		pr_warn("%s: Failed to register notifier (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
 	/* call platform initialization function */
 	if (!eeh_ops) {
 		pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@ int eeh_init(void)
 		return ret;
 	}
 
-	if (eeh_subsystem_enabled)
+	if (eeh_enabled())
 		pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
 	else
 		pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
 	struct device_node *dn;
 	struct eeh_dev *edev;
 
-	if (!dev || !eeh_subsystem_enabled)
+	if (!dev || !eeh_enabled())
 		return;
 
 	pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
 {
 	struct eeh_dev *edev;
 
-	if (!dev || !eeh_subsystem_enabled)
+	if (!dev || !eeh_enabled())
 		return;
 	edev = pci_dev_to_eeh_dev(dev);
@@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
 
 static int proc_eeh_show(struct seq_file *m, void *v)
 {
-	if (0 == eeh_subsystem_enabled) {
+	if (!eeh_enabled()) {
 		seq_printf(m, "EEH Subsystem is globally disabled\n");
 		seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
 	} else {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..bb61ca58ca6d 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -143,13 +143,30 @@ static void eeh_disable_irq(struct pci_dev *dev)
 static void eeh_enable_irq(struct pci_dev *dev)
 {
 	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
-	struct irq_desc *desc;
 
 	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
 		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
-
-		desc = irq_to_desc(dev->irq);
-		if (desc && desc->depth > 0)
+		/*
+		 * FIXME !!!!!
+		 *
+		 * This is just ass backwards. This maze has
+		 * unbalanced irq_enable/disable calls. So instead of
+		 * finding the root cause it works around the warning
+		 * in the irq_enable code by conditionally calling
+		 * into it.
+		 *
+		 * That's just wrong. The warning in the core code is
+		 * there to tell people to fix their asymmetries in
+		 * their own code, not by abusing the core information
+		 * to avoid it.
+		 *
+		 * I so wish that the asymmetry would be the other way
+		 * round and a few more irq_disable calls render that
+		 * shit unusable forever.
+		 *
+		 *	tglx
+		 */
+		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
 			enable_irq(dev->irq);
 	}
 }
@@ -362,9 +379,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
 	 */
 	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
 		return NULL;
+
 	driver = eeh_pcid_get(dev);
-	if (driver && driver->err_handler)
-		return NULL;
+	if (driver) {
+		eeh_pcid_put(dev);
+		if (driver->err_handler)
+			return NULL;
+	}
 
 	/* Remove it from PCI subsystem */
 	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 063b65dd4f27..c1bee3ce9d1f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -34,7 +34,250 @@
 * special interrupts from within a non-standard level will probably
 * blow you up
 */
-#define	SPECIAL_EXC_FRAME_SIZE	INT_FRAME_SIZE
+#define SPECIAL_EXC_SRR0	0
+#define SPECIAL_EXC_SRR1	1
+#define SPECIAL_EXC_SPRG_GEN	2
+#define SPECIAL_EXC_SPRG_TLB	3
+#define SPECIAL_EXC_MAS0	4
+#define SPECIAL_EXC_MAS1	5
+#define SPECIAL_EXC_MAS2	6
+#define SPECIAL_EXC_MAS3	7
+#define SPECIAL_EXC_MAS6	8
+#define SPECIAL_EXC_MAS7	9
+#define SPECIAL_EXC_MAS5	10	/* E.HV only */
+#define SPECIAL_EXC_MAS8	11	/* E.HV only */
+#define SPECIAL_EXC_IRQHAPPENED	12
+#define SPECIAL_EXC_DEAR	13
+#define SPECIAL_EXC_ESR		14
+#define SPECIAL_EXC_SOFTE	15
+#define SPECIAL_EXC_CSRR0	16
+#define SPECIAL_EXC_CSRR1	17
+/* must be even to keep 16-byte stack alignment */
+#define SPECIAL_EXC_END		18
+
+#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
+#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)
+
+#define SPECIAL_EXC_STORE(reg, name) \
+	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
+
+#define SPECIAL_EXC_LOAD(reg, name) \
+	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
+
+special_reg_save:
+	lbz	r9,PACAIRQHAPPENED(r13)
+	RECONCILE_IRQ_STATE(r3,r4)
+
+	/*
+	 * We only need (or have stack space) to save this stuff if
+	 * we interrupted the kernel.
+	 */
+	ld	r3,_MSR(r1)
+	andi.	r3,r3,MSR_PR
+	bnelr
+
+	/* Copy info into temporary exception thread info */
+	ld	r11,PACAKSAVE(r13)
+	CURRENT_THREAD_INFO(r11, r11)
+	CURRENT_THREAD_INFO(r12, r1)
+	ld	r10,TI_FLAGS(r11)
+	std	r10,TI_FLAGS(r12)
+	ld	r10,TI_PREEMPT(r11)
+	std	r10,TI_PREEMPT(r12)
+	ld	r10,TI_TASK(r11)
+	std	r10,TI_TASK(r12)
+
+	/*
+	 * Advance to the next TLB exception frame for handler
+	 * types that don't do it automatically.
+	 */
+	LOAD_REG_ADDR(r11,extlb_level_exc)
+	lwz	r12,0(r11)
+	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
+	add	r10,r10,r12
+	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
+
+	/*
+	 * Save registers needed to allow nesting of certain exceptions
+	 * (such as TLB misses) inside special exception levels
+	 */
+	mfspr	r10,SPRN_SRR0
+	SPECIAL_EXC_STORE(r10,SRR0)
+	mfspr	r10,SPRN_SRR1
+	SPECIAL_EXC_STORE(r10,SRR1)
+	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
+	SPECIAL_EXC_STORE(r10,SPRG_GEN)
+	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
+	SPECIAL_EXC_STORE(r10,SPRG_TLB)
+	mfspr	r10,SPRN_MAS0
+	SPECIAL_EXC_STORE(r10,MAS0)
+	mfspr	r10,SPRN_MAS1
+	SPECIAL_EXC_STORE(r10,MAS1)
+	mfspr	r10,SPRN_MAS2
+	SPECIAL_EXC_STORE(r10,MAS2)
+	mfspr	r10,SPRN_MAS3
+	SPECIAL_EXC_STORE(r10,MAS3)
+	mfspr	r10,SPRN_MAS6
+	SPECIAL_EXC_STORE(r10,MAS6)
+	mfspr	r10,SPRN_MAS7
+	SPECIAL_EXC_STORE(r10,MAS7)
+BEGIN_FTR_SECTION
+	mfspr	r10,SPRN_MAS5
+	SPECIAL_EXC_STORE(r10,MAS5)
+	mfspr	r10,SPRN_MAS8
+	SPECIAL_EXC_STORE(r10,MAS8)
+
+	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
+	li	r10,0
+	mtspr	SPRN_MAS5,r10
+	mtspr	SPRN_MAS8,r10
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+	SPECIAL_EXC_STORE(r9,IRQHAPPENED)
+
+	mfspr	r10,SPRN_DEAR
+	SPECIAL_EXC_STORE(r10,DEAR)
+	mfspr	r10,SPRN_ESR
+	SPECIAL_EXC_STORE(r10,ESR)
+
+	lbz	r10,PACASOFTIRQEN(r13)
+	SPECIAL_EXC_STORE(r10,SOFTE)
+	ld	r10,_NIP(r1)
+	SPECIAL_EXC_STORE(r10,CSRR0)
+	ld	r10,_MSR(r1)
+	SPECIAL_EXC_STORE(r10,CSRR1)
+
+	blr
+
+ret_from_level_except:
+	ld	r3,_MSR(r1)
+	andi.	r3,r3,MSR_PR
+	beq	1f
+	b	ret_from_except
+1:
+
+	LOAD_REG_ADDR(r11,extlb_level_exc)
+	lwz	r12,0(r11)
+	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
+	sub	r10,r10,r12
+	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
+
+	/*
+	 * It's possible that the special level exception interrupted a
+	 * TLB miss handler, and inserted the same entry that the
+	 * interrupted handler was about to insert. On CPUs without TLB
+	 * write conditional, this can result in a duplicate TLB entry.
+	 * Wipe all non-bolted entries to be safe.
+	 *
+	 * Note that this doesn't protect against any TLB misses
+	 * we may take accessing the stack from here to the end of
+	 * the special level exception. It's not clear how we can
+	 * reasonably protect against that, but only CPUs with
+	 * neither TLB write conditional nor bolted kernel memory
+	 * are affected. Do any such CPUs even exist?
+	 */
+	PPC_TLBILX_ALL(0,R0)
+
+	REST_NVGPRS(r1)
+
+	SPECIAL_EXC_LOAD(r10,SRR0)
+	mtspr	SPRN_SRR0,r10
+	SPECIAL_EXC_LOAD(r10,SRR1)
+	mtspr	SPRN_SRR1,r10
+	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
+	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
+	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
+	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
+	SPECIAL_EXC_LOAD(r10,MAS0)
+	mtspr	SPRN_MAS0,r10
+	SPECIAL_EXC_LOAD(r10,MAS1)
+	mtspr	SPRN_MAS1,r10
+	SPECIAL_EXC_LOAD(r10,MAS2)
+	mtspr	SPRN_MAS2,r10
+	SPECIAL_EXC_LOAD(r10,MAS3)
+	mtspr	SPRN_MAS3,r10
+	SPECIAL_EXC_LOAD(r10,MAS6)
+	mtspr	SPRN_MAS6,r10
+	SPECIAL_EXC_LOAD(r10,MAS7)
+	mtspr	SPRN_MAS7,r10
+BEGIN_FTR_SECTION
+	SPECIAL_EXC_LOAD(r10,MAS5)
+	mtspr	SPRN_MAS5,r10
+	SPECIAL_EXC_LOAD(r10,MAS8)
+	mtspr	SPRN_MAS8,r10
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+
+	lbz	r6,PACASOFTIRQEN(r13)
+	ld	r5,SOFTE(r1)
+
+	/* Interrupts had better not already be enabled... */
+	twnei	r6,0
+
+	cmpwi	cr0,r5,0
+	beq	1f
+
+	TRACE_ENABLE_INTS
+	stb	r5,PACASOFTIRQEN(r13)
+1:
+	/*
+	 * Restore PACAIRQHAPPENED rather than setting it based on
+	 * the return MSR[EE], since we could have interrupted
+	 * __check_irq_replay() or other inconsistent transitory
+	 * states that must remain that way.
+	 */
+	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
+	stb	r10,PACAIRQHAPPENED(r13)
+
+	SPECIAL_EXC_LOAD(r10,DEAR)
+	mtspr	SPRN_DEAR,r10
+	SPECIAL_EXC_LOAD(r10,ESR)
+	mtspr	SPRN_ESR,r10
+
+	stdcx.	r0,0,r1		/* to clear the reservation */
+
+	REST_4GPRS(2, r1)
+	REST_4GPRS(6, r1)
+
+	ld	r10,_CTR(r1)
+	ld	r11,_XER(r1)
+	mtctr	r10
+	mtxer	r11
+
+	blr
+
+.macro ret_from_level srr0 srr1 paca_ex scratch
+	bl	ret_from_level_except
+
+	ld	r10,_LINK(r1)
+	ld	r11,_CCR(r1)
+	ld	r0,GPR13(r1)
+	mtlr	r10
+	mtcr	r11
+
+	ld	r10,GPR10(r1)
+	ld	r11,GPR11(r1)
+	ld	r12,GPR12(r1)
+	mtspr	\scratch,r0
+
+	std	r10,\paca_ex+EX_R10(r13);
+	std	r11,\paca_ex+EX_R11(r13);
+	ld	r10,_NIP(r1)
+	ld	r11,_MSR(r1)
+	ld	r0,GPR0(r1)
+	ld	r1,GPR1(r1)
+	mtspr	\srr0,r10
+	mtspr	\srr1,r11
+	ld	r10,\paca_ex+EX_R10(r13)
+	ld	r11,\paca_ex+EX_R11(r13)
+	mfspr	r13,\scratch
+.endm
+
+ret_from_crit_except:
+	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
+	rfci
+
+ret_from_mc_except:
+	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
+	rfmci
 
 /* Exception prolog code for all exceptions */
 #define EXCEPTION_PROLOG(n, intnum, type, addition)	\
@@ -42,7 +285,6 @@
 	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */		\
 	std	r10,PACA_EX##type+EX_R10(r13);			\
 	std	r11,PACA_EX##type+EX_R11(r13);			\
-	PROLOG_STORE_RESTORE_SCRATCH_##type;			\
 	mfcr	r10;			/* save CR */		\
 	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */ \
 	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */	\
@@ -69,19 +311,19 @@
 
 #define	CRIT_SET_KSTACK						\
 	ld	r1,PACA_CRIT_STACK(r13);			\
-	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
+	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 #define	SPRN_CRIT_SRR0	SPRN_CSRR0
 #define	SPRN_CRIT_SRR1	SPRN_CSRR1
 
 #define	DBG_SET_KSTACK						\
 	ld	r1,PACA_DBG_STACK(r13);				\
-	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
+	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define	SPRN_DBG_SRR0	SPRN_DSRR0
 #define	SPRN_DBG_SRR1	SPRN_DSRR1
 
 #define	MC_SET_KSTACK						\
 	ld	r1,PACA_MC_STACK(r13);				\
-	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
+	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 #define	SPRN_MC_SRR0	SPRN_MCSRR0
 #define	SPRN_MC_SRR1	SPRN_MCSRR1
 
@@ -100,20 +342,6 @@
 #define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)		\
 	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
 
-/*
- * Store user-visible scratch in PACA exception slots and restore proper value
- */
-#define PROLOG_STORE_RESTORE_SCRATCH_GEN
-#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
-#define PROLOG_STORE_RESTORE_SCRATCH_DBG
-#define PROLOG_STORE_RESTORE_SCRATCH_MC
-
-#define PROLOG_STORE_RESTORE_SCRATCH_CRIT			\
-	mfspr	r10,SPRN_SPRG_CRIT_SCRATCH;	/* get r13 */	\
-	std	r10,PACA_EXCRIT+EX_R13(r13);			\
-	ld	r11,PACA_SPRG3(r13);				\
-	mtspr	SPRN_SPRG_CRIT_SCRATCH,r11;
-
 /* Variants of the "addition" argument for the prolog
 */
 #define PROLOG_ADDITION_NONE_GEN(n)
 
@@ -147,10 +375,8 @@
 	std	r15,PACA_EXMC+EX_R15(r13)
 
-/* Core exception code for all exceptions except TLB misses.
- * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
- */
-#define EXCEPTION_COMMON(n, excf, ints)				\
+/* Core exception code for all exceptions except TLB misses. */
+#define EXCEPTION_COMMON_LVL(n, scratch, excf)			\
 exc_##n##_common:						\
 	std	r0,GPR0(r1);	/* save r0 in stackframe */	\
 	std	r2,GPR2(r1);	/* save r2 in stackframe */	\
@@ -163,7 +389,7 @@ exc_##n##_common:				\
 	ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */ \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */	\
 	ld	r4,excf+EX_R11(r13);	/* get back r11 */	\
-	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */	\
+	mfspr	r5,scratch;	/* get back r13 */		\
 	std	r12,GPR12(r1);	/* save r12 in stackframe */	\
 	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
 	mflr	r6;		/* save LR in stackframe */	\
@@ -187,24 +413,29 @@ exc_##n##_common:				\
 	std	r11,SOFTE(r1);	/* and save it to stackframe */	\
 	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	\
 	std	r3,_TRAP(r1);	/* set trap number */		\
-	std	r0,RESULT(r1);	/* clear regs->result */	\
-	ints;
+	std	r0,RESULT(r1);	/* clear regs->result */
 
-/* Variants for the "ints" argument. This one does nothing when we want
- * to keep interrupts in their original state
- */
-#define INTS_KEEP
+#define EXCEPTION_COMMON(n) \
+	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
+#define EXCEPTION_COMMON_CRIT(n) \
+	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
+#define EXCEPTION_COMMON_MC(n) \
+	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
+#define EXCEPTION_COMMON_DBG(n) \
+	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
 
-/* This second version is meant for exceptions that don't immediately
- * hard-enable. We set a bit in paca->irq_happened to ensure that
- * a subsequent call to arch_local_irq_restore() will properly
- * hard-enable and avoid the fast-path, and then reconcile irq state.
+/*
+ * This is meant for exceptions that don't immediately hard-enable. We
+ * set a bit in paca->irq_happened to ensure that a subsequent call to
+ * arch_local_irq_restore() will properly hard-enable and avoid the
+ * fast-path, and then reconcile irq state.
 */
 #define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)
 
-/* This is called by exceptions that used INTS_KEEP (that did not touch
- * irq indicators in the PACA). This will restore MSR:EE to it's previous
- * value
+/*
+ * This is called by exceptions that don't use INTS_DISABLE (that did not
+ * touch irq indicators in the PACA). This will restore MSR:EE to its
+ * previous value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 * load from the wrtee, thus limiting the latency caused by the dependency
@@ -262,7 +493,8 @@ exc_##n##_bad_stack:				\
 #define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)	\
 	START_EXCEPTION(label);					\
 	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
-	EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE)	\
+	EXCEPTION_COMMON(trapnum)				\
+	INTS_DISABLE;						\
 	ack(r8);						\
 	CHECK_NAPPING();					\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
@@ -283,8 +515,8 @@ exception_marker:
 	.balign	0x1000
 	.globl interrupt_base_book3e
 interrupt_base_book3e:					/* fake trap */
-	EXCEPTION_STUB(0x000, machine_check)		/* 0x0200 */
-	EXCEPTION_STUB(0x020, critical_input)		/* 0x0580 */
+	EXCEPTION_STUB(0x000, machine_check)
+	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
 	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
 	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
 	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
@@ -299,8 +531,8 @@ interrupt_base_book3e:				/* fake trap */
 	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
 	EXCEPTION_STUB(0x1c0, data_tlb_miss)
 	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
-	EXCEPTION_STUB(0x200, altivec_unavailable)	/* 0x0f20 */
-	EXCEPTION_STUB(0x220, altivec_assist)		/* 0x1700 */
+	EXCEPTION_STUB(0x200, altivec_unavailable)
+	EXCEPTION_STUB(0x220, altivec_assist)
 	EXCEPTION_STUB(0x260, perfmon)
 	EXCEPTION_STUB(0x280, doorbell)
 	EXCEPTION_STUB(0x2a0, doorbell_crit)
@@ -317,25 +549,25 @@ interrupt_end_book3e:
 	START_EXCEPTION(critical_input);
 	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
-//	bl	special_reg_save_crit
-//	CHECK_NAPPING();
-//	addi	r3,r1,STACK_FRAME_OVERHEAD
-//	bl	.critical_exception
-//	b	ret_from_crit_except
-	b	.
+	EXCEPTION_COMMON_CRIT(0x100)
+	bl	.save_nvgprs
+	bl	special_reg_save
+	CHECK_NAPPING();
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unknown_exception
+	b	ret_from_crit_except
 
 /* Machine Check Interrupt */
 	START_EXCEPTION(machine_check);
-	MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
+	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
-//	bl	special_reg_save_mc
-//	addi	r3,r1,STACK_FRAME_OVERHEAD
-//	CHECK_NAPPING();
-//	bl	.machine_check_exception
-//	b	ret_from_mc_except
-	b	.
+	EXCEPTION_COMMON_MC(0x000)
+	bl	.save_nvgprs
+	bl	special_reg_save
+	CHECK_NAPPING();
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.machine_check_exception
+	b	ret_from_mc_except
 
 /* Data Storage Interrupt */
 	START_EXCEPTION(data_storage)
@@ -343,7 +575,8 @@ interrupt_end_book3e:
			    PROLOG_ADDITION_2REGS)
 	mfspr	r14,SPRN_DEAR
 	mfspr	r15,SPRN_ESR
-	EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0x300)
+	INTS_DISABLE
 	b	storage_fault_common
 
 /* Instruction Storage Interrupt */
@@ -352,7 +585,8 @@ interrupt_end_book3e:
			    PROLOG_ADDITION_2REGS)
 	li	r15,0
 	mr	r14,r10
-	EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0x400)
+	INTS_DISABLE
 	b	storage_fault_common
 
 /* External Input Interrupt */
@@ -365,7 +599,7 @@ interrupt_end_book3e:
			    PROLOG_ADDITION_2REGS)
 	mfspr	r14,SPRN_DEAR
 	mfspr	r15,SPRN_ESR
-	EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x600)
 	b	alignment_more	/* no room, go out of line */
 
 /* Program Interrupt */
@@ -373,7 +607,8 @@ interrupt_end_book3e:
 	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
 	mfspr	r14,SPRN_ESR
-	EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0x700)
+	INTS_DISABLE
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
@@ -386,7 +621,7 @@ interrupt_end_book3e:
 	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
 	/* we can probably do a shorter exception entry for that one... */
-	EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x800)
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
@@ -403,7 +638,7 @@ interrupt_end_book3e:
 	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
 	/* we can probably do a shorter exception entry for that one... */
-	EXCEPTION_COMMON(0x200, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x200)
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
 	ld	r12,_MSR(r1)
@@ -425,7 +660,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x220, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0x220)
+	INTS_DISABLE
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_ALTIVEC
@@ -450,13 +686,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	START_EXCEPTION(watchdog);
 	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
-//	bl	special_reg_save_crit
-//	CHECK_NAPPING();
-//	addi	r3,r1,STACK_FRAME_OVERHEAD
-//	bl	.unknown_exception
-//	b	ret_from_crit_except
-	b	.
+	EXCEPTION_COMMON_CRIT(0x9f0)
+	bl	.save_nvgprs
+	bl	special_reg_save
+	CHECK_NAPPING();
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_BOOKE_WDT
+	bl	.WatchdogException
+#else
+	bl	.unknown_exception
+#endif
+	b	ret_from_crit_except
 
 /* System Call Interrupt */
 	START_EXCEPTION(system_call)
@@ -470,7 +710,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	START_EXCEPTION(ap_unavailable);
 	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0xf20)
+	INTS_DISABLE
 	bl	.save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.unknown_exception
@@ -513,7 +754,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mtcr	r10
 	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
 	ld	r11,PACA_EXCRIT+EX_R11(r13)
-	ld	r13,PACA_EXCRIT+EX_R13(r13)
+	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
 	rfci
 
 /* Normal debug exception */
@@ -526,10 +767,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
-	ld	r15,PACA_EXCRIT+EX_R13(r13)
-	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
 	mfspr	r14,SPRN_DBSR
-	EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
+	EXCEPTION_COMMON_CRIT(0xd00)
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	mr	r4,r14
@@ -592,10 +831,9 @@ kernel_dbg_exc:
 	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
-	mfspr	r15,SPRN_SPRG_DBG_SCRATCH
-	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
 	mfspr	r14,SPRN_DBSR
-	EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE)
+	EXCEPTION_COMMON_DBG(0xd08)
+	INTS_DISABLE
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	mr	r4,r14
@@ -608,7 +846,8 @@ kernel_dbg_exc:
 	START_EXCEPTION(perfmon);
 	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
+	EXCEPTION_COMMON(0x260)
+	INTS_DISABLE
 	CHECK_NAPPING()
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.performance_monitor_exception
@@ -622,13 +861,13 @@ kernel_dbg_exc:
 	START_EXCEPTION(doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
-//	bl	special_reg_save_crit
-//	CHECK_NAPPING();
-//	addi	r3,r1,STACK_FRAME_OVERHEAD
-//	bl	.doorbell_critical_exception
-//	b	ret_from_crit_except
-	b	.
+	EXCEPTION_COMMON_CRIT(0x2a0)
+	bl	.save_nvgprs
+	bl	special_reg_save
+	CHECK_NAPPING();
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unknown_exception
+	b	ret_from_crit_except
 
 /*
 * Guest doorbell interrupt
 */
 	START_EXCEPTION(guest_doorbell);
 	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
			        PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x2c0)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.save_nvgprs
 	INTS_RESTORE_HARD
@@ -648,19 +887,19 @@ kernel_dbg_exc:
 	START_EXCEPTION(guest_doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
-//	EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
-//	bl	special_reg_save_crit
-//	CHECK_NAPPING();
-//	addi	r3,r1,STACK_FRAME_OVERHEAD
-//	bl	.guest_doorbell_critical_exception
-//	b	ret_from_crit_except
-	b	.
+	EXCEPTION_COMMON_CRIT(0x2e0)
+	bl	.save_nvgprs
+	bl	special_reg_save
+	CHECK_NAPPING();
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.unknown_exception
+	b	ret_from_crit_except
 
 /* Hypervisor call */
 	START_EXCEPTION(hypercall);
 	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
			        PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x310)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.save_nvgprs
 	INTS_RESTORE_HARD
@@ -671,7 +910,7 @@ kernel_dbg_exc:
 	START_EXCEPTION(ehpriv);
 	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
			        PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x320)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.save_nvgprs
 	INTS_RESTORE_HARD
@@ -682,7 +921,7 @@ kernel_dbg_exc:
 	START_EXCEPTION(lrat_error);
 	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
			        PROLOG_ADDITION_NONE)
-	EXCEPTION_COMMON(0x340, PACA_EXGEN, INTS_KEEP)
+	EXCEPTION_COMMON(0x340)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	.save_nvgprs
 	INTS_RESTORE_HARD
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 38d507306a11..3afd3915921a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -54,14 +54,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)		\
 	xori	r12,r12,MSR_LE ;			\
 	mtspr	SPRN_SRR1,r12 ;				\
 	rfid ;		/* return to userspace */	\
-	b	. ;					\
-2:	mfspr	r12,SPRN_SRR1 ;				\
-	andi.	r12,r12,MSR_PR ;			\
-	bne	0b ;					\
-	mtspr	SPRN_SRR0,r3 ;				\
-	mtspr	SPRN_SRR1,r4 ;				\
-	mtspr	SPRN_SDR1,r5 ;				\
-	rfid ;						\
 	b	. ;	/* prevent speculative execution */
 
 #if defined(CONFIG_RELOCATABLE)
@@ -121,9 +113,10 @@ BEGIN_FTR_SECTION
 	cmpwi	cr1,r13,2
 	/* Total loss of HV state is fatal, we could try to use the
	 * PIR to locate a PACA, then use an emergency stack etc...
-	 * but for now, let's just stay stuck here
+	 * OPAL v3 based powernv platforms have new idle states
+	 * which fall in this category.
	 */
-	bgt	cr1,.
+	bgt	cr1,8f
 	GET_PACA(r13)
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -141,6 +134,11 @@ BEGIN_FTR_SECTION
 	beq	cr1,2f
 	b	.power7_wakeup_noloss
2:	b	.power7_wakeup_loss
+
+	/* Fast Sleep wakeup on PowerNV */
+8:	GET_PACA(r13)
+	b	.power7_wakeup_tb_loss
+
9:
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif /* CONFIG_PPC_P7_NAP */
@@ -164,13 +162,18 @@ BEGIN_FTR_SECTION
	 */
 	mfspr	r13,SPRN_SRR1
 	rlwinm.	r13,r13,47-31,30,31
+	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
 	beq	9f
 
+	mfspr	r13,SPRN_SRR1
+	rlwinm.	r13,r13,47-31,30,31
 	/* waking up from powersave (nap) state */
 	cmpwi	cr1,r13,2
 	/* Total loss of HV state is fatal. let's just stay stuck here */
+	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
 	bgt	cr1,.
9:
+	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif /* CONFIG_PPC_P7_NAP */
 	EXCEPTION_PROLOG_0(PACA_EXMC)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 9b27b293a922..6a014c763cc7 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 */
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
 {
+	addr = ppc_function_entry((void *)addr);
 
 	/* use the create_branch to verify that this offset can be branched */
 	return create_branch((unsigned int *)ip, addr, 0);
@@ -531,13 +532,8 @@ void arch_ftrace_update_code(int command)
 		ftrace_disable_ftrace_graph_caller();
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
-	/* caller expects data to be zero */
-	unsigned long *p = data;
-
-	*p = 0;
-
 	return 0;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 3fdef0f0c67f..c3ab86975614 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -17,20 +17,31 @@
 #include <asm/ppc-opcode.h>
 #include <asm/hw_irq.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/opal.h>
 
 #undef DEBUG
 
-	.text
+/* Idle state entry routines */
 
-_GLOBAL(power7_idle)
-	/* Now check if user or arch enabled NAP mode */
-	LOAD_REG_ADDRBASE(r3,powersave_nap)
-	lwz	r4,ADDROFF(powersave_nap)(r3)
-	cmpwi	0,r4,0
-	beqlr
-	/* fall through */
+#define IDLE_STATE_ENTER_SEQ(IDLE_INST)				\
+	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
+	std	r0,0(r1);					\
+	ptesync;						\
+	ld	r0,0(r1);					\
+1:	cmp	cr0,r0,r0;					\
+	bne	1b;						\
+	IDLE_INST;						\
+	b	.
 
-_GLOBAL(power7_nap)
+	.text
+
+/*
+ * Pass requested state in r3:
+ *	0 - nap
+ *	1 - sleep
+ */
+_GLOBAL(power7_powersave_common)
+	/* Use r3 to pass state nap/sleep/winkle */
 	/* NAP is a state loss, we create a regs frame on the
	 * stack, fill it up with the state we care about and
	 * stick a pointer to it in PACAR1. We really only
@@ -79,8 +90,8 @@ _GLOBAL(power7_nap)
 	/* Continue saving state */
 	SAVE_GPR(2, r1)
 	SAVE_NVGPRS(r1)
-	mfcr	r3
-	std	r3,_CCR(r1)
+	mfcr	r4
+	std	r4,_CCR(r1)
 	std	r9,_MSR(r1)
 	std	r1,PACAR1(r13)
 
@@ -90,15 +101,56 @@ _GLOBAL(power7_enter_nap_mode)
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
+	cmpwi	cr0,r3,1
+	beq	2f
+	IDLE_STATE_ENTER_SEQ(PPC_NAP)
+	/* No return */
+2:	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+	/* No return */
 
-	/* Magic NAP mode enter sequence */
-	std	r0,0(r1)
-	ptesync
-	ld	r0,0(r1)
-1:	cmp	cr0,r0,r0
-	bne	1b
-	PPC_NAP
-	b	.
+_GLOBAL(power7_idle)
+	/* Now check if user or arch enabled NAP mode */
+	LOAD_REG_ADDRBASE(r3,powersave_nap)
+	lwz	r4,ADDROFF(powersave_nap)(r3)
+	cmpwi	0,r4,0
+	beqlr
+	/* fall through */
+
+_GLOBAL(power7_nap)
+	li	r3,0
+	b	power7_powersave_common
+	/* No return */
+
+_GLOBAL(power7_sleep)
+	li	r3,1
+	b	power7_powersave_common
+	/* No return */
+
+_GLOBAL(power7_wakeup_tb_loss)
+	ld	r2,PACATOC(r13);
+	ld	r1,PACAR1(r13)
+
+	/* Time base re-sync */
+	li	r0,OPAL_RESYNC_TIMEBASE
+	LOAD_REG_ADDR(r11,opal);
+	ld	r12,8(r11);
+	ld	r2,0(r11);
+	mtctr	r12
+	bctrl
+
+	/* TODO: Check r3 for failure */
+
+	REST_NVGPRS(r1)
+	REST_GPR(2, r1)
+	ld	r3,_CCR(r1)
+	ld	r4,_MSR(r1)
+	ld	r5,_NIP(r1)
+	addi	r1,r1,INT_FRAME_SIZE
+	mtcr	r3
+	mfspr	r3,SPRN_SRR1	/* Return SRR1 */
+	mtspr	SPRN_SRR1,r4
+	mtspr	SPRN_SRR0,r5
+	rfid
 
 _GLOBAL(power7_wakeup_loss)
 	ld	r1,PACAR1(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
 	memset(tbl->it_map, 0xff, sz);
 	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
+	/*
+	 * Disable iommu bypass, otherwise the user can DMA to all of
+	 * our physical memory via the bypass window instead of just
+	 * the pages that have been explicitly mapped into the iommu
+	 */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
 	/* Restore bit#0 set by iommu_init_table() */
 	if (tbl->it_offset == 0)
 		set_bit(0, tbl->it_map);
+
+	/* The kernel owns the device now, we can restore the iommu bypass */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, true);
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..ca1cd7459c4a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -465,7 +465,6 @@ static inline void check_stack_overflow(void)
 
 void __do_irq(struct pt_regs *regs)
 {
-	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -487,11 +486,8 @@ void __do_irq(struct pt_regs *regs)
 	/* And finally process it */
 	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
-	else {
-		desc = irq_to_desc(irq);
-		if (likely(desc))
-			desc->handle_irq(irq, desc);
-	}
+	else
+		generic_handle_irq(irq);
 
 	trace_irq_exit(regs);
 
@@ -559,8 +555,13 @@ void exc_lvl_ctx_init(void)
 #ifdef CONFIG_PPC64
 		cpu_nr = i;
 #else
+#ifdef CONFIG_SMP
 		cpu_nr = get_hard_smp_processor_id(i);
+#else
+		cpu_nr = 0;
+#endif
 #endif
+
 		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
 		tp = critirq_ctx[cpu_nr];
 		tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
 
 /* Values we need to export to the second kernel via the device tree.
 */
 static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
 static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
 
 static struct property kernel_end_prop = {
	.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
 static struct property crashk_base_prop = {
	.name = "linux,crashkernel-base",
	.length = sizeof(phys_addr_t),
-	.value = &crashk_res.start,
+	.value = &crashk_base
 };
 
 static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
 static struct property memory_limit_prop = {
	.name = "linux,memory-limit",
	.length = sizeof(unsigned long long),
-	.value = &memory_limit,
+	.value = &mem_limit,
 };
 
+#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)
+
 static void __init export_crashk_values(struct device_node *node)
 {
	struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
		of_remove_property(node, prop);
 
	if (crashk_res.start != 0) {
+		crashk_base = cpu_to_be_ulong(crashk_res.start),
		of_add_property(node, &crashk_base_prop);
-		crashk_size = resource_size(&crashk_res);
+		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
		of_add_property(node, &crashk_size_prop);
	}
 
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
	 * memory_limit is required by the kexec-tools to limit the
	 * crash regions to the actual memory used.
	 */
+	mem_limit = cpu_to_be_ulong(memory_limit);
	of_update_property(node, &memory_limit_prop);
 }
 
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
		of_remove_property(node, prop);
 
	/* information needed by userspace when using default_machine_kexec */
-	kernel_end = __pa(_end);
+	kernel_end = cpu_to_be_ulong(__pa(_end));
	of_add_property(node, &kernel_end_prop);
 
	export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
 
 /* Values we need to export to the second kernel via the device tree.
 */
 static unsigned long htab_base;
+static unsigned long htab_size;
 
 static struct property htab_base_prop = {
	.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
 static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
-	.value = &htab_size_bytes,
+	.value = &htab_size,
 };
 
 static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
	if (prop)
		of_remove_property(node, prop);
 
-	htab_base = __pa(htab_address);
+	htab_base = cpu_to_be64(__pa(htab_address));
	of_add_property(node, &htab_base_prop);
+	htab_size = cpu_to_be64(htab_size_bytes);
	of_add_property(node, &htab_size_prop);
 
	of_node_put(node);
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index cadef7e64e42..a7fd4cb78b78 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -70,7 +70,7 @@ static void mce_set_error_info(struct machine_check_event *mce,
 */
 void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
-		    uint64_t addr)
+		    uint64_t nip, uint64_t addr)
 {
	uint64_t srr1;
	int index = __get_cpu_var(mce_nest_count)++;
@@ -86,7 +86,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
 
	/* Populate generic machine check info */
	mce->version = MCE_V1;
-	mce->srr0 = regs->nip;
+	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 27c93f41166f..aa9aff3d6ad3 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -26,6 +26,7 @@
 #include <linux/ptrace.h>
 #include <asm/mmu.h>
 #include <asm/mce.h>
+#include <asm/machdep.h>
 
 /* flush SLBs and reload */
 static void flush_and_reload_slb(void)
@@ -197,13 +198,32 @@ static void mce_get_derror_p7(struct mce_error_info *mce_err, uint64_t dsisr)
	}
 }
 
+static long mce_handle_ue_error(struct pt_regs *regs)
+{
+	long handled = 0;
+
+	/*
+	 * On specific SCOM read via MMIO we may get a machine check
+	 * exception with SRR0 pointing inside opal. If that is the
+	 * case OPAL may have recovery address to re-read SCOM data in
+	 * different way and hence we can recover from this MC.
+	 */
+
+	if (ppc_md.mce_check_early_recovery) {
+		if (ppc_md.mce_check_early_recovery(regs))
+			handled = 1;
+	}
+	return handled;
+}
+
 long __machine_check_early_realmode_p7(struct pt_regs *regs)
 {
-	uint64_t srr1, addr;
+	uint64_t srr1, nip, addr;
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };
 
	srr1 = regs->msr;
+	nip = regs->nip;
 
	/*
	 * Handle memory errors depending whether this was a load/store or
@@ -221,7 +241,11 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
		addr = regs->nip;
	}
 
-	save_mce_event(regs, handled, &mce_error_info, addr);
+	/* Handle UE error.
 */
+	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+		handled = mce_handle_ue_error(regs);
+
+	save_mce_event(regs, handled, &mce_error_info, nip, addr);
	return handled;
 }
 
@@ -263,11 +287,12 @@ static long mce_handle_derror_p8(uint64_t dsisr)
 
 long __machine_check_early_realmode_p8(struct pt_regs *regs)
 {
-	uint64_t srr1, addr;
+	uint64_t srr1, nip, addr;
	long handled = 1;
	struct mce_error_info mce_error_info = { 0 };
 
	srr1 = regs->msr;
+	nip = regs->nip;
 
	if (P7_SRR1_MC_LOADSTORE(srr1)) {
		handled = mce_handle_derror_p8(regs->dsisr);
@@ -279,6 +304,10 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
		addr = regs->nip;
	}
 
-	save_mce_event(regs, handled, &mce_error_info, addr);
+	/* Handle UE error. */
+	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+		handled = mce_handle_ue_error(regs);
+
+	save_mce_event(regs, handled, &mce_error_info, nip, addr);
	return handled;
 }
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 879f09620f83..7c6bb4b17b49 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
 _GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
 	lwz	r10,THREAD+KSP_LIMIT(r2)
-	addi	r11,r3,THREAD_INFO_GAP
+	addi	r11,r4,THREAD_INFO_GAP
 	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	mr	r1,r4
 	stw	r10,8(r1)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index bf0aada02fe4..ad302f845e5d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -152,7 +152,8 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc;
	new_paca->kernelbase = (unsigned long) _stext;
-	new_paca->kernel_msr = MSR_KERNEL;
+	/* Only set MSR:IR/DR when MMU is initialized */
+	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index a9e311f7a9dd..2a4779091a58 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -208,7 +208,6 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
 {
	struct pci_controller* hose;
-	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;
 
@@ -230,8 +229,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
	 * used on pre-domains setup. We return the first match
	 */
 
-	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
-		bus = pci_bus_b(ln);
+	list_for_each_entry(bus, &pci_root_buses, node) {
		if (in_bus >= bus->number && in_bus <= bus->busn_res.end)
			break;
		bus = NULL;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1738..31d021506d21 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -610,6 +610,31 @@ out_and_saveregs:
	tm_save_sprs(thr);
 }
 
+extern void __tm_recheckpoint(struct thread_struct *thread,
+			      unsigned long orig_msr);
+
+void tm_recheckpoint(struct thread_struct *thread,
+		     unsigned long orig_msr)
+{
+	unsigned long flags;
+
+	/* We really can't be interrupted here as the TEXASR registers can't
+	 * change and later in the trecheckpoint code, we have a userspace R1.
+	 * So let's hard disable over this region.
+	 */
+	local_irq_save(flags);
+	hard_irq_disable();
+
+	/* The TM SPRs are restored here, so that TEXASR.FS can be set
+	 * before the trecheckpoint and no explosion occurs.
+	 */
+	tm_restore_sprs(thread);
+
+	__tm_recheckpoint(thread, orig_msr);
+
+	local_irq_restore(flags);
+}
+
 static inline void tm_recheckpoint_new_task(struct task_struct *new)
 {
	unsigned long msr;
@@ -628,13 +653,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
	if (!new->thread.regs)
		return;
 
-	/* The TM SPRs are restored here, so that TEXASR.FS can be set
-	 * before the trecheckpoint and no explosion occurs.
-	 */
-	tm_restore_sprs(&new->thread);
-
-	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
+	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
+		tm_restore_sprs(&new->thread);
		return;
+	}
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
@@ -1048,6 +1070,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
+	/*
+	 * Flush TM state out so we can copy it. __switch_to_tm() does this
+	 * flush but it removes the checkpointed state from the current CPU and
+	 * transitions the CPU out of TM mode. Hence we need to call
+	 * tm_recheckpoint_new_task() (on the same task) to restore the
+	 * checkpointed state back and the TM mode.
+	 */
+	__switch_to_tm(src);
+	tm_recheckpoint_new_task(src);
 
	*dst = *src;
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f58c0d3aaeb4..668aa4791fd7 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -33,6 +33,7 @@
 #include <linux/irq.h>
 #include <linux/memblock.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -346,45 +347,45 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 #endif
	}
 
-	if (found >= 0) {
-		DBG("boot cpu: logical %d physical %d\n", found,
-		    be32_to_cpu(intserv[found_thread]));
-		boot_cpuid = found;
-		set_hard_smp_processor_id(found,
-			be32_to_cpu(intserv[found_thread]));
+	/* Not the boot CPU */
+	if (found < 0)
+		return 0;
 
-		/*
-		 * PAPR defines "logical" PVR values for cpus that
-		 * meet various levels of the architecture:
-		 * 0x0f000001	Architecture version 2.04
-		 * 0x0f000002	Architecture version 2.05
-		 * If the cpu-version property in the cpu node contains
-		 * such a value, we call identify_cpu again with the
-		 * logical PVR value in order to use the cpu feature
-		 * bits appropriate for the architecture level.
-		 *
-		 * A POWER6 partition in "POWER6 architected" mode
-		 * uses the 0x0f000002 PVR value; in POWER5+ mode
-		 * it uses 0x0f000001.
-		 */
-		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
-		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
-			identify_cpu(0, be32_to_cpup(prop));
+	DBG("boot cpu: logical %d physical %d\n", found,
+	    be32_to_cpu(intserv[found_thread]));
+	boot_cpuid = found;
+	set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));
 
-		identical_pvr_fixup(node);
-	}
+	/*
+	 * PAPR defines "logical" PVR values for cpus that
+	 * meet various levels of the architecture:
+	 * 0x0f000001	Architecture version 2.04
+	 * 0x0f000002	Architecture version 2.05
+	 * If the cpu-version property in the cpu node contains
+	 * such a value, we call identify_cpu again with the
+	 * logical PVR value in order to use the cpu feature
+	 * bits appropriate for the architecture level.
+	 *
+	 * A POWER6 partition in "POWER6 architected" mode
+	 * uses the 0x0f000002 PVR value; in POWER5+ mode
+	 * it uses 0x0f000001.
+	 */
+	prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+	if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+		identify_cpu(0, be32_to_cpup(prop));
+
+	identical_pvr_fixup(node);
 
	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);
	check_cpu_slb_size(node);
 
-#ifdef CONFIG_PPC_PSERIES
+#ifdef CONFIG_PPC64
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
 #endif
-
	return 0;
 }
 
@@ -588,6 +589,8 @@ static void __init early_reserve_mem_dt(void)
			memblock_reserve(base, size);
		}
	}
+
+	early_init_fdt_scan_reserved_mem();
 }
 
 static void __init early_reserve_mem(void)
@@ -744,6 +747,10 @@ void __init early_init_devtree(void *params)
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+	if (boot_cpuid < 0) {
+		printk("Failed to identify boot CPU !\n");
+		BUG();
+	}
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
	/* We'll later wait for secondaries to check in; there are
@@ -752,6 +759,11 @@ void __init early_init_devtree(void *params)
	spinning_secondaries = boot_cpu_count - 1;
 #endif
 
+#ifdef CONFIG_PPC_POWERNV
+	/* Scan and build the list of machine check recoverable ranges */
+	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
+#endif
+
	DBG(" <- early_init_devtree()\n");
 }
 
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..d88736fbece6 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
	 * R_PPC64_RELATIVE ones.
	 */
	mtctr	r8
-5:	lwz	r0,12(9)	/* ELF64_R_TYPE(reloc->r_info) */
-	cmpwi	r0,R_PPC64_RELATIVE
+5:	ld	r0,8(9)		/* ELF64_R_TYPE(reloc->r_info) */
+	cmpdi	r0,R_PPC64_RELATIVE
	bne	6f
	ld	r6,0(r9)	/* reloc->r_offset */
	ld	r0,16(r9)	/* reloc->r_addend */
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
 
6:	blr
 
+.balign 8
p_dyn:	.llong	__dynamic_start - 0b
p_rela:	.llong	__rela_dyn_start - 0b
p_st:	.llong	_stext - 0b
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 4cf674d7d5ae..8cd5ed049b5d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -993,32 +993,36 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
+	uint32_t ext_log_length = rtas_error_extended_log_length(log);
+	uint8_t log_format = rtas_ext_event_log_format(ext_log);
+	uint32_t company_id = rtas_ext_event_company_id(ext_log);
 
	/* Check that we understand the format */
-	if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
-	    ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
-	    ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
+	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
+	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
+	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;
 
-	log_end = log->buffer + log->extended_log_length;
+	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;
 
	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
-		if (sect->id == section_id)
+		if (pseries_errorlog_id(sect) == section_id)
			return sect;
-		p += sect->length;
+		p += pseries_errorlog_length(sect);
	}
 
	return NULL;
 }
 
+/* We assume to be passed big endian arguments */
 asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 {
	struct rtas_args args;
	unsigned long flags;
	char *buff_copy, *errbuf = NULL;
-	int nargs;
+	int nargs, nret, token;
	int rc;
 
	if (!capable(CAP_SYS_ADMIN))
@@ -1027,10 +1031,13 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;
 
-	nargs = args.nargs;
+	nargs = be32_to_cpu(args.nargs);
+	nret = be32_to_cpu(args.nret);
+	token = be32_to_cpu(args.token);
+
	if (nargs > ARRAY_SIZE(args.args)
-	    || args.nret > ARRAY_SIZE(args.args)
-	    || nargs + args.nret > ARRAY_SIZE(args.args))
+	    || nret > ARRAY_SIZE(args.args)
+	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;
 
	/* Copy in args. */
@@ -1038,14 +1045,14 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;
 
-	if (args.token == RTAS_UNKNOWN_SERVICE)
+	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;
 
	args.rets = &args.args[nargs];
-	memset(args.rets, 0, args.nret * sizeof(rtas_arg_t));
+	memset(args.rets, 0, nret * sizeof(rtas_arg_t));
 
	/* Need to handle ibm,suspend_me call specially */
-	if (args.token == ibm_suspend_me_token) {
+	if (token == ibm_suspend_me_token) {
		rc = rtas_ibm_suspend_me(&args);
		if (rc)
			return rc;
@@ -1062,7 +1069,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
	/* A -1 return code indicates that the last command couldn't
	   be completed due to a hardware error. */
-	if (args.rets[0] == -1)
+	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);
 
	unlock_rtas(flags);
@@ -1077,7 +1084,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 
	/* Copy out args. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
-			 args.nret * sizeof(rtas_arg_t)) != 0)
+			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;
 
	return 0;
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 1130c53ad652..e736387fee6a 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -150,8 +150,8 @@ static void printk_log_rtas(char *buf, int len)
		struct rtas_error_log *errlog = (struct rtas_error_log *)buf;
 
		printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n",
-		       error_log_cnt, rtas_event_type(errlog->type),
-		       errlog->severity);
+		       error_log_cnt, rtas_event_type(rtas_error_type(errlog)),
+		       rtas_error_severity(errlog));
	}
 }
 
@@ -159,14 +159,16 @@ static int log_rtas_len(char * buf)
 {
	int len;
	struct rtas_error_log *err;
+	uint32_t extended_log_length;
 
	/* rtas fixed header */
	len = 8;
	err = (struct rtas_error_log *)buf;
-	if (err->extended && err->extended_log_length) {
+	extended_log_length = rtas_error_extended_log_length(err);
+	if (rtas_error_extended(err) && extended_log_length) {
 
		/* extended header */
-		len += err->extended_log_length;
+		len += extended_log_length;
	}
 
	if (rtas_error_log_max == 0)
@@ -293,15 +295,13 @@ void prrn_schedule_update(u32 scope)
 
 static void handle_rtas_event(const struct rtas_error_log *log)
 {
-	if (log->type == RTAS_TYPE_PRRN) {
-		/* For PRRN Events the extended log length is used to denote
-		 * the scope for calling rtas update-nodes.
-		 */
-		if (prrn_is_enabled())
-			prrn_schedule_update(log->extended_log_length);
-	}
+	if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
+		return;
 
-	return;
+	/* For PRRN Events the extended log length is used to denote
+	 * the scope for calling rtas update-nodes.
+ */ + prrn_schedule_update(rtas_error_extended_log_length(log)); } #else diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index bc76cc6b419c..79b7612ac6fa 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -76,6 +76,9 @@ EXPORT_SYMBOL(ppc_md); struct machdep_calls *machine_id; EXPORT_SYMBOL(machine_id); +int boot_cpuid = -1; +EXPORT_SYMBOL_GPL(boot_cpuid); + unsigned long klimit = (unsigned long) _end; char cmd_line[COMMAND_LINE_SIZE]; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 2b0da27eaee4..ea4fda60e57b 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -44,8 +44,6 @@ extern void bootx_init(unsigned long r4, unsigned long phys); -int boot_cpuid = -1; -EXPORT_SYMBOL_GPL(boot_cpuid); int boot_cpuid_phys; EXPORT_SYMBOL_GPL(boot_cpuid_phys); @@ -247,7 +245,12 @@ static void __init exc_lvl_early_init(void) /* interrupt stacks must be in lowmem, we get that for free on ppc32 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ for_each_possible_cpu(i) { +#ifdef CONFIG_SMP hw_cpu = get_hard_smp_processor_id(i); +#else + hw_cpu = 0; +#endif + critirq_ctx[hw_cpu] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); #ifdef CONFIG_BOOKE diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index f5f11a7d30e5..fbe24377eda3 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -74,7 +74,6 @@ #define DBG(fmt...) #endif -int boot_cpuid = 0; int spinning_secondaries; u64 ppc64_pft_size; @@ -102,6 +101,8 @@ static void setup_tlb_core_data(void) { int cpu; + BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0); + for_each_possible_cpu(cpu) { int first = cpu_first_thread_sibling(cpu); @@ -194,6 +195,19 @@ static void fixup_boot_paca(void) get_paca()->data_offset = 0; } +static void cpu_ready_for_interrupts(void) +{ + /* Set IR and DR in PACA MSR */ + get_paca()->kernel_msr = MSR_KERNEL; + + /* Enable AIL if supported */ + if (cpu_has_feature(CPU_FTR_HVMODE) && + cpu_has_feature(CPU_FTR_ARCH_207S)) { + unsigned long lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); + } +} + /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. We rely on the "feature" of @@ -260,6 +274,14 @@ void __init early_setup(unsigned long dt_ptr) /* Initialize the hash table or TLB handling */ early_init_mmu(); + /* + * At this point, we can let interrupts switch to virtual mode + * (the MMU has been setup), so adjust the MSR in the PACA to + * have IR and DR set and enable AIL if it exists + */ + cpu_ready_for_interrupts(); + + /* Reserve large chunks of memory for use by CMA for KVM */ kvm_cma_reserve(); /* @@ -292,6 +314,13 @@ void early_setup_secondary(void) /* Initialize the hash table or TLB handling */ early_init_mmu_secondary(); + + /* + * At this point, we can let interrupts switch to virtual mode + * (the MMU has been setup), so adjust the MSR in the PACA to + * have IR and DR set. 
+ */ + cpu_ready_for_interrupts(); } #endif /* CONFIG_SMP */ @@ -552,14 +581,20 @@ static void __init irqstack_early_init(void) static void __init exc_lvl_early_init(void) { unsigned int i; + unsigned long sp; for_each_possible_cpu(i) { - critirq_ctx[i] = (struct thread_info *) - __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); - dbgirq_ctx[i] = (struct thread_info *) - __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); - mcheckirq_ctx[i] = (struct thread_info *) - __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); + sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); + critirq_ctx[i] = (struct thread_info *)__va(sp); + paca[i].crit_kstack = __va(sp + THREAD_SIZE); + + sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); + dbgirq_ctx[i] = (struct thread_info *)__va(sp); + paca[i].dbg_kstack = __va(sp + THREAD_SIZE); + + sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE); + mcheckirq_ctx[i] = (struct thread_info *)__va(sp); + paca[i].mc_kstack = __va(sp + THREAD_SIZE); } if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index a67e00aa3caa..4e47db686b5d 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -881,6 +881,8 @@ static long restore_tm_user_regs(struct pt_regs *regs, * transactional versions should be loaded. */ tm_enable(); + /* Make sure the transaction is marked as failed */ + current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); /* Get the top half of the MSR */ diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e35bf773df7a..d501dc4dc3e6 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -65,8 +65,8 @@ struct rt_sigframe { struct siginfo __user *pinfo; void __user *puc; struct siginfo info; - /* 64 bit ABI allows for 288 bytes below sp before decrementing it.
*/ - char abigap[288]; + /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ + char abigap[USER_REDZONE_SIZE]; } __attribute__ ((aligned (16))); static const char fmt32[] = KERN_INFO \ @@ -527,6 +527,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs, } #endif tm_enable(); + /* Make sure the transaction is marked as failed */ + current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index ac2621af3154..e2a4232c5871 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -35,6 +35,7 @@ #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/irq.h> +#include <asm/hw_irq.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/prom.h> @@ -145,9 +146,9 @@ static irqreturn_t reschedule_action(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t call_function_single_action(int irq, void *data) +static irqreturn_t tick_broadcast_ipi_action(int irq, void *data) { - generic_smp_call_function_single_interrupt(); + tick_broadcast_ipi_handler(); return IRQ_HANDLED; } @@ -168,14 +169,14 @@ static irqreturn_t debug_ipi_action(int irq, void *data) static irq_handler_t smp_ipi_action[] = { [PPC_MSG_CALL_FUNCTION] = call_function_action, [PPC_MSG_RESCHEDULE] = reschedule_action, - [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action, + [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action, [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action, }; const char *smp_ipi_name[] = { [PPC_MSG_CALL_FUNCTION] = "ipi call function", [PPC_MSG_RESCHEDULE] = "ipi reschedule", - [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single", + [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast", [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger", }; @@ -251,8 +252,8 @@ irqreturn_t smp_ipi_demux(void) generic_smp_call_function_interrupt(); if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE)) scheduler_ipi(); - if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE)) - generic_smp_call_function_single_interrupt(); + if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST)) + tick_broadcast_ipi_handler(); if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK)) debug_ipi_action(0, NULL); } while (info->messages); @@ -280,7 +281,7 @@ EXPORT_SYMBOL_GPL(smp_send_reschedule); void arch_send_call_function_single_ipi(int cpu) { - do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); + do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); } void arch_send_call_function_ipi_mask(const struct cpumask *mask) @@ -291,6 +292,16 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) do_message_pass(cpu, PPC_MSG_CALL_FUNCTION); } +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) + do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); +} +#endif + #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) void smp_send_debugger_break(void) { diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 97e1dc917683..d90d4b7810d6 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -975,7 +975,8 @@ static int __init topology_init(void) int cpu; register_nodes(); - register_cpu_notifier(&sysfs_cpu_nb); + + cpu_notifier_register_begin(); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); @@ -999,6 +1000,11 @@ static int __init topology_init(void) if (cpu_online(cpu)) register_cpu_online(cpu); } + + __register_cpu_notifier(&sysfs_cpu_nb); + +
cpu_notifier_register_done(); + #ifdef CONFIG_PPC64 sysfs_create_dscr_default(); #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index b3dab20acf34..122a580f7322 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -42,6 +42,7 @@ #include <linux/timex.h> #include <linux/kernel_stat.h> #include <linux/time.h> +#include <linux/clockchips.h> #include <linux/init.h> #include <linux/profile.h> #include <linux/cpu.h> @@ -106,7 +107,7 @@ struct clock_event_device decrementer_clockevent = { .irq = 0, .set_next_event = decrementer_set_next_event, .set_mode = decrementer_set_mode, - .features = CLOCK_EVT_FEAT_ONESHOT, + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP, }; EXPORT_SYMBOL(decrementer_clockevent); @@ -478,6 +479,47 @@ void arch_irq_work_raise(void) #endif /* CONFIG_IRQ_WORK */ +void __timer_interrupt(void) +{ + struct pt_regs *regs = get_irq_regs(); + u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + struct clock_event_device *evt = &__get_cpu_var(decrementers); + u64 now; + + trace_timer_interrupt_entry(regs); + + if (test_irq_work_pending()) { + clear_irq_work_pending(); + irq_work_run(); + } + + now = get_tb_or_rtc(); + if (now >= *next_tb) { + *next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); + __get_cpu_var(irq_stat).timer_irqs_event++; + } else { + now = *next_tb - now; + if (now <= DECREMENTER_MAX) + set_dec((int)now); + /* We may have raced with new irq work */ + if (test_irq_work_pending()) + set_dec(1); + __get_cpu_var(irq_stat).timer_irqs_others++; + } + +#ifdef CONFIG_PPC64 + /* collect purr register values often, for accurate calculations */ + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { + struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); + cu->current_tb = mfspr(SPRN_PURR); + } +#endif + + trace_timer_interrupt_exit(regs); +} + /* * timer_interrupt - gets called when the decrementer overflows, * with interrupts disabled. @@ -486,8 +528,6 @@ void timer_interrupt(struct pt_regs * regs) { struct pt_regs *old_regs; u64 *next_tb = &__get_cpu_var(decrementers_next_tb); - struct clock_event_device *evt = &__get_cpu_var(decrementers); - u64 now; /* Ensure a positive value is written to the decrementer, or else * some CPUs will continue to take decrementer exceptions. 
@@ -519,39 +559,7 @@ void timer_interrupt(struct pt_regs * regs) old_regs = set_irq_regs(regs); irq_enter(); - trace_timer_interrupt_entry(regs); - - if (test_irq_work_pending()) { - clear_irq_work_pending(); - irq_work_run(); - } - - now = get_tb_or_rtc(); - if (now >= *next_tb) { - *next_tb = ~(u64)0; - if (evt->event_handler) - evt->event_handler(evt); - __get_cpu_var(irq_stat).timer_irqs_event++; - } else { - now = *next_tb - now; - if (now <= DECREMENTER_MAX) - set_dec((int)now); - /* We may have raced with new irq work */ - if (test_irq_work_pending()) - set_dec(1); - __get_cpu_var(irq_stat).timer_irqs_others++; - } - -#ifdef CONFIG_PPC64 - /* collect purr register values often, for accurate calculations */ - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { - struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); - cu->current_tb = mfspr(SPRN_PURR); - } -#endif - - trace_timer_interrupt_exit(regs); - + __timer_interrupt(); irq_exit(); set_irq_regs(old_regs); } @@ -825,6 +833,15 @@ static void decrementer_set_mode(enum clock_event_mode mode, decrementer_set_next_event(DECREMENTER_MAX, dev); } +/* Interrupt handler for the timer broadcast IPI */ +void tick_broadcast_ipi_handler(void) +{ + u64 *next_tb = &__get_cpu_var(decrementers_next_tb); + + *next_tb = get_tb_or_rtc(); + __timer_interrupt(); +} + static void register_decrementer_clockevent(int cpu) { struct clock_event_device *dec = &per_cpu(decrementers, cpu); @@ -928,6 +945,7 @@ void __init time_init(void) clocksource_init(); init_decrementer_clockevent(); + tick_setup_hrtimer_broadcast(); } diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index ef47bcbd4352..03567c05950a 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S @@ -307,7 +307,7 @@ dont_backup_fp: * Call with IRQs off, stacks get all out of sync for * some periods in here! */ -_GLOBAL(tm_recheckpoint) +_GLOBAL(__tm_recheckpoint) mfcr r5 mflr r0 stw r5, 8(r1) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 33cd7a0b8e73..1bd7ca298fa1 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -1379,8 +1379,9 @@ void facility_unavailable_exception(struct pt_regs *regs) if (!arch_irq_disabled_regs(regs)) local_irq_enable(); - pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", - hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); + pr_err_ratelimited( + "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", + hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); @@ -1867,6 +1868,7 @@ struct ppc_emulated ppc_emulated = { #ifdef CONFIG_PPC64 WARN_EMULATED_SETUP(mfdscr), WARN_EMULATED_SETUP(mtdscr), + WARN_EMULATED_SETUP(lq_stq), #endif }; diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 094e45c16a17..ce74c335a6a4 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -715,8 +715,8 @@ int vdso_getcpu_init(void) unsigned long cpu, node, val; /* - * SPRG3 contains the CPU in the bottom 16 bits and the NUMA node in - * the next 16 bits. The VDSO uses this to implement getcpu(). + * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node + * in the next 16 bits. The VDSO uses this to implement getcpu(). 
*/ cpu = get_cpu(); WARN_ON_ONCE(cpu > 0xffff); @@ -725,8 +725,8 @@ int vdso_getcpu_init(void) WARN_ON_ONCE(node > 0xffff); val = (cpu & 0xfff) | ((node & 0xffff) << 16); - mtspr(SPRN_SPRG3, val); - get_paca()->sprg3 = val; + mtspr(SPRN_SPRG_VDSO_WRITE, val); + get_paca()->sprg_vdso = val; put_cpu(); diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S index 47afd08c90f7..23eb9a9441bd 100644 --- a/arch/powerpc/kernel/vdso32/getcpu.S +++ b/arch/powerpc/kernel/vdso32/getcpu.S @@ -29,7 +29,7 @@ */ V_FUNCTION_BEGIN(__kernel_getcpu) .cfi_startproc - mfspr r5,SPRN_USPRG3 + mfspr r5,SPRN_SPRG_VDSO_READ cmpdi cr0,r3,0 cmpdi cr1,r4,0 clrlwi r6,r5,16 diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S index 79683d0393f5..6ac107ac402a 100644 --- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S +++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S @@ -6,7 +6,7 @@ .globl vdso32_start, vdso32_end .balign PAGE_SIZE vdso32_start: - .incbin "arch/powerpc/kernel/vdso32/vdso32.so" + .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg" .balign PAGE_SIZE vdso32_end: diff --git a/arch/powerpc/kernel/vdso64/getcpu.S b/arch/powerpc/kernel/vdso64/getcpu.S index 47afd08c90f7..23eb9a9441bd 100644 --- a/arch/powerpc/kernel/vdso64/getcpu.S +++ b/arch/powerpc/kernel/vdso64/getcpu.S @@ -29,7 +29,7 @@ */ V_FUNCTION_BEGIN(__kernel_getcpu) .cfi_startproc - mfspr r5,SPRN_USPRG3 + mfspr r5,SPRN_SPRG_VDSO_READ cmpdi cr0,r3,0 cmpdi cr1,r4,0 clrlwi r6,r5,16 diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S index 8df9e2463007..df60fca6a13d 100644 --- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S +++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S @@ -6,7 +6,7 @@ .globl vdso64_start, vdso64_end .balign PAGE_SIZE vdso64_start: - .incbin "arch/powerpc/kernel/vdso64/vdso64.so" + .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg" .balign PAGE_SIZE vdso64_end: diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 826d8bd9e522..904c66128fae 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1432,7 +1432,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) /* needed to ensure proper operation of coherent allocations * later, in case driver doesn't set it explicitly */ - dma_coerce_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64)); + viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64); + viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask; } /* register with generic device framework */
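
The reloc_64.S hunk above widens the relocation-type load from lwz to ld so it works on either endianness. Below is a minimal C sketch of the loop the assembly implements, under the assumption that the entries follow the standard ELF64 rela layout and that R_PPC64_RELATIVE has its elf.h value of 22; the names are illustrative, not kernel API.

#include <stdint.h>

typedef struct {
	uint64_t r_offset;	/* where, relative to the load base, to patch */
	uint64_t r_info;	/* ELF64_R_TYPE() is the low 32 bits */
	int64_t  r_addend;
} rela_sketch;

#define R_PPC64_RELATIVE 22

static void apply_relative_relocs(uint64_t base, rela_sketch *rela,
				  uint64_t count)
{
	for (uint64_t i = 0; i < count; i++) {
		/*
		 * Compare all 64 bits of r_info, as ld/cmpdi now do. For
		 * R_PPC64_RELATIVE the symbol half is zero, so the full
		 * compare works on either endianness; the old lwz at a
		 * fixed offset of 12 found the type only on big-endian.
		 */
		if (rela[i].r_info != R_PPC64_RELATIVE)
			continue;
		*(uint64_t *)(base + rela[i].r_offset) =
			base + rela[i].r_addend;
	}
}

The added .balign 8 keeps the p_dyn/p_rela/p_st constants 8-byte aligned for the ld instructions that fetch them.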
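
The rtas.c and rtasd.c hunks replace raw structure reads (log->extended_log_length, sect->id, ...) with accessor helpers because RTAS hands the kernel big-endian data, which a little-endian kernel must byte-swap on every access. A sketch of the accessor idea, using a hypothetical mirror struct rather than the real rtas_error_log layout from asm/rtas.h:

#include <linux/types.h>
#include <asm/byteorder.h>	/* be32_to_cpu() */

/* Hypothetical mirror of an RTAS log header; fields stay big-endian */
struct rtas_log_sketch {
	__be32 word1;			/* version/severity/type bit-fields */
	__be32 extended_log_length;
	u8 buffer[];
};

static inline u32 sketch_ext_log_length(const struct rtas_log_sketch *log)
{
	/* a no-op on big-endian kernels, a byte reversal on little-endian */
	return be32_to_cpu(log->extended_log_length);
}

Funneling every on-wire field through one helper keeps callers such as the get_pseries_errorlog() section walk endian-agnostic.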
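
The ppc_rtas() hunk converts nargs, nret and token from big-endian once, up front, and then validates the counts. Checking each bound before the sum matters: once both values are known to be at most the array size, nargs + nret cannot wrap before the final comparison. A standalone sketch, assuming the args array of struct rtas_args holds 16 slots:

#include <stdint.h>

#define RTAS_MAX_ARGS 16	/* assumption: mirrors ARRAY_SIZE(args.args) */

/* Returns nonzero when user-supplied counts describe a representable call. */
static int rtas_counts_ok(uint32_t nargs, uint32_t nret)
{
	if (nargs > RTAS_MAX_ARGS)		/* bounds the argument copy-in */
		return 0;
	if (nret > RTAS_MAX_ARGS)		/* bounds the memset of rets */
		return 0;
	if (nargs + nret > RTAS_MAX_ARGS)	/* both views share one array */
		return 0;
	return 1;
}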
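
In the setup_64.c exc_lvl_early_init() hunk, each exception level now gets one THREAD_SIZE block per CPU: the thread_info sits at the bottom and the PACA keeps a pointer one past the top, because stacks grow downward. A hypothetical helper capturing the pattern the code open-codes three times (in this tree memblock_alloc() returns a physical address, hence the __va()):

/* Hypothetical helper, as it would sit in setup_64.c */
static void *alloc_exc_stack(struct thread_info **ctx)
{
	unsigned long pa = memblock_alloc(THREAD_SIZE, THREAD_SIZE);

	*ctx = (struct thread_info *)__va(pa);	/* thread_info at the base */
	return __va(pa + THREAD_SIZE);		/* initial SP, top of block */
}

paca[i].crit_kstack, dbg_kstack and mc_kstack then each point at the top of their block, ready to be loaded as the stack pointer on exception entry.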
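
The smp.c hunks retire PPC_MSG_CALL_FUNC_SINGLE (single-target function calls now ride the ordinary CALL_FUNCTION path) and reuse the freed message slot for PPC_MSG_TICK_BROADCAST. The smp_ipi_demux() loop they feed follows a claim-and-dispatch pattern; here is a simplified, self-contained sketch, noting that the kernel actually packs messages into per-byte lanes of a word rather than the single bits used below:

#include <stdatomic.h>

enum ipi_msg { MSG_CALL_FUNCTION, MSG_RESCHEDULE,
	       MSG_TICK_BROADCAST, MSG_DEBUGGER_BREAK };
#define MSG_BIT(m) (1u << (m))

/* Hypothetical handlers standing in for the kernel's actions */
void handle_call_function(void);
void handle_reschedule(void);
void handle_tick_broadcast(void);
void handle_debugger_break(void);

static void ipi_demux_sketch(atomic_uint *messages)
{
	unsigned int all;

	do {
		/* atomically claim everything posted so far */
		all = atomic_exchange(messages, 0);
		if (all & MSG_BIT(MSG_CALL_FUNCTION))
			handle_call_function();
		if (all & MSG_BIT(MSG_RESCHEDULE))
			handle_reschedule();
		if (all & MSG_BIT(MSG_TICK_BROADCAST))
			handle_tick_broadcast();
		if (all & MSG_BIT(MSG_DEBUGGER_BREAK))
			handle_debugger_break();
	} while (atomic_load(messages));	/* catch late arrivals */
}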
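
The time.c refactor pulls the body of timer_interrupt() out into __timer_interrupt() so that the new tick_broadcast_ipi_handler() can reuse it: the handler rewinds the per-CPU decrementers_next_tb to the current timebase, which forces __timer_interrupt() into its event-expired branch exactly as a native decrementer interrupt would. The other branch reprograms the decrementer with the remaining delta; a sketch of that clamping, assuming DECREMENTER_MAX is the register's largest positive value (0x7fffffff):

#include <stdint.h>

#define DECREMENTER_MAX 0x7fffffffu	/* assumption: max positive load */

/* Hypothetical stand-in for the set_dec() path: returns the reload value. */
static uint32_t decrementer_reload(uint64_t now, uint64_t next_tb)
{
	uint64_t delta = next_tb - now;	/* caller ensured next_tb > now */

	if (delta <= DECREMENTER_MAX)
		return (uint32_t)delta;	/* the event fits one countdown */
	/* too far out: count down from the maximum and go around again */
	return DECREMENTER_MAX;
}

The newly added CLOCK_EVT_FEAT_C3STOP flag on the decrementer clockevent ties this together: it tells the tick layer that the timer stops in deep idle states, so a still-running CPU must deliver the PPC_MSG_TICK_BROADCAST IPI on its behalf.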
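
Finally, the SPRG3/SPRG_VDSO rename: vdso_getcpu_init() packs the CPU number into the low halfword of the VDSO scratch register and the NUMA node into the next halfword, and __kernel_getcpu unpacks them with clrlwi/rlwinm. A userspace-flavoured sketch of that unpacking, under the assumption that SPR 259 is the problem-state read alias behind SPRN_SPRG_VDSO_READ:

/* Sketch only; real code should go through the vDSO's getcpu entry point. */
static int getcpu_sketch(unsigned int *cpup, unsigned int *nodep)
{
	unsigned long val;

	/* 259 = user-readable SPRG3, per the assumption above */
	asm volatile("mfspr %0,259" : "=r" (val));
	if (cpup)
		*cpup = val & 0xffff;		/* low halfword: CPU */
	if (nodep)
		*nodep = (val >> 16) & 0xffff;	/* next halfword: node */
	return 0;
}

Note that the kernel masks the CPU with 0xfff when packing, so this scheme truncates CPU IDs above 4095.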