Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/Kbuild                 |   1
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h           |   9
-rw-r--r--  arch/powerpc/include/asm/btext.h                |   1
-rw-r--r--  arch/powerpc/include/asm/cacheflush.h           |   8
-rw-r--r--  arch/powerpc/include/asm/cputable.h             |   9
-rw-r--r--  arch/powerpc/include/asm/emulated_ops.h         |   2
-rw-r--r--  arch/powerpc/include/asm/epapr_hcalls.h         |   6
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h        |  35
-rw-r--r--  arch/powerpc/include/asm/io.h                   |  33
-rw-r--r--  arch/powerpc/include/asm/irqflags.h             |   7
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h           |  38
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h        |   4
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h             |  14
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h              |  25
-rw-r--r--  arch/powerpc/include/asm/lppaca.h               |  68
-rw-r--r--  arch/powerpc/include/asm/mpc5121.h              |  18
-rw-r--r--  arch/powerpc/include/asm/mpc85xx.h              |  92
-rw-r--r--  arch/powerpc/include/asm/mpic.h                 |   7
-rw-r--r--  arch/powerpc/include/asm/opal.h                 |  27
-rw-r--r--  arch/powerpc/include/asm/paca.h                 |  11
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h           |   2
-rw-r--r--  arch/powerpc/include/asm/perf_event_fsl_emb.h   |   2
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h    |   4
-rw-r--r--  arch/powerpc/include/asm/plpar_wrappers.h       | 300
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h           |  47
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h              |  22
-rw-r--r--  arch/powerpc/include/asm/prom.h                 |  10
-rw-r--r--  arch/powerpc/include/asm/reg.h                  |  29
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h            |   8
-rw-r--r--  arch/powerpc/include/asm/reg_fsl_emb.h          |  24
-rw-r--r--  arch/powerpc/include/asm/rtas.h                 |   8
-rw-r--r--  arch/powerpc/include/asm/smp.h                  |   3
-rw-r--r--  arch/powerpc/include/asm/spinlock.h             |   6
-rw-r--r--  arch/powerpc/include/asm/switch_to.h            |  11
-rw-r--r--  arch/powerpc/include/asm/timex.h                |   4
-rw-r--r--  arch/powerpc/include/asm/topology.h             |   1
-rw-r--r--  arch/powerpc/include/asm/udbg.h                 |   9
-rw-r--r--  arch/powerpc/include/uapi/asm/elf.h             |  21
38 files changed, 744 insertions, 182 deletions
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300db..704e6f10ae80 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 6e82f5f9a6fd..4b237aa35660 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -32,6 +32,15 @@
#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
+
+#ifdef __BIG_ENDIAN__
+#define LDX_BE stringify_in_c(ldx)
+#define STDX_BE stringify_in_c(stdx)
+#else
+#define LDX_BE stringify_in_c(ldbrx)
+#define STDX_BE stringify_in_c(stdbrx)
+#endif
+
#else /* 32-bit */
/* operations for longs and pointers */
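
The new LDX_BE/STDX_BE macros choose between the plain indexed forms (ldx/stdx) on big-endian builds and the byte-reversing forms (ldbrx/stdbrx) on little-endian ones, so assembly that touches big-endian in-memory data gets a host-endian value either way. A minimal C sketch of the same idea, assuming GCC's __BIG_ENDIAN__ predefine and __builtin_bswap64 (illustrative, not part of the patch):

    #include <stdint.h>

    /* Load a big-endian 64-bit value as host-endian, mirroring LDX_BE. */
    static inline uint64_t load_be64(const uint64_t *p)
    {
    #ifdef __BIG_ENDIAN__
            return *p;                    /* ldx: memory already matches */
    #else
            return __builtin_bswap64(*p); /* ldbrx: byte-reversed load */
    #endif
    }
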
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 906f46e31006..89fc382648bc 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,6 +13,7 @@ extern void btext_update_display(unsigned long phys, int width, int height,
extern void btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address);
extern void btext_prepare_BAT(void);
+extern void btext_map(void);
extern void btext_unmap(void);
extern void btext_drawchar(char c);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index b843e35122e8..5b9312220e84 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -32,13 +32,7 @@ extern void flush_dcache_page(struct page *page);
extern void __flush_disable_L1(void);
-extern void __flush_icache_range(unsigned long, unsigned long);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- __flush_icache_range(start, stop);
-}
-
+extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
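
Making flush_icache_range() out of line moves the CPU_FTR_COHERENT_ICACHE test from every call site into a single C implementation. One plausible shape of that function, mirroring the inline it replaces (a sketch under that assumption, not the exact body from this series):

    void flush_icache_range(unsigned long start, unsigned long stop)
    {
            /* Coherent icaches need no explicit flush */
            if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                    return;
            __flush_icache_range(start, stop);
    }
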
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 6f3887d884d2..0d4939ba48e7 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -371,14 +371,19 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+/*
+ * e5500/e6500 erratum A-006958 is a timebase bug that can use the
+ * same workaround as CPU_FTR_CELL_TB_BUG.
+ */
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
+ CPU_FTR_CELL_TB_BUG)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 63f2a22e9954..5a8b82aa7241 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -46,8 +46,6 @@ extern struct ppc_emulated {
struct ppc_emulated_entry unaligned;
#ifdef CONFIG_MATH_EMULATION
struct ppc_emulated_entry math;
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
- struct ppc_emulated_entry 8xx;
#endif
#ifdef CONFIG_VSX
struct ppc_emulated_entry vsx;
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index d3d634274d2c..86b0ac79990c 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -105,6 +105,12 @@
extern bool epapr_paravirt_enabled;
extern u32 epapr_hypercall_start[];
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
/*
* We use "uintptr_t" to define a register because it's guaranteed to be a
* 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 07ca627e52c0..cca12f084842 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -48,17 +48,18 @@
#define EX_LR 72
#define EX_CFAR 80
#define EX_PPR 88 /* SMT thread status register (priority) */
+#define EX_CTR 96
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
ld r12,PACAKBASE(r13); /* get high part of &label */ \
mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \
LOAD_HANDLER(r12,label); \
- mtlr r12; \
+ mtctr r12; \
mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \
li r10,MSR_RI; \
mtmsrd r10,1; /* Set RI (EE=0) */ \
- blr;
+ bctr;
#else
/* If not relocatable, we can jump directly -- and save messing with LR */
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
@@ -97,18 +98,18 @@
#if defined(CONFIG_RELOCATABLE)
/*
- * If we support interrupts with relocation on AND we're a relocatable
- * kernel, we need to use LR to get to the 2nd level handler. So, save/restore
- * it when required.
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler. So, save/restore it
+ * when required.
*/
-#define SAVE_LR(reg, area) mflr reg ; std reg,area+EX_LR(r13)
-#define GET_LR(reg, area) ld reg,area+EX_LR(r13)
-#define RESTORE_LR(reg, area) ld reg,area+EX_LR(r13) ; mtlr reg
+#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
#else
-/* ...else LR is unused and in register. */
-#define SAVE_LR(reg, area)
-#define GET_LR(reg, area) mflr reg
-#define RESTORE_LR(reg, area)
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area) mfctr reg
+#define RESTORE_CTR(reg, area)
#endif
/*
@@ -164,7 +165,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define __EXCEPTION_PROLOG_1(area, extra, vec) \
OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \
OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \
- SAVE_LR(r10, area); \
+ SAVE_CTR(r10, area); \
mfcr r9; \
extra(vec); \
std r11,area+EX_R11(r13); \
@@ -270,7 +271,7 @@ do_kvm_##n: \
sth r1,PACA_TRAP_SAVE(r13); \
std r3,area+EX_R3(r13); \
addi r3,r13,area; /* r3 -> where regs are saved*/ \
- RESTORE_LR(r1, area); \
+ RESTORE_CTR(r1, area); \
b bad_stack; \
3: std r9,_CCR(r1); /* save CR in stackframe */ \
std r11,_NIP(r1); /* save SRR0 in stackframe */ \
@@ -298,10 +299,10 @@ do_kvm_##n: \
ld r10,area+EX_CFAR(r13); \
std r10,ORIG_GPR3(r1); \
END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \
- GET_LR(r9,area); /* Get LR, later save to stack */ \
+ mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \
- mfctr r10; /* save CTR in stackframe */ \
+ GET_CTR(r10, area); \
std r10,_CTR(r1); \
lbz r10,PACASOFTIRQEN(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
@@ -479,7 +480,7 @@ label##_relon_hv: \
*/
/* Exception addition: Hard disable interrupts */
-#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
+#define DISABLE_INTS RECONCILE_IRQ_STATE(r10,r11)
#define ADD_NVGPRS \
bl .save_nvgprs
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index dd15e5e37d6d..5a64757dc0d1 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -69,8 +69,18 @@ extern unsigned long pci_dram_offset;
extern resource_size_t isa_mem_base;
-#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
-#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+/* Boolean set by platform if PIO accesses are supported while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
#endif
/*
@@ -222,9 +232,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
* for PowerPC is as close as possible to the x86 version of these, and thus
* provides fairly heavy weight barriers for the non-raw versions
*
- * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
- * allowing the platform to provide its own implementation of some or all
- * of the accessors.
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
*/
/*
@@ -240,8 +250,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
/* Indirect IO address tokens:
*
- * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
- * on all IOs. (Note that this is all 64 bits only for now)
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
*
* To help platforms who may need to differenciate MMIO addresses in
* their hooks, a bitfield is reserved for use by the platform near the
@@ -263,11 +273,14 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
*
* The direct IO mapping operations will then mask off those bits
* before doing the actual access, though that only happen when
- * CONFIG_PPC_INDIRECT_IO is set, thus be careful when you use that
+ * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
* mechanism
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
*/
-#ifdef CONFIG_PPC_INDIRECT_IO
+#ifdef CONFIG_PPC_INDIRECT_MMIO
#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
#define PCI_IO_IND_TOKEN_SHIFT 48
#define PCI_FIX_ADDR(addr) \
@@ -672,7 +685,7 @@ extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
extern void __iounmap_at(void *ea, unsigned long size);
/*
- * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
* which needs some additional definitions here. They basically allow PIO
* space overall to be 1GB. This will work as long as we never try to use
* iomap to map MMIO below 1GB which should be fine on ppc64
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index 6f9b6e23dc5a..f51a5580bfd0 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -40,9 +40,10 @@
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
/*
- * This is used by assembly code to soft-disable interrupts
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
*/
-#define SOFT_DISABLE_INTS(__rA, __rB) \
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
cmpwi cr0,__rA,0; \
@@ -58,7 +59,7 @@
#define TRACE_ENABLE_INTS
#define TRACE_DISABLE_INTS
-#define SOFT_DISABLE_INTS(__rA, __rB) \
+#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACAIRQHAPPENED(r13); \
li __rB,0; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb6..fa19e2f1a874 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return r;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+ r = svcpu->last_inst;
+ svcpu_put(svcpu);
+ return r;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return vcpu->arch.last_inst;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a1ecb14e4442..86d638a3b359 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern int kvm_hpt_order; /* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* (masks depend on page size) */
rb |= 0x1000; /* page encoding in LP field */
rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */
}
} else {
/* 4kB page */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cde7cb6..33283532e9d8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvmppc_linear_info {
- void *base_virt;
- unsigned long base_pfn;
- unsigned long npages;
- struct list_head list;
- atomic_t use_count;
- int type;
+struct kvm_rma_info {
+ atomic_t use_count;
+ unsigned long base_pfn;
};
/* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
int tlbie_lock;
unsigned long lpcr;
unsigned long rmor;
- struct kvmppc_linear_info *rma;
+ struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
- struct kvmppc_linear_info *hpt_li;
+ int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a5287fe03d77..b15554a26c20 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -137,10 +137,10 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
-extern struct kvmppc_linear_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
-extern void kvm_release_hpt(struct kvmppc_linear_info *li);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
@@ -261,6 +261,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
#ifdef CONFIG_KVM_BOOK3S_64_HV
+extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
paca[cpu].kvm_hstate.xics_phys = addr;
@@ -281,13 +282,12 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
}
extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-extern void kvm_linear_init(void);
#else
-static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+static inline void __init kvm_cma_reserve(void)
{}
-static inline void kvm_linear_init(void)
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
static inline u32 kvmppc_get_xics_latch(void)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
}
-/* Please call after prepare_to_enter. This function puts the lazy ee state
- back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
{
+ trace_hardirqs_on();
+
#ifdef CONFIG_PPC64
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 9b12f88d4adb..4470d1e34d23 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -48,15 +48,13 @@
struct lppaca {
/* cacheline 1 contains read-only data */
- u32 desc; /* Eye catcher 0xD397D781 */
- u16 size; /* Size of this struct */
- u16 reserved1;
- u16 reserved2:14;
- u8 shared_proc:1; /* Shared processor indicator */
- u8 secondary_thread:1; /* Secondary thread indicator */
+ __be32 desc; /* Eye catcher 0xD397D781 */
+ __be16 size; /* Size of this struct */
+ u8 reserved1[3];
+ u8 __old_status; /* Old status, including shared proc */
u8 reserved3[14];
- volatile u32 dyn_hw_node_id; /* Dynamic hardware node id */
- volatile u32 dyn_hw_proc_id; /* Dynamic hardware proc id */
+ volatile __be32 dyn_hw_node_id; /* Dynamic hardware node id */
+ volatile __be32 dyn_hw_proc_id; /* Dynamic hardware proc id */
u8 reserved4[56];
volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
/* associativity change counters */
@@ -73,9 +71,9 @@ struct lppaca {
u8 fpregs_in_use;
u8 pmcregs_in_use;
u8 reserved8[28];
- u64 wait_state_cycles; /* Wait cycles for this proc */
+ __be64 wait_state_cycles; /* Wait cycles for this proc */
u8 reserved9[28];
- u16 slb_count; /* # of SLBs to maintain */
+ __be16 slb_count; /* # of SLBs to maintain */
u8 idle; /* Indicate OS is idle */
u8 vmxregs_in_use;
@@ -89,17 +87,17 @@ struct lppaca {
* NOTE: This value will ALWAYS be zero for dedicated processors and
* will NEVER be zero for shared processors (ie, initialized to a 1).
*/
- volatile u32 yield_count;
- volatile u32 dispersion_count; /* dispatch changed physical cpu */
- volatile u64 cmo_faults; /* CMO page fault count */
- volatile u64 cmo_fault_time; /* CMO page fault time */
+ volatile __be32 yield_count;
+ volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+ volatile __be64 cmo_faults; /* CMO page fault count */
+ volatile __be64 cmo_fault_time; /* CMO page fault time */
u8 reserved10[104];
/* cacheline 4-5 */
- u32 page_ins; /* CMO Hint - # page ins by OS */
+ __be32 page_ins; /* CMO Hint - # page ins by OS */
u8 reserved11[148];
- volatile u64 dtl_idx; /* Dispatch Trace Log head index */
+ volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
u8 reserved12[96];
} __attribute__((__aligned__(0x400)));
@@ -108,17 +106,29 @@ extern struct lppaca lppaca[];
#define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
/*
+ * Old kernels used a reserved bit in the VPA to determine if it was running
+ * in shared processor mode. New kernels look for a non-zero yield count
+ * but KVM still needs to set the bit to keep the old stuff happy.
+ */
+#define LPPACA_OLD_SHARED_PROC 2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+ return l->yield_count != 0;
+}
+
+/*
* SLB shadow buffer structure as defined in the PAPR. The save_area
* contains adjacent ESID and VSID pairs for each shadowed SLB. The
* ESID is stored in the lower 64bits, then the VSID.
*/
struct slb_shadow {
- u32 persistent; /* Number of persistent SLBs */
- u32 buffer_length; /* Total shadow buffer length */
- u64 reserved;
+ __be32 persistent; /* Number of persistent SLBs */
+ __be32 buffer_length; /* Total shadow buffer length */
+ __be64 reserved;
struct {
- u64 esid;
- u64 vsid;
+ __be64 esid;
+ __be64 vsid;
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
@@ -130,14 +140,14 @@ extern struct slb_shadow slb_shadow[];
struct dtl_entry {
u8 dispatch_reason;
u8 preempt_reason;
- u16 processor_id;
- u32 enqueue_to_dispatch_time;
- u32 ready_to_enqueue_time;
- u32 waiting_to_ready_time;
- u64 timebase;
- u64 fault_addr;
- u64 srr0;
- u64 srr1;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
};
#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
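
With the lppaca, slb_shadow and dtl_entry fields now declared __be16/__be32/__be64, C readers must byte-swap explicitly through the standard byteorder helpers (a no-op on big-endian kernels). A minimal sketch:

    static inline u64 current_dtl_index(struct lppaca *vpa)
    {
            /* be64_to_cpu() byte-swaps only on little-endian hosts */
            return be64_to_cpu(vpa->dtl_idx);
    }
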
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 8ae133eaf9fa..887d3d6133e3 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -32,25 +32,11 @@ struct mpc512x_ccm {
u32 scfr2; /* System Clock Frequency Register 2 */
u32 scfr2s; /* System Clock Frequency Shadow Register 2 */
u32 bcr; /* Bread Crumb Register */
- u32 p0ccr; /* PSC0 Clock Control Register */
- u32 p1ccr; /* PSC1 CCR */
- u32 p2ccr; /* PSC2 CCR */
- u32 p3ccr; /* PSC3 CCR */
- u32 p4ccr; /* PSC4 CCR */
- u32 p5ccr; /* PSC5 CCR */
- u32 p6ccr; /* PSC6 CCR */
- u32 p7ccr; /* PSC7 CCR */
- u32 p8ccr; /* PSC8 CCR */
- u32 p9ccr; /* PSC9 CCR */
- u32 p10ccr; /* PSC10 CCR */
- u32 p11ccr; /* PSC11 CCR */
+ u32 psc_ccr[12]; /* PSC Clock Control Registers */
u32 spccr; /* SPDIF Clock Control Register */
u32 cccr; /* CFM Clock Control Register */
u32 dccr; /* DIU Clock Control Register */
- u32 m1ccr; /* MSCAN1 CCR */
- u32 m2ccr; /* MSCAN2 CCR */
- u32 m3ccr; /* MSCAN3 CCR */
- u32 m4ccr; /* MSCAN4 CCR */
+ u32 mscan_ccr[4]; /* MSCAN Clock Control Registers */
u8 res[0x98]; /* Reserved */
};
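
Folding the twelve PSC and four MSCAN clock-control registers into arrays lets drivers index by controller number rather than open-coding one case per register. A usage sketch (the helper name is illustrative):

    #include <asm/io.h>

    /* Read the clock control register of PSC number 'psc' (0..11). */
    static u32 psc_ccr_read(struct mpc512x_ccm __iomem *ccm, int psc)
    {
            return in_be32(&ccm->psc_ccr[psc]); /* formerly p0ccr..p11ccr */
    }
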
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 000000000000..736d4acc05a8
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,92 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_PPC_MPC85XX_H
+#define __ASM_PPC_MPC85XX_H
+
+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design revision */
+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field */
+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field */
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */
+
+#define SVR_8533 0x803400
+#define SVR_8535 0x803701
+#define SVR_8536 0x803700
+#define SVR_8540 0x803000
+#define SVR_8541 0x807200
+#define SVR_8543 0x803200
+#define SVR_8544 0x803401
+#define SVR_8545 0x803102
+#define SVR_8547 0x803101
+#define SVR_8548 0x803100
+#define SVR_8555 0x807100
+#define SVR_8560 0x807000
+#define SVR_8567 0x807501
+#define SVR_8568 0x807500
+#define SVR_8569 0x808000
+#define SVR_8572 0x80E000
+#define SVR_P1010 0x80F100
+#define SVR_P1011 0x80E500
+#define SVR_P1012 0x80E501
+#define SVR_P1013 0x80E700
+#define SVR_P1014 0x80F101
+#define SVR_P1017 0x80F700
+#define SVR_P1020 0x80E400
+#define SVR_P1021 0x80E401
+#define SVR_P1022 0x80E600
+#define SVR_P1023 0x80F600
+#define SVR_P1024 0x80E402
+#define SVR_P1025 0x80E403
+#define SVR_P2010 0x80E300
+#define SVR_P2020 0x80E200
+#define SVR_P2040 0x821000
+#define SVR_P2041 0x821001
+#define SVR_P3041 0x821103
+#define SVR_P4040 0x820100
+#define SVR_P4080 0x820000
+#define SVR_P5010 0x822100
+#define SVR_P5020 0x822000
+#define SVR_P5021 0x820500
+#define SVR_P5040 0x820400
+#define SVR_T4240 0x824000
+#define SVR_T4120 0x824001
+#define SVR_T4160 0x824100
+#define SVR_C291 0x850000
+#define SVR_C292 0x850020
+#define SVR_C293 0x850030
+#define SVR_B4860 0x868000
+#define SVR_G4860 0x868001
+#define SVR_G4060 0x868003
+#define SVR_B4440 0x868100
+#define SVR_G4440 0x868101
+#define SVR_B4420 0x868102
+#define SVR_B4220 0x868103
+#define SVR_T1040 0x852000
+#define SVR_T1041 0x852001
+#define SVR_T1042 0x852002
+#define SVR_T1020 0x852100
+#define SVR_T1021 0x852101
+#define SVR_T1022 0x852102
+
+#define SVR_8610 0x80A000
+#define SVR_8641 0x809000
+#define SVR_8641D 0x809001
+
+#define SVR_9130 0x860001
+#define SVR_9131 0x860000
+#define SVR_9132 0x861000
+#define SVR_9232 0x861400
+
+#define SVR_Unknown 0xFFFFFF
+
+#endif
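
These constants are meant to be compared against the SOC version field of the System Version Register. A sketch of the intended use, assuming the usual mfspr()/SPRN_SVR definitions from asm/reg.h:

    #include <asm/reg.h>
    #include <asm/mpc85xx.h>

    static bool soc_is_p2020(void)
    {
            u32 svr = mfspr(SPRN_SVR);

            /* SVR_SOC_VER() drops the per-part revision bits */
            return SVR_SOC_VER(svr) == SVR_P2020;
    }
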
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 4a1ac9fbf186..754f93d208fa 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -396,7 +396,14 @@ extern struct bus_type mpic_subsys;
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+ return 0;
+}
+#endif
/* Allocate the controller structure and setup the linux irq descs
* for the range if interrupts passed in. No HW initialization is
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 029fe85722aa..c5cd72833d6e 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -124,6 +124,11 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_PCI_POLL 62
#define OPAL_PCI_MSI_EOI 63
#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
#ifndef __ASSEMBLY__
@@ -337,6 +342,17 @@ enum OpalEpowStatus {
OPAL_EPOW_OVER_INTERNAL_TEMP = 3
};
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
struct opal_machine_check_event {
enum OpalMCE_Version version:8; /* 0x00 */
uint8_t in_use; /* 0x01 */
@@ -631,6 +647,15 @@ int64_t opal_set_system_attention_led(uint8_t led_action);
int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
uint16_t *pci_error_type, uint16_t *severity);
int64_t opal_pci_poll(uint64_t phb_id);
+int64_t opal_return_cpu(void);
+
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+ uint32_t addr, uint32_t *data, uint32_t sz);
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -664,6 +689,8 @@ extern int opal_machine_check(struct pt_regs *regs);
extern void opal_shutdown(void);
+extern void opal_lpc_init(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
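
The new LPC accessors take a chip id, an address-cycle type and a transfer size. A hedged sketch of a one-byte read from LPC I/O space (the chip id, port and the placement of the byte in the returned word are illustrative assumptions):

    static int64_t lpc_io_read_byte(uint32_t chip, uint32_t port, uint8_t *val)
    {
            uint32_t data;
            int64_t rc = opal_lpc_read(chip, OPAL_LPC_IO, port, &data, 1);

            if (rc == 0)
                    *val = data & 0xff; /* assume byte in the low bits */
            return rc;
    }
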
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 77c91e74b612..a5954cebbc55 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -68,8 +68,13 @@ struct paca_struct {
* instruction. They must travel together and be properly
* aligned.
*/
+#ifdef __BIG_ENDIAN__
u16 lock_token; /* Constant 0x8000, used in locks */
u16 paca_index; /* Logical processor number */
+#else
+ u16 paca_index; /* Logical processor number */
+ u16 lock_token; /* Constant 0x8000, used in locks */
+#endif
u64 kernel_toc; /* Kernel TOC address */
u64 kernelbase; /* Base address of kernel */
@@ -93,9 +98,9 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[12] __attribute__((aligned(0x80)));
- u64 exmc[12]; /* used for machine checks */
- u64 exslb[12]; /* used for SLB/segment table misses
+ u64 exgen[13] __attribute__((aligned(0x80)));
+ u64 exmc[13]; /* used for machine checks */
+ u64 exslb[13]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 32d0d2018faf..4ca90a39d6d0 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -159,7 +159,7 @@ struct pci_dn {
int pci_ext_config_space; /* for pci devices */
- int force_32bit_msi:1;
+ bool force_32bit_msi;
struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
index 718a9fa94e68..a58165450f6f 100644
--- a/arch/powerpc/include/asm/perf_event_fsl_emb.h
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
-#define MAX_HWEVENTS 4
+#define MAX_HWEVENTS 6
/* event flags */
#define FSL_EMB_EVENT_VALID 1
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8b2492644754..3fd2f1b6f906 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -138,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
#define EVENT_ATTR(_name, _id, _suffix) \
- PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \
power_events_sysfs_show)
#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
-#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
new file mode 100644
index 000000000000..a63b045e707c
--- /dev/null
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -0,0 +1,300 @@
+#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
+#define _ASM_POWERPC_PLPAR_WRAPPERS_H
+
+#include <linux/string.h>
+#include <linux/irqflags.h>
+
+#include <asm/hvcall.h>
+#include <asm/paca.h>
+#include <asm/page.h>
+
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
+static inline long poll_pending(void)
+{
+ return plpar_hcall_norets(H_POLL_PENDING);
+}
+
+static inline u8 get_cede_latency_hint(void)
+{
+ return get_lppaca()->cede_latency_hint;
+}
+
+static inline void set_cede_latency_hint(u8 latency_hint)
+{
+ get_lppaca()->cede_latency_hint = latency_hint;
+}
+
+static inline long cede_processor(void)
+{
+ return plpar_hcall_norets(H_CEDE);
+}
+
+static inline long extended_cede_processor(unsigned long latency_hint)
+{
+ long rc;
+ u8 old_latency_hint = get_cede_latency_hint();
+
+ set_cede_latency_hint(latency_hint);
+
+ rc = cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+
+ set_cede_latency_hint(old_latency_hint);
+
+ return rc;
+}
+
+static inline long vpa_call(unsigned long flags, unsigned long cpu,
+ unsigned long vpa)
+{
+ flags = flags << H_VPA_FUNC_SHIFT;
+
+ return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
+}
+
+static inline long unregister_vpa(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
+}
+
+static inline long register_vpa(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_VPA, cpu, vpa);
+}
+
+static inline long unregister_slb_shadow(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
+}
+
+static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_SLB, cpu, vpa);
+}
+
+static inline long unregister_dtl(unsigned long cpu)
+{
+ return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
+}
+
+static inline long register_dtl(unsigned long cpu, unsigned long vpa)
+{
+ return vpa_call(H_VPA_REG_DTL, cpu, vpa);
+}
+
+static inline long plpar_page_set_loaned(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
+static inline long plpar_page_set_active(unsigned long vpa)
+{
+ unsigned long cmo_page_sz = cmo_get_page_size();
+ long rc = 0;
+ int i;
+
+ for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
+ rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
+
+ for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
+ plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
+ vpa + i - cmo_page_sz, 0);
+
+ return rc;
+}
+
+extern void vpa_init(int cpu);
+
+static inline long plpar_pte_enter(unsigned long flags,
+ unsigned long hpte_group, unsigned long hpte_v,
+ unsigned long hpte_r, unsigned long *slot)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
+
+ *slot = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
+ unsigned long avpn, unsigned long *old_pteh_ret,
+ unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
+static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
+ unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
+
+ *old_pteh_ret = retbuf[0];
+ *old_ptel_ret = retbuf[1];
+
+ return rc;
+}
+
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * The ptes buffer must hold 8*sizeof(unsigned long) bytes.
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+		unsigned long *ptes)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+ memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+ return rc;
+}
+
+static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
+ unsigned long avpn)
+{
+ return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
+}
+
+static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
+ unsigned long *tce_ret)
+{
+ long rc;
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
+
+ *tce_ret = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval)
+{
+ return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
+}
+
+static inline long plpar_tce_put_indirect(unsigned long liobn,
+ unsigned long ioba, unsigned long page, unsigned long count)
+{
+ return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
+}
+
+static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
+ unsigned long tceval, unsigned long count)
+{
+ return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
+}
+
+/* Set various resource mode parameters */
+static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
+ unsigned long value1, unsigned long value2)
+{
+ return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
+}
+
+/*
+ * Enable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition-wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_reloc_on_exceptions(void)
+{
+ /* mflags = 3: Exceptions at 0xC000000000004000 */
+ return plpar_set_mode(3, 3, 0, 0);
+}
+
+/*
+ * Disable relocation on exceptions on this partition
+ *
+ * Note: this call has a partition-wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long disable_reloc_on_exceptions(void)
+{
+ return plpar_set_mode(0, 3, 0, 0);
+}
+
+static inline long plapr_set_ciabr(unsigned long ciabr)
+{
+ return plpar_set_mode(0, 1, ciabr, 0);
+}
+
+static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
+{
+ return plpar_set_mode(0, 2, dawr0, dawrx0);
+}
+
+#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
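
Since enable/disable_reloc_on_exceptions() are partition-wide and may return H_LONG_BUSY_*, callers are expected to retry. A sketch of that loop, assuming the standard H_IS_LONG_BUSY()/get_longbusy_msecs() helpers from asm/hvcall.h:

    #include <linux/delay.h>
    #include <asm/hvcall.h>
    #include <asm/plpar_wrappers.h>

    static long reloc_on_exc_enable(void)
    {
            long rc;

            do {
                    rc = enable_reloc_on_exceptions();
                    if (H_IS_LONG_BUSY(rc))
                            mdelay(get_longbusy_msecs(rc));
            } while (H_IS_LONG_BUSY(rc));

            return rc;
    }
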
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index eccfc161e58e..d7fe9f5b46d4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -81,6 +81,53 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* opcode and xopcode for instructions */
+#define OP_TRAP 3
+#define OP_TRAP_64 2
+
+#define OP_31_XOP_TRAP 4
+#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_DCBST 54
+#define OP_31_XOP_LWZUX 55
+#define OP_31_XOP_TRAP_64 68
+#define OP_31_XOP_DCBF 86
+#define OP_31_XOP_LBZX 87
+#define OP_31_XOP_STWX 151
+#define OP_31_XOP_STBX 215
+#define OP_31_XOP_LBZUX 119
+#define OP_31_XOP_STBUX 247
+#define OP_31_XOP_LHZX 279
+#define OP_31_XOP_LHZUX 311
+#define OP_31_XOP_MFSPR 339
+#define OP_31_XOP_LHAX 343
+#define OP_31_XOP_LHAUX 375
+#define OP_31_XOP_STHX 407
+#define OP_31_XOP_STHUX 439
+#define OP_31_XOP_MTSPR 467
+#define OP_31_XOP_DCBI 470
+#define OP_31_XOP_LWBRX 534
+#define OP_31_XOP_TLBSYNC 566
+#define OP_31_XOP_STWBRX 662
+#define OP_31_XOP_LHBRX 790
+#define OP_31_XOP_STHBRX 918
+
+#define OP_LWZ 32
+#define OP_LD 58
+#define OP_LWZU 33
+#define OP_LBZ 34
+#define OP_LBZU 35
+#define OP_STW 36
+#define OP_STWU 37
+#define OP_STD 62
+#define OP_STB 38
+#define OP_STBU 39
+#define OP_LHZ 40
+#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
+#define OP_STH 44
+#define OP_STHU 45
+
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
#define PPC_INST_CLRBHRB 0x7c00035c
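
The OP_* values match the primary opcode in bits 0-5 of an instruction word, and the OP_31_XOP_* values match the extended opcode in bits 21-30 of the OP-31 forms. The decode step looks like the get_op()/get_xop() helpers in asm/disassemble.h:

    static inline unsigned int get_op(u32 inst)
    {
            return inst >> 26;          /* primary opcode, bits 0-5 */
    }

    static inline unsigned int get_xop(u32 inst)
    {
            return (inst >> 1) & 0x3ff; /* extended opcode, bits 21-30 */
    }

    /* e.g.: if (get_op(inst) == 31 && get_xop(inst) == OP_31_XOP_MFSPR) ... */
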
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2f1b6c5f8174..599545738af3 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -54,7 +54,8 @@ BEGIN_FW_FTR_SECTION; \
/* from user - see if there are any DTL entries to process */ \
ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
- ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \
+ addi r10,r10,LPPACA_DTLIDX; \
+ LDX_BE r10,0,r10; /* get log write index */ \
cmpd cr1,r11,r10; \
beq+ cr1,33f; \
bl .accumulate_stolen_time; \
@@ -219,19 +220,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
-/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,R##base,R##b)
-#define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
-#define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
-#define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
-#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
-#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b)
-#define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
-#define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
-#define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
-#define REST_16VSRSU(n,b,base) REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
-#define REST_32VSRSU(n,b,base) REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
/*
* b = base register for addressing, o = base offset from register of 1st EVR
@@ -443,15 +431,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
#define ISYNC_601
#endif
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define MFTB(dest) \
-90: mftb dest; \
+90: mfspr dest, SPRN_TBRL; \
BEGIN_FTR_SECTION_NESTED(96); \
cmpwi dest,0; \
beq- 90b; \
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#else
-#define MFTB(dest) mftb dest
+#define MFTB(dest) mfspr dest, SPRN_TBRL
#endif
#ifndef CONFIG_SMP
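
MFTB now reads the timebase through SPRN_TBRL everywhere (the mftb mnemonic is being phased out), and the Cell spin-until-non-zero workaround is extended to Freescale Book3E parts for erratum A-006958 (see the cputable.h hunk above). In C, the guarded read amounts to (a sketch of what the assembly loop does):

    static inline unsigned long tb_read_workaround(void)
    {
            unsigned long tb;

            /* The erratum can yield a spurious 0; retry until non-zero */
            do {
                    tb = mfspr(SPRN_TBRL);
            } while (!tb);

            return tb;
    }
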
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da154f68b..7d0c7f3a7171 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -38,14 +38,12 @@ extern unsigned long pci_address_to_pio(phys_addr_t address);
/* Parse the ibm,dma-window property of an OF node into the busno, phys and
* size parameters.
*/
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
- unsigned long *busno, unsigned long *phys, unsigned long *size);
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+ unsigned long *busno, unsigned long *phys,
+ unsigned long *size);
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
@@ -58,6 +56,8 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
extern void of_instantiate_rtc(void);
+extern int of_get_ibm_chip_id(struct device_node *np);
+
/* The of_drconf_cell struct defines the layout of the LMB array
* specified in the device tree property
* ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 99222e27f173..10d1ef016bf1 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -115,10 +115,10 @@
#define MSR_64BIT MSR_SF
/* Server variant */
-#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
-#define MSR_KERNEL MSR_ | MSR_64BIT
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_64BIT
+#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
#elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
/* Default MSR for kernel mode. */
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR)
@@ -258,8 +258,8 @@
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
#define FSCR_TM_LG 5 /* Enable Transactional Memory */
-#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
-#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_BHRB_LG 4 /* Enable Branch History Rolling Buffer*/
+#define FSCR_PM_LG 3 /* Enable prob/priv access to PMU SPRs */
#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
@@ -1126,10 +1126,10 @@
: "memory")
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
#define mftb() ({unsigned long rval; \
asm volatile( \
- "90: mftb %0;\n" \
+ "90: mfspr %0, %2;\n" \
"97: cmpwi %0,0;\n" \
" beq- 90b;\n" \
"99:\n" \
@@ -1143,18 +1143,23 @@
" .llong 0\n" \
" .llong 0\n" \
".previous" \
- : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;})
+ : "=r" (rval) \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ rval;})
#else
#define mftb() ({unsigned long rval; \
- asm volatile("mftb %0" : "=r" (rval)); rval;})
+ asm volatile("mfspr %0, %1" : \
+ "=r" (rval) : "i" (SPRN_TBRL)); rval;})
#endif /* !CONFIG_PPC_CELL */
#else /* __powerpc64__ */
#define mftbl() ({unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); rval;})
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRL)); rval;})
#define mftbu() ({unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); rval;})
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRU)); rval;})
#endif /* !__powerpc64__ */
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
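
On 32-bit the timebase must be read as two halves, so users of the new mftbu()/mftbl() macros re-read the upper half to detect a carry between the two reads. The classic sequence:

    static inline u64 tb_read_32(void)
    {
            u32 hi, lo;

            do {
                    hi = mftbu();
                    lo = mftbl();
            } while (hi != mftbu()); /* retry if TBL wrapped into TBU */

            return ((u64)hi << 32) | lo;
    }
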
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b417de3cc2c4..ed8f836da094 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -29,10 +29,10 @@
#if defined(CONFIG_PPC_BOOK3E_64)
#define MSR_64BIT MSR_CM
-#define MSR_ MSR_ME | MSR_CE
-#define MSR_KERNEL MSR_ | MSR_64BIT
-#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64 MSR_USER32 | MSR_64BIT
+#define MSR_ (MSR_ME | MSR_CE)
+#define MSR_KERNEL (MSR_ | MSR_64BIT)
+#define MSR_USER32 (MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64 (MSR_USER32 | MSR_64BIT)
#elif defined (CONFIG_40x)
#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE)
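
The added parentheses matter as soon as the macros appear inside a larger expression; with the old definitions, operator precedence silently corrupted the value:

    /* Old: #define MSR_KERNEL MSR_ | MSR_64BIT              */
    /* So   ~MSR_KERNEL   expanded to   ~MSR_ | MSR_64BIT,   */
    /* complementing only MSR_ and OR-ing MSR_64BIT back in: */
    unsigned long bad  = ~MSR_KERNEL;         /* wrong without parens */
    unsigned long good = ~(MSR_ | MSR_64BIT); /* what was intended */
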
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 77bb71cfd991..0e3ddf5177f6 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -17,12 +17,16 @@
/* Freescale Book E Performance Monitor APU Registers */
#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
-#define PMRN_PMC2 0x012 /* Performance Monitor Counter 1 */
-#define PMRN_PMC3 0x013 /* Performance Monitor Counter 1 */
+#define PMRN_PMC2 0x012 /* Performance Monitor Counter 2 */
+#define PMRN_PMC3 0x013 /* Performance Monitor Counter 3 */
+#define PMRN_PMC4 0x014 /* Performance Monitor Counter 4 */
+#define PMRN_PMC5 0x015 /* Performance Monitor Counter 5 */
#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
+#define PMRN_PMLCA4 0x094 /* PM Local Control A4 */
+#define PMRN_PMLCA5 0x095 /* PM Local Control A5 */
#define PMLCA_FC 0x80000000 /* Freeze Counter */
#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
@@ -30,14 +34,18 @@
#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
#define PMLCA_CE 0x04000000 /* Condition Enable */
+#define PMLCA_FGCS1 0x00000002 /* Freeze in guest state */
+#define PMLCA_FGCS0 0x00000001 /* Freeze in hypervisor state */
-#define PMLCA_EVENT_MASK 0x00ff0000 /* Event field */
+#define PMLCA_EVENT_MASK 0x01ff0000 /* Event field */
#define PMLCA_EVENT_SHIFT 16
#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
+#define PMRN_PMLCB4 0x114 /* PM Local Control B4 */
+#define PMRN_PMLCB5 0x115 /* PM Local Control B5 */
#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */
#define PMLCB_THRESHMUL_SHIFT 8
@@ -55,16 +63,22 @@
#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4 0x004 /* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5 0x005 /* User Performance Monitor Counter 5 */
#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
+#define PMRN_UPMLCA4 0x084 /* User PM Local Control A4 */
+#define PMRN_UPMLCA5 0x085 /* User PM Local Control A5 */
#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
+#define PMRN_UPMLCB4 0x104 /* User PM Local Control B4 */
+#define PMRN_UPMLCB5 0x105 /* User PM Local Control B5 */
#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index c7a8bfc9f6f5..9bd52c65e66f 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -44,12 +44,12 @@
*
*/
-typedef u32 rtas_arg_t;
+typedef __be32 rtas_arg_t;
struct rtas_args {
- u32 token;
- u32 nargs;
- u32 nret;
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
rtas_arg_t args[16];
rtas_arg_t *rets; /* Pointer to return values in args[]. */
};
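
Declaring rtas_args big-endian means callers now marshal values with cpu_to_be32() (a no-op on BE, a byte swap on LE kernels). A minimal sketch of filling a one-argument, one-return call:

    static void rtas_fill_args(struct rtas_args *args, u32 token, u32 arg0)
    {
            args->token = cpu_to_be32(token);
            args->nargs = cpu_to_be32(1);
            args->nret  = cpu_to_be32(1);
            args->args[0] = cpu_to_be32(arg0);
            args->rets = &args->args[1]; /* returns follow the inputs */
    }
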
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 48cfc858abd6..98da78e0c2c0 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -112,6 +112,7 @@ static inline struct cpumask *cpu_core_mask(int cpu)
}
extern int cpu_to_core_id(int cpu);
+extern int cpu_to_chip_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
*
@@ -186,6 +187,8 @@ extern int smt_enabled_at_boot;
extern int smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5b23f910ee57..5f54a744dcc5 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -32,8 +32,12 @@
#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
#define LOCK_TOKEN 1
#endif
@@ -96,7 +100,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
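
LOCK_TOKEN performs a 32-bit load over the adjacent u16 lock_token/paca_index pair in the paca (see the paca.h hunk above); swapping the member order on little-endian keeps the loaded value identical. An illustration of the layout (u16/u32 as in linux/types.h):

    struct token_pair {
    #ifdef __BIG_ENDIAN__
            u16 lock_token; /* 0x8000, at the lower address */
            u16 paca_index;
    #else
            u16 paca_index; /* order swapped so that...      */
            u16 lock_token; /* ...the u32 load is unchanged  */
    #endif
    };
    /* Either way a 4-byte load at the struct start reads 0x8000 in the */
    /* upper half and the CPU number in the lower: the "locked" token.  */
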
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 294c2cedcf7a..2be5618cdec6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -25,11 +25,8 @@ static inline void save_tar(struct thread_struct *prev)
static inline void save_tar(struct thread_struct *prev) {}
#endif
-extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
-extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
@@ -47,6 +44,14 @@ static inline void discard_lazy_cpu_state(void)
}
#endif
+#ifdef CONFIG_PPC_FPU
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+#else
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+static inline void giveup_fpu(struct task_struct *t) { }
+#endif
+
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index c55e14f7ef44..18908caa1f3b 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
ret = 0;
__asm__ __volatile__(
- "97: mftb %0\n"
+ "97: mfspr %0, %2\n"
"99:\n"
".section __ftr_fixup,\"a\"\n"
".align 2\n"
@@ -41,7 +41,7 @@ static inline cycles_t get_cycles(void)
" .long 0\n"
" .long 0\n"
".previous"
- : "=r" (ret) : "i" (CPU_FTR_601));
+ : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
return ret;
#endif
}
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 161ab662843b..89e3ef2496ac 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -96,6 +96,7 @@ static inline int prrn_is_enabled(void)
#ifdef CONFIG_PPC64
#include <asm/smp.h>
+#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index dc590919f8eb..b51fba10e733 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -27,10 +27,11 @@ extern void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
extern void udbg_progress(char *s, unsigned short hex);
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
- unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
- unsigned int clock);
+extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(unsigned int clock);
struct device_node;
extern void udbg_scc_init(int force_scc);
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 05b8d560cfba..7e39c9146a71 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -107,26 +107,25 @@ typedef elf_gregset_t32 compat_elf_gregset_t;
# define ELF_NVRREG 34 /* includes vscr & vrsave in split vectors */
# define ELF_NVSRHALFREG 32 /* Half the vsx registers */
# define ELF_GREG_TYPE elf_greg_t64
+# define ELF_ARCH EM_PPC64
+# define ELF_CLASS ELFCLASS64
+typedef elf_greg_t64 elf_greg_t;
+typedef elf_gregset_t64 elf_gregset_t;
#else
# define ELF_NEVRREG 34 /* includes acc (as 2) */
# define ELF_NVRREG 33 /* includes vscr */
# define ELF_GREG_TYPE elf_greg_t32
# define ELF_ARCH EM_PPC
# define ELF_CLASS ELFCLASS32
-# define ELF_DATA ELFDATA2MSB
+typedef elf_greg_t32 elf_greg_t;
+typedef elf_gregset_t32 elf_gregset_t;
#endif /* __powerpc64__ */
-#ifndef ELF_ARCH
-# define ELF_ARCH EM_PPC64
-# define ELF_CLASS ELFCLASS64
-# define ELF_DATA ELFDATA2MSB
- typedef elf_greg_t64 elf_greg_t;
- typedef elf_gregset_t64 elf_gregset_t;
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA ELFDATA2MSB
#else
- /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
- typedef elf_greg_t32 elf_greg_t;
- typedef elf_gregset_t32 elf_gregset_t;
-#endif /* ELF_ARCH */
+#define ELF_DATA ELFDATA2LSB
+#endif
/* Floating point registers */
typedef double elf_fpreg_t;