Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h   |  2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h  |  6
-rw-r--r--  arch/powerpc/include/asm/bug.h                 |  2
-rw-r--r--  arch/powerpc/include/asm/cpm1.h                |  2
-rw-r--r--  arch/powerpc/include/asm/cpu_has_feature.h     |  6
-rw-r--r--  arch/powerpc/include/asm/cputable.h            |  5
-rw-r--r--  arch/powerpc/include/asm/dt_cpu_ftrs.h         | 26
-rw-r--r--  arch/powerpc/include/asm/kprobes.h             |  1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h      |  2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h            | 28
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h             | 74
-rw-r--r--  arch/powerpc/include/asm/module.h              |  4
-rw-r--r--  arch/powerpc/include/asm/page.h                | 12
-rw-r--r--  arch/powerpc/include/asm/processor.h           | 26
-rw-r--r--  arch/powerpc/include/asm/reg.h                 |  1
-rw-r--r--  arch/powerpc/include/asm/topology.h            | 14
-rw-r--r--  arch/powerpc/include/asm/uaccess.h             |  8
-rw-r--r--  arch/powerpc/include/asm/xive.h                | 21
18 files changed, 198 insertions, 42 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 214219dff87c..9732837aaae8 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -2,9 +2,9 @@
 #define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
 
 #define H_PTE_INDEX_SIZE  8
-#define H_PMD_INDEX_SIZE  5
-#define H_PUD_INDEX_SIZE  5
-#define H_PGD_INDEX_SIZE  15
+#define H_PMD_INDEX_SIZE  10
+#define H_PUD_INDEX_SIZE  7
+#define H_PGD_INDEX_SIZE  8
 
 /*
  * 64k aligned address free up few of the lower bits of RPN for us
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a0a427..0151af6c2a50 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
 	"1:	"PPC_TLNEI"	%4,0\n"			\
 		_EMIT_BUG_ENTRY				\
 		: : "i" (__FILE__), "i" (__LINE__),	\
-		  "i" (BUGFLAG_TAINT(TAINT_WARN)),	\
+		  "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 		  "i" (sizeof(struct bug_entry)),	\
 		  "r" (__ret_warn_on));			\
 	}						\
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 8ee4211ca0c6..14ad37865000 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -560,6 +560,8 @@ typedef struct risc_timer_pram {
 #define CPM_PIN_SECONDARY 2
 #define CPM_PIN_GPIO      4
 #define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE  16
+#define CPM_PIN_ANYEDGE   0
 
 enum cpm_port {
 	CPM_PORTA,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 6e834caa3720..0d1df02bf99d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_POWERPC_CPUFEATURES_H
-#define __ASM_POWERPC_CPUFEATURES_H
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
 
 #ifndef __ASSEMBLY__
 
@@ -52,4 +52,4 @@ static inline bool cpu_has_feature(unsigned long feature)
 #endif
 
 #endif /* __ASSEMBLY__ */
-#endif /* __ASM_POWERPC_CPUFEATURE_H */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1f6847b107e4..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -118,7 +118,9 @@ extern struct cpu_spec *cur_cpu_spec;
 
 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
 
+extern void set_cur_cpu_spec(struct cpu_spec *s);
 extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
 extern void do_feature_fixups(unsigned long value, void *fixup_start,
 			      void *fixup_end);
 
@@ -212,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -461,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 000000000000..7a34fc11bf63
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ * Copyright 2017, IBM Corporation
+ * cpufeatures is the new way to discover CPU features with /cpus/features
+ * devicetree. This supersedes PVR based discovery ("cputable"), and older
+ * device tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
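A minimal sketch of how early boot code might consume the interface declared in the new dt_cpu_ftrs.h above: try the /cpus features binding first, then fall back to PVR-based discovery ("cputable"). The function name early_cpu_setup_example() and the exact fallback call are assumptions for illustration, not part of this diff.

/* Sketch only, assuming the dt_cpu_ftrs.h interface shown above. */
#include <linux/init.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/cputable.h>
#include <asm/reg.h>

static void __init early_cpu_setup_example(void *fdt)
{
	/* Assumed here: dt_cpu_ftrs_init() returns true when the new
	 * cpufeatures device-tree binding was found and used. */
	if (dt_cpu_ftrs_init(fdt))
		return;

	/* Otherwise fall back to PVR-based discovery ("cputable"). */
	identify_cpu(0, mfspr(SPRN_PVR));
}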
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 			   struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0593d9479f74..b148496ffe36 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -111,6 +111,8 @@ struct kvmppc_host_state {
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
 	void __iomem *xics_phys;
+	void __iomem *xive_tima_phys;
+	void __iomem *xive_tima_virt;
 	u32 saved_xirr;
 	u64 dabr;
 	u64 host_mmcr[7];	/* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 77c60826d145..9c51ac4b8f36 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -210,6 +210,12 @@ struct kvmppc_spapr_tce_table {
 /* XICS components, defined in book3s_xics.c */
 struct kvmppc_xics;
 struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
 
 struct kvmppc_passthru_irqmap;
 
@@ -298,6 +304,7 @@ struct kvm_arch {
 #endif
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
+	struct kvmppc_xive *xive;
 	struct kvmppc_passthru_irqmap *pimap;
 #endif
 	struct kvmppc_ops *kvm_ops;
@@ -427,7 +434,7 @@ struct kvmppc_passthru_irqmap {
 
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
-#define KVMPPC_IRQ_XICS		2
+#define KVMPPC_IRQ_XICS		2 /* Includes a XIVE option */
 
 #define MMIO_HPTE_CACHE_SIZE	4
 
@@ -454,6 +461,21 @@ struct mmio_hpte_cache {
 
 struct openpic;
 
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+	struct {
+		u8	nsr;
+		u8	cppr;
+		u8	ipb;
+		u8	lsmfb;
+		u8	ack;
+		u8	inc;
+		u8	age;
+		u8	pipr;
+	};
+	__be64 w01;
+};
+
 struct kvm_vcpu_arch {
 	ulong host_stack;
 	u32 host_pid;
@@ -714,6 +736,10 @@ struct kvm_vcpu_arch {
 	struct openpic *mpic;	/* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
 	struct kvmppc_icp *icp; /* XICS presentation controller */
+	struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+	__be32 xive_cam_word;	 /* Cooked W2 in proper endian with valid bit */
+	u32 xive_pushed;	 /* Is the VP pushed on the physical CPU ? */
+	union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
 #endif
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
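The union xive_tma_w01 added above overlays the eight bytes of XIVE thread management area words 0 and 1 with named byte fields, while the __be64 view (w01) carries the same bytes as a raw big-endian doubleword for stores to and loads from the hardware TIMA. A tiny illustrative helper, assuming the kvm_host.h context above; the helper name is hypothetical and not part of this diff:

/* Sketch only: single-byte fields need no byte swap, so the saved CPPR
 * (current processor priority) can be read straight out of the snapshot
 * KVM keeps in vcpu->arch.xive_saved_state. */
static inline u8 kvmppc_xive_example_saved_cppr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.xive_saved_state.cppr;
}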
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 76e940a3c145..e0d88c38602b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -240,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
 extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
 extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
 extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
 extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
 				u32 priority);
 extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
@@ -428,6 +429,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 	paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
 }
 
+static inline void kvmppc_set_xive_tima(int cpu,
+					unsigned long phys_addr,
+					void __iomem *virt_addr)
+{
+	paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+	paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
+}
+
 static inline u32 kvmppc_get_xics_latch(void)
 {
 	u32 xirr;
@@ -458,6 +467,11 @@ static inline void __init kvm_cma_reserve(void)
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {}
 
+static inline void kvmppc_set_xive_tima(int cpu,
+					unsigned long phys_addr,
+					void __iomem *virt_addr)
+{}
+
 static inline u32 kvmppc_get_xics_latch(void)
 {
 	return 0;
@@ -508,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
 					struct kvmppc_irq_map *irq_map,
 					struct kvmppc_passthru_irqmap *pimap,
 					bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+			       int level, bool line_status);
+
 extern int h_ipi_redirect;
 #else
 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -525,6 +543,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 	{ return 0; }
 #endif
 
+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
+ * ie. P9 new interrupt controller, while the second "xive" is the legacy
+ * "eXternal Interrupt Vector Entry" which is the configuration of an
+ * interrupt on the "xics" interrupt controller on P8 and earlier. Those
+ * two function consume or produce a legacy "XIVE" state from the
+ * new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+				u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+				u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+				    struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+				  struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+				  struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+			       int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+				       u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+				       u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+					 struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+				      int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
 /*
  * Prototypes for functions called only from assembler code.
  * Having prototypes reduces sparse errors.
@@ -562,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
 			unsigned long slb_v, unsigned int status, bool data);
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		    unsigned long mfrr);
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 53885512b8d3..6c0132c7212f 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
 #include <asm-generic/module.h>
 
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
+#endif
+
 #ifndef __powerpc64__
 /*
  * Thanks to Paul M for explaining this.
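The module.h hunk above defines MODULE_ARCH_VERMAGIC as "mprofile-kernel" when the kernel itself is compiled with -mprofile-kernel, so that the arch string becomes part of each module's version magic and a module built against the other ftrace ABI is refused at load time. A rough, simplified sketch of the idea (userspace-style illustration, not taken from this diff; the magic strings shown are assumptions):

#include <string.h>

/* Sketch only: the loader's compatibility check is in essence a string
 * comparison of the running kernel's vermagic against the vermagic
 * recorded in the module at build time. */
static int example_vermagic_ok(const char *kernel_magic, const char *mod_magic)
{
	return strcmp(kernel_magic, mod_magic) == 0;
}

/* example_vermagic_ok("4.12.0 SMP mod_unload mprofile-kernel",
 *                     "4.12.0 SMP mod_unload") returns 0: rejected. */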
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 2a32483c7b6c..8da5d4c1cab2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
 #define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr)	(REGION_ID(kaddr) == KERNEL_REGION_ID && \
+				 pfn_valid(virt_to_pfn(kaddr)))
+#else
 #define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))
+#endif
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a4b1d8d6b793..1189d04f3bd1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64	TASK_SIZE_512TB
+#define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64	TASK_SIZE_64TB
+#define TASK_SIZE_USER64		TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,16 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
-				 TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW	((is_32bit_task()) ? \
+				 TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW	TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
@@ -374,12 +378,6 @@ struct thread_struct {
 }
 #endif
 
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk)    \
-	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
 #define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d4f653c9259a..7e50e47375d6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1229,6 +1229,7 @@
 #define PVR_POWER8E	0x004B
 #define PVR_POWER8NVL	0x004C
 #define PVR_POWER8	0x004D
+#define PVR_POWER9	0x004E
 #define PVR_BE		0x0070
 #define PVR_PA6T	0x0090
 
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 5c0d8a8cdae5..41e88d3ce36b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -267,13 +267,7 @@ do {								\
 extern unsigned long __copy_tofrom_user(void __user *to,
 		const void __user *from, unsigned long size);
 
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
 static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
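The processor.h hunks above decouple the maximum user address space (TASK_SIZE_USER64, up to 512TB with Book3S and 64K pages) from the default mmap window (DEFAULT_MAP_WINDOW_USER64, 128TB): the mmap base and stack top track the default window, while addresses above it stay reachable when explicitly requested. A standalone illustration of the arithmetic only, using the constants copied from the hunks; this is plain userspace C, not kernel code from the diff:

#include <stdio.h>

/* Values taken from the processor.h hunks above. */
#define TASK_SIZE_128TB 0x0000800000000000UL
#define TASK_SIZE_512TB 0x0002000000000000UL

int main(void)
{
	unsigned long default_window = TASK_SIZE_128TB;	/* DEFAULT_MAP_WINDOW_USER64 */

	/* TASK_UNMAPPED_BASE_USER64 = PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4) */
	printf("mmap base  : %#lx (32TB)\n", default_window / 4);
	printf("stack top  : %#lx (128TB)\n", default_window);	/* STACK_TOP_USER64 */
	printf("max address: %#lx (512TB)\n", TASK_SIZE_512TB);	/* TASK_SIZE_USER64 */
	return 0;
}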
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 3cdbeaeac397..c23ff4389ca2 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,12 +94,13 @@ struct xive_q {
  * store at 0 and some ESBs support doing a trigger via a
  * separate trigger page.
  */
-#define XIVE_ESB_GET		0x800
-#define XIVE_ESB_SET_PQ_00	0xc00
-#define XIVE_ESB_SET_PQ_01	0xd00
-#define XIVE_ESB_SET_PQ_10	0xe00
-#define XIVE_ESB_SET_PQ_11	0xf00
-#define XIVE_ESB_MASK		XIVE_ESB_SET_PQ_01
+#define XIVE_ESB_STORE_EOI	0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI	0x000 /* Load */
+#define XIVE_ESB_GET		0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00	0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01	0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10	0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11	0xf00 /* Load */
 
 #define XIVE_ESB_VAL_P		0x2
 #define XIVE_ESB_VAL_Q		0x1
@@ -136,11 +137,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
 				       __be32 *qpage, u32 order, bool can_escalate);
 extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
 
-extern bool __xive_irq_trigger(struct xive_irq_data *xd);
-extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
-extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);
-
+extern void xive_native_sync_source(u32 hw_irq);
 extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
 
 #else
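The reworked XIVE_ESB_* constants above are offsets into an interrupt source's ESB MMIO page; per the xive.h comment, a load at XIVE_ESB_GET returns the current P/Q state in the low bits, while loads at the SET_PQ_* offsets update it. A hedged sketch of a consumer, assuming asm/xive.h and asm/io.h; the esb_mmio mapping and helper name are assumptions for illustration, not from this diff:

/* Sketch only: read the P and Q bits of an interrupt source through its
 * ESB page. in_be64() is the usual powerpc big-endian MMIO load accessor. */
static u8 xive_example_peek_pq(void __iomem *esb_mmio)
{
	u64 val = in_be64(esb_mmio + XIVE_ESB_GET);	/* returns PQ in the low two bits */

	return val & (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q);
}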