Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 8
-rw-r--r--  arch/powerpc/Makefile.postlink | 2
l---------  arch/powerpc/boot/dts/include/dt-bindings | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-64k.h | 6
-rw-r--r--  arch/powerpc/include/asm/bug.h | 2
-rw-r--r--  arch/powerpc/include/asm/cpm1.h | 2
-rw-r--r--  arch/powerpc/include/asm/cpu_has_feature.h | 6
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 5
-rw-r--r--  arch/powerpc/include/asm/dt_cpu_ftrs.h | 26
-rw-r--r--  arch/powerpc/include/asm/kprobes.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 28
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 74
-rw-r--r--  arch/powerpc/include/asm/module.h | 4
-rw-r--r--  arch/powerpc/include/asm/page.h | 12
-rw-r--r--  arch/powerpc/include/asm/processor.h | 26
-rw-r--r--  arch/powerpc/include/asm/reg.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 14
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 8
-rw-r--r--  arch/powerpc/include/asm/xive.h | 21
-rw-r--r--  arch/powerpc/include/uapi/asm/Kbuild | 45
-rw-r--r--  arch/powerpc/include/uapi/asm/cputable.h | 9
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 10
-rw-r--r--  arch/powerpc/kernel/cputable.c | 40
-rw-r--r--  arch/powerpc/kernel/dt_cpu_ftrs.c | 1069
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 12
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 15
-rw-r--r--  arch/powerpc/kernel/idle_book3s.S | 2
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 20
-rw-r--r--  arch/powerpc/kernel/process.c | 22
-rw-r--r--  arch/powerpc/kernel/prom.c | 31
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 4
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 45
-rw-r--r--  arch/powerpc/kernel/smp.c | 2
-rw-r--r--  arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 59
-rw-r--r--  arch/powerpc/kvm/Kconfig | 7
-rw-r--r--  arch/powerpc/kvm/Makefile | 8
-rw-r--r--  arch/powerpc/kvm/book3s.c | 75
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 102
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c | 112
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S | 12
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c | 10
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xive.c | 47
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 137
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 70
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 21
-rw-r--r--  arch/powerpc/kvm/book3s_xics.c | 35
-rw-r--r--  arch/powerpc/kvm/book3s_xics.h | 7
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c | 1894
-rw-r--r--  arch/powerpc/kvm/book3s_xive.h | 256
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c | 503
-rw-r--r--  arch/powerpc/kvm/irq.h | 1
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 21
-rw-r--r--  arch/powerpc/mm/dump_linuxpagetables.c | 7
-rw-r--r--  arch/powerpc/mm/hugetlbpage-radix.c | 2
-rw-r--r--  arch/powerpc/mm/mmap.c | 4
-rw-r--r--  arch/powerpc/mm/mmu_context_book3s64.c | 2
-rw-r--r--  arch/powerpc/mm/slice.c | 2
-rw-r--r--  arch/powerpc/perf/perf_regs.c | 3
-rw-r--r--  arch/powerpc/perf/power9-pmu.c | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/coredump.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/npu-dma.c | 102
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/subcore.c | 8
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 2
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 25
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.c | 3
-rw-r--r--  arch/powerpc/sysdev/xive/common.c | 144
-rw-r--r--  arch/powerpc/sysdev/xive/native.c | 86
75 files changed, 5076 insertions, 307 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d8834e8bfb05..6189238e69f8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -146,6 +146,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
@@ -183,7 +184,7 @@ config PPC
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_RCU_GUP
+ select HAVE_GENERIC_GUP
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
@@ -1198,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
-config KEYS_COMPAT
- bool
- depends on COMPAT && KEYS
- default y
-
source "crypto/Kconfig"
config PPC_LIB_RHEAP
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 3c22d64b2de9..eccfcc88afae 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -7,7 +7,7 @@
PHONY := __archpost
__archpost:
-include include/config/auto.conf
+-include include/config/auto.conf
include scripts/Kbuild.include
quiet_cmd_relocs_check = CHKREL $@
diff --git a/arch/powerpc/boot/dts/include/dt-bindings b/arch/powerpc/boot/dts/include/dt-bindings
deleted file mode 120000
index 08c00e4972fa..000000000000
--- a/arch/powerpc/boot/dts/include/dt-bindings
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index b4b5e6b671ca..0c4e470571ca 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -8,7 +8,7 @@
#define H_PTE_INDEX_SIZE 9
#define H_PMD_INDEX_SIZE 7
#define H_PUD_INDEX_SIZE 9
-#define H_PGD_INDEX_SIZE 12
+#define H_PGD_INDEX_SIZE 9
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index 214219dff87c..9732837aaae8 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -2,9 +2,9 @@
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define H_PTE_INDEX_SIZE 8
-#define H_PMD_INDEX_SIZE 5
-#define H_PUD_INDEX_SIZE 5
-#define H_PGD_INDEX_SIZE 15
+#define H_PMD_INDEX_SIZE 10
+#define H_PUD_INDEX_SIZE 7
+#define H_PGD_INDEX_SIZE 8
/*
* 64k aligned address free up few of the lower bits of RPN for us
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index f2c562a0a427..0151af6c2a50 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -104,7 +104,7 @@
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
- "i" (BUGFLAG_TAINT(TAINT_WARN)), \
+ "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
"i" (sizeof(struct bug_entry)), \
"r" (__ret_warn_on)); \
} \
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index 8ee4211ca0c6..14ad37865000 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -560,6 +560,8 @@ typedef struct risc_timer_pram {
#define CPM_PIN_SECONDARY 2
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
+#define CPM_PIN_FALLEDGE 16
+#define CPM_PIN_ANYEDGE 0
enum cpm_port {
CPM_PORTA,
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 6e834caa3720..0d1df02bf99d 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -1,5 +1,5 @@
-#ifndef __ASM_POWERPC_CPUFEATURES_H
-#define __ASM_POWERPC_CPUFEATURES_H
+#ifndef __ASM_POWERPC_CPU_HAS_FEATURE_H
+#define __ASM_POWERPC_CPU_HAS_FEATURE_H
#ifndef __ASSEMBLY__
@@ -52,4 +52,4 @@ static inline bool cpu_has_feature(unsigned long feature)
#endif
#endif /* __ASSEMBLY__ */
-#endif /* __ASM_POWERPC_CPUFEATURE_H */
+#endif /* __ASM_POWERPC_CPU_HAS_FEATURE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1f6847b107e4..d02ad93bf708 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -118,7 +118,9 @@ extern struct cpu_spec *cur_cpu_spec;
extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+extern void set_cur_cpu_spec(struct cpu_spec *s);
extern struct cpu_spec *identify_cpu(unsigned long offset, unsigned int pvr);
+extern void identify_cpu_name(unsigned int pvr);
extern void do_feature_fixups(unsigned long value, void *fixup_start,
void *fixup_end);
@@ -212,7 +214,6 @@ enum {
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000)
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
#ifndef __ASSEMBLY__
@@ -461,7 +462,7 @@ enum {
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
- CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
#define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
diff --git a/arch/powerpc/include/asm/dt_cpu_ftrs.h b/arch/powerpc/include/asm/dt_cpu_ftrs.h
new file mode 100644
index 000000000000..7a34fc11bf63
--- /dev/null
+++ b/arch/powerpc/include/asm/dt_cpu_ftrs.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_POWERPC_DT_CPU_FTRS_H
+#define __ASM_POWERPC_DT_CPU_FTRS_H
+
+/*
+ * Copyright 2017, IBM Corporation
+ * cpufeatures is the new way to discover CPU features with /cpus/features
+ * devicetree. This supersedes PVR based discovery ("cputable"), and older
+ * device tree feature advertisement.
+ */
+
+#include <linux/types.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+#include <uapi/asm/cputable.h>
+
+#ifdef CONFIG_PPC_DT_CPU_FTRS
+bool dt_cpu_ftrs_init(void *fdt);
+void dt_cpu_ftrs_scan(void);
+bool dt_cpu_ftrs_in_use(void);
+#else
+static inline bool dt_cpu_ftrs_init(void *fdt) { return false; }
+static inline void dt_cpu_ftrs_scan(void) { }
+static inline bool dt_cpu_ftrs_in_use(void) { return false; }
+#endif
+
+#endif /* __ASM_POWERPC_DT_CPU_FTRS_H */
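The new header boils down to a pair of entry points plus an "am I in use" query, with no-op stubs when CONFIG_PPC_DT_CPU_FTRS is off. A hypothetical caller, sketched only from these declarations and the cputable.h ones above (the real call sites are in the prom.c/setup_64.c hunks of this series and differ in detail):

    #include <asm/cputable.h>
    #include <asm/dt_cpu_ftrs.h>

    static void __init hypothetical_cpu_feature_setup(void *fdt, unsigned int pvr)
    {
            if (!dt_cpu_ftrs_init(fdt))     /* no ibm,powerpc-cpu-features node */
                    identify_cpu(0, pvr);   /* fall back to the PVR cputable    */

            /* ... later in boot, once the flat tree can be scanned fully: */
            dt_cpu_ftrs_scan();             /* no-op unless init succeeded      */
    }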
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index a83821f33ea3..8814a7249ceb 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_handler(struct pt_regs *regs);
extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
#ifdef CONFIG_KPROBES_ON_FTRACE
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb);
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0593d9479f74..b148496ffe36 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -111,6 +111,8 @@ struct kvmppc_host_state {
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
void __iomem *xics_phys;
+ void __iomem *xive_tima_phys;
+ void __iomem *xive_tima_virt;
u32 saved_xirr;
u64 dabr;
u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 77c60826d145..9c51ac4b8f36 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -210,6 +210,12 @@ struct kvmppc_spapr_tce_table {
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;
struct kvmppc_passthru_irqmap;
@@ -298,6 +304,7 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
+ struct kvmppc_xive *xive;
struct kvmppc_passthru_irqmap *pimap;
#endif
struct kvmppc_ops *kvm_ops;
@@ -427,7 +434,7 @@ struct kvmppc_passthru_irqmap {
#define KVMPPC_IRQ_DEFAULT 0
#define KVMPPC_IRQ_MPIC 1
-#define KVMPPC_IRQ_XICS 2
+#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */
#define MMIO_HPTE_CACHE_SIZE 4
@@ -454,6 +461,21 @@ struct mmio_hpte_cache {
struct openpic;
+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+ struct {
+ u8 nsr;
+ u8 cppr;
+ u8 ipb;
+ u8 lsmfb;
+ u8 ack;
+ u8 inc;
+ u8 age;
+ u8 pipr;
+ };
+ __be64 w01;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -714,6 +736,10 @@ struct kvm_vcpu_arch {
struct openpic *mpic; /* KVM_IRQ_MPIC */
#ifdef CONFIG_KVM_XICS
struct kvmppc_icp *icp; /* XICS presentation controller */
+ struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+ __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
+ u32 xive_pushed; /* Is the VP pushed on the physical CPU ? */
+ union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 76e940a3c145..e0d88c38602b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -240,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
@@ -428,6 +429,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{
+ paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+ paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
+}
+
static inline u32 kvmppc_get_xics_latch(void)
{
u32 xirr;
@@ -458,6 +467,11 @@ static inline void __init kvm_cma_reserve(void)
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
+static inline void kvmppc_set_xive_tima(int cpu,
+ unsigned long phys_addr,
+ void __iomem *virt_addr)
+{}
+
static inline u32 kvmppc_get_xics_latch(void)
{
return 0;
@@ -508,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
struct kvmppc_irq_map *irq_map,
struct kvmppc_passthru_irqmap *pimap,
bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -525,6 +543,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif
+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
+ * ie. P9 new interrupt controller, while the second "xive" is the legacy
+ * "eXternal Interrupt Vector Entry" which is the configuration of an
+ * interrupt on the "xics" interrupt controller on P8 and earlier. Those
+ * two functions consume or produce a legacy "XIVE" state from the
+ * new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+ int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
/*
* Prototypes for functions called only from assembler code.
* Having prototypes reduces sparse errors.
@@ -562,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 53885512b8d3..6c0132c7212f 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -14,6 +14,10 @@
#include <asm-generic/module.h>
+#ifdef CC_USING_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC "mprofile-kernel"
+#endif
+
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 2a32483c7b6c..8da5d4c1cab2 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -132,7 +132,19 @@ extern long long virt_phys_offset;
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * On hash the vmalloc and other regions alias to the kernel region when passed
+ * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can
+ * return true for some vmalloc addresses, which is incorrect. So explicitly
+ * check that the address is in the kernel region.
+ */
+#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \
+ pfn_valid(virt_to_pfn(kaddr)))
+#else
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#endif
/*
* On Book-E parts we need __va to parse the device tree and we can't
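A minimal standalone sketch of the aliasing described in the comment above, assuming the usual book3s-64 hash layout (linear map at 0xc..., vmalloc at 0xd...) and a simplified __pa() that just strips the top nibble -- not the kernel's actual macros:

    #include <stdio.h>

    #define REGION_ID(addr)    ((unsigned long)(addr) >> 60)
    #define KERNEL_REGION_ID   0xcUL
    #define FAKE_PA(addr)      ((unsigned long)(addr) & 0x0fffffffffffffffUL)

    int main(void)
    {
            unsigned long lin = 0xc000000001230000UL;   /* linear-map address */
            unsigned long vml = 0xd000000001230000UL;   /* vmalloc address    */

            /* Both produce the same "physical" address ... */
            printf("%lx %lx\n", FAKE_PA(lin), FAKE_PA(vml));

            /* ... so only the region check tells them apart. */
            printf("%d %d\n", REGION_ID(lin) == KERNEL_REGION_ID,
                              REGION_ID(vml) == KERNEL_REGION_ID);
            return 0;
    }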
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a4b1d8d6b793..1189d04f3bd1 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
#define TASK_SIZE_128TB (0x0000800000000000UL)
#define TASK_SIZE_512TB (0x0002000000000000UL)
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
/*
* Max value currently used:
*/
-#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define TASK_SIZE_USER64 TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB
#else
-#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define TASK_SIZE_USER64 TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
#endif
/*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,16 +148,15 @@ void release_thread(struct task_struct *);
* with 128TB and conditionally enable upto 512TB
*/
#ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
- TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
+ TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#endif
#ifdef __powerpc64__
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32
#define STACK_TOP (is_32bit_task() ? \
@@ -374,12 +378,6 @@ struct thread_struct {
}
#endif
-/*
- * Return saved PC of a blocked thread. For now, this is the "user" PC
- */
-#define thread_saved_pc(tsk) \
- ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
-
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
unsigned long get_wchan(struct task_struct *p);
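Rough arithmetic behind the new DEFAULT_MAP_WINDOW_USER64 plumbing, as a self-contained sketch (values only; the real macros also go through PAGE_ALIGN and the 32-bit task checks):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long tb = 1ULL << 40;
            unsigned long long window[] = { 128 * tb, 64 * tb };

            /* TASK_UNMAPPED_BASE_USER64 = window / 4, STACK_TOP_USER64 = window */
            for (int i = 0; i < 2; i++)
                    printf("window %lluTB: unmapped base %lluTB, stack top %lluTB\n",
                           window[i] / tb, (window[i] / 4) / tb, window[i] / tb);
            return 0;
    }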
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index d4f653c9259a..7e50e47375d6 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1229,6 +1229,7 @@
#define PVR_POWER8E 0x004B
#define PVR_POWER8NVL 0x004C
#define PVR_POWER8 0x004D
+#define PVR_POWER9 0x004E
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8b3b46b7b0f2..329771559cbb 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+static inline int early_cpu_to_node(int cpu)
+{
+ int nid;
+
+ nid = numa_cpu_lookup_table[cpu];
+
+ /*
+ * Fall back to node 0 if nid is unset (it should be set, barring bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+ return (nid < 0) ? 0 : nid;
+}
#else
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
static inline void dump_numa_cpu_topology(void) {}
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 5c0d8a8cdae5..41e88d3ce36b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -267,13 +267,7 @@ do { \
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
-#ifndef __powerpc64__
-
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#else /* __powerpc64__ */
-
+#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 3cdbeaeac397..c23ff4389ca2 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -94,12 +94,13 @@ struct xive_q {
* store at 0 and some ESBs support doing a trigger via a
* separate trigger page.
*/
-#define XIVE_ESB_GET 0x800
-#define XIVE_ESB_SET_PQ_00 0xc00
-#define XIVE_ESB_SET_PQ_01 0xd00
-#define XIVE_ESB_SET_PQ_10 0xe00
-#define XIVE_ESB_SET_PQ_11 0xf00
-#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
@@ -136,11 +137,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate);
extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
-extern bool __xive_irq_trigger(struct xive_irq_data *xd);
-extern bool __xive_irq_retrigger(struct xive_irq_data *xd);
-extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd);
-
+extern void xive_native_sync_source(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
#else
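A sketch of the access pattern the "Load"/"Store" annotations above imply, using generic volatile MMIO rather than the kernel's accessors -- illustrative only: the load offsets query or set the PQ bits, while the 0x400 offset is written to EOI on sources that support store-EOI.

    #include <stdint.h>

    static inline uint64_t esb_load(volatile uint8_t *esb_mmio, unsigned long offset)
    {
            return *(volatile uint64_t *)(esb_mmio + offset);   /* e.g. XIVE_ESB_GET */
    }

    static inline void esb_store_eoi(volatile uint8_t *esb_mmio)
    {
            *(volatile uint64_t *)(esb_mmio + 0x400) = 0;       /* XIVE_ESB_STORE_EOI */
    }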
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index dab3717e3ea0..b15bf6bc0e94 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,47 +1,2 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += bootx.h
-header-y += byteorder.h
-header-y += cputable.h
-header-y += eeh.h
-header-y += elf.h
-header-y += epapr_hcalls.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += nvram.h
-header-y += opal-prd.h
-header-y += param.h
-header-y += perf_event.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ps3fb.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += spu_info.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += tm.h
-header-y += types.h
-header-y += ucontext.h
-header-y += unistd.h
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index f63c96cd3608..4d877144f377 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -46,5 +46,14 @@
#define PPC_FEATURE2_HTM_NOSC 0x01000000
#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
+
+/*
+ * IMPORTANT!
+ * All future PPC_FEATURE definitions should be allocated in cooperation with
+ * OPAL / skiboot firmware, in accordance with the ibm,powerpc-cpu-features
+ * device tree binding.
+ */
#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
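These bits surface to userspace through the ELF auxiliary vector. A minimal, illustrative check -- the mask values are taken from the hunk above; the program itself is not part of this patch:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define PPC_FEATURE2_DARN 0x00200000
    #define PPC_FEATURE2_SCV  0x00100000

    int main(void)
    {
            unsigned long hwcap2 = getauxval(AT_HWCAP2);

            printf("darn: %s\n", (hwcap2 & PPC_FEATURE2_DARN) ? "yes" : "no");
            printf("scv:  %s\n", (hwcap2 & PPC_FEATURE2_SCV)  ? "yes" : "no");
            return 0;
    }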
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b9db46ae545b..e132902e1f14 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
+obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o
obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 439c257dec4a..709e23425317 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -634,6 +634,8 @@ int main(void)
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
+ HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
@@ -719,6 +721,14 @@ int main(void)
OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
#endif
+#ifdef CONFIG_KVM_XICS
+ DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
+ arch.xive_saved_state));
+ DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
+ arch.xive_cam_word));
+ DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e79b9daa873c..6f849832a669 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -23,7 +23,9 @@
#include <asm/mmu.h>
#include <asm/setup.h>
-struct cpu_spec* cur_cpu_spec = NULL;
+static struct cpu_spec the_cpu_spec __read_mostly;
+
+struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
@@ -122,7 +124,8 @@ extern void __restore_cpu_e6500(void);
#define COMMON_USER_POWER9 COMMON_USER_POWER8
#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
PPC_FEATURE2_ARCH_3_00 | \
- PPC_FEATURE2_HAS_IEEE128)
+ PPC_FEATURE2_HAS_IEEE128 | \
+ PPC_FEATURE2_DARN )
#ifdef CONFIG_PPC_BOOK3E_64
#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
@@ -2179,7 +2182,15 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif /* CONFIG_E500 */
};
-static struct cpu_spec the_cpu_spec;
+void __init set_cur_cpu_spec(struct cpu_spec *s)
+{
+ struct cpu_spec *t = &the_cpu_spec;
+
+ t = PTRRELOC(t);
+ *t = *s;
+
+ *PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
+}
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
struct cpu_spec *s)
@@ -2266,6 +2277,29 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
return NULL;
}
+/*
+ * Used by cpufeatures to get the name for CPUs with a PVR table.
+ * If they don't have a PVR table, cpufeatures gets the name from
+ * cpu device-tree node.
+ */
+void __init identify_cpu_name(unsigned int pvr)
+{
+ struct cpu_spec *s = cpu_specs;
+ struct cpu_spec *t = &the_cpu_spec;
+ int i;
+
+ s = PTRRELOC(s);
+ t = PTRRELOC(t);
+
+ for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
+ if ((pvr & s->pvr_mask) == s->pvr_value) {
+ t->cpu_name = s->cpu_name;
+ return;
+ }
+ }
+}
+
+
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
new file mode 100644
index 000000000000..4c7656dc4e04
--- /dev/null
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -0,0 +1,1069 @@
+/*
+ * Copyright 2017, Nicholas Piggin, IBM Corporation
+ * Licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) "dt-cpu-ftrs: " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/libfdt.h>
+#include <linux/memblock.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/threads.h>
+
+#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
+#include <asm/mmu.h>
+#include <asm/oprofile_impl.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+
+
+/* Device-tree visible constants follow */
+#define ISA_V2_07B 2070
+#define ISA_V3_0B 3000
+
+#define USABLE_PR (1U << 0)
+#define USABLE_OS (1U << 1)
+#define USABLE_HV (1U << 2)
+
+#define HV_SUPPORT_HFSCR (1U << 0)
+#define OS_SUPPORT_FSCR (1U << 0)
+
+/* For parsing, we define all bits set as "NONE" case */
+#define HV_SUPPORT_NONE 0xffffffffU
+#define OS_SUPPORT_NONE 0xffffffffU
+
+struct dt_cpu_feature {
+ const char *name;
+ uint32_t isa;
+ uint32_t usable_privilege;
+ uint32_t hv_support;
+ uint32_t os_support;
+ uint32_t hfscr_bit_nr;
+ uint32_t fscr_bit_nr;
+ uint32_t hwcap_bit_nr;
+ /* fdt parsing */
+ unsigned long node;
+ int enabled;
+ int disabled;
+};
+
+#define CPU_FTRS_BASE \
+ (CPU_FTR_USE_TB | \
+ CPU_FTR_LWSYNC | \
+ CPU_FTR_FPU_UNAVAILABLE |\
+ CPU_FTR_NODSISRALIGN |\
+ CPU_FTR_NOEXECUTE |\
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_STCX_CHECKS_ADDRESS |\
+ CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_DAWR | \
+ CPU_FTR_ARCH_206 |\
+ CPU_FTR_ARCH_207S)
+
+#define MMU_FTRS_HASH_BASE (MMU_FTRS_POWER8)
+
+#define COMMON_USER_BASE (PPC_FEATURE_32 | PPC_FEATURE_64 | \
+ PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER2_BASE (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_ISEL)
+/*
+ * Set up the base CPU
+ */
+
+extern void __flush_tlb_power8(unsigned int action);
+extern void __flush_tlb_power9(unsigned int action);
+extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
+
+static int hv_mode;
+
+static struct {
+ u64 lpcr;
+ u64 hfscr;
+ u64 fscr;
+} system_registers;
+
+static void (*init_pmu_registers)(void);
+
+static void cpufeatures_flush_tlb(void)
+{
+ unsigned long rb;
+ unsigned int i, num_sets;
+
+ /*
+ * This is a temporary measure to keep equivalent TLB flush as the
+ * cputable based setup code.
+ */
+ switch (PVR_VER(mfspr(SPRN_PVR))) {
+ case PVR_POWER8:
+ case PVR_POWER8E:
+ case PVR_POWER8NVL:
+ num_sets = POWER8_TLB_SETS;
+ break;
+ case PVR_POWER9:
+ num_sets = POWER9_TLB_SETS_HASH;
+ break;
+ default:
+ num_sets = 1;
+ pr_err("unknown CPU version for boot TLB flush\n");
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ rb = TLBIEL_INVAL_SET;
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+static void __restore_cpu_cpufeatures(void)
+{
+ /*
+ * LPCR is restored by the power on engine already. It can be changed
+ * after early init e.g., by radix enable, and we have no unified API
+ * for saving and restoring such SPRs.
+ *
+ * This ->restore hook should really be removed from idle and register
+ * restore moved directly into the idle restore code, because this code
+ * doesn't know how idle is implemented or what it needs restored here.
+ *
+ * The best we can do to accommodate secondary boot and idle restore
+ * for now is "or" LPCR with existing.
+ */
+
+ mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
+ if (hv_mode) {
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_HFSCR, system_registers.hfscr);
+ }
+ mtspr(SPRN_FSCR, system_registers.fscr);
+
+ if (init_pmu_registers)
+ init_pmu_registers();
+
+ cpufeatures_flush_tlb();
+}
+
+static char dt_cpu_name[64];
+
+static struct cpu_spec __initdata base_cpu_spec = {
+ .cpu_name = NULL,
+ .cpu_features = CPU_FTRS_BASE,
+ .cpu_user_features = COMMON_USER_BASE,
+ .cpu_user_features2 = COMMON_USER2_BASE,
+ .mmu_features = 0,
+ .icache_bsize = 32, /* minimum block size, fixed by */
+ .dcache_bsize = 32, /* cache info init. */
+ .num_pmcs = 0,
+ .pmc_type = PPC_PMC_DEFAULT,
+ .oprofile_cpu_type = NULL,
+ .oprofile_type = PPC_OPROFILE_INVALID,
+ .cpu_setup = NULL,
+ .cpu_restore = __restore_cpu_cpufeatures,
+ .flush_tlb = NULL,
+ .machine_check_early = NULL,
+ .platform = NULL,
+};
+
+static void __init cpufeatures_setup_cpu(void)
+{
+ set_cur_cpu_spec(&base_cpu_spec);
+
+ cur_cpu_spec->pvr_mask = -1;
+ cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);
+
+ /* Initialize the base environment -- clear FSCR/HFSCR. */
+ hv_mode = !!(mfmsr() & MSR_HV);
+ if (hv_mode) {
+ /* CPU_FTR_HVMODE is used early in PACA setup */
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ mtspr(SPRN_HFSCR, 0);
+ }
+ mtspr(SPRN_FSCR, 0);
+
+ /*
+ * LPCR does not get cleared, to match behaviour with secondaries
+ * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
+ * could clear LPCR too.
+ */
+}
+
+static int __init feat_try_enable_unknown(struct dt_cpu_feature *f)
+{
+ if (f->hv_support == HV_SUPPORT_NONE) {
+ } else if (f->hv_support & HV_SUPPORT_HFSCR) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_NONE) {
+ } else if (f->os_support & OS_SUPPORT_FSCR) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ } else {
+ /* Does not have a known recipe */
+ return 0;
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("%s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_enable(struct dt_cpu_feature *f)
+{
+ if (f->hv_support != HV_SUPPORT_NONE) {
+ if (f->hfscr_bit_nr != -1) {
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= 1UL << f->hfscr_bit_nr;
+ mtspr(SPRN_HFSCR, hfscr);
+ }
+ }
+
+ if (f->os_support != OS_SUPPORT_NONE) {
+ if (f->fscr_bit_nr != -1) {
+ u64 fscr = mfspr(SPRN_FSCR);
+ fscr |= 1UL << f->fscr_bit_nr;
+ mtspr(SPRN_FSCR, fscr);
+ }
+ }
+
+ if ((f->usable_privilege & USABLE_PR) && (f->hwcap_bit_nr != -1)) {
+ uint32_t word = f->hwcap_bit_nr / 32;
+ uint32_t bit = f->hwcap_bit_nr % 32;
+
+ if (word == 0)
+ cur_cpu_spec->cpu_user_features |= 1U << bit;
+ else if (word == 1)
+ cur_cpu_spec->cpu_user_features2 |= 1U << bit;
+ else
+ pr_err("CPU feature: %s could not advertise to user (no hwcap bits)\n", f->name);
+ }
+
+ return 1;
+}
+
+static int __init feat_disable(struct dt_cpu_feature *f)
+{
+ return 0;
+}
+
+static int __init feat_enable_hv(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ if (!hv_mode) {
+ pr_err("CPU feature hypervisor present in device tree but HV mode not enabled in the CPU. Ignoring.\n");
+ return 0;
+ }
+
+ mtspr(SPRN_LPID, 0);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_LPES0; /* HV external interrupts */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+
+ return 1;
+}
+
+static int __init feat_enable_le(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_TRUE_LE;
+ return 1;
+}
+
+static int __init feat_enable_smt(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_SMT;
+ return 1;
+}
+
+static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISA 207 */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;
+
+ return 1;
+}
+
+static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* Set PECE wakeup modes for ISAv3.0B */
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECE0;
+ lpcr |= LPCR_PECE1;
+ lpcr |= LPCR_PECE2;
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+
+ /* VRMASD */
+ lpcr |= LPCR_VPM0;
+ lpcr &= ~LPCR_VPM1;
+ lpcr |= 0x10UL << LPCR_VRMASD_SH; /* L=1 LP=00 */
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_ISL;
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+}
+
+
+static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_RADIX_MMU
+ cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_dscr(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ feat_enable(f);
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr &= ~LPCR_DPFD;
+ lpcr |= (4UL << LPCR_DPFD_SH);
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static void hfscr_pmu_enable(void)
+{
+ u64 hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= PPC_BIT(60);
+ mtspr(SPRN_HFSCR, hfscr);
+}
+
+static void init_pmu_power8(void)
+{
+ if (hv_mode) {
+ mtspr(SPRN_MMCRC, 0);
+ mtspr(SPRN_MMCRH, 0);
+ }
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+ mtspr(SPRN_MMCRS, 0);
+}
+
+static int __init feat_enable_mce_power8(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power8";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power8;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p8;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power8();
+ init_pmu_registers = init_pmu_power8;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+ if (pvr_version_is(PVR_POWER8E))
+ cur_cpu_spec->cpu_features |= CPU_FTR_PMAO_BUG;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
+
+ return 1;
+}
+
+static void init_pmu_power9(void)
+{
+ if (hv_mode)
+ mtspr(SPRN_MMCRC, 0);
+
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+}
+
+static int __init feat_enable_mce_power9(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power9";
+ cur_cpu_spec->flush_tlb = __flush_tlb_power9;
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p9;
+
+ return 1;
+}
+
+static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power9();
+ init_pmu_registers = init_pmu_power9;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+ cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
+
+ return 1;
+}
+
+static int __init feat_enable_tm(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ feat_enable(f);
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NOSC;
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_fp(struct dt_cpu_feature *f)
+{
+ feat_enable(f);
+ cur_cpu_spec->cpu_features &= ~CPU_FTR_FPU_UNAVAILABLE;
+
+ return 1;
+}
+
+static int __init feat_enable_vector(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_ALTIVEC
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
+ cur_cpu_spec->cpu_features |= CPU_FTR_VMX_COPY;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_vsx(struct dt_cpu_feature *f)
+{
+#ifdef CONFIG_VSX
+ feat_enable(f);
+ cur_cpu_spec->cpu_features |= CPU_FTR_VSX;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_VSX;
+
+ return 1;
+#endif
+ return 0;
+}
+
+static int __init feat_enable_purr(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->cpu_features |= CPU_FTR_PURR | CPU_FTR_SPURR;
+
+ return 1;
+}
+
+static int __init feat_enable_ebb(struct dt_cpu_feature *f)
+{
+ /*
+ * PPC_FEATURE2_EBB is enabled in PMU init code because it has
+ * historically been related to the PMU facility. This may have
+ * to be decoupled if EBB becomes more generic. For now, follow
+ * existing convention.
+ */
+ f->hwcap_bit_nr = -1;
+ feat_enable(f);
+
+ return 1;
+}
+
+static int __init feat_enable_dbell(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /* P9 has an HFSCR for privileged state */
+ feat_enable(f);
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_DBELL;
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_PECEDH; /* hyp doorbell wakeup */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_hvi(struct dt_cpu_feature *f)
+{
+ u64 lpcr;
+
+ /*
+ * POWER9 XIVE interrupts, including those in OPAL XICS compatibility mode,
+ * are always delivered as hypervisor virtualization interrupts (HVI)
+ * rather than EE.
+ *
+ * However LPES0 is not set here, in the chance that an EE does get
+ * delivered to the host somehow, the EE handler would not expect it
+ * to be delivered in LPES0 mode (e.g., using SRR[01]). This could
+ * happen if there is a bug in interrupt controller code, or IC is
+ * misconfigured in systemsim.
+ */
+
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= LPCR_HVICE; /* enable hvi interrupts */
+ lpcr |= LPCR_HEIC; /* disable ee interrupts when MSR_HV */
+ lpcr |= LPCR_PECE_HVEE; /* hvi can wake from stop */
+ mtspr(SPRN_LPCR, lpcr);
+
+ return 1;
+}
+
+static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->mmu_features |= MMU_FTR_CI_LARGE_PAGE;
+
+ return 1;
+}
+
+struct dt_cpu_feature_match {
+ const char *name;
+ int (*enable)(struct dt_cpu_feature *f);
+ u64 cpu_ftr_bit_mask;
+};
+
+static struct dt_cpu_feature_match __initdata
+ dt_cpu_feature_match_table[] = {
+ {"hypervisor", feat_enable_hv, 0},
+ {"big-endian", feat_enable, 0},
+ {"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
+ {"smt", feat_enable_smt, 0},
+ {"interrupt-facilities", feat_enable, 0},
+ {"timer-facilities", feat_enable, 0},
+ {"timer-facilities-v3", feat_enable, 0},
+ {"debug-facilities", feat_enable, 0},
+ {"come-from-address-register", feat_enable, CPU_FTR_CFAR},
+ {"branch-tracing", feat_enable, 0},
+ {"floating-point", feat_enable_fp, 0},
+ {"vector", feat_enable_vector, 0},
+ {"vector-scalar", feat_enable_vsx, 0},
+ {"vector-scalar-v3", feat_enable, 0},
+ {"decimal-floating-point", feat_enable, 0},
+ {"decimal-integer", feat_enable, 0},
+ {"quadword-load-store", feat_enable, 0},
+ {"vector-crypto", feat_enable, 0},
+ {"mmu-hash", feat_enable_mmu_hash, 0},
+ {"mmu-radix", feat_enable_mmu_radix, 0},
+ {"mmu-hash-v3", feat_enable_mmu_hash_v3, 0},
+ {"virtual-page-class-key-protection", feat_enable, 0},
+ {"transactional-memory", feat_enable_tm, CPU_FTR_TM},
+ {"transactional-memory-v3", feat_enable_tm, 0},
+ {"idle-nap", feat_enable_idle_nap, 0},
+ {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
+ {"idle-stop", feat_enable_idle_stop, 0},
+ {"machine-check-power8", feat_enable_mce_power8, 0},
+ {"performance-monitor-power8", feat_enable_pmu_power8, 0},
+ {"data-stream-control-register", feat_enable_dscr, CPU_FTR_DSCR},
+ {"event-based-branch", feat_enable_ebb, 0},
+ {"target-address-register", feat_enable, 0},
+ {"branch-history-rolling-buffer", feat_enable, 0},
+ {"control-register", feat_enable, CPU_FTR_CTRL},
+ {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
+ {"processor-utilization-of-resources-register", feat_enable_purr, 0},
+ {"no-execute", feat_enable, 0},
+ {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
+ {"cache-inhibited-large-page", feat_enable_large_ci, 0},
+ {"coprocessor-icswx", feat_enable, CPU_FTR_ICSWX},
+ {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
+ {"program-priority-register", feat_enable, CPU_FTR_HAS_PPR},
+ {"wait", feat_enable, 0},
+ {"atomic-memory-operations", feat_enable, 0},
+ {"branch-v3", feat_enable, 0},
+ {"copy-paste", feat_enable, 0},
+ {"decimal-floating-point-v3", feat_enable, 0},
+ {"decimal-integer-v3", feat_enable, 0},
+ {"fixed-point-v3", feat_enable, 0},
+ {"floating-point-v3", feat_enable, 0},
+ {"group-start-register", feat_enable, 0},
+ {"pc-relative-addressing", feat_enable, 0},
+ {"machine-check-power9", feat_enable_mce_power9, 0},
+ {"performance-monitor-power9", feat_enable_pmu_power9, 0},
+ {"event-based-branch-v3", feat_enable, 0},
+ {"random-number-generator", feat_enable, 0},
+ {"system-call-vectored", feat_disable, 0},
+ {"trace-interrupt-v3", feat_enable, 0},
+ {"vector-v3", feat_enable, 0},
+ {"vector-binary128", feat_enable, 0},
+ {"vector-binary16", feat_enable, 0},
+ {"wait-v3", feat_enable, 0},
+};
+
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+ if (!str)
+ return 0;
+
+ if (!strcmp(str, "off"))
+ using_dt_cpu_ftrs = false;
+ else if (!strcmp(str, "known"))
+ enable_unknown = false;
+ else
+ return 1;
+
+ return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
+
+static void __init cpufeatures_setup_start(u32 isa)
+{
+ pr_info("setup for ISA %d\n", isa);
+
+ if (isa >= 3000) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
+ }
+}
+
+static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+{
+ const struct dt_cpu_feature_match *m;
+ bool known = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dt_cpu_feature_match_table); i++) {
+ m = &dt_cpu_feature_match_table[i];
+ if (!strcmp(f->name, m->name)) {
+ known = true;
+ if (m->enable(f))
+ break;
+
+ pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (!known && enable_unknown) {
+ if (!feat_try_enable_unknown(f)) {
+ pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
+ f->name);
+ return false;
+ }
+ }
+
+ if (m->cpu_ftr_bit_mask)
+ cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+
+ if (known)
+ pr_debug("enabling: %s\n", f->name);
+ else
+ pr_debug("enabling: %s (unknown)\n", f->name);
+
+ return true;
+}
+
+static __init void cpufeatures_cpu_quirks(void)
+{
+ int version = mfspr(SPRN_PVR);
+
+ /*
+ * Not all quirks can be derived from the cpufeatures device tree.
+ */
+ if ((version & 0xffffff00) == 0x004e0100)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
+}
+
+static void __init cpufeatures_setup_finished(void)
+{
+ cpufeatures_cpu_quirks();
+
+ if (hv_mode && !(cur_cpu_spec->cpu_features & CPU_FTR_HVMODE)) {
+ pr_err("hypervisor not present in device tree but HV mode is enabled in the CPU. Enabling.\n");
+ cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
+ }
+
+ system_registers.lpcr = mfspr(SPRN_LPCR);
+ system_registers.hfscr = mfspr(SPRN_HFSCR);
+ system_registers.fscr = mfspr(SPRN_FSCR);
+
+ cpufeatures_flush_tlb();
+
+ pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
+ cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
+}
+
+static int __init disabled_on_cmdline(void)
+{
+ unsigned long root, chosen;
+ const char *p;
+
+ root = of_get_flat_dt_root();
+ chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+ if (chosen == -FDT_ERR_NOTFOUND)
+ return false;
+
+ p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+ if (!p)
+ return false;
+
+ if (strstr(p, "dt_cpu_ftrs=off"))
+ return true;
+
+ return false;
+}
+
+static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ if (of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features")
+ && of_get_flat_dt_prop(node, "isa", NULL))
+ return 1;
+
+ return 0;
+}
+
+bool __init dt_cpu_ftrs_in_use(void)
+{
+ return using_dt_cpu_ftrs;
+}
+
+bool __init dt_cpu_ftrs_init(void *fdt)
+{
+ using_dt_cpu_ftrs = false;
+
+ /* Setup and verify the FDT, if it fails we just bail */
+ if (!early_init_dt_verify(fdt))
+ return false;
+
+ if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
+ return false;
+
+ if (disabled_on_cmdline())
+ return false;
+
+ cpufeatures_setup_cpu();
+
+ using_dt_cpu_ftrs = true;
+ return true;
+}
+
+static int nr_dt_cpu_features;
+static struct dt_cpu_feature *dt_cpu_features;
+
+static int __init process_cpufeatures_node(unsigned long node,
+ const char *uname, int i)
+{
+ const __be32 *prop;
+ struct dt_cpu_feature *f;
+ int len;
+
+ f = &dt_cpu_features[i];
+ memset(f, 0, sizeof(struct dt_cpu_feature));
+
+ f->node = node;
+
+ f->name = uname;
+
+ prop = of_get_flat_dt_prop(node, "isa", &len);
+ if (!prop) {
+ pr_warn("%s: missing isa property\n", uname);
+ return 0;
+ }
+ f->isa = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "usable-privilege", &len);
+ if (!prop) {
+ pr_warn("%s: missing usable-privilege property", uname);
+ return 0;
+ }
+ f->usable_privilege = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "hv-support", &len);
+ if (prop)
+ f->hv_support = be32_to_cpup(prop);
+ else
+ f->hv_support = HV_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "os-support", &len);
+ if (prop)
+ f->os_support = be32_to_cpup(prop);
+ else
+ f->os_support = OS_SUPPORT_NONE;
+
+ prop = of_get_flat_dt_prop(node, "hfscr-bit-nr", &len);
+ if (prop)
+ f->hfscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->hfscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "fscr-bit-nr", &len);
+ if (prop)
+ f->fscr_bit_nr = be32_to_cpup(prop);
+ else
+ f->fscr_bit_nr = -1;
+ prop = of_get_flat_dt_prop(node, "hwcap-bit-nr", &len);
+ if (prop)
+ f->hwcap_bit_nr = be32_to_cpup(prop);
+ else
+ f->hwcap_bit_nr = -1;
+
+ if (f->usable_privilege & USABLE_HV) {
+ if (!(mfmsr() & MSR_HV)) {
+ pr_warn("%s: HV feature passed to guest\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_NONE && f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hfscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->hv_support == HV_SUPPORT_HFSCR) {
+ if (f->hfscr_bit_nr == -1) {
+ pr_warn("%s: missing hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->hv_support != HV_SUPPORT_NONE || f->hfscr_bit_nr != -1) {
+ pr_warn("%s: unwanted hv_support/hfscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (f->usable_privilege & USABLE_OS) {
+ if (f->os_support == OS_SUPPORT_NONE && f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted fscr_bit_nr\n", uname);
+ return 0;
+ }
+
+ if (f->os_support == OS_SUPPORT_FSCR) {
+ if (f->fscr_bit_nr == -1) {
+ pr_warn("%s: missing fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+ } else {
+ if (f->os_support != OS_SUPPORT_NONE || f->fscr_bit_nr != -1) {
+ pr_warn("%s: unwanted os_support/fscr_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ if (!(f->usable_privilege & USABLE_PR)) {
+ if (f->hwcap_bit_nr != -1) {
+ pr_warn("%s: unwanted hwcap_bit_nr\n", uname);
+ return 0;
+ }
+ }
+
+ /* Do all the independent features in the first pass */
+ if (!of_get_flat_dt_prop(node, "dependencies", &len)) {
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+ }
+
+ return 0;
+}
+
+static void __init cpufeatures_deps_enable(struct dt_cpu_feature *f)
+{
+ const __be32 *prop;
+ int len;
+ int nr_deps;
+ int i;
+
+ if (f->enabled || f->disabled)
+ return;
+
+ prop = of_get_flat_dt_prop(f->node, "dependencies", &len);
+ if (!prop) {
+ pr_warn("%s: missing dependencies property", f->name);
+ return;
+ }
+
+ nr_deps = len / sizeof(int);
+
+ for (i = 0; i < nr_deps; i++) {
+ unsigned long phandle = be32_to_cpu(prop[i]);
+ int j;
+
+ for (j = 0; j < nr_dt_cpu_features; j++) {
+ struct dt_cpu_feature *d = &dt_cpu_features[j];
+
+ if (of_get_flat_dt_phandle(d->node) == phandle) {
+ cpufeatures_deps_enable(d);
+ if (d->disabled) {
+ f->disabled = 1;
+ return;
+ }
+ }
+ }
+ }
+
+ if (cpufeatures_process_feature(f))
+ f->enabled = 1;
+ else
+ f->disabled = 1;
+}
+
+static int __init scan_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ process_cpufeatures_node(node, uname, *count);
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init count_cpufeatures_subnodes(unsigned long node,
+ const char *uname,
+ void *data)
+{
+ int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
+ *uname, int depth, void *data)
+{
+ const __be32 *prop;
+ int count, i;
+ u32 isa;
+
+ /* We are scanning "ibm,powerpc-cpu-features" nodes only */
+ if (!of_flat_dt_is_compatible(node, "ibm,powerpc-cpu-features"))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "isa", NULL);
+ if (!prop)
+ /* We checked before, "can't happen" */
+ return 0;
+
+ isa = be32_to_cpup(prop);
+
+ /* Count and allocate space for cpu features */
+ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes,
+ &nr_dt_cpu_features);
+ dt_cpu_features = __va(
+ memblock_alloc(sizeof(struct dt_cpu_feature)*
+ nr_dt_cpu_features, PAGE_SIZE));
+
+ cpufeatures_setup_start(isa);
+
+ /* Scan nodes into dt_cpu_features and enable those without deps */
+ count = 0;
+ of_scan_flat_dt_subnodes(node, scan_cpufeatures_subnodes, &count);
+
+ /* Recursively enable remaining features with dependencies */
+ for (i = 0; i < nr_dt_cpu_features; i++) {
+ struct dt_cpu_feature *f = &dt_cpu_features[i];
+
+ cpufeatures_deps_enable(f);
+ }
+
+ prop = of_get_flat_dt_prop(node, "display-name", NULL);
+ if (prop && strlen((char *)prop) != 0) {
+ strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
+ cur_cpu_spec->cpu_name = dt_cpu_name;
+ }
+
+ cpufeatures_setup_finished();
+
+ memblock_free(__pa(dt_cpu_features),
+ sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);
+
+ return 0;
+}
+
+void __init dt_cpu_ftrs_scan(void)
+{
+ if (!using_dt_cpu_ftrs)
+ return;
+
+ of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
+}
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 45b453e4d0c8..acd8ca76233e 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
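+ /* Absolute addresses are not usable when relocatable; load them from the GOT via the kernel TOC */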
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -799,8 +805,14 @@ kernel_dbg_exc:
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index a9312b52fe6f..b886795060fd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -391,9 +391,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
BEGIN_FTR_SECTION
rlwinm. r11,r12,47-31,30,31
- beq- 4f
- BRANCH_TO_COMMON(r10, machine_check_idle_common)
-4:
+ bne machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
@@ -1413,10 +1411,8 @@ USE_TEXT_SECTION()
.balign IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
- andis. r0,r4,0xa410 /* weird error? */
+ andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
- andis. r0,r4,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1440,11 +1436,16 @@ do_hash_page:
/* Error */
blt- 13f
+
+ /* Reload DSISR into r4 for the DABR check below */
+ ld r4,_DSISR(r1)
#endif /* CONFIG_PPC_STD_MMU_64 */
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+ bne- handle_dabr_fault
+ ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_page_fault
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 07d4e0ad60db..4898d676dcae 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -416,7 +416,7 @@ power9_dd1_recover_paca:
* which needs to be restored from the stack.
*/
li r3, 1
- stb r0,PACA_NAPSTATELOST(r13)
+ stb r3,PACA_NAPSTATELOST(r13)
blr
/*
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 160ae0fa7d0d..01addfb0ed0a 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+int is_current_kprobe_addr(unsigned long addr)
+{
+ struct kprobe *p = kprobe_running();
+ return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
+
bool arch_within_kprobe_blacklist(unsigned long addr)
{
return (addr >= (unsigned long)__kprobes_text_start &&
@@ -305,16 +311,17 @@ int kprobe_handler(struct pt_regs *regs)
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
- prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
if (p->ainsn.boostable >= 0) {
ret = try_to_emulate(p, regs);
if (ret > 0) {
restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
return 1;
}
}
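+		/* Emulation was not attempted or did not succeed: single-step the probed instruction */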
+ prepare_singlestep(p, regs);
return 1;
} else {
if (*addr != BREAKPOINT_INSTRUCTION) {
@@ -616,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -641,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index d645da302bf2..2ad725ef4368 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
+ /*
+ * If we are in a transaction and FP is off then we can't have
+ * used FP inside that transaction. Hence the checkpointed
+ * state is the same as the live state. We need to copy the
+ * live state to the checkpointed state so that when the
+ * transaction is restored, the checkpointed state is correct
+ * and the aborted transaction sees the correct state. We use
+ * ckpt_regs.msr here as that's what tm_reclaim will use to
+ * determine if it's going to write the checkpointed state or
+ * not. So either this will write the checkpointed registers,
+ * or reclaim will. Similarly for VMX.
+ */
+ if ((thr->ckpt_regs.msr & MSR_FP) == 0)
+ memcpy(&thr->ckfp_state, &thr->fp_state,
+ sizeof(struct thread_fp_state));
+ if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
+ memcpy(&thr->ckvr_state, &thr->vr_state,
+ sizeof(struct thread_vr_state));
+
giveup_all(container_of(thr, struct task_struct, thread));
tm_reclaim(thr, thr->ckpt_regs.msr, cause);
@@ -1647,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
+ current->thread.load_fp = 0;
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
@@ -1655,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.vr_save_area = NULL;
current->thread.vrsave = 0;
current->thread.used_vr = 0;
+ current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1666,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfhar = 0;
current->thread.tm_texasr = 0;
current->thread.tm_tfiar = 0;
+ current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index d2f0afeae5a0..f83056297441 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -57,6 +57,7 @@
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
+#include <asm/dt_cpu_ftrs.h>
#include <mm/mmu_decl.h>
@@ -160,7 +161,9 @@ static struct ibm_pa_feature {
{ .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
{ .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
{ .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
{ .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX },
+#endif
{ .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
{ .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
.cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
@@ -375,23 +378,31 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* A POWER6 partition in "POWER6 architected" mode
* uses the 0x0f000002 PVR value; in POWER5+ mode
* it uses 0x0f000001.
+ *
+ * If we're using device tree CPU feature discovery then we don't
+ * support the cpu-version property, and it's the responsibility of the
+ * firmware/hypervisor to provide the correct feature set for the
+ * architecture level via the ibm,powerpc-cpu-features binding.
*/
- prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
- if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
- identify_cpu(0, be32_to_cpup(prop));
+ if (!dt_cpu_ftrs_in_use()) {
+ prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
+ if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+ identify_cpu(0, be32_to_cpup(prop));
- identical_pvr_fixup(node);
+ check_cpu_feature_properties(node);
+ check_cpu_pa_features(node);
+ }
- check_cpu_feature_properties(node);
- check_cpu_pa_features(node);
+ identical_pvr_fixup(node);
init_mmu_slb_size(node);
#ifdef CONFIG_PPC64
- if (nthreads > 1)
- cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
- else
+ if (nthreads == 1)
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
+ else if (!dt_cpu_ftrs_in_use())
+ cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif
+
return 0;
}
@@ -721,6 +732,8 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n");
+ dt_cpu_ftrs_scan();
+
/* Retrieve CPU related informations from the flat tree
* (altivec support, boot CPU ID, ...)
*/
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 69e077180db6..857129acf960 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -261,7 +261,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "cpu\t\t: ");
- if (cur_cpu_spec->pvr_mask)
+ if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
seq_printf(m, "%s", cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_PPC64
- init_mm.context.addr_limit = TASK_SIZE_128TB;
+ init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
#else
#error "context.addr_limit not initialized."
#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0d4dcaeaafcb..4640f6d64f8b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -49,6 +49,7 @@
#include <asm/paca.h>
#include <asm/time.h>
#include <asm/cputable.h>
+#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
@@ -274,8 +275,10 @@ void __init early_setup(unsigned long dt_ptr)
/* -------- printk is _NOT_ safe to use here ! ------- */
- /* Identify CPU type */
- identify_cpu(0, mfspr(SPRN_PVR));
+ /* Try new device tree based feature discovery ... */
+ if (!dt_cpu_ftrs_init(__va(dt_ptr)))
+ /* Otherwise use the old style CPU table */
+ identify_cpu(0, mfspr(SPRN_PVR));
/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
initialise_paca(&boot_paca, 0);
@@ -541,6 +544,9 @@ void __init initialize_cache_info(void)
dcache_bsize = ppc64_caches.l1d.block_size;
icache_bsize = ppc64_caches.l1i.block_size;
+ cur_cpu_spec->dcache_bsize = dcache_bsize;
+ cur_cpu_spec->icache_bsize = icache_bsize;
+
DBG(" <- initialize_cache_info()\n");
}
@@ -610,6 +616,24 @@ void __init exc_lvl_early_init(void)
#endif
/*
+ * Emergency stacks are used for a range of things, from asynchronous
+ * NMIs (system reset, machine check) to synchronous, process context.
+ * We set preempt_count to zero, even though that isn't necessarily correct. To
+ * get the right value we'd need to copy it from the previous thread_info, but
+ * doing that might fault, causing more problems.
+ * TODO: what to do with accounting?
+ */
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+ ti->task = NULL;
+ ti->cpu = cpu;
+ ti->preempt_count = 0;
+ ti->local_flags = 0;
+ ti->flags = 0;
+ klp_init_thread_info(ti);
+}
+
+/*
* Stack space used when we detect a bad kernel stack pointer, and
* early in SMP boots before relocation is enabled. Exclusive emergency
* stack for machine checks.
@@ -627,24 +651,31 @@ void __init emergency_stack_init(void)
* Since we use these as temporary stacks during secondary CPU
* bringup, we need to get at them in real mode. This means they
* must also be within the RMO region.
+ *
+ * The IRQ stacks allocated elsewhere in this file are zeroed and
+ * initialized in kernel/irq.c. These are initialized here in order
+ * to have emergency stacks available as early as possible.
*/
limit = min(safe_stack_limit(), ppc64_rma_size);
for_each_possible_cpu(i) {
struct thread_info *ti;
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
#ifdef CONFIG_PPC_BOOK3S_64
/* emergency stack for NMI exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
/* emergency stack for machine check exception handling. */
ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
- klp_init_thread_info(ti);
+ memset(ti, 0, THREAD_SIZE);
+ emerg_stack_init_thread_info(ti, i);
paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
}
@@ -655,7 +686,7 @@ void __init emergency_stack_init(void)
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
- return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+ return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
__pa(MAX_DMA_ADDRESS));
}
@@ -666,7 +697,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- if (cpu_to_node(from) == cpu_to_node(to))
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
return REMOTE_DISTANCE;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index df2a41647d8e..1069f74fca47 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -97,7 +97,7 @@ int smp_generic_cpu_bootable(unsigned int nr)
/* Special case - we inhibit secondary thread startup
* during boot if the user requests it.
*/
- if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
return 0;
if (smt_enabled_at_boot
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index 7c933a99f5d5..c98e90b4ea7b 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
stdu r1,-SWITCH_FRAME_SIZE(r1)
/* Save all gprs to pt_regs */
- SAVE_8GPRS(0,r1)
- SAVE_8GPRS(8,r1)
- SAVE_8GPRS(16,r1)
- SAVE_8GPRS(24,r1)
+ SAVE_GPR(0, r1)
+ SAVE_10GPRS(2, r1)
+ SAVE_10GPRS(12, r1)
+ SAVE_10GPRS(22, r1)
+
+ /* Save previous stack pointer (r1) */
+ addi r8, r1, SWITCH_FRAME_SIZE
+ std r8, GPR1(r1)
/* Load special regs for save below */
mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
bl ftrace_stub
nop
- /* Load ctr with the possibly modified NIP */
- ld r3, _NIP(r1)
- mtctr r3
+ /* Load the possibly modified NIP */
+ ld r15, _NIP(r1)
+
#ifdef CONFIG_LIVEPATCH
- cmpd r14,r3 /* has NIP been altered? */
+ cmpd r14, r15 /* has NIP been altered? */
+#endif
+
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+ /* NIP has not been altered, skip over further checks */
+ beq 1f
+
+ /* Check if there is an active kprobe on us */
+ subi r3, r14, 4
+ bl is_current_kprobe_addr
+ nop
+
+ /*
+ * If r3 == 1, then this is a kprobe/jprobe.
+ * else, this is livepatched function.
+ *
+ * The conditional branch for livepatch_handler below will use the
+ * result of this comparison. For kprobe/jprobe, we just need to branch to
+ * the new NIP, not call livepatch_handler. The branch below is bne, so we
+ * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+ * CR0[EQ] = (r3 == 1).
+ */
+ cmpdi r3, 1
+1:
#endif
+ /* Load CTR with the possibly modified NIP */
+ mtctr r15
+
/* Restore gprs */
- REST_8GPRS(0,r1)
- REST_8GPRS(8,r1)
- REST_8GPRS(16,r1)
- REST_8GPRS(24,r1)
+ REST_GPR(0,r1)
+ REST_10GPRS(2,r1)
+ REST_10GPRS(12,r1)
+ REST_10GPRS(22,r1)
/* Restore possibly modified LR */
ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
addi r1, r1, SWITCH_FRAME_SIZE
#ifdef CONFIG_LIVEPATCH
- /* Based on the cmpd above, if the NIP was altered handle livepatch */
+ /*
+ * Based on the cmpd or cmpdi above, if the NIP was altered and we're
+ * not on a kprobe/jprobe, then handle livepatch.
+ */
bne- livepatch_handler
#endif
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 65a471de96de..0c52cb5d43f5 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -67,7 +67,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
- select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
+ select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -197,6 +197,11 @@ config KVM_XICS
Specification) interrupt controller architecture used on
IBM POWER (pSeries) servers.
+config KVM_XIVE
+ bool
+ default y
+ depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index b87ccde2137a..381a6ec0ff3b 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -46,7 +46,7 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
book3s_64_vio_hv.o
kvm-pr-y := \
@@ -74,7 +74,7 @@ kvm-hv-y += \
book3s_64_mmu_radix.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
- book3s_hv_rm_xics.o
+ book3s_hv_rm_xics.o book3s_hv_rm_xive.o
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
@@ -89,10 +89,12 @@ endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o
+
kvm-book3s_64-module-objs := \
$(common-objs-y) \
book3s.o \
- book3s_64_vio.o \
book3s_rtas.o \
$(kvm-book3s_64-objs-y)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8c4d7e9d27d2..72d977e30952 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -35,6 +35,7 @@
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/xive.h>
#include "book3s.h"
#include "trace.h"
@@ -596,11 +597,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
+ if (xive_enabled())
+ *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
+ else
+ *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -666,12 +670,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
- if (!vcpu->arch.icp) {
+ if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
r = -ENXIO;
break;
}
- r = kvmppc_xics_set_icp(vcpu,
- set_reg_val(id, *val));
+ if (xive_enabled())
+ r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
+ else
+ r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
@@ -942,6 +948,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
+#ifdef CONFIG_KVM_XICS
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ if (xive_enabled())
+ return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+ else
+ return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
+ line_status);
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
+ level, line_status);
+}
+static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
+}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries, int gsi)
+{
+ entries->gsi = gsi;
+ entries->type = KVM_IRQ_ROUTING_IRQCHIP;
+ entries->set = kvmppc_book3s_set_irq;
+ entries->irqchip.irqchip = 0;
+ entries->irqchip.pin = gsi;
+ return 1;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+ return pin;
+}
+
+#endif /* CONFIG_KVM_XICS */
+
static int kvmppc_book3s_init(void)
{
int r;
@@ -952,12 +1002,25 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
r = kvmppc_book3s_init_pr();
#endif
- return r;
+#ifdef CONFIG_KVM_XICS
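+ /* The guest-visible device type stays KVM_DEV_TYPE_XICS; back it with XIVE when the host uses XIVE */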
+#ifdef CONFIG_KVM_XIVE
+ if (xive_enabled()) {
+ kvmppc_xive_init_module();
+ kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+ } else
+#endif
+ kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
+#endif
+ return r;
}
static void kvmppc_book3s_exit(void)
{
+#ifdef CONFIG_KVM_XICS
+ if (xive_enabled())
+ kvmppc_xive_exit_module();
+#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index eda0a8f6fae8..3adfd2f5301c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -301,6 +301,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -381,6 +385,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -491,6 +499,10 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
+ /* For radix, we might be in virtual mode, so punt */
+ if (kvm_is_radix(vcpu->kvm))
+ return H_TOO_HARD;
+
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -527,6 +539,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return H_SUCCESS;
}
+/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 549dd6070dee..8d1a365b8edc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -67,6 +67,7 @@
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
+#include <asm/xive.h>
#include "book3s.h"
@@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_IPOLL:
case H_XIRR_X:
if (kvmppc_xics_enabled(vcpu)) {
+ if (xive_enabled()) {
+ ret = H_NOT_AVAILABLE;
+ return RESUME_GUEST;
+ }
ret = kvmppc_xics_hcall(vcpu, req);
break;
}
@@ -1481,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
+ /*
+ * POWER9 DD1 has an erratum where writing TBU40 causes
+ * the timebase to lose ticks. So we don't let the
+ * timebase offset be changed on P9 DD1. (It is
+ * initialized to zero.)
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+ break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2902,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
+ unsigned long user_tar = 0;
+ unsigned int user_vrsave;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
@@ -2929,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
flush_all_to_thread(current);
+ /* Save userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ user_tar = mfspr(SPRN_TAR);
+ }
+ user_vrsave = mfspr(SPRN_VRSAVE);
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2947,10 +2993,24 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_book3s_hv_page_fault(run, vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
- } else if (r == RESUME_PASSTHROUGH)
- r = kvmppc_xics_rm_complete(vcpu, 0);
+ } else if (r == RESUME_PASSTHROUGH) {
+ if (WARN_ON(xive_enabled()))
+ r = H_SUCCESS;
+ else
+ r = kvmppc_xics_rm_complete(vcpu, 0);
+ }
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr(SPRN_TAR, user_tar);
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ }
+ mtspr(SPRN_VRSAVE, user_vrsave);
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
@@ -3400,10 +3460,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
* Set HVICE bit to enable hypervisor virtualization interrupts.
+ * Set HEIC to prevent OS interrupts from going to the hypervisor (should
+ * be unnecessary but better safe than sorry in case we re-enable
+ * EE in HV mode with this LPCR still set)
*/
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
lpcr &= ~LPCR_VPM0;
- lpcr |= LPCR_HVICE;
+ lpcr |= LPCR_HVICE | LPCR_HEIC;
+
+ /*
+ * If xive is enabled, we route 0x500 interrupts directly
+ * to the guest.
+ */
+ if (xive_enabled())
+ lpcr |= LPCR_LPES;
}
/*
@@ -3533,7 +3603,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
struct kvmppc_irq_map *irq_map;
struct kvmppc_passthru_irqmap *pimap;
struct irq_chip *chip;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 1;
@@ -3558,10 +3628,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
/*
* For now, we only support interrupts for which the EOI operation
* is an OPAL call followed by a write to XIRR, since that's
- * what our real-mode EOI code does.
+ * what our real-mode EOI code does, or which are XIVE interrupts
*/
chip = irq_data_get_irq_chip(&desc->irq_data);
- if (!chip || !is_pnv_opal_msi(chip)) {
+ if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
host_irq, guest_gsi);
mutex_unlock(&kvm->lock);
@@ -3603,7 +3673,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
if (i == pimap->n_mapped)
pimap->n_mapped++;
- kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
+ else
+ kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ if (rc)
+ irq_map->r_hwirq = 0;
mutex_unlock(&kvm->lock);
@@ -3614,7 +3689,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
struct irq_desc *desc;
struct kvmppc_passthru_irqmap *pimap;
- int i;
+ int i, rc = 0;
if (!kvm_irq_bypass)
return 0;
@@ -3639,9 +3714,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
return -ENODEV;
}
- kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
+ if (xive_enabled())
+ rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
+ else
+ kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
- /* invalidate the entry */
+ /* invalidate the entry (what to do on error from the above?) */
pimap->mapped[i].r_hwirq = 0;
/*
@@ -3650,7 +3728,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
*/
unlock:
mutex_unlock(&kvm->lock);
- return 0;
+ return rc;
}
static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
@@ -3928,7 +4006,7 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!get_paca()->kvm_hstate.xics_phys) {
+ if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 9c71c72e65ce..ee4c2558c305 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,24 @@
#define KVM_CMA_CHUNK_ORDER 18
+#include "book3s_xics.h"
+#include "book3s_xive.h"
+
+/*
+ * The XIVE module will populate these when it loads
+ */
+unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
+EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
+
/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
* should be power of 2.
@@ -189,7 +207,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
- if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
+ int r;
+
+ /* Only need to do the expensive mfmsr() on radix */
+ if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
+ r = powernv_get_random_long(&vcpu->arch.gpr[4]);
+ else
+ r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
+ if (r)
return H_SUCCESS;
return H_HARDWARE;
@@ -211,6 +236,7 @@ void kvmhv_rm_send_ipi(int cpu)
__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
return;
}
+
/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
cpu_first_thread_sibling(cpu) ==
@@ -407,6 +433,9 @@ static long kvmppc_read_one_intr(bool *again)
u8 host_ipi;
int64_t rc;
+ if (xive_enabled())
+ return 1;
+
/* see if a host IPI is pending */
host_ipi = local_paca->kvm_hstate.host_ipi;
if (host_ipi)
@@ -491,3 +520,84 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
+
+#ifdef CONFIG_KVM_XICS
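+/* These hcalls may arrive in real or virtual mode (e.g. radix with AIL); MSR[DR] tells us which */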
+static inline bool is_rm(void)
+{
+ return !(mfmsr() & MSR_DR);
+}
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.gpr[5] = get_tb();
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_xirr(vcpu);
+ if (unlikely(!__xive_vm_h_xirr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_xirr(vcpu);
+ } else
+ return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipoll(vcpu, server);
+ if (unlikely(!__xive_vm_h_ipoll))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipoll(vcpu, server);
+ } else
+ return H_TOO_HARD;
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ if (unlikely(!__xive_vm_h_ipi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_ipi(vcpu, server, mfrr);
+ } else
+ return xics_rm_h_ipi(vcpu, server, mfrr);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_cppr(vcpu, cppr);
+ if (unlikely(!__xive_vm_h_cppr))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_cppr(vcpu, cppr);
+ } else
+ return xics_rm_h_cppr(vcpu, cppr);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ if (xive_enabled()) {
+ if (is_rm())
+ return xive_rm_h_eoi(vcpu, xirr);
+ if (unlikely(!__xive_vm_h_eoi))
+ return H_NOT_AVAILABLE;
+ return __xive_vm_h_eoi(vcpu, xirr);
+ } else
+ return xics_rm_h_eoi(vcpu, xirr);
+}
+#endif /* CONFIG_KVM_XICS */
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 0fdc4a28970b..404deb512844 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* Put whatever is in the decrementer into the
* hypervisor decrementer.
*/
+BEGIN_FTR_SECTION
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_KVM(r5)
+ ld r9, KVM_HOST_LPCR(r6)
+ andis. r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mfspr r8,SPRN_DEC
mftb r7
- mtspr SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+ /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+ bne 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
+32: mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index ffde4507ddfd..2a862618f072 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -484,7 +484,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -522,8 +522,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
+int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -609,7 +609,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
return check_too_hard(xics, this_icp);
}
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
union kvmppc_icp_state old_state, new_state;
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -729,7 +729,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
return check_too_hard(xics, icp);
}
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
struct kvmppc_icp *icp = vcpu->arch.icp;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
new file mode 100644
index 000000000000..abf5f01b6eb1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/cputhreads.h>
+#include <asm/pgtable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
+#include <asm/asm-prototypes.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+
+#include "book3s_xive.h"
+
+/* XXX */
+#include <asm/udbg.h>
+//#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) do { } while(0)
+
+static inline void __iomem *get_tima_phys(void)
+{
+ return local_paca->kvm_hstate.xive_tima_phys;
+}
+
+#undef XIVE_RUNTIME_CHECKS
+#define X_PFX xive_rm_
+#define X_STATIC
+#define X_STAT_PFX stat_rm_
+#define __x_tima get_tima_phys()
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
+#define __x_readb __raw_rm_readb
+#define __x_writeb __raw_rm_writeb
+#define __x_readw __raw_rm_readw
+#define __x_readq __raw_rm_readq
+#define __x_writeq __raw_rm_writeq
+
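+/* Generate the real-mode flavour of the XIVE hcall handlers from the common template */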
+#include "book3s_xive_template.c"
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7c6477d1840a..4888dd494604 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -30,6 +30,13 @@
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
+#include <asm/xive-regs.h>
+
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg) \
+BEGIN_FTR_SECTION; \
+ extsw reg, reg; \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
@@ -37,6 +44,17 @@
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 144
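+/* Individual register save slots are carved out from the top of the frame */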
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_TID (SFS-16)
+#define STACK_SLOT_PSSCR (SFS-24)
+#define STACK_SLOT_PID (SFS-32)
+#define STACK_SLOT_IAMR (SFS-40)
+#define STACK_SLOT_CIABR (SFS-48)
+#define STACK_SLOT_DAWR (SFS-56)
+#define STACK_SLOT_DAWRX (SFS-64)
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -213,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+ /* HDEC may be larger than DEC for arch >= v3.00, but since the */
+ /* HDEC value came from DEC in the first place, it will fit */
mfspr r3, SPRN_HDEC
mtspr SPRN_DEC, r3
/*
@@ -294,8 +314,9 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
+ EXTEND_HDEC(r0)
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
- cmpwi r0, 0
+ cmpdi r0, 0
blt kvm_novcpu_exit
/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -318,10 +339,10 @@ kvm_novcpu_exit:
bl kvmhv_accumulate_time
#endif
13: mr r3, r12
- stw r12, 112-4(r1)
+ stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, 112-4(r1)
+ lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -389,8 +410,8 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- lis r6, 0x7fff
- ori r6, r6, 0xffff
+ LOAD_REG_ADDR(r6, decrementer_max)
+ ld r6, 0(r6)
mtspr SPRN_HDEC, r6
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
@@ -544,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* *
*****************************************************************************/
-/* Stack frame offsets */
-#define STACK_SLOT_TID (112-16)
-#define STACK_SLOT_PSSCR (112-24)
-#define STACK_SLOT_PID (112-32)
-
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -564,7 +580,7 @@ kvmppc_hv_entry:
*/
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
+ stdu r1, -SFS(r1)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
@@ -748,10 +764,20 @@ BEGIN_FTR_SECTION
mfspr r5, SPRN_TIDR
mfspr r6, SPRN_PSSCR
mfspr r7, SPRN_PID
+ mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_TID(r1)
std r6, STACK_SLOT_PSSCR(r1)
std r7, STACK_SLOT_PID(r1)
+ std r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_CIABR
+ mfspr r6, SPRN_DAWR
+ mfspr r7, SPRN_DAWRX
+ std r5, STACK_SLOT_CIABR(r1)
+ std r6, STACK_SLOT_DAWR(r1)
+ std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
/* Set partition DABR */
@@ -967,9 +993,27 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- cmpwi r3, 512 /* 1 microsecond */
+ EXTEND_HDEC(r3)
+ cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
+#ifdef CONFIG_KVM_XICS
+ /* We are entering the guest on that thread, push VCPU to XIVE */
+ ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, r0
+ beq no_xive
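+ /* Push the vcpu's saved OS context words and CAM word into the TIMA with cache-inhibited stores */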
+ ld r11, VCPU_XIVE_SAVED_STATE(r4)
+ li r9, TM_QW1_OS
+ stdcix r11,r9,r10
+ eieio
+ lwz r11, VCPU_XIVE_CAM_WORD(r4)
+ li r9, TM_QW1_OS + TM_WORD2
+ stwcix r11,r9,r10
+ li r9, 1
+ stw r9, VCPU_XIVE_PUSHED(r4)
+no_xive:
+#endif /* CONFIG_KVM_XICS */
+
deliver_guest_interrupt:
ld r6, VCPU_CTR(r4)
ld r7, VCPU_XER(r4)
@@ -1307,6 +1351,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
blt deliver_guest_interrupt
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+#ifdef CONFIG_KVM_XICS
+ /* We are exiting, pull the VP from the XIVE */
+ lwz r0, VCPU_XIVE_PUSHED(r9)
+ cmpwi cr0, r0, 0
+ beq 1f
+ li r7, TM_SPC_PULL_OS_CTX
+ li r6, TM_QW1_OS
+ mfmsr r0
+ andi. r0, r0, MSR_IR /* in real mode? */
+ beq 2f
+ ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzx r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldx r11, r6, r10
+ b 3f
+2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
+ cmpldi cr0, r10, 0
+ beq 1f
+ /* First load to pull the context, we ignore the value */
+ lwzcix r11, r7, r10
+ eieio
+ /* Second load to recover the context state (Words 0 and 1) */
+ ldcix r11, r6, r10
+3: std r11, VCPU_XIVE_SAVED_STATE(r9)
+ /* Fixup some of the state for the next load */
+ li r10, 0
+ li r0, 0xff
+ stw r10, VCPU_XIVE_PUSHED(r9)
+ stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
+ stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+1:
+#endif /* CONFIG_KVM_XICS */
/* Save more register state */
mfdar r6
mfdsisr r7
@@ -1451,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
* set by the guest could disrupt the host.
*/
li r0, 0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
+ mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
BEGIN_FTR_SECTION
+ mtspr SPRN_IAMR, r0
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
@@ -1471,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
+ mtspr SPRN_UAMOR, r6
/* Switch DSCR back to host value */
mfspr r8, SPRN_DSCR
@@ -1616,12 +1696,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_CIABR(r1)
+ ld r6, STACK_SLOT_DAWR(r1)
+ ld r7, STACK_SLOT_DAWRX(r1)
+ mtspr SPRN_CIABR, r5
+ mtspr SPRN_DAWR, r6
+ mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1)
ld r6, STACK_SLOT_PSSCR(r1)
ld r7, STACK_SLOT_PID(r1)
+ ld r8, STACK_SLOT_IAMR(r1)
mtspr SPRN_TIDR, r5
mtspr SPRN_PSSCR, r6
mtspr SPRN_PID, r7
+ mtspr SPRN_IAMR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
PPC_INVALIDATE_ERAT
@@ -1765,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- ld r0, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
+ ld r0, SFS+PPC_LR_STKOFF(r1)
+ addi r1, r1, SFS
mtlr r0
blr
@@ -2011,7 +2101,7 @@ hcall_real_table:
.long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
- .long 0 /* 0x70 - H_IPOLL */
+ .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
.long 0 /* 0x64 - H_EOI */
@@ -2181,7 +2271,11 @@ hcall_real_table:
.long 0 /* 0x2f0 */
.long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */
- .long 0 /* 0x2fc */
+#ifdef CONFIG_KVM_XICS
+ .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+#else
+ .long 0 /* 0x2fc - H_XIRR_X */
+#endif
.long DOTSYM(kvmppc_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
@@ -2308,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
- cmpw r3, r4
+ extsw r3, r3
+ EXTEND_HDEC(r4)
+ cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
67:
/* save expiry time of guest decrementer */
- extsw r3, r3
add r3, r3, r5
ld r4, HSTATE_KVM_VCPU(r13)
ld r5, HSTATE_KVM_VCORE(r13)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index bcbeeb62dd13..8a4205fa774f 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+ ret = H_FUNCTION;
+ if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+ goto done;
hpte = pteg;
ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+ goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+ goto done;
rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+ ret = H_FUNCTION;
+ break;
+ }
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+ ret = H_FUNCTION;
+ break;
+ }
rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
- copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+ goto done;
ret = H_SUCCESS;
done:
@@ -244,36 +262,37 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
-static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
- unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
- unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
- unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
- rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+ rc = kvmppc_h_logical_ci_load(vcpu);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
-static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
long rc;
- rc = kvmppc_h_logical_ci_load(vcpu);
+ rc = kvmppc_h_logical_ci_store(vcpu);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
return EMULATE_DONE;
}
-static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce = kvmppc_get_gpr(vcpu, 6);
long rc;
- rc = kvmppc_h_logical_ci_store(vcpu);
+ rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
if (rc == H_TOO_HARD)
return EMULATE_FAIL;
kvmppc_set_gpr(vcpu, 3, rc);
@@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+#else /* CONFIG_SPAPR_TCE_IOMMU */
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+ return EMULATE_FAIL;
+}
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
+
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
long rc = kvmppc_xics_hcall(vcpu, cmd);
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 20528701835b..2d3b2b1cc272 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -16,6 +16,7 @@
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/rtas.h>
+#include <asm/xive.h>
#ifdef CONFIG_KVM_XICS
static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
server = be32_to_cpu(args->args[1]);
priority = be32_to_cpu(args->args[2]);
- rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
+ else
+ rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
if (rc)
rc = -3;
out:
@@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
server = priority = 0;
- rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+ if (xive_enabled())
+ rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
+ else
+ rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
if (rc) {
rc = -3;
goto out;
@@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_off(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_off(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
@@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
irq = be32_to_cpu(args->args[0]);
- rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+ if (xive_enabled())
+ rc = kvmppc_xive_int_on(vcpu->kvm, irq);
+ else
+ rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 459b72cb617a..d329b2add7e2 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1306,8 +1306,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
return 0;
}
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
- bool line_status)
+int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
{
struct kvmppc_xics *xics = kvm->arch.xics;
@@ -1316,14 +1316,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return ics_deliver_irq(xics, irq, level);
}
-int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
- struct kvm *kvm, int irq_source_id,
- int level, bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
- level, line_status);
-}
-
static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
struct kvmppc_xics *xics = dev->private;
@@ -1457,29 +1449,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}
-static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
-}
-
-int kvm_irq_map_gsi(struct kvm *kvm,
- struct kvm_kernel_irq_routing_entry *entries, int gsi)
-{
- entries->gsi = gsi;
- entries->type = KVM_IRQ_ROUTING_IRQCHIP;
- entries->set = xics_set_irq;
- entries->irqchip.irqchip = 0;
- entries->irqchip.pin = gsi;
- return 1;
-}
-
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
- return pin;
-}
-
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
unsigned long host_irq)
{
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index ec5474cf70c6..453c9e518c19 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -10,6 +10,7 @@
#ifndef _KVM_PPC_BOOK3S_XICS_H
#define _KVM_PPC_BOOK3S_XICS_H
+#ifdef CONFIG_KVM_XICS
/*
* We use a two-level tree to store interrupt source information.
* There are up to 1024 ICS nodes, each of which can represent
@@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
return ics;
}
+extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
new file mode 100644
index 000000000000..ffe1da95033a
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -0,0 +1,1894 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "xive-kvm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+#include <asm/debug.h>
+#include <asm/debugfs.h>
+#include <asm/time.h>
+#include <asm/opal.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "book3s_xive.h"
+
+
+/*
+ * Virtual mode variants of the hcalls for use on radix/radix
+ * with AIL. They require the VCPU's VP to be "pushed"
+ *
+ * We still instantiate them here because we use some of the
+ * generated utility functions as well in this file.
+ */
+#define XIVE_RUNTIME_CHECKS
+#define X_PFX xive_vm_
+#define X_STATIC static
+#define X_STAT_PFX stat_vm_
+#define __x_tima xive_tima
+#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
+#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
+#define __x_readb __raw_readb
+#define __x_writeb __raw_writeb
+#define __x_readw __raw_readw
+#define __x_readq __raw_readq
+#define __x_writeq __raw_writeq
+
+#include "book3s_xive_template.c"
+
+/*
+ * We leave a gap of a couple of interrupts in the queue to
+ * account for the IPI and an additional safety guard.
+ */
+#define XIVE_Q_GAP 2
+
+/*
+ * This is a simple trigger for a generic XIVE IRQ. This must
+ * only be called for interrupts that support a trigger page
+ */
+static bool xive_irq_trigger(struct xive_irq_data *xd)
+{
+ /* This should be only for MSIs */
+ if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
+ return false;
+
+ /* Those interrupts should always have a trigger page */
+ if (WARN_ON(!xd->trig_mmio))
+ return false;
+
+ out_be64(xd->trig_mmio, 0);
+
+ return true;
+}
+
+static irqreturn_t xive_esc_irq(int irq, void *data)
+{
+ struct kvm_vcpu *vcpu = data;
+
+ /* We use the existing H_PROD mechanism to wake up the target */
+ vcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded)
+ kvmppc_fast_vcpu_kick(vcpu);
+
+ return IRQ_HANDLED;
+}
+
+static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q = &xc->queues[prio];
+ char *name = NULL;
+ int rc;
+
+ /* Already there ? */
+ if (xc->esc_virq[prio])
+ return 0;
+
+ /* Hook up the escalation interrupt */
+ xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
+ if (!xc->esc_virq[prio]) {
+ pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ return -EIO;
+ }
+
+ /*
+ * Future improvement: start with them disabled
+ * and handle DD2 and later scheme of merged escalation
+ * interrupts
+ */
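+	/* The irq name is "kvm-<lpid>-<server>-<prio>", e.g. "kvm-1-4-7" */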
+ name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ vcpu->kvm->arch.lpid, xc->server_num, prio);
+ if (!name) {
+ pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ rc = -ENOMEM;
+ goto error;
+ }
+ rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
+ IRQF_NO_THREAD, name, vcpu);
+ if (rc) {
+ pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
+ prio, xc->server_num);
+ goto error;
+ }
+ xc->esc_virq_names[prio] = name;
+ return 0;
+error:
+ irq_dispose_mapping(xc->esc_virq[prio]);
+ xc->esc_virq[prio] = 0;
+ kfree(name);
+ return rc;
+}
+
+static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ struct xive_q *q = &xc->queues[prio];
+ void *qpage;
+ int rc;
+
+ if (WARN_ON(q->qpage))
+ return 0;
+
+	/* Allocate the queue; for now we allocate it on the current node */
+ qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
+ if (!qpage) {
+ pr_err("Failed to allocate queue %d for VCPU %d\n",
+ prio, xc->server_num);
+		return -ENOMEM;
+ }
+ memset(qpage, 0, 1 << xive->q_order);
+
+ /*
+ * Reconfigure the queue. This will set q->qpage only once the
+ * queue is fully configured. This is a requirement for prio 0
+ * as we will stop doing EOIs for every IPI as soon as we observe
+ * qpage being non-NULL, and instead will only EOI when we receive
+ * corresponding queue 0 entries
+ */
+ rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
+ xive->q_order, true);
+ if (rc)
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ prio, xc->server_num);
+ return rc;
+}
+
+/* Called with kvm_lock held */
+static int xive_check_provisioning(struct kvm *kvm, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ lockdep_assert_held(&kvm->lock);
+
+ /* Already provisioned ? */
+ if (xive->qmap & (1 << prio))
+ return 0;
+
+ pr_devel("Provisioning prio... %d\n", prio);
+
+ /* Provision each VCPU and enable escalations */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_provision_queue(vcpu, prio);
+ if (rc == 0)
+ xive_attach_escalation(vcpu, prio);
+ if (rc)
+ return rc;
+ }
+
+ /* Order previous stores and mark it as provisioned */
+ mb();
+ xive->qmap |= (1 << prio);
+ return 0;
+}
+
+static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_xive_vcpu *xc;
+ struct xive_q *q;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, server);
+ if (!vcpu) {
+ pr_warn("%s: Can't find server %d\n", __func__, server);
+ return;
+ }
+ xc = vcpu->arch.xive_vcpu;
+ if (WARN_ON(!xc))
+ return;
+
+ q = &xc->queues[prio];
+ atomic_inc(&q->pending_count);
+}
+
+static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_q *q;
+ u32 max;
+
+ if (WARN_ON(!xc))
+ return -ENXIO;
+ if (!xc->valid)
+ return -ENXIO;
+
+ q = &xc->queues[prio];
+ if (WARN_ON(!q->qpage))
+ return -ENXIO;
+
+ /* Calculate max number of interrupts in that queue. */
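+	/* e.g. with 1024 slots (q->msk = 1023) this allows up to 1022 entries */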
+ max = (q->msk + 1) - XIVE_Q_GAP;
+ return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
+}
+
+static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
+{
+ struct kvm_vcpu *vcpu;
+ int i, rc;
+
+ /* Locate target server */
+ vcpu = kvmppc_xive_find_server(kvm, *server);
+ if (!vcpu) {
+ pr_devel("Can't find server %d\n", *server);
+ return -EINVAL;
+ }
+
+ pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);
+
+ /* Try pick it */
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0)
+ return rc;
+
+ pr_devel(" .. failed, looking up candidate...\n");
+
+ /* Failed, pick another VCPU */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.xive_vcpu)
+ continue;
+ rc = xive_try_pick_queue(vcpu, prio);
+ if (rc == 0) {
+ *server = vcpu->arch.xive_vcpu->server_num;
+ pr_devel(" found on 0x%x/%d\n", *server, prio);
+ return rc;
+ }
+ }
+ pr_devel(" no available target !\n");
+
+ /* No available target ! */
+ return -EBUSY;
+}
+
+static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+ u8 old_prio;
+ u64 val;
+
+ /*
+ * Take the lock, set masked, try again if racing
+ * with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ old_prio = state->guest_priority;
+ state->guest_priority = MASKED;
+ mb();
+ if (!state->in_eoi)
+ break;
+ state->guest_priority = old_prio;
+ arch_spin_unlock(&sb->lock);
+ }
+
+ /* No change ? Bail */
+ if (old_prio == MASKED)
+ return old_prio;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+ * If the interrupt is marked as needing masking via
+ * firmware, we do it here. Firmware masking however
+ * is "lossy", it won't return the old p and q bits
+ * and won't set the interrupt to a state where it will
+ * record queued ones. If this is an issue we should do
+ * lazy masking instead.
+ *
+ * For now, we work around this in unmask by forcing
+ * an interrupt whenever we unmask a non-LSI via FW
+ * (if ever).
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ MASKED, state->number);
+ /* set old_p so we can track if an H_EOI was done */
+ state->old_p = true;
+ state->old_q = false;
+ } else {
+ /* Set PQ to 10, return old P and old Q and remember them */
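+		/* The returned value has P in bit 1 and Q in bit 0 */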
+ val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+ state->old_p = !!(val & 2);
+ state->old_q = !!(val & 1);
+
+ /*
+		 * Synchronize hardware to ensure the queues are updated
+ * when masking
+ */
+ xive_native_sync_source(hw_num);
+ }
+
+ return old_prio;
+}
+
+static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state)
+{
+ /*
+ * Take the lock try again if racing with H_EOI
+ */
+ for (;;) {
+ arch_spin_lock(&sb->lock);
+ if (!state->in_eoi)
+ break;
+ arch_spin_unlock(&sb->lock);
+ }
+}
+
+static void xive_finish_unmask(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ struct kvmppc_xive_irq_state *state,
+ u8 prio)
+{
+ struct xive_irq_data *xd;
+ u32 hw_num;
+
+ /* If we aren't changing a thing, move on */
+ if (state->guest_priority != MASKED)
+ goto bail;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ /*
+	 * See comment in xive_lock_and_mask() concerning masking
+ * via firmware.
+ */
+ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
+ xive_native_configure_irq(hw_num,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+ /* If an EOI is needed, do it here */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+ /* If this is not an LSI, force a trigger */
+ if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
+ xive_irq_trigger(xd);
+ goto bail;
+ }
+
+ /* Old Q set, set PQ to 11 */
+ if (state->old_q)
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
+
+ /*
+ * If not old P, then perform an "effective" EOI,
+ * on the source. This will handle the cases where
+ * FW EOI is needed.
+ */
+ if (!state->old_p)
+ xive_vm_source_eoi(hw_num, xd);
+
+ /* Synchronize ordering and mark unmasked */
+ mb();
+bail:
+ state->guest_priority = prio;
+}
+
+/*
+ * Target an interrupt to a given server/prio. This will fall back
+ * to another server if necessary and perform the HW targetting
+ * updates as needed
+ *
+ * NOTE: Must be called with the state lock held
+ */
+static int xive_target_interrupt(struct kvm *kvm,
+ struct kvmppc_xive_irq_state *state,
+ u32 server, u8 prio)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ u32 hw_num;
+ int rc;
+
+ /*
+ * This will return a tentative server and actual
+ * priority. The count for that new target will have
+ * already been incremented.
+ */
+ rc = xive_select_target(kvm, &server, prio);
+
+ /*
+ * We failed to find a target ? Not much we can do
+ * at least until we support the GIQ.
+ */
+ if (rc)
+ return rc;
+
+ /*
+ * Increment the old queue pending count if there
+ * was one so that the old queue count gets adjusted later
+ * when observed to be empty.
+ */
+ if (state->act_priority != MASKED)
+ xive_inc_q_pending(kvm,
+ state->act_server,
+ state->act_priority);
+ /*
+ * Update state and HW
+ */
+ state->act_priority = prio;
+ state->act_server = server;
+
+ /* Get the right irq */
+ kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+ return xive_native_configure_irq(hw_num,
+ xive->vp_base + server,
+ prio, state->number);
+}
+
+/*
+ * Targetting rules: In order to avoid losing track of
+ * pending interrupts across mask and unmask, which would
+ * allow queue overflows, we implement the following rules:
+ *
+ * - Unless it was never enabled (or we run out of capacity)
+ * an interrupt is always targetted at a valid server/queue
+ * pair even when "masked" by the guest. This pair tends to
+ * be the last one used but it can be changed under some
+ * circumstances. That allows us to separate targetting
+ *   from masking: we only handle accounting during (re)targetting,
+ *   and this also allows us to let an interrupt drain into its target
+ * queue after masking, avoiding complex schemes to remove
+ * interrupts out of remote processor queues.
+ *
+ * - When masking, we set PQ to 10 and save the previous value
+ * of P and Q.
+ *
+ * - When unmasking, if saved Q was set, we set PQ to 11
+ * otherwise we leave PQ to the HW state which will be either
+ * 10 if nothing happened or 11 if the interrupt fired while
+ * masked. Effectively we are OR'ing the previous Q into the
+ * HW Q.
+ *
+ * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
+ * which will unmask the interrupt and shoot a new one if Q was
+ * set.
+ *
+ * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
+ * effectively meaning an H_EOI from the guest is still expected
+ * for that interrupt).
+ *
+ * - If H_EOI occurs while masked, we clear the saved P.
+ *
+ * - When changing target, we account on the new target and
+ * increment a separate "pending" counter on the old one.
+ * This pending counter will be used to decrement the old
+ * target's count when its queue has been observed empty.
+ */
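+
+/*
+ * Worked example of the rules above (illustrative): take an interrupt
+ * with PQ = 00 that the guest masks and later unmasks.
+ *
+ * - Masking sets PQ to 10 and saves P = 0, Q = 0.
+ * - The interrupt fires while masked: the ESB latches it, PQ becomes 11.
+ * - On unmask, saved Q was clear so PQ is left at the HW value (11),
+ *   and saved P was clear so we do an effective EOI, which clears PQ
+ *   and re-triggers the interrupt since Q was found set.
+ *
+ * The occurrence latched while masked is thus re-delivered (once) after
+ * unmasking.
+ */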
+
+int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+ u32 priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u8 new_act_prio;
+ int rc = 0;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
+ irq, server, priority);
+
+ /* First, check provisioning of queues */
+ if (priority != MASKED)
+ rc = xive_check_provisioning(xive->kvm,
+ xive_prio_from_guest(priority));
+ if (rc) {
+ pr_devel(" provisioning failure %d !\n", rc);
+ return rc;
+ }
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * We first handle masking/unmasking since the locking
+ * might need to be retried due to EOIs, we'll handle
+ * targetting changes later. These functions will return
+ * with the SB lock held.
+ *
+ * xive_lock_and_mask() will also set state->guest_priority
+ * but won't otherwise change other fields of the state.
+ *
+ * xive_lock_for_unmask will not actually unmask, this will
+ * be done later by xive_finish_unmask() once the targetting
+ * has been done, so we don't try to unmask an interrupt
+ * that hasn't yet been targetted.
+ */
+ if (priority == MASKED)
+ xive_lock_and_mask(xive, sb, state);
+ else
+ xive_lock_for_unmask(sb, state);
+
+ /*
+ * Then we handle targetting.
+ *
+ * First calculate a new "actual priority"
+ */
+ new_act_prio = state->act_priority;
+ if (priority != MASKED)
+ new_act_prio = xive_prio_from_guest(priority);
+
+ pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
+ new_act_prio, state->act_server, state->act_priority);
+
+ /*
+ * Then check if we actually need to change anything,
+ *
+ * The condition for re-targetting the interrupt is that
+ * we have a valid new priority (new_act_prio is not 0xff)
+ * and either the server or the priority changed.
+ *
+ * Note: If act_priority was ff and the new priority is
+ * also ff, we don't do anything and leave the interrupt
+	 * untargetted. An attempt to do an int_on on an
+	 * untargetted interrupt will fail. If that is a problem
+	 * we could initialize interrupts with a valid default priority.
+ */
+
+ if (new_act_prio != MASKED &&
+ (state->act_server != server ||
+ state->act_priority != new_act_prio))
+ rc = xive_target_interrupt(kvm, state, server, new_act_prio);
+
+ /*
+ * Perform the final unmasking of the interrupt source
+ * if necessary
+ */
+ if (priority != MASKED)
+ xive_finish_unmask(xive, sb, state, priority);
+
+ /*
+ * Finally Update saved_priority to match. Only int_on/off
+ * set this field to a different value.
+ */
+ state->saved_priority = priority;
+
+ arch_spin_unlock(&sb->lock);
+ return rc;
+}
+
+int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+ u32 *priority)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+ arch_spin_lock(&sb->lock);
+ *server = state->guest_server;
+ *priority = state->guest_priority;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_on(irq=0x%x)\n", irq);
+
+ /*
+ * Check if interrupt was not targetted
+ */
+ if (state->act_priority == MASKED) {
+ pr_devel("int_on on untargetted interrupt\n");
+ return -EINVAL;
+ }
+
+ /* If saved_priority is 0xff, do nothing */
+ if (state->saved_priority == MASKED)
+ return 0;
+
+ /*
+ * Lock and unmask it.
+ */
+ xive_lock_for_unmask(sb, state);
+ xive_finish_unmask(xive, sb, state, state->saved_priority);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ pr_devel("int_off(irq=0x%x)\n", irq);
+
+ /*
+ * Lock and mask
+ */
+ state->saved_priority = xive_lock_and_mask(xive, sb, state);
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+
+static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return false;
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return false;
+
+ /*
+ * Trigger the IPI. This assumes we never restore a pass-through
+ * interrupt which should be safe enough
+ */
+ xive_irq_trigger(&state->ipi_data);
+
+ return true;
+}
+
+u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ return 0;
+
+ /* Return the per-cpu state for state saving/migration */
+ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
+ (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT;
+}
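+
+/*
+ * The packing above mirrors the XICS one-reg ICP layout consumed by
+ * kvmppc_xive_set_icp() below, with CPPR and MFRR placed in their
+ * respective byte fields.
+ */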
+
+int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ u8 cppr, mfrr;
+ u32 xisr;
+
+ if (!xc || !xive)
+ return -ENOENT;
+
+ /* Grab individual state fields. We don't use pending_pri */
+ cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
+ xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
+ KVM_REG_PPC_ICP_XISR_MASK;
+ mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
+
+ pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
+ xc->server_num, cppr, mfrr, xisr);
+
+ /*
+ * We can't update the state of a "pushed" VCPU, but that
+ * shouldn't happen.
+ */
+ if (WARN_ON(vcpu->arch.xive_pushed))
+ return -EIO;
+
+ /* Update VCPU HW saved state */
+ vcpu->arch.xive_saved_state.cppr = cppr;
+ xc->hw_cppr = xc->cppr = cppr;
+
+ /*
+ * Update MFRR state. If it's not 0xff, we mark the VCPU as
+ * having a pending MFRR change, which will re-evaluate the
+ * target. The VCPU will thus potentially get a spurious
+ * interrupt but that's not a big deal.
+ */
+ xc->mfrr = mfrr;
+ if (mfrr < cppr)
+ xive_irq_trigger(&xc->vp_ipi_data);
+
+ /*
+ * Now saved XIRR is "interesting". It means there's something in
+ * the legacy "1 element" queue... for an IPI we simply ignore it,
+ * as the MFRR restore will handle that. For anything else we need
+ * to force a resend of the source.
+ * However the source may not have been setup yet. If that's the
+ * case, we keep that info and increment a counter in the xive to
+ * tell subsequent xive_set_source() to go look.
+ */
+ if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
+ xc->delayed_irq = xisr;
+ xive->delayed_irqs++;
+ pr_devel(" xisr restore delayed\n");
+ }
+
+ return 0;
+}
+
+int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mark the passed-through interrupt as going to a VCPU,
+ * this will prevent further EOIs and similar operations
+ * from the XIVE code. It will also mask the interrupt
+ * to either PQ=10 or 11 state, the latter if the interrupt
+ * is pending. This will allow us to unmask or retrigger it
+ * after routing it to the guest with a simple EOI.
+ *
+	 * The "state" argument is a "token": all it needs is to be
+ * non-NULL to switch to passed-through or NULL for the
+ * other way around. We may not yet have an actual VCPU
+ * target here and we don't really care.
+ */
+ rc = irq_set_vcpu_affinity(host_irq, state);
+ if (rc) {
+ pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /*
+ * Mask and read state of IPI. We need to know if its P bit
+ * is set as that means it's potentially already using a
+ * queue entry in the target
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /* Turn the IPI hard off */
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Grab info about irq */
+ state->pt_number = hw_irq;
+ state->pt_data = irq_data_get_irq_handler_data(host_data);
+
+ /*
+ * Configure the IRQ to match the existing configuration of
+ * the IPI if it was already targetted. Otherwise this will
+ * mask the interrupt in a lossy way (act_priority is 0xff)
+ * which is fine for a never started interrupt.
+ */
+ xive_native_configure_irq(hw_irq,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * We do an EOI to enable the interrupt (and retrigger if needed)
+ * if the guest has the interrupt unmasked and the P bit was *not*
+ * set in the IPI. If it was set, we know a slot may still be in
+ * use in the target queue thus we have to wait for a guest
+ * originated EOI
+ */
+ if (prio != MASKED && !state->old_p)
+ xive_vm_source_eoi(hw_irq, state->pt_data);
+
+ /* Clear old_p/old_q as they are no longer relevant */
+ state->old_p = state->old_q = false;
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
+
+int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+ struct irq_desc *host_desc)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ unsigned int host_irq = irq_desc_get_irq(host_desc);
+ u16 idx;
+ u8 prio;
+ int rc;
+
+ if (!xive)
+ return -ENODEV;
+
+ pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
+
+ sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
+ if (!sb)
+ return -EINVAL;
+ state = &sb->irq_state[idx];
+
+ /*
+ * Mask and read state of IRQ. We need to know if its P bit
+ * is set as that means it's potentially already using a
+ * queue entry in the target
+ */
+ prio = xive_lock_and_mask(xive, sb, state);
+ pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
+ state->old_p, state->old_q);
+
+ /*
+	 * If old_p is set, the interrupt is pending, so we switch it to
+	 * PQ=11. This will force a resend in the host so the interrupt
+	 * isn't lost to whatever host driver may pick it up
+ */
+ if (state->old_p)
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
+
+ /* Release the passed-through interrupt to the host */
+ rc = irq_set_vcpu_affinity(host_irq, NULL);
+ if (rc) {
+ pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
+ return rc;
+ }
+
+ /* Forget about the IRQ */
+ state->pt_number = 0;
+ state->pt_data = NULL;
+
+ /* Reconfigure the IPI */
+ xive_native_configure_irq(state->ipi_number,
+ xive->vp_base + state->act_server,
+ state->act_priority, state->number);
+
+ /*
+ * If old_p is set (we have a queue entry potentially
+ * occupied) or the interrupt is masked, we set the IPI
+ * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
+ */
+ if (prio == MASKED || state->old_p)
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
+ else
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
+
+ /* Restore guest prio (unlocks EOI) */
+ mb();
+ state->guest_priority = prio;
+ arch_spin_unlock(&sb->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
+
+static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ int i, j;
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+
+ if (!state->valid)
+ continue;
+ if (state->act_priority == MASKED)
+ continue;
+ if (state->act_server != xc->server_num)
+ continue;
+
+ /* Clean it up */
+ arch_spin_lock(&sb->lock);
+ state->act_priority = MASKED;
+ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+ if (state->pt_number) {
+ xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
+ }
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+}
+
+void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+ int i;
+
+ pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
+
+ /* Ensure no interrupt is still routed to that VP */
+ xc->valid = false;
+ kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+ /* Mask the VP IPI */
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
+
+ /* Disable the VP */
+ xive_native_disable_vp(xc->vp_id);
+
+ /* Free the queues & associated interrupts */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Free the escalation irq */
+ if (xc->esc_virq[i]) {
+ free_irq(xc->esc_virq[i], vcpu);
+ irq_dispose_mapping(xc->esc_virq[i]);
+ kfree(xc->esc_virq_names[i]);
+ }
+ /* Free the queue */
+ xive_native_disable_queue(xc->vp_id, q, i);
+ if (q->qpage) {
+ free_pages((unsigned long)q->qpage,
+ xive->q_page_order);
+ q->qpage = NULL;
+ }
+ }
+
+ /* Free the IPI */
+ if (xc->vp_ipi) {
+ xive_cleanup_irq_data(&xc->vp_ipi_data);
+ xive_native_free_irq(xc->vp_ipi);
+ }
+ /* Free the VP */
+ kfree(xc);
+}
+
+int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+ struct kvm_vcpu *vcpu, u32 cpu)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvmppc_xive_vcpu *xc;
+ int i, r = -EBUSY;
+
+ pr_devel("connect_vcpu(cpu=%d)\n", cpu);
+
+ if (dev->ops != &kvm_xive_ops) {
+ pr_devel("Wrong ops !\n");
+ return -EPERM;
+ }
+ if (xive->kvm != vcpu->kvm)
+ return -EPERM;
+ if (vcpu->arch.irq_type)
+ return -EBUSY;
+ if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
+ pr_devel("Duplicate !\n");
+ return -EEXIST;
+ }
+ if (cpu >= KVM_MAX_VCPUS) {
+ pr_devel("Out of bounds !\n");
+ return -EINVAL;
+ }
+ xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+ if (!xc)
+ return -ENOMEM;
+
+ /* We need to synchronize with queue provisioning */
+ mutex_lock(&vcpu->kvm->lock);
+ vcpu->arch.xive_vcpu = xc;
+ xc->xive = xive;
+ xc->vcpu = vcpu;
+ xc->server_num = cpu;
+ xc->vp_id = xive->vp_base + cpu;
+ xc->mfrr = 0xff;
+ xc->valid = true;
+
+ r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
+ if (r)
+ goto bail;
+
+ /* Configure VCPU fields for use by assembly push/pull */
+ vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
+ vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
+
+ /* Allocate IPI */
+ xc->vp_ipi = xive_native_alloc_irq();
+ if (!xc->vp_ipi) {
+ r = -EIO;
+ goto bail;
+ }
+ pr_devel(" IPI=0x%x\n", xc->vp_ipi);
+
+ r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
+ if (r)
+ goto bail;
+
+ /*
+ * Initialize queues. Initially we set them all for no queueing
+	 * and we enable escalation for queue 0 only, which we'll use for
+	 * our mfrr change notifications. However, if the VCPU is
+	 * hot-plugged, we do handle provisioning of already-enabled queues.
+ */
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+
+ /* Is queue already enabled ? Provision it */
+ if (xive->qmap & (1 << i)) {
+ r = xive_provision_queue(vcpu, i);
+ if (r == 0)
+ xive_attach_escalation(vcpu, i);
+ if (r)
+ goto bail;
+ } else {
+ r = xive_native_configure_queue(xc->vp_id,
+ q, i, NULL, 0, true);
+ if (r) {
+ pr_err("Failed to configure queue %d for VCPU %d\n",
+ i, cpu);
+ goto bail;
+ }
+ }
+ }
+
+ /* If not done above, attach priority 0 escalation */
+ r = xive_attach_escalation(vcpu, 0);
+ if (r)
+ goto bail;
+
+ /* Enable the VP */
+ r = xive_native_enable_vp(xc->vp_id);
+ if (r)
+ goto bail;
+
+ /* Route the IPI */
+ r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
+ if (!r)
+ xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
+
+bail:
+ mutex_unlock(&vcpu->kvm->lock);
+ if (r) {
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ return r;
+ }
+
+ vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
+ return 0;
+}
+
+/*
+ * Scanning of queues before/after migration save
+ */
+static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return;
+
+ state = &sb->irq_state[idx];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_err("invalid irq 0x%x in cpu queue!\n", irq);
+ return;
+ }
+
+ /*
+ * If the interrupt is in a queue it should have P set.
+	 * We warn so that it gets reported. A backtrace isn't useful
+ * so no need to use a WARN_ON.
+ */
+ if (!state->saved_p)
+ pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
+
+ /* Set flag */
+ state->in_queue = true;
+}
+
+static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /* Mask and save state, this will also sync HW queues */
+ state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
+
+ /* Transfer P and Q */
+ state->saved_p = state->old_p;
+ state->saved_q = state->old_q;
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
+ struct kvmppc_xive_src_block *sb,
+ u32 irq)
+{
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
+
+ if (!state->valid)
+ return;
+
+ /*
+ * Lock / exclude EOI (not technically necessary if the
+	 * guest isn't running concurrently). If this becomes a
+ * performance issue we can probably remove the lock.
+ */
+ xive_lock_for_unmask(sb, state);
+
+ /* Restore mask/prio if it wasn't masked */
+ if (state->saved_scan_prio != MASKED)
+ xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
+
+ /* Unlock */
+ arch_spin_unlock(&sb->lock);
+}
+
+static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
+{
+ u32 idx = q->idx;
+ u32 toggle = q->toggle;
+ u32 irq;
+
+ do {
+ irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
+ if (irq > XICS_IPI)
+ xive_pre_save_set_queued(xive, irq);
+	} while (irq);
+}
+
+static void xive_pre_save_scan(struct kvmppc_xive *xive)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i, j;
+
+ /*
+ * See comment in xive_get_source() about how this
+	 * works. Collect a stable state for all interrupts
+ */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_mask_irq(xive, sb, j);
+ }
+
+ /* Then scan the queues and update the "in_queue" flag */
+ kvm_for_each_vcpu(i, vcpu, xive->kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ if (!xc)
+ continue;
+ for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
+			if (xc->queues[j].qpage)
+				xive_pre_save_queue(xive, &xc->queues[j]);
+ }
+ }
+
+ /* Finally restore interrupt states */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ xive_pre_save_unmask_irq(xive, sb, j);
+ }
+}
+
+static void xive_post_save_scan(struct kvmppc_xive *xive)
+{
+ u32 i, j;
+
+ /* Clear all the in_queue flags */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+ if (!sb)
+ continue;
+ for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
+ sb->irq_state[j].in_queue = false;
+ }
+
+ /* Next get_source() will do a new scan */
+ xive->saved_src_count = 0;
+}
+
+/*
+ * This returns the source configuration and state to user space.
+ */
+static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u64 val, prio;
+ u16 idx;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -ENOENT;
+
+ state = &sb->irq_state[idx];
+
+ if (!state->valid)
+ return -ENOENT;
+
+ pr_devel("get_source(%ld)...\n", irq);
+
+ /*
+ * So to properly save the state into something that looks like a
+ * XICS migration stream we cannot treat interrupts individually.
+ *
+ * We need, instead, mask them all (& save their previous PQ state)
+ * to get a stable state in the HW, then sync them to ensure that
+ * any interrupt that had already fired hits its queue, and finally
+ * scan all the queues to collect which interrupts are still present
+ * in the queues, so we can set the "pending" flag on them and
+ * they can be resent on restore.
+ *
+ * So we do it all when the "first" interrupt gets saved, all the
+ * state is collected at that point, the rest of xive_get_source()
+ * will merely collect and convert that state to the expected
+ * userspace bit mask.
+ */
+ if (xive->saved_src_count == 0)
+ xive_pre_save_scan(xive);
+ xive->saved_src_count++;
+
+ /* Convert saved state into something compatible with xics */
+ val = state->guest_server;
+ prio = state->saved_scan_prio;
+
+ if (prio == MASKED) {
+ val |= KVM_XICS_MASKED;
+ prio = state->saved_priority;
+ }
+ val |= prio << KVM_XICS_PRIORITY_SHIFT;
+ if (state->lsi) {
+ val |= KVM_XICS_LEVEL_SENSITIVE;
+ if (state->saved_p)
+ val |= KVM_XICS_PENDING;
+ } else {
+ if (state->saved_p)
+ val |= KVM_XICS_PRESENTED;
+
+ if (state->saved_q)
+ val |= KVM_XICS_QUEUED;
+
+ /*
+ * We mark it pending (which will attempt a re-delivery)
+ * if we are in a queue *or* we were masked and had
+ * Q set which is equivalent to the XICS "masked pending"
+ * state
+ */
+ if (state->in_queue || (prio == MASKED && state->saved_q))
+ val |= KVM_XICS_PENDING;
+ }
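+
+	/*
+	 * e.g. an unmasked non-LSI source with guest server 1, guest
+	 * priority 5, P set, Q clear and not found in any queue is
+	 * encoded as 1 | (5 << KVM_XICS_PRIORITY_SHIFT) | KVM_XICS_PRESENTED.
+	 */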
+
+ /*
+ * If that was the last interrupt saved, reset the
+ * in_queue flags
+ */
+ if (xive->saved_src_count == xive->src_count)
+ xive_post_save_scan(xive);
+
+ /* Copy the result to userspace */
+ if (put_user(val, ubufp))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
+ int irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvmppc_xive_src_block *sb;
+ int i, bid;
+
+ bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+
+ mutex_lock(&kvm->lock);
+
+ /* block already exists - somebody else got here first */
+ if (xive->src_blocks[bid])
+ goto out;
+
+ /* Create the ICS */
+ sb = kzalloc(sizeof(*sb), GFP_KERNEL);
+ if (!sb)
+ goto out;
+
+ sb->id = bid;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
+ sb->irq_state[i].guest_priority = MASKED;
+ sb->irq_state[i].saved_priority = MASKED;
+ sb->irq_state[i].act_priority = MASKED;
+ }
+ smp_wmb();
+ xive->src_blocks[bid] = sb;
+
+ if (bid > xive->max_sbid)
+ xive->max_sbid = bid;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return xive->src_blocks[bid];
+}
+
+static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
+{
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ if (xc->delayed_irq == irq) {
+ xc->delayed_irq = 0;
+ xive->delayed_irqs--;
+ return true;
+ }
+ }
+ return false;
+}
+
+static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
+{
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u64 __user *ubufp = (u64 __user *) addr;
+ u16 idx;
+ u64 val;
+ u8 act_prio, guest_prio;
+ u32 server;
+ int rc = 0;
+
+ if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
+ return -ENOENT;
+
+ pr_devel("set_source(irq=0x%lx)\n", irq);
+
+ /* Find the source */
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb) {
+ pr_devel("No source, creating source block...\n");
+ sb = xive_create_src_block(xive, irq);
+ if (!sb) {
+ pr_devel("Failed to create block...\n");
+ return -ENOMEM;
+ }
+ }
+ state = &sb->irq_state[idx];
+
+ /* Read user passed data */
+ if (get_user(val, ubufp)) {
+ pr_devel("fault getting user info !\n");
+ return -EFAULT;
+ }
+
+ server = val & KVM_XICS_DESTINATION_MASK;
+ guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
+
+	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
+ val, server, guest_prio);
+ /*
+ * If the source doesn't already have an IPI, allocate
+ * one and get the corresponding data
+ */
+ if (!state->ipi_number) {
+ state->ipi_number = xive_native_alloc_irq();
+ if (state->ipi_number == 0) {
+ pr_devel("Failed to allocate IPI !\n");
+ return -ENOMEM;
+ }
+ xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
+ pr_devel(" src_ipi=0x%x\n", state->ipi_number);
+ }
+
+ /*
+ * We use lock_and_mask() to set us in the right masked
+ * state. We will override that state from the saved state
+ * further down, but this will handle the cases of interrupts
+ * that need FW masking. We set the initial guest_priority to
+ * 0 before calling it to ensure it actually performs the masking.
+ */
+ state->guest_priority = 0;
+ xive_lock_and_mask(xive, sb, state);
+
+ /*
+ * Now, we select a target if we have one. If we don't we
+ * leave the interrupt untargetted. It means that an interrupt
+	 * can become "untargetted" across migration if it was masked
+ * by set_xive() but there is little we can do about it.
+ */
+
+ /* First convert prio and mark interrupt as untargetted */
+ act_prio = xive_prio_from_guest(guest_prio);
+ state->act_priority = MASKED;
+ state->guest_server = server;
+
+ /*
+ * We need to drop the lock due to the mutex below. Hopefully
+ * nothing is touching that interrupt yet since it hasn't been
+	 * advertised to a running guest yet
+ */
+ arch_spin_unlock(&sb->lock);
+
+ /* If we have a priority target the interrupt */
+ if (act_prio != MASKED) {
+ /* First, check provisioning of queues */
+ mutex_lock(&xive->kvm->lock);
+ rc = xive_check_provisioning(xive->kvm, act_prio);
+ mutex_unlock(&xive->kvm->lock);
+
+ /* Target interrupt */
+ if (rc == 0)
+ rc = xive_target_interrupt(xive->kvm, state,
+ server, act_prio);
+ /*
+ * If provisioning or targetting failed, leave it
+ * alone and masked. It will remain disabled until
+ * the guest re-targets it.
+ */
+ }
+
+ /*
+ * Find out if this was a delayed irq stashed in an ICP,
+ * in which case, treat it as pending
+ */
+ if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
+ val |= KVM_XICS_PENDING;
+ pr_devel(" Found delayed ! forcing PENDING !\n");
+ }
+
+ /* Cleanup the SW state */
+ state->old_p = false;
+ state->old_q = false;
+ state->lsi = false;
+ state->asserted = false;
+
+ /* Restore LSI state */
+ if (val & KVM_XICS_LEVEL_SENSITIVE) {
+ state->lsi = true;
+ if (val & KVM_XICS_PENDING)
+ state->asserted = true;
+ pr_devel(" LSI ! Asserted=%d\n", state->asserted);
+ }
+
+ /*
+ * Restore P and Q. If the interrupt was pending, we
+ * force both P and Q, which will trigger a resend.
+ *
+ * That means that a guest that had both an interrupt
+ * pending (queued) and Q set will restore with only
+ * one instance of that interrupt instead of 2, but that
+ * is perfectly fine as coalescing interrupts that haven't
+ * been presented yet is always allowed.
+ */
+ if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
+ state->old_p = true;
+ if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
+ state->old_q = true;
+
+ pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
+
+ /*
+ * If the interrupt was unmasked, update guest priority and
+ * perform the appropriate state transition and do a
+ * re-trigger if necessary.
+ */
+ if (val & KVM_XICS_MASKED) {
+ pr_devel(" masked, saving prio\n");
+ state->guest_priority = MASKED;
+ state->saved_priority = guest_prio;
+ } else {
+ pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
+ xive_finish_unmask(xive, sb, state, guest_prio);
+ state->saved_priority = guest_prio;
+ }
+
+ /* Increment the number of valid sources and mark this one valid */
+ if (!state->valid)
+ xive->src_count++;
+ state->valid = true;
+
+ return 0;
+}
+
+int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status)
+{
+ struct kvmppc_xive *xive = kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ u16 idx;
+
+ if (!xive)
+ return -ENODEV;
+
+ sb = kvmppc_xive_find_source(xive, irq, &idx);
+ if (!sb)
+ return -EINVAL;
+
+ /* Perform locklessly .... (we need to do some RCUisms here...) */
+ state = &sb->irq_state[idx];
+ if (!state->valid)
+ return -EINVAL;
+
+ /* We don't allow a trigger on a passed-through interrupt */
+ if (state->pt_number)
+ return -EINVAL;
+
+ if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
+ state->asserted = 1;
+ else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
+ state->asserted = 0;
+ return 0;
+ }
+
+ /* Trigger the IPI */
+ xive_irq_trigger(&state->ipi_data);
+
+ return 0;
+}
+
+static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_set_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ struct kvmppc_xive *xive = dev->private;
+
+ /* We honor the existing XICS ioctl */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ return xive_get_source(xive, attr->attr, attr->addr);
+ }
+ return -ENXIO;
+}
+
+static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ /* We honor the same limits as XICS, at least for now */
+ switch (attr->group) {
+ case KVM_DEV_XICS_GRP_SOURCES:
+ if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
+ attr->attr < KVMPPC_XICS_NR_IRQS)
+ return 0;
+ break;
+ }
+ return -ENXIO;
+}
+
+static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
+{
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
+ xive_native_configure_irq(hw_num, 0, MASKED, 0);
+ xive_cleanup_irq_data(xd);
+}
+
+static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+ if (!state->valid)
+ continue;
+
+ kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
+ xive_native_free_irq(state->ipi_number);
+
+ /* Pass-through, cleanup too */
+ if (state->pt_number)
+ kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
+
+ state->valid = false;
+ }
+}
+
+static void kvmppc_xive_free(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = dev->private;
+ struct kvm *kvm = xive->kvm;
+ int i;
+
+ debugfs_remove(xive->dentry);
+
+ if (kvm)
+ kvm->arch.xive = NULL;
+
+ /* Mask and free interrupts */
+ for (i = 0; i <= xive->max_sbid; i++) {
+ if (xive->src_blocks[i])
+ kvmppc_xive_free_sources(xive->src_blocks[i]);
+ kfree(xive->src_blocks[i]);
+ xive->src_blocks[i] = NULL;
+ }
+
+ if (xive->vp_base != XIVE_INVALID_VP)
+ xive_native_free_vp_block(xive->vp_base);
+
+ kfree(xive);
+ kfree(dev);
+}
+
+static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+{
+ struct kvmppc_xive *xive;
+ struct kvm *kvm = dev->kvm;
+ int ret = 0;
+
+ pr_devel("Creating xive for partition\n");
+
+ xive = kzalloc(sizeof(*xive), GFP_KERNEL);
+ if (!xive)
+ return -ENOMEM;
+
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+
+	/* Already there ? */
+	if (kvm->arch.xive)
+		ret = -EEXIST;
+
+ /* We use the default queue size set by the host */
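+	/* e.g. a 64kB EQ (shift 16) gives q_page_order 4 with 4kB pages */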
+ xive->q_order = xive_native_default_eq_shift();
+ if (xive->q_order < PAGE_SHIFT)
+ xive->q_page_order = 0;
+ else
+ xive->q_page_order = xive->q_order - PAGE_SHIFT;
+
+ /* Allocate a bunch of VPs */
+ xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+ pr_devel("VP_Base=%x\n", xive->vp_base);
+
+ if (xive->vp_base == XIVE_INVALID_VP)
+ ret = -ENOMEM;
+
+ if (ret) {
+ kfree(xive);
+ return ret;
+ }
+
+	/* Only publish the device once it is fully set up */
+	kvm->arch.xive = xive;
+	return 0;
+}
+
+static int xive_debug_show(struct seq_file *m, void *private)
+{
+ struct kvmppc_xive *xive = m->private;
+ struct kvm *kvm = xive->kvm;
+ struct kvm_vcpu *vcpu;
+ u64 t_rm_h_xirr = 0;
+ u64 t_rm_h_ipoll = 0;
+ u64 t_rm_h_cppr = 0;
+ u64 t_rm_h_eoi = 0;
+ u64 t_rm_h_ipi = 0;
+ u64 t_vm_h_xirr = 0;
+ u64 t_vm_h_ipoll = 0;
+ u64 t_vm_h_cppr = 0;
+ u64 t_vm_h_eoi = 0;
+ u64 t_vm_h_ipi = 0;
+ unsigned int i;
+
+ if (!kvm)
+ return 0;
+
+ seq_printf(m, "=========\nVCPU state\n=========\n");
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ if (!xc)
+ continue;
+
+ seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
+ " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
+ xc->server_num, xc->cppr, xc->hw_cppr,
+ xc->mfrr, xc->pending,
+ xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+
+ t_rm_h_xirr += xc->stat_rm_h_xirr;
+ t_rm_h_ipoll += xc->stat_rm_h_ipoll;
+ t_rm_h_cppr += xc->stat_rm_h_cppr;
+ t_rm_h_eoi += xc->stat_rm_h_eoi;
+ t_rm_h_ipi += xc->stat_rm_h_ipi;
+ t_vm_h_xirr += xc->stat_vm_h_xirr;
+ t_vm_h_ipoll += xc->stat_vm_h_ipoll;
+ t_vm_h_cppr += xc->stat_vm_h_cppr;
+ t_vm_h_eoi += xc->stat_vm_h_eoi;
+ t_vm_h_ipi += xc->stat_vm_h_ipi;
+ }
+
+ seq_printf(m, "Hcalls totals\n");
+ seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
+ seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
+ seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
+ seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
+ seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
+
+ return 0;
+}
+
+static int xive_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, xive_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_debug_fops = {
+ .open = xive_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void xive_debugfs_init(struct kvmppc_xive *xive)
+{
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+ if (!name) {
+ pr_err("%s: no memory for name\n", __func__);
+ return;
+ }
+
+ xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xive, &xive_debug_fops);
+
+ pr_debug("%s: created %s\n", __func__, name);
+ kfree(name);
+}
+
+static void kvmppc_xive_init(struct kvm_device *dev)
+{
+ struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+ /* Register some debug interfaces */
+ xive_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_ops = {
+ .name = "kvm-xive",
+ .create = kvmppc_xive_create,
+ .init = kvmppc_xive_init,
+ .destroy = kvmppc_xive_free,
+ .set_attr = xive_set_attr,
+ .get_attr = xive_get_attr,
+ .has_attr = xive_has_attr,
+};
+
+void kvmppc_xive_init_module(void)
+{
+ __xive_vm_h_xirr = xive_vm_h_xirr;
+ __xive_vm_h_ipoll = xive_vm_h_ipoll;
+ __xive_vm_h_ipi = xive_vm_h_ipi;
+ __xive_vm_h_cppr = xive_vm_h_cppr;
+ __xive_vm_h_eoi = xive_vm_h_eoi;
+}
+
+void kvmppc_xive_exit_module(void)
+{
+ __xive_vm_h_xirr = NULL;
+ __xive_vm_h_ipoll = NULL;
+ __xive_vm_h_ipi = NULL;
+ __xive_vm_h_cppr = NULL;
+ __xive_vm_h_eoi = NULL;
+}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
new file mode 100644
index 000000000000..5938f7644dc1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XIVE_H
+#define _KVM_PPC_BOOK3S_XIVE_H
+
+#ifdef CONFIG_KVM_XICS
+#include "book3s_xics.h"
+
+/*
+ * State for one guest irq source.
+ *
+ * For each guest source we allocate a HW interrupt in the XIVE
+ * which we use for all SW triggers. It will be unused for
+ * pass-through but it's easier to keep around as the same
+ * guest interrupt can alternatively be emulated or passed through
+ * if a physical device is hot-unplugged and replaced with an
+ * emulated one.
+ *
+ * This state structure is very similar to the XICS one with
+ * additional XIVE specific tracking.
+ */
+struct kvmppc_xive_irq_state {
+ bool valid; /* Interrupt entry is valid */
+
+ u32 number; /* Guest IRQ number */
+ u32 ipi_number; /* XIVE IPI HW number */
+ struct xive_irq_data ipi_data; /* XIVE IPI associated data */
+ u32 pt_number; /* XIVE Pass-through number if any */
+ struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */
+
+ /* Targetting as set by guest */
+ u32 guest_server; /* Current guest selected target */
+ u8 guest_priority; /* Guest set priority */
+ u8 saved_priority; /* Saved priority when masking */
+
+ /* Actual targetting */
+ u32 act_server; /* Actual server */
+ u8 act_priority; /* Actual priority */
+
+ /* Various state bits */
+ bool in_eoi; /* Synchronize with H_EOI */
+ bool old_p; /* P bit state when masking */
+ bool old_q; /* Q bit state when masking */
+ bool lsi; /* level-sensitive interrupt */
+ bool asserted; /* Only for emulated LSI: current state */
+
+ /* Saved for migration state */
+ bool in_queue;
+ bool saved_p;
+ bool saved_q;
+ u8 saved_scan_prio;
+};
+
+/* Select the "right" interrupt (IPI vs. passthrough) */
+static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
+ u32 *out_hw_irq,
+ struct xive_irq_data **out_xd)
+{
+ if (state->pt_number) {
+ if (out_hw_irq)
+ *out_hw_irq = state->pt_number;
+ if (out_xd)
+ *out_xd = state->pt_data;
+ } else {
+ if (out_hw_irq)
+ *out_hw_irq = state->ipi_number;
+ if (out_xd)
+ *out_xd = &state->ipi_data;
+ }
+}
+
+/*
+ * This corresponds to an "ICS" in XICS terminology; we use it
+ * as a means to break up source information into multiple structures.
+ */
+struct kvmppc_xive_src_block {
+ arch_spinlock_t lock;
+ u16 id;
+ struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+
+struct kvmppc_xive {
+ struct kvm *kvm;
+ struct kvm_device *dev;
+ struct dentry *dentry;
+
+ /* VP block associated with the VM */
+ u32 vp_base;
+
+ /* Blocks of sources */
+ struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
+ u32 max_sbid;
+
+ /*
+ * For state save, we lazily scan the queues on the first interrupt
+	 * being migrated. We don't have a clean way to reset those flags,
+	 * so we keep track of the number of valid sources and how many of
+ * them were migrated so we can reset when all of them have been
+ * processed.
+ */
+ u32 src_count;
+ u32 saved_src_count;
+
+ /*
+ * Some irqs are delayed on restore until the source is created,
+	 * so we keep track here of how many there are
+ */
+ u32 delayed_irqs;
+
+ /* Which queues (priorities) are in use by the guest */
+ u8 qmap;
+
+ /* Queue orders */
+ u32 q_order;
+ u32 q_page_order;
+
+};
+
+#define KVMPPC_XIVE_Q_COUNT 8
+
+struct kvmppc_xive_vcpu {
+ struct kvmppc_xive *xive;
+ struct kvm_vcpu *vcpu;
+ bool valid;
+
+ /* Server number. This is the HW CPU ID from a guest perspective */
+ u32 server_num;
+
+ /*
+ * HW VP corresponding to this VCPU. This is the base of the VP
+ * block plus the server number.
+ */
+ u32 vp_id;
+ u32 vp_chip_id;
+ u32 vp_cam;
+
+ /* IPI used for sending ... IPIs */
+ u32 vp_ipi;
+ struct xive_irq_data vp_ipi_data;
+
+ /* Local emulation state */
+ uint8_t cppr; /* guest CPPR */
+ uint8_t hw_cppr;/* Hardware CPPR */
+ uint8_t mfrr;
+ uint8_t pending;
+
+ /* Each VP has 8 queues though we only provision some */
+ struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
+ u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
+ char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];
+
+ /* Stash a delayed irq on restore from migration (see set_icp) */
+ u32 delayed_irq;
+
+ /* Stats */
+ u64 stat_rm_h_xirr;
+ u64 stat_rm_h_ipoll;
+ u64 stat_rm_h_cppr;
+ u64 stat_rm_h_eoi;
+ u64 stat_rm_h_ipi;
+ u64 stat_vm_h_xirr;
+ u64 stat_vm_h_ipoll;
+ u64 stat_vm_h_cppr;
+ u64 stat_vm_h_eoi;
+ u64 stat_vm_h_ipi;
+};
+
+static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
+{
+ struct kvm_vcpu *vcpu = NULL;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
+ return vcpu;
+ }
+ return NULL;
+}
+
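+/*
+ * Split a guest irq number into its source block id (the upper bits)
+ * and its index within that block (the low KVMPPC_XICS_ICS_SHIFT bits).
+ */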
+static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
+ u32 irq, u16 *source)
+{
+ u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+ u16 src = irq & KVMPPC_XICS_SRC_MASK;
+
+ if (source)
+ *source = src;
+ if (bid > KVMPPC_XICS_MAX_ICS_ID)
+ return NULL;
+ return xive->src_blocks[bid];
+}
+
+/*
+ * Mapping between guest priorities and host priorities
+ * is as follows.
+ *
+ * Guest requests for 0...6 are honored. Guest requests for anything
+ * higher result in a priority of 7 being applied.
+ *
+ * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
+ * in order to match AIX expectations.
+ *
+ * A similar mapping is done for CPPR values.
+ */
+static inline u8 xive_prio_from_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 8)
+ return prio;
+ return 7;
+}
+
+static inline u8 xive_prio_to_guest(u8 prio)
+{
+ if (prio == 0xff || prio < 7)
+ return prio;
+ return 0xb;
+}
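+
+/*
+ * For example: guest priority 5 maps to host priority 5 and back,
+ * guest priority 9 is clamped to host priority 7, which is presented
+ * back to the guest as 0xb, and 0xff (masked) passes through unchanged.
+ */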
+
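+/*
+ * Consume one entry from an event queue page. The queue is a ring of
+ * big-endian 32-bit words: bit 31 is a generation (toggle) bit and
+ * bits 0-30 hold the interrupt number. An entry is valid when its
+ * generation bit differs from the caller's toggle; the index advances
+ * modulo msk and the toggle flips each time the index wraps to 0.
+ */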
+static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
+{
+ u32 cur;
+
+ if (!qpage)
+ return 0;
+ cur = be32_to_cpup(qpage + *idx);
+ if ((cur >> 31) == *toggle)
+ return 0;
+ *idx = (*idx + 1) & msk;
+ if (*idx == 0)
+ (*toggle) ^= 1;
+ return cur & 0x7fffffff;
+}
+
+extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
+extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr);
+extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+#endif /* CONFIG_KVM_XICS */
+#endif /* _KVM_PPC_BOOK3S_XIVE_H */
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
new file mode 100644
index 000000000000..4636ca6e7d38
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+/* File to be included by other .c files */
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
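+
+/*
+ * e.g. with X_PFX defined as xive_vm_ by the including file,
+ * GLUE(X_PFX, h_xirr) expands to xive_vm_h_xirr.
+ */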
+
+static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 cppr;
+ u16 ack;
+
+ /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+
+ /* Perform the acknowledge OS to register cycle. */
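+	/* The ack has the NSR in its top byte and the CPPR in its bottom byte */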
+ ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /* XXX Check grouping level */
+
+ /* Anything ? */
+ if (!((ack >> 8) & TM_QW1_NSR_EO))
+ return;
+
+ /* Grab CPPR of the most favored pending interrupt */
+ cppr = ack & 0xff;
+ if (cppr < 8)
+ xc->pending |= 1 << cppr;
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* Check consistency */
+ if (cppr >= xc->hw_cppr)
+ pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->hw_cppr);
+#endif
+
+ /*
+ * Update our image of the HW CPPR. We don't yet modify
+ * xc->cppr, this will be done as we scan for interrupts
+ * in the queues.
+ */
+ xc->hw_cppr = cppr;
+}
+
+static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
+ offset |= offset << 4;
+
+ val = __x_readq(__x_eoi_page(xd) + offset);
+#ifdef __LITTLE_ENDIAN__
+ val >>= 64-8;
+#endif
+ return (u8)val;
+}
+
+
+static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI" facility, use it */
+ if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ opal_int_eoi(hw_irq);
+ } else {
+ uint64_t eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set
+ * rather than synthesizing an interrupt in software
+ *
+ * For LSIs, using the HW EOI cycle works around a problem
+ * on P9 DD1 PHBs where the other ESB accesses don't work
+ * properly.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI)
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
+ else {
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
+ }
+ }
+}
+
+enum {
+ scan_fetch,
+ scan_poll,
+ scan_eoi,
+};
+
+static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
+ u8 pending, int scan_type)
+{
+ u32 hirq = 0;
+ u8 prio = 0xff;
+
+ /* Find highest pending priority */
+ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
+ struct xive_q *q;
+ u32 idx, toggle;
+ __be32 *qpage;
+
+ /*
+ * If pending is 0, this will return 0xff, which is what
+ * we want.
+ */
+ prio = ffs(pending) - 1;
+
+ /*
+ * If the most favored prio we found pending is less
+ * favored than (or equal to) a pending IPI, we return
+ * the IPI instead.
+ *
+ * Note: If pending was 0 and mfrr is 0xff, we will
+ * not spuriously take an IPI because mfrr cannot
+ * then be smaller than cppr.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* Don't scan past the guest cppr */
+ if (prio >= xc->cppr || prio > 7)
+ break;
+
+ /* Grab queue and pointers */
+ q = &xc->queues[prio];
+ idx = q->idx;
+ toggle = q->toggle;
+
+ /*
+ * Snapshot the queue page. The test further down for EOI
+ * must use the same "copy" that was used by __xive_read_eq
+ * since qpage can be set concurrently and we don't want
+ * to miss an EOI.
+ */
+ qpage = READ_ONCE(q->qpage);
+
+skip_ipi:
+ /*
+ * Try to fetch from the queue. Will return 0 for a
+ * non-queueing priority (ie, qpage = 0).
+ */
+ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
+
+ /*
+ * If this was a signal for an MFRR change done by
+ * H_IPI we skip it. Additionally, if we were fetching
+ * we EOI it now, thus re-enabling reception of a new
+ * such signal.
+ *
+ * We also need to do that if prio is 0 and we had no
+ * page for the queue. In this case, we have a non-queued
+ * IPI that needs to be EOId.
+ *
+ * This is safe because if we have another pending MFRR
+ * change that wasn't observed above, the Q bit will have
+ * been set and another occurrence of the IPI will trigger.
+ */
+ if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
+ if (scan_type == scan_fetch)
+ GLUE(X_PFX,source_eoi)(xc->vp_ipi,
+ &xc->vp_ipi_data);
+ /* Loop back on same queue with updated idx/toggle */
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(hirq && hirq != XICS_IPI);
+#endif
+ if (hirq)
+ goto skip_ipi;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+
+ /* Something found, stop searching */
+ if (hirq)
+ break;
+
+ /* Clear the pending bit on the now empty queue */
+ pending &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+ if (p) {
+#ifdef XIVE_RUNTIME_CHECKS
+ WARN_ON(p > atomic_read(&q->count));
+#endif
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /* If we are just taking a "peek", do nothing else */
+ if (scan_type == scan_poll)
+ return hirq;
+
+ /* Update the pending bits */
+ xc->pending = pending;
+
+ /*
+ * If this is an EOI, that's it: no CPPR adjustment is done here.
+ * All we needed was to clean up the stale pending bits and check
+ * whether there's anything left.
+ */
+ if (scan_type == scan_eoi)
+ return hirq;
+
+ /*
+ * If we found an interrupt, adjust what the guest CPPR should
+ * be as if we had just fetched that interrupt from HW.
+ */
+ if (hirq)
+ xc->cppr = prio;
+ /*
+ * If it was an IPI the HW CPPR might have been lowered too much
+ * as the HW interrupt we use for IPIs is routed to priority 0.
+ *
+ * We re-sync it here.
+ */
+ if (xc->cppr != xc->hw_cppr) {
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+ }
+
+ return hirq;
+}
+
+X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+ u32 hirq;
+
+ pr_devel("H_XIRR\n");
+
+ xc->GLUE(X_STAT_PFX,h_xirr)++;
+
+ /* First collect pending bits from HW */
+ GLUE(X_PFX,ack_pending)(xc);
+
+ /*
+ * Clean up the old-style bits if needed (they may have been
+ * set by a pull or an escalation interrupt).
+ */
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
+ &vcpu->arch.pending_exceptions);
+
+ pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
+ xc->pending, xc->hw_cppr, xc->cppr);
+
+ /* Grab previous CPPR and reverse map it */
+ old_cppr = xive_prio_to_guest(xc->cppr);
+
+ /* Scan for actual interrupts */
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
+
+ pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
+ hirq, xc->hw_cppr, xc->cppr);
+
+#ifdef XIVE_RUNTIME_CHECKS
+ /* That should never hit */
+ if (hirq & 0xff000000)
+ pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
+#endif
+
+ /*
+ * XXX We could check if the interrupt is masked here and
+ * filter it. If we chose to do so, we would need to do:
+ *
+ * if (masked) {
+ * lock();
+ * if (masked) {
+ * old_Q = true;
+ * hirq = 0;
+ * }
+ * unlock();
+ * }
+ */
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
+
+ return H_SUCCESS;
+}
+
+X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+ u8 pipr;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+ xc->GLUE(X_STAT_PFX,h_ipoll)++;
+
+ /* Grab the target VCPU if not the current one */
+ if (xc->server_num != server) {
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Scan all priorities */
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+ pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
+
+ hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
+{
+ u8 pending, prio;
+
+ pending = xc->pending;
+ if (xc->mfrr != 0xff) {
+ if (xc->mfrr < 8)
+ pending |= 1 << xc->mfrr;
+ else
+ pending |= 0x80;
+ }
+ if (!pending)
+ return;
+ prio = ffs(pending) - 1;
+
+ __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
+}
+
+X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+
+ pr_devel("H_CPPR(cppr=%ld)\n", cppr);
+
+ xc->GLUE(X_STAT_PFX,h_cppr)++;
+
+ /* Map CPPR */
+ cppr = xive_prio_from_guest(cppr);
+
+ /* Remember old and update SW state */
+ old_cppr = xc->cppr;
+ xc->cppr = cppr;
+
+ /*
+ * We are masking less, so we need to look for pending things
+ * to deliver and set the VP pending bits accordingly to trigger
+ * a new interrupt; otherwise we might miss MFRR changes for
+ * which we have optimized out sending an IPI signal.
+ */
+ if (cppr > old_cppr)
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = cppr;
+ __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return H_SUCCESS;
+}
+
+X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_irq_data *xd;
+ u8 new_cppr = xirr >> 24;
+ u32 irq = xirr & 0x00ffffff, hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("H_EOI(xirr=%08lx)\n", xirr);
+
+ xc->GLUE(X_STAT_PFX,h_eoi)++;
+
+ xc->cppr = xive_prio_from_guest(new_cppr);
+
+ /*
+ * IPIs are synthesized from MFRR and thus don't need
+ * any special EOI handling. The underlying interrupt
+ * used to signal MFRR changes is EOId when fetched from
+ * the queue.
+ */
+ if (irq == XICS_IPI || irq == 0)
+ goto bail;
+
+ /* Find interrupt source */
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel(" source not found !\n");
+ rc = H_PARAMETER;
+ goto bail;
+ }
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ state->in_eoi = true;
+ mb();
+
+again:
+ if (state->guest_priority == MASKED) {
+ arch_spin_lock(&sb->lock);
+ if (state->guest_priority != MASKED) {
+ arch_spin_unlock(&sb->lock);
+ goto again;
+ }
+ pr_devel(" EOI on saved P...\n");
+
+ /* Clear old_p, that will cause unmask to perform an EOI */
+ state->old_p = false;
+
+ arch_spin_unlock(&sb->lock);
+ } else {
+ pr_devel(" EOI on source...\n");
+
+ /* Perform EOI on the source */
+ GLUE(X_PFX,source_eoi)(hw_num, xd);
+
+ /* If it's an emulated LSI, check level and resend */
+ if (state->lsi && state->asserted)
+ __x_writeq(0, __x_trig_page(xd));
+
+ }
+
+ mb();
+ state->in_eoi = false;
+bail:
+
+ /* Re-evaluate pending IRQs and update HW */
+ GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
+ GLUE(X_PFX,push_pending_to_hw)(xc);
+ pr_devel(" after scan pending=%02x\n", xc->pending);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = xc->cppr;
+ __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
+
+ return rc;
+}
+
+X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
+
+ xc->GLUE(X_STAT_PFX,h_ipi)++;
+
+ /* Find target */
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Locklessly write over MFRR */
+ xc->mfrr = mfrr;
+
+ /* Shoot the IPI if more favored than the target CPPR */
+ if (mfrr < xc->cppr)
+ __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
+
+ return H_SUCCESS;
+}
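The priority selection at the top of scan_interrupts() boils down to three checks: take the most favored bit of the pending mask via ffs(), prefer an IPI when the MFRR is at least as favored and not masked by the CPPR, and never scan past the CPPR or priority 7. Below is a condensed userspace sketch of just that decision; the queue fetches are omitted, pick_irq() and the sample values in main() are inventions of this illustration, and XICS_IPI is assumed to be 2 as in the XICS code.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

#define XICS_IPI 2      /* assumed value of the XICS IPI source number */

/*
 * Return the priority scan_interrupts() would look at, XICS_IPI for an
 * MFRR-driven IPI, or -1 when the CPPR masks everything.
 */
static int pick_irq(uint8_t pending, uint8_t mfrr, uint8_t cppr)
{
        /* ffs(0) == 0, so an empty mask yields prio 0xff as in the code */
        uint8_t prio = ffs(pending) - 1;

        /* A pending IPI that is at least as favored wins */
        if (prio >= mfrr && mfrr < cppr)
                return XICS_IPI;

        /* Never deliver anything the guest CPPR masks */
        if (prio >= cppr || prio > 7)
                return -1;

        return prio;
}

int main(void)
{
        /* priorities 3 and 5 pending, no IPI (mfrr=0xff), CPPR wide open */
        printf("%d\n", pick_irq((1 << 3) | (1 << 5), 0xff, 0xff)); /* 3        */
        /* same, but an IPI at MFRR 2 is more favored */
        printf("%d\n", pick_irq((1 << 3) | (1 << 5), 2, 0xff));    /* XICS_IPI */
        /* CPPR 3 masks priority 3 and anything less favored */
        printf("%d\n", pick_irq(1 << 3, 0xff, 3));                 /* -1       */
        return 0;
}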
diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h
index 5a9a10b90762..3f1be85a83bc 100644
--- a/arch/powerpc/kvm/irq.h
+++ b/arch/powerpc/kvm/irq.h
@@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
#endif
#ifdef CONFIG_KVM_XICS
ret = ret || (kvm->arch.xics != NULL);
+ ret = ret || (kvm->arch.xive != NULL);
#endif
smp_rmb();
return ret;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1ee22a910074..7f71ab5fcad1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -38,6 +38,8 @@
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
+#include <asm/xive.h>
+
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -697,7 +699,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
break;
case KVMPPC_IRQ_XICS:
- kvmppc_xics_free_icp(vcpu);
+ if (xive_enabled())
+ kvmppc_xive_cleanup_vcpu(vcpu);
+ else
+ kvmppc_xics_free_icp(vcpu);
break;
}
@@ -1522,8 +1527,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = -EPERM;
dev = kvm_device_from_filp(f.file);
- if (dev)
- r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ if (dev) {
+ if (xive_enabled())
+ r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
+ else
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ }
fdput(f);
break;
@@ -1547,7 +1556,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return true;
#endif
#ifdef CONFIG_KVM_XICS
- if (kvm->arch.xics)
+ if (kvm->arch.xics || kvm->arch.xive)
return true;
#endif
return false;
@@ -1740,7 +1749,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_enable_cap(kvm, &cap);
break;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_CREATE_SPAPR_TCE_64: {
struct kvm_create_spapr_tce_64 create_tce_64;
@@ -1771,6 +1780,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
goto out;
}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_PPC_GET_SMMU_INFO: {
struct kvm_ppc_smmu_info info;
struct kvm *kvm = filp->private_data;
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index d659345a98d6..44fe4833910f 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -16,6 +16,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
@@ -391,7 +392,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
addr = start + i * PMD_SIZE;
- if (!pmd_none(*pmd))
+ if (!pmd_none(*pmd) && !pmd_huge(*pmd))
/* pmd exists */
walk_pte(st, pmd, addr);
else
@@ -407,7 +408,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
addr = start + i * PUD_SIZE;
- if (!pud_none(*pud))
+ if (!pud_none(*pud) && !pud_huge(*pud))
/* pud exists */
walk_pmd(st, pud, addr);
else
@@ -427,7 +428,7 @@ static void walk_pagetables(struct pg_state *st)
*/
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = KERN_VIRT_START + i * PGDIR_SIZE;
- if (!pgd_none(*pgd))
+ if (!pgd_none(*pgd) && !pgd_huge(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
else
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9aabef4..a12e86395025 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a733d6b..0ee6be4f1ba4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vma->vm_start))
+ (!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index c6dca2ae78ef..a3edf813d455 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
* mm->context.addr_limit. Default to max task size so that we copy the
* default values to paca which will help us to handle slb miss early.
*/
- mm->context.addr_limit = TASK_SIZE_128TB;
+ mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
/*
* The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fccfa66..45f6740dd407 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
if ((mm->task_size - len) < addr)
return 0;
vma = find_vma(mm, addr);
- return (!vma || (addr + len) <= vma->vm_start);
+ return (!vma || (addr + len) <= vm_start_gap(vma));
}
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index cbd82fde5770..09ceea6175ba 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
- regs_user->abi = perf_reg_abi(current);
+ regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+ PERF_SAMPLE_REGS_ABI_NONE;
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 018f8e90ac35..bb28e1a41257 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = ISA207_TEST_ADDER,
+ .test_adder = P9_DD1_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
+ .test_adder = ISA207_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 33244e3d9375..4fd64d3f5c44 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
In case of doubt, say Y
+config PPC_DT_CPU_FTRS
+ bool "Device-tree based CPU feature discovery & setup"
+ depends on PPC_BOOK3S_64
+ default y
+ help
+ This enables code to use a new device tree binding for describing CPU
+ compatibility and features. Saying Y here will attempt to use the new
+ binding if the firmware provides it. Currently only the skiboot
+ firmware provides this binding.
+ If you're not sure, say Y.
+
config UDBG_RTAS_CONSOLE
bool "RTAS based debug console"
depends on PPC_RTAS
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96c2b8a40630..0c45cdbac4cf 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
(REGION_ID(ea) != USER_REGION_ID)) {
spin_unlock(&spu->register_lock);
- ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+ ret = hash_page(ea,
+ _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+ 0x300, dsisr);
spin_lock(&spu->register_lock);
if (!ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index e5a891ae80ee..84b7ac926ce6 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
if (!dump_skip(cprm, skip))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index d2f19821d71d..d12ea7b9fd47 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -412,11 +412,14 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
* been set for the PE, we will set EEH_PE_CFG_BLOCKED for
* that PE to block its config space.
*
+ * Broadcom BCM5718 2-ports NICs (14e4:1656)
* Broadcom Austin 4-ports NICs (14e4:1657)
* Broadcom Shiner 4-ports 1G NICs (14e4:168a)
* Broadcom Shiner 2-ports 10G NICs (14e4:168e)
*/
if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1656) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x1657) ||
(pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
pdn->device_id == 0x168a) ||
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 067defeea691..b5d960d6db3d 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
if (WARN_ON(!gpdev))
return NULL;
- if (WARN_ON(!gpdev->dev.of_node))
+ /* Not all PCI devices have device-tree nodes */
+ if (!gpdev->dev.of_node)
return NULL;
/* Get associated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
return mmio_atsd_reg;
}
-static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
+static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{
unsigned long launch;
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
/* PID */
launch |= pid << PPC_BITLSHIFT(38);
+ /* No flush */
+ launch |= !flush << PPC_BITLSHIFT(39);
+
/* Invalidating the entire process doesn't use a va */
return mmio_launch_invalidate(npu, launch, 0);
}
static int mmio_invalidate_va(struct npu *npu, unsigned long va,
- unsigned long pid)
+ unsigned long pid, bool flush)
{
unsigned long launch;
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
/* PID */
launch |= pid << PPC_BITLSHIFT(38);
+ /* No flush */
+ launch |= !flush << PPC_BITLSHIFT(39);
+
return mmio_launch_invalidate(npu, launch, va);
}
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
+struct mmio_atsd_reg {
+ struct npu *npu;
+ int reg;
+};
+
+static void mmio_invalidate_wait(
+ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
+{
+ struct npu *npu;
+ int i, reg;
+
+ /* Wait for all invalidations to complete */
+ for (i = 0; i <= max_npu2_index; i++) {
+ if (mmio_atsd_reg[i].reg < 0)
+ continue;
+
+ /* Wait for completion */
+ npu = mmio_atsd_reg[i].npu;
+ reg = mmio_atsd_reg[i].reg;
+ while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
+ cpu_relax();
+
+ put_mmio_atsd_reg(npu, reg);
+
+ /*
+ * The GPU requires two flush ATSDs to ensure all entries have
+ * been flushed. We use PID 0 as it will never be used for a
+ * process on the GPU.
+ */
+ if (flush)
+ mmio_invalidate_pid(npu, 0, true);
+ }
+}
+
/*
* Invalidate either a single address or an entire PID depending on
* the value of va.
*/
static void mmio_invalidate(struct npu_context *npu_context, int va,
- unsigned long address)
+ unsigned long address, bool flush)
{
- int i, j, reg;
+ int i, j;
struct npu *npu;
struct pnv_phb *nphb;
struct pci_dev *npdev;
- struct {
- struct npu *npu;
- int reg;
- } mmio_atsd_reg[NV_MAX_NPUS];
+ struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
unsigned long pid = npu_context->mm->context.id;
/*
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
if (va)
mmio_atsd_reg[i].reg =
- mmio_invalidate_va(npu, address, pid);
+ mmio_invalidate_va(npu, address, pid,
+ flush);
else
mmio_atsd_reg[i].reg =
- mmio_invalidate_pid(npu, pid);
+ mmio_invalidate_pid(npu, pid, flush);
/*
* The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
*/
flush_tlb_mm(npu_context->mm);
- /* Wait for all invalidations to complete */
- for (i = 0; i <= max_npu2_index; i++) {
- if (mmio_atsd_reg[i].reg < 0)
- continue;
-
- /* Wait for completion */
- npu = mmio_atsd_reg[i].npu;
- reg = mmio_atsd_reg[i].reg;
- while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
- cpu_relax();
- put_mmio_atsd_reg(npu, reg);
- }
+ mmio_invalidate_wait(mmio_atsd_reg, flush);
+ if (flush)
+ /* Wait for the flush to complete */
+ mmio_invalidate_wait(mmio_atsd_reg, false);
}
static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
* There should be no more translation requests for this PID, but we
* need to ensure any entries for it are removed from the TLB.
*/
- mmio_invalidate(npu_context, 0, 0);
+ mmio_invalidate(npu_context, 0, 0, true);
}
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
{
struct npu_context *npu_context = mn_to_npu_context(mn);
- mmio_invalidate(npu_context, 1, address);
+ mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
{
struct npu_context *npu_context = mn_to_npu_context(mn);
- mmio_invalidate(npu_context, 1, address);
+ mmio_invalidate(npu_context, 1, address, true);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
- for (address = start; address <= end; address += PAGE_SIZE)
- mmio_invalidate(npu_context, 1, address);
+ for (address = start; address < end; address += PAGE_SIZE)
+ mmio_invalidate(npu_context, 1, address, false);
+
+ /* Do the flush only on the final address == end */
+ mmio_invalidate(npu_context, 1, address, true);
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
/* No nvlink associated with this GPU device */
return ERR_PTR(-ENODEV);
- if (!mm) {
- /* kernel thread contexts are not supported */
+ if (!mm || mm->context.id == 0) {
+ /*
+ * Kernel thread contexts are not supported and context id 0 is
+ * reserved on the GPU.
+ */
return ERR_PTR(-EINVAL);
}
@@ -714,7 +751,7 @@ static void pnv_npu2_release_context(struct kref *kref)
void pnv_npu2_destroy_context(struct npu_context *npu_context,
struct pci_dev *gpdev)
{
- struct pnv_phb *nphb, *phb;
+ struct pnv_phb *nphb;
struct npu *npu;
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
struct device_node *nvlink_dn;
@@ -728,13 +765,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
nphb = pci_bus_to_host(npdev->bus)->private_data;
npu = &nphb->npu;
- phb = pci_bus_to_host(gpdev->bus)->private_data;
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
return;
npu_context->npdev[npu->index][nvlink_index] = NULL;
- opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+ opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
kref_put(&npu_context->kref, pnv_npu2_release_context);
}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 7925a9d72cca..59684b4af4d1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -967,3 +967,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind);
EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
/* Export this for KVM */
EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
+EXPORT_SYMBOL_GPL(opal_int_eoi);
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 0babef11136f..8c6119280c13 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
static int subcore_init(void)
{
- if (!cpu_has_feature(CPU_FTR_SUBCORE))
+ unsigned pvr_ver;
+
+ pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+ if (pvr_ver != PVR_POWER8 &&
+ pvr_ver != PVR_POWER8E &&
+ pvr_ver != PVR_POWER8NVL)
return 0;
/*
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e104c71ea44a..1fb162ba9d1c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
}
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
}
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 986cd111d4df..c651e668996b 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -377,6 +377,10 @@ static void cpm1_set_pin16(int port, int pin, int flags)
setbits16(&iop->odr_sor, pin);
else
clrbits16(&iop->odr_sor, pin);
+ if (flags & CPM_PIN_FALLEDGE)
+ setbits16(&iop->intr, pin);
+ else
+ clrbits16(&iop->intr, pin);
}
}
@@ -528,6 +532,9 @@ struct cpm1_gpio16_chip {
/* shadowed data register to clear/set bits safely */
u16 cpdata;
+
+ /* IRQ associated with pins when relevant */
+ int irq[16];
};
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
@@ -578,6 +585,14 @@ static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}
+static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+
+ return cpm1_gc->irq[gpio] ? : -ENXIO;
+}
+
static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
@@ -618,6 +633,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
struct cpm1_gpio16_chip *cpm1_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
+ u16 mask;
cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
if (!cpm1_gc)
@@ -625,6 +641,14 @@ int cpm1_gpiochip_add16(struct device_node *np)
spin_lock_init(&cpm1_gc->lock);
+ if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
+ int i, j;
+
+ for (i = 0, j = 0; i < 16; i++)
+ if (mask & (1 << (15 - i)))
+ cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
+ }
+
mm_gc = &cpm1_gc->mm_gc;
gc = &mm_gc->gc;
@@ -634,6 +658,7 @@ int cpm1_gpiochip_add16(struct device_node *np)
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
gc->set = cpm1_gpio16_set;
+ gc->to_irq = cpm1_gpio16_to_irq;
return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
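The fsl,cpm1-gpio-irq-mask walk in cpm1_gpiochip_add16() above pairs set bits with interrupt specifiers in order, most significant bit first, so bit (15 - i) describes pin i. A standalone sketch of that mapping follows; the mask value 0x5000, the virq base 100 and the printout are made up for illustration, and irq_of_parse_and_map() is replaced by a simple counter.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t mask = 0x5000;   /* hypothetical: pins 1 and 3 have IRQs */
        int irq[16] = { 0 };
        int i, j;

        /* Same loop shape as the kernel code: the j-th set bit, counted
         * from the MSB, gets the j-th interrupt specifier. */
        for (i = 0, j = 0; i < 16; i++)
                if (mask & (1 << (15 - i)))
                        irq[i] = 100 + j++;   /* stand-in for irq_of_parse_and_map(np, j) */

        for (i = 0; i < 16; i++)
                if (irq[i])
                        printf("pin %d -> virq %d\n", i, irq[i]);
        return 0;
}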
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
index ef470b470b04..6afddae2fb47 100644
--- a/arch/powerpc/sysdev/simple_gpio.c
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+ struct u8_gpio_chip *u8_gc =
+ container_of(mm_gc, struct u8_gpio_chip, mm_gc);
u8_gc->data = in_8(mm_gc->regs);
}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6a98efb14264..8f5e3035483b 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -46,13 +46,15 @@
#endif
bool __xive_enabled;
+EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;
-/* TIMA */
+/* TIMA exported to KVM */
void __iomem *xive_tima;
+EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;
/* Backend ops */
@@ -295,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
/* If the XIVE supports the new "store EOI facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- out_be64(xd->eoi_mmio, 0);
+ out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
/*
* The FW told us to call it. This happens for some
@@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d)
DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->pending_prio);
- /* EOI the source if it hasn't been disabled */
- if (!irqd_irq_disabled(d))
+ /*
+ * EOI the source if it hasn't been disabled and hasn't
+ * been passed-through to a KVM guest
+ */
+ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
xive_do_source_eoi(irqd_to_hwirq(d), xd);
/*
@@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d,
old_target = xd->target;
- rc = xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(target),
- xive_irq_priority, d->irq);
+ /*
+ * Only configure the irq if it's not currently passed-through to
+ * a KVM guest
+ */
+ if (!irqd_is_forwarded_to_vcpu(d))
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
if (rc < 0) {
pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
return rc;
@@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d)
return 1;
}
+static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u8 pq;
+
+ /*
+ * We only support this on interrupts that do not require
+ * firmware calls for masking and unmasking
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
+ return -EIO;
+
+ /*
+ * This is called by KVM with state non-NULL for enabling
+ * pass-through or NULL for disabling it
+ */
+ if (state) {
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Set it to PQ=10 state to prevent further sends */
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+
+ /* No target? Nothing to do */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ /*
+ * An untargeted interrupt should also have been
+ * masked at the source
+ */
+ WARN_ON(pq & 2);
+
+ return 0;
+ }
+
+ /*
+ * If P was set, adjust state to PQ=11 to indicate
+ * that a resend is needed for the interrupt to reach
+ * the guest. Also remember the value of P.
+ *
+ * This also tells us that it's in flight to a host queue
+ * or has already been fetched but hasn't been EOIed yet
+ * by the host. Thus it's potentially using up a host
+ * queue slot. This is important to know because as long
+ * as this is the case, we must not hard-unmask it when
+ * "returning" that interrupt to the host.
+ *
+ * This saved_p is cleared by the host EOI, when we know
+ * for sure the queue slot is no longer in use.
+ */
+ if (pq & 2) {
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xd->saved_p = true;
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the guest. That should guarantee us
+ * that we *will* eventually get an EOI for it on
+ * the host. Otherwise there would be a small window
+ * for P to be seen here while the interrupt goes
+ * to the guest queue.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+ } else
+ xd->saved_p = false;
+ } else {
+ irqd_clr_forwarded_to_vcpu(d);
+
+ /* No host target? Hard mask and return */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ xive_do_source_set_mask(xd, true);
+ return 0;
+ }
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the host.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+
+ /*
+ * By convention we are called with the interrupt in
+ * a PQ=10 or PQ=11 state, ie, it won't fire and will
+ * have latched in Q whether there's a pending HW
+ * interrupt or not.
+ *
+ * First reconfigure the target.
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /*
+ * Then if saved_p is not set, effectively re-enable the
+ * interrupt with an EOI. If it is set, we know there is
+ * still a message in a host queue somewhere that will be
+ * EOId eventually.
+ *
+ * Note: We don't check irqd_irq_disabled(). Effectively,
+ * we *will* let the irq get through even if masked if the
+ * HW is still firing it in order to deal with the whole
+ * saved_p business properly. If the interrupt triggers
+ * while masked, the generic code will re-mask it anyway.
+ */
+ if (!xd->saved_p)
+ xive_do_source_eoi(hw_irq, xd);
+
+ }
+ return 0;
+}
+
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
@@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = {
.irq_set_affinity = xive_irq_set_affinity,
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
+ .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};
bool is_xive_irq(struct irq_chip *chip)
{
return chip == &xive_irq_chip;
}
+EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
@@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
xd->trig_mmio = NULL;
}
}
+EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
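The PQ manipulations in xive_irq_set_vcpu_affinity() and the re-trigger in xive_do_source_eoi() both build on the two-bit ESB state described in the comments above: P records that one occurrence is sitting in a queue awaiting EOI, Q records that the source fired again in the meantime, and an EOI that finds Q set re-triggers the source. Here is a toy model of that coalescing; struct esb, trigger() and eoi() are invented for this sketch, since real sources are manipulated through ESB MMIO loads and stores rather than C calls.

#include <stdbool.h>
#include <stdio.h>

struct esb { bool p, q; };

static void trigger(struct esb *e, int irq)
{
        if (!e->p) {
                e->p = true;                     /* one occurrence enqueued */
                printf("irq %d pushed to queue\n", irq);
        } else {
                e->q = true;                     /* coalesced until EOI */
        }
}

static void eoi(struct esb *e, int irq)
{
        bool retrig = e->q;

        e->p = e->q = false;                     /* like a PQ=00 ESB access */
        if (retrig)
                trigger(e, irq);                 /* replay the coalesced trigger */
}

int main(void)
{
        struct esb e = { false, false };

        trigger(&e, 42);        /* enqueued */
        trigger(&e, 42);        /* coalesced into Q */
        trigger(&e, 42);        /* still coalesced */
        eoi(&e, 42);            /* EOI sees Q set and re-triggers once */
        eoi(&e, 42);            /* nothing pending anymore */
        return 0;
}

This coalescing is also why parking a source at PQ=10 or PQ=11, as xive_irq_set_vcpu_affinity() does for pass-through, prevents further deliveries: at most one occurrence ever occupies a queue slot until the state is cleared again.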
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1a726229a427..ab9ecce61ee5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -31,6 +31,7 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
#include "xive-internal.h"
@@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
}
return 0;
}
+EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
@@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
}
return rc == 0 ? 0 : -ENXIO;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_configure_queue);
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
@@ -192,6 +197,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
__xive_native_disable_queue(vp_id, q, prio);
}
+EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
@@ -262,6 +268,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
return 0;
}
+#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq(void)
{
@@ -277,6 +284,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
void xive_native_free_irq(u32 irq)
{
@@ -287,7 +295,9 @@ void xive_native_free_irq(u32 irq)
msleep(1);
}
}
+EXPORT_SYMBOL_GPL(xive_native_free_irq);
+#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
@@ -383,7 +393,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
return;
/* Enable the pool VP */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
@@ -428,7 +438,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Disable it */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
@@ -437,10 +447,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
}
}
-static void xive_native_sync_source(u32 hw_irq)
+void xive_native_sync_source(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
+EXPORT_SYMBOL_GPL(xive_native_sync_source);
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
@@ -501,10 +512,24 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
+static void xive_native_setup_pools(void)
+{
+ /* Allocate a pool big enough */
+ pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+
+ xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
+ if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
+ pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+
+ pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+ xive_pool_vps, nr_cpu_ids);
+}
+
u32 xive_native_default_eq_shift(void)
{
return xive_queue_shift;
}
+EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
bool xive_native_init(void)
{
@@ -514,7 +539,7 @@ bool xive_native_init(void)
struct property *prop;
u8 max_prio = 7;
const __be32 *p;
- u32 val;
+ u32 val, cpu;
s64 rc;
if (xive_cmdline_disabled)
@@ -550,7 +575,11 @@ bool xive_native_init(void)
break;
}
- /* Grab size of provisioning pages */
+ /* Configure Thread Management areas for KVM */
+ for_each_possible_cpu(cpu)
+ kvmppc_set_xive_tima(cpu, r.start, tima);
+
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
@@ -560,6 +589,9 @@ bool xive_native_init(void)
return false;
}
+ /* Setup some dummy HV pool VPs */
+ xive_native_setup_pools();
+
/* Initialize XIVE core with our backend */
if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
@@ -638,3 +670,47 @@ void xive_native_free_vp_block(u32 vp_base)
pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
+
+int xive_native_enable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_enable_vp);
+
+int xive_native_disable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_disable_vp);
+
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
+{
+ __be64 vp_cam_be;
+ __be32 vp_chip_id_be;
+ s64 rc;
+
+ rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
+ if (rc)
+ return -EIO;
+ *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
+ *out_chip_id = be32_to_cpu(vp_chip_id_be);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
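xive_native_enable_vp() and xive_native_disable_vp() above share the usual OPAL retry idiom: loop while the firmware answers OPAL_BUSY, sleep between attempts, and fold any remaining error into -EIO. A userspace sketch of that idiom under stated assumptions follows; fake_opal_call(), the busy counter and the hard-coded -5 standing in for -EIO are inventions, while the kernel code calls opal_xive_set_vp_info() and msleep(1).

#include <stdio.h>

#define OPAL_SUCCESS  0
#define OPAL_BUSY    -2   /* assumed to match the OPAL API value */

static int busy_count = 3;

/* Fake firmware call that reports busy a few times before succeeding */
static long fake_opal_call(void)
{
        return busy_count-- > 0 ? OPAL_BUSY : OPAL_SUCCESS;
}

/* Retry while busy, then map any residual error to -EIO for the caller */
static int call_until_not_busy(void)
{
        long rc;

        for (;;) {
                rc = fake_opal_call();
                if (rc != OPAL_BUSY)
                        break;
                /* msleep(1) in the kernel; omitted here */
        }
        return rc ? -5 /* -EIO */ : 0;
}

int main(void)
{
        printf("rc = %d\n", call_until_not_busy());
        return 0;
}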