Diffstat (limited to 'arch/powerpc')
51 files changed, 4947 insertions, 353 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index f07f727cbfd2..964da1891ea9 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -146,6 +146,7 @@ config PPC select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WEAK_RELEASE_ACQUIRE select BINFMT_ELF select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS @@ -571,21 +572,23 @@ config RELOCATABLE_TEST relocation code. config CRASH_DUMP - bool "Build a kdump crash kernel" + bool "Build a dump capture kernel" depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) select RELOCATABLE if PPC64 || 44x || FSL_BOOKE help - Build a kernel suitable for use as a kdump capture kernel. + Build a kernel suitable for use as a dump capture kernel. The same kernel binary can be used as production kernel and dump capture kernel. config FA_DUMP bool "Firmware-assisted dump" - depends on PPC64 && PPC_RTAS && CRASH_DUMP && KEXEC_CORE + depends on PPC64 && PPC_RTAS + select CRASH_CORE + select CRASH_DUMP help A robust mechanism to get reliable kernel crash dump with assistance from firmware. This approach does not use kexec, - instead firmware assists in booting the kdump kernel + instead firmware assists in booting the capture kernel while preserving memory contents. Firmware-assisted dump is meant to be a kdump replacement offering robustness and speed not possible without system firmware assistance. diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h index 4852e849128b..c0a55050f70f 100644 --- a/arch/powerpc/include/asm/disassemble.h +++ b/arch/powerpc/include/asm/disassemble.h @@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst) return (inst >> 11) & 0x7fff; } +static inline unsigned int get_tx_or_sx(u32 inst) +{ + return (inst) & 0x1; +} + #define IS_XFORM(inst) (get_op(inst) == 31) #define IS_DSFORM(inst) (get_op(inst) >= 56) diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 0031806475f0..60b91084f33c 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -73,6 +73,8 @@ reg_entry++; \ }) +extern int crashing_cpu; + /* Kernel Dump section info */ struct fadump_section { __be32 request_flag; diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index d96142572e6d..8a8ce220d7d0 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -296,11 +296,21 @@ static inline void iommu_restore(void) #endif /* The API to support IOMMU operations for VFIO */ -extern int iommu_tce_clear_param_check(struct iommu_table *tbl, - unsigned long ioba, unsigned long tce_value, - unsigned long npages); -extern int iommu_tce_put_param_check(struct iommu_table *tbl, - unsigned long ioba, unsigned long tce); +extern int iommu_tce_check_ioba(unsigned long page_shift, + unsigned long offset, unsigned long size, + unsigned long ioba, unsigned long npages); +extern int iommu_tce_check_gpa(unsigned long page_shift, + unsigned long gpa); + +#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \ + (iommu_tce_check_ioba((tbl)->it_page_shift, \ + (tbl)->it_offset, (tbl)->it_size, \ + (ioba), (npages)) || (tce_value)) +#define iommu_tce_put_param_check(tbl, ioba, gpa) \ + (iommu_tce_check_ioba((tbl)->it_page_shift, \ + (tbl)->it_offset, (tbl)->it_size, \ + (ioba), 1) || \ + iommu_tce_check_gpa((tbl)->it_page_shift, (gpa))) extern void iommu_flush_tce(struct iommu_table *tbl); extern int iommu_take_ownership(struct 
iommu_table *tbl); diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0593d9479f74..b148496ffe36 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -111,6 +111,8 @@ struct kvmppc_host_state { struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; void __iomem *xics_phys; + void __iomem *xive_tima_phys; + void __iomem *xive_tima_virt; u32 saved_xirr; u64 dabr; u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */ diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 7bba8f415627..9c51ac4b8f36 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -45,9 +45,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#ifdef CONFIG_KVM_MMIO -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#endif #define KVM_HALT_POLL_NS_DEFAULT 10000 /* 10 us */ /* These values are internal and can be increased later */ @@ -191,6 +188,13 @@ struct kvmppc_pginfo { atomic_t refcnt; }; +struct kvmppc_spapr_tce_iommu_table { + struct rcu_head rcu; + struct list_head next; + struct iommu_table *tbl; + struct kref kref; +}; + struct kvmppc_spapr_tce_table { struct list_head list; struct kvm *kvm; @@ -199,12 +203,19 @@ struct kvmppc_spapr_tce_table { u32 page_shift; u64 offset; /* in pages */ u64 size; /* window size in pages */ + struct list_head iommu_tables; struct page *pages[0]; }; /* XICS components, defined in book3s_xics.c */ struct kvmppc_xics; struct kvmppc_icp; +extern struct kvm_device_ops kvm_xics_ops; + +/* XIVE components, defined in book3s_xive.c */ +struct kvmppc_xive; +struct kvmppc_xive_vcpu; +extern struct kvm_device_ops kvm_xive_ops; struct kvmppc_passthru_irqmap; @@ -293,6 +304,7 @@ struct kvm_arch { #endif #ifdef CONFIG_KVM_XICS struct kvmppc_xics *xics; + struct kvmppc_xive *xive; struct kvmppc_passthru_irqmap *pimap; #endif struct kvmppc_ops *kvm_ops; @@ -345,6 +357,7 @@ struct kvmppc_pte { bool may_read : 1; bool may_write : 1; bool may_execute : 1; + unsigned long wimg; u8 page_size; /* MMU_PAGE_xxx */ }; @@ -421,7 +434,7 @@ struct kvmppc_passthru_irqmap { #define KVMPPC_IRQ_DEFAULT 0 #define KVMPPC_IRQ_MPIC 1 -#define KVMPPC_IRQ_XICS 2 +#define KVMPPC_IRQ_XICS 2 /* Includes a XIVE option */ #define MMIO_HPTE_CACHE_SIZE 4 @@ -441,8 +454,28 @@ struct mmio_hpte_cache { unsigned int index; }; +#define KVMPPC_VSX_COPY_NONE 0 +#define KVMPPC_VSX_COPY_WORD 1 +#define KVMPPC_VSX_COPY_DWORD 2 +#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP 3 + struct openpic; +/* W0 and W1 of a XIVE thread management context */ +union xive_tma_w01 { + struct { + u8 nsr; + u8 cppr; + u8 ipb; + u8 lsmfb; + u8 ack; + u8 inc; + u8 age; + u8 pipr; + }; + __be64 w01; +}; + struct kvm_vcpu_arch { ulong host_stack; u32 host_pid; @@ -644,6 +677,21 @@ struct kvm_vcpu_arch { u8 io_gpr; /* GPR used as IO source/target */ u8 mmio_host_swabbed; u8 mmio_sign_extend; + /* conversion between single and double precision */ + u8 mmio_sp64_extend; + /* + * Number of simulations for vsx. + * If we use 2*8bytes to simulate 1*16bytes, + * then the number should be 2 and + * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD. + * If we use 4*4bytes to simulate 1*16bytes, + * the number should be 4 and + * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD. 
+ */ + u8 mmio_vsx_copy_nums; + u8 mmio_vsx_offset; + u8 mmio_vsx_copy_type; + u8 mmio_vsx_tx_sx_enabled; u8 osi_needed; u8 osi_enabled; u8 papr_enabled; @@ -688,6 +736,10 @@ struct kvm_vcpu_arch { struct openpic *mpic; /* KVM_IRQ_MPIC */ #ifdef CONFIG_KVM_XICS struct kvmppc_icp *icp; /* XICS presentation controller */ + struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */ + __be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */ + u32 xive_pushed; /* Is the VP pushed on the physical CPU ? */ + union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */ #endif #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -732,6 +784,8 @@ struct kvm_vcpu_arch { }; #define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] +#define VCPU_VSX_FPR(vcpu, i, j) ((vcpu)->arch.fp.fpr[i][j]) +#define VCPU_VSX_VR(vcpu, i) ((vcpu)->arch.vr.vr[i]) /* Values for vcpu->arch.state */ #define KVMPPC_VCPU_NOTREADY 0 @@ -745,6 +799,7 @@ struct kvm_vcpu_arch { #define KVM_MMIO_REG_FPR 0x0020 #define KVM_MMIO_REG_QPR 0x0040 #define KVM_MMIO_REG_FQPR 0x0060 +#define KVM_MMIO_REG_VSX 0x0080 #define __KVM_HAVE_ARCH_WQP #define __KVM_HAVE_CREATE_DEVICE diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index c3877992eff9..e0d88c38602b 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, int is_default_endian); +extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, + int is_default_endian, int mmio_sign_extend); extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, u64 val, unsigned int bytes, int is_default_endian); +extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, unsigned int bytes, + int is_default_endian); extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type, u32 *inst); @@ -132,6 +138,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); +extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, @@ -164,13 +173,19 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm, extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, unsigned long porder); extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); +extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + struct iommu_group *grp); +extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, + struct iommu_group *grp); extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvm_create_spapr_tce_64 *args); extern struct kvmppc_spapr_tce_table *kvmppc_find_table( - struct kvm_vcpu *vcpu, unsigned long liobn); -extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, - unsigned long ioba, unsigned long npages); + struct kvm *kvm, 
unsigned long liobn); +#define kvmppc_ioba_validate(stt, ioba, npages) \ + (iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \ + (stt)->size, (ioba), (npages)) ? \ + H_PARAMETER : H_SUCCESS) extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt, unsigned long tce); extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, @@ -225,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp); extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu); extern void kvmppc_rtas_tokens_free(struct kvm *kvm); + extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority); extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, @@ -240,6 +256,7 @@ union kvmppc_one_reg { u64 dval; vector128 vval; u64 vsxval[2]; + u32 vsx32val[4]; struct { u64 addr; u64 length; @@ -412,6 +429,14 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr; } +static inline void kvmppc_set_xive_tima(int cpu, + unsigned long phys_addr, + void __iomem *virt_addr) +{ + paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr; + paca[cpu].kvm_hstate.xive_tima_virt = virt_addr; +} + static inline u32 kvmppc_get_xics_latch(void) { u32 xirr; @@ -442,6 +467,11 @@ static inline void __init kvm_cma_reserve(void) static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) {} +static inline void kvmppc_set_xive_tima(int cpu, + unsigned long phys_addr, + void __iomem *virt_addr) +{} + static inline u32 kvmppc_get_xics_latch(void) { return 0; @@ -492,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr, struct kvmppc_irq_map *irq_map, struct kvmppc_passthru_irqmap *pimap, bool *again); + +extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status); + extern int h_ipi_redirect; #else static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap( @@ -509,6 +543,60 @@ static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd) { return 0; } #endif +#ifdef CONFIG_KVM_XIVE +/* + * Below the first "xive" is the "eXternal Interrupt Virtualization Engine" + * ie. P9 new interrupt controller, while the second "xive" is the legacy + * "eXternal Interrupt Vector Entry" which is the configuration of an + * interrupt on the "xics" interrupt controller on P8 and earlier. Those + * two function consume or produce a legacy "XIVE" state from the + * new "XIVE" interrupt controller. 
+ */ +extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority); +extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority); +extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq); +extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq); +extern void kvmppc_xive_init_module(void); +extern void kvmppc_xive_exit_module(void); + +extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu); +extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu); +extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc); +extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc); +extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu); +extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval); + +extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status); +#else +static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority) { return -1; } +static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority) { return -1; } +static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; } +static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; } +static inline void kvmppc_xive_init_module(void) { } +static inline void kvmppc_xive_exit_module(void) { } + +static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; } +static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { } +static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) { return -ENODEV; } +static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) { return -ENODEV; } +static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; } +static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; } + +static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, + int level, bool line_status) { return -ENODEV; } +#endif /* CONFIG_KVM_XIVE */ + /* * Prototypes for functions called only from assembler code. * Having prototypes reduces sparse errors. 
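[Editor's note] The kvmppc_xive_* prototypes and stubs added above deliberately mirror the existing kvmppc_xics_* entry points, so common book3s code can choose a backend at runtime via xive_enabled(). A minimal sketch of that dispatch pattern follows; the wrapper name kvmppc_set_irq_server() is hypothetical and only illustrates the idea, the real call sites are in the book3s.c hunks further down in this diff.

	/*
	 * Sketch only: kvmppc_set_irq_server() is a made-up wrapper, not part
	 * of this patch.  It shows how the paired XICS/XIVE prototypes above
	 * are meant to be selected with xive_enabled().
	 */
	static int kvmppc_set_irq_server(struct kvm *kvm, u32 irq,
					 u32 server, u32 priority)
	{
		if (xive_enabled())	/* POWER9 native interrupt controller */
			return kvmppc_xive_set_xive(kvm, irq, server, priority);
		/* legacy XICS emulation on POWER8 and earlier */
		return kvmppc_xics_set_xive(kvm, irq, server, priority);
	}
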
@@ -546,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long slb_v, unsigned int status, bool data); unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu); +unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu); +unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, unsigned long mfrr); int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 5011b69107a7..f90b22c722e1 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -173,6 +173,8 @@ struct machdep_calls { /* Called after scan and before resource survey */ void (*pcibios_fixup_phb)(struct pci_controller *hose); + resource_size_t (*pcibios_default_alignment)(void); + #ifdef CONFIG_PCI_IOV void (*pcibios_fixup_sriov)(struct pci_dev *pdev); resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 93eded8d3843..c8975dac535f 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -77,12 +77,11 @@ extern int pci_domain_nr(struct pci_bus *bus); extern int pci_proc_domain(struct pci_bus *bus); struct vm_area_struct; -/* Map a range of PCI memory or I/O space for a device into user space */ -int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); -/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */ -#define HAVE_PCI_MMAP 1 +/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() and it does WC */ +#define HAVE_PCI_MMAP 1 +#define arch_can_pci_mmap_io() 1 +#define arch_can_pci_mmap_wc() 1 extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t count); diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 142d78d645f4..3a8d278e7421 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -86,32 +86,79 @@ #define OP_TRAP_64 2 #define OP_31_XOP_TRAP 4 +#define OP_31_XOP_LDX 21 #define OP_31_XOP_LWZX 23 +#define OP_31_XOP_LDUX 53 #define OP_31_XOP_DCBST 54 #define OP_31_XOP_LWZUX 55 #define OP_31_XOP_TRAP_64 68 #define OP_31_XOP_DCBF 86 #define OP_31_XOP_LBZX 87 +#define OP_31_XOP_STDX 149 #define OP_31_XOP_STWX 151 +#define OP_31_XOP_STDUX 181 +#define OP_31_XOP_STWUX 183 #define OP_31_XOP_STBX 215 #define OP_31_XOP_LBZUX 119 #define OP_31_XOP_STBUX 247 #define OP_31_XOP_LHZX 279 #define OP_31_XOP_LHZUX 311 #define OP_31_XOP_MFSPR 339 +#define OP_31_XOP_LWAX 341 #define OP_31_XOP_LHAX 343 +#define OP_31_XOP_LWAUX 373 #define OP_31_XOP_LHAUX 375 #define OP_31_XOP_STHX 407 #define OP_31_XOP_STHUX 439 #define OP_31_XOP_MTSPR 467 #define OP_31_XOP_DCBI 470 +#define OP_31_XOP_LDBRX 532 #define OP_31_XOP_LWBRX 534 #define OP_31_XOP_TLBSYNC 566 +#define OP_31_XOP_STDBRX 660 #define OP_31_XOP_STWBRX 662 +#define OP_31_XOP_STFSX 663 +#define OP_31_XOP_STFSUX 695 +#define OP_31_XOP_STFDX 727 +#define OP_31_XOP_STFDUX 759 #define OP_31_XOP_LHBRX 790 +#define OP_31_XOP_LFIWAX 855 +#define OP_31_XOP_LFIWZX 887 #define OP_31_XOP_STHBRX 918 +#define OP_31_XOP_STFIWX 983 + +/* VSX Scalar Load Instructions */ +#define OP_31_XOP_LXSDX 588 +#define OP_31_XOP_LXSSPX 524 +#define OP_31_XOP_LXSIWAX 76 +#define 
OP_31_XOP_LXSIWZX 12 + +/* VSX Scalar Store Instructions */ +#define OP_31_XOP_STXSDX 716 +#define OP_31_XOP_STXSSPX 652 +#define OP_31_XOP_STXSIWX 140 + +/* VSX Vector Load Instructions */ +#define OP_31_XOP_LXVD2X 844 +#define OP_31_XOP_LXVW4X 780 + +/* VSX Vector Load and Splat Instruction */ +#define OP_31_XOP_LXVDSX 332 + +/* VSX Vector Store Instructions */ +#define OP_31_XOP_STXVD2X 972 +#define OP_31_XOP_STXVW4X 908 + +#define OP_31_XOP_LFSX 535 +#define OP_31_XOP_LFSUX 567 +#define OP_31_XOP_LFDX 599 +#define OP_31_XOP_LFDUX 631 #define OP_LWZ 32 +#define OP_STFS 52 +#define OP_STFSU 53 +#define OP_STFD 54 +#define OP_STFDU 55 #define OP_LD 58 #define OP_LWZU 33 #define OP_LBZ 34 @@ -127,6 +174,17 @@ #define OP_LHAU 43 #define OP_STH 44 #define OP_STHU 45 +#define OP_LMW 46 +#define OP_STMW 47 +#define OP_LFS 48 +#define OP_LFSU 49 +#define OP_LFD 50 +#define OP_LFDU 51 +#define OP_STFS 52 +#define OP_STFSU 53 +#define OP_STFD 54 +#define OP_STFDU 55 +#define OP_LQ 56 /* sorted alphabetically */ #define PPC_INST_BHRBE 0x7c00025c diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h index 3cdbeaeac397..c8a822acf962 100644 --- a/arch/powerpc/include/asm/xive.h +++ b/arch/powerpc/include/asm/xive.h @@ -99,7 +99,6 @@ struct xive_q { #define XIVE_ESB_SET_PQ_01 0xd00 #define XIVE_ESB_SET_PQ_10 0xe00 #define XIVE_ESB_SET_PQ_11 0xf00 -#define XIVE_ESB_MASK XIVE_ESB_SET_PQ_01 #define XIVE_ESB_VAL_P 0x2 #define XIVE_ESB_VAL_Q 0x1 @@ -136,11 +135,11 @@ extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio, __be32 *qpage, u32 order, bool can_escalate); extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio); -extern bool __xive_irq_trigger(struct xive_irq_data *xd); -extern bool __xive_irq_retrigger(struct xive_irq_data *xd); -extern void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd); - +extern void xive_native_sync_source(u32 hw_irq); extern bool is_xive_irq(struct irq_chip *chip); +extern int xive_native_enable_vp(u32 vp_id); +extern int xive_native_disable_vp(u32 vp_id); +extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id); #else diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index dab3717e3ea0..b15bf6bc0e94 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild @@ -1,47 +1,2 @@ # UAPI Header export list include include/uapi/asm-generic/Kbuild.asm - -header-y += auxvec.h -header-y += bitsperlong.h -header-y += bootx.h -header-y += byteorder.h -header-y += cputable.h -header-y += eeh.h -header-y += elf.h -header-y += epapr_hcalls.h -header-y += errno.h -header-y += fcntl.h -header-y += ioctl.h -header-y += ioctls.h -header-y += ipcbuf.h -header-y += kvm.h -header-y += kvm_para.h -header-y += mman.h -header-y += msgbuf.h -header-y += nvram.h -header-y += opal-prd.h -header-y += param.h -header-y += perf_event.h -header-y += poll.h -header-y += posix_types.h -header-y += ps3fb.h -header-y += ptrace.h -header-y += resource.h -header-y += sembuf.h -header-y += setup.h -header-y += shmbuf.h -header-y += sigcontext.h -header-y += siginfo.h -header-y += signal.h -header-y += socket.h -header-y += sockios.h -header-y += spu_info.h -header-y += stat.h -header-y += statfs.h -header-y += swab.h -header-y += termbits.h -header-y += termios.h -header-y += tm.h -header-y += types.h -header-y += ucontext.h -header-y += unistd.h diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 
4edbe4bb0e8b..07fbeb927834 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -29,6 +29,9 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_GUEST_DEBUG +/* Not always available, but if it is, this is the correct offset. */ +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + struct kvm_regs { __u64 pc; __u64 cr; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 439c257dec4a..709e23425317 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -634,6 +634,8 @@ int main(void) HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); + HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys); + HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt); HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); HSTATE_FIELD(HSTATE_PTID, ptid); @@ -719,6 +721,14 @@ int main(void) OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6); #endif +#ifdef CONFIG_KVM_XICS + DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu, + arch.xive_saved_state)); + DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu, + arch.xive_cam_word)); + DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed)); +#endif + #ifdef CONFIG_KVM_EXIT_TIMING OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu); OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl); diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 47b63de81f9b..cbabb5adccd9 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -43,8 +43,6 @@ #define IPI_TIMEOUT 10000 #define REAL_MODE_TIMEOUT 10000 -/* This keeps a track of which one is the crashing cpu. */ -int crashing_cpu = -1; static int time_to_dump; #define CRASH_HANDLER_MAX 3 diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 243dbef7e926..466569e26278 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -209,14 +209,20 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm, */ static inline unsigned long fadump_calculate_reserve_size(void) { - unsigned long size; + int ret; + unsigned long long base, size; /* - * Check if the size is specified through fadump_reserve_mem= cmdline - * option. If yes, then use that. + * Check if the size is specified through crashkernel= cmdline + * option. If yes, then use that but ignore base as fadump + * reserves memory at end of RAM. 
*/ - if (fw_dump.reserve_bootvar) + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &size, &base); + if (ret == 0 && size > 0) { + fw_dump.reserve_bootvar = (unsigned long)size; return fw_dump.reserve_bootvar; + } /* divide by 20 to get 5% of value */ size = memblock_end_of_DRAM() / 20; @@ -371,15 +377,6 @@ static int __init early_fadump_param(char *p) } early_param("fadump", early_fadump_param); -/* Look for fadump_reserve_mem= cmdline option */ -static int __init early_fadump_reserve_mem(char *p) -{ - if (p) - fw_dump.reserve_bootvar = memparse(p, &p); - return 0; -} -early_param("fadump_reserve_mem", early_fadump_reserve_mem); - static void register_fw_dump(struct fadump_mem_struct *fdm) { int rc; @@ -527,34 +524,6 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs) return reg_entry; } -static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type, - void *data, size_t data_len) -{ - struct elf_note note; - - note.n_namesz = strlen(name) + 1; - note.n_descsz = data_len; - note.n_type = type; - memcpy(buf, ¬e, sizeof(note)); - buf += (sizeof(note) + 3)/4; - memcpy(buf, name, note.n_namesz); - buf += (note.n_namesz + 3)/4; - memcpy(buf, data, note.n_descsz); - buf += (note.n_descsz + 3)/4; - - return buf; -} - -static void fadump_final_note(u32 *buf) -{ - struct elf_note note; - - note.n_namesz = 0; - note.n_descsz = 0; - note.n_type = 0; - memcpy(buf, ¬e, sizeof(note)); -} - static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) { struct elf_prstatus prstatus; @@ -565,8 +534,8 @@ static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs) * prstatus.pr_pid = ???? */ elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); - buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, - &prstatus, sizeof(prstatus)); + buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS, + &prstatus, sizeof(prstatus)); return buf; } @@ -707,7 +676,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm) note_buf = fadump_regs_to_elf_notes(note_buf, ®s); } } - fadump_final_note(note_buf); + final_note(note_buf); if (fdh) { pr_debug("Updating elfcore header (%llx) with cpu notes\n", diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 5a3231fedf08..f2b724cd9e64 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -963,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl) } EXPORT_SYMBOL_GPL(iommu_flush_tce); -int iommu_tce_clear_param_check(struct iommu_table *tbl, - unsigned long ioba, unsigned long tce_value, - unsigned long npages) +int iommu_tce_check_ioba(unsigned long page_shift, + unsigned long offset, unsigned long size, + unsigned long ioba, unsigned long npages) { - /* tbl->it_ops->clear() does not support any value but 0 */ - if (tce_value) - return -EINVAL; + unsigned long mask = (1UL << page_shift) - 1; - if (ioba & ~IOMMU_PAGE_MASK(tbl)) + if (ioba & mask) return -EINVAL; - ioba >>= tbl->it_page_shift; - if (ioba < tbl->it_offset) + ioba >>= page_shift; + if (ioba < offset) return -EINVAL; - if ((ioba + npages) > (tbl->it_offset + tbl->it_size)) + if ((ioba + 1) > (offset + size)) return -EINVAL; return 0; } -EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check); +EXPORT_SYMBOL_GPL(iommu_tce_check_ioba); -int iommu_tce_put_param_check(struct iommu_table *tbl, - unsigned long ioba, unsigned long tce) +int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa) { - if (tce & ~IOMMU_PAGE_MASK(tbl)) - return -EINVAL; - - 
if (ioba & ~IOMMU_PAGE_MASK(tbl)) - return -EINVAL; - - ioba >>= tbl->it_page_shift; - if (ioba < tbl->it_offset) - return -EINVAL; + unsigned long mask = (1UL << page_shift) - 1; - if ((ioba + 1) > (tbl->it_offset + tbl->it_size)) + if (gpa & mask) return -EINVAL; return 0; } -EXPORT_SYMBOL_GPL(iommu_tce_put_param_check); +EXPORT_SYMBOL_GPL(iommu_tce_check_gpa); long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index ffda24a38dda..341a7469cab8 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -233,6 +233,14 @@ void pcibios_reset_secondary_bus(struct pci_dev *dev) pci_reset_secondary_bus(dev); } +resource_size_t pcibios_default_alignment(void) +{ + if (ppc_md.pcibios_default_alignment) + return ppc_md.pcibios_default_alignment(); + + return 0; +} + #ifdef CONFIG_PCI_IOV resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno) { @@ -513,7 +521,8 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, * * Returns a negative error code on failure, zero on success. */ -int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, +int pci_mmap_page_range(struct pci_dev *dev, int bar, + struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { resource_size_t offset = diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 5c10b5925ac2..69e077180db6 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -125,6 +125,11 @@ int ppc_do_canonicalize_irqs; EXPORT_SYMBOL(ppc_do_canonicalize_irqs); #endif +#ifdef CONFIG_CRASH_CORE +/* This keeps a track of which one is the crashing cpu. */ +int crashing_cpu = -1; +#endif + /* also used by kexec */ void machine_shutdown(void) { diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 029be26b5a17..24de532c1736 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -67,6 +67,7 @@ config KVM_BOOK3S_64 select KVM_BOOK3S_64_HANDLER select KVM select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE + select SPAPR_TCE_IOMMU if IOMMU_SUPPORT ---help--- Support running unmodified book3s_64 and book3s_32 guest kernels in virtual machines on book3s_64 host processors. @@ -196,6 +197,11 @@ config KVM_XICS Specification) interrupt controller architecture used on IBM POWER (pSeries) servers. 
+config KVM_XIVE + bool + default y + depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE + source drivers/vhost/Kconfig endif # VIRTUALIZATION diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index b87ccde2137a..d91a2604c496 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -74,7 +74,7 @@ kvm-hv-y += \ book3s_64_mmu_radix.o kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \ - book3s_hv_rm_xics.o + book3s_hv_rm_xics.o book3s_hv_rm_xive.o ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \ @@ -89,6 +89,8 @@ endif kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ book3s_xics.o +kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o + kvm-book3s_64-module-objs := \ $(common-objs-y) \ book3s.o \ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index aedacefd961d..72d977e30952 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -35,6 +35,7 @@ #include <asm/kvm_book3s.h> #include <asm/mmu_context.h> #include <asm/page.h> +#include <asm/xive.h> #include "book3s.h" #include "trace.h" @@ -197,6 +198,24 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) } EXPORT_SYMBOL_GPL(kvmppc_core_queue_program); +void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu) +{ + /* might as well deliver this straight away */ + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0); +} + +void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu) +{ + /* might as well deliver this straight away */ + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0); +} + +void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu) +{ + /* might as well deliver this straight away */ + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0); +} + void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); @@ -578,11 +597,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, break; #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: - if (!vcpu->arch.icp) { + if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } - *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); + if (xive_enabled()) + *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu)); + else + *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ case KVM_REG_PPC_FSCR: @@ -648,12 +670,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, #endif /* CONFIG_VSX */ #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: - if (!vcpu->arch.icp) { + if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { r = -ENXIO; break; } - r = kvmppc_xics_set_icp(vcpu, - set_reg_val(id, *val)); + if (xive_enabled()) + r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val)); + else + r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val)); break; #endif /* CONFIG_KVM_XICS */ case KVM_REG_PPC_FSCR: @@ -924,6 +948,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall) return kvm->arch.kvm_ops->hcall_implemented(hcall); } +#ifdef CONFIG_KVM_XICS +int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) +{ + if (xive_enabled()) + return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level, + line_status); + else + return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level, + line_status); +} + +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + return kvm_set_irq(kvm, irq_source_id, 
irq_entry->gsi, + level, line_status); +} +static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, + bool line_status) +{ + return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); +} + +int kvm_irq_map_gsi(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *entries, int gsi) +{ + entries->gsi = gsi; + entries->type = KVM_IRQ_ROUTING_IRQCHIP; + entries->set = kvmppc_book3s_set_irq; + entries->irqchip.irqchip = 0; + entries->irqchip.pin = gsi; + return 1; +} + +int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) +{ + return pin; +} + +#endif /* CONFIG_KVM_XICS */ + static int kvmppc_book3s_init(void) { int r; @@ -934,12 +1002,25 @@ static int kvmppc_book3s_init(void) #ifdef CONFIG_KVM_BOOK3S_32_HANDLER r = kvmppc_book3s_init_pr(); #endif - return r; +#ifdef CONFIG_KVM_XICS +#ifdef CONFIG_KVM_XIVE + if (xive_enabled()) { + kvmppc_xive_init_module(); + kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS); + } else +#endif + kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS); +#endif + return r; } static void kvmppc_book3s_exit(void) { +#ifdef CONFIG_KVM_XICS + if (xive_enabled()) + kvmppc_xive_exit_module(); +#endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kvmppc_book3s_exit_pr(); #endif diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 70153578131a..29ebe2fd5867 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -319,6 +319,7 @@ do_second: gpte->may_execute = true; gpte->may_read = false; gpte->may_write = false; + gpte->wimg = r & HPTE_R_WIMG; switch (pp) { case 0: diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 74b0153780e3..9a4614cd0e53 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, else kvmppc_mmu_flush_icache(pfn); + rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg; + /* * Use 64K pages if possible; otherwise, on 64K page kernels, * we need to transfer 4 more bits from guest real to host real addr. 
@@ -177,12 +179,15 @@ map_again: ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, hpsize, hpsize, MMU_SEGSIZE_256M); - if (ret < 0) { + if (ret == -1) { /* If we couldn't map a primary PTE, try a secondary */ hash = ~hash; vflags ^= HPTE_V_SECONDARY; attempt++; goto map_again; + } else if (ret < 0) { + r = -EIO; + goto out_unlock; } else { trace_kvm_book3s_64_mmu_map(rflags, hpteg, vpn, hpaddr, orig_pte); diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 3e26cd4979f9..a160c14304eb 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -28,6 +28,8 @@ #include <linux/hugetlb.h> #include <linux/list.h> #include <linux/anon_inodes.h> +#include <linux/iommu.h> +#include <linux/file.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> @@ -40,6 +42,7 @@ #include <asm/udbg.h> #include <asm/iommu.h> #include <asm/tce.h> +#include <asm/mmu_context.h> static unsigned long kvmppc_tce_pages(unsigned long iommu_pages) { @@ -91,6 +94,137 @@ static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc) return ret; } +static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head) +{ + struct kvmppc_spapr_tce_iommu_table *stit = container_of(head, + struct kvmppc_spapr_tce_iommu_table, rcu); + + iommu_tce_table_put(stit->tbl); + + kfree(stit); +} + +static void kvm_spapr_tce_liobn_put(struct kref *kref) +{ + struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref, + struct kvmppc_spapr_tce_iommu_table, kref); + + list_del_rcu(&stit->next); + + call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free); +} + +extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm, + struct iommu_group *grp) +{ + int i; + struct kvmppc_spapr_tce_table *stt; + struct kvmppc_spapr_tce_iommu_table *stit, *tmp; + struct iommu_table_group *table_group = NULL; + + list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { + + table_group = iommu_group_get_iommudata(grp); + if (WARN_ON(!table_group)) + continue; + + list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + if (table_group->tables[i] != stit->tbl) + continue; + + kref_put(&stit->kref, kvm_spapr_tce_liobn_put); + return; + } + } + } +} + +extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd, + struct iommu_group *grp) +{ + struct kvmppc_spapr_tce_table *stt = NULL; + bool found = false; + struct iommu_table *tbl = NULL; + struct iommu_table_group *table_group; + long i; + struct kvmppc_spapr_tce_iommu_table *stit; + struct fd f; + + f = fdget(tablefd); + if (!f.file) + return -EBADF; + + list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) { + if (stt == f.file->private_data) { + found = true; + break; + } + } + + fdput(f); + + if (!found) + return -EINVAL; + + table_group = iommu_group_get_iommudata(grp); + if (WARN_ON(!table_group)) + return -EFAULT; + + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbltmp = table_group->tables[i]; + + if (!tbltmp) + continue; + /* + * Make sure hardware table parameters are exactly the same; + * this is used in the TCE handlers where boundary checks + * use only the first attached table. + */ + if ((tbltmp->it_page_shift == stt->page_shift) && + (tbltmp->it_offset == stt->offset) && + (tbltmp->it_size == stt->size)) { + /* + * Reference the table to avoid races with + * add/remove DMA windows. 
+ */ + tbl = iommu_tce_table_get(tbltmp); + break; + } + } + if (!tbl) + return -EINVAL; + + list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { + if (tbl != stit->tbl) + continue; + + if (!kref_get_unless_zero(&stit->kref)) { + /* stit is being destroyed */ + iommu_tce_table_put(tbl); + return -ENOTTY; + } + /* + * The table is already known to this KVM, we just increased + * its KVM reference counter and can return. + */ + return 0; + } + + stit = kzalloc(sizeof(*stit), GFP_KERNEL); + if (!stit) { + iommu_tce_table_put(tbl); + return -ENOMEM; + } + + stit->tbl = tbl; + kref_init(&stit->kref); + + list_add_rcu(&stit->next, &stt->iommu_tables); + + return 0; +} + static void release_spapr_tce_table(struct rcu_head *head) { struct kvmppc_spapr_tce_table *stt = container_of(head, @@ -130,9 +264,18 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) { struct kvmppc_spapr_tce_table *stt = filp->private_data; + struct kvmppc_spapr_tce_iommu_table *stit, *tmp; list_del_rcu(&stt->list); + list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) { + WARN_ON(!kref_read(&stit->kref)); + while (1) { + if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put)) + break; + } + } + kvm_put_kvm(stt->kvm); kvmppc_account_memlimit( @@ -164,7 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, return -EBUSY; } - size = args->size; + size = _ALIGN_UP(args->size, PAGE_SIZE >> 3); npages = kvmppc_tce_pages(size); ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true); if (ret) { @@ -183,6 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, stt->offset = args->offset; stt->size = size; stt->kvm = kvm; + INIT_LIST_HEAD_RCU(&stt->iommu_tables); for (i = 0; i < npages; i++) { stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); @@ -211,15 +355,106 @@ fail: return ret; } +static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry) +{ + unsigned long hpa = 0; + enum dma_data_direction dir = DMA_NONE; + + iommu_tce_xchg(tbl, entry, &hpa, &dir); +} + +static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm, + struct iommu_table *tbl, unsigned long entry) +{ + struct mm_iommu_table_group_mem_t *mem = NULL; + const unsigned long pgsize = 1ULL << tbl->it_page_shift; + unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + + if (!pua) + /* it_userspace allocation might be delayed */ + return H_TOO_HARD; + + mem = mm_iommu_lookup(kvm->mm, *pua, pgsize); + if (!mem) + return H_TOO_HARD; + + mm_iommu_mapped_dec(mem); + + *pua = 0; + + return H_SUCCESS; +} + +static long kvmppc_tce_iommu_unmap(struct kvm *kvm, + struct iommu_table *tbl, unsigned long entry) +{ + enum dma_data_direction dir = DMA_NONE; + unsigned long hpa = 0; + long ret; + + if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir))) + return H_HARDWARE; + + if (dir == DMA_NONE) + return H_SUCCESS; + + ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); + if (ret != H_SUCCESS) + iommu_tce_xchg(tbl, entry, &hpa, &dir); + + return ret; +} + +long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, + unsigned long entry, unsigned long ua, + enum dma_data_direction dir) +{ + long ret; + unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + struct mm_iommu_table_group_mem_t *mem; + + if (!pua) + /* it_userspace allocation might be delayed */ + return H_TOO_HARD; + + mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift); + if (!mem) + /* This only handles v2 IOMMU type, v1 is 
handled via ioctl() */ + return H_TOO_HARD; + + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) + return H_HARDWARE; + + if (mm_iommu_mapped_inc(mem)) + return H_CLOSED; + + ret = iommu_tce_xchg(tbl, entry, &hpa, &dir); + if (WARN_ON_ONCE(ret)) { + mm_iommu_mapped_dec(mem); + return H_HARDWARE; + } + + if (dir != DMA_NONE) + kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry); + + *pua = ua; + + return 0; +} + long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { - struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); - long ret; + struct kvmppc_spapr_tce_table *stt; + long ret, idx; + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long entry, ua = 0; + enum dma_data_direction dir; /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ /* liobn, ioba, tce); */ + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -231,7 +466,35 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - kvmppc_tce_put(stt, ioba >> stt->page_shift, tce); + dir = iommu_tce_direction(tce); + if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, + tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) + return H_PARAMETER; + + entry = ioba >> stt->page_shift; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + if (dir == DMA_NONE) { + ret = kvmppc_tce_iommu_unmap(vcpu->kvm, + stit->tbl, entry); + } else { + idx = srcu_read_lock(&vcpu->kvm->srcu); + ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, + entry, ua, dir); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + } + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + return ret; + + WARN_ON_ONCE(1); + kvmppc_clear_tce(stit->tbl, entry); + } + + kvmppc_tce_put(stt, entry, tce); return H_SUCCESS; } @@ -246,8 +509,9 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long entry, ua = 0; u64 __user *tces; u64 tce; + struct kvmppc_spapr_tce_iommu_table *stit; - stt = kvmppc_find_table(vcpu, liobn); + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -284,6 +548,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (ret != H_SUCCESS) goto unlock_exit; + if (kvmppc_gpa_to_ua(vcpu->kvm, + tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), + &ua, NULL)) + return H_PARAMETER; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + ret = kvmppc_tce_iommu_map(vcpu->kvm, + stit->tbl, entry + i, ua, + iommu_tce_direction(tce)); + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + goto unlock_exit; + + WARN_ON_ONCE(1); + kvmppc_clear_tce(stit->tbl, entry); + } + kvmppc_tce_put(stt, entry + i, tce); } @@ -300,8 +584,9 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, { struct kvmppc_spapr_tce_table *stt; long i, ret; + struct kvmppc_spapr_tce_iommu_table *stit; - stt = kvmppc_find_table(vcpu, liobn); + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -313,6 +598,24 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu, if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) return H_PARAMETER; + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + unsigned long entry = ioba >> stit->tbl->it_page_shift; + + for (i = 0; i < npages; ++i) { + ret = kvmppc_tce_iommu_unmap(vcpu->kvm, + stit->tbl, entry + i); + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + return ret; + + WARN_ON_ONCE(1); + kvmppc_clear_tce(stit->tbl, entry); + } + } + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) 
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index e4c4ea973e57..eda0a8f6fae8 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -40,6 +40,31 @@ #include <asm/iommu.h> #include <asm/tce.h> +#ifdef CONFIG_BUG + +#define WARN_ON_ONCE_RM(condition) ({ \ + static bool __section(.data.unlikely) __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \ + __stringify(condition), \ + __func__, __LINE__); \ + dump_stack(); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#else + +#define WARN_ON_ONCE_RM(condition) ({ \ + int __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) + +#endif + #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) /* @@ -48,10 +73,9 @@ * WARNING: This will be called in real or virtual mode on HV KVM and virtual * mode on PR KVM */ -struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, +struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, unsigned long liobn) { - struct kvm *kvm = vcpu->kvm; struct kvmppc_spapr_tce_table *stt; list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list) @@ -63,27 +87,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu, EXPORT_SYMBOL_GPL(kvmppc_find_table); /* - * Validates IO address. - * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM - */ -long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt, - unsigned long ioba, unsigned long npages) -{ - unsigned long mask = (1ULL << stt->page_shift) - 1; - unsigned long idx = ioba >> stt->page_shift; - - if ((ioba & mask) || (idx < stt->offset) || - (idx - stt->offset + npages > stt->size) || - (idx + npages < idx)) - return H_PARAMETER; - - return H_SUCCESS; -} -EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); - -/* * Validates TCE address. * At the moment flags and page mask are validated. 
* As the host kernel does not access those addresses (just puts them @@ -96,10 +99,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate); */ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) { - unsigned long page_mask = ~((1ULL << stt->page_shift) - 1); - unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ); + unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE); + enum dma_data_direction dir = iommu_tce_direction(tce); + + /* Allow userspace to poison TCE table */ + if (dir == DMA_NONE) + return H_SUCCESS; - if (tce & mask) + if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; return H_SUCCESS; @@ -179,15 +186,122 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa, EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry) +{ + unsigned long hpa = 0; + enum dma_data_direction dir = DMA_NONE; + + iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); +} + +static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm, + struct iommu_table *tbl, unsigned long entry) +{ + struct mm_iommu_table_group_mem_t *mem = NULL; + const unsigned long pgsize = 1ULL << tbl->it_page_shift; + unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + + if (!pua) + /* it_userspace allocation might be delayed */ + return H_TOO_HARD; + + pua = (void *) vmalloc_to_phys(pua); + if (WARN_ON_ONCE_RM(!pua)) + return H_HARDWARE; + + mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize); + if (!mem) + return H_TOO_HARD; + + mm_iommu_mapped_dec(mem); + + *pua = 0; + + return H_SUCCESS; +} + +static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm, + struct iommu_table *tbl, unsigned long entry) +{ + enum dma_data_direction dir = DMA_NONE; + unsigned long hpa = 0; + long ret; + + if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir)) + /* + * real mode xchg can fail if struct page crosses + * a page boundary + */ + return H_TOO_HARD; + + if (dir == DMA_NONE) + return H_SUCCESS; + + ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); + if (ret) + iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + + return ret; +} + +static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, + unsigned long entry, unsigned long ua, + enum dma_data_direction dir) +{ + long ret; + unsigned long hpa = 0; + unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry); + struct mm_iommu_table_group_mem_t *mem; + + if (!pua) + /* it_userspace allocation might be delayed */ + return H_TOO_HARD; + + mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift); + if (!mem) + return H_TOO_HARD; + + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) + return H_HARDWARE; + + pua = (void *) vmalloc_to_phys(pua); + if (WARN_ON_ONCE_RM(!pua)) + return H_HARDWARE; + + if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem))) + return H_CLOSED; + + ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir); + if (ret) { + mm_iommu_mapped_dec(mem); + /* + * real mode xchg can fail if struct page crosses + * a page boundary + */ + return H_TOO_HARD; + } + + if (dir != DMA_NONE) + kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry); + + *pua = ua; + + return 0; +} + long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba, unsigned long tce) { - struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); + struct kvmppc_spapr_tce_table *stt; long ret; + struct kvmppc_spapr_tce_iommu_table *stit; + unsigned long entry, ua = 0; + enum dma_data_direction dir; /* udbg_printf("H_PUT_TCE(): 
liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */ /* liobn, ioba, tce); */ + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -199,7 +313,32 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (ret != H_SUCCESS) return ret; - kvmppc_tce_put(stt, ioba >> stt->page_shift, tce); + dir = iommu_tce_direction(tce); + if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, + tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) + return H_PARAMETER; + + entry = ioba >> stt->page_shift; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + if (dir == DMA_NONE) + ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, + stit->tbl, entry); + else + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, + stit->tbl, entry, ua, dir); + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + return ret; + + WARN_ON_ONCE_RM(1); + kvmppc_rm_clear_tce(stit->tbl, entry); + } + + kvmppc_tce_put(stt, entry, tce); return H_SUCCESS; } @@ -239,8 +378,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, long i, ret = H_SUCCESS; unsigned long tces, entry, ua = 0; unsigned long *rmap = NULL; + bool prereg = false; + struct kvmppc_spapr_tce_iommu_table *stit; - stt = kvmppc_find_table(vcpu, liobn); + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -259,23 +400,49 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (ret != H_SUCCESS) return ret; - if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) - return H_TOO_HARD; + if (mm_iommu_preregistered(vcpu->kvm->mm)) { + /* + * We get here if guest memory was pre-registered which + * is normally VFIO case and gpa->hpa translation does not + * depend on hpt. + */ + struct mm_iommu_table_group_mem_t *mem; - rmap = (void *) vmalloc_to_phys(rmap); + if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + return H_TOO_HARD; - /* - * Synchronize with the MMU notifier callbacks in - * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.). - * While we have the rmap lock, code running on other CPUs - * cannot finish unmapping the host real page that backs - * this guest real page, so we are OK to access the host - * real page. - */ - lock_rmap(rmap); - if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) { - ret = H_TOO_HARD; - goto unlock_exit; + mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); + if (mem) + prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; + } + + if (!prereg) { + /* + * This is usually a case of a guest with emulated devices only + * when TCE list is not in preregistered memory. + * We do not require memory to be preregistered in this case + * so lock rmap and do __find_linux_pte_or_hugepte(). + */ + if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + return H_TOO_HARD; + + rmap = (void *) vmalloc_to_phys(rmap); + if (WARN_ON_ONCE_RM(!rmap)) + return H_HARDWARE; + + /* + * Synchronize with the MMU notifier callbacks in + * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.). + * While we have the rmap lock, code running on other CPUs + * cannot finish unmapping the host real page that backs + * this guest real page, so we are OK to access the host + * real page. 
+ */ + lock_rmap(rmap); + if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) { + ret = H_TOO_HARD; + goto unlock_exit; + } } for (i = 0; i < npages; ++i) { @@ -285,11 +452,33 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (ret != H_SUCCESS) goto unlock_exit; + ua = 0; + if (kvmppc_gpa_to_ua(vcpu->kvm, + tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), + &ua, NULL)) + return H_PARAMETER; + + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, + stit->tbl, entry + i, ua, + iommu_tce_direction(tce)); + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + goto unlock_exit; + + WARN_ON_ONCE_RM(1); + kvmppc_rm_clear_tce(stit->tbl, entry); + } + kvmppc_tce_put(stt, entry + i, tce); } unlock_exit: - unlock_rmap(rmap); + if (rmap) + unlock_rmap(rmap); return ret; } @@ -300,8 +489,9 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, { struct kvmppc_spapr_tce_table *stt; long i, ret; + struct kvmppc_spapr_tce_iommu_table *stit; - stt = kvmppc_find_table(vcpu, liobn); + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; @@ -313,6 +503,24 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ)) return H_PARAMETER; + list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { + unsigned long entry = ioba >> stit->tbl->it_page_shift; + + for (i = 0; i < npages; ++i) { + ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, + stit->tbl, entry + i); + + if (ret == H_SUCCESS) + continue; + + if (ret == H_TOO_HARD) + return ret; + + WARN_ON_ONCE_RM(1); + kvmppc_rm_clear_tce(stit->tbl, entry); + } + } + for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); @@ -322,12 +530,13 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, unsigned long ioba) { - struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn); + struct kvmppc_spapr_tce_table *stt; long ret; unsigned long idx; struct page *page; u64 *tbl; + stt = kvmppc_find_table(vcpu->kvm, liobn); if (!stt) return H_TOO_HARD; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 8359752b3efc..68d68983948e 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -503,10 +503,18 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) break; unprivileged: default: - printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); -#ifndef DEBUG_SPR - emulated = EMULATE_FAIL; -#endif + pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn); + if (sprn & 0x10) { + if (kvmppc_get_msr(vcpu) & MSR_PR) { + kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); + emulated = EMULATE_AGAIN; + } + } else { + if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) { + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + emulated = EMULATE_AGAIN; + } + } break; } @@ -648,10 +656,20 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val break; default: unprivileged: - printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn); -#ifndef DEBUG_SPR - emulated = EMULATE_FAIL; -#endif + pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn); + if (sprn & 0x10) { + if (kvmppc_get_msr(vcpu) & MSR_PR) { + kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); + emulated = EMULATE_AGAIN; + } + } else { + if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 || + sprn == 4 || sprn == 5 || sprn == 6) { + kvmppc_core_queue_program(vcpu, 
SRR1_PROGILL); + emulated = EMULATE_AGAIN; + } + } + break; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index fadb75abfe37..42b7a4fd57d9 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -67,6 +67,7 @@ #include <asm/mmu.h> #include <asm/opal.h> #include <asm/xics.h> +#include <asm/xive.h> #include "book3s.h" @@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) { + if (xive_enabled()) { + ret = H_NOT_AVAILABLE; + return RESUME_GUEST; + } ret = kvmppc_xics_hcall(vcpu, req); break; } @@ -2947,8 +2952,12 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) r = kvmppc_book3s_hv_page_fault(run, vcpu, vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); - } else if (r == RESUME_PASSTHROUGH) - r = kvmppc_xics_rm_complete(vcpu, 0); + } else if (r == RESUME_PASSTHROUGH) { + if (WARN_ON(xive_enabled())) + r = H_SUCCESS; + else + r = kvmppc_xics_rm_complete(vcpu, 0); + } } while (is_kvmppc_resume_guest(r)); out: @@ -3400,10 +3409,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) /* * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) * Set HVICE bit to enable hypervisor virtualization interrupts. + * Set HEIC to prevent OS interrupts to go to hypervisor (should + * be unnecessary but better safe than sorry in case we re-enable + * EE in HV mode with this LPCR still set) */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { lpcr &= ~LPCR_VPM0; - lpcr |= LPCR_HVICE; + lpcr |= LPCR_HVICE | LPCR_HEIC; + + /* + * If xive is enabled, we route 0x500 interrupts directly + * to the guest. + */ + if (xive_enabled()) + lpcr |= LPCR_LPES; } /* @@ -3533,7 +3552,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) struct kvmppc_irq_map *irq_map; struct kvmppc_passthru_irqmap *pimap; struct irq_chip *chip; - int i; + int i, rc = 0; if (!kvm_irq_bypass) return 1; @@ -3558,10 +3577,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) /* * For now, we only support interrupts for which the EOI operation * is an OPAL call followed by a write to XIRR, since that's - * what our real-mode EOI code does. 
+ * what our real-mode EOI code does, or a XIVE interrupt */ chip = irq_data_get_irq_chip(&desc->irq_data); - if (!chip || !is_pnv_opal_msi(chip)) { + if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) { pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", host_irq, guest_gsi); mutex_unlock(&kvm->lock); @@ -3603,7 +3622,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) if (i == pimap->n_mapped) pimap->n_mapped++; - kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); + if (xive_enabled()) + rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc); + else + kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); + if (rc) + irq_map->r_hwirq = 0; mutex_unlock(&kvm->lock); @@ -3614,7 +3638,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) { struct irq_desc *desc; struct kvmppc_passthru_irqmap *pimap; - int i; + int i, rc = 0; if (!kvm_irq_bypass) return 0; @@ -3624,11 +3648,9 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) return -EIO; mutex_lock(&kvm->lock); + if (!kvm->arch.pimap) + goto unlock; - if (kvm->arch.pimap == NULL) { - mutex_unlock(&kvm->lock); - return 0; - } pimap = kvm->arch.pimap; for (i = 0; i < pimap->n_mapped; i++) { @@ -3641,18 +3663,21 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) return -ENODEV; } - kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); + if (xive_enabled()) + rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc); + else + kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); - /* invalidate the entry */ + /* invalidate the entry (what do do on error from the above ?) */ pimap->mapped[i].r_hwirq = 0; /* * We don't free this structure even when the count goes to * zero. The structure is freed when we destroy the VM. */ - + unlock: mutex_unlock(&kvm->lock); - return 0; + return rc; } static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, @@ -3930,7 +3955,7 @@ static int kvmppc_book3s_init_hv(void) * indirectly, via OPAL. */ #ifdef CONFIG_SMP - if (!get_paca()->kvm_hstate.xics_phys) { + if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 9c71c72e65ce..88a65923c649 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -32,6 +32,24 @@ #define KVM_CMA_CHUNK_ORDER 18 +#include "book3s_xics.h" +#include "book3s_xive.h" + +/* + * The XIVE module will populate these when it loads + */ +unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu); +unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server); +int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); +int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); +EXPORT_SYMBOL_GPL(__xive_vm_h_xirr); +EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll); +EXPORT_SYMBOL_GPL(__xive_vm_h_ipi); +EXPORT_SYMBOL_GPL(__xive_vm_h_cppr); +EXPORT_SYMBOL_GPL(__xive_vm_h_eoi); + /* * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206) * should be power of 2. 
@@ -211,6 +229,7 @@ void kvmhv_rm_send_ipi(int cpu) __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg)); return; } + /* On POWER8 for IPIs to threads in the same core, use msgsnd. */ if (cpu_has_feature(CPU_FTR_ARCH_207S) && cpu_first_thread_sibling(cpu) == @@ -407,6 +426,9 @@ static long kvmppc_read_one_intr(bool *again) u8 host_ipi; int64_t rc; + if (xive_enabled()) + return 1; + /* see if a host IPI is pending */ host_ipi = local_paca->kvm_hstate.host_ipi; if (host_ipi) @@ -491,3 +513,84 @@ static long kvmppc_read_one_intr(bool *again) return kvmppc_check_passthru(xisr, xirr, again); } + +#ifdef CONFIG_KVM_XICS +static inline bool is_rm(void) +{ + return !(mfmsr() & MSR_DR); +} + +unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_xirr(vcpu); + if (unlikely(!__xive_vm_h_xirr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_xirr(vcpu); + } else + return xics_rm_h_xirr(vcpu); +} + +unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu) +{ + vcpu->arch.gpr[5] = get_tb(); + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_xirr(vcpu); + if (unlikely(!__xive_vm_h_xirr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_xirr(vcpu); + } else + return xics_rm_h_xirr(vcpu); +} + +unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_ipoll(vcpu, server); + if (unlikely(!__xive_vm_h_ipoll)) + return H_NOT_AVAILABLE; + return __xive_vm_h_ipoll(vcpu, server); + } else + return H_TOO_HARD; +} + +int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_ipi(vcpu, server, mfrr); + if (unlikely(!__xive_vm_h_ipi)) + return H_NOT_AVAILABLE; + return __xive_vm_h_ipi(vcpu, server, mfrr); + } else + return xics_rm_h_ipi(vcpu, server, mfrr); +} + +int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_cppr(vcpu, cppr); + if (unlikely(!__xive_vm_h_cppr)) + return H_NOT_AVAILABLE; + return __xive_vm_h_cppr(vcpu, cppr); + } else + return xics_rm_h_cppr(vcpu, cppr); +} + +int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) +{ + if (xive_enabled()) { + if (is_rm()) + return xive_rm_h_eoi(vcpu, xirr); + if (unlikely(!__xive_vm_h_eoi)) + return H_NOT_AVAILABLE; + return __xive_vm_h_eoi(vcpu, xirr); + } else + return xics_rm_h_eoi(vcpu, xirr); +} +#endif /* CONFIG_KVM_XICS */ diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index ffde4507ddfd..2a862618f072 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -484,7 +484,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp, } -unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) +unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; @@ -522,8 +522,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu) return check_too_hard(xics, icp); } -int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, - unsigned long mfrr) +int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; @@ -609,7 +609,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, return check_too_hard(xics, this_icp); } -int 
kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) +int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) { union kvmppc_icp_state old_state, new_state; struct kvmppc_xics *xics = vcpu->kvm->arch.xics; @@ -729,7 +729,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq) return check_too_hard(xics, icp); } -int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) +int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr) { struct kvmppc_xics *xics = vcpu->kvm->arch.xics; struct kvmppc_icp *icp = vcpu->arch.icp; diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c new file mode 100644 index 000000000000..abf5f01b6eb1 --- /dev/null +++ b/arch/powerpc/kvm/book3s_hv_rm_xive.c @@ -0,0 +1,47 @@ +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/err.h> +#include <linux/kernel_stat.h> + +#include <asm/kvm_book3s.h> +#include <asm/kvm_ppc.h> +#include <asm/hvcall.h> +#include <asm/xics.h> +#include <asm/debug.h> +#include <asm/synch.h> +#include <asm/cputhreads.h> +#include <asm/pgtable.h> +#include <asm/ppc-opcode.h> +#include <asm/pnv-pci.h> +#include <asm/opal.h> +#include <asm/smp.h> +#include <asm/asm-prototypes.h> +#include <asm/xive.h> +#include <asm/xive-regs.h> + +#include "book3s_xive.h" + +/* XXX */ +#include <asm/udbg.h> +//#define DBG(fmt...) udbg_printf(fmt) +#define DBG(fmt...) do { } while(0) + +static inline void __iomem *get_tima_phys(void) +{ + return local_paca->kvm_hstate.xive_tima_phys; +} + +#undef XIVE_RUNTIME_CHECKS +#define X_PFX xive_rm_ +#define X_STATIC +#define X_STAT_PFX stat_rm_ +#define __x_tima get_tima_phys() +#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page)) +#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page)) +#define __x_readb __raw_rm_readb +#define __x_writeb __raw_rm_writeb +#define __x_readw __raw_rm_readw +#define __x_readq __raw_rm_readq +#define __x_writeq __raw_rm_writeq + +#include "book3s_xive_template.c" diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 7c6477d1840a..bdb3f76ceb6b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -30,6 +30,7 @@ #include <asm/book3s/64/mmu-hash.h> #include <asm/tm.h> #include <asm/opal.h> +#include <asm/xive-regs.h> #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) @@ -970,6 +971,23 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) cmpwi r3, 512 /* 1 microsecond */ blt hdec_soon +#ifdef CONFIG_KVM_XICS + /* We are entering the guest on that thread, push VCPU to XIVE */ + ld r10, HSTATE_XIVE_TIMA_PHYS(r13) + cmpldi cr0, r10, r0 + beq no_xive + ld r11, VCPU_XIVE_SAVED_STATE(r4) + li r9, TM_QW1_OS + stdcix r11,r9,r10 + eieio + lwz r11, VCPU_XIVE_CAM_WORD(r4) + li r9, TM_QW1_OS + TM_WORD2 + stwcix r11,r9,r10 + li r9, 1 + stw r9, VCPU_XIVE_PUSHED(r4) +no_xive: +#endif /* CONFIG_KVM_XICS */ + deliver_guest_interrupt: ld r6, VCPU_CTR(r4) ld r7, VCPU_XER(r4) @@ -1307,6 +1325,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) blt deliver_guest_interrupt guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ +#ifdef CONFIG_KVM_XICS + /* We are exiting, pull the VP from the XIVE */ + lwz r0, VCPU_XIVE_PUSHED(r9) + cmpwi cr0, r0, 0 + beq 1f + li r7, TM_SPC_PULL_OS_CTX + li r6, TM_QW1_OS + mfmsr r0 + andi. r0, r0, MSR_IR /* in real mode? 
*/ + beq 2f + ld r10, HSTATE_XIVE_TIMA_VIRT(r13) + cmpldi cr0, r10, 0 + beq 1f + /* First load to pull the context, we ignore the value */ + lwzx r11, r7, r10 + eieio + /* Second load to recover the context state (Words 0 and 1) */ + ldx r11, r6, r10 + b 3f +2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) + cmpldi cr0, r10, 0 + beq 1f + /* First load to pull the context, we ignore the value */ + lwzcix r11, r7, r10 + eieio + /* Second load to recover the context state (Words 0 and 1) */ + ldcix r11, r6, r10 +3: std r11, VCPU_XIVE_SAVED_STATE(r9) + /* Fixup some of the state for the next load */ + li r10, 0 + li r0, 0xff + stw r10, VCPU_XIVE_PUSHED(r9) + stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) + stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) +1: +#endif /* CONFIG_KVM_XICS */ /* Save more register state */ mfdar r6 mfdsisr r7 @@ -2011,7 +2065,7 @@ hcall_real_table: .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table - .long 0 /* 0x70 - H_IPOLL */ + .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table #else .long 0 /* 0x64 - H_EOI */ @@ -2181,7 +2235,11 @@ hcall_real_table: .long 0 /* 0x2f0 */ .long 0 /* 0x2f4 */ .long 0 /* 0x2f8 */ - .long 0 /* 0x2fc */ +#ifdef CONFIG_KVM_XICS + .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table +#else + .long 0 /* 0x2fc - H_XIRR_X*/ +#endif .long DOTSYM(kvmppc_h_random) - hcall_real_table .globl hcall_real_table_end hcall_real_table_end: diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d4dfc0ca2a44..69a09444d46e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -349,7 +349,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) if (msr & MSR_POW) { if (!vcpu->arch.pending_exceptions) { kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.halt_wakeup++; /* Unset POW bit after we woke up */ @@ -537,8 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, int r = RESUME_GUEST; int relocated; int page_found = 0; - struct kvmppc_pte pte; - bool is_mmio = false; + struct kvmppc_pte pte = { 0 }; bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; u64 vsid; @@ -616,8 +615,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Page not found in guest SLB */ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); - } else if (!is_mmio && - kvmppc_visible_gpa(vcpu, pte.raddr)) { + } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) { if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { /* * There is already a host HPTE there, presumably @@ -627,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_mmu_unmap_page(vcpu, &pte); } /* The guest's PTE is not mapped yet. 
Map on the host */ - kvmppc_mmu_map_page(vcpu, &pte, iswrite); + if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { + /* Exit KVM if mapping failed */ + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } if (data) vcpu->stat.sp_storage++; else if (vcpu->arch.mmu.is_dcbz32(vcpu) && diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index f102616febc7..bcbeeb62dd13 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -344,7 +344,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) case H_CEDE: kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); vcpu->stat.halt_wakeup++; return EMULATE_DONE; case H_LOGICAL_CI_LOAD: diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 20528701835b..2d3b2b1cc272 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -16,6 +16,7 @@ #include <asm/kvm_ppc.h> #include <asm/hvcall.h> #include <asm/rtas.h> +#include <asm/xive.h> #ifdef CONFIG_KVM_XICS static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) @@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) server = be32_to_cpu(args->args[1]); priority = be32_to_cpu(args->args[2]); - rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); + if (xive_enabled()) + rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority); + else + rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); if (rc) rc = -3; out: @@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); server = priority = 0; - rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); + if (xive_enabled()) + rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority); + else + rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); if (rc) { rc = -3; goto out; @@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); - rc = kvmppc_xics_int_off(vcpu->kvm, irq); + if (xive_enabled()) + rc = kvmppc_xive_int_off(vcpu->kvm, irq); + else + rc = kvmppc_xics_int_off(vcpu->kvm, irq); if (rc) rc = -3; out: @@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) irq = be32_to_cpu(args->args[0]); - rc = kvmppc_xics_int_on(vcpu->kvm, irq); + if (xive_enabled()) + rc = kvmppc_xive_int_on(vcpu->kvm, irq); + else + rc = kvmppc_xics_int_on(vcpu->kvm, irq); if (rc) rc = -3; out: diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index 459b72cb617a..d329b2add7e2 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c @@ -1306,8 +1306,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr) return 0; } -int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, - bool line_status) +int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) { struct kvmppc_xics *xics = kvm->arch.xics; @@ -1316,14 +1316,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, return ics_deliver_irq(xics, irq, level); } -int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry, - struct kvm *kvm, int irq_source_id, - int level, bool line_status) -{ - return kvm_set_irq(kvm, irq_source_id, 
irq_entry->gsi, - level, line_status); -} - static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { struct kvmppc_xics *xics = dev->private; @@ -1457,29 +1449,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; } -static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e, - struct kvm *kvm, int irq_source_id, int level, - bool line_status) -{ - return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); -} - -int kvm_irq_map_gsi(struct kvm *kvm, - struct kvm_kernel_irq_routing_entry *entries, int gsi) -{ - entries->gsi = gsi; - entries->type = KVM_IRQ_ROUTING_IRQCHIP; - entries->set = xics_set_irq; - entries->irqchip.irqchip = 0; - entries->irqchip.pin = gsi; - return 1; -} - -int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) -{ - return pin; -} - void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq, unsigned long host_irq) { diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h index ec5474cf70c6..453c9e518c19 100644 --- a/arch/powerpc/kvm/book3s_xics.h +++ b/arch/powerpc/kvm/book3s_xics.h @@ -10,6 +10,7 @@ #ifndef _KVM_PPC_BOOK3S_XICS_H #define _KVM_PPC_BOOK3S_XICS_H +#ifdef CONFIG_KVM_XICS /* * We use a two-level tree to store interrupt source information. * There are up to 1024 ICS nodes, each of which can represent @@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics, return ics; } +extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu); +extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); +#endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c new file mode 100644 index 000000000000..ffe1da95033a --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive.c @@ -0,0 +1,1894 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "xive-kvm: " fmt + +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/err.h> +#include <linux/gfp.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/percpu.h> +#include <linux/cpumask.h> +#include <asm/uaccess.h> +#include <asm/kvm_book3s.h> +#include <asm/kvm_ppc.h> +#include <asm/hvcall.h> +#include <asm/xics.h> +#include <asm/xive.h> +#include <asm/xive-regs.h> +#include <asm/debug.h> +#include <asm/debugfs.h> +#include <asm/time.h> +#include <asm/opal.h> + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include "book3s_xive.h" + + +/* + * Virtual mode variants of the hcalls for use on radix/radix + * with AIL. They require the VCPU's VP to be "pushed" + * + * We still instanciate them here because we use some of the + * generated utility functions as well in this file. 
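+ * + * book3s_xive_template.c is included twice: here with the virtual mode + * accessors (__raw_readb/__raw_writeb on the eoi_mmio/trig_mmio + * mappings) and from book3s_hv_rm_xive.c with the cache-inhibited real + * mode accessors, X_PFX giving each copy its own symbol prefix + * (xive_vm_ vs xive_rm_).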
+ */ +#define XIVE_RUNTIME_CHECKS +#define X_PFX xive_vm_ +#define X_STATIC static +#define X_STAT_PFX stat_vm_ +#define __x_tima xive_tima +#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) +#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) +#define __x_readb __raw_readb +#define __x_writeb __raw_writeb +#define __x_readw __raw_readw +#define __x_readq __raw_readq +#define __x_writeq __raw_writeq + +#include "book3s_xive_template.c" + +/* + * We leave a gap of a couple of interrupts in the queue to + * account for the IPI and additional safety guard. + */ +#define XIVE_Q_GAP 2 + +/* + * This is a simple trigger for a generic XIVE IRQ. This must + * only be called for interrupts that support a trigger page + */ +static bool xive_irq_trigger(struct xive_irq_data *xd) +{ + /* This should be only for MSIs */ + if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) + return false; + + /* Those interrupts should always have a trigger page */ + if (WARN_ON(!xd->trig_mmio)) + return false; + + out_be64(xd->trig_mmio, 0); + + return true; +} + +static irqreturn_t xive_esc_irq(int irq, void *data) +{ + struct kvm_vcpu *vcpu = data; + + /* We use the existing H_PROD mechanism to wake up the target */ + vcpu->arch.prodded = 1; + smp_mb(); + if (vcpu->arch.ceded) + kvmppc_fast_vcpu_kick(vcpu); + + return IRQ_HANDLED; +} + +static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q = &xc->queues[prio]; + char *name = NULL; + int rc; + + /* Already there ? */ + if (xc->esc_virq[prio]) + return 0; + + /* Hook up the escalation interrupt */ + xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); + if (!xc->esc_virq[prio]) { + pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n", + prio, xc->server_num); + return -EIO; + } + + /* + * Future improvement: start with them disabled + * and handle DD2 and later scheme of merged escalation + * interrupts + */ + name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", + vcpu->kvm->arch.lpid, xc->server_num, prio); + if (!name) { + pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n", + prio, xc->server_num); + rc = -ENOMEM; + goto error; + } + rc = request_irq(xc->esc_virq[prio], xive_esc_irq, + IRQF_NO_THREAD, name, vcpu); + if (rc) { + pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n", + prio, xc->server_num); + goto error; + } + xc->esc_virq_names[prio] = name; + return 0; +error: + irq_dispose_mapping(xc->esc_virq[prio]); + xc->esc_virq[prio] = 0; + kfree(name); + return rc; +} + +static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = xc->xive; + struct xive_q *q = &xc->queues[prio]; + void *qpage; + int rc; + + if (WARN_ON(q->qpage)) + return 0; + + /* Allocate the queue and retrieve infos on current node for now */ + qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); + if (!qpage) { + pr_err("Failed to allocate queue %d for VCPU %d\n", + prio, xc->server_num); + return -ENOMEM;; + } + memset(qpage, 0, 1 << xive->q_order); + + /* + * Reconfigure the queue. This will set q->qpage only once the + * queue is fully configured. 
This is a requirement for prio 0 + * as we will stop doing EOIs for every IPI as soon as we observe + * qpage being non-NULL, and instead will only EOI when we receive + * corresponding queue 0 entries + */ + rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, + xive->q_order, true); + if (rc) + pr_err("Failed to configure queue %d for VCPU %d\n", + prio, xc->server_num); + return rc; +} + +/* Called with kvm_lock held */ +static int xive_check_provisioning(struct kvm *kvm, u8 prio) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvm_vcpu *vcpu; + int i, rc; + + lockdep_assert_held(&kvm->lock); + + /* Already provisioned ? */ + if (xive->qmap & (1 << prio)) + return 0; + + pr_devel("Provisioning prio... %d\n", prio); + + /* Provision each VCPU and enable escalations */ + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!vcpu->arch.xive_vcpu) + continue; + rc = xive_provision_queue(vcpu, prio); + if (rc == 0) + xive_attach_escalation(vcpu, prio); + if (rc) + return rc; + } + + /* Order previous stores and mark it as provisioned */ + mb(); + xive->qmap |= (1 << prio); + return 0; +} + +static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio) +{ + struct kvm_vcpu *vcpu; + struct kvmppc_xive_vcpu *xc; + struct xive_q *q; + + /* Locate target server */ + vcpu = kvmppc_xive_find_server(kvm, server); + if (!vcpu) { + pr_warn("%s: Can't find server %d\n", __func__, server); + return; + } + xc = vcpu->arch.xive_vcpu; + if (WARN_ON(!xc)) + return; + + q = &xc->queues[prio]; + atomic_inc(&q->pending_count); +} + +static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q; + u32 max; + + if (WARN_ON(!xc)) + return -ENXIO; + if (!xc->valid) + return -ENXIO; + + q = &xc->queues[prio]; + if (WARN_ON(!q->qpage)) + return -ENXIO; + + /* Calculate max number of interrupts in that queue. */ + max = (q->msk + 1) - XIVE_Q_GAP; + return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; +} + +static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) +{ + struct kvm_vcpu *vcpu; + int i, rc; + + /* Locate target server */ + vcpu = kvmppc_xive_find_server(kvm, *server); + if (!vcpu) { + pr_devel("Can't find server %d\n", *server); + return -EINVAL; + } + + pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); + + /* Try pick it */ + rc = xive_try_pick_queue(vcpu, prio); + if (rc == 0) + return rc; + + pr_devel(" .. failed, looking up candidate...\n"); + + /* Failed, pick another VCPU */ + kvm_for_each_vcpu(i, vcpu, kvm) { + if (!vcpu->arch.xive_vcpu) + continue; + rc = xive_try_pick_queue(vcpu, prio); + if (rc == 0) { + *server = vcpu->arch.xive_vcpu->server_num; + pr_devel(" found on 0x%x/%d\n", *server, prio); + return rc; + } + } + pr_devel(" no available target !\n"); + + /* No available target ! */ + return -EBUSY; +} + +static u8 xive_lock_and_mask(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state) +{ + struct xive_irq_data *xd; + u32 hw_num; + u8 old_prio; + u64 val; + + /* + * Take the lock, set masked, try again if racing + * with H_EOI + */ + for (;;) { + arch_spin_lock(&sb->lock); + old_prio = state->guest_priority; + state->guest_priority = MASKED; + mb(); + if (!state->in_eoi) + break; + state->guest_priority = old_prio; + arch_spin_unlock(&sb->lock); + } + + /* No change ? 
Bail */ + if (old_prio == MASKED) + return old_prio; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, &xd); + + /* + * If the interrupt is marked as needing masking via + * firmware, we do it here. Firmware masking however + * is "lossy", it won't return the old p and q bits + * and won't set the interrupt to a state where it will + * record queued ones. If this is an issue we should do + * lazy masking instead. + * + * For now, we work around this in unmask by forcing + * an interrupt whenever we unmask a non-LSI via FW + * (if ever). + */ + if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { + xive_native_configure_irq(hw_num, + xive->vp_base + state->act_server, + MASKED, state->number); + /* set old_p so we can track if an H_EOI was done */ + state->old_p = true; + state->old_q = false; + } else { + /* Set PQ to 10, return old P and old Q and remember them */ + val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); + state->old_p = !!(val & 2); + state->old_q = !!(val & 1); + + /* + * Synchronize hardware to sensure the queues are updated + * when masking + */ + xive_native_sync_source(hw_num); + } + + return old_prio; +} + +static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state) +{ + /* + * Take the lock try again if racing with H_EOI + */ + for (;;) { + arch_spin_lock(&sb->lock); + if (!state->in_eoi) + break; + arch_spin_unlock(&sb->lock); + } +} + +static void xive_finish_unmask(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + struct kvmppc_xive_irq_state *state, + u8 prio) +{ + struct xive_irq_data *xd; + u32 hw_num; + + /* If we aren't changing a thing, move on */ + if (state->guest_priority != MASKED) + goto bail; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, &xd); + + /* + * See command in xive_lock_and_mask() concerning masking + * via firmware. + */ + if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { + xive_native_configure_irq(hw_num, + xive->vp_base + state->act_server, + state->act_priority, state->number); + /* If an EOI is needed, do it here */ + if (!state->old_p) + xive_vm_source_eoi(hw_num, xd); + /* If this is not an LSI, force a trigger */ + if (!(xd->flags & OPAL_XIVE_IRQ_LSI)) + xive_irq_trigger(xd); + goto bail; + } + + /* Old Q set, set PQ to 11 */ + if (state->old_q) + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); + + /* + * If not old P, then perform an "effective" EOI, + * on the source. This will handle the cases where + * FW EOI is needed. + */ + if (!state->old_p) + xive_vm_source_eoi(hw_num, xd); + + /* Synchronize ordering and mark unmasked */ + mb(); +bail: + state->guest_priority = prio; +} + +/* + * Target an interrupt to a given server/prio, this will fallback + * to another server if necessary and perform the HW targetting + * updates as needed + * + * NOTE: Must be called with the state lock held + */ +static int xive_target_interrupt(struct kvm *kvm, + struct kvmppc_xive_irq_state *state, + u32 server, u8 prio) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + u32 hw_num; + int rc; + + /* + * This will return a tentative server and actual + * priority. The count for that new target will have + * already been incremented. + */ + rc = xive_select_target(kvm, &server, prio); + + /* + * We failed to find a target ? Not much we can do + * at least until we support the GIQ. + */ + if (rc) + return rc; + + /* + * Increment the old queue pending count if there + * was one so that the old queue count gets adjusted later + * when observed to be empty. 
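+ * The reference on the new target was already taken by + * xive_select_target() via xive_try_pick_queue(), which bumps q->count + * with atomic_add_unless() against the queue size minus XIVE_Q_GAP; + * here we only account the pending entry on the old queue.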
+ */ + if (state->act_priority != MASKED) + xive_inc_q_pending(kvm, + state->act_server, + state->act_priority); + /* + * Update state and HW + */ + state->act_priority = prio; + state->act_server = server; + + /* Get the right irq */ + kvmppc_xive_select_irq(state, &hw_num, NULL); + + return xive_native_configure_irq(hw_num, + xive->vp_base + server, + prio, state->number); +} + +/* + * Targetting rules: In order to avoid losing track of + * pending interrupts accross mask and unmask, which would + * allow queue overflows, we implement the following rules: + * + * - Unless it was never enabled (or we run out of capacity) + * an interrupt is always targetted at a valid server/queue + * pair even when "masked" by the guest. This pair tends to + * be the last one used but it can be changed under some + * circumstances. That allows us to separate targetting + * from masking, we only handle accounting during (re)targetting, + * this also allows us to let an interrupt drain into its target + * queue after masking, avoiding complex schemes to remove + * interrupts out of remote processor queues. + * + * - When masking, we set PQ to 10 and save the previous value + * of P and Q. + * + * - When unmasking, if saved Q was set, we set PQ to 11 + * otherwise we leave PQ to the HW state which will be either + * 10 if nothing happened or 11 if the interrupt fired while + * masked. Effectively we are OR'ing the previous Q into the + * HW Q. + * + * Then if saved P is clear, we do an effective EOI (Q->P->Trigger) + * which will unmask the interrupt and shoot a new one if Q was + * set. + * + * Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11, + * effectively meaning an H_EOI from the guest is still expected + * for that interrupt). + * + * - If H_EOI occurs while masked, we clear the saved P. + * + * - When changing target, we account on the new target and + * increment a separate "pending" counter on the old one. + * This pending counter will be used to decrement the old + * target's count when its queue has been observed empty. + */ + +int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, + u32 priority) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u8 new_act_prio; + int rc = 0; + u16 idx; + + if (!xive) + return -ENODEV; + + pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", + irq, server, priority); + + /* First, check provisioning of queues */ + if (priority != MASKED) + rc = xive_check_provisioning(xive->kvm, + xive_prio_from_guest(priority)); + if (rc) { + pr_devel(" provisioning failure %d !\n", rc); + return rc; + } + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * We first handle masking/unmasking since the locking + * might need to be retried due to EOIs, we'll handle + * targetting changes later. These functions will return + * with the SB lock held. + * + * xive_lock_and_mask() will also set state->guest_priority + * but won't otherwise change other fields of the state. + * + * xive_lock_for_unmask will not actually unmask, this will + * be done later by xive_finish_unmask() once the targetting + * has been done, so we don't try to unmask an interrupt + * that hasn't yet been targetted. + */ + if (priority == MASKED) + xive_lock_and_mask(xive, sb, state); + else + xive_lock_for_unmask(sb, state); + + + /* + * Then we handle targetting. 
+ * + * First calculate a new "actual priority" + */ + new_act_prio = state->act_priority; + if (priority != MASKED) + new_act_prio = xive_prio_from_guest(priority); + + pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n", + new_act_prio, state->act_server, state->act_priority); + + /* + * Then check if we actually need to change anything, + * + * The condition for re-targetting the interrupt is that + * we have a valid new priority (new_act_prio is not 0xff) + * and either the server or the priority changed. + * + * Note: If act_priority was ff and the new priority is + * also ff, we don't do anything and leave the interrupt + * untargetted. An attempt of doing an int_on on an + * untargetted interrupt will fail. If that is a problem + * we could initialize interrupts with valid default + */ + + if (new_act_prio != MASKED && + (state->act_server != server || + state->act_priority != new_act_prio)) + rc = xive_target_interrupt(kvm, state, server, new_act_prio); + + /* + * Perform the final unmasking of the interrupt source + * if necessary + */ + if (priority != MASKED) + xive_finish_unmask(xive, sb, state, priority); + + /* + * Finally Update saved_priority to match. Only int_on/off + * set this field to a different value. + */ + state->saved_priority = priority; + + arch_spin_unlock(&sb->lock); + return rc; +} + +int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, + u32 *priority) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + arch_spin_lock(&sb->lock); + *server = state->guest_server; + *priority = state->guest_priority; + arch_spin_unlock(&sb->lock); + + return 0; +} + +int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + pr_devel("int_on(irq=0x%x)\n", irq); + + /* + * Check if interrupt was not targetted + */ + if (state->act_priority == MASKED) { + pr_devel("int_on on untargetted interrupt\n"); + return -EINVAL; + } + + /* If saved_priority is 0xff, do nothing */ + if (state->saved_priority == MASKED) + return 0; + + /* + * Lock and unmask it. + */ + xive_lock_for_unmask(sb, state); + xive_finish_unmask(xive, sb, state, state->saved_priority); + arch_spin_unlock(&sb->lock); + + return 0; +} + +int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + pr_devel("int_off(irq=0x%x)\n", irq); + + /* + * Lock and mask + */ + state->saved_priority = xive_lock_and_mask(xive, sb, state); + arch_spin_unlock(&sb->lock); + + return 0; +} + +static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return false; + state = &sb->irq_state[idx]; + if (!state->valid) + return false; + + /* + * Trigger the IPI. 
This assumes we never restore a pass-through + * interrupt which should be safe enough + */ + xive_irq_trigger(&state->ipi_data); + + return true; +} + +u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + return 0; + + /* Return the per-cpu state for state saving/migration */ + return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; +} + +int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + u8 cppr, mfrr; + u32 xisr; + + if (!xc || !xive) + return -ENOENT; + + /* Grab individual state fields. We don't use pending_pri */ + cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT; + xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) & + KVM_REG_PPC_ICP_XISR_MASK; + mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT; + + pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", + xc->server_num, cppr, mfrr, xisr); + + /* + * We can't update the state of a "pushed" VCPU, but that + * shouldn't happen. + */ + if (WARN_ON(vcpu->arch.xive_pushed)) + return -EIO; + + /* Update VCPU HW saved state */ + vcpu->arch.xive_saved_state.cppr = cppr; + xc->hw_cppr = xc->cppr = cppr; + + /* + * Update MFRR state. If it's not 0xff, we mark the VCPU as + * having a pending MFRR change, which will re-evaluate the + * target. The VCPU will thus potentially get a spurious + * interrupt but that's not a big deal. + */ + xc->mfrr = mfrr; + if (mfrr < cppr) + xive_irq_trigger(&xc->vp_ipi_data); + + /* + * Now saved XIRR is "interesting". It means there's something in + * the legacy "1 element" queue... for an IPI we simply ignore it, + * as the MFRR restore will handle that. For anything else we need + * to force a resend of the source. + * However the source may not have been setup yet. If that's the + * case, we keep that info and increment a counter in the xive to + * tell subsequent xive_set_source() to go look. + */ + if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) { + xc->delayed_irq = xisr; + xive->delayed_irqs++; + pr_devel(" xisr restore delayed\n"); + } + + return 0; +} + +int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct irq_data *host_data = irq_desc_get_irq_data(host_desc); + unsigned int host_irq = irq_desc_get_irq(host_desc); + unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); + u16 idx; + u8 prio; + int rc; + + if (!xive) + return -ENODEV; + + pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq); + + sb = kvmppc_xive_find_source(xive, guest_irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * Mark the passed-through interrupt as going to a VCPU, + * this will prevent further EOIs and similar operations + * from the XIVE code. It will also mask the interrupt + * to either PQ=10 or 11 state, the latter if the interrupt + * is pending. This will allow us to unmask or retrigger it + * after routing it to the guest with a simple EOI. + * + * The "state" argument is a "token", all it needs is to be + * non-NULL to switch to passed-through or NULL for the + * other way around. We may not yet have an actual VCPU + * target here and we don't really care. 
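+ * The same token is cleared again by kvmppc_xive_clr_mapped(), which + * passes NULL to irq_set_vcpu_affinity() to hand the interrupt back to + * the host.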
+ */ + rc = irq_set_vcpu_affinity(host_irq, state); + if (rc) { + pr_err("Failed to set VCPU affinity for irq %d\n", host_irq); + return rc; + } + + /* + * Mask and read state of IPI. We need to know if its P bit + * is set as that means it's potentially already using a + * queue entry in the target + */ + prio = xive_lock_and_mask(xive, sb, state); + pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, + state->old_p, state->old_q); + + /* Turn the IPI hard off */ + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + + /* Grab info about irq */ + state->pt_number = hw_irq; + state->pt_data = irq_data_get_irq_handler_data(host_data); + + /* + * Configure the IRQ to match the existing configuration of + * the IPI if it was already targetted. Otherwise this will + * mask the interrupt in a lossy way (act_priority is 0xff) + * which is fine for a never started interrupt. + */ + xive_native_configure_irq(hw_irq, + xive->vp_base + state->act_server, + state->act_priority, state->number); + + /* + * We do an EOI to enable the interrupt (and retrigger if needed) + * if the guest has the interrupt unmasked and the P bit was *not* + * set in the IPI. If it was set, we know a slot may still be in + * use in the target queue thus we have to wait for a guest + * originated EOI + */ + if (prio != MASKED && !state->old_p) + xive_vm_source_eoi(hw_irq, state->pt_data); + + /* Clear old_p/old_q as they are no longer relevant */ + state->old_p = state->old_q = false; + + /* Restore guest prio (unlocks EOI) */ + mb(); + state->guest_priority = prio; + arch_spin_unlock(&sb->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped); + +int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, + struct irq_desc *host_desc) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + unsigned int host_irq = irq_desc_get_irq(host_desc); + u16 idx; + u8 prio; + int rc; + + if (!xive) + return -ENODEV; + + pr_devel("clr_mapped girq 0x%lx...\n", guest_irq); + + sb = kvmppc_xive_find_source(xive, guest_irq, &idx); + if (!sb) + return -EINVAL; + state = &sb->irq_state[idx]; + + /* + * Mask and read state of IRQ. We need to know if its P bit + * is set as that means it's potentially already using a + * queue entry in the target + */ + prio = xive_lock_and_mask(xive, sb, state); + pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio, + state->old_p, state->old_q); + + /* + * If old_p is set, the interrupt is pending, we switch it to + * PQ=11. This will force a resend in the host so the interrupt + * isn't lost to whatver host driver may pick it up + */ + if (state->old_p) + xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); + + /* Release the passed-through interrupt to the host */ + rc = irq_set_vcpu_affinity(host_irq, NULL); + if (rc) { + pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq); + return rc; + } + + /* Forget about the IRQ */ + state->pt_number = 0; + state->pt_data = NULL; + + /* Reconfigure the IPI */ + xive_native_configure_irq(state->ipi_number, + xive->vp_base + state->act_server, + state->act_priority, state->number); + + /* + * If old_p is set (we have a queue entry potentially + * occupied) or the interrupt is masked, we set the IPI + * to PQ=10 state. Otherwise we just re-enable it (PQ=00). 
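+ * (PQ=10 is the same state used when masking: the source will not + * notify again and further triggers only set Q, while PQ=00 re-arms + * it immediately.)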
+ */ + if (prio == MASKED || state->old_p) + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); + else + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); + + /* Restore guest prio (unlocks EOI) */ + mb(); + state->guest_priority = prio; + arch_spin_unlock(&sb->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); + +static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvm *kvm = vcpu->kvm; + struct kvmppc_xive *xive = kvm->arch.xive; + int i, j; + + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; + + if (!state->valid) + continue; + if (state->act_priority == MASKED) + continue; + if (state->act_server != xc->server_num) + continue; + + /* Clean it up */ + arch_spin_lock(&sb->lock); + state->act_priority = MASKED; + xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); + if (state->pt_number) { + xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(state->pt_number, 0, MASKED, 0); + } + arch_spin_unlock(&sb->lock); + } + } +} + +void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct kvmppc_xive *xive = xc->xive; + int i; + + pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); + + /* Ensure no interrupt is still routed to that VP */ + xc->valid = false; + kvmppc_xive_disable_vcpu_interrupts(vcpu); + + /* Mask the VP IPI */ + xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); + + /* Disable the VP */ + xive_native_disable_vp(xc->vp_id); + + /* Free the queues & associated interrupts */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + + /* Free the escalation irq */ + if (xc->esc_virq[i]) { + free_irq(xc->esc_virq[i], vcpu); + irq_dispose_mapping(xc->esc_virq[i]); + kfree(xc->esc_virq_names[i]); + } + /* Free the queue */ + xive_native_disable_queue(xc->vp_id, q, i); + if (q->qpage) { + free_pages((unsigned long)q->qpage, + xive->q_page_order); + q->qpage = NULL; + } + } + + /* Free the IPI */ + if (xc->vp_ipi) { + xive_cleanup_irq_data(&xc->vp_ipi_data); + xive_native_free_irq(xc->vp_ipi); + } + /* Free the VP */ + kfree(xc); +} + +int kvmppc_xive_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 cpu) +{ + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_vcpu *xc; + int i, r = -EBUSY; + + pr_devel("connect_vcpu(cpu=%d)\n", cpu); + + if (dev->ops != &kvm_xive_ops) { + pr_devel("Wrong ops !\n"); + return -EPERM; + } + if (xive->kvm != vcpu->kvm) + return -EPERM; + if (vcpu->arch.irq_type) + return -EBUSY; + if (kvmppc_xive_find_server(vcpu->kvm, cpu)) { + pr_devel("Duplicate !\n"); + return -EEXIST; + } + if (cpu >= KVM_MAX_VCPUS) { + pr_devel("Out of bounds !\n"); + return -EINVAL; + } + xc = kzalloc(sizeof(*xc), GFP_KERNEL); + if (!xc) + return -ENOMEM; + + /* We need to synchronize with queue provisioning */ + mutex_lock(&vcpu->kvm->lock); + vcpu->arch.xive_vcpu = xc; + xc->xive = xive; + xc->vcpu = vcpu; + xc->server_num = cpu; + xc->vp_id = xive->vp_base + cpu; + xc->mfrr = 0xff; + xc->valid = true; + + r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); + if (r) + goto bail; + + /* Configure VCPU fields for use by assembly push/pull */ + 
vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); + vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); + + /* Allocate IPI */ + xc->vp_ipi = xive_native_alloc_irq(); + if (!xc->vp_ipi) { + r = -EIO; + goto bail; + } + pr_devel(" IPI=0x%x\n", xc->vp_ipi); + + r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); + if (r) + goto bail; + + /* + * Initialize queues. Initially we set them all for no queueing + * and we enable escalation for queue 0 only which we'll use for + * our mfrr change notifications. If the VCPU is hot-plugged, we + * do handle provisioning however. + */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + + /* Is queue already enabled ? Provision it */ + if (xive->qmap & (1 << i)) { + r = xive_provision_queue(vcpu, i); + if (r == 0) + xive_attach_escalation(vcpu, i); + if (r) + goto bail; + } else { + r = xive_native_configure_queue(xc->vp_id, + q, i, NULL, 0, true); + if (r) { + pr_err("Failed to configure queue %d for VCPU %d\n", + i, cpu); + goto bail; + } + } + } + + /* If not done above, attach priority 0 escalation */ + r = xive_attach_escalation(vcpu, 0); + if (r) + goto bail; + + /* Enable the VP */ + r = xive_native_enable_vp(xc->vp_id); + if (r) + goto bail; + + /* Route the IPI */ + r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); + if (!r) + xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); + +bail: + mutex_unlock(&vcpu->kvm->lock); + if (r) { + kvmppc_xive_cleanup_vcpu(vcpu); + return r; + } + + vcpu->arch.irq_type = KVMPPC_IRQ_XICS; + return 0; +} + +/* + * Scanning of queues before/after migration save + */ +static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return; + + state = &sb->irq_state[idx]; + + /* Some sanity checking */ + if (!state->valid) { + pr_err("invalid irq 0x%x in cpu queue!\n", irq); + return; + } + + /* + * If the interrupt is in a queue it should have P set. + * We warn so that gets reported. A backtrace isn't useful + * so no need to use a WARN_ON. + */ + if (!state->saved_p) + pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); + + /* Set flag */ + state->in_queue = true; +} + +static void xive_pre_save_mask_irq(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + u32 irq) +{ + struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; + + if (!state->valid) + return; + + /* Mask and save state, this will also sync HW queues */ + state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); + + /* Transfer P and Q */ + state->saved_p = state->old_p; + state->saved_q = state->old_q; + + /* Unlock */ + arch_spin_unlock(&sb->lock); +} + +static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive, + struct kvmppc_xive_src_block *sb, + u32 irq) +{ + struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; + + if (!state->valid) + return; + + /* + * Lock / exclude EOI (not technically necessary if the + * guest isn't running concurrently. If this becomes a + * performance issue we can probably remove the lock. 
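+ * Only the priority that xive_pre_save_mask_irq() stashed in + * saved_scan_prio is restored here, so a source the guest had masked + * stays masked after the scan.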
+ */ + xive_lock_for_unmask(sb, state); + + /* Restore mask/prio if it wasn't masked */ + if (state->saved_scan_prio != MASKED) + xive_finish_unmask(xive, sb, state, state->saved_scan_prio); + + /* Unlock */ + arch_spin_unlock(&sb->lock); +} + +static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) +{ + u32 idx = q->idx; + u32 toggle = q->toggle; + u32 irq; + + do { + irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); + if (irq > XICS_IPI) + xive_pre_save_set_queued(xive, irq); + } while(irq); +} + +static void xive_pre_save_scan(struct kvmppc_xive *xive) +{ + struct kvm_vcpu *vcpu = NULL; + int i, j; + + /* + * See comment in xive_get_source() about how this + * work. Collect a stable state for all interrupts + */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + xive_pre_save_mask_irq(xive, sb, j); + } + + /* Then scan the queues and update the "in_queue" flag */ + kvm_for_each_vcpu(i, vcpu, xive->kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + if (!xc) + continue; + for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { + if (xc->queues[i].qpage) + xive_pre_save_queue(xive, &xc->queues[i]); + } + } + + /* Finally restore interrupt states */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + xive_pre_save_unmask_irq(xive, sb, j); + } +} + +static void xive_post_save_scan(struct kvmppc_xive *xive) +{ + u32 i, j; + + /* Clear all the in_queue flags */ + for (i = 0; i <= xive->max_sbid; i++) { + struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; + if (!sb) + continue; + for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) + sb->irq_state[j].in_queue = false; + } + + /* Next get_source() will do a new scan */ + xive->saved_src_count = 0; +} + +/* + * This returns the source configuration and state to user space. + */ +static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u64 val, prio; + u16 idx; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -ENOENT; + + state = &sb->irq_state[idx]; + + if (!state->valid) + return -ENOENT; + + pr_devel("get_source(%ld)...\n", irq); + + /* + * So to properly save the state into something that looks like a + * XICS migration stream we cannot treat interrupts individually. + * + * We need, instead, mask them all (& save their previous PQ state) + * to get a stable state in the HW, then sync them to ensure that + * any interrupt that had already fired hits its queue, and finally + * scan all the queues to collect which interrupts are still present + * in the queues, so we can set the "pending" flag on them and + * they can be resent on restore. + * + * So we do it all when the "first" interrupt gets saved, all the + * state is collected at that point, the rest of xive_get_source() + * will merely collect and convert that state to the expected + * userspace bit mask. 
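+ * + * saved_src_count tracks progress through the dump: the scan runs when + * it is zero and xive_post_save_scan() clears the in_queue flags and + * resets the counter once it reaches src_count.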
+ */ + if (xive->saved_src_count == 0) + xive_pre_save_scan(xive); + xive->saved_src_count++; + + /* Convert saved state into something compatible with xics */ + val = state->guest_server; + prio = state->saved_scan_prio; + + if (prio == MASKED) { + val |= KVM_XICS_MASKED; + prio = state->saved_priority; + } + val |= prio << KVM_XICS_PRIORITY_SHIFT; + if (state->lsi) { + val |= KVM_XICS_LEVEL_SENSITIVE; + if (state->saved_p) + val |= KVM_XICS_PENDING; + } else { + if (state->saved_p) + val |= KVM_XICS_PRESENTED; + + if (state->saved_q) + val |= KVM_XICS_QUEUED; + + /* + * We mark it pending (which will attempt a re-delivery) + * if we are in a queue *or* we were masked and had + * Q set which is equivalent to the XICS "masked pending" + * state + */ + if (state->in_queue || (prio == MASKED && state->saved_q)) + val |= KVM_XICS_PENDING; + } + + /* + * If that was the last interrupt saved, reset the + * in_queue flags + */ + if (xive->saved_src_count == xive->src_count) + xive_post_save_scan(xive); + + /* Copy the result to userspace */ + if (put_user(val, ubufp)) + return -EFAULT; + + return 0; +} + +static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive, + int irq) +{ + struct kvm *kvm = xive->kvm; + struct kvmppc_xive_src_block *sb; + int i, bid; + + bid = irq >> KVMPPC_XICS_ICS_SHIFT; + + mutex_lock(&kvm->lock); + + /* block already exists - somebody else got here first */ + if (xive->src_blocks[bid]) + goto out; + + /* Create the ICS */ + sb = kzalloc(sizeof(*sb), GFP_KERNEL); + if (!sb) + goto out; + + sb->id = bid; + + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; + sb->irq_state[i].guest_priority = MASKED; + sb->irq_state[i].saved_priority = MASKED; + sb->irq_state[i].act_priority = MASKED; + } + smp_wmb(); + xive->src_blocks[bid] = sb; + + if (bid > xive->max_sbid) + xive->max_sbid = bid; + +out: + mutex_unlock(&kvm->lock); + return xive->src_blocks[bid]; +} + +static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) +{ + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu = NULL; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + continue; + + if (xc->delayed_irq == irq) { + xc->delayed_irq = 0; + xive->delayed_irqs--; + return true; + } + } + return false; +} + +static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) +{ + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u64 __user *ubufp = (u64 __user *) addr; + u16 idx; + u64 val; + u8 act_prio, guest_prio; + u32 server; + int rc = 0; + + if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) + return -ENOENT; + + pr_devel("set_source(irq=0x%lx)\n", irq); + + /* Find the source */ + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) { + pr_devel("No source, creating source block...\n"); + sb = xive_create_src_block(xive, irq); + if (!sb) { + pr_devel("Failed to create block...\n"); + return -ENOMEM; + } + } + state = &sb->irq_state[idx]; + + /* Read user passed data */ + if (get_user(val, ubufp)) { + pr_devel("fault getting user info !\n"); + return -EFAULT; + } + + server = val & KVM_XICS_DESTINATION_MASK; + guest_prio = val >> KVM_XICS_PRIORITY_SHIFT; + + pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n", + val, server, guest_prio); + /* + * If the source doesn't already have an IPI, allocate + * one and get the corresponding data + */ + if (!state->ipi_number) { + state->ipi_number = 
xive_native_alloc_irq(); + if (state->ipi_number == 0) { + pr_devel("Failed to allocate IPI !\n"); + return -ENOMEM; + } + xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); + pr_devel(" src_ipi=0x%x\n", state->ipi_number); + } + + /* + * We use lock_and_mask() to set us in the right masked + * state. We will override that state from the saved state + * further down, but this will handle the cases of interrupts + * that need FW masking. We set the initial guest_priority to + * 0 before calling it to ensure it actually performs the masking. + */ + state->guest_priority = 0; + xive_lock_and_mask(xive, sb, state); + + /* + * Now, we select a target if we have one. If we don't we + * leave the interrupt untargetted. It means that an interrupt + * can become "untargetted" accross migration if it was masked + * by set_xive() but there is little we can do about it. + */ + + /* First convert prio and mark interrupt as untargetted */ + act_prio = xive_prio_from_guest(guest_prio); + state->act_priority = MASKED; + state->guest_server = server; + + /* + * We need to drop the lock due to the mutex below. Hopefully + * nothing is touching that interrupt yet since it hasn't been + * advertized to a running guest yet + */ + arch_spin_unlock(&sb->lock); + + /* If we have a priority target the interrupt */ + if (act_prio != MASKED) { + /* First, check provisioning of queues */ + mutex_lock(&xive->kvm->lock); + rc = xive_check_provisioning(xive->kvm, act_prio); + mutex_unlock(&xive->kvm->lock); + + /* Target interrupt */ + if (rc == 0) + rc = xive_target_interrupt(xive->kvm, state, + server, act_prio); + /* + * If provisioning or targetting failed, leave it + * alone and masked. It will remain disabled until + * the guest re-targets it. + */ + } + + /* + * Find out if this was a delayed irq stashed in an ICP, + * in which case, treat it as pending + */ + if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { + val |= KVM_XICS_PENDING; + pr_devel(" Found delayed ! forcing PENDING !\n"); + } + + /* Cleanup the SW state */ + state->old_p = false; + state->old_q = false; + state->lsi = false; + state->asserted = false; + + /* Restore LSI state */ + if (val & KVM_XICS_LEVEL_SENSITIVE) { + state->lsi = true; + if (val & KVM_XICS_PENDING) + state->asserted = true; + pr_devel(" LSI ! Asserted=%d\n", state->asserted); + } + + /* + * Restore P and Q. If the interrupt was pending, we + * force both P and Q, which will trigger a resend. + * + * That means that a guest that had both an interrupt + * pending (queued) and Q set will restore with only + * one instance of that interrupt instead of 2, but that + * is perfectly fine as coalescing interrupts that haven't + * been presented yet is always allowed. + */ + if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + state->old_p = true; + if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) + state->old_q = true; + + pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); + + /* + * If the interrupt was unmasked, update guest priority and + * perform the appropriate state transition and do a + * re-trigger if necessary. 
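As an illustrative aside (not part of the patch), the P/Q restore performed just above can be restated as a pure function; the point of the coalescing comment is visible in the last row of the table: a PENDING source restores as P=1, Q=1 and is therefore re-delivered exactly once.

/* Illustrative only -- same mapping as the code above:
 *
 *   saved flags                 old_p  old_q
 *   (none)                        0      0
 *   PRESENTED                     1      0
 *   QUEUED                        0      1
 *   PRESENTED | QUEUED            1      1
 *   PENDING (any combination)     1      1
 */
static void model_restore_pq(u64 val, bool *old_p, bool *old_q)
{
	*old_p = !!(val & (KVM_XICS_PRESENTED | KVM_XICS_PENDING));
	*old_q = !!(val & (KVM_XICS_QUEUED | KVM_XICS_PENDING));
}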
+ */ + if (val & KVM_XICS_MASKED) { + pr_devel(" masked, saving prio\n"); + state->guest_priority = MASKED; + state->saved_priority = guest_prio; + } else { + pr_devel(" unmasked, restoring to prio %d\n", guest_prio); + xive_finish_unmask(xive, sb, state, guest_prio); + state->saved_priority = guest_prio; + } + + /* Increment the number of valid sources and mark this one valid */ + if (!state->valid) + xive->src_count++; + state->valid = true; + + return 0; +} + +int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, + bool line_status) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + u16 idx; + + if (!xive) + return -ENODEV; + + sb = kvmppc_xive_find_source(xive, irq, &idx); + if (!sb) + return -EINVAL; + + /* Perform locklessly .... (we need to do some RCUisms here...) */ + state = &sb->irq_state[idx]; + if (!state->valid) + return -EINVAL; + + /* We don't allow a trigger on a passed-through interrupt */ + if (state->pt_number) + return -EINVAL; + + if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) + state->asserted = 1; + else if (level == 0 || level == KVM_INTERRUPT_UNSET) { + state->asserted = 0; + return 0; + } + + /* Trigger the IPI */ + xive_irq_trigger(&state->ipi_data); + + return 0; +} + +static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + struct kvmppc_xive *xive = dev->private; + + /* We honor the existing XICS ioctl */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + return xive_set_source(xive, attr->attr, attr->addr); + } + return -ENXIO; +} + +static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + struct kvmppc_xive *xive = dev->private; + + /* We honor the existing XICS ioctl */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + return xive_get_source(xive, attr->attr, attr->addr); + } + return -ENXIO; +} + +static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) +{ + /* We honor the same limits as XICS, at least for now */ + switch (attr->group) { + case KVM_DEV_XICS_GRP_SOURCES: + if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && + attr->attr < KVMPPC_XICS_NR_IRQS) + return 0; + break; + } + return -ENXIO; +} + +static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) +{ + xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); + xive_native_configure_irq(hw_num, 0, MASKED, 0); + xive_cleanup_irq_data(xd); +} + +static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) +{ + int i; + + for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { + struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; + + if (!state->valid) + continue; + + kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); + xive_native_free_irq(state->ipi_number); + + /* Pass-through, cleanup too */ + if (state->pt_number) + kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); + + state->valid = false; + } +} + +static void kvmppc_xive_free(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = dev->private; + struct kvm *kvm = xive->kvm; + int i; + + debugfs_remove(xive->dentry); + + if (kvm) + kvm->arch.xive = NULL; + + /* Mask and free interrupts */ + for (i = 0; i <= xive->max_sbid; i++) { + if (xive->src_blocks[i]) + kvmppc_xive_free_sources(xive->src_blocks[i]); + kfree(xive->src_blocks[i]); + xive->src_blocks[i] = NULL; + } + + if (xive->vp_base != XIVE_INVALID_VP) + xive_native_free_vp_block(xive->vp_base); + + + kfree(xive); + kfree(dev); +} + +static int 
kvmppc_xive_create(struct kvm_device *dev, u32 type) +{ + struct kvmppc_xive *xive; + struct kvm *kvm = dev->kvm; + int ret = 0; + + pr_devel("Creating xive for partition\n"); + + xive = kzalloc(sizeof(*xive), GFP_KERNEL); + if (!xive) + return -ENOMEM; + + dev->private = xive; + xive->dev = dev; + xive->kvm = kvm; + + /* Already there ? */ + if (kvm->arch.xive) + ret = -EEXIST; + else + kvm->arch.xive = xive; + + /* We use the default queue size set by the host */ + xive->q_order = xive_native_default_eq_shift(); + if (xive->q_order < PAGE_SHIFT) + xive->q_page_order = 0; + else + xive->q_page_order = xive->q_order - PAGE_SHIFT; + + /* Allocate a bunch of VPs */ + xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS); + pr_devel("VP_Base=%x\n", xive->vp_base); + + if (xive->vp_base == XIVE_INVALID_VP) + ret = -ENOMEM; + + if (ret) { + kfree(xive); + return ret; + } + + return 0; +} + + +static int xive_debug_show(struct seq_file *m, void *private) +{ + struct kvmppc_xive *xive = m->private; + struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; + u64 t_rm_h_xirr = 0; + u64 t_rm_h_ipoll = 0; + u64 t_rm_h_cppr = 0; + u64 t_rm_h_eoi = 0; + u64 t_rm_h_ipi = 0; + u64 t_vm_h_xirr = 0; + u64 t_vm_h_ipoll = 0; + u64 t_vm_h_cppr = 0; + u64 t_vm_h_eoi = 0; + u64 t_vm_h_ipi = 0; + unsigned int i; + + if (!kvm) + return 0; + + seq_printf(m, "=========\nVCPU state\n=========\n"); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + if (!xc) + continue; + + seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x" + " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n", + xc->server_num, xc->cppr, xc->hw_cppr, + xc->mfrr, xc->pending, + xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); + + t_rm_h_xirr += xc->stat_rm_h_xirr; + t_rm_h_ipoll += xc->stat_rm_h_ipoll; + t_rm_h_cppr += xc->stat_rm_h_cppr; + t_rm_h_eoi += xc->stat_rm_h_eoi; + t_rm_h_ipi += xc->stat_rm_h_ipi; + t_vm_h_xirr += xc->stat_vm_h_xirr; + t_vm_h_ipoll += xc->stat_vm_h_ipoll; + t_vm_h_cppr += xc->stat_vm_h_cppr; + t_vm_h_eoi += xc->stat_vm_h_eoi; + t_vm_h_ipi += xc->stat_vm_h_ipi; + } + + seq_printf(m, "Hcalls totals\n"); + seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr); + seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll); + seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr); + seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi); + seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi); + + return 0; +} + +static int xive_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, xive_debug_show, inode->i_private); +} + +static const struct file_operations xive_debug_fops = { + .open = xive_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void xive_debugfs_init(struct kvmppc_xive *xive) +{ + char *name; + + name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); + if (!name) { + pr_err("%s: no memory for name\n", __func__); + return; + } + + xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root, + xive, &xive_debug_fops); + + pr_debug("%s: created %s\n", __func__, name); + kfree(name); +} + +static void kvmppc_xive_init(struct kvm_device *dev) +{ + struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private; + + /* Register some debug interfaces */ + xive_debugfs_init(xive); +} + +struct kvm_device_ops kvm_xive_ops = { + .name = "kvm-xive", + .create = kvmppc_xive_create, + .init = kvmppc_xive_init, + .destroy = kvmppc_xive_free, + 
.set_attr = xive_set_attr, + .get_attr = xive_get_attr, + .has_attr = xive_has_attr, +}; + +void kvmppc_xive_init_module(void) +{ + __xive_vm_h_xirr = xive_vm_h_xirr; + __xive_vm_h_ipoll = xive_vm_h_ipoll; + __xive_vm_h_ipi = xive_vm_h_ipi; + __xive_vm_h_cppr = xive_vm_h_cppr; + __xive_vm_h_eoi = xive_vm_h_eoi; +} + +void kvmppc_xive_exit_module(void) +{ + __xive_vm_h_xirr = NULL; + __xive_vm_h_ipoll = NULL; + __xive_vm_h_ipi = NULL; + __xive_vm_h_cppr = NULL; + __xive_vm_h_eoi = NULL; +} diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h new file mode 100644 index 000000000000..5938f7644dc1 --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive.h @@ -0,0 +1,256 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +#ifndef _KVM_PPC_BOOK3S_XIVE_H +#define _KVM_PPC_BOOK3S_XIVE_H + +#ifdef CONFIG_KVM_XICS +#include "book3s_xics.h" + +/* + * State for one guest irq source. + * + * For each guest source we allocate a HW interrupt in the XIVE + * which we use for all SW triggers. It will be unused for + * pass-through but it's easier to keep around as the same + * guest interrupt can alternatively be emulated or pass-through + * if a physical device is hot unplugged and replaced with an + * emulated one. + * + * This state structure is very similar to the XICS one with + * additional XIVE specific tracking. + */ +struct kvmppc_xive_irq_state { + bool valid; /* Interrupt entry is valid */ + + u32 number; /* Guest IRQ number */ + u32 ipi_number; /* XIVE IPI HW number */ + struct xive_irq_data ipi_data; /* XIVE IPI associated data */ + u32 pt_number; /* XIVE Pass-through number if any */ + struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ + + /* Targetting as set by guest */ + u32 guest_server; /* Current guest selected target */ + u8 guest_priority; /* Guest set priority */ + u8 saved_priority; /* Saved priority when masking */ + + /* Actual targetting */ + u32 act_server; /* Actual server */ + u8 act_priority; /* Actual priority */ + + /* Various state bits */ + bool in_eoi; /* Synchronize with H_EOI */ + bool old_p; /* P bit state when masking */ + bool old_q; /* Q bit state when masking */ + bool lsi; /* level-sensitive interrupt */ + bool asserted; /* Only for emulated LSI: current state */ + + /* Saved for migration state */ + bool in_queue; + bool saved_p; + bool saved_q; + u8 saved_scan_prio; +}; + +/* Select the "right" interrupt (IPI vs. passthrough) */ +static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state, + u32 *out_hw_irq, + struct xive_irq_data **out_xd) +{ + if (state->pt_number) { + if (out_hw_irq) + *out_hw_irq = state->pt_number; + if (out_xd) + *out_xd = state->pt_data; + } else { + if (out_hw_irq) + *out_hw_irq = state->ipi_number; + if (out_xd) + *out_xd = &state->ipi_data; + } +} + +/* + * This corresponds to an "ICS" in XICS terminology, we use it + * as a mean to break up source information into multiple structures. 
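As an illustrative aside (not part of the patch): the guest IRQ number space is simply a block id concatenated with a per-block source index, which is the split kvmppc_xive_find_source() below performs. A hedged sketch, reusing the KVMPPC_XICS_* constants from book3s_xics.h rather than assuming their values:

/* Illustrative only */
static inline void model_split_irq(u32 irq, u32 *block_id, u16 *src_idx)
{
	*block_id = irq >> KVMPPC_XICS_ICS_SHIFT;   /* selects src_blocks[block_id]      */
	*src_idx  = irq & KVMPPC_XICS_SRC_MASK;     /* selects irq_state[src_idx] in it  */
}

/* A typical lookup then reads:
 *	sb    = xive->src_blocks[block_id];
 *	state = &sb->irq_state[src_idx];
 */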
+ */ +struct kvmppc_xive_src_block { + arch_spinlock_t lock; + u16 id; + struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS]; +}; + + +struct kvmppc_xive { + struct kvm *kvm; + struct kvm_device *dev; + struct dentry *dentry; + + /* VP block associated with the VM */ + u32 vp_base; + + /* Blocks of sources */ + struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1]; + u32 max_sbid; + + /* + * For state save, we lazily scan the queues on the first interrupt + * being migrated. We don't have a clean way to reset that flags + * so we keep track of the number of valid sources and how many of + * them were migrated so we can reset when all of them have been + * processed. + */ + u32 src_count; + u32 saved_src_count; + + /* + * Some irqs are delayed on restore until the source is created, + * keep track here of how many of them + */ + u32 delayed_irqs; + + /* Which queues (priorities) are in use by the guest */ + u8 qmap; + + /* Queue orders */ + u32 q_order; + u32 q_page_order; + +}; + +#define KVMPPC_XIVE_Q_COUNT 8 + +struct kvmppc_xive_vcpu { + struct kvmppc_xive *xive; + struct kvm_vcpu *vcpu; + bool valid; + + /* Server number. This is the HW CPU ID from a guest perspective */ + u32 server_num; + + /* + * HW VP corresponding to this VCPU. This is the base of the VP + * block plus the server number. + */ + u32 vp_id; + u32 vp_chip_id; + u32 vp_cam; + + /* IPI used for sending ... IPIs */ + u32 vp_ipi; + struct xive_irq_data vp_ipi_data; + + /* Local emulation state */ + uint8_t cppr; /* guest CPPR */ + uint8_t hw_cppr;/* Hardware CPPR */ + uint8_t mfrr; + uint8_t pending; + + /* Each VP has 8 queues though we only provision some */ + struct xive_q queues[KVMPPC_XIVE_Q_COUNT]; + u32 esc_virq[KVMPPC_XIVE_Q_COUNT]; + char *esc_virq_names[KVMPPC_XIVE_Q_COUNT]; + + /* Stash a delayed irq on restore from migration (see set_icp) */ + u32 delayed_irq; + + /* Stats */ + u64 stat_rm_h_xirr; + u64 stat_rm_h_ipoll; + u64 stat_rm_h_cppr; + u64 stat_rm_h_eoi; + u64 stat_rm_h_ipi; + u64 stat_vm_h_xirr; + u64 stat_vm_h_ipoll; + u64 stat_vm_h_cppr; + u64 stat_vm_h_eoi; + u64 stat_vm_h_ipi; +}; + +static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr) +{ + struct kvm_vcpu *vcpu = NULL; + int i; + + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num) + return vcpu; + } + return NULL; +} + +static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive, + u32 irq, u16 *source) +{ + u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT; + u16 src = irq & KVMPPC_XICS_SRC_MASK; + + if (source) + *source = src; + if (bid > KVMPPC_XICS_MAX_ICS_ID) + return NULL; + return xive->src_blocks[bid]; +} + +/* + * Mapping between guest priorities and host priorities + * is as follow. + * + * Guest request for 0...6 are honored. Guest request for anything + * higher results in a priority of 7 being applied. 
+ * + * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb + * in order to match AIX expectations + * + * Similar mapping is done for CPPR values + */ +static inline u8 xive_prio_from_guest(u8 prio) +{ + if (prio == 0xff || prio < 8) + return prio; + return 7; +} + +static inline u8 xive_prio_to_guest(u8 prio) +{ + if (prio == 0xff || prio < 7) + return prio; + return 0xb; +} + +static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle) +{ + u32 cur; + + if (!qpage) + return 0; + cur = be32_to_cpup(qpage + *idx); + if ((cur >> 31) == *toggle) + return 0; + *idx = (*idx + 1) & msk; + if (*idx == 0) + (*toggle) ^= 1; + return cur & 0x7fffffff; +} + +extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu); +extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server); +extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr); + +extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu); +extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server); +extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr); +extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); +extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); + +#endif /* CONFIG_KVM_XICS */ +#endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c new file mode 100644 index 000000000000..023a31133c37 --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive_template.c @@ -0,0 +1,503 @@ +/* + * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + */ + +/* File to be included by other .c files */ + +#define XGLUE(a,b) a##b +#define GLUE(a,b) XGLUE(a,b) + +static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc) +{ + u8 cppr; + u16 ack; + + /* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */ + + /* Perform the acknowledge OS to register cycle. */ + ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG)); + + /* Synchronize subsequent queue accesses */ + mb(); + + /* XXX Check grouping level */ + + /* Anything ? */ + if (!((ack >> 8) & TM_QW1_NSR_EO)) + return; + + /* Grab CPPR of the most favored pending interrupt */ + cppr = ack & 0xff; + if (cppr < 8) + xc->pending |= 1 << cppr; + +#ifdef XIVE_RUNTIME_CHECKS + /* Check consistency */ + if (cppr >= xc->hw_cppr) + pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n", + smp_processor_id(), cppr, xc->hw_cppr); +#endif + + /* + * Update our image of the HW CPPR. We don't yet modify + * xc->cppr, this will be done as we scan for interrupts + * in the queues. 
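As an illustrative aside (not part of the patch): the generation-flip ("toggle") scan implemented by __xive_read_eq() above is the subtle part of the queue handling, so here is a self-contained model of it in plain host-endian C. An entry is valid only while its top bit differs from the reader's toggle; the toggle flips every time the index wraps, and the low 31 bits carry the IRQ number.

#include <stdint.h>

struct model_eq {                  /* illustrative only */
	uint32_t *qpage;           /* queue page (host endian in this model)  */
	uint32_t  msk;             /* number of entries - 1 (power of two)    */
	uint32_t  idx;             /* next slot to look at                    */
	uint32_t  toggle;          /* current generation bit                  */
};

static uint32_t model_read_eq(struct model_eq *q)
{
	uint32_t cur = q->qpage[q->idx];

	/* Top bit equal to our toggle: the entry belongs to the previous
	 * generation, i.e. the queue is empty for us.
	 */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* Consume the entry and flip the generation on wrap-around */
	q->idx = (q->idx + 1) & q->msk;
	if (q->idx == 0)
		q->toggle ^= 1;

	return cur & 0x7fffffff;
}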
+ */ + xc->hw_cppr = cppr; +} + +static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset) +{ + u64 val; + + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) + offset |= offset << 4; + + val =__x_readq(__x_eoi_page(xd) + offset); +#ifdef __LITTLE_ENDIAN__ + val >>= 64-8; +#endif + return (u8)val; +} + + +static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd) +{ + /* If the XIVE supports the new "store EOI facility, use it */ + if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) + __x_writeq(0, __x_eoi_page(xd)); + else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { + opal_int_eoi(hw_irq); + } else { + uint64_t eoi_val; + + /* + * Otherwise for EOI, we use the special MMIO that does + * a clear of both P and Q and returns the old Q, + * except for LSIs where we use the "EOI cycle" special + * load. + * + * This allows us to then do a re-trigger if Q was set + * rather than synthetizing an interrupt in software + * + * For LSIs, using the HW EOI cycle works around a problem + * on P9 DD1 PHBs where the other ESB accesses don't work + * properly. + */ + if (xd->flags & XIVE_IRQ_FLAG_LSI) + __x_readq(__x_eoi_page(xd)); + else { + eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); + + /* Re-trigger if needed */ + if ((eoi_val & 1) && __x_trig_page(xd)) + __x_writeq(0, __x_trig_page(xd)); + } + } +} + +enum { + scan_fetch, + scan_poll, + scan_eoi, +}; + +static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc, + u8 pending, int scan_type) +{ + u32 hirq = 0; + u8 prio = 0xff; + + /* Find highest pending priority */ + while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) { + struct xive_q *q; + u32 idx, toggle; + __be32 *qpage; + + /* + * If pending is 0 this will return 0xff which is what + * we want + */ + prio = ffs(pending) - 1; + + /* + * If the most favoured prio we found pending is less + * favored (or equal) than a pending IPI, we return + * the IPI instead. + * + * Note: If pending was 0 and mfrr is 0xff, we will + * not spurriously take an IPI because mfrr cannot + * then be smaller than cppr. + */ + if (prio >= xc->mfrr && xc->mfrr < xc->cppr) { + prio = xc->mfrr; + hirq = XICS_IPI; + break; + } + + /* Don't scan past the guest cppr */ + if (prio >= xc->cppr || prio > 7) + break; + + /* Grab queue and pointers */ + q = &xc->queues[prio]; + idx = q->idx; + toggle = q->toggle; + + /* + * Snapshot the queue page. The test further down for EOI + * must use the same "copy" that was used by __xive_read_eq + * since qpage can be set concurrently and we don't want + * to miss an EOI. + */ + qpage = READ_ONCE(q->qpage); + +skip_ipi: + /* + * Try to fetch from the queue. Will return 0 for a + * non-queueing priority (ie, qpage = 0). + */ + hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle); + + /* + * If this was a signal for an MFFR change done by + * H_IPI we skip it. Additionally, if we were fetching + * we EOI it now, thus re-enabling reception of a new + * such signal. + * + * We also need to do that if prio is 0 and we had no + * page for the queue. In this case, we have non-queued + * IPI that needs to be EOId. + * + * This is safe because if we have another pending MFRR + * change that wasn't observed above, the Q bit will have + * been set and another occurrence of the IPI will trigger. 
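As an illustrative aside (not part of the patch): most of the logic above and below manipulates the two ESB bits P and Q. From the way this series uses them, P means a notification has been presented (and blocks further ones) and Q records that another trigger arrived meanwhile; the XIVE_ESB_SET_PQ_xx special loads set the pair to xx and return the old pair, with P in bit 1 and Q in bit 0, which is what the "& 2" and "& 1" tests rely on. Two tiny hedged helpers to make that explicit:

/* Illustrative only: decode the 2-bit value returned by an
 * XIVE_ESB_SET_PQ_xx load (or stashed in saved state).
 */
static inline bool model_pq_p(u8 pq) { return pq & 2; }  /* presented / in flight       */
static inline bool model_pq_q(u8 pq) { return pq & 1; }  /* a further trigger occurred  */

/* E.g. EOI above is "set PQ=00, then re-trigger if model_pq_q(old) was set",
 * and masking uses "set PQ=01", which both blocks notifications and hands
 * back the previous P/Q so they can be saved.
 */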
+ */ + if (hirq == XICS_IPI || (prio == 0 && !qpage)) { + if (scan_type == scan_fetch) + GLUE(X_PFX,source_eoi)(xc->vp_ipi, + &xc->vp_ipi_data); + /* Loop back on same queue with updated idx/toggle */ +#ifdef XIVE_RUNTIME_CHECKS + WARN_ON(hirq && hirq != XICS_IPI); +#endif + if (hirq) + goto skip_ipi; + } + + /* If fetching, update queue pointers */ + if (scan_type == scan_fetch) { + q->idx = idx; + q->toggle = toggle; + } + + /* Something found, stop searching */ + if (hirq) + break; + + /* Clear the pending bit on the now empty queue */ + pending &= ~(1 << prio); + + /* + * Check if the queue count needs adjusting due to + * interrupts being moved away. + */ + if (atomic_read(&q->pending_count)) { + int p = atomic_xchg(&q->pending_count, 0); + if (p) { +#ifdef XIVE_RUNTIME_CHECKS + WARN_ON(p > atomic_read(&q->count)); +#endif + atomic_sub(p, &q->count); + } + } + } + + /* If we are just taking a "peek", do nothing else */ + if (scan_type == scan_poll) + return hirq; + + /* Update the pending bits */ + xc->pending = pending; + + /* + * If this is an EOI that's it, no CPPR adjustment done here, + * all we needed was cleanup the stale pending bits and check + * if there's anything left. + */ + if (scan_type == scan_eoi) + return hirq; + + /* + * If we found an interrupt, adjust what the guest CPPR should + * be as if we had just fetched that interrupt from HW. + */ + if (hirq) + xc->cppr = prio; + /* + * If it was an IPI the HW CPPR might have been lowered too much + * as the HW interrupt we use for IPIs is routed to priority 0. + * + * We re-sync it here. + */ + if (xc->cppr != xc->hw_cppr) { + xc->hw_cppr = xc->cppr; + __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); + } + + return hirq; +} + +X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 old_cppr; + u32 hirq; + + pr_devel("H_XIRR\n"); + + xc->GLUE(X_STAT_PFX,h_xirr)++; + + /* First collect pending bits from HW */ + GLUE(X_PFX,ack_pending)(xc); + + /* + * Cleanup the old-style bits if needed (they may have been + * set by pull or an escalation interrupts). + */ + if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions)) + clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, + &vcpu->arch.pending_exceptions); + + pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n", + xc->pending, xc->hw_cppr, xc->cppr); + + /* Grab previous CPPR and reverse map it */ + old_cppr = xive_prio_to_guest(xc->cppr); + + /* Scan for actual interrupts */ + hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch); + + pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n", + hirq, xc->hw_cppr, xc->cppr); + +#ifdef XIVE_RUNTIME_CHECKS + /* That should never hit */ + if (hirq & 0xff000000) + pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq); +#endif + + /* + * XXX We could check if the interrupt is masked here and + * filter it. 
If we chose to do so, we would need to do: + * + * if (masked) { + * lock(); + * if (masked) { + * old_Q = true; + * hirq = 0; + * } + * unlock(); + * } + */ + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.gpr[4] = hirq | (old_cppr << 24); + + return H_SUCCESS; +} + +X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 pending = xc->pending; + u32 hirq; + u8 pipr; + + pr_devel("H_IPOLL(server=%ld)\n", server); + + xc->GLUE(X_STAT_PFX,h_ipoll)++; + + /* Grab the target VCPU if not the current one */ + if (xc->server_num != server) { + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Scan all priorities */ + pending = 0xff; + } else { + /* Grab pending interrupt if any */ + pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR); + if (pipr < 8) + pending |= 1 << pipr; + } + + hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll); + + /* Return interrupt and old CPPR in GPR4 */ + vcpu->arch.gpr[4] = hirq | (xc->cppr << 24); + + return H_SUCCESS; +} + +static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc) +{ + u8 pending, prio; + + pending = xc->pending; + if (xc->mfrr != 0xff) { + if (xc->mfrr < 8) + pending |= 1 << xc->mfrr; + else + pending |= 0x80; + } + if (!pending) + return; + prio = ffs(pending) - 1; + + __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING); +} + +X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + u8 old_cppr; + + pr_devel("H_CPPR(cppr=%ld)\n", cppr); + + xc->GLUE(X_STAT_PFX,h_cppr)++; + + /* Map CPPR */ + cppr = xive_prio_from_guest(cppr); + + /* Remember old and update SW state */ + old_cppr = xc->cppr; + xc->cppr = cppr; + + /* + * We are masking less, we need to look for pending things + * to deliver and set VP pending bits accordingly to trigger + * a new interrupt otherwise we might miss MFRR changes for + * which we have optimized out sending an IPI signal. + */ + if (cppr > old_cppr) + GLUE(X_PFX,push_pending_to_hw)(xc); + + /* Apply new CPPR */ + xc->hw_cppr = cppr; + __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR); + + return H_SUCCESS; +} + +X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr) +{ + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_irq_data *xd; + u8 new_cppr = xirr >> 24; + u32 irq = xirr & 0x00ffffff, hw_num; + u16 src; + int rc = 0; + + pr_devel("H_EOI(xirr=%08lx)\n", xirr); + + xc->GLUE(X_STAT_PFX,h_eoi)++; + + xc->cppr = xive_prio_from_guest(new_cppr); + + /* + * IPIs are synthetized from MFRR and thus don't need + * any special EOI handling. The underlying interrupt + * used to signal MFRR changes is EOId when fetched from + * the queue. 
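As an illustrative aside (not part of the patch): the "pending" byte carried around above is a bitmap with one bit per priority, and "most favored" means the lowest-numbered set bit. A self-contained model of the selection used by scan_interrupts() and push_pending_to_hw(), including the deliberate wrap to 0xff when nothing is pending:

#include <stdint.h>
#include <strings.h>       /* ffs(); the kernel's ffs() behaves the same way here */

/* Illustrative only: bit N set in "pending" means priority N has work.
 * Priority 0 is the most favored.  ffs(0) returns 0, so an empty bitmap
 * yields (uint8_t)(0 - 1) == 0xff, the "no priority" value the code
 * above relies on.
 */
static uint8_t model_best_prio(uint8_t pending)
{
	return (uint8_t)(ffs(pending) - 1);
}

/* model_best_prio(0x14) == 2    (bits 2 and 4 set)
 * model_best_prio(0x00) == 0xff
 */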
+ */ + if (irq == XICS_IPI || irq == 0) + goto bail; + + /* Find interrupt source */ + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) { + pr_devel(" source not found !\n"); + rc = H_PARAMETER; + goto bail; + } + state = &sb->irq_state[src]; + kvmppc_xive_select_irq(state, &hw_num, &xd); + + state->in_eoi = true; + mb(); + +again: + if (state->guest_priority == MASKED) { + arch_spin_lock(&sb->lock); + if (state->guest_priority != MASKED) { + arch_spin_unlock(&sb->lock); + goto again; + } + pr_devel(" EOI on saved P...\n"); + + /* Clear old_p, that will cause unmask to perform an EOI */ + state->old_p = false; + + arch_spin_unlock(&sb->lock); + } else { + pr_devel(" EOI on source...\n"); + + /* Perform EOI on the source */ + GLUE(X_PFX,source_eoi)(hw_num, xd); + + /* If it's an emulated LSI, check level and resend */ + if (state->lsi && state->asserted) + __x_writeq(0, __x_trig_page(xd)); + + } + + mb(); + state->in_eoi = false; +bail: + + /* Re-evaluate pending IRQs and update HW */ + GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi); + GLUE(X_PFX,push_pending_to_hw)(xc); + pr_devel(" after scan pending=%02x\n", xc->pending); + + /* Apply new CPPR */ + xc->hw_cppr = xc->cppr; + __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR); + + return rc; +} + +X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, + unsigned long mfrr) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + + pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr); + + xc->GLUE(X_STAT_PFX,h_ipi)++; + + /* Find target */ + vcpu = kvmppc_xive_find_server(vcpu->kvm, server); + if (!vcpu) + return H_PARAMETER; + xc = vcpu->arch.xive_vcpu; + + /* Locklessly write over MFRR */ + xc->mfrr = mfrr; + + /* Shoot the IPI if most favored than target cppr */ + if (mfrr < xc->cppr) + __x_writeq(0, __x_trig_page(&xc->vp_ipi_data)); + + return H_SUCCESS; +} diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 0514cbd4e533..3eaac3809977 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -300,6 +300,11 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); } +void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu) +{ + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); +} + void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) { kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); @@ -579,7 +584,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu) * userspace, so clear the KVM_REQ_WATCHDOG request. 
*/ if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) - clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests); + kvm_clear_request(KVM_REQ_WATCHDOG, vcpu); spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); nr_jiffies = watchdog_next_timeout(vcpu); @@ -690,7 +695,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) if (vcpu->arch.shared->msr & MSR_WE) { local_irq_enable(); kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); hard_irq_disable(); kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index 0fda4230f6c0..77fd043b3ecc 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -797,9 +797,8 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) host_tlb_params[0].sets = host_tlb_params[0].entries / host_tlb_params[0].ways; host_tlb_params[1].sets = 1; - - vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * - host_tlb_params[1].entries, + vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries, + sizeof(*vcpu_e500->h2g_tlb1_rmap), GFP_KERNEL); if (!vcpu_e500->h2g_tlb1_rmap) return -EINVAL; diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index b379146de55b..c873ffe55362 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -259,10 +259,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) case OP_31_XOP_MFSPR: emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); + if (emulated == EMULATE_AGAIN) { + emulated = EMULATE_DONE; + advance = 0; + } break; case OP_31_XOP_MTSPR: emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); + if (emulated == EMULATE_AGAIN) { + emulated = EMULATE_DONE; + advance = 0; + } break; case OP_31_XOP_TLBSYNC: diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c index 6d3c0ee1d744..af833531af31 100644 --- a/arch/powerpc/kvm/emulate_loadstore.c +++ b/arch/powerpc/kvm/emulate_loadstore.c @@ -34,18 +34,38 @@ #include "timing.h" #include "trace.h" -/* XXX to do: - * lhax - * lhaux - * lswx - * lswi - * stswx - * stswi - * lha - * lhau - * lmw - * stmw +#ifdef CONFIG_PPC_FPU +static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) +{ + if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { + kvmppc_core_queue_fpunavail(vcpu); + return true; + } + + return false; +} +#endif /* CONFIG_PPC_FPU */ + +#ifdef CONFIG_VSX +static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) +{ + if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { + kvmppc_core_queue_vsx_unavail(vcpu); + return true; + } + + return false; +} +#endif /* CONFIG_VSX */ + +/* + * XXX to do: + * lfiwax, lfiwzx + * vector loads and stores * + * Instructions that trap when used on cache-inhibited mappings + * are not emulated here: multiple and string instructions, + * lq/stq, and the load-reserve/store-conditional instructions. 
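As an illustrative aside (not part of the patch): the big switch below is driven purely by instruction fields, extracted with the get_op()/get_xop()/get_rt()/get_rs()/get_ra() helpers from asm/disassemble.h plus get_tx_or_sx() for the VSX forms. A self-contained restatement of those field positions for a 32-bit PowerPC instruction word:

#include <stdint.h>

/* Illustrative only -- standard PowerPC field layout */
static inline unsigned int model_get_op(uint32_t inst)  { return inst >> 26; }           /* primary opcode             */
static inline unsigned int model_get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }  /* X-form extended opcode     */
static inline unsigned int model_get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }  /* RT (RS shares this field)  */
static inline unsigned int model_get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }  /* RA: base / update register */
static inline unsigned int model_get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }  /* RB: index register         */
static inline unsigned int model_get_tx_or_sx(uint32_t inst) { return inst & 0x1; }      /* VSX: VSR 0-31 vs 32-63     */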
*/ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) { @@ -66,6 +86,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) rs = get_rs(inst); rt = get_rt(inst); + /* + * if mmio_vsx_tx_sx_enabled == 0, copy data between + * VSR[0..31] and memory + * if mmio_vsx_tx_sx_enabled == 1, copy data between + * VSR[32..63] and memory + */ + vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst); + vcpu->arch.mmio_vsx_copy_nums = 0; + vcpu->arch.mmio_vsx_offset = 0; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE; + vcpu->arch.mmio_sp64_extend = 0; + vcpu->arch.mmio_sign_extend = 0; + switch (get_op(inst)) { case 31: switch (get_xop(inst)) { @@ -73,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; + case OP_31_XOP_LWZUX: + emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; @@ -82,22 +120,36 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; + case OP_31_XOP_STDX: + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 8, 1); + break; + + case OP_31_XOP_STDUX: + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 4, 1); + kvmppc_get_gpr(vcpu, rs), 4, 1); + break; + + case OP_31_XOP_STWUX: + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 1, 1); + kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 1, 1); + kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; @@ -105,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; + case OP_31_XOP_LHAUX: + emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; @@ -116,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 2, 1); + kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 2, 1); + kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; @@ -143,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 4, 0); + kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: @@ -153,10 +207,258 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 2, 0); + kvmppc_get_gpr(vcpu, rs), 2, 0); + break; + + case OP_31_XOP_LDBRX: + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0); + break; + + case OP_31_XOP_STDBRX: + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 8, 0); + break; + + case OP_31_XOP_LDX: + emulated = 
kvmppc_handle_load(run, vcpu, rt, 8, 1); + break; + + case OP_31_XOP_LDUX: + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_31_XOP_LWAX: + emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); + break; + + case OP_31_XOP_LWAUX: + emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + +#ifdef CONFIG_PPC_FPU + case OP_31_XOP_LFSX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + break; + + case OP_31_XOP_LFSUX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_31_XOP_LFDX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 8, 1); + break; + + case OP_31_XOP_LFDUX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_31_XOP_LFIWAX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_loads(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + break; + + case OP_31_XOP_LFIWZX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + break; + + case OP_31_XOP_STFSX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), 4, 1); + break; + + case OP_31_XOP_STFSUX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_31_XOP_STFDX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), 8, 1); + break; + + case OP_31_XOP_STFDUX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_31_XOP_STFIWX: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), 4, 1); + break; +#endif + +#ifdef CONFIG_VSX + case OP_31_XOP_LXSDX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 8, 1, 0); + break; + + case OP_31_XOP_LXSSPX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 4, 1, 0); + break; + + case OP_31_XOP_LXSIWAX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 4, 1, 1); + break; + + 
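As an illustrative aside (not part of the patch): the vector cases that follow split one VSX access into several plain MMIO transfers -- see the lxvd2x comment below; a 16-byte lxvd2x becomes two 8-byte copies and lxvw4x four 4-byte copies. A hedged sketch of the per-chunk bookkeeping that kvmppc_handle_vsx_load()/_store() perform later in this patch:

/* Illustrative only: one chunk is transferred per MMIO round trip */
struct model_vsx_mmio {
	int copy_nums;    /* chunks still to transfer (mmio_vsx_copy_nums)     */
	int offset;       /* next dword/word slot in the VSR (mmio_vsx_offset) */
	u64 gpa;          /* next guest physical address (paddr_accessed)      */
};

static void model_vsx_advance(struct model_vsx_mmio *m, unsigned int chunk_len)
{
	m->gpa += chunk_len;    /* step past the chunk just transferred  */
	m->copy_nums--;         /* one fewer chunk outstanding           */
	m->offset++;            /* next chunk lands in the next VSR slot */
}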
case OP_31_XOP_LXSIWZX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 4, 1, 0); + break; + + case OP_31_XOP_LXVD2X: + /* + * In this case, the official load/store process is like this: + * Step1, exit from vm by page fault isr, then kvm save vsr. + * Please see guest_exit_cont->store_fp_state->SAVE_32VSRS + * as reference. + * + * Step2, copy data between memory and VCPU + * Notice: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, we use + * 2copies*8bytes or 4copies*4bytes + * to simulate one copy of 16bytes. + * Also there is an endian issue here, we should notice the + * layout of memory. + * Please see MARCO of LXVD2X_ROT/STXVD2X_ROT as more reference. + * If host is little-endian, kvm will call XXSWAPD for + * LXVD2X_ROT/STXVD2X_ROT. + * So, if host is little-endian, + * the postion of memeory should be swapped. + * + * Step3, return to guest, kvm reset register. + * Please see kvmppc_hv_entry->load_fp_state->REST_32VSRS + * as reference. + */ + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 2; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 8, 1, 0); + break; + + case OP_31_XOP_LXVW4X: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 4; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 4, 1, 0); + break; + + case OP_31_XOP_LXVDSX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = + KVMPPC_VSX_COPY_DWORD_LOAD_DUMP; + emulated = kvmppc_handle_vsx_load(run, vcpu, + KVM_MMIO_REG_VSX|rt, 8, 1, 0); + break; + + case OP_31_XOP_STXSDX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_store(run, vcpu, + rs, 8, 1); break; + case OP_31_XOP_STXSSPX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_vsx_store(run, vcpu, + rs, 4, 1); + break; + + case OP_31_XOP_STXSIWX: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_offset = 1; + vcpu->arch.mmio_vsx_copy_nums = 1; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; + emulated = kvmppc_handle_vsx_store(run, vcpu, + rs, 4, 1); + break; + + case OP_31_XOP_STXVD2X: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 2; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD; + emulated = kvmppc_handle_vsx_store(run, vcpu, + rs, 8, 1); + break; + + case OP_31_XOP_STXVW4X: + if (kvmppc_check_vsx_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_vsx_copy_nums = 4; + vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD; + emulated = kvmppc_handle_vsx_store(run, vcpu, + rs, 4, 1); + break; +#endif /* CONFIG_VSX */ default: emulated = EMULATE_FAIL; break; @@ -167,10 +469,60 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; - /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. 
*/ +#ifdef CONFIG_PPC_FPU + case OP_STFS: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), + 4, 1); + break; + + case OP_STFSU: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), + 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_STFD: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), + 8, 1); + break; + + case OP_STFDU: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_store(run, vcpu, + VCPU_FPR(vcpu, rs), + 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; +#endif + case OP_LD: rt = get_rt(inst); - emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); + switch (inst & 3) { + case 0: /* ld */ + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); + break; + case 1: /* ldu */ + emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + case 2: /* lwa */ + emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1); + break; + default: + emulated = EMULATE_FAIL; + } break; case OP_LWZU: @@ -193,31 +545,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) 4, 1); break; - /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ case OP_STD: rs = get_rs(inst); - emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 8, 1); + switch (inst & 3) { + case 0: /* std */ + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 8, 1); + break; + case 1: /* stdu */ + emulated = kvmppc_handle_store(run, vcpu, + kvmppc_get_gpr(vcpu, rs), 8, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + default: + emulated = EMULATE_FAIL; + } break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 4, 1); + kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 1, 1); + kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 1, 1); + kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; @@ -241,16 +599,48 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) case OP_STH: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 2, 1); + kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, - kvmppc_get_gpr(vcpu, rs), - 2, 1); + kvmppc_get_gpr(vcpu, rs), 2, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + +#ifdef CONFIG_PPC_FPU + case OP_LFS: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + break; + + case OP_LFSU: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + vcpu->arch.mmio_sp64_extend = 1; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 4, 1); + kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); + break; + + case OP_LFD: + if (kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 8, 1); + break; + + case OP_LFDU: + if 
(kvmppc_check_fp_disabled(vcpu)) + return EMULATE_DONE; + emulated = kvmppc_handle_load(run, vcpu, + KVM_MMIO_REG_FPR|rt, 8, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; +#endif default: emulated = EMULATE_FAIL; diff --git a/arch/powerpc/kvm/irq.h b/arch/powerpc/kvm/irq.h index 5a9a10b90762..3f1be85a83bc 100644 --- a/arch/powerpc/kvm/irq.h +++ b/arch/powerpc/kvm/irq.h @@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm) #endif #ifdef CONFIG_KVM_XICS ret = ret || (kvm->arch.xics != NULL); + ret = ret || (kvm->arch.xive != NULL); #endif smp_rmb(); return ret; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 95c91a9de351..f7cf2cd564ef 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -37,6 +37,9 @@ #include <asm/cputhreads.h> #include <asm/irqflags.h> #include <asm/iommu.h> +#include <asm/switch_to.h> +#include <asm/xive.h> + #include "timing.h" #include "irq.h" #include "../mm/mmu_decl.h" @@ -232,7 +235,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; kvm_vcpu_block(vcpu); - clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + kvm_clear_request(KVM_REQ_UNHALT, vcpu); break; default: r = EV_UNIMPLEMENTED; @@ -524,11 +527,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) /* We support this only for PR */ r = !hv_enabled; break; -#ifdef CONFIG_KVM_MMIO - case KVM_CAP_COALESCED_MMIO: - r = KVM_COALESCED_MMIO_PAGE_OFFSET; - break; -#endif #ifdef CONFIG_KVM_MPIC case KVM_CAP_IRQ_MPIC: r = 1; @@ -538,6 +536,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #ifdef CONFIG_PPC_BOOK3S_64 case KVM_CAP_SPAPR_TCE: case KVM_CAP_SPAPR_TCE_64: + /* fallthrough */ + case KVM_CAP_SPAPR_TCE_VFIO: case KVM_CAP_PPC_RTAS: case KVM_CAP_PPC_FIXUP_HCALL: case KVM_CAP_PPC_ENABLE_HCALL: @@ -699,7 +699,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); break; case KVMPPC_IRQ_XICS: - kvmppc_xics_free_icp(vcpu); + if (xive_enabled()) + kvmppc_xive_cleanup_vcpu(vcpu); + else + kvmppc_xics_free_icp(vcpu); break; } @@ -806,6 +809,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); } +#ifdef CONFIG_VSX +static inline int kvmppc_get_vsr_dword_offset(int index) +{ + int offset; + + if ((index != 0) && (index != 1)) + return -1; + +#ifdef __BIG_ENDIAN + offset = index; +#else + offset = 1 - index; +#endif + + return offset; +} + +static inline int kvmppc_get_vsr_word_offset(int index) +{ + int offset; + + if ((index > 3) || (index < 0)) + return -1; + +#ifdef __BIG_ENDIAN + offset = index; +#else + offset = 3 - index; +#endif + return offset; +} + +static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu, + u64 gpr) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (offset == -1) + return; + + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsxval[offset] = gpr; + VCPU_VSX_VR(vcpu, index) = val.vval; + } else { + VCPU_VSX_FPR(vcpu, index, offset) = gpr; + } +} + +static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu, + u64 gpr) +{ + union kvmppc_one_reg val; + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsxval[0] = gpr; + val.vsxval[1] = gpr; + VCPU_VSX_VR(vcpu, index) = val.vval; 
+ } else { + VCPU_VSX_FPR(vcpu, index, 0) = gpr; + VCPU_VSX_FPR(vcpu, index, 1) = gpr; + } +} + +static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu, + u32 gpr32) +{ + union kvmppc_one_reg val; + int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); + int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; + int dword_offset, word_offset; + + if (offset == -1) + return; + + if (vcpu->arch.mmio_vsx_tx_sx_enabled) { + val.vval = VCPU_VSX_VR(vcpu, index); + val.vsx32val[offset] = gpr32; + VCPU_VSX_VR(vcpu, index) = val.vval; + } else { + dword_offset = offset / 2; + word_offset = offset % 2; + val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset); + val.vsx32val[word_offset] = gpr32; + VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0]; + } +} +#endif /* CONFIG_VSX */ + +#ifdef CONFIG_PPC_FPU +static inline u64 sp_to_dp(u32 fprs) +{ + u64 fprd; + + preempt_disable(); + enable_kernel_fp(); + asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs) + : "fr0"); + preempt_enable(); + return fprd; +} + +static inline u32 dp_to_sp(u64 fprd) +{ + u32 fprs; + + preempt_disable(); + enable_kernel_fp(); + asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd) + : "fr0"); + preempt_enable(); + return fprs; +} + +#else +#define sp_to_dp(x) (x) +#define dp_to_sp(x) (x) +#endif /* CONFIG_PPC_FPU */ + static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { @@ -832,6 +958,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, } } + /* conversion between single and double precision */ + if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) + gpr = sp_to_dp(gpr); + if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 @@ -848,8 +978,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, } } - kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); - switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); @@ -866,6 +994,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif +#ifdef CONFIG_VSX + case KVM_MMIO_REG_VSX: + if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD) + kvmppc_set_vsr_dword(vcpu, gpr); + else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD) + kvmppc_set_vsr_word(vcpu, gpr); + else if (vcpu->arch.mmio_vsx_copy_type == + KVMPPC_VSX_COPY_DWORD_LOAD_DUMP) + kvmppc_set_vsr_dword_dump(vcpu, gpr); + break; +#endif default: BUG(); } @@ -932,6 +1071,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1); } +#ifdef CONFIG_VSX +int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, + unsigned int rt, unsigned int bytes, + int is_default_endian, int mmio_sign_extend) +{ + enum emulation_result emulated = EMULATE_DONE; + + /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */ + if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || + (vcpu->arch.mmio_vsx_copy_nums < 0) ) { + return EMULATE_FAIL; + } + + while (vcpu->arch.mmio_vsx_copy_nums) { + emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, + is_default_endian, mmio_sign_extend); + + if (emulated != EMULATE_DONE) + break; + + vcpu->arch.paddr_accessed += run->mmio.len; + + vcpu->arch.mmio_vsx_copy_nums--; + vcpu->arch.mmio_vsx_offset++; + } + return emulated; +} +#endif /* CONFIG_VSX */ + int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, 
u64 val, unsigned int bytes, int is_default_endian) { @@ -957,6 +1125,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; + if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) + val = dp_to_sp(val); + /* Store the value at the lowest bytes in 'data'. */ if (!host_swabbed) { switch (bytes) { @@ -990,6 +1161,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, } EXPORT_SYMBOL_GPL(kvmppc_handle_store); +#ifdef CONFIG_VSX +static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val) +{ + u32 dword_offset, word_offset; + union kvmppc_one_reg reg; + int vsx_offset = 0; + int copy_type = vcpu->arch.mmio_vsx_copy_type; + int result = 0; + + switch (copy_type) { + case KVMPPC_VSX_COPY_DWORD: + vsx_offset = + kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); + + if (vsx_offset == -1) { + result = -1; + break; + } + + if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { + *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset); + } else { + reg.vval = VCPU_VSX_VR(vcpu, rs); + *val = reg.vsxval[vsx_offset]; + } + break; + + case KVMPPC_VSX_COPY_WORD: + vsx_offset = + kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); + + if (vsx_offset == -1) { + result = -1; + break; + } + + if (!vcpu->arch.mmio_vsx_tx_sx_enabled) { + dword_offset = vsx_offset / 2; + word_offset = vsx_offset % 2; + reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset); + *val = reg.vsx32val[word_offset]; + } else { + reg.vval = VCPU_VSX_VR(vcpu, rs); + *val = reg.vsx32val[vsx_offset]; + } + break; + + default: + result = -1; + break; + } + + return result; +} + +int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, + int rs, unsigned int bytes, int is_default_endian) +{ + u64 val; + enum emulation_result emulated = EMULATE_DONE; + + vcpu->arch.io_gpr = rs; + + /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */ + if ( (vcpu->arch.mmio_vsx_copy_nums > 4) || + (vcpu->arch.mmio_vsx_copy_nums < 0) ) { + return EMULATE_FAIL; + } + + while (vcpu->arch.mmio_vsx_copy_nums) { + if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) + return EMULATE_FAIL; + + emulated = kvmppc_handle_store(run, vcpu, + val, bytes, is_default_endian); + + if (emulated != EMULATE_DONE) + break; + + vcpu->arch.paddr_accessed += run->mmio.len; + + vcpu->arch.mmio_vsx_copy_nums--; + vcpu->arch.mmio_vsx_offset++; + } + + return emulated; +} + +static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu, + struct kvm_run *run) +{ + enum emulation_result emulated = EMULATE_FAIL; + int r; + + vcpu->arch.paddr_accessed += run->mmio.len; + + if (!vcpu->mmio_is_write) { + emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr, + run->mmio.len, 1, vcpu->arch.mmio_sign_extend); + } else { + emulated = kvmppc_handle_vsx_store(run, vcpu, + vcpu->arch.io_gpr, run->mmio.len, 1); + } + + switch (emulated) { + case EMULATE_DO_MMIO: + run->exit_reason = KVM_EXIT_MMIO; + r = RESUME_HOST; + break; + case EMULATE_FAIL: + pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + r = RESUME_HOST; + break; + default: + r = RESUME_GUEST; + break; + } + return r; +} +#endif /* CONFIG_VSX */ + int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) { int r = 0; @@ -1092,13 +1386,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int r; sigset_t sigsaved; - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, 
&vcpu->sigset, &sigsaved);
-
 if (vcpu->mmio_needed) {
+ vcpu->mmio_needed = 0;
 if (!vcpu->mmio_is_write)
 kvmppc_complete_mmio_load(vcpu, run);
- vcpu->mmio_needed = 0;
+#ifdef CONFIG_VSX
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ if (r == RESUME_HOST) {
+ vcpu->mmio_needed = 1;
+ return r;
+ }
+ }
+#endif
 } else if (vcpu->arch.osi_needed) {
 u64 *gprs = run->osi.gprs;
 int i;
@@ -1120,6 +1425,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
 }
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
 if (run->immediate_exit)
 r = -EINTR;
 else
@@ -1219,8 +1527,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 r = -EPERM;
 dev = kvm_device_from_filp(f.file);
- if (dev)
- r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ if (dev) {
+ if (xive_enabled())
+ r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
+ else
+ r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+ }
 fdput(f);
 break;
@@ -1244,7 +1556,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 return true;
 #endif
 #ifdef CONFIG_KVM_XICS
- if (kvm->arch.xics)
+ if (kvm->arch.xics || kvm->arch.xive)
 return true;
 #endif
 return false;
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 915412e4d5ba..1fa794d7d59f 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -186,7 +186,7 @@ static u32 acop_get_inst(struct pt_regs *regs)
 }
 /**
- * @regs: regsiters at time of interrupt
+ * @regs: registers at time of interrupt
 * @address: storage address
 * @error_code: Fault code, usually the DSISR or ESR depending on
 * processor type
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 7925a9d72cca..59684b4af4d1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -967,3 +967,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind);
 EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
 /* Export this for KVM */
 EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
+EXPORT_SYMBOL_GPL(opal_int_eoi);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6fdbd383f676..283caf1070c9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3330,6 +3330,11 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 }
 }
+static resource_size_t pnv_pci_default_alignment(void)
+{
+ return PAGE_SIZE;
+}
+
 #ifdef CONFIG_PCI_IOV
 static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
 int resno)
@@ -3863,6 +3868,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 hose->controller_ops = pnv_pci_ioda_controller_ops;
 }
+ ppc_md.pcibios_default_alignment = pnv_pci_default_alignment;
+
 #ifdef CONFIG_PCI_IOV
 ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
 ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 6a98efb14264..913825086b8d 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -46,13 +46,15 @@
 #endif
 bool __xive_enabled;
+EXPORT_SYMBOL_GPL(__xive_enabled);
 bool xive_cmdline_disabled;
 /* We use only one priority for now */
 static u8 xive_irq_priority;
-/* TIMA */
+/* TIMA exported to KVM */
 void __iomem *xive_tima;
+EXPORT_SYMBOL_GPL(xive_tima);
 u32 xive_tima_offset;
 /* Backend ops */
@@ -345,8 +347,11 @@ static void xive_irq_eoi(struct irq_data *d)
 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
 d->irq, irqd_to_hwirq(d), xc->pending_prio);
- /* EOI the source if it hasn't been disabled */
- if (!irqd_irq_disabled(d))
+ /*
+ * EOI the source if it hasn't been disabled and hasn't
+ * been passed-through to a KVM guest
+ */
+ if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
 xive_do_source_eoi(irqd_to_hwirq(d), xd);
 /*
@@ -689,9 +694,14 @@ static int xive_irq_set_affinity(struct irq_data *d,
 old_target = xd->target;
- rc = xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(target),
- xive_irq_priority, d->irq);
+ /*
+ * Only configure the irq if it's not currently passed-through to
+ * a KVM guest
+ */
+ if (!irqd_is_forwarded_to_vcpu(d))
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(target),
+ xive_irq_priority, d->irq);
 if (rc < 0) {
 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
 return rc;
@@ -771,6 +781,123 @@ static int xive_irq_retrigger(struct irq_data *d)
 return 1;
 }
+static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
+{
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u8 pq;
+
+ /*
+ * We only support this on interrupts that do not require
+ * firmware calls for masking and unmasking
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
+ return -EIO;
+
+ /*
+ * This is called by KVM with state non-NULL for enabling
+ * pass-through or NULL for disabling it
+ */
+ if (state) {
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Set it to PQ=10 state to prevent further sends */
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
+
+ /* No target? Nothing to do */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ /*
+ * An untargeted interrupt should have been
+ * also masked at the source
+ */
+ WARN_ON(pq & 2);
+
+ return 0;
+ }
+
+ /*
+ * If P was set, adjust state to PQ=11 to indicate
+ * that a resend is needed for the interrupt to reach
+ * the guest. Also remember the value of P.
+ *
+ * This also tells us that it's in flight to a host queue
+ * or has already been fetched but hasn't been EOIed yet
+ * by the host. Thus it's potentially using up a host
+ * queue slot. This is important to know because as long
+ * as this is the case, we must not hard-unmask it when
+ * "returning" that interrupt to the host.
+ *
+ * This saved_p is cleared by the host EOI, when we know
+ * for sure the queue slot is no longer in use.
+ */
+ if (pq & 2) {
+ pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11);
+ xd->saved_p = true;
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the guest. That should guarantee us
+ * that we *will* eventually get an EOI for it on
+ * the host. Otherwise there would be a small window
+ * for P to be seen here but the interrupt going
+ * to the guest queue.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+ } else
+ xd->saved_p = false;
+ } else {
+ irqd_clr_forwarded_to_vcpu(d);
+
+ /* No host target? Hard mask and return */
+ if (xd->target == XIVE_INVALID_TARGET) {
+ xive_do_source_set_mask(xd, true);
+ return 0;
+ }
+
+ /*
+ * Sync the XIVE source HW to ensure the interrupt
+ * has gone through the EAS before we change its
+ * target to the host.
+ */
+ if (xive_ops->sync_source)
+ xive_ops->sync_source(hw_irq);
+
+ /*
+ * By convention we are called with the interrupt in
+ * a PQ=10 or PQ=11 state, ie, it won't fire and will
+ * have latched in Q whether there's a pending HW
+ * interrupt or not.
+ *
+ * First reconfigure the target.
+ */
+ rc = xive_ops->configure_irq(hw_irq,
+ get_hard_smp_processor_id(xd->target),
+ xive_irq_priority, d->irq);
+ if (rc)
+ return rc;
+
+ /*
+ * Then if saved_p is not set, effectively re-enable the
+ * interrupt with an EOI. If it is set, we know there is
+ * still a message in a host queue somewhere that will be
+ * EOId eventually.
+ *
+ * Note: We don't check irqd_irq_disabled(). Effectively,
+ * we *will* let the irq get through even if masked if the
+ * HW is still firing it in order to deal with the whole
+ * saved_p business properly. If the interrupt triggers
+ * while masked, the generic code will re-mask it anyway.
+ */
+ if (!xd->saved_p)
+ xive_do_source_eoi(hw_irq, xd);
+
+ }
+ return 0;
+}
+
 static struct irq_chip xive_irq_chip = {
 .name = "XIVE-IRQ",
 .irq_startup = xive_irq_startup,
@@ -781,12 +908,14 @@ static struct irq_chip xive_irq_chip = {
 .irq_set_affinity = xive_irq_set_affinity,
 .irq_set_type = xive_irq_set_type,
 .irq_retrigger = xive_irq_retrigger,
+ .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
 };
 bool is_xive_irq(struct irq_chip *chip)
 {
 return chip == &xive_irq_chip;
 }
+EXPORT_SYMBOL_GPL(is_xive_irq);
 void xive_cleanup_irq_data(struct xive_irq_data *xd)
 {
@@ -801,6 +930,7 @@ void xive_cleanup_irq_data(struct xive_irq_data *xd)
 xd->trig_mmio = NULL;
 }
 }
+EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
 {
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1a726229a427..ab9ecce61ee5 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -31,6 +31,7 @@
 #include <asm/xive.h>
 #include <asm/xive-regs.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 #include "xive-internal.h"
@@ -95,6 +96,7 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
 }
 return 0;
 }
+EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
 int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
 {
@@ -108,6 +110,8 @@ int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
 }
 return rc == 0 ?
0 : -ENXIO;
 }
+EXPORT_SYMBOL_GPL(xive_native_configure_irq);
+
 /* This can be called multiple times to change a queue configuration */
 int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
@@ -172,6 +176,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
 fail:
 return rc;
 }
+EXPORT_SYMBOL_GPL(xive_native_configure_queue);
 static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
 {
@@ -192,6 +197,7 @@ void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
 {
 __xive_native_disable_queue(vp_id, q, prio);
 }
+EXPORT_SYMBOL_GPL(xive_native_disable_queue);
 static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
 {
@@ -262,6 +268,7 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 }
 return 0;
 }
+#endif /* CONFIG_SMP */
 u32 xive_native_alloc_irq(void)
 {
@@ -277,6 +284,7 @@ u32 xive_native_alloc_irq(void)
 return 0;
 return rc;
 }
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
 void xive_native_free_irq(u32 irq)
 {
@@ -287,7 +295,9 @@ void xive_native_free_irq(u32 irq)
 msleep(1);
 }
 }
+EXPORT_SYMBOL_GPL(xive_native_free_irq);
+#ifdef CONFIG_SMP
 static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
 s64 rc;
@@ -383,7 +393,7 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
 return;
 /* Enable the pool VP */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
 pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
 for (;;) {
 rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
@@ -428,7 +438,7 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
 in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
 /* Disable it */
- vp = xive_pool_vps + get_hard_smp_processor_id(cpu);
+ vp = xive_pool_vps + cpu;
 for (;;) {
 rc = opal_xive_set_vp_info(vp, 0, 0);
 if (rc != OPAL_BUSY)
@@ -437,10 +447,11 @@ static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
 }
 }
-static void xive_native_sync_source(u32 hw_irq)
+void xive_native_sync_source(u32 hw_irq)
 {
 opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
 }
+EXPORT_SYMBOL_GPL(xive_native_sync_source);
 static const struct xive_ops xive_native_ops = {
 .populate_irq_data = xive_native_populate_irq_data,
@@ -501,10 +512,24 @@ static bool xive_parse_provisioning(struct device_node *np)
 return true;
 }
+static void xive_native_setup_pools(void)
+{
+ /* Allocate a pool big enough */
+ pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);
+
+ xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
+ if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
+ pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");
+
+ pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
+ xive_pool_vps, nr_cpu_ids);
+}
+
 u32 xive_native_default_eq_shift(void)
 {
 return xive_queue_shift;
 }
+EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
 bool xive_native_init(void)
 {
@@ -514,7 +539,7 @@ bool xive_native_init(void)
 struct property *prop;
 u8 max_prio = 7;
 const __be32 *p;
- u32 val;
+ u32 val, cpu;
 s64 rc;
 if (xive_cmdline_disabled)
@@ -550,7 +575,11 @@ bool xive_native_init(void)
 break;
 }
- /* Grab size of provisioning pages */
+ /* Configure Thread Management areas for KVM */
+ for_each_possible_cpu(cpu)
+ kvmppc_set_xive_tima(cpu, r.start, tima);
+
+ /* Grab size of provisioning pages */
 xive_parse_provisioning(np);
 /* Switch the XIVE to exploitation mode */
@@ -560,6 +589,9 @@ bool xive_native_init(void)
 return false;
 }
+ /* Setup some dummy HV pool VPs */
+ xive_native_setup_pools();
+
 /* Initialize XIVE core with our backend */
 if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
 max_prio)) {
@@ -638,3 +670,47 @@ void xive_native_free_vp_block(u32 vp_base)
 pr_warn("OPAL error %lld freeing VP block\n", rc);
 }
 EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
+
+int xive_native_enable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_enable_vp);
+
+int xive_native_disable_vp(u32 vp_id)
+{
+ s64 rc;
+
+ for (;;) {
+ rc = opal_xive_set_vp_info(vp_id, 0, 0);
+ if (rc != OPAL_BUSY)
+ break;
+ msleep(1);
+ }
+ return rc ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_disable_vp);
+
+int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
+{
+ __be64 vp_cam_be;
+ __be32 vp_chip_id_be;
+ s64 rc;
+
+ rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
+ if (rc)
+ return -EIO;
+ *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
+ *out_chip_id = be32_to_cpu(vp_chip_id_be);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
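
A note for readers following the new VSX MMIO path earlier in this diff: kvmppc_handle_vsx_load() and kvmppc_handle_vsx_store() split one vector access into mmio_vsx_copy_nums beats, and kvm_arch_vcpu_ioctl_run() decrements that count and re-enters kvmppc_emulate_mmio_vsx_loadstore() after every exit until it reaches zero. The stand-alone program below is only a sketch of that bookkeeping, not kernel code; the struct, field and function names are invented for illustration. Each simulated beat advances the guest physical address by the access width and steps to the next register element, mirroring what the patch does with vcpu->arch.paddr_accessed, mmio_vsx_copy_nums and mmio_vsx_offset.

/* Illustrative user-space model of the per-beat VSX MMIO bookkeeping.
 * Not kernel code: all names here are made up for the example. */
#include <inttypes.h>
#include <stdio.h>

struct mmio_state {
	uint64_t paddr;    /* next guest physical address (paddr_accessed) */
	int copy_nums;     /* beats still to emulate (mmio_vsx_copy_nums) */
	int offset;        /* next element of the VSX register (mmio_vsx_offset) */
};

/* One beat: in the kernel this corresponds to __kvmppc_handle_load() or
 * kvmppc_handle_store(), followed by an exit to the host and a later
 * re-entry through kvm_arch_vcpu_ioctl_run(). */
static void one_beat(struct mmio_state *s, unsigned int bytes)
{
	printf("beat: gpa=0x%" PRIx64 " len=%u element=%d\n",
	       s->paddr, bytes, s->offset);
	s->paddr += bytes;   /* vcpu->arch.paddr_accessed += run->mmio.len */
	s->copy_nums--;      /* vcpu->arch.mmio_vsx_copy_nums-- */
	s->offset++;         /* vcpu->arch.mmio_vsx_offset++ */
}

int main(void)
{
	/* Example: a doubleword-copy access to one 128-bit VSX register,
	 * i.e. two 8-byte beats (copy_nums never exceeds 4 in the patch). */
	struct mmio_state st = { .paddr = 0x3f000000ULL, .copy_nums = 2 };

	while (st.copy_nums)
		one_beat(&st, 8);
	return 0;
}

The sketch also shows why the counters live in vcpu->arch rather than in a local variable: the loop is interrupted by an exit to the host after a beat, so the position within the vector register has to survive until the next KVM_RUN.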