Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/Makefile                 |  3
-rw-r--r--   arch/powerpc/kvm/book3s_32_mmu_host.c     |  2
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu.c          |  2
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_host.c     |  2
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_hv.c       |  2
-rw-r--r--   arch/powerpc/kvm/book3s_64_vio.c          | 54
-rw-r--r--   arch/powerpc/kvm/book3s_64_vio_hv.c       | 10
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rm_mmu.c       |  2
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rmhandlers.S   |  6
-rw-r--r--   arch/powerpc/kvm/book3s_xics.c            |  2
-rw-r--r--   arch/powerpc/kvm/booke.c                  |  2
-rw-r--r--   arch/powerpc/kvm/e500mc.c                 |  2
-rw-r--r--   arch/powerpc/kvm/powerpc.c                |  3
13 files changed, 73 insertions, 19 deletions
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 7f7b6d86ac73..eba0bea6e032 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,7 +8,8 @@ ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
 KVM := ../../../virt/kvm
 
 common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
-		$(KVM)/eventfd.o $(KVM)/vfio.o
+		$(KVM)/eventfd.o
+common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
 
 CFLAGS_e500_mmu.o := -I.
 CFLAGS_e500_mmu_host.o := -I.
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 55c4d51ea3e2..999106991a76 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -22,7 +22,7 @@
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash32.h>
+#include <asm/book3s/32/mmu-hash.h>
 #include <asm/machdep.h>
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 9bf7031a67ff..b9131aa1aedf 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -26,7 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 
 /* #define DEBUG_MMU */
 
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 913cd2198fa6..114edace6cdd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -23,7 +23,7 @@
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/machdep.h>
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index fb37290a57b4..c7b78d8336b2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -32,7 +32,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/hvcall.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 2c2d1030843a..18cf6d1f8174 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -31,7 +31,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/hvcall.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
@@ -209,6 +209,32 @@ fail:
 	return ret;
 }
 
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		      unsigned long ioba, unsigned long tce)
+{
+	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+	long ret;
+
+	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
+	/* 	    liobn, ioba, tce); */
+
+	if (!stt)
+		return H_TOO_HARD;
+
+	ret = kvmppc_ioba_validate(stt, ioba, 1);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	ret = kvmppc_tce_validate(stt, tce);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+
 long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long liobn, unsigned long ioba,
 		unsigned long tce_list, unsigned long npages)
@@ -264,3 +290,29 @@ unlock_exit:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
+
+long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+		unsigned long liobn, unsigned long ioba,
+		unsigned long tce_value, unsigned long npages)
+{
+	struct kvmppc_spapr_tce_table *stt;
+	long i, ret;
+
+	stt = kvmppc_find_table(vcpu, liobn);
+	if (!stt)
+		return H_TOO_HARD;
+
+	ret = kvmppc_ioba_validate(stt, ioba, npages);
+	if (ret != H_SUCCESS)
+		return ret;
+
+	/* Check permission bits only to allow userspace poison TCE for debug */
+	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
+		return H_PARAMETER;
+
+	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+	return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 44be73e6aa26..d461c440889a 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -30,7 +30,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/mmu_context.h>
 #include <asm/hvcall.h>
 #include <asm/synch.h>
@@ -180,8 +180,8 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
-		unsigned long ioba, unsigned long tce)
+long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		unsigned long ioba, unsigned long tce)
 {
 	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
 	long ret;
@@ -204,7 +204,6 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 
 	return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
 
 static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
 		unsigned long ua, unsigned long *phpa)
@@ -296,7 +295,7 @@ unlock_exit:
 	return ret;
 }
 
-long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 		unsigned long liobn, unsigned long ioba,
 		unsigned long tce_value, unsigned long npages)
 {
@@ -320,7 +319,6 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
 
 	return H_SUCCESS;
 }
-EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
 
 long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		unsigned long ioba)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 91700518bbf3..4cb8db05f3e5 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -17,7 +17,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/hvcall.h>
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 85b32f16fa74..e571ad277398 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -27,7 +27,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
-#include <asm/mmu-hash64.h>
+#include <asm/book3s/64/mmu-hash.h>
 #include <asm/tm.h>
 
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
@@ -1942,7 +1942,7 @@ hcall_real_table:
 	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
 	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
 	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
-	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
 	.long	0		/* 0x2c */
@@ -2020,7 +2020,7 @@ hcall_real_table:
 	.long	0		/* 0x12c */
 	.long	0		/* 0x130 */
 	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
-	.long	DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
 	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
 	.long	0		/* 0x140 */
 	.long	0		/* 0x144 */
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 905e94a1370f..46871d554057 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -432,7 +432,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 	 * the whole masked_pending business which is about not
 	 * losing interrupts that occur while masked.
 	 *
-	 * I don't differenciate normal deliveries and resends, this
+	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
 	 * interrupts.
 	 */
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 778ef86e187e..4d66f44a1657 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -992,7 +992,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	kvmppc_restart_interrupt(vcpu, exit_nr);
 
 	/*
-	 * get last instruction before beeing preempted
+	 * get last instruction before being preempted
 	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
 	 */
 	switch (exit_nr) {
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index cda695de8aa7..f48a0c22e8f9 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -182,7 +182,7 @@ int kvmppc_core_check_processor_compat(void)
 		r = 0;
 #ifdef CONFIG_ALTIVEC
 	/*
-	 * Since guests have the priviledge to enable AltiVec, we need AltiVec
+	 * Since guests have the privilege to enable AltiVec, we need AltiVec
 	 * support in the host to save/restore their context.
 	 * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
 	 * because it's cleared in the absence of CONFIG_ALTIVEC!
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 19aa59b0850c..6a68730774ee 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -96,6 +96,9 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
 	 * so we don't miss a request because the requester sees
 	 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
 	 * before next entering the guest (and thus doesn't IPI).
+	 * This also orders the write to mode from any reads
+	 * to the page tables done while the VCPU is running.
+	 * Please see the comment in kvm_flush_remote_tlbs.
 	 */
 	smp_mb();
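
Note on the TCE handlers above: the patch keeps a virtual-mode kvmppc_h_put_tce()/kvmppc_h_stuff_tce() in book3s_64_vio.c and renames the real-mode copies in book3s_64_vio_hv.c to kvmppc_rm_*, which is what hcall_real_table now points at. Both paths call kvmppc_ioba_validate() before touching the table with kvmppc_tce_put(); that helper is not shown in this diff. The sketch below is only an illustration of the kind of window/bounds check such a validator performs -- the struct, field names, helper name and return-code values here are stand-ins, not the kernel's actual definitions.

/*
 * Illustrative sketch only, not the kernel's kvmppc_ioba_validate().
 * The I/O bus address must be aligned to the IOMMU page size and the
 * whole npages run must stay inside the TCE window.
 */
#define H_SUCCESS	0	/* stand-ins for the PAPR return codes */
#define H_PARAMETER	-4

struct tce_window {			/* stand-in for kvmppc_spapr_tce_table */
	unsigned long offset;		/* first valid IOMMU page number */
	unsigned long size;		/* window size in IOMMU pages */
	unsigned int page_shift;	/* IOMMU page size, e.g. 12 (4K) or 24 (16M) */
};

static long tce_ioba_validate(struct tce_window *w, unsigned long ioba,
			      unsigned long npages)
{
	unsigned long mask = (1UL << w->page_shift) - 1;
	unsigned long idx = ioba >> w->page_shift;

	if ((ioba & mask) ||				/* not page aligned */
	    idx < w->offset ||				/* below the window */
	    idx - w->offset + npages > w->size ||	/* runs past the end */
	    idx + npages < idx)				/* npages wraps around */
		return H_PARAMETER;

	return H_SUCCESS;
}

H_PUT_TCE validates a single entry (npages = 1), while H_STUFF_TCE validates the whole run up front, which is why the loop in kvmppc_h_stuff_tce() can advance ioba by one IOMMU page per iteration without further checks.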