Diffstat (limited to 'arch/powerpc/kvm/book3s_rmhandlers.S')
-rw-r--r-- | arch/powerpc/kvm/book3s_rmhandlers.S | 156
1 file changed, 68 insertions, 88 deletions
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 1a1b34487e71..34187585c507 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -20,6 +20,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
+#include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
@@ -35,42 +36,46 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
-#define LOAD_SHADOW_VCPU(reg)	GET_PACA(reg)
-#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU
-#define MSR_NOIRQ		MSR_KERNEL & ~(MSR_IR | MSR_DR)
 #define FUNC(name) 		GLUE(.,name)
+#define MTMSR_EERI(reg)		mtmsrd	(reg),1
+
+	.globl	kvmppc_skip_interrupt
+kvmppc_skip_interrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_SRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_SRR0, r13
+	GET_SCRATCH0(r13)
+	rfid
+	b	.
+
+	.globl	kvmppc_skip_Hinterrupt
+kvmppc_skip_Hinterrupt:
+	/*
+	 * Here all GPRs are unchanged from when the interrupt happened
+	 * except for r13, which is saved in SPRG_SCRATCH0.
+	 */
+	mfspr	r13, SPRN_HSRR0
+	addi	r13, r13, 4
+	mtspr	SPRN_HSRR0, r13
+	GET_SCRATCH0(r13)
+	hrfid
+	b	.
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
-#define LOAD_SHADOW_VCPU(reg)						\
-	mfspr	reg, SPRN_SPRG_THREAD;					\
-	lwz	reg, THREAD_KVM_SVCPU(reg);				\
-	/* PPC32 can have a NULL pointer - let's check for that */	\
-	mtspr	SPRN_SPRG_SCRATCH1, r12;	/* Save r12 */		\
-	mfcr	r12;							\
-	cmpwi	reg, 0;							\
-	bne	1f;							\
-	mfspr	reg, SPRN_SPRG_SCRATCH0;				\
-	mtcr	r12;							\
-	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
-	b	kvmppc_resume_\intno;					\
-1:;									\
-	mtcr	r12;							\
-	mfspr	r12, SPRN_SPRG_SCRATCH1;				\
-	tophys(reg, reg)
-
-#define SHADOW_VCPU_OFF		0
-#define MSR_NOIRQ		MSR_KERNEL
 #define FUNC(name)		name
-
-#endif
+#define MTMSR_EERI(reg)		mtmsr	(reg)
 
 .macro INTERRUPT_TRAMPOLINE	intno
 
 .global kvmppc_trampoline_\intno
 kvmppc_trampoline_\intno:
 
-	SET_SCRATCH0(r13)		/* Save r13 */
+	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */
 
 	/*
 	 * First thing to do is to find out if we're coming
@@ -78,19 +83,28 @@ kvmppc_trampoline_\intno:
 	 *
 	 * To distinguish, we check a magic byte in the PACA/current
 	 */
-	LOAD_SHADOW_VCPU(r13)
-	PPC_STL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+	mfspr	r13, SPRN_SPRG_THREAD
+	lwz	r13, THREAD_KVM_SVCPU(r13)
+	/* PPC32 can have a NULL pointer - let's check for that */
+	mtspr	SPRN_SPRG_SCRATCH1, r12		/* Save r12 */
 	mfcr	r12
-	stw	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
-	lbz	r12, (SHADOW_VCPU_OFF + SVCPU_IN_GUEST)(r13)
+	cmpwi	r13, 0
+	bne	1f
+2:	mtcr	r12
+	mfspr	r12, SPRN_SPRG_SCRATCH1
+	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
+	b	kvmppc_resume_\intno		/* Get back original handler */
+
+1:	tophys(r13, r13)
+	stw	r12, HSTATE_SCRATCH1(r13)
+	mfspr	r12, SPRN_SPRG_SCRATCH1
+	stw	r12, HSTATE_SCRATCH0(r13)
+	lbz	r12, HSTATE_IN_GUEST(r13)
 	cmpwi	r12, KVM_GUEST_MODE_NONE
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
-	mtcr	r12
-	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
-	GET_SCRATCH0(r13)			/* r13 = original r13 */
-	b	kvmppc_resume_\intno		/* Get back original handler */
+	lwz	r12, HSTATE_SCRATCH1(r13)
+	b	2b
 
 	/* Now we know we're handling a KVM guest */
 ..kvmppc_handler_hasmagic_\intno:
@@ -112,9 +126,6 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL_HV
-#endif
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
@@ -124,14 +135,6 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
 
-/* Those are only available on 64 bit machines */
-
-#ifdef CONFIG_PPC_BOOK3S_64
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
-#endif
-
 /*
  * Bring us back to the faulting code, but skip the
  * faulting instruction.
@@ -143,8 +146,8 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
  *
  * R12             = free
  * R13             = Shadow VCPU (PACA)
- * SVCPU.SCRATCH0  = guest R12
- * SVCPU.SCRATCH1  = guest CR
+ * HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH1 = guest CR
  * SPRG_SCRATCH0   = guest R13
  *
  */
@@ -156,49 +159,34 @@ kvmppc_handler_skip_ins:
 	mtsrr0	r12
 
 	/* Clean up all state */
-	lwz	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13)
+	lwz	r12, HSTATE_SCRATCH1(r13)
 	mtcr	r12
-	PPC_LL	r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13)
+	PPC_LL	r12, HSTATE_SCRATCH0(r13)
 	GET_SCRATCH0(r13)
 
 	/* And get back into the code */
 	RFI
+#endif
 
 /*
- * This trampoline brings us back to a real mode handler
- *
- * Input Registers:
- *
- * R5 = SRR0
- * R6 = SRR1
- * LR = real-mode IP
+ * Call kvmppc_handler_trampoline_enter in real mode
  *
+ * On entry, r4 contains the guest shadow MSR
  */
-.global kvmppc_handler_lowmem_trampoline
-kvmppc_handler_lowmem_trampoline:
-
-	mtsrr0	r5
+_GLOBAL(kvmppc_entry_trampoline)
+	mfmsr	r5
+	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
+	toreal(r7)
+
+	li	r9, MSR_RI
+	ori	r9, r9, MSR_EE
+	andc	r9, r5, r9	/* Clear EE and RI in MSR value */
+	li	r6, MSR_IR | MSR_DR
+	ori	r6, r6, MSR_EE
+	andc	r6, r5, r6	/* Clear EE, DR and IR in MSR value */
+	MTMSR_EERI(r9)		/* Clear EE and RI in MSR */
+	mtsrr0	r7		/* before we set srr0/1 */
 	mtsrr1	r6
-	blr
-kvmppc_handler_lowmem_trampoline_end:
-
-/*
- * Call a function in real mode
- *
- * Input Registers:
- *
- * R3 = function
- * R4 = MSR
- * R5 = scratch register
- *
- */
-_GLOBAL(kvmppc_rmcall)
-	LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
-	mtmsr	r5		/* Disable relocation and interrupts, so mtsrr
-				   doesn't get interrupted */
-	sync
-	mtsrr0	r3
-	mtsrr1	r4
 	RFI
 
 #if defined(CONFIG_PPC_BOOK3S_32)
@@ -251,12 +239,4 @@ define_load_up(altivec)
 define_load_up(vsx)
 #endif
 
-.global kvmppc_trampoline_lowmem
-kvmppc_trampoline_lowmem:
-	PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START
-
-.global kvmppc_trampoline_enter
-kvmppc_trampoline_enter:
-	PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START
-
 #include "book3s_segment.S"