From 797179bc4fe06c89e47a9f36f886f68640b423f8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 9 Jun 2016 10:50:43 +0100 Subject: MIPS: KVM: Fix modular KVM under QEMU MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Copy __kvm_mips_vcpu_run() into unmapped memory, so that we can never get a TLB refill exception in it when KVM is built as a module. This was observed to happen with the host MIPS kernel running under QEMU, due to a not entirely transparent optimisation in the QEMU TLB handling where TLB entries replaced with TLBWR are copied to a separate part of the TLB array. Code in those pages continues to be executable, but those mappings persist only until the next ASID switch, even if they are marked global. An ASID switch happens in __kvm_mips_vcpu_run() at exception level after switching to the guest exception base. Subsequent TLB mapped kernel instructions just prior to switching to the guest trigger a TLB refill exception, which enters the guest exception handlers without updating EPC. This appears as a guest triggered TLB refill on a host kernel mapped (host KSeg2) address, which is not handled correctly as user (guest) mode accesses to kernel (host) segments always generate address error exceptions. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: # 3.10.x- Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index dc052fb5c7a2..44da5259f390 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) memcpy(gebase + offset, mips32_GuestException, mips32_GuestExceptionEnd - mips32_GuestException); +#ifdef MODULE + offset += mips32_GuestExceptionEnd - mips32_GuestException; + memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, + __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); + vcpu->arch.vcpu_run = gebase + offset; +#else + vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; +#endif + /* Invalidate the icache for these ranges */ local_flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); @@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Disable hardware page table walking while in guest */ htw_stop(); - r = __kvm_mips_vcpu_run(run, vcpu); + r = vcpu->arch.vcpu_run(run, vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); -- cgit v1.2.3 From 8cffd197485122632103a12fdada911242e7c01e Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 9 Jun 2016 14:19:08 +0100 Subject: MIPS: KVM: Convert code to kernel sized types MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the MIPS KVM C code to use standard kernel sized types (e.g. u32) instead of inttypes.h style ones (e.g. uint32_t) or other types as appropriate.
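As a standalone illustration (not part of the patch; the typedefs stand in for the kernel's <linux/types.h> and simm16() is a made-up helper), the sized types are one-for-one replacements, and later hunks also rely on a signed cast to sign-extend 16-bit immediates where "offset = inst & 0xffff" becomes "offset = (s16)inst":

typedef unsigned int u32;	/* standalone stand-ins for <linux/types.h> */
typedef signed short s16;

/* Sign-extend the low 16 bits of an instruction word. */
static s16 simm16(u32 inst)
{
	return (s16)inst;	/* modular narrowing, as kernel GCC guarantees */
}

/*
 * simm16(0x8c42fffc) == -4, while (inst & 0xffff) gives 65532:
 * the cast preserves negative load/store offsets.
 */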
Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Paolo Bonzini Cc: Radim Krčmář Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/dyntrans.c | 24 +++++------ arch/mips/kvm/emulate.c | 104 +++++++++++++++++++++++----------------------- arch/mips/kvm/interrupt.c | 2 +- arch/mips/kvm/mips.c | 8 ++-- arch/mips/kvm/tlb.c | 28 ++++++------- arch/mips/kvm/trap_emul.c | 30 ++++++------- 6 files changed, 98 insertions(+), 98 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c index e79502a88534..d4a86fb239cd 100644 --- a/arch/mips/kvm/dyntrans.c +++ b/arch/mips/kvm/dyntrans.c @@ -33,13 +33,13 @@ int kvm_mips_trans_cache_index(u32 inst, u32 *opc, { int result = 0; unsigned long kseg0_opc; - uint32_t synci_inst = 0x0; + u32 synci_inst = 0x0; /* Replace the CACHE instruction, with a NOP */ kseg0_opc = CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); - memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32)); local_flush_icache_range(kseg0_opc, kseg0_opc + 32); return result; @@ -54,7 +54,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, { int result = 0; unsigned long kseg0_opc; - uint32_t synci_inst = SYNCI_TEMPLATE, base, offset; + u32 synci_inst = SYNCI_TEMPLATE, base, offset; base = (inst >> 21) & 0x1f; offset = inst & 0xffff; @@ -64,7 +64,7 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, kseg0_opc = CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); - memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(u32)); local_flush_icache_range(kseg0_opc, kseg0_opc + 32); return result; @@ -72,8 +72,8 @@ int kvm_mips_trans_cache_va(u32 inst, u32 *opc, int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) { - int32_t rt, rd, sel; - uint32_t mfc0_inst; + u32 rt, rd, sel; + u32 mfc0_inst; unsigned long kseg0_opc, flags; rt = (inst >> 16) & 0x1f; @@ -94,11 +94,11 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) kseg0_opc = CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); - memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t)); + memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(u32)); local_flush_icache_range(kseg0_opc, kseg0_opc + 32); } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { local_irq_save(flags); - memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t)); + memcpy((void *)opc, (void *)&mfc0_inst, sizeof(u32)); local_flush_icache_range((unsigned long)opc, (unsigned long)opc + 32); local_irq_restore(flags); @@ -112,8 +112,8 @@ int kvm_mips_trans_mfc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) { - int32_t rt, rd, sel; - uint32_t mtc0_inst = SW_TEMPLATE; + u32 rt, rd, sel; + u32 mtc0_inst = SW_TEMPLATE; unsigned long kseg0_opc, flags; rt = (inst >> 16) & 0x1f; @@ -127,11 +127,11 @@ int kvm_mips_trans_mtc0(u32 inst, u32 *opc, struct kvm_vcpu *vcpu) kseg0_opc = CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa (vcpu, (unsigned long) opc)); - memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t)); + memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(u32)); local_flush_icache_range(kseg0_opc, kseg0_opc + 32); } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { local_irq_save(flags); - memcpy((void *)opc, 
(void *)&mtc0_inst, sizeof(uint32_t)); + memcpy((void *)opc, (void *)&mtc0_inst, sizeof(u32)); local_flush_icache_range((unsigned long)opc, (unsigned long)opc + 32); local_irq_restore(flags); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index c59c51908476..8f4f3242a655 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -52,7 +52,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, goto unaligned; /* Read the instruction */ - insn.word = kvm_get_inst((uint32_t *) epc, vcpu); + insn.word = kvm_get_inst((u32 *) epc, vcpu); if (insn.word == KVM_INVALID_INST) return KVM_INVALID_INST; @@ -304,7 +304,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) { struct mips_coproc *cop0 = vcpu->arch.cop0; ktime_t expires, threshold; - uint32_t count, compare; + u32 count, compare; int running; /* Calculate the biased and scaled guest CP0_Count */ @@ -315,7 +315,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) * Find whether CP0_Count has reached the closest timer interrupt. If * not, we shouldn't inject it. */ - if ((int32_t)(count - compare) < 0) + if ((s32)(count - compare) < 0) return count; /* @@ -421,13 +421,13 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now, u32 count) { struct mips_coproc *cop0 = vcpu->arch.cop0; - uint32_t compare; + u32 compare; u64 delta; ktime_t expire; /* Calculate timeout (wrap 0 to 2^32) */ compare = kvm_read_c0_guest_compare(cop0); - delta = (u64)(uint32_t)(compare - count - 1) + 1; + delta = (u64)(u32)(compare - count - 1) + 1; delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); expire = ktime_add_ns(now, delta); @@ -543,7 +543,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) int dc; u32 old_compare = kvm_read_c0_guest_compare(cop0); ktime_t now; - uint32_t count; + u32 count; /* if unchanged, must just be an ack */ if (old_compare == compare) { @@ -584,7 +584,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack) static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - uint32_t count; + u32 count; ktime_t now; /* Stop hrtimer */ @@ -631,7 +631,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - uint32_t count; + u32 count; kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); @@ -660,7 +660,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) s64 changed = count_ctl ^ vcpu->arch.count_ctl; s64 delta; ktime_t expire, now; - uint32_t count, compare; + u32 count, compare; /* Only allow defined bits to be changed */ if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) @@ -686,7 +686,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) */ count = kvm_read_c0_guest_count(cop0); compare = kvm_read_c0_guest_compare(cop0); - delta = (u64)(uint32_t)(compare - count - 1) + 1; + delta = (u64)(u32)(compare - count - 1) + 1; delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); expire = ktime_add_ns(vcpu->arch.count_resume, delta); @@ -800,9 +800,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - uint32_t pc = vcpu->arch.pc; + unsigned long pc = vcpu->arch.pc; - kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); + kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, 
kvm_read_c0_guest_index(cop0)); return EMULATE_FAIL; } @@ -812,11 +812,11 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) struct mips_coproc *cop0 = vcpu->arch.cop0; int index = kvm_read_c0_guest_index(cop0); struct kvm_mips_tlb *tlb = NULL; - uint32_t pc = vcpu->arch.pc; + unsigned long pc = vcpu->arch.pc; if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { kvm_debug("%s: illegal index: %d\n", __func__, index); - kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), @@ -836,7 +836,7 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); - kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), @@ -850,7 +850,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_mips_tlb *tlb = NULL; - uint32_t pc = vcpu->arch.pc; + unsigned long pc = vcpu->arch.pc; int index; get_random_bytes(&index, sizeof(index)); @@ -869,7 +869,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); - kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", + kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", pc, index, kvm_read_c0_guest_entryhi(cop0), kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0)); @@ -881,14 +881,14 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; long entryhi = kvm_read_c0_guest_entryhi(cop0); - uint32_t pc = vcpu->arch.pc; + unsigned long pc = vcpu->arch.pc; int index = -1; index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); kvm_write_c0_guest_index(cop0, index); - kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, + kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, index); return EMULATE_DONE; @@ -978,8 +978,8 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = EMULATE_DONE; - int32_t rt, rd, copz, sel, co_bit, op; - uint32_t pc = vcpu->arch.pc; + u32 rt, rd, copz, sel, co_bit, op; + unsigned long pc = vcpu->arch.pc; unsigned long curr_pc; /* @@ -1047,7 +1047,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, } kvm_debug - ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", + ("[%#lx] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", pc, rd, sel, rt, vcpu->arch.gprs[rt]); break; @@ -1077,7 +1077,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", kvm_read_c0_guest_ebase(cop0)); } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { - uint32_t nasid = + u32 nasid = vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID; if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && ((kvm_read_c0_guest_entryhi(cop0) & @@ -1099,7 +1099,7 @@ enum emulation_result 
kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); goto done; } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { - kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", + kvm_debug("[%#lx] MTCz, COMPARE %#lx <- %#lx\n", pc, kvm_read_c0_guest_compare(cop0), vcpu->arch.gprs[rt]); @@ -1218,7 +1218,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, kvm_write_c0_guest_config5(cop0, val); } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { - uint32_t old_cause, new_cause; + u32 old_cause, new_cause; old_cause = kvm_read_c0_guest_cause(cop0); new_cause = vcpu->arch.gprs[rt]; @@ -1239,7 +1239,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, #endif } - kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, + kvm_debug("[%#lx] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, rd, sel, cop0->reg[rd][sel]); break; @@ -1271,9 +1271,8 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, case wrpgpr_op: { - uint32_t css = - cop0->reg[MIPS_CP0_STATUS][2] & 0xf; - uint32_t pss = + u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; + u32 pss = (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; /* * We don't support any shadow register sets, so @@ -1316,8 +1315,9 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; - int32_t op, base, rt, offset; - uint32_t bytes; + u32 op, base, rt; + s16 offset; + u32 bytes; void *data = run->mmio.data; unsigned long curr_pc; @@ -1332,7 +1332,7 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause, rt = (inst >> 16) & 0x1f; base = (inst >> 21) & 0x1f; - offset = inst & 0xffff; + offset = (s16)inst; op = (inst >> 26) & 0x3f; switch (op) { @@ -1356,7 +1356,7 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause, *(u8 *) data = vcpu->arch.gprs[rt]; kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], - *(uint8_t *) data); + *(u8 *) data); break; @@ -1378,11 +1378,11 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause, run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; - *(uint32_t *) data = vcpu->arch.gprs[rt]; + *(u32 *) data = vcpu->arch.gprs[rt]; kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, - vcpu->arch.gprs[rt], *(uint32_t *) data); + vcpu->arch.gprs[rt], *(u32 *) data); break; case sh_op: @@ -1403,11 +1403,11 @@ enum emulation_result kvm_mips_emulate_store(u32 inst, u32 cause, run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; - *(uint16_t *) data = vcpu->arch.gprs[rt]; + *(u16 *) data = vcpu->arch.gprs[rt]; kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, - vcpu->arch.gprs[rt], *(uint32_t *) data); + vcpu->arch.gprs[rt], *(u32 *) data); break; default: @@ -1428,12 +1428,13 @@ enum emulation_result kvm_mips_emulate_load(u32 inst, u32 cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DO_MMIO; - int32_t op, base, rt, offset; - uint32_t bytes; + u32 op, base, rt; + s16 offset; + u32 bytes; rt = (inst >> 16) & 0x1f; base = (inst >> 21) & 0x1f; - offset = inst & 0xffff; + offset = (s16)inst; op = (inst >> 26) & 0x3f; vcpu->arch.pending_load_cause = cause; @@ -1535,7 +1536,8 @@ enum emulation_result kvm_mips_emulate_cache(u32 inst, u32 *opc, { struct mips_coproc *cop0 = vcpu->arch.cop0; enum emulation_result er = 
EMULATE_DONE; - int32_t offset, cache, op_inst, op, base; + u32 cache, op_inst, op, base; + s16 offset; struct kvm_vcpu_arch *arch = &vcpu->arch; unsigned long va; unsigned long curr_pc; @@ -1551,7 +1553,7 @@ enum emulation_result kvm_mips_emulate_cache(u32 inst, u32 *opc, base = (inst >> 21) & 0x1f; op_inst = (inst >> 16) & 0x1f; - offset = (int16_t)inst; + offset = (s16)inst; cache = op_inst & CacheOp_Cache; op = op_inst & CacheOp_Op; @@ -1691,7 +1693,7 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, u32 *opc, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; - uint32_t inst; + u32 inst; /* Fetch the instruction. */ if (cause & CAUSEF_BD) @@ -2282,7 +2284,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, u32 *opc, struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; unsigned long curr_pc; - uint32_t inst; + u32 inst; /* * Update PC and hold onto current PC in case there is @@ -2377,19 +2379,19 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, switch (run->mmio.len) { case 4: - *gpr = *(int32_t *) run->mmio.data; + *gpr = *(s32 *) run->mmio.data; break; case 2: if (vcpu->mmio_needed == 2) - *gpr = *(int16_t *) run->mmio.data; + *gpr = *(s16 *) run->mmio.data; else - *gpr = *(uint16_t *)run->mmio.data; + *gpr = *(u16 *)run->mmio.data; break; case 1: if (vcpu->mmio_needed == 2) - *gpr = *(int8_t *) run->mmio.data; + *gpr = *(s8 *) run->mmio.data; else *gpr = *(u8 *) run->mmio.data; break; @@ -2409,7 +2411,7 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, struct kvm_run *run, struct kvm_vcpu *vcpu) { - uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_vcpu_arch *arch = &vcpu->arch; enum emulation_result er = EMULATE_DONE; @@ -2448,7 +2450,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; - uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); @@ -2544,7 +2546,7 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; - uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; unsigned long va = vcpu->arch.host_cp0_badvaddr; int index; diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c index 49ce83237fc3..ad28dac6b7e9 100644 --- a/arch/mips/kvm/interrupt.c +++ b/arch/mips/kvm/interrupt.c @@ -117,7 +117,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, u32 cause) { int allowed = 0; - uint32_t exccode; + u32 exccode; struct kvm_vcpu_arch *arch = &vcpu->arch; struct mips_coproc *cop0 = vcpu->arch.cop0; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 44da5259f390..a2b1b9205b94 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1222,7 +1222,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) static void kvm_mips_set_c0_status(void) { - uint32_t status = read_c0_status(); + u32 status = read_c0_status(); if (cpu_has_dsp) status |= (ST0_MX); @@ -1236,9 +1236,9 @@ static void kvm_mips_set_c0_status(void) */ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { - uint32_t cause = vcpu->arch.host_cp0_cause; - uint32_t 
exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 cause = vcpu->arch.host_cp0_cause; + u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 7ea346e150a8..c4e11e138042 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -32,8 +32,6 @@ #define KVM_GUEST_PC_TLB 0 #define KVM_GUEST_SP_TLB 1 -#define PRIx64 "llx" - atomic_t kvm_mips_instance; EXPORT_SYMBOL_GPL(kvm_mips_instance); @@ -102,13 +100,13 @@ void kvm_mips_dump_host_tlbs(void) kvm_info("TLB%c%3d Hi 0x%08lx ", (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', i, tlb.tlb_hi); - kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + kvm_info("Lo0=0x%09llx %c%c attr %lx ", + (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo0 >> 3) & 7); - kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", + (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); @@ -134,13 +132,13 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) kvm_info("TLB%c%3d Hi 0x%08lx ", (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', i, tlb.tlb_hi); - kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + kvm_info("Lo0=0x%09llx %c%c attr %lx ", + (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', (tlb.tlb_lo0 >> 3) & 7); - kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", + (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', (tlb.tlb_lo1 & MIPS3_PG_G) ? 
'G' : ' ', (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); @@ -160,7 +158,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) pfn = kvm_mips_gfn_to_pfn(kvm, gfn); if (kvm_mips_is_error_pfn(pfn)) { - kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); + kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn); err = -EFAULT; goto out; } @@ -176,7 +174,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, unsigned long gva) { gfn_t gfn; - uint32_t offset = gva & ~PAGE_MASK; + unsigned long offset = gva & ~PAGE_MASK; struct kvm *kvm = vcpu->kvm; if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { @@ -726,7 +724,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { unsigned long flags; - uint32_t cpu; + int cpu; local_irq_save(flags); @@ -755,7 +753,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned long paddr, flags, vpn2, asid; - uint32_t inst; + u32 inst; int index; if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 || @@ -787,7 +785,7 @@ u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu) paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, (unsigned long) opc); - inst = *(uint32_t *) CKSEG0ADDR(paddr); + inst = *(u32 *) CKSEG0ADDR(paddr); } else { kvm_err("%s: illegal address: %p\n", __func__, opc); return KVM_INVALID_INST; diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 6ba0fafcecbc..4aa5d77b0d6a 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -21,7 +21,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) { gpa_t gpa; - uint32_t kseg = KSEGX(gva); + gva_t kseg = KSEGX(gva); if ((kseg == CKSEG0) || (kseg == CKSEG1)) gpa = CPHYSADDR(gva); @@ -40,7 +40,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -87,7 +87,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; @@ -131,7 +131,7 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; @@ -178,7 +178,7 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; @@ -232,7 +232,7 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) static int 
kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; @@ -262,7 +262,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; @@ -292,7 +292,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -310,7 +310,7 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -328,7 +328,7 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -346,7 +346,7 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + u32 __user *opc = (u32 __user *)vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -364,7 +364,7 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + u32 __user *opc = (u32 __user *)vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -382,7 +382,7 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; + u32 __user *opc = (u32 __user *)vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -407,7 +407,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct kvm_run *run = vcpu->run; - uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + u32 __user *opc = (u32 __user *) vcpu->arch.pc; unsigned long cause = vcpu->arch.host_cp0_cause; enum emulation_result 
er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -457,7 +457,7 @@ static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; - uint32_t config1; + u32 config1; int vcpu_id = vcpu->vcpu_id; /* -- cgit v1.2.3 From 9befad23ed3e2e178741cb84ac09c0ff45610537 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 9 Jun 2016 14:19:11 +0100 Subject: MIPS: KVM: Don't indirect KVM functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Several KVM module functions are indirected so that they can be accessed from tlb.c which is statically built into the kernel. This is no longer necessary as the relevant bits of code have moved into mmu.c which is part of the KVM module, so drop the indirections. Note: is_error_pfn() is defined inline in kvm_host.h, so didn't actually require the KVM module to be loaded for it to work anyway. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 3 --- arch/mips/kvm/mips.c | 18 +----------------- arch/mips/kvm/mmu.c | 4 ++-- arch/mips/kvm/tlb.c | 10 ---------- 4 files changed, 3 insertions(+), 32 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index f64be7987a32..c8f9671c2779 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -93,9 +93,6 @@ #define KVM_INVALID_ADDR 0xdeadbeef extern atomic_t kvm_mips_instance; -extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); -extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn); -extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn); struct kvm_vm_stat { u32 remote_tlb_flush; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index a2b1b9205b94..c1ab6110ca1d 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -147,7 +147,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm) /* Put the pages we reserved for the guest pmap */ for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) - kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); + kvm_release_pfn_clean(kvm->arch.guest_pmap[i]); } kfree(kvm->arch.guest_pmap); @@ -1645,18 +1645,6 @@ static int __init kvm_mips_init(void) register_die_notifier(&kvm_mips_csr_die_notifier); - /* - * On MIPS, kernel modules are executed from "mapped space", which - * requires TLBs. The TLB handling code is statically linked with - * the rest of the kernel (tlb.c) to avoid the possibility of - * double faulting. The issue is that the TLB code references - * routines that are part of the the KVM module, which are only - * available once the module is loaded. 
- */ - kvm_mips_gfn_to_pfn = gfn_to_pfn; - kvm_mips_release_pfn_clean = kvm_release_pfn_clean; - kvm_mips_is_error_pfn = is_error_pfn; - return 0; } @@ -1664,10 +1652,6 @@ static void __exit kvm_mips_exit(void) { kvm_exit(); - kvm_mips_gfn_to_pfn = NULL; - kvm_mips_release_pfn_clean = NULL; - kvm_mips_is_error_pfn = NULL; - unregister_die_notifier(&kvm_mips_csr_die_notifier); } diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index d18cadfd6e01..d5ada83ec55c 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -37,9 +37,9 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) return 0; srcu_idx = srcu_read_lock(&kvm->srcu); - pfn = kvm_mips_gfn_to_pfn(kvm, gfn); + pfn = gfn_to_pfn(kvm, gfn); - if (kvm_mips_is_error_pfn(pfn)) { + if (is_error_pfn(pfn)) { kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn); err = -EFAULT; goto out; diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index 373817c3166b..37d77ad8431e 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -35,16 +35,6 @@ atomic_t kvm_mips_instance; EXPORT_SYMBOL_GPL(kvm_mips_instance); -/* These function pointers are initialized once the KVM module is loaded */ -kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); -EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn); - -void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn); -EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean); - -bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn); -EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn); - static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) { int cpu = smp_processor_id(); -- cgit v1.2.3 From 878edf014e29de38c49153aba20273fbc9ae31af Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 9 Jun 2016 14:19:14 +0100 Subject: MIPS: KVM: Restore host EBase from ebase variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The host kernel's exception vector base address is currently saved in the VCPU structure at creation time, and restored on a guest exit. However it doesn't change and can already be easily accessed from the 'ebase' variable (arch/mips/kernel/traps.c), so drop the host_ebase member of kvm_vcpu_arch, export the 'ebase' variable to modules and load from there instead. This does result in a single extra instruction (lui) on the guest exit path, but simplifies the code a bit and removes the redundant storage of the host exception base address. Credit for the idea goes to Cavium's VZ KVM implementation. 
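As a C-level sketch of what the locore.S hunk below does (illustrative only; restore_host_ebase() is a made-up name, while 'ebase' and write_c0_ebase() are the existing symbol and <asm/mipsregs.h> accessor):

#include <asm/mipsregs.h>

extern unsigned long ebase;	/* arch/mips/kernel/traps.c, now exported */

/*
 * Guest exit: reload the host exception vector base from the single
 * kernel-wide variable rather than a per-VCPU host_ebase copy.
 */
static void restore_host_ebase(void)
{
	write_c0_ebase(ebase);
}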
Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 2 +- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/kernel/traps.c | 1 + arch/mips/kvm/locore.S | 2 +- arch/mips/kvm/mips.c | 3 --- 5 files changed, 3 insertions(+), 6 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index f68293b4a598..24a8e557db88 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -334,7 +334,7 @@ struct kvm_mips_tlb { #define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { - void *host_ebase, *guest_ebase; + void *guest_ebase; int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); unsigned long host_stack; unsigned long host_gp; diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 420808899c70..a1263d188a5a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -355,7 +355,6 @@ void output_kvm_defines(void) OFFSET(VCPU_RUN, kvm_vcpu, run); OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch); - OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase); OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase); OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 4a1712b5abdf..66e5820bfdae 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1859,6 +1859,7 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs) #define VECTORSPACING 0x100 /* for EI/VI mode */ unsigned long ebase; +EXPORT_SYMBOL_GPL(ebase); unsigned long exception_handlers[32]; unsigned long vi_handlers[64]; diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 43c8ef847efa..f87bec546366 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S @@ -319,7 +319,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) mtc0 k0, CP0_STATUS ehb - LONG_L k0, VCPU_HOST_EBASE(k1) + LONG_L k0, ebase mtc0 k0,CP0_EBASE /* diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index c1ab6110ca1d..6e753761b5d6 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -273,9 +273,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) else size = 0x4000; - /* Save Linux EBASE */ - vcpu->arch.host_ebase = (void *)read_c0_ebase(); - gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); if (!gebase) { -- cgit v1.2.3 From f943176a7205a064da05f81fc94dccc4c7379010 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 14 Jun 2016 09:40:10 +0100 Subject: MIPS: KVM: Generalise fpu_inuse for other state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename fpu_inuse and the related definitions to aux_inuse so it can be used for lazy context management of other auxiliary processor state too, such as VZ guest timer, watchpoints and performance counters. 
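The field remains a plain bitmask, so the lazy ownership test keeps its shape. A reduced sketch of the pattern, mirroring the kvm_own_fpu() hunk below (own_fpu_sketch() is an illustrative name; preemption and Status.CU1 handling are elided):

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

static void own_fpu_sketch(struct kvm_vcpu *vcpu)
{
	/* Restore guest FPU context only if it is not already live. */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
	}
}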
Signed-off-by: James Hogan Cc: Ralf Baechle Cc: Paolo Bonzini Cc: Radim Krčmář Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 8 ++++---- arch/mips/kvm/emulate.c | 8 ++++---- arch/mips/kvm/mips.c | 38 +++++++++++++++++++------------------- 3 files changed, 27 insertions(+), 27 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index d0432b5f2343..e6273850bab6 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -323,8 +323,8 @@ struct kvm_mips_tlb { long tlb_lo[2]; }; -#define KVM_MIPS_FPU_FPU 0x1 -#define KVM_MIPS_FPU_MSA 0x2 +#define KVM_MIPS_AUX_FPU 0x1 +#define KVM_MIPS_AUX_MSA 0x2 #define KVM_MIPS_GUEST_TLB_SIZE 64 struct kvm_vcpu_arch { @@ -346,8 +346,8 @@ struct kvm_vcpu_arch { /* FPU State */ struct mips_fpu_struct fpu; - /* Which FPU state is loaded (KVM_MIPS_FPU_*) */ - unsigned int fpu_inuse; + /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */ + unsigned int aux_inuse; /* COP0 State */ struct mips_coproc *cop0; diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 5b89c0803405..8647bd97b934 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1154,7 +1154,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, * it first. */ if (change & ST0_CU1 && !(val & ST0_FR) && - vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) kvm_lose_fpu(vcpu); /* @@ -1165,7 +1165,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, * the near future. */ if (change & ST0_CU1 && - vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) change_c0_status(ST0_CU1, val); preempt_enable(); @@ -1200,7 +1200,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, * context is already loaded. */ if (change & MIPS_CONF5_FRE && - vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) + vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) change_c0_config5(MIPS_CONF5_FRE, val); /* @@ -1210,7 +1210,7 @@ enum emulation_result kvm_mips_emulate_CP0(u32 inst, u32 *opc, u32 cause, * quickly enabled again in the near future. */ if (change & MIPS_CONF5_MSAEN && - vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) change_c0_config5(MIPS_CONF5_MSAEN, val); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 6e753761b5d6..9093262ff3ce 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1447,7 +1447,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) * not to clobber the status register directly via the commpage. */ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && - vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) + vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) kvm_lose_fpu(vcpu); /* @@ -1462,9 +1462,9 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) enable_fpu_hazard(); /* If guest FPU state not active, restore it now */ - if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { + if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { __kvm_restore_fpu(&vcpu->arch); - vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; } preempt_enable(); @@ -1491,8 +1491,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu) * interacts with MSA state, so play it safe and save it first. 
*/ if (!(sr & ST0_FR) && - (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | - KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) + (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | + KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU) kvm_lose_fpu(vcpu); change_c0_status(ST0_CU1 | ST0_FR, sr); @@ -1506,20 +1506,20 @@ void kvm_own_msa(struct kvm_vcpu *vcpu) set_c0_config5(MIPS_CONF5_MSAEN); enable_fpu_hazard(); - switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { - case KVM_MIPS_FPU_FPU: + switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { + case KVM_MIPS_AUX_FPU: /* * Guest FPU state already loaded, only restore upper MSA state */ __kvm_restore_msa_upper(&vcpu->arch); - vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; break; case 0: /* Neither FPU or MSA already active, restore full MSA state */ __kvm_restore_msa(&vcpu->arch); - vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; + vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; if (kvm_mips_guest_has_fpu(&vcpu->arch)) - vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; + vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; break; default: break; @@ -1533,13 +1533,13 @@ void kvm_own_msa(struct kvm_vcpu *vcpu) void kvm_drop_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { disable_msa(); - vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; + vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; } - if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); - vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; } preempt_enable(); } @@ -1555,7 +1555,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) */ preempt_disable(); - if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { + if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { set_c0_config5(MIPS_CONF5_MSAEN); enable_fpu_hazard(); @@ -1563,17 +1563,17 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) /* Disable MSA & FPU */ disable_msa(); - if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); disable_fpu_hazard(); } - vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); - } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { + vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); + } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { set_c0_status(ST0_CU1); enable_fpu_hazard(); __kvm_save_fpu(&vcpu->arch); - vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; + vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; /* Disable FPU */ clear_c0_status(ST0_CU1 | ST0_FR); -- cgit v1.2.3 From 04ebebf45a6ec61a4405040ea47c4320be5ed229 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 14 Jun 2016 09:40:11 +0100 Subject: MIPS: KVM: Add kvm_aux trace event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a MIPS specific trace event for auxiliary context operations (notably FPU and MSA). Unfortunately the generic kvm_fpu trace event isn't flexible enough to handle the range of interesting things that can happen with FPU and MSA context. The type of state being operated on is traced: - FPU: Just the FPU registers. - MSA: Just the upper half of the MSA vector registers (low half already loaded with FPU state). - FPU & MSA: Full MSA vector state (includes FPU state). As is the type of operation: - Restore: State was enabled and restored. 
- Save: State was saved and disabled. - Enable: State was enabled (already loaded). - Disable: State was disabled (kept loaded). - Discard: State was discarded and disabled. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: Steven Rostedt Cc: Ingo Molnar Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org [Fix remaining occurrence of "fpu_msa", change to "aux". - Paolo] Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 11 +++++++++++ arch/mips/kvm/trace.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 9093262ff3ce..c0e8f8640f2b 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1465,6 +1465,9 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu) if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { __kvm_restore_fpu(&vcpu->arch); vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); + } else { + trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); } preempt_enable(); @@ -1513,6 +1516,7 @@ void kvm_own_msa(struct kvm_vcpu *vcpu) */ __kvm_restore_msa_upper(&vcpu->arch); vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); break; case 0: /* Neither FPU or MSA already active, restore full MSA state */ @@ -1520,8 +1524,11 @@ void kvm_own_msa(struct kvm_vcpu *vcpu) vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; if (kvm_mips_guest_has_fpu(&vcpu->arch)) vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, + KVM_TRACE_AUX_FPU_MSA); break; default: + trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); break; } @@ -1535,10 +1542,12 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu) preempt_disable(); if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { disable_msa(); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; } if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; } preempt_enable(); @@ -1560,6 +1569,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) enable_fpu_hazard(); __kvm_save_msa(&vcpu->arch); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); /* Disable MSA & FPU */ disable_msa(); @@ -1574,6 +1584,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) __kvm_save_fpu(&vcpu->arch); vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); /* Disable FPU */ clear_c0_status(ST0_CU1 | ST0_FR); diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index bd6437f67dc0..f3ada591ca25 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -38,6 +38,52 @@ TRACE_EVENT(kvm_exit, __entry->pc) ); +#define KVM_TRACE_AUX_RESTORE 0 +#define KVM_TRACE_AUX_SAVE 1 +#define KVM_TRACE_AUX_ENABLE 2 +#define KVM_TRACE_AUX_DISABLE 3 +#define KVM_TRACE_AUX_DISCARD 4 + +#define KVM_TRACE_AUX_FPU 1 +#define KVM_TRACE_AUX_MSA 2 +#define KVM_TRACE_AUX_FPU_MSA 3 + +#define kvm_trace_symbol_aux_op \ + { KVM_TRACE_AUX_RESTORE, "restore" }, \ + { KVM_TRACE_AUX_SAVE, "save" }, \ + { KVM_TRACE_AUX_ENABLE, "enable" }, \ + { KVM_TRACE_AUX_DISABLE, "disable" }, \ + { KVM_TRACE_AUX_DISCARD, "discard" } + +#define kvm_trace_symbol_aux_state \ + { KVM_TRACE_AUX_FPU, "FPU" }, \ + { KVM_TRACE_AUX_MSA, "MSA" }, \ + { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" 
} + +TRACE_EVENT(kvm_aux, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, + unsigned int state), + TP_ARGS(vcpu, op, state), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(u8, op) + __field(u8, state) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->op = op; + __entry->state = state; + ), + + TP_printk("%s %s PC: 0x%08lx", + __print_symbolic(__entry->op, + kvm_trace_symbol_aux_op), + __print_symbolic(__entry->state, + kvm_trace_symbol_aux_state), + __entry->pc) +); + #endif /* _TRACE_KVM_H */ /* This part must be outside protection */ -- cgit v1.2.3 From 1e09e86ac13747903501004082bf1c5b7c6262b2 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 14 Jun 2016 09:40:12 +0100 Subject: MIPS: KVM: Clean up kvm_exit trace event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the MIPS kvm_exit trace event so that the exit reasons are specified in a trace friendly way (via __print_symbolic), and so that the exit reasons that derive straight from Cause.ExcCode values map directly, allowing a single trace_kvm_exit() call to replace a bunch of individual ones. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: Steven Rostedt Cc: Ingo Molnar Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 22 -------------------- arch/mips/kvm/emulate.c | 4 ++-- arch/mips/kvm/mips.c | 17 ++-------------- arch/mips/kvm/stats.c | 21 ------------------- arch/mips/kvm/trace.h | 44 +++++++++++++++++++++++++++++++++++++--- 5 files changed, 45 insertions(+), 63 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index e6273850bab6..b8cb74270746 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -125,28 +125,6 @@ struct kvm_vcpu_stat { u32 halt_wakeup; }; -enum kvm_mips_exit_types { - WAIT_EXITS, - CACHE_EXITS, - SIGNAL_EXITS, - INT_EXITS, - COP_UNUSABLE_EXITS, - TLBMOD_EXITS, - TLBMISS_LD_EXITS, - TLBMISS_ST_EXITS, - ADDRERR_ST_EXITS, - ADDRERR_LD_EXITS, - SYSCALL_EXITS, - RESVD_INST_EXITS, - BREAK_INST_EXITS, - TRAP_INST_EXITS, - MSA_FPE_EXITS, - FPE_EXITS, - MSA_DISABLED_EXITS, - FLUSH_DCACHE_EXITS, - MAX_KVM_MIPS_EXIT_TYPES -}; - struct kvm_arch_memory_slot { }; diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 8647bd97b934..fce08bda9ebc 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -775,7 +775,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) vcpu->arch.pending_exceptions); ++vcpu->stat.wait_exits; - trace_kvm_exit(vcpu, WAIT_EXITS); + trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT); if (!vcpu->arch.pending_exceptions) { vcpu->arch.wait = 1; kvm_vcpu_block(vcpu); @@ -1718,7 +1718,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, case cache_op: ++vcpu->stat.cache_exits; - trace_kvm_exit(vcpu, CACHE_EXITS); + trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); break; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index c0e8f8640f2b..e9e40b9dd9be 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1257,6 +1257,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", cause, opc, run, vcpu); + trace_kvm_exit(vcpu, exccode); /* * Do a privilege check, if in UM most of these exit 
conditions end up @@ -1276,7 +1277,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); ++vcpu->stat.int_exits; - trace_kvm_exit(vcpu, INT_EXITS); if (need_resched()) cond_resched(); @@ -1288,7 +1288,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); ++vcpu->stat.cop_unusable_exits; - trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); /* XXXKYMA: Might need to return to user space */ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) @@ -1297,7 +1296,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) case EXCCODE_MOD: ++vcpu->stat.tlbmod_exits; - trace_kvm_exit(vcpu, TLBMOD_EXITS); ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); break; @@ -1307,7 +1305,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) badvaddr); ++vcpu->stat.tlbmiss_st_exits; - trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); break; @@ -1316,61 +1313,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) cause, opc, badvaddr); ++vcpu->stat.tlbmiss_ld_exits; - trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); break; case EXCCODE_ADES: ++vcpu->stat.addrerr_st_exits; - trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); break; case EXCCODE_ADEL: ++vcpu->stat.addrerr_ld_exits; - trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); break; case EXCCODE_SYS: ++vcpu->stat.syscall_exits; - trace_kvm_exit(vcpu, SYSCALL_EXITS); ret = kvm_mips_callbacks->handle_syscall(vcpu); break; case EXCCODE_RI: ++vcpu->stat.resvd_inst_exits; - trace_kvm_exit(vcpu, RESVD_INST_EXITS); ret = kvm_mips_callbacks->handle_res_inst(vcpu); break; case EXCCODE_BP: ++vcpu->stat.break_inst_exits; - trace_kvm_exit(vcpu, BREAK_INST_EXITS); ret = kvm_mips_callbacks->handle_break(vcpu); break; case EXCCODE_TR: ++vcpu->stat.trap_inst_exits; - trace_kvm_exit(vcpu, TRAP_INST_EXITS); ret = kvm_mips_callbacks->handle_trap(vcpu); break; case EXCCODE_MSAFPE: ++vcpu->stat.msa_fpe_exits; - trace_kvm_exit(vcpu, MSA_FPE_EXITS); ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); break; case EXCCODE_FPE: ++vcpu->stat.fpe_exits; - trace_kvm_exit(vcpu, FPE_EXITS); ret = kvm_mips_callbacks->handle_fpe(vcpu); break; case EXCCODE_MSADIS: ++vcpu->stat.msa_disabled_exits; - trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); break; @@ -1397,7 +1384,7 @@ skip_emul: run->exit_reason = KVM_EXIT_INTR; ret = (-EINTR << 2) | RESUME_HOST; ++vcpu->stat.signal_exits; - trace_kvm_exit(vcpu, SIGNAL_EXITS); + trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); } } diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c index 888bb67070ac..53f851a61554 100644 --- a/arch/mips/kvm/stats.c +++ b/arch/mips/kvm/stats.c @@ -11,27 +11,6 @@ #include -char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { - "WAIT", - "CACHE", - "Signal", - "Interrupt", - "COP0/1 Unusable", - "TLB Mod", - "TLB Miss (LD)", - "TLB Miss (ST)", - "Address Err (ST)", - "Address Error (LD)", - "System Call", - "Reserved Inst", - "Break Inst", - "Trap Inst", - "MSA FPE", - "FPE", - "MSA Disabled", - "D-Cache Flushes", -}; - char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { "Index", "Random", diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index f3ada591ca25..ffa6ee8f2025 
100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -17,8 +17,45 @@ #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE trace -/* Tracepoints for VM eists */ -extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; +/* The first 32 exit reasons correspond to Cause.ExcCode */ +#define KVM_TRACE_EXIT_INT 0 +#define KVM_TRACE_EXIT_TLBMOD 1 +#define KVM_TRACE_EXIT_TLBMISS_LD 2 +#define KVM_TRACE_EXIT_TLBMISS_ST 3 +#define KVM_TRACE_EXIT_ADDRERR_LD 4 +#define KVM_TRACE_EXIT_ADDRERR_ST 5 +#define KVM_TRACE_EXIT_SYSCALL 8 +#define KVM_TRACE_EXIT_BREAK_INST 9 +#define KVM_TRACE_EXIT_RESVD_INST 10 +#define KVM_TRACE_EXIT_COP_UNUSABLE 11 +#define KVM_TRACE_EXIT_TRAP_INST 13 +#define KVM_TRACE_EXIT_MSA_FPE 14 +#define KVM_TRACE_EXIT_FPE 15 +#define KVM_TRACE_EXIT_MSA_DISABLED 21 +/* Further exit reasons */ +#define KVM_TRACE_EXIT_WAIT 32 +#define KVM_TRACE_EXIT_CACHE 33 +#define KVM_TRACE_EXIT_SIGNAL 34 + +/* Tracepoints for VM exits */ +#define kvm_trace_symbol_exit_types \ + { KVM_TRACE_EXIT_INT, "Interrupt" }, \ + { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \ + { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \ + { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \ + { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \ + { KVM_TRACE_EXIT_ADDRERR_ST, "Address Err (ST)" }, \ + { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \ + { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \ + { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \ + { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \ + { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \ + { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \ + { KVM_TRACE_EXIT_FPE, "FPE" }, \ + { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \ + { KVM_TRACE_EXIT_WAIT, "WAIT" }, \ + { KVM_TRACE_EXIT_CACHE, "CACHE" }, \ + { KVM_TRACE_EXIT_SIGNAL, "Signal" } TRACE_EVENT(kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), @@ -34,7 +71,8 @@ TRACE_EVENT(kvm_exit, ), TP_printk("[%s]PC: 0x%08lx", - kvm_mips_exit_types_str[__entry->reason], + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), __entry->pc) ); -- cgit v1.2.3 From 93258604ab6d3f2bdc6cb02f61961af56712f144 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 14 Jun 2016 09:40:14 +0100 Subject: MIPS: KVM: Add guest mode switch trace events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a few trace events for entering and coming out of guest mode, as well as re-entering it from a guest exit exception. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: Steven Rostedt Cc: Ingo Molnar Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 4 ++++ arch/mips/kvm/trace.h | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e9e40b9dd9be..b5ad2ba1847a 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -410,7 +410,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Disable hardware page table walking while in guest */ htw_stop(); + trace_kvm_enter(vcpu); r = vcpu->arch.vcpu_run(run, vcpu); + trace_kvm_out(vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); @@ -1389,6 +1391,8 @@ skip_emul: } if (ret == RESUME_GUEST) { + trace_kvm_reenter(vcpu); + /* * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context * is live), restore FCR31 / MSACSR. 
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index 7daf7474d6a6..aec1c43f2b44 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -17,6 +17,54 @@ #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE trace +/* + * Tracepoints for VM enters + */ +TRACE_EVENT(kvm_enter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", + __entry->pc) +); + +TRACE_EVENT(kvm_reenter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", + __entry->pc) +); + +TRACE_EVENT(kvm_out, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", + __entry->pc) +); + /* The first 32 exit reasons correspond to Cause.ExcCode */ #define KVM_TRACE_EXIT_INT 0 #define KVM_TRACE_EXIT_TLBMOD 1 -- cgit v1.2.3 From cc68d22f9727d02c1d981d27c11389fd9413e419 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 15 Jun 2016 19:29:48 +0100 Subject: MIPS: KVM: Pass all unknown registers to callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass all unrecognised register IDs through to the set_one_reg() and get_one_reg() callbacks, not just select ones. This allows implementation specific registers to be more easily added without having to modify arch/mips/kvm/mips.c. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index b5ad2ba1847a..fe82f3354c23 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -688,16 +688,11 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, v = (long)kvm_read_c0_guest_errorepc(cop0); break; /* registers to be handled specially */ - case KVM_REG_MIPS_CP0_COUNT: - case KVM_REG_MIPS_COUNT_CTL: - case KVM_REG_MIPS_COUNT_RESUME: - case KVM_REG_MIPS_COUNT_HZ: + default: ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); if (ret) return ret; break; - default: - return -EINVAL; } if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; @@ -859,21 +854,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, kvm_write_c0_guest_errorepc(cop0, v); break; /* registers to be handled specially */ - case KVM_REG_MIPS_CP0_COUNT: - case KVM_REG_MIPS_CP0_COMPARE: - case KVM_REG_MIPS_CP0_CAUSE: - case KVM_REG_MIPS_CP0_CONFIG: - case KVM_REG_MIPS_CP0_CONFIG1: - case KVM_REG_MIPS_CP0_CONFIG2: - case KVM_REG_MIPS_CP0_CONFIG3: - case KVM_REG_MIPS_CP0_CONFIG4: - case KVM_REG_MIPS_CP0_CONFIG5: - case KVM_REG_MIPS_COUNT_CTL: - case KVM_REG_MIPS_COUNT_RESUME: - case KVM_REG_MIPS_COUNT_HZ: - return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); default: - return -EINVAL; + return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); } return 0; } -- cgit v1.2.3 From f5c43bd4218c0d7bd65b010fd080cd6edeaeb4c8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 15 Jun 2016 19:29:49 +0100 Subject: MIPS: KVM: Make KVM_GET_REG_LIST dynamic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Make the implementation of KVM_GET_REG_LIST more dynamic so that only the subset of registers actually available can be exposed to user mode. This is important for VZ where some of the guest register state may not be possible to prevent the guest from accessing, therefore the user process may need to be aware of the state even if it doesn't understand what the state is for. This also allows different MIPS KVM implementations to provide different registers to one another, by way of new num_regs(vcpu) and copy_reg_indices(vcpu, indices) callback functions, currently just stubbed for trap & emulate. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 2 ++ arch/mips/kvm/mips.c | 29 ++++++++++++++++++++++------- arch/mips/kvm/trap_emul.c | 13 +++++++++++++ 3 files changed, 37 insertions(+), 7 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 1e002136f514..38f0491fcb2f 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -560,6 +560,8 @@ struct kvm_mips_callbacks { u32 cause); int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority, u32 cause); + unsigned long (*num_regs)(struct kvm_vcpu *vcpu); + int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices); int (*get_one_reg)(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, s64 *v); int (*set_one_reg)(struct kvm_vcpu *vcpu, diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index fe82f3354c23..2c4709a09b78 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -538,6 +538,26 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_COUNT_HZ, }; +static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) +{ + unsigned long ret; + + ret = ARRAY_SIZE(kvm_mips_get_one_regs); + ret += kvm_mips_callbacks->num_regs(vcpu); + + return ret; +} + +static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) +{ + if (copy_to_user(indices, kvm_mips_get_one_regs, + sizeof(kvm_mips_get_one_regs))) + return -EFAULT; + indices += ARRAY_SIZE(kvm_mips_get_one_regs); + + return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); +} + static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { @@ -908,23 +928,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, } case KVM_GET_REG_LIST: { struct kvm_reg_list __user *user_list = argp; - u64 __user *reg_dest; struct kvm_reg_list reg_list; unsigned n; if (copy_from_user(®_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; - reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); + reg_list.n = kvm_mips_num_regs(vcpu); if (copy_to_user(user_list, ®_list, sizeof(reg_list))) return -EFAULT; if (n < reg_list.n) return -E2BIG; - reg_dest = user_list->reg; - if (copy_to_user(reg_dest, kvm_mips_get_one_regs, - sizeof(kvm_mips_get_one_regs))) - return -EFAULT; - return 0; + return kvm_mips_copy_reg_indices(vcpu, user_list->reg); } case KVM_NMI: /* Treat the NMI as a CPU reset */ diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 09b97fa9dabb..b64ca1a222f7 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -478,6 +478,17 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) return 0; } +static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) +{ + return 0; +} + 
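/*
 * Sketch (not part of this patch): the trap & emulate stubs above and
 * below report zero extra registers and copy nothing. An implementation
 * with additional state would pair the two callbacks so that the count
 * and the copied indices always agree. The kvm_myimpl_* names and the
 * register array contents here are hypothetical; any KVM_REG_MIPS_* IDs
 * handled by that implementation's get_one_reg()/set_one_reg() would do.
 */
static u64 kvm_myimpl_extra_regs[] = {
	KVM_REG_MIPS_CP0_ERROREPC,	/* illustrative entry only */
};

static unsigned long kvm_myimpl_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_myimpl_extra_regs);
}

static int kvm_myimpl_copy_reg_indices(struct kvm_vcpu *vcpu,
				       u64 __user *indices)
{
	if (copy_to_user(indices, kvm_myimpl_extra_regs,
			 sizeof(kvm_myimpl_extra_regs)))
		return -EFAULT;

	return 0;
}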
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, + u64 __user *indices) +{ + return 0; +} + static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, s64 *v) @@ -627,6 +638,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { .dequeue_io_int = kvm_mips_dequeue_io_int_cb, .irq_deliver = kvm_mips_irq_deliver_cb, .irq_clear = kvm_mips_irq_clear_cb, + .num_regs = kvm_trap_emul_num_regs, + .copy_reg_indices = kvm_trap_emul_copy_reg_indices, .get_one_reg = kvm_trap_emul_get_one_reg, .set_one_reg = kvm_trap_emul_set_one_reg, .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, -- cgit v1.2.3 From e57759306c44ba6105c04eafc3b22efc55bb7ad2 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 15 Jun 2016 19:29:51 +0100 Subject: MIPS: KVM: List FPU/MSA registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make KVM_GET_REG_LIST list FPU & MSA registers. Specifically we list all 32 vector registers when MSA can be enabled, 32 single-precision FP registers when FPU can be enabled, and either 16 or 32 double-precision FP registers when FPU can be enabled depending on whether FR mode is supported (which provides 32 doubles instead of 16 even doubles). Note, these registers may still be inaccessible depending on the current FP mode of the guest. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 2c4709a09b78..622b9feba927 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -538,11 +538,29 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_COUNT_HZ, }; +static u64 kvm_mips_get_one_regs_fpu[] = { + KVM_REG_MIPS_FCR_IR, + KVM_REG_MIPS_FCR_CSR, +}; + +static u64 kvm_mips_get_one_regs_msa[] = { + KVM_REG_MIPS_MSA_IR, + KVM_REG_MIPS_MSA_CSR, +}; + static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) { unsigned long ret; ret = ARRAY_SIZE(kvm_mips_get_one_regs); + if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { + ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48; + /* odd doubles */ + if (boot_cpu_data.fpu_id & MIPS_FPIR_F64) + ret += 16; + } + if (kvm_mips_guest_can_have_msa(&vcpu->arch)) + ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; ret += kvm_mips_callbacks->num_regs(vcpu); return ret; @@ -550,11 +568,51 @@ static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) { + u64 index; + unsigned int i; + if (copy_to_user(indices, kvm_mips_get_one_regs, sizeof(kvm_mips_get_one_regs))) return -EFAULT; indices += ARRAY_SIZE(kvm_mips_get_one_regs); + if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { + if (copy_to_user(indices, kvm_mips_get_one_regs_fpu, + sizeof(kvm_mips_get_one_regs_fpu))) + return -EFAULT; + indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu); + + for (i = 0; i < 32; ++i) { + index = KVM_REG_MIPS_FPR_32(i); + if (copy_to_user(indices, &index, sizeof(index))) + return -EFAULT; + ++indices; + + /* skip odd doubles if no F64 */ + if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) + continue; + + index = KVM_REG_MIPS_FPR_64(i); + if (copy_to_user(indices, &index, sizeof(index))) + return -EFAULT; + ++indices; + } + } + + if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { + if 
(copy_to_user(indices, kvm_mips_get_one_regs_msa, + sizeof(kvm_mips_get_one_regs_msa))) + return -EFAULT; + indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa); + + for (i = 0; i < 32; ++i) { + index = KVM_REG_MIPS_VEC_128(i); + if (copy_to_user(indices, &index, sizeof(index))) + return -EFAULT; + ++indices; + } + } + return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); } -- cgit v1.2.3 From 05108709526716e1d40210fe3b9d7acd1cb694ea Mon Sep 17 00:00:00 2001 From: James Hogan Date: Wed, 15 Jun 2016 19:29:56 +0100 Subject: MIPS: KVM: Add KScratch registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow up to 6 KVM guest KScratch registers to be enabled and accessed via the KVM guest register API and from the guest itself (the fallback reading and writing of commpage registers is sufficient for KScratch registers to work as expected). User mode can expose the registers by setting the appropriate bits of the guest Config4.KScrExist field. KScratch registers that aren't usable won't be writeable via the KVM Ioctl API. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- Documentation/virtual/kvm/api.txt | 6 ++++ arch/mips/include/asm/kvm_host.h | 19 +++++++++++ arch/mips/kvm/emulate.c | 7 +++- arch/mips/kvm/mips.c | 71 +++++++++++++++++++++++++++++++++++++++ arch/mips/kvm/trace.h | 6 ++++ arch/mips/kvm/trap_emul.c | 2 ++ 6 files changed, 110 insertions(+), 1 deletion(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 4aac3e51bf9f..09efa9eb3926 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -2032,6 +2032,12 @@ registers, find a list below: MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32 MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32 MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH3 | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64 + MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64 MIPS | KVM_REG_MIPS_COUNT_CTL | 64 MIPS | KVM_REG_MIPS_COUNT_RESUME | 64 MIPS | KVM_REG_MIPS_COUNT_HZ | 64 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index f12eb01a3195..5e9da2a31fde 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -56,6 +56,12 @@ #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) +#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2) +#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3) +#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4) +#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5) +#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6) +#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7) #define KVM_MAX_VCPUS 1 @@ -376,6 +382,7 @@ struct kvm_vcpu_arch { u8 fpu_enabled; u8 msa_enabled; + u8 kscratch_enabled; }; @@ -429,6 +436,18 @@ struct kvm_vcpu_arch { #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) +#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2]) +#define kvm_read_c0_guest_kscratch2(cop0) 
(cop0->reg[MIPS_CP0_DESAVE][3]) +#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4]) +#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5]) +#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6]) +#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7]) +#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val)) +#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val)) +#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val)) +#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val)) +#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val)) +#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val)) /* * Some of the guest registers may be modified asynchronously (e.g. from a diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 4ca5450febbb..5f0354c80c8e 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -941,7 +941,12 @@ unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) { /* Config5 is optional */ - return MIPS_CONF_M; + unsigned int mask = MIPS_CONF_M; + + /* KScrExist */ + mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16; + + return mask; } /** diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 622b9feba927..5a2b9034a05c 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -9,6 +9,7 @@ * Authors: Sanjay Lal */ +#include #include #include #include @@ -548,6 +549,15 @@ static u64 kvm_mips_get_one_regs_msa[] = { KVM_REG_MIPS_MSA_CSR, }; +static u64 kvm_mips_get_one_regs_kscratch[] = { + KVM_REG_MIPS_CP0_KSCRATCH1, + KVM_REG_MIPS_CP0_KSCRATCH2, + KVM_REG_MIPS_CP0_KSCRATCH3, + KVM_REG_MIPS_CP0_KSCRATCH4, + KVM_REG_MIPS_CP0_KSCRATCH5, + KVM_REG_MIPS_CP0_KSCRATCH6, +}; + static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) { unsigned long ret; @@ -561,6 +571,7 @@ static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) } if (kvm_mips_guest_can_have_msa(&vcpu->arch)) ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; + ret += __arch_hweight8(vcpu->arch.kscratch_enabled); ret += kvm_mips_callbacks->num_regs(vcpu); return ret; @@ -613,6 +624,16 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) } } + for (i = 0; i < 6; ++i) { + if (!(vcpu->arch.kscratch_enabled & BIT(i + 2))) + continue; + + if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i], + sizeof(kvm_mips_get_one_regs_kscratch[i]))) + return -EFAULT; + ++indices; + } + return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); } @@ -765,6 +786,31 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_ERROREPC: v = (long)kvm_read_c0_guest_errorepc(cop0); break; + case KVM_REG_MIPS_CP0_KSCRATCH1 ... 
KVM_REG_MIPS_CP0_KSCRATCH6: + idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; + if (!(vcpu->arch.kscratch_enabled & BIT(idx))) + return -EINVAL; + switch (idx) { + case 2: + v = (long)kvm_read_c0_guest_kscratch1(cop0); + break; + case 3: + v = (long)kvm_read_c0_guest_kscratch2(cop0); + break; + case 4: + v = (long)kvm_read_c0_guest_kscratch3(cop0); + break; + case 5: + v = (long)kvm_read_c0_guest_kscratch4(cop0); + break; + case 6: + v = (long)kvm_read_c0_guest_kscratch5(cop0); + break; + case 7: + v = (long)kvm_read_c0_guest_kscratch6(cop0); + break; + } + break; /* registers to be handled specially */ default: ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); @@ -931,6 +977,31 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; + case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: + idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; + if (!(vcpu->arch.kscratch_enabled & BIT(idx))) + return -EINVAL; + switch (idx) { + case 2: + kvm_write_c0_guest_kscratch1(cop0, v); + break; + case 3: + kvm_write_c0_guest_kscratch2(cop0, v); + break; + case 4: + kvm_write_c0_guest_kscratch3(cop0, v); + break; + case 5: + kvm_write_c0_guest_kscratch4(cop0, v); + break; + case 6: + kvm_write_c0_guest_kscratch5(cop0, v); + break; + case 7: + kvm_write_c0_guest_kscratch6(cop0, v); + break; + } + break; /* registers to be handled specially */ default: return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index 5d712ecb0734..a38bdab68574 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h @@ -178,6 +178,12 @@ TRACE_EVENT(kvm_exit, { KVM_TRACE_COP0(16, 7), "Config7" }, \ { KVM_TRACE_COP0(26, 0), "ECC" }, \ { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \ + { KVM_TRACE_COP0(31, 2), "KScratch1" }, \ + { KVM_TRACE_COP0(31, 3), "KScratch2" }, \ + { KVM_TRACE_COP0(31, 4), "KScratch3" }, \ + { KVM_TRACE_COP0(31, 5), "KScratch4" }, \ + { KVM_TRACE_COP0(31, 6), "KScratch5" }, \ + { KVM_TRACE_COP0(31, 7), "KScratch6" }, \ { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \ { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \ { KVM_TRACE_HWR( 2, 0), "CC" }, \ diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index b64ca1a222f7..eb191c4612bb 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -418,6 +418,8 @@ static int kvm_trap_emul_vm_init(struct kvm *kvm) static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) { + vcpu->arch.kscratch_enabled = 0xfc; + return 0; } -- cgit v1.2.3 From 6edaa5307f3f51e4e56dc4c63f68a69d88c6ddf5 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 15 Jun 2016 15:18:26 +0200 Subject: KVM: remove kvm_guest_enter/exit wrappers Use the functions from context_tracking.h directly. Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Thomas Gleixner Reviewed-by: Rik van Riel Signed-off-by: Paolo Bonzini --- arch/arm/kvm/arm.c | 8 ++++---- arch/mips/kvm/mips.c | 4 ++-- arch/powerpc/kvm/book3s_hv.c | 4 ++-- arch/powerpc/kvm/book3s_pr.c | 4 ++-- arch/powerpc/kvm/booke.c | 4 ++-- arch/powerpc/kvm/powerpc.c | 2 +- arch/s390/kvm/kvm-s390.c | 4 ++-- arch/s390/kvm/vsie.c | 4 ++-- arch/x86/kvm/x86.c | 4 ++-- include/linux/kvm_host.h | 22 ---------------------- 10 files changed, 19 insertions(+), 41 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index f20ca84537f5..9ac4970882fe 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -615,7 +615,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * Enter the guest */ trace_kvm_entry(*vcpu_pc(vcpu)); - __kvm_guest_enter(); + guest_enter_irqoff(); vcpu->mode = IN_GUEST_MODE; ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); @@ -641,14 +641,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) local_irq_enable(); /* - * We do local_irq_enable() before calling kvm_guest_exit() so + * We do local_irq_enable() before calling guest_exit() so * that if a timer interrupt hits while running the guest we * account that tick as being spent in the guest. We enable - * preemption after calling kvm_guest_exit() so that if we get + * preemption after calling guest_exit() so that if we get * preempted we make sure ticks after that is not counted as * guest time. */ - kvm_guest_exit(); + guest_exit(); trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); /* diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 5a2b9034a05c..5f1163653b50 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -406,7 +406,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_mips_deliver_interrupts(vcpu, kvm_read_c0_guest_cause(vcpu->arch.cop0)); - __kvm_guest_enter(); + guest_enter_irqoff(); /* Disable hardware page table walking while in guest */ htw_stop(); @@ -418,7 +418,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) /* Re-enable HTW before enabling interrupts */ htw_start(); - __kvm_guest_exit(); + guest_exit_irqoff(); local_irq_enable(); if (vcpu->sigset_active) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index e20beae5ca7a..6b2859c12ae8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2522,7 +2522,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) spin_unlock(&pvc->lock); - kvm_guest_enter(); + guest_enter(); srcu_idx = srcu_read_lock(&vc->kvm->srcu); @@ -2570,7 +2570,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); - kvm_guest_exit(); + guest_exit(); for (sub = 0; sub < core_info.n_subcores; ++sub) list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub], diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 8e4f64f0b774..6a66c5ff0827 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -914,7 +914,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, /* We get here with MSR.EE=1 */ trace_kvm_exit(exit_nr, vcpu); - kvm_guest_exit(); + guest_exit(); switch (exit_nr) { case BOOK3S_INTERRUPT_INST_STORAGE: @@ -1531,7 +1531,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu 
*vcpu) kvmppc_clear_debug(vcpu); - /* No need for kvm_guest_exit. It's done in handle_exit. + /* No need for guest_exit. It's done in handle_exit. We also get here with interrupts enabled. */ /* Make sure we save the guest FPU/Altivec/VSX state */ diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 4afae695899a..02b4672f7347 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -776,7 +776,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ret = __kvmppc_vcpu_run(kvm_run, vcpu); - /* No need for kvm_guest_exit. It's done in handle_exit. + /* No need for guest_exit. It's done in handle_exit. We also get here with interrupts enabled. */ /* Switch back to user space debug context */ @@ -1012,7 +1012,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, } trace_kvm_exit(exit_nr, vcpu); - __kvm_guest_exit(); + guest_exit_irqoff(); local_irq_enable(); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 02416fea7653..1ac036e45ed4 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -119,7 +119,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) continue; } - __kvm_guest_enter(); + guest_enter_irqoff(); return 1; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 03eeeb0ded24..d42428c11794 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -2623,14 +2623,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) * guest_enter and guest_exit should be no uaccess. */ local_irq_disable(); - __kvm_guest_enter(); + guest_enter_irqoff(); __disable_cpu_timer_accounting(vcpu); local_irq_enable(); exit_reason = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); local_irq_disable(); __enable_cpu_timer_accounting(vcpu); - __kvm_guest_exit(); + guest_exit_irqoff(); local_irq_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index 6895e7b3be12..c106488b4137 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -765,13 +765,13 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); local_irq_disable(); - kvm_guest_enter(); + guest_enter_irqoff(); local_irq_enable(); rc = sie64a(scb_s, vcpu->run->s.regs.gprs); local_irq_disable(); - kvm_guest_exit(); + guest_exit_irqoff(); local_irq_enable(); vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9e50e2ad6d08..618463abeec5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6658,7 +6658,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu->vcpu_id); wait_lapic_expire(vcpu); - __kvm_guest_enter(); + guest_enter_irqoff(); if (unlikely(vcpu->arch.switch_db_regs)) { set_debugreg(0, 7); @@ -6717,7 +6717,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) */ barrier(); - kvm_guest_exit(); + guest_exit(); preempt_enable(); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ffff40522688..66b2f6159aad 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -875,28 +875,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm, } #endif -/* must be called with irqs disabled */ -static inline void __kvm_guest_enter(void) -{ - guest_enter_irqoff(); -} - -/* must be called with irqs disabled */ -static inline void __kvm_guest_exit(void) -{ - guest_exit_irqoff(); -} - -static inline void kvm_guest_enter(void) -{ - guest_enter(); -} - 
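/*
 * Sketch (not part of this patch) of the call-site pattern once the
 * wrappers are gone: the context_tracking helpers are called directly,
 * with the _irqoff variants used where interrupts are already disabled.
 * example_vcpu_run() and enter_the_guest() are hypothetical names
 * standing in for an arch-specific entry such as sie64a() or
 * vcpu->arch.vcpu_run().
 */
static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret;

	local_irq_disable();
	guest_enter_irqoff();		/* was __kvm_guest_enter() */
	ret = enter_the_guest(vcpu);	/* arch-specific guest entry */
	guest_exit_irqoff();		/* was __kvm_guest_exit() */
	local_irq_enable();

	return ret;
}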
-static inline void kvm_guest_exit(void) -{ - guest_exit(); -} - /* * search_memslots() and __gfn_to_memslot() are here because they are * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. -- cgit v1.2.3 From 90e9311a34e7b88f246a6d741ef70e3fdba15a34 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 23 Jun 2016 17:34:39 +0100 Subject: MIPS: KVM: Convert exception entry to uasm MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the whole of locore.S (assembly to enter guest and handle exception entry) to be generated dynamically with uasm. This is done with minimal changes to the resulting code. The main changes are: - Some constants are generated by uasm using LUI+ADDIU instead of LUI+ORI. - Loading of lo and hi are swapped around in vcpu_run but not when resuming the guest after an exit. Both bits of logic are now generated by the same code. - Register MOVEs in uasm use different ADDU operand ordering to GNU as, putting zero register into rs instead of rt. - The JALR.HB to call the C exit handler is switched to JALR, since the hazard barrier would appear to be unnecessary. This will allow further optimisation in the future to dynamically handle the capabilities of the CPU. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 8 +- arch/mips/kvm/Kconfig | 1 + arch/mips/kvm/Makefile | 2 +- arch/mips/kvm/entry.c | 622 +++++++++++++++++++++++++++++++++++++++ arch/mips/kvm/interrupt.h | 4 - arch/mips/kvm/locore.S | 602 ------------------------------------- arch/mips/kvm/mips.c | 37 +-- 7 files changed, 642 insertions(+), 634 deletions(-) create mode 100644 arch/mips/kvm/entry.c delete mode 100644 arch/mips/kvm/locore.S (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index b0773c6d622f..2e76e899079c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -533,8 +533,12 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); -/* Trampoline ASM routine to start running in "Guest" context */ -extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); +extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu); + +/* Building of entry/exception code */ +void *kvm_mips_build_vcpu_run(void *addr); +void *kvm_mips_build_exception(void *addr); +void *kvm_mips_build_exit(void *addr); /* FPU/MSA context management */ void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu); diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 2ae12825529f..7c56d6b124d1 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -17,6 +17,7 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM + select EXPORT_UASM select PREEMPT_NOTIFIERS select ANON_INODES select KVM_MMIO diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 0aabe40fcac9..847429de780d 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile @@ -7,7 +7,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o -kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ +kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \ interrupt.o stats.o commpage.o \ dyntrans.o trap_emul.o fpu.o kvm-objs += mmu.o
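For readers coming from locore.S, the uasm idiom that entry.c relies on is simple: each uasm_i_* call encodes one instruction at the address pointed to by p and advances p, so generator functions can be chained by returning the next free address. A minimal sketch of that pattern, using only the core asm/uasm.h API (example_build_stub and the hard-coded register numbers are illustrative, not taken from the patch):

#include <linux/types.h>
#include <asm/uasm.h>

/*
 * Emit a trivial three-instruction stub at 'addr' and return the next
 * free address, mirroring how entry.c chains its build functions.
 */
static void *example_build_stub(void *addr)
{
	u32 *p = addr;

	uasm_i_addiu(&p, 2, 0, 123);	/* addiu v0, zero, 123 */
	uasm_i_jr(&p, 31);		/* jr ra */
	uasm_i_nop(&p);			/* branch delay slot */

	return p;			/* next free address */
}

Labels and relocations (uasm_l_*, uasm_il_*, uasm_resolve_relocs()) extend the same pattern to forward branches, as kvm_mips_build_enter_guest() in the new file below demonstrates.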
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c new file mode 100644 index 000000000000..9a18b4939b35 --- /dev/null +++ b/arch/mips/kvm/entry.c @@ -0,0 +1,622 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Generation of main entry point for the guest, exception handling. + * + * Copyright (C) 2012 MIPS Technologies, Inc. + * Authors: Sanjay Lal + * + * Copyright (C) 2016 Imagination Technologies Ltd. + */ + +#include +#include +#include +#include + +/* Register names */ +#define ZERO 0 +#define AT 1 +#define V0 2 +#define V1 3 +#define A0 4 +#define A1 5 + +#if _MIPS_SIM == _MIPS_SIM_ABI32 +#define T0 8 +#define T1 9 +#define T2 10 +#define T3 11 +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ + +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 +#define T0 12 +#define T1 13 +#define T2 14 +#define T3 15 +#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ + +#define S0 16 +#define S1 17 +#define T9 25 +#define K0 26 +#define K1 27 +#define GP 28 +#define SP 29 +#define RA 31 + +/* Some CP0 registers */ +#define C0_HWRENA 7, 0 +#define C0_BADVADDR 8, 0 +#define C0_ENTRYHI 10, 0 +#define C0_STATUS 12, 0 +#define C0_CAUSE 13, 0 +#define C0_EPC 14, 0 +#define C0_EBASE 15, 1 +#define C0_CONFIG3 16, 3 +#define C0_CONFIG5 16, 5 +#define C0_DDATA_LO 28, 3 +#define C0_ERROREPC 30, 0 + +#define CALLFRAME_SIZ 32 + +enum label_id { + label_fpu_1 = 1, + label_msa_1, + label_return_to_host, + label_kernel_asid, +}; + +UASM_L_LA(_fpu_1) +UASM_L_LA(_msa_1) +UASM_L_LA(_return_to_host) +UASM_L_LA(_kernel_asid) + +static void *kvm_mips_build_enter_guest(void *addr); +static void *kvm_mips_build_ret_from_exit(void *addr); +static void *kvm_mips_build_ret_to_guest(void *addr); +static void *kvm_mips_build_ret_to_host(void *addr); + +/** + * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU. + * @addr: Address to start writing code. + * + * Assemble the start of the vcpu_run function to run a guest VCPU. The function + * conforms to the following prototype: + * + * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); + * + * The exit from the guest and return to the caller is handled by the code + * generated by kvm_mips_build_ret_to_host(). + * + * Returns: Next address after end of written function. 
+ */ +void *kvm_mips_build_vcpu_run(void *addr) +{ + u32 *p = addr; + unsigned int i; + + /* + * A0: run + * A1: vcpu + */ + + /* k0/k1 not being used in host kernel context */ + uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs)); + for (i = 16; i < 32; ++i) { + if (i == 24) + i = 28; + UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1); + } + + /* Save hi/lo */ + uasm_i_mflo(&p, V0); + UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1); + uasm_i_mfhi(&p, V1); + UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1); + + /* Save host status */ + uasm_i_mfc0(&p, V0, C0_STATUS); + UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1); + + /* Save DDATA_LO, will be used to store pointer to vcpu */ + uasm_i_mfc0(&p, V1, C0_DDATA_LO); + UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1); + + /* DDATA_LO has pointer to vcpu */ + uasm_i_mtc0(&p, A1, C0_DDATA_LO); + + /* Offset into vcpu->arch */ + uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch)); + + /* + * Save the host stack to VCPU, used for exception processing + * when we exit from the Guest + */ + UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); + + /* Save the kernel gp as well */ + UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); + + /* + * Setup status register for running the guest in UM, interrupts + * are disabled + */ + UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV); + uasm_i_mtc0(&p, K0, C0_STATUS); + uasm_i_ehb(&p); + + /* load up the new EBASE */ + UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); + uasm_i_mtc0(&p, K0, C0_EBASE); + + /* + * Now that the new EBASE has been loaded, unset BEV, set + * interrupt mask as it was but make sure that timer interrupts + * are enabled + */ + uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE); + uasm_i_andi(&p, V0, V0, ST0_IM); + uasm_i_or(&p, K0, K0, V0); + uasm_i_mtc0(&p, K0, C0_STATUS); + uasm_i_ehb(&p); + + p = kvm_mips_build_enter_guest(p); + + return p; +} + +/** + * kvm_mips_build_enter_guest() - Assemble code to resume guest execution. + * @addr: Address to start writing code. + * + * Assemble the code to resume guest execution. This code is common between the + * initial entry into the guest from the host, and returning from the exit + * handler back to the guest. + * + * Returns: Next address after end of written function. 
+ */ +static void *kvm_mips_build_enter_guest(void *addr) +{ + u32 *p = addr; + unsigned int i; + struct uasm_label labels[2]; + struct uasm_reloc relocs[2]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + /* Set Guest EPC */ + UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); + uasm_i_mtc0(&p, T0, C0_EPC); + + /* Set the ASID for the Guest Kernel */ + UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); + UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), + T0); + uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); + uasm_i_xori(&p, T0, T0, KSU_USER); + uasm_il_bnez(&p, &r, T0, label_kernel_asid); + uasm_i_addiu(&p, T1, K1, + offsetof(struct kvm_vcpu_arch, guest_kernel_asid)); + /* else user */ + uasm_i_addiu(&p, T1, K1, + offsetof(struct kvm_vcpu_arch, guest_user_asid)); + uasm_l_kernel_asid(&l, p); + + /* t1: contains the base of the ASID array, need to get the cpu id */ + /* smp_processor_id */ + UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP); + /* x4 */ + uasm_i_sll(&p, T2, T2, 2); + UASM_i_ADDU(&p, T3, T1, T2); + UASM_i_LW(&p, K0, 0, T3); +#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE + /* x sizeof(struct cpuinfo_mips)/4 */ + uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4); + uasm_i_mul(&p, T2, T2, T3); + + UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); + UASM_i_ADDU(&p, AT, AT, T2); + UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT); + uasm_i_and(&p, K0, K0, T2); +#else + uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); +#endif + uasm_i_mtc0(&p, K0, C0_ENTRYHI); + uasm_i_ehb(&p); + + /* Disable RDHWR access */ + uasm_i_mtc0(&p, ZERO, C0_HWRENA); + + /* load the guest context from VCPU and return */ + for (i = 1; i < 32; ++i) { + /* Guest k0/k1 loaded later */ + if (i == K0 || i == K1) + continue; + UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); + } + + /* Restore hi/lo */ + UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1); + uasm_i_mthi(&p, K0); + + UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1); + uasm_i_mtlo(&p, K0); + + /* Restore the guest's k0/k1 registers */ + UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); + UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); + + /* Jump to guest */ + uasm_i_eret(&p); + + uasm_resolve_relocs(relocs, labels); + + return p; +} + +/** + * kvm_mips_build_exception() - Assemble first level guest exception handler. + * @addr: Address to start writing code. + * + * Assemble exception vector code for guest execution. The generated vector will + * jump to the common exception handler generated by kvm_mips_build_exit(). + * + * Returns: Next address after end of written function. + */ +void *kvm_mips_build_exception(void *addr) +{ + u32 *p = addr; + + /* Save guest k0 */ + uasm_i_mtc0(&p, K0, C0_ERROREPC); + uasm_i_ehb(&p); + + /* Get EBASE */ + uasm_i_mfc0(&p, K0, C0_EBASE); + /* Get rid of CPUNum */ + uasm_i_srl(&p, K0, K0, 10); + uasm_i_sll(&p, K0, K0, 10); + /* Save k1 @ offset 0x3000 */ + UASM_i_SW(&p, K1, 0x3000, K0); + + /* Exception handler is installed @ offset 0x2000 */ + uasm_i_addiu(&p, K0, K0, 0x2000); + /* Jump to the function */ + uasm_i_jr(&p, K0); + uasm_i_nop(&p); + + return p; +} + +/** + * kvm_mips_build_exit() - Assemble common guest exit handler. + * @addr: Address to start writing code. + * + * Assemble the generic guest exit handling code. 
This is called by the + * exception vectors (generated by kvm_mips_build_exception()), and calls + * kvm_mips_handle_exit(), then either resumes the guest or returns to the host + * depending on the return value. + * + * Returns: Next address after end of written function. + */ +void *kvm_mips_build_exit(void *addr) +{ + u32 *p = addr; + unsigned int i; + struct uasm_label labels[3]; + struct uasm_reloc relocs[3]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + /* + * Generic Guest exception handler. We end up here when the guest + * does something that causes a trap to kernel mode. + */ + + /* Get the VCPU pointer from DDATA_LO */ + uasm_i_mfc0(&p, K1, C0_DDATA_LO); + uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); + + /* Start saving Guest context to VCPU */ + for (i = 0; i < 32; ++i) { + /* Guest k0/k1 saved later */ + if (i == K0 || i == K1) + continue; + UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); + } + + /* We need to save hi/lo and restore them on the way out */ + uasm_i_mfhi(&p, T0); + UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1); + + uasm_i_mflo(&p, T0); + UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1); + + /* Finally save guest k0/k1 to VCPU */ + uasm_i_mfc0(&p, T0, C0_ERROREPC); + UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); + + /* Get GUEST k1 and save it in VCPU */ + uasm_i_addiu(&p, T1, ZERO, ~0x2ff); + uasm_i_mfc0(&p, T0, C0_EBASE); + uasm_i_and(&p, T0, T0, T1); + UASM_i_LW(&p, T0, 0x3000, T0); + UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); + + /* Now that context has been saved, we can use other registers */ + + /* Restore vcpu */ + uasm_i_mfc0(&p, A1, C0_DDATA_LO); + uasm_i_move(&p, S1, A1); + + /* Restore run (vcpu->run) */ + UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1); + /* Save pointer to run in s0, will be saved by the compiler */ + uasm_i_move(&p, S0, A0); + + /* + * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process + * the exception + */ + uasm_i_mfc0(&p, K0, C0_EPC); + UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1); + + uasm_i_mfc0(&p, K0, C0_BADVADDR); + UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr), + K1); + + uasm_i_mfc0(&p, K0, C0_CAUSE); + uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1); + + /* Now restore the host state just enough to run the handlers */ + + /* Switch EBASE to the one used by Linux */ + /* load up the host EBASE */ + uasm_i_mfc0(&p, V0, C0_STATUS); + + uasm_i_lui(&p, AT, ST0_BEV >> 16); + uasm_i_or(&p, K0, V0, AT); + + uasm_i_mtc0(&p, K0, C0_STATUS); + uasm_i_ehb(&p); + + UASM_i_LA_mostly(&p, K0, (long)&ebase); + UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0); + uasm_i_mtc0(&p, K0, C0_EBASE); + + /* + * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't + * trigger FPE for pending exceptions. + */ + uasm_i_lui(&p, AT, ST0_CU1 >> 16); + uasm_i_and(&p, V1, V0, AT); + uasm_il_beqz(&p, &r, V1, label_fpu_1); + uasm_i_nop(&p); + uasm_i_cfc1(&p, T0, 31); + uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31), K1); + uasm_i_ctc1(&p, ZERO, 31); + uasm_l_fpu_1(&l, p); + +#ifdef CONFIG_CPU_HAS_MSA + /* + * If MSA is enabled, save MSACSR and clear it so that later + * instructions don't trigger MSAFPE for pending exceptions. 
+ */ + uasm_i_mfc0(&p, T0, C0_CONFIG3); + uasm_i_ext(&p, T0, T0, 28, 1); /* MIPS_CONF3_MSAP */ + uasm_il_beqz(&p, &r, T0, label_msa_1); + uasm_i_nop(&p); + uasm_i_mfc0(&p, T0, C0_CONFIG5); + uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */ + uasm_il_beqz(&p, &r, T0, label_msa_1); + uasm_i_nop(&p); + uasm_i_cfcmsa(&p, T0, MSA_CSR); + uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr), + K1); + uasm_i_ctcmsa(&p, MSA_CSR, ZERO); + uasm_l_msa_1(&l, p); +#endif + + /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ + uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); + uasm_i_and(&p, V0, V0, AT); + uasm_i_lui(&p, AT, ST0_CU0 >> 16); + uasm_i_or(&p, V0, V0, AT); + uasm_i_mtc0(&p, V0, C0_STATUS); + uasm_i_ehb(&p); + + /* Load up host GP */ + UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); + + /* Need a stack before we can jump to "C" */ + UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); + + /* Saved host state */ + uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs)); + + /* + * XXXKYMA do we need to load the host ASID, maybe not because the + * kernel entries are marked GLOBAL, need to verify + */ + + /* Restore host DDATA_LO */ + UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP); + uasm_i_mtc0(&p, K0, C0_DDATA_LO); + + /* Restore RDHWR access */ + UASM_i_LA_mostly(&p, K0, (long)&hwrena); + uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); + uasm_i_mtc0(&p, K0, C0_HWRENA); + + /* Jump to handler */ + /* + * XXXKYMA: not sure if this is safe, how large is the stack?? + * Now jump to the kvm_mips_handle_exit() to see if we can deal + * with this in the kernel + */ + UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit); + uasm_i_jalr(&p, RA, T9); + uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ); + + uasm_resolve_relocs(relocs, labels); + + p = kvm_mips_build_ret_from_exit(p); + + return p; +} + +/** + * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler. + * @addr: Address to start writing code. + * + * Assemble the code to handle the return from kvm_mips_handle_exit(), either + * resuming the guest or returning to the host depending on the return value. + * + * Returns: Next address after end of written function. + */ +static void *kvm_mips_build_ret_from_exit(void *addr) +{ + u32 *p = addr; + struct uasm_label labels[2]; + struct uasm_reloc relocs[2]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); + + /* Return from handler Make sure interrupts are disabled */ + uasm_i_di(&p, ZERO); + uasm_i_ehb(&p); + + /* + * XXXKYMA: k0/k1 could have been blown away if we processed + * an exception while we were handling the exception from the + * guest, reload k1 + */ + + uasm_i_move(&p, K1, S1); + uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); + + /* + * Check return value, should tell us if we are returning to the + * host (handle I/O etc)or resuming the guest + */ + uasm_i_andi(&p, T0, V0, RESUME_HOST); + uasm_il_bnez(&p, &r, T0, label_return_to_host); + uasm_i_nop(&p); + + p = kvm_mips_build_ret_to_guest(p); + + uasm_l_return_to_host(&l, p); + p = kvm_mips_build_ret_to_host(p); + + uasm_resolve_relocs(relocs, labels); + + return p; +} + +/** + * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest. + * @addr: Address to start writing code. + * + * Assemble the code to handle return from the guest exit handler + * (kvm_mips_handle_exit()) back to the guest. 
+ * + * Returns: Next address after end of written function. + */ +static void *kvm_mips_build_ret_to_guest(void *addr) +{ + u32 *p = addr; + + /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ + uasm_i_mtc0(&p, S1, C0_DDATA_LO); + + /* Load up the Guest EBASE to minimize the window where BEV is set */ + UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); + + /* Switch EBASE back to the one used by KVM */ + uasm_i_mfc0(&p, V1, C0_STATUS); + uasm_i_lui(&p, AT, ST0_BEV >> 16); + uasm_i_or(&p, K0, V1, AT); + uasm_i_mtc0(&p, K0, C0_STATUS); + uasm_i_ehb(&p); + uasm_i_mtc0(&p, T0, C0_EBASE); + + /* Setup status register for running guest in UM */ + uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); + UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX)); + uasm_i_and(&p, V1, V1, AT); + uasm_i_mtc0(&p, V1, C0_STATUS); + uasm_i_ehb(&p); + + p = kvm_mips_build_enter_guest(p); + + return p; +} + +/** + * kvm_mips_build_ret_to_host() - Assemble code to return to the host. + * @addr: Address to start writing code. + * + * Assemble the code to handle return from the guest exit handler + * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run + * function generated by kvm_mips_build_vcpu_run(). + * + * Returns: Next address after end of written function. + */ +static void *kvm_mips_build_ret_to_host(void *addr) +{ + u32 *p = addr; + unsigned int i; + + /* EBASE is already pointing to Linux */ + UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1); + uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs)); + + /* Restore host DDATA_LO */ + UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), K1); + uasm_i_mtc0(&p, K0, C0_DDATA_LO); + + /* + * r2/v0 is the return code, shift it down by 2 (arithmetic) + * to recover the err code + */ + uasm_i_sra(&p, K0, V0, 2); + uasm_i_move(&p, V0, K0); + + /* Load context saved on the host stack */ + for (i = 16; i < 31; ++i) { + if (i == 24) + i = 28; + UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1); + } + + UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1); + uasm_i_mthi(&p, K0); + + UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1); + uasm_i_mtlo(&p, K0); + + /* Restore RDHWR access */ + UASM_i_LA_mostly(&p, K0, (long)&hwrena); + uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); + uasm_i_mtc0(&p, K0, C0_HWRENA); + + /* Restore RA, which is the address we will return to */ + UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1); + uasm_i_jr(&p, RA); + uasm_i_nop(&p); + + return p; +} + diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index d661c100b219..fb118a2c8379 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h @@ -28,10 +28,6 @@ #define MIPS_EXC_MAX 12 /* XXXSL More to follow */ -extern char __kvm_mips_vcpu_run_end[]; -extern char mips32_exception[], mips32_exceptionEnd[]; -extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; - #define C_TI (_ULCAST_(1) << 30) #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S deleted file mode 100644 index 698286c0f732..000000000000 --- a/arch/mips/kvm/locore.S +++ /dev/null @@ -1,602 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Main entry point for the guest, exception handling. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
- * Authors: Sanjay Lal - */ - -#include -#include -#include -#include -#include -#include - -#define _C_LABEL(x) x -#define MIPSX(name) mips32_ ## name -#define CALLFRAME_SIZ 32 - -/* - * VECTOR - * exception vector entrypoint - */ -#define VECTOR(x, regmask) \ - .ent _C_LABEL(x),0; \ - EXPORT(x); - -#define VECTOR_END(x) \ - EXPORT(x); - -/* Overload, Danger Will Robinson!! */ -#define PT_HOST_USERLOCAL PT_EPC - -#define CP0_DDATA_LO $28,3 - -/* Resume Flags */ -#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ - -#define RESUME_GUEST 0 -#define RESUME_HOST RESUME_FLAG_HOST - -/* - * __kvm_mips_vcpu_run: entry point to the guest - * a0: run - * a1: vcpu - */ - .set noreorder - -FEXPORT(__kvm_mips_vcpu_run) - /* k0/k1 not being used in host kernel context */ - INT_ADDIU k1, sp, -PT_SIZE - LONG_S $16, PT_R16(k1) - LONG_S $17, PT_R17(k1) - LONG_S $18, PT_R18(k1) - LONG_S $19, PT_R19(k1) - LONG_S $20, PT_R20(k1) - LONG_S $21, PT_R21(k1) - LONG_S $22, PT_R22(k1) - LONG_S $23, PT_R23(k1) - - LONG_S $28, PT_R28(k1) - LONG_S $29, PT_R29(k1) - LONG_S $30, PT_R30(k1) - LONG_S $31, PT_R31(k1) - - /* Save hi/lo */ - mflo v0 - LONG_S v0, PT_LO(k1) - mfhi v1 - LONG_S v1, PT_HI(k1) - - /* Save host status */ - mfc0 v0, CP0_STATUS - LONG_S v0, PT_STATUS(k1) - - /* Save DDATA_LO, will be used to store pointer to vcpu */ - mfc0 v1, CP0_DDATA_LO - LONG_S v1, PT_HOST_USERLOCAL(k1) - - /* DDATA_LO has pointer to vcpu */ - mtc0 a1, CP0_DDATA_LO - - /* Offset into vcpu->arch */ - INT_ADDIU k1, a1, VCPU_HOST_ARCH - - /* - * Save the host stack to VCPU, used for exception processing - * when we exit from the Guest - */ - LONG_S sp, VCPU_HOST_STACK(k1) - - /* Save the kernel gp as well */ - LONG_S gp, VCPU_HOST_GP(k1) - - /* - * Setup status register for running the guest in UM, interrupts - * are disabled - */ - li k0, (ST0_EXL | KSU_USER | ST0_BEV) - mtc0 k0, CP0_STATUS - ehb - - /* load up the new EBASE */ - LONG_L k0, VCPU_GUEST_EBASE(k1) - mtc0 k0, CP0_EBASE - - /* - * Now that the new EBASE has been loaded, unset BEV, set - * interrupt mask as it was but make sure that timer interrupts - * are enabled - */ - li k0, (ST0_EXL | KSU_USER | ST0_IE) - andi v0, v0, ST0_IM - or k0, k0, v0 - mtc0 k0, CP0_STATUS - ehb - - /* Set Guest EPC */ - LONG_L t0, VCPU_PC(k1) - mtc0 t0, CP0_EPC - -FEXPORT(__kvm_mips_load_asid) - /* Set the ASID for the Guest Kernel */ - PTR_L t0, VCPU_COP0(k1) - LONG_L t0, COP0_STATUS(t0) - andi t0, KSU_USER | ST0_ERL | ST0_EXL - xori t0, KSU_USER - bnez t0, 1f /* If kernel */ - INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ - INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ -1: - /* t1: contains the base of the ASID array, need to get the cpu id */ - LONG_L t2, TI_CPU($28) /* smp_processor_id */ - INT_SLL t2, t2, 2 /* x4 */ - REG_ADDU t3, t1, t2 - LONG_L k0, (t3) -#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE - li t3, CPUINFO_SIZE/4 - mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */ - LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2) - and k0, k0, t2 -#else - andi k0, k0, MIPS_ENTRYHI_ASID -#endif - mtc0 k0, CP0_ENTRYHI - ehb - - /* Disable RDHWR access */ - mtc0 zero, CP0_HWRENA - - .set noat - /* Now load up the Guest Context from VCPU */ - LONG_L $1, VCPU_R1(k1) - LONG_L $2, VCPU_R2(k1) - LONG_L $3, VCPU_R3(k1) - - LONG_L $4, VCPU_R4(k1) - LONG_L $5, VCPU_R5(k1) - LONG_L $6, VCPU_R6(k1) - LONG_L $7, VCPU_R7(k1) - - LONG_L $8, VCPU_R8(k1) - LONG_L $9, VCPU_R9(k1) - LONG_L $10, VCPU_R10(k1) - LONG_L $11, VCPU_R11(k1) - LONG_L $12, VCPU_R12(k1) - LONG_L $13, VCPU_R13(k1) - LONG_L $14, 
VCPU_R14(k1) - LONG_L $15, VCPU_R15(k1) - LONG_L $16, VCPU_R16(k1) - LONG_L $17, VCPU_R17(k1) - LONG_L $18, VCPU_R18(k1) - LONG_L $19, VCPU_R19(k1) - LONG_L $20, VCPU_R20(k1) - LONG_L $21, VCPU_R21(k1) - LONG_L $22, VCPU_R22(k1) - LONG_L $23, VCPU_R23(k1) - LONG_L $24, VCPU_R24(k1) - LONG_L $25, VCPU_R25(k1) - - /* k0/k1 loaded up later */ - - LONG_L $28, VCPU_R28(k1) - LONG_L $29, VCPU_R29(k1) - LONG_L $30, VCPU_R30(k1) - LONG_L $31, VCPU_R31(k1) - - /* Restore hi/lo */ - LONG_L k0, VCPU_LO(k1) - mtlo k0 - - LONG_L k0, VCPU_HI(k1) - mthi k0 - -FEXPORT(__kvm_mips_load_k0k1) - /* Restore the guest's k0/k1 registers */ - LONG_L k0, VCPU_R26(k1) - LONG_L k1, VCPU_R27(k1) - - /* Jump to guest */ - eret -EXPORT(__kvm_mips_vcpu_run_end) - -VECTOR(MIPSX(exception), unknown) -/* Find out what mode we came from and jump to the proper handler. */ - mtc0 k0, CP0_ERROREPC #01: Save guest k0 - ehb #02: - - mfc0 k0, CP0_EBASE #02: Get EBASE - INT_SRL k0, k0, 10 #03: Get rid of CPUNum - INT_SLL k0, k0, 10 #04 - LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 - INT_ADDIU k0, k0, 0x2000 #06: Exception handler is - # installed @ offset 0x2000 - j k0 #07: jump to the function - nop #08: branch delay slot -VECTOR_END(MIPSX(exceptionEnd)) -.end MIPSX(exception) - -/* - * Generic Guest exception handler. We end up here when the guest - * does something that causes a trap to kernel mode. - */ -NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) - /* Get the VCPU pointer from DDTATA_LO */ - mfc0 k1, CP0_DDATA_LO - INT_ADDIU k1, k1, VCPU_HOST_ARCH - - /* Start saving Guest context to VCPU */ - LONG_S $0, VCPU_R0(k1) - LONG_S $1, VCPU_R1(k1) - LONG_S $2, VCPU_R2(k1) - LONG_S $3, VCPU_R3(k1) - LONG_S $4, VCPU_R4(k1) - LONG_S $5, VCPU_R5(k1) - LONG_S $6, VCPU_R6(k1) - LONG_S $7, VCPU_R7(k1) - LONG_S $8, VCPU_R8(k1) - LONG_S $9, VCPU_R9(k1) - LONG_S $10, VCPU_R10(k1) - LONG_S $11, VCPU_R11(k1) - LONG_S $12, VCPU_R12(k1) - LONG_S $13, VCPU_R13(k1) - LONG_S $14, VCPU_R14(k1) - LONG_S $15, VCPU_R15(k1) - LONG_S $16, VCPU_R16(k1) - LONG_S $17, VCPU_R17(k1) - LONG_S $18, VCPU_R18(k1) - LONG_S $19, VCPU_R19(k1) - LONG_S $20, VCPU_R20(k1) - LONG_S $21, VCPU_R21(k1) - LONG_S $22, VCPU_R22(k1) - LONG_S $23, VCPU_R23(k1) - LONG_S $24, VCPU_R24(k1) - LONG_S $25, VCPU_R25(k1) - - /* Guest k0/k1 saved later */ - - LONG_S $28, VCPU_R28(k1) - LONG_S $29, VCPU_R29(k1) - LONG_S $30, VCPU_R30(k1) - LONG_S $31, VCPU_R31(k1) - - .set at - - /* We need to save hi/lo and restore them on the way out */ - mfhi t0 - LONG_S t0, VCPU_HI(k1) - - mflo t0 - LONG_S t0, VCPU_LO(k1) - - /* Finally save guest k0/k1 to VCPU */ - mfc0 t0, CP0_ERROREPC - LONG_S t0, VCPU_R26(k1) - - /* Get GUEST k1 and save it in VCPU */ - PTR_LI t1, ~0x2ff - mfc0 t0, CP0_EBASE - and t0, t0, t1 - LONG_L t0, 0x3000(t0) - LONG_S t0, VCPU_R27(k1) - - /* Now that context has been saved, we can use other registers */ - - /* Restore vcpu */ - mfc0 a1, CP0_DDATA_LO - move s1, a1 - - /* Restore run (vcpu->run) */ - LONG_L a0, VCPU_RUN(a1) - /* Save pointer to run in s0, will be saved by the compiler */ - move s0, a0 - - /* - * Save Host level EPC, BadVaddr and Cause to VCPU, useful to - * process the exception - */ - mfc0 k0,CP0_EPC - LONG_S k0, VCPU_PC(k1) - - mfc0 k0, CP0_BADVADDR - LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1) - - mfc0 k0, CP0_CAUSE - sw k0, VCPU_HOST_CP0_CAUSE(k1) - - /* Now restore the host state just enough to run the handlers */ - - /* Switch EBASE to the one used by Linux */ - /* load up the host EBASE */ - mfc0 v0, CP0_STATUS - - or k0, v0, ST0_BEV - - mtc0 k0, 
CP0_STATUS - ehb - - LONG_L k0, ebase - mtc0 k0,CP0_EBASE - - /* - * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't - * trigger FPE for pending exceptions. - */ - and v1, v0, ST0_CU1 - beqz v1, 1f - nop - .set push - SET_HARDFLOAT - cfc1 t0, fcr31 - sw t0, VCPU_FCR31(k1) - ctc1 zero,fcr31 - .set pop -1: - -#ifdef CONFIG_CPU_HAS_MSA - /* - * If MSA is enabled, save MSACSR and clear it so that later - * instructions don't trigger MSAFPE for pending exceptions. - */ - mfc0 t0, CP0_CONFIG3 - ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */ - beqz t0, 1f - nop - mfc0 t0, CP0_CONFIG5 - ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */ - beqz t0, 1f - nop - _cfcmsa t0, MSA_CSR - sw t0, VCPU_MSA_CSR(k1) - _ctcmsa MSA_CSR, zero -1: -#endif - - /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ - and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) - or v0, v0, ST0_CU0 - mtc0 v0, CP0_STATUS - ehb - - /* Load up host GP */ - LONG_L gp, VCPU_HOST_GP(k1) - - /* Need a stack before we can jump to "C" */ - LONG_L sp, VCPU_HOST_STACK(k1) - - /* Saved host state */ - INT_ADDIU sp, sp, -PT_SIZE - - /* - * XXXKYMA do we need to load the host ASID, maybe not because the - * kernel entries are marked GLOBAL, need to verify - */ - - /* Restore host DDATA_LO */ - LONG_L k0, PT_HOST_USERLOCAL(sp) - mtc0 k0, CP0_DDATA_LO - - /* Restore RDHWR access */ - INT_L k0, hwrena - mtc0 k0, CP0_HWRENA - - /* Jump to handler */ -FEXPORT(__kvm_mips_jump_to_handler) - /* - * XXXKYMA: not sure if this is safe, how large is the stack?? - * Now jump to the kvm_mips_handle_exit() to see if we can deal - * with this in the kernel - */ - PTR_LA t9, kvm_mips_handle_exit - jalr.hb t9 - INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ - - /* Return from handler Make sure interrupts are disabled */ - di - ehb - - /* - * XXXKYMA: k0/k1 could have been blown away if we processed - * an exception while we were handling the exception from the - * guest, reload k1 - */ - - move k1, s1 - INT_ADDIU k1, k1, VCPU_HOST_ARCH - - /* - * Check return value, should tell us if we are returning to the - * host (handle I/O etc)or resuming the guest - */ - andi t0, v0, RESUME_HOST - bnez t0, __kvm_mips_return_to_host - nop - -__kvm_mips_return_to_guest: - /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ - mtc0 s1, CP0_DDATA_LO - - /* Load up the Guest EBASE to minimize the window where BEV is set */ - LONG_L t0, VCPU_GUEST_EBASE(k1) - - /* Switch EBASE back to the one used by KVM */ - mfc0 v1, CP0_STATUS - or k0, v1, ST0_BEV - mtc0 k0, CP0_STATUS - ehb - mtc0 t0, CP0_EBASE - - /* Setup status register for running guest in UM */ - or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) - and v1, v1, ~(ST0_CU0 | ST0_MX) - mtc0 v1, CP0_STATUS - ehb - - /* Set Guest EPC */ - LONG_L t0, VCPU_PC(k1) - mtc0 t0, CP0_EPC - - /* Set the ASID for the Guest Kernel */ - PTR_L t0, VCPU_COP0(k1) - LONG_L t0, COP0_STATUS(t0) - andi t0, KSU_USER | ST0_ERL | ST0_EXL - xori t0, KSU_USER - bnez t0, 1f /* If kernel */ - INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ - INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ -1: - /* t1: contains the base of the ASID array, need to get the cpu id */ - LONG_L t2, TI_CPU($28) /* smp_processor_id */ - INT_SLL t2, t2, 2 /* x4 */ - REG_ADDU t3, t1, t2 - LONG_L k0, (t3) -#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE - li t3, CPUINFO_SIZE/4 - mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */ - LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2) - and k0, k0, t2 -#else - andi k0, k0, MIPS_ENTRYHI_ASID -#endif - mtc0 k0, 
CP0_ENTRYHI - ehb - - /* Disable RDHWR access */ - mtc0 zero, CP0_HWRENA - - .set noat - /* load the guest context from VCPU and return */ - LONG_L $0, VCPU_R0(k1) - LONG_L $1, VCPU_R1(k1) - LONG_L $2, VCPU_R2(k1) - LONG_L $3, VCPU_R3(k1) - LONG_L $4, VCPU_R4(k1) - LONG_L $5, VCPU_R5(k1) - LONG_L $6, VCPU_R6(k1) - LONG_L $7, VCPU_R7(k1) - LONG_L $8, VCPU_R8(k1) - LONG_L $9, VCPU_R9(k1) - LONG_L $10, VCPU_R10(k1) - LONG_L $11, VCPU_R11(k1) - LONG_L $12, VCPU_R12(k1) - LONG_L $13, VCPU_R13(k1) - LONG_L $14, VCPU_R14(k1) - LONG_L $15, VCPU_R15(k1) - LONG_L $16, VCPU_R16(k1) - LONG_L $17, VCPU_R17(k1) - LONG_L $18, VCPU_R18(k1) - LONG_L $19, VCPU_R19(k1) - LONG_L $20, VCPU_R20(k1) - LONG_L $21, VCPU_R21(k1) - LONG_L $22, VCPU_R22(k1) - LONG_L $23, VCPU_R23(k1) - LONG_L $24, VCPU_R24(k1) - LONG_L $25, VCPU_R25(k1) - - /* $/k1 loaded later */ - LONG_L $28, VCPU_R28(k1) - LONG_L $29, VCPU_R29(k1) - LONG_L $30, VCPU_R30(k1) - LONG_L $31, VCPU_R31(k1) - -FEXPORT(__kvm_mips_skip_guest_restore) - LONG_L k0, VCPU_HI(k1) - mthi k0 - - LONG_L k0, VCPU_LO(k1) - mtlo k0 - - LONG_L k0, VCPU_R26(k1) - LONG_L k1, VCPU_R27(k1) - - eret - .set at - -__kvm_mips_return_to_host: - /* EBASE is already pointing to Linux */ - LONG_L k1, VCPU_HOST_STACK(k1) - INT_ADDIU k1,k1, -PT_SIZE - - /* Restore host DDATA_LO */ - LONG_L k0, PT_HOST_USERLOCAL(k1) - mtc0 k0, CP0_DDATA_LO - - /* - * r2/v0 is the return code, shift it down by 2 (arithmetic) - * to recover the err code - */ - INT_SRA k0, v0, 2 - move $2, k0 - - /* Load context saved on the host stack */ - LONG_L $16, PT_R16(k1) - LONG_L $17, PT_R17(k1) - LONG_L $18, PT_R18(k1) - LONG_L $19, PT_R19(k1) - LONG_L $20, PT_R20(k1) - LONG_L $21, PT_R21(k1) - LONG_L $22, PT_R22(k1) - LONG_L $23, PT_R23(k1) - - LONG_L $28, PT_R28(k1) - LONG_L $29, PT_R29(k1) - LONG_L $30, PT_R30(k1) - - LONG_L k0, PT_HI(k1) - mthi k0 - - LONG_L k0, PT_LO(k1) - mtlo k0 - - /* Restore RDHWR access */ - INT_L k0, hwrena - mtc0 k0, CP0_HWRENA - - /* Restore RA, which is the address we will return to */ - LONG_L ra, PT_R31(k1) - j ra - nop - -VECTOR_END(MIPSX(GuestExceptionEnd)) -.end MIPSX(GuestException) - -MIPSX(exceptions): - #### - ##### The exception handlers. 
- ##### - .word _C_LABEL(MIPSX(GuestException)) # 0 - .word _C_LABEL(MIPSX(GuestException)) # 1 - .word _C_LABEL(MIPSX(GuestException)) # 2 - .word _C_LABEL(MIPSX(GuestException)) # 3 - .word _C_LABEL(MIPSX(GuestException)) # 4 - .word _C_LABEL(MIPSX(GuestException)) # 5 - .word _C_LABEL(MIPSX(GuestException)) # 6 - .word _C_LABEL(MIPSX(GuestException)) # 7 - .word _C_LABEL(MIPSX(GuestException)) # 8 - .word _C_LABEL(MIPSX(GuestException)) # 9 - .word _C_LABEL(MIPSX(GuestException)) # 10 - .word _C_LABEL(MIPSX(GuestException)) # 11 - .word _C_LABEL(MIPSX(GuestException)) # 12 - .word _C_LABEL(MIPSX(GuestException)) # 13 - .word _C_LABEL(MIPSX(GuestException)) # 14 - .word _C_LABEL(MIPSX(GuestException)) # 15 - .word _C_LABEL(MIPSX(GuestException)) # 16 - .word _C_LABEL(MIPSX(GuestException)) # 17 - .word _C_LABEL(MIPSX(GuestException)) # 18 - .word _C_LABEL(MIPSX(GuestException)) # 19 - .word _C_LABEL(MIPSX(GuestException)) # 20 - .word _C_LABEL(MIPSX(GuestException)) # 21 - .word _C_LABEL(MIPSX(GuestException)) # 22 - .word _C_LABEL(MIPSX(GuestException)) # 23 - .word _C_LABEL(MIPSX(GuestException)) # 24 - .word _C_LABEL(MIPSX(GuestException)) # 25 - .word _C_LABEL(MIPSX(GuestException)) # 26 - .word _C_LABEL(MIPSX(GuestException)) # 27 - .word _C_LABEL(MIPSX(GuestException)) # 28 - .word _C_LABEL(MIPSX(GuestException)) # 29 - .word _C_LABEL(MIPSX(GuestException)) # 30 - .word _C_LABEL(MIPSX(GuestException)) # 31 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 5f1163653b50..e3ae1229f147 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -247,8 +247,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { - int err, size, offset; - void *gebase; + int err, size; + void *gebase, *p; int i; struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); @@ -286,41 +286,28 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) /* Save new ebase */ vcpu->arch.guest_ebase = gebase; - /* Copy L1 Guest Exception handler to correct offset */ + /* Build guest exception vectors dynamically in unmapped memory */ /* TLB Refill, EXL = 0 */ - memcpy(gebase, mips32_exception, - mips32_exceptionEnd - mips32_exception); + kvm_mips_build_exception(gebase); /* General Exception Entry point */ - memcpy(gebase + 0x180, mips32_exception, - mips32_exceptionEnd - mips32_exception); + kvm_mips_build_exception(gebase + 0x180); /* For vectored interrupts poke the exception code @ all offsets 0-7 */ for (i = 0; i < 8; i++) { kvm_debug("L1 Vectored handler @ %p\n", gebase + 0x200 + (i * VECTORSPACING)); - memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, - mips32_exceptionEnd - mips32_exception); + kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING); } - /* General handler, relocate to unmapped space for sanity's sake */ - offset = 0x2000; - kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n", - gebase + offset, - mips32_GuestExceptionEnd - mips32_GuestException); + /* General exit handler */ + p = gebase + 0x2000; + p = kvm_mips_build_exit(p); - memcpy(gebase + offset, mips32_GuestException, - mips32_GuestExceptionEnd - mips32_GuestException); - -#ifdef MODULE - offset += mips32_GuestExceptionEnd - mips32_GuestException; - memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, - __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); - vcpu->arch.vcpu_run = gebase + offset; -#else - vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; -#endif + /* Guest entry routine */ 
+ vcpu->arch.vcpu_run = p; + p = kvm_mips_build_vcpu_run(p); /* Invalidate the icache for these ranges */ local_flush_icache_range((unsigned long)gebase, -- cgit v1.2.3 From d7b8f890b63f386ca28c820b8ddb7ff1f63cbe3c Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 23 Jun 2016 17:34:40 +0100 Subject: MIPS: KVM: Add dumping of generated entry code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Dump the generated entry code with pr_debug(), similar to how it is done in tlbex.c, so it can be more easily debugged. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e3ae1229f147..9f36dcb3c580 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -245,6 +245,23 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, } } +static inline void dump_handler(const char *symbol, void *start, void *end) +{ + u32 *p; + + pr_debug("LEAF(%s)\n", symbol); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + + for (p = start; p < (u32 *)end; ++p) + pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p); + + pr_debug("\t.set\tpop\n"); + + pr_debug("\tEND(%s)\n", symbol); +} + struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err, size; @@ -309,6 +326,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.vcpu_run = p; p = kvm_mips_build_vcpu_run(p); + /* Dump the generated code */ + pr_debug("#include <asm/asm.h>\n"); + pr_debug("#include <asm/regdef.h>\n"); + pr_debug("\n"); + dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); + dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); + dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); + /* Invalidate the icache for these ranges */ local_flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); -- cgit v1.2.3 From 1e5217f54251ddd339e00a0b30f126589737d467 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 23 Jun 2016 17:34:45 +0100 Subject: MIPS: KVM: Dynamically choose scratch registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Scratch cop0 registers are needed by KVM to be able to save/restore all the GPRs, including k0/k1, and for storing the VCPU pointer. However no registers are universally suitable for these purposes, so the decision should be made at runtime. Until now, we've used DDATA_LO to store the VCPU pointer, and ErrorEPC as a temporary. It could be argued that this is abuse of those registers, and DDATA_LO is known not to be usable on certain implementations (Cavium Octeon). If KScratch registers are present, use them instead. We save & restore the temporary register in addition to the VCPU pointer register when using a KScratch register for it, as it may be used for normal host TLB handling too.
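In bitmask terms the selection is simple: take the two lowest implemented KScratch selects, falling back to the DDATA_LO/ErrorEPC defaults when the mask is empty. A standalone sketch of just that logic (illustration only: the mask value is hypothetical, and the real code reads cpu_data[0].kscratch_mask and stores {CP0 register, select} pairs, as the diff below shows):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* Bit n set => KScratch select n exists; selects 0-1 are masked off
	 * (as in the patch) since they are not usable as general scratch. */
	unsigned int kscratch_mask = 0x2c & 0xfc;	/* hypothetical CPU: selects 2, 3, 5 */
	int vcpu_sel = -1, tmp_sel = -1;		/* -1 => keep the CP0 defaults */

	if (kscratch_mask) {
		vcpu_sel = ffs(kscratch_mask) - 1;	/* VCPU pointer register */
		kscratch_mask &= ~(1u << vcpu_sel);
	}
	if (kscratch_mask) {
		tmp_sel = ffs(kscratch_mask) - 1;	/* temporary register */
		kscratch_mask &= ~(1u << tmp_sel);
	}

	printf("vcpu scratch sel %d, tmp scratch sel %d\n", vcpu_sel, tmp_sel);
	return 0;
}

Keeping the choice in {register, select} pairs lets each uasm_i_mfc0()/uasm_i_mtc0() site take either the legacy registers or a KScratch register without special-casing.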
Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 1 + arch/mips/kvm/entry.c | 94 +++++++++++++++++++++++++++++++++------- arch/mips/kvm/mips.c | 4 ++ 3 files changed, 84 insertions(+), 15 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 2e76e899079c..a80c3208b234 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -536,6 +536,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu); /* Building of entry/exception code */ +int kvm_mips_entry_setup(void); void *kvm_mips_build_vcpu_run(void *addr); void *kvm_mips_build_exception(void *addr); void *kvm_mips_build_exit(void *addr); diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index 6395bfa7e542..b6e7fd9f12f0 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -61,6 +61,9 @@ #define CALLFRAME_SIZ 32 +static unsigned int scratch_vcpu[2] = { C0_DDATA_LO }; +static unsigned int scratch_tmp[2] = { C0_ERROREPC }; + enum label_id { label_fpu_1 = 1, label_msa_1, @@ -78,6 +81,69 @@ static void *kvm_mips_build_ret_from_exit(void *addr); static void *kvm_mips_build_ret_to_guest(void *addr); static void *kvm_mips_build_ret_to_host(void *addr); +/** + * kvm_mips_entry_setup() - Perform global setup for entry code. + * + * Perform global setup for entry code, such as choosing a scratch register. + * + * Returns: 0 on success. + * -errno on failure. + */ +int kvm_mips_entry_setup(void) +{ + /* + * We prefer to use KScratchN registers if they are available over the + * defaults above, which may not work on all cores. + */ + unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc; + + /* Pick a scratch register for storing VCPU */ + if (kscratch_mask) { + scratch_vcpu[0] = 31; + scratch_vcpu[1] = ffs(kscratch_mask) - 1; + kscratch_mask &= ~BIT(scratch_vcpu[1]); + } + + /* Pick a scratch register to use as a temp for saving state */ + if (kscratch_mask) { + scratch_tmp[0] = 31; + scratch_tmp[1] = ffs(kscratch_mask) - 1; + kscratch_mask &= ~BIT(scratch_tmp[1]); + } + + return 0; +} + +static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp, + unsigned int frame) +{ + /* Save the VCPU scratch register value in cp0_epc of the stack frame */ + uasm_i_mfc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); + UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); + + /* Save the temp scratch register value in cp0_cause of stack frame */ + if (scratch_tmp[0] == 31) { + uasm_i_mfc0(p, tmp, scratch_tmp[0], scratch_tmp[1]); + UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); + } +} + +static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp, + unsigned int frame) +{ + /* + * Restore host scratch register values saved by + * kvm_mips_build_save_scratch(). + */ + UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); + uasm_i_mtc0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); + + if (scratch_tmp[0] == 31) { + UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); + uasm_i_mtc0(p, tmp, scratch_tmp[0], scratch_tmp[1]); + } +} + /** * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU. * @addr: Address to start writing code.
@@ -120,12 +186,11 @@ void *kvm_mips_build_vcpu_run(void *addr) uasm_i_mfc0(&p, V0, C0_STATUS); UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1); - /* Save DDATA_LO, will be used to store pointer to vcpu */ - uasm_i_mfc0(&p, V1, C0_DDATA_LO); - UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1); + /* Save scratch registers, will be used to store pointer to vcpu etc */ + kvm_mips_build_save_scratch(&p, V1, K1); - /* DDATA_LO has pointer to vcpu */ - uasm_i_mtc0(&p, A1, C0_DDATA_LO); + /* VCPU scratch register has pointer to vcpu */ + uasm_i_mtc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]); /* Offset into vcpu->arch */ uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch)); @@ -273,7 +338,7 @@ void *kvm_mips_build_exception(void *addr) u32 *p = addr; /* Save guest k0 */ - uasm_i_mtc0(&p, K0, C0_ERROREPC); + uasm_i_mtc0(&p, K0, scratch_tmp[0], scratch_tmp[1]); uasm_i_ehb(&p); /* Get EBASE */ @@ -321,8 +386,8 @@ void *kvm_mips_build_exit(void *addr) * does something that causes a trap to kernel mode. */ - /* Get the VCPU pointer from DDATA_LO */ - uasm_i_mfc0(&p, K1, C0_DDATA_LO); + /* Get the VCPU pointer from the scratch register */ + uasm_i_mfc0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); /* Start saving Guest context to VCPU */ @@ -341,7 +406,7 @@ void *kvm_mips_build_exit(void *addr) UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1); /* Finally save guest k0/k1 to VCPU */ - uasm_i_mfc0(&p, T0, C0_ERROREPC); + uasm_i_mfc0(&p, T0, scratch_tmp[0], scratch_tmp[1]); UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); /* Get GUEST k1 and save it in VCPU */ @@ -354,7 +419,7 @@ void *kvm_mips_build_exit(void *addr) /* Now that context has been saved, we can use other registers */ /* Restore vcpu */ - uasm_i_mfc0(&p, A1, C0_DDATA_LO); + uasm_i_mfc0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]); uasm_i_move(&p, S1, A1); /* Restore run (vcpu->run) */ @@ -446,9 +511,8 @@ void *kvm_mips_build_exit(void *addr) * kernel entries are marked GLOBAL, need to verify */ - /* Restore host DDATA_LO */ - UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP); - uasm_i_mtc0(&p, K0, C0_DDATA_LO); + /* Restore host scratch registers, as we'll have clobbered them */ + kvm_mips_build_restore_scratch(&p, K0, SP); /* Restore RDHWR access */ UASM_i_LA_mostly(&p, K0, (long)&hwrena); @@ -536,8 +600,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr) { u32 *p = addr; - /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ - uasm_i_mtc0(&p, S1, C0_DDATA_LO); + /* Put the saved pointer to vcpu (s1) back into the scratch register */ + uasm_i_mtc0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]); /* Load up the Guest EBASE to minimize the window where BEV is set */ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 9f36dcb3c580..26cc0b93c565 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1775,6 +1775,10 @@ static int __init kvm_mips_init(void) { int ret; + ret = kvm_mips_entry_setup(); + if (ret) + return ret; + ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); if (ret) -- cgit v1.2.3 From 1f9ca62cbc5f4d1663a0f0d193156ce9dc6ed452 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 23 Jun 2016 17:34:46 +0100 Subject: MIPS: KVM: Relative branch to common exit handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a relative branch to get from the individual 
exception vectors to the common guest exit handler, rather than loading the address of the exit handler and jumping to it. This is made easier due to the fact we are now generating the entry code dynamically. This will also allow the exception code to be further reduced in future patches. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/include/asm/kvm_host.h | 2 +- arch/mips/kvm/entry.c | 23 +++++++++++++++++------ arch/mips/kvm/mips.c | 12 +++++++----- 3 files changed, 25 insertions(+), 12 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index a80c3208b234..b32785543787 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -538,7 +538,7 @@ extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu); /* Building of entry/exception code */ int kvm_mips_entry_setup(void); void *kvm_mips_build_vcpu_run(void *addr); -void *kvm_mips_build_exception(void *addr); +void *kvm_mips_build_exception(void *addr, void *handler); void *kvm_mips_build_exit(void *addr); /* FPU/MSA context management */ diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index b6e7fd9f12f0..fb2cbf653474 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -69,12 +69,14 @@ enum label_id { label_msa_1, label_return_to_host, label_kernel_asid, + label_exit_common, }; UASM_L_LA(_fpu_1) UASM_L_LA(_msa_1) UASM_L_LA(_return_to_host) UASM_L_LA(_kernel_asid) +UASM_L_LA(_exit_common) static void *kvm_mips_build_enter_guest(void *addr); static void *kvm_mips_build_ret_from_exit(void *addr); @@ -327,15 +329,23 @@ static void *kvm_mips_build_enter_guest(void *addr) /** * kvm_mips_build_exception() - Assemble first level guest exception handler. * @addr: Address to start writing code. + * @handler: Address of common handler (within range of @addr). * * Assemble exception vector code for guest execution. The generated vector will - jump to the common exception handler generated by kvm_mips_build_exit(). + branch to the common exception handler generated by kvm_mips_build_exit(). * * Returns: Next address after end of written function.
*/ -void *kvm_mips_build_exception(void *addr) +void *kvm_mips_build_exception(void *addr, void *handler) { u32 *p = addr; + struct uasm_label labels[2]; + struct uasm_reloc relocs[2]; + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; + + memset(labels, 0, sizeof(labels)); + memset(relocs, 0, sizeof(relocs)); /* Save guest k0 */ uasm_i_mtc0(&p, K0, scratch_tmp[0], scratch_tmp[1]); @@ -349,12 +359,13 @@ void *kvm_mips_build_exception(void *addr) /* Save k1 @ offset 0x3000 */ UASM_i_SW(&p, K1, 0x3000, K0); - /* Exception handler is installed @ offset 0x2000 */ - uasm_i_addiu(&p, K0, K0, 0x2000); - /* Jump to the function */ - uasm_i_jr(&p, K0); + /* Branch to the common handler */ + uasm_il_b(&p, &r, label_exit_common); uasm_i_nop(&p); + uasm_l_exit_common(&l, handler); + uasm_resolve_relocs(relocs, labels); + return p; } diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 26cc0b93c565..7c76768ff364 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -265,7 +265,7 @@ static inline void dump_handler(const char *symbol, void *start, void *end) struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err, size; - void *gebase, *p; + void *gebase, *p, *handler; int i; struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); @@ -304,22 +304,24 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) vcpu->arch.guest_ebase = gebase; /* Build guest exception vectors dynamically in unmapped memory */ + handler = gebase + 0x2000; /* TLB Refill, EXL = 0 */ - kvm_mips_build_exception(gebase); + kvm_mips_build_exception(gebase, handler); /* General Exception Entry point */ - kvm_mips_build_exception(gebase + 0x180); + kvm_mips_build_exception(gebase + 0x180, handler); /* For vectored interrupts poke the exception code @ all offsets 0-7 */ for (i = 0; i < 8; i++) { kvm_debug("L1 Vectored handler @ %p\n", gebase + 0x200 + (i * VECTORSPACING)); - kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING); + kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, + handler); } /* General exit handler */ - p = gebase + 0x2000; + p = handler; p = kvm_mips_build_exit(p); /* Guest entry routine */ -- cgit v1.2.3 From 70e92c7ee94094d2db8bfe225a8c9b1bde89c26d Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 4 Jul 2016 19:35:11 +0100 Subject: MIPS: KVM: Don't save/restore lo/hi for r6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MIPSr6 doesn't have lo/hi registers, so don't bother saving or restoring them, and don't expose them to userland with the KVM ioctl interface either. In fact the lo/hi registers aren't callee saved in the MIPS ABIs anyway, so there is no need to preserve the host lo/hi values at all when transitioning to and from the guest (which happens via a function call). 
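The ABI point is worth spelling out: hi and lo are caller-clobbered, so a compiler never keeps a live value in them across a call, and the guest entry path (reached via an ordinary function call) owes the host nothing there. A minimal sketch of what the ABI permits (hypothetical helper(), illustration only):

extern void helper(void);

long mul_then_call(long a, long b)
{
	long product = a * b;	/* pre-r6 this may use mult, leaving hi/lo live */

	helper();		/* the ABI lets helper() clobber hi/lo at will... */
	return product;		/* ...so product is kept in a GPR across the call */
}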
Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/entry.c | 16 ++++------------ arch/mips/kvm/mips.c | 6 ++++++ 2 files changed, 10 insertions(+), 12 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c index de8b6ec5573f..75ba7c2ecb3d 100644 --- a/arch/mips/kvm/entry.c +++ b/arch/mips/kvm/entry.c @@ -178,12 +178,6 @@ void *kvm_mips_build_vcpu_run(void *addr) UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1); } - /* Save hi/lo */ - uasm_i_mflo(&p, V0); - UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1); - uasm_i_mfhi(&p, V1); - UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1); - /* Save host status */ uasm_i_mfc0(&p, V0, C0_STATUS); UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1); @@ -307,12 +301,14 @@ static void *kvm_mips_build_enter_guest(void *addr) UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); } +#ifndef CONFIG_CPU_MIPSR6 /* Restore hi/lo */ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1); uasm_i_mthi(&p, K0); UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1); uasm_i_mtlo(&p, K0); +#endif /* Restore the guest's k0/k1 registers */ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); @@ -408,12 +404,14 @@ void *kvm_mips_build_exit(void *addr) UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); } +#ifndef CONFIG_CPU_MIPSR6 /* We need to save hi/lo and restore them on the way out */ uasm_i_mfhi(&p, T0); UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1); uasm_i_mflo(&p, T0); UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1); +#endif /* Finally save guest k1 to VCPU */ uasm_i_ehb(&p); @@ -663,12 +661,6 @@ static void *kvm_mips_build_ret_to_host(void *addr) UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1); } - UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1); - uasm_i_mthi(&p, K0); - - UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1); - uasm_i_mtlo(&p, K0); - /* Restore RDHWR access */ UASM_i_LA_mostly(&p, K0, (long)&hwrena); uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 7c76768ff364..414b00074e29 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -521,8 +521,10 @@ static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_R30, KVM_REG_MIPS_R31, +#ifndef CONFIG_CPU_MIPSR6 KVM_REG_MIPS_HI, KVM_REG_MIPS_LO, +#endif KVM_REG_MIPS_PC, KVM_REG_MIPS_CP0_INDEX, @@ -666,12 +668,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; break; +#ifndef CONFIG_CPU_MIPSR6 case KVM_REG_MIPS_HI: v = (long)vcpu->arch.hi; break; case KVM_REG_MIPS_LO: v = (long)vcpu->arch.lo; break; +#endif case KVM_REG_MIPS_PC: v = (long)vcpu->arch.pc; break; @@ -887,12 +891,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_MIPS_R1 ...
KVM_REG_MIPS_R31: vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; break; +#ifndef CONFIG_CPU_MIPSR6 case KVM_REG_MIPS_HI: vcpu->arch.hi = v; break; case KVM_REG_MIPS_LO: vcpu->arch.lo = v; break; +#endif case KVM_REG_MIPS_PC: vcpu->arch.pc = v; break; -- cgit v1.2.3 From 2a06dab877dee3d4144c3ba32c662db18a1fdd2b Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 8 Jul 2016 11:53:26 +0100 Subject: MIPS: KVM: Fail if ebase doesn't fit in CP0_EBase MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fail if the address of the allocated exception base doesn't fit into the CP0_EBase register. This can happen on MIPS64 if CP0_EBase.WG isn't implemented but RAM is available outside of the range of KSeg0. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 414b00074e29..a6ea084b4d9d 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -300,6 +300,18 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); + /* + * Check new ebase actually fits in CP0_EBase. The lack of a write gate + * limits us to the low 512MB of physical address space. If the memory + * we allocate is out of range, just give up now. + */ + if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) { + kvm_err("CP0_EBase.WG required for guest exception base %pK\n", + gebase); + err = -ENOMEM; + goto out_free_gebase; + } + /* Save new ebase */ vcpu->arch.guest_ebase = gebase; -- cgit v1.2.3 From 235539b48a2357da28f52d66d04bec04f3dcb9dd Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Wed, 7 Sep 2016 14:47:23 -0400 Subject: kvm: add stubs for arch specific debugfs support Two stubs are added: o kvm_arch_has_vcpu_debugfs(): must return true if the arch supports creating debugfs entries in the vcpu debugfs dir (which will be implemented by the next commit) o kvm_arch_create_vcpu_debugfs(): code that creates debugfs entries in the vcpu debugfs dir For x86, this commit introduces a new file to avoid growing arch/x86/kvm/x86.c even more. 
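The expected generic-side consumer presumably gates creation on the first stub before calling the second; a sketch under that assumption (the function below is invented for illustration and is not the generic-code change, which the message defers to the next commit):

static void kvm_vcpu_create_debugfs_sketch(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_has_vcpu_debugfs())
		return;		/* this arch adds no per-vcpu debugfs entries */

	if (kvm_arch_create_vcpu_debugfs(vcpu))
		pr_warn("kvm: failed to create vcpu debugfs entries\n");
}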
Signed-off-by: Luiz Capitulino Signed-off-by: Paolo Bonzini --- arch/arm/kvm/arm.c | 10 ++++++++++ arch/mips/kvm/mips.c | 10 ++++++++++ arch/powerpc/kvm/powerpc.c | 10 ++++++++++ arch/s390/kvm/kvm-s390.c | 10 ++++++++++ arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/debugfs.c | 20 ++++++++++++++++++++ include/linux/kvm_host.h | 3 +++ 7 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 arch/x86/kvm/debugfs.c (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 75f130ef6504..c638935baad6 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -144,6 +144,16 @@ out_fail_alloc: return ret; } +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index a6ea084b4d9d..49b25e74d0c7 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -140,6 +140,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) return 0; } +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + void kvm_mips_free_vcpus(struct kvm *kvm) { unsigned int i; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 0b7d66422650..70963c845e96 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -438,6 +438,16 @@ err_out: return -EINVAL; } +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + void kvm_arch_destroy_vm(struct kvm *kvm) { unsigned int i; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ac6c056df4b9..0a9ce9dd7825 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1489,6 +1489,16 @@ out_err: return rc; } +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 3, "%s", "free cpu"); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 464fa477afbf..3bff20710471 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -13,7 +13,7 @@ kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ - hyperv.o page_track.o + hyperv.o page_track.o debugfs.o kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c new file mode 100644 index 000000000000..bb5e9f635dde --- /dev/null +++ b/arch/x86/kvm/debugfs.c @@ -0,0 +1,20 @@ +/* + * Kernel-based Virtual Machine driver for Linux + * + * Copyright 2016 Red Hat, Inc. and/or its affiliates. + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ +#include <linux/kvm_host.h> + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 9c28b4d4c90b..5486ff9aa71e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -749,6 +749,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); +bool kvm_arch_has_vcpu_debugfs(void); +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); + int kvm_arch_hardware_enable(void); void kvm_arch_hardware_disable(void); int kvm_arch_hardware_setup(void); -- cgit v1.2.3 From 25b08c7fb0e410bdf7c42ea1faff816eb0451bbc Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 16 Sep 2016 00:06:43 +0100 Subject: KVM: MIPS: Invalidate TLB by regenerating ASIDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Invalidate host TLB mappings when the guest ASID is changed by regenerating ASIDs, rather than flushing the entire host TLB except entries in the guest KSeg0 range. For the guest kernel mode ASID we regenerate on the spot when the guest ASID is changed, as that will always take place while the guest is in kernel mode. However when the guest invalidates TLB entries the ASID will often be changed temporarily as part of writing EntryHi without the guest returning to user mode in between. We therefore regenerate the user mode ASID lazily before entering the guest in user mode, if and only if the guest ASID has actually changed since the last guest user mode entry. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org --- arch/mips/include/asm/kvm_host.h | 3 +++ arch/mips/kvm/emulate.c | 18 +++++++++++++----- arch/mips/kvm/mips.c | 30 ++++++++++++++++++++++++++++++ arch/mips/kvm/mmu.c | 4 ++++ 4 files changed, 50 insertions(+), 5 deletions(-) (limited to 'arch/mips/kvm/mips.c') diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 4d7e0e466b5a..a5685c1adba2 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -328,6 +328,9 @@ struct kvm_vcpu_arch { u32 guest_kernel_asid[NR_CPUS]; struct mm_struct guest_kernel_mm, guest_user_mm; + /* Guest ASID of last user mode execution */ + unsigned int last_user_gasid; + int last_sched_cpu; /* WAIT executed */ diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 43853ec6e160..8dc9e64346e6 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1170,15 +1170,23 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, & KVM_ENTRYHI_ASID, nasid); + /* + * Regenerate/invalidate kernel MMU + * context. + * The user MMU context will be + * regenerated lazily on re-entry to + * guest user if the guest ASID actually + * changes.
+ */ preempt_disable(); - /* Blow away the shadow host TLBs */ - kvm_mips_flush_host_tlb(1); cpu = smp_processor_id(); + kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, + cpu, vcpu); + vcpu->arch.guest_kernel_asid[cpu] = + vcpu->arch.guest_kernel_mm.context.asid[cpu]; for_each_possible_cpu(i) - if (i != cpu) { - vcpu->arch.guest_user_asid[i] = 0; + if (i != cpu) vcpu->arch.guest_kernel_asid[i] = 0; - } preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index a6ea084b4d9d..ad1b15ba5907 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -411,6 +411,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return -ENOIOCTLCMD; } +/* Must be called with preemption disabled, just before entering guest */ +static void kvm_mips_check_asids(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + int cpu = smp_processor_id(); + unsigned int gasid; + + /* + * Lazy host ASID regeneration for guest user mode. + * If the guest ASID has changed since the last guest usermode + * execution, regenerate the host ASID so as to invalidate stale TLB + * entries. + */ + if (!KVM_GUEST_KERNEL_MODE(vcpu)) { + gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; + if (gasid != vcpu->arch.last_user_gasid) { + kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, + vcpu); + vcpu->arch.guest_user_asid[cpu] = + vcpu->arch.guest_user_mm.context.asid[cpu]; + vcpu->arch.last_user_gasid = gasid; + } + } +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = 0; @@ -438,6 +463,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) htw_stop(); trace_kvm_enter(vcpu); + + kvm_mips_check_asids(vcpu); + r = vcpu->arch.vcpu_run(run, vcpu); trace_kvm_out(vcpu); @@ -1551,6 +1579,8 @@ skip_emul: if (ret == RESUME_GUEST) { trace_kvm_reenter(vcpu); + kvm_mips_check_asids(vcpu); + /* * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context * is live), restore FCR31 / MSACSR. diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index c1f8758f5323..8e1f2bffcf0f 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -260,9 +260,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) & asid_version_mask(cpu)) { + u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & + KVM_ENTRYHI_ASID; + kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); vcpu->arch.guest_user_asid[cpu] = vcpu->arch.guest_user_mm.context.asid[cpu]; + vcpu->arch.last_user_gasid = gasid; newasid++; kvm_debug("[%d]: cpu_context: %#lx\n", cpu, -- cgit v1.2.3
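Why regenerating an ASID invalidates stale mappings: host TLB entries match on {VA, ASID}, so moving the guest MMU context onto a fresh ASID makes the old entries unreachable without an indexed flush. A standalone model of the versioned ASID allocator this relies on (simplified: the real kvm_get_new_mmu_context() is per-CPU and flushes the TLB when the ASID space wraps):

#include <stdio.h>

#define ASID_BITS 8
#define ASID_MASK ((1u << ASID_BITS) - 1)

/* The bits above ASID_MASK act as a generation counter. */
static unsigned int asid_cache = 1u << ASID_BITS;

static unsigned int get_new_asid(void)
{
	unsigned int asid = ++asid_cache;

	if (!(asid & ASID_MASK))		/* wrapped: start a new generation */
		asid_cache = asid = asid + 1;	/* the kernel flushes the TLB here */
	return asid;
}

int main(void)
{
	unsigned int kern = get_new_asid();	/* guest kernel mode context */
	unsigned int user = get_new_asid();	/* guest user mode context */

	/* Guest wrote a new ASID to EntryHi: regenerate instead of flushing. */
	user = get_new_asid();

	printf("kernel asid %u, user asid %u\n",
	       kern & ASID_MASK, user & ASID_MASK);
	return 0;
}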