Diffstat (limited to 'arch/mips/kvm/emulate.c')
-rw-r--r--  arch/mips/kvm/emulate.c | 116
1 file changed, 50 insertions(+), 66 deletions(-)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 1b675c7ce89f..b37954cc880d 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -20,6 +20,7 @@
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -29,7 +30,6 @@
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
-#include "opcode.h"
#include "interrupt.h"
#include "commpage.h"
@@ -1239,21 +1239,20 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
er = EMULATE_FAIL;
break;
- case mfmcz_op:
+ case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
- if (rt != 0) {
+ if (rt != 0)
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
- }
/* EI */
if (inst & 0x20) {
- kvm_debug("[%#lx] mfmcz_op: EI\n",
+ kvm_debug("[%#lx] mfmc0_op: EI\n",
vcpu->arch.pc);
kvm_set_c0_guest_status(cop0, ST0_IE);
} else {
- kvm_debug("[%#lx] mfmcz_op: DI\n",
+ kvm_debug("[%#lx] mfmc0_op: DI\n",
vcpu->arch.pc);
kvm_clear_c0_guest_status(cop0, ST0_IE);
}
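The hunk above renames mfmcz_op to mfmc0_op; EI and DI share this encoding, and bit 5 of the instruction (the "sc" bit) selects whether Status.IE is set or cleared, which is exactly what the `inst & 0x20` test checks. A stand-alone C sketch of that decision; ST0_IE and the example encodings are assumptions meant to mirror asm/mipsregs.h and the standard MIPS32 ei/di encodings, not code from this file:

#include <stdint.h>
#include <stdio.h>

#define ST0_IE 0x00000001u	/* Status.IE; assumed to match asm/mipsregs.h */

/*
 * EI and DI share the mfmc0 encoding: bit 5 of the instruction (the "sc"
 * bit) decides whether the guest's Status.IE is set (EI) or cleared (DI).
 */
static uint32_t emulate_ei_di(uint32_t inst, uint32_t guest_status)
{
	if (inst & 0x20)
		guest_status |= ST0_IE;		/* EI */
	else
		guest_status &= ~ST0_IE;	/* DI */
	return guest_status;
}

int main(void)
{
	uint32_t status = 0x10000000u;

	status = emulate_ei_di(0x41606020u, status);	/* "ei"  (sc = 1) */
	printf("after ei: Status.IE = %u\n", (unsigned)(status & ST0_IE));

	status = emulate_ei_di(0x41606000u, status);	/* "di"  (sc = 0) */
	printf("after di: Status.IE = %u\n", (unsigned)(status & ST0_IE));
	return 0;
}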
@@ -1545,19 +1544,6 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
return 0;
}
-#define MIPS_CACHE_OP_INDEX_INV 0x0
-#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
-#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
-#define MIPS_CACHE_OP_IMP 0x3
-#define MIPS_CACHE_OP_HIT_INV 0x4
-#define MIPS_CACHE_OP_FILL_WB_INV 0x5
-#define MIPS_CACHE_OP_HIT_HB 0x6
-#define MIPS_CACHE_OP_FETCH_LOCK 0x7
-
-#define MIPS_CACHE_ICACHE 0x0
-#define MIPS_CACHE_DCACHE 0x1
-#define MIPS_CACHE_SEC 0x3
-
enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
uint32_t cause,
struct kvm_run *run,
@@ -1582,8 +1568,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
base = (inst >> 21) & 0x1f;
op_inst = (inst >> 16) & 0x1f;
offset = (int16_t)inst;
- cache = (inst >> 16) & 0x3;
- op = (inst >> 18) & 0x7;
+ cache = op_inst & CacheOp_Cache;
+ op = op_inst & CacheOp_Op;
va = arch->gprs[base] + offset;
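With the local MIPS_CACHE_* defines gone, the emulator now splits the 5-bit op field of the CACHE instruction using the CacheOp_Cache/CacheOp_Op masks from <asm/cacheops.h> and compares the combined op_inst value against names such as Hit_Writeback_Inv_D. A stand-alone sketch of that decoding; the mask and opcode values below are local assumptions chosen to mirror the classic R4000-style CACHE encoding (cache selector in bits [1:0], operation in bits [4:2]), not values copied from the header:

#include <stdint.h>
#include <stdio.h>

/* Local illustrative values mirroring the R4000-style CACHE op layout. */
#define CACHEOP_CACHE		0x03	/* which cache (bits [1:0]) */
#define CACHEOP_OP		0x1c	/* which operation (bits [4:2]) */

#define CACHE_I			0x00
#define CACHE_D			0x01

#define INDEX_WRITEBACK_INV	0x00
#define HIT_INVALIDATE		0x10
#define HIT_WRITEBACK_INV	0x14

#define HIT_INVALIDATE_I	(CACHE_I | HIT_INVALIDATE)
#define HIT_INVALIDATE_D	(CACHE_D | HIT_INVALIDATE)
#define HIT_WRITEBACK_INV_D	(CACHE_D | HIT_WRITEBACK_INV)

/* Decode the base/op/offset fields of a (pre-R6) MIPS CACHE instruction. */
static void decode_cache(uint32_t inst)
{
	uint32_t base = (inst >> 21) & 0x1f;
	uint32_t op_inst = (inst >> 16) & 0x1f;
	uint32_t cache = op_inst & CACHEOP_CACHE;
	uint32_t op = op_inst & CACHEOP_OP;
	int16_t offset = (int16_t)inst;

	printf("base=$%u offset=%d cache=%u op=%#x: ",
	       (unsigned)base, offset, (unsigned)cache, (unsigned)op);

	if (op == INDEX_WRITEBACK_INV)
		printf("index invalidate of the whole %s\n",
		       cache == CACHE_D ? "D-cache" : "I-cache");
	else if (op_inst == HIT_WRITEBACK_INV_D || op_inst == HIT_INVALIDATE_D)
		printf("hit op on a D-cache line\n");
	else if (op_inst == HIT_INVALIDATE_I)
		printf("hit invalidate on an I-cache line\n");
	else
		printf("not handled by the emulator\n");
}

int main(void)
{
	decode_cache(0xbc950000u);	/* cache 0x15, 0($4): Hit_Writeback_Inv_D */
	decode_cache(0xbc900020u);	/* cache 0x10, 32($4): Hit_Invalidate_I */
	return 0;
}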
@@ -1595,14 +1581,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* invalidate the caches entirely by stepping through all the
* ways/indexes
*/
- if (op == MIPS_CACHE_OP_INDEX_INV) {
+ if (op == Index_Writeback_Inv) {
kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
arch->gprs[base], offset);
- if (cache == MIPS_CACHE_DCACHE)
+ if (cache == Cache_D)
r4k_blast_dcache();
- else if (cache == MIPS_CACHE_ICACHE)
+ else if (cache == Cache_I)
r4k_blast_icache();
else {
kvm_err("%s: unsupported CACHE INDEX operation\n",
@@ -1675,9 +1661,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
skip_fault:
/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
- if (cache == MIPS_CACHE_DCACHE
- && (op == MIPS_CACHE_OP_FILL_WB_INV
- || op == MIPS_CACHE_OP_HIT_INV)) {
+ if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
flush_dcache_line(va);
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -1687,7 +1671,7 @@ skip_fault:
*/
kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
- } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+ } else if (op_inst == Hit_Invalidate_I) {
flush_dcache_line(va);
flush_icache_line(va);
@@ -1781,7 +1765,7 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_SYSCALL << CAUSEB_EXCCODE));
+ (EXCCODE_SYS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
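Every delivery helper touched by this patch follows the same pattern: write the architectural exception code into the ExcCode field of the guest's CP0_Cause register and redirect the guest PC to the exception vector at KSEG0 + 0x180. A minimal sketch of that masked read-modify-write; change_guest_cause() is a hypothetical stand-in for the kvm_change_c0_guest_cause() macro, and the constants are assumed to match asm/mipsregs.h (ExcCode in Cause[6:2], code 8 for SYSCALL):

#include <stdint.h>
#include <stdio.h>

#define CAUSEB_EXCCODE	2
#define CAUSEF_EXCCODE	(0x1f << CAUSEB_EXCCODE)
#define EXCCODE_SYS	8	/* SYSCALL */

/*
 * Hypothetical stand-in for kvm_change_c0_guest_cause(): clear the bits
 * named in 'change', then set whatever bits of 'val' fall inside 'change'.
 */
static uint32_t change_guest_cause(uint32_t cause, uint32_t change, uint32_t val)
{
	cause &= ~change;
	cause |= (val & change);
	return cause;
}

int main(void)
{
	uint32_t guest_cause = 0x50008400u;	/* arbitrary pre-exception value */

	/* Mirrors: kvm_change_c0_guest_cause(cop0, 0xff, EXCCODE_SYS << CAUSEB_EXCCODE) */
	guest_cause = change_guest_cause(guest_cause, 0xff,
					 EXCCODE_SYS << CAUSEB_EXCCODE);

	printf("Cause = %#x, ExcCode = %u\n", guest_cause,
	       (guest_cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE);
	return 0;
}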
@@ -1828,7 +1812,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1874,7 +1858,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBL << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1918,7 +1902,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1962,7 +1946,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
}
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+ (EXCCODE_TLBS << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -2033,7 +2017,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
arch->pc = KVM_GUEST_KSEG0 + 0x180;
}
- kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+ kvm_change_c0_guest_cause(cop0, (0xff),
+ (EXCCODE_MOD << CAUSEB_EXCCODE));
/* setup badvaddr, context and entryhi registers for the guest */
kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -2068,7 +2053,7 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
arch->pc = KVM_GUEST_KSEG0 + 0x180;
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+ (EXCCODE_CPU << CAUSEB_EXCCODE));
kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
return EMULATE_DONE;
@@ -2096,7 +2081,7 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_RES_INST << CAUSEB_EXCCODE));
+ (EXCCODE_RI << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2131,7 +2116,7 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_BREAK << CAUSEB_EXCCODE));
+ (EXCCODE_BP << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2166,7 +2151,7 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_TRAP << CAUSEB_EXCCODE));
+ (EXCCODE_TR << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2201,7 +2186,7 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_MSAFPE << CAUSEB_EXCCODE));
+ (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2236,7 +2221,7 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_FPE << CAUSEB_EXCCODE));
+ (EXCCODE_FPE << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2271,7 +2256,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
kvm_change_c0_guest_cause(cop0, (0xff),
- (T_MSADIS << CAUSEB_EXCCODE));
+ (EXCCODE_MSADIS << CAUSEB_EXCCODE));
/* Set PC to the exception entry point */
arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2480,25 +2465,25 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
if (usermode) {
switch (exccode) {
- case T_INT:
- case T_SYSCALL:
- case T_BREAK:
- case T_RES_INST:
- case T_TRAP:
- case T_MSAFPE:
- case T_FPE:
- case T_MSADIS:
+ case EXCCODE_INT:
+ case EXCCODE_SYS:
+ case EXCCODE_BP:
+ case EXCCODE_RI:
+ case EXCCODE_TR:
+ case EXCCODE_MSAFPE:
+ case EXCCODE_FPE:
+ case EXCCODE_MSADIS:
break;
- case T_COP_UNUSABLE:
+ case EXCCODE_CPU:
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
er = EMULATE_PRIV_FAIL;
break;
- case T_TLB_MOD:
+ case EXCCODE_MOD:
break;
- case T_TLB_LD_MISS:
+ case EXCCODE_TLBL:
/*
* We we are accessing Guest kernel space, then send an
* address error exception to the guest
@@ -2507,12 +2492,12 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
kvm_debug("%s: LD MISS @ %#lx\n", __func__,
badvaddr);
cause &= ~0xff;
- cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
}
break;
- case T_TLB_ST_MISS:
+ case EXCCODE_TLBS:
/*
* We we are accessing Guest kernel space, then send an
* address error exception to the guest
@@ -2521,26 +2506,26 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
kvm_debug("%s: ST MISS @ %#lx\n", __func__,
badvaddr);
cause &= ~0xff;
- cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
er = EMULATE_PRIV_FAIL;
}
break;
- case T_ADDR_ERR_ST:
+ case EXCCODE_ADES:
kvm_debug("%s: address error ST @ %#lx\n", __func__,
badvaddr);
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause &= ~0xff;
- cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
}
er = EMULATE_PRIV_FAIL;
break;
- case T_ADDR_ERR_LD:
+ case EXCCODE_ADEL:
kvm_debug("%s: address error LD @ %#lx\n", __func__,
badvaddr);
if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
cause &= ~0xff;
- cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+ cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
}
er = EMULATE_PRIV_FAIL;
break;
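The privilege check above re-tags the host exception code before reflecting it into the guest: a user-mode TLB miss on a guest-kernel address becomes an address error (ADEL/ADES), while an address error that hits the commpage becomes a TLB miss (TLBL/TLBS). A stand-alone sketch of that re-tagging; the ExcCode numbers are the architectural MIPS values, and the helper names are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define CAUSEB_EXCCODE	2
#define EXCCODE_TLBL	2	/* TLB miss, load/fetch */
#define EXCCODE_TLBS	3	/* TLB miss, store */
#define EXCCODE_ADEL	4	/* address error, load/fetch */
#define EXCCODE_ADES	5	/* address error, store */

/* Replace the ExcCode in a Cause value (mirrors the 'cause &= ~0xff; cause |= ...' idiom). */
static uint32_t set_exccode(uint32_t cause, unsigned int exccode)
{
	cause &= ~0xffu;
	cause |= exccode << CAUSEB_EXCCODE;
	return cause;
}

/*
 * Hypothetical helper: decide how a faulting user-mode guest access is
 * re-tagged, following the two conversions in kvm_mips_check_privilege().
 */
static uint32_t retag_for_user(uint32_t cause, bool badvaddr_in_guest_kseg,
			       bool badvaddr_is_commpage)
{
	unsigned int exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;

	if (exccode == EXCCODE_TLBL && badvaddr_in_guest_kseg)
		return set_exccode(cause, EXCCODE_ADEL);
	if (exccode == EXCCODE_TLBS && badvaddr_in_guest_kseg)
		return set_exccode(cause, EXCCODE_ADES);
	if (exccode == EXCCODE_ADEL && badvaddr_is_commpage)
		return set_exccode(cause, EXCCODE_TLBL);
	if (exccode == EXCCODE_ADES && badvaddr_is_commpage)
		return set_exccode(cause, EXCCODE_TLBS);
	return cause;
}

int main(void)
{
	uint32_t cause = EXCCODE_TLBL << CAUSEB_EXCCODE;

	cause = retag_for_user(cause, true, false);
	printf("re-tagged ExcCode = %u\n", (cause >> CAUSEB_EXCCODE) & 0x1f);	/* 4 = ADEL */
	return 0;
}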
@@ -2583,13 +2568,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
* an entry into the guest TLB.
*/
index = kvm_mips_guest_tlb_lookup(vcpu,
- (va & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi
- (vcpu->arch.cop0) & ASID_MASK));
+ (va & VPN2_MASK) |
+ (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
if (index < 0) {
- if (exccode == T_TLB_LD_MISS) {
+ if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
- } else if (exccode == T_TLB_ST_MISS) {
+ } else if (exccode == EXCCODE_TLBS) {
er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
} else {
kvm_err("%s: invalid exc code: %d\n", __func__,
@@ -2604,10 +2588,10 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
* exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
- if (exccode == T_TLB_LD_MISS) {
+ if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
vcpu);
- } else if (exccode == T_TLB_ST_MISS) {
+ } else if (exccode == EXCCODE_TLBS) {
er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
vcpu);
} else {
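The last two hunks reindent the guest TLB lookup and switch the exception-code comparisons to EXCCODE_TLBL/EXCCODE_TLBS. The lookup key itself is the faulting address's VPN2 combined with the guest's current ASID from EntryHi. A stand-alone sketch of building that key; VPN2_MASK and ASID_MASK here are assumptions (4K pages, 8-bit ASID), not values taken from the kernel headers:

#include <stdio.h>

/* Assumed layout: 4K pages (13-bit even/odd page-pair offset) and an 8-bit ASID. */
#define VPN2_MASK	0xffffe000UL
#define ASID_MASK	0x000000ffUL

/* Build the EntryHi-style key used to search the guest TLB for a faulting address. */
static unsigned long guest_tlb_key(unsigned long badvaddr, unsigned long guest_entryhi)
{
	return (badvaddr & VPN2_MASK) | (guest_entryhi & ASID_MASK);
}

int main(void)
{
	unsigned long badvaddr = 0x004003a4UL;	/* faulting guest virtual address */
	unsigned long entryhi = 0x00000042UL;	/* guest EntryHi: current ASID 0x42 */

	printf("guest TLB lookup key = %#lx\n", guest_tlb_key(badvaddr, entryhi));
	return 0;
}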