arch/mips/kvm/dyntrans.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>

#include "commpage.h"

/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu:	Virtual CPU.
 * @opc:	PC of instruction to replace.
 * @replace:	Instruction to write.
 *
 * Return: 0 on success, or -EFAULT if the address could not be made writable.
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
				  union mips_instruction replace)
{
	unsigned long vaddr = (unsigned long)opc;
	int err;

retry:
	/* The GVA page table is still active so use the Linux TLB handlers */
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = put_user(replace.word, opc);
	kvm_trap_emul_gva_lockless_end(vcpu);

	if (unlikely(err)) {
		/*
		 * We write protect clean pages in GVA page table so normal
		 * Linux TLB mod handler doesn't silently dirty the page.
		 * It's also possible we raced with a GVA invalidation.
		 * Try to force the page to become dirty.
		 */
		err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
		if (unlikely(err)) {
			kvm_info("%s: Address unwriteable: %p\n",
				 __func__, opc);
			return -EFAULT;
		}

		/*
		 * Try again. This will likely trigger a TLB refill, which will
		 * fetch the new dirty entry from the GVA page table, which
		 * should then succeed.
		 */
		goto retry;
	}
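	/* Make the patched instruction visible to instruction fetches */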
	__local_flush_icache_user_range(vaddr, vaddr + 4);

	return 0;
}

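/**
 * kvm_mips_trans_cache_index() - Replace an index-based CACHE instruction.
 * @inst:	Trapping CACHE instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * The trapping instruction is simply overwritten with a NOP, so it no longer
 * traps.
 */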
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu)
{
	union mips_instruction nop_inst = { 0 };

	/* Replace the CACHE instruction with a NOP */
	return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}

/**
 * kvm_mips_trans_cache_va() - Replace an address-based CACHE instruction.
 * @inst:	Trapping CACHE instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * Address-based CACHE instructions are transformed into SYNCIs. A little
 * heavy for just D-cache invalidates, but it avoids an expensive trap.
 */
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
			    struct kvm_vcpu *vcpu)
{
	union mips_instruction synci_inst = { 0 };

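	/* SYNCI lives in the REGIMM (bcond) opcode space, with rt = synci_op */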
	synci_inst.i_format.opcode = bcond_op;
	synci_inst.i_format.rs = inst.i_format.rs;
	synci_inst.i_format.rt = synci_op;
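	/*
	 * MIPS r6 encodes the virtual-address CACHE op in the SPECIAL3 opcode
	 * space, so the offset is taken from the spec3 format there.
	 */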
	if (cpu_has_mips_r6)
		synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
	else
		synci_inst.i_format.simmediate = inst.i_format.simmediate;

	return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}

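/**
 * kvm_mips_trans_mfc0() - Replace a trapping MFC0 instruction.
 * @inst:	Trapping MFC0 instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * The MFC0 is rewritten to load the register's saved value from the guest
 * commpage instead of trapping, except for ErrCtl which reads as zero.
 */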
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction mfc0_inst = { 0 };
	u32 rd, sel;

	rd = inst.c0r_format.rd;
	sel = inst.c0r_format.sel;

	if (rd == MIPS_CP0_ERRCTL && sel == 0) {
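		/* Encode "add <rt>, zero, zero", i.e. ErrCtl reads as 0 */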
		mfc0_inst.r_format.opcode = spec_op;
		mfc0_inst.r_format.rd = inst.c0r_format.rt;
		mfc0_inst.r_format.func = add_op;
	} else {
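		/* Load the saved register value from the guest commpage */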
		mfc0_inst.i_format.opcode = lw_op;
		mfc0_inst.i_format.rt = inst.c0r_format.rt;
		mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
			offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
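		/*
		 * With 64-bit saved registers, big endian keeps the 32-bit
		 * value in the low word at offset +4.
		 */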
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
			mfc0_inst.i_format.simmediate |= 4;
#endif
	}

	return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}

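/**
 * kvm_mips_trans_mtc0() - Replace a trapping MTC0 instruction.
 * @inst:	Trapping MTC0 instruction.
 * @opc:	PC of instruction to replace.
 * @vcpu:	Virtual CPU.
 *
 * The MTC0 is rewritten as a store of the guest register to the register's
 * slot in the guest commpage, avoiding the trap.
 */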
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			struct kvm_vcpu *vcpu)
{
	union mips_instruction mtc0_inst = { 0 };
	u32 rd, sel;

	rd = inst.c0r_format.rd;
	sel = inst.c0r_format.sel;

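	/* Store the guest register into the commpage copy of the CP0 register */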
	mtc0_inst.i_format.opcode = sw_op;
	mtc0_inst.i_format.rt = inst.c0r_format.rt;
	mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
		offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
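	/* As for MFC0: 64-bit saved registers keep the 32-bit value at +4 */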
#ifdef CONFIG_CPU_BIG_ENDIAN
	if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
		mtc0_inst.i_format.simmediate |= 4;
#endif

	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}