Diffstat (limited to 'arch/mips/kvm/kvm_tlb.c')
-rw-r--r--	arch/mips/kvm/kvm_tlb.c	61
1 file changed, 41 insertions, 20 deletions
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..c777dd36d4a8 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -17,6 +17,8 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }
 
 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-	return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }
 
 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
 	old_pagemask = read_c0_pagemask();
 
 	printk("HOST TLBs:\n");
-	printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
 
 	for (i = 0; i < current_cpu_data.tlbsize; i++) {
 		write_c0_index(i);
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
+	int srcu_idx, err = 0;
 	pfn_t pfn;
 
 	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-		return;
+		return 0;
 
+	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
 
 	if (kvm_mips_is_error_pfn(pfn)) {
-		panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+		err = -EFAULT;
+		goto out;
 	}
 
 	kvm->arch.guest_pmap[gfn] = pfn;
-	return;
+out:
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+	return err;
 }
 
 /* Translate guest KSEG0 addresses to Host PA */
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 			gva);
 		return KVM_INVALID_PAGE;
 	}
-	kvm_mips_map_page(vcpu->kvm, gfn);
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return KVM_INVALID_ADDR;
+
 	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
 
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
 	even = !(gfn & 0x1);
 	vaddr = badvaddr & (PAGE_MASK << 1);
 
-	kvm_mips_map_page(vcpu->kvm, gfn);
-	kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
+	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+		return -1;
+
+	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+		return -1;
 
 	if (even) {
 		pfn0 = kvm->arch.guest_pmap[gfn];
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 		pfn0 = 0;
 		pfn1 = 0;
 	} else {
-		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
-		kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
+			return -1;
+
+		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
+			return -1;
 
 		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
 		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
 		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+		    (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
 			index = i;
 			break;
 		}
@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (!(ASID_MASK(ASID_INC(asid)))) {
+	if (!((asid += ASID_INC) & ASID_MASK)) {
 		if (cpu_has_vtag_icache) {
 			flush_icache_all();
 		}
@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (!newasid) {
 		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
 		if (current->flags & PF_VCPU) {
-			write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+			write_c0_entryhi(vcpu->arch.
+					 preempt_entryhi & ASID_MASK);
 			ehb();
 		}
 	} else {
@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		 */
 		if (current->flags & PF_VCPU) {
 			if (KVM_GUEST_KERNEL_MODE(vcpu))
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_kernel_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_kernel_asid[cpu] &
+						 ASID_MASK);
 			else
-				write_c0_entryhi(ASID_MASK(vcpu->arch.
-						 guest_user_asid[cpu]));
+				write_c0_entryhi(vcpu->arch.
+						 guest_user_asid[cpu] &
+						 ASID_MASK);
 			ehb();
 		}
 	}
@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 		    kvm_mips_guest_tlb_lookup(vcpu,
				      ((unsigned long) opc & VPN2_MASK) |
-				      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+				      (kvm_read_c0_guest_entryhi
+				       (cop0) & ASID_MASK));
 		if (index < 0) {
 			kvm_err
 			    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
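The functional core of the change is in kvm_mips_map_page(): it now takes kvm->srcu around the gfn-to-pfn lookup and returns -EFAULT instead of panicking, since KVM memslot lookups must run inside an SRCU read-side critical section. A minimal sketch of that pattern, using the names from this file (kvm, gfn, pfn, and kvm_mips_gfn_to_pfn() as in the patch):

	int idx;

	/* Enter an SRCU read-side critical section; memslot-based
	 * translations such as gfn_to_pfn() must run under kvm->srcu. */
	idx = srcu_read_lock(&kvm->srcu);

	/* This lookup may fault pages in and sleep; SRCU, unlike
	 * classic RCU, permits sleeping inside the read section. */
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	/* Exit the critical section; the index returned by
	 * srcu_read_lock() must be passed to the matching unlock. */
	srcu_read_unlock(&kvm->srcu, idx);

Because every caller of kvm_mips_map_page() now checks its return value, a failed translation propagates as an error (-1 from the TLB fault handlers, KVM_INVALID_ADDR from kvm_mips_translate_guest_kseg0_to_hpa()) rather than bringing down the host.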