author     Alexander Graf <agraf@suse.de>   2010-04-20 02:49:48 +0200
committer  Avi Kivity <avi@redhat.com>      2010-05-17 12:18:58 +0300
commit     f7bc74e1c306636a659a04805474b2f8fcbd1f7e
tree       5aa5c5c9676d577b55bbc700f1d5a6ee5c137a27 /arch
parent     7fdaec997cc8ef77e8da7ed70f3d9f074b61c31f
KVM: PPC: Improve split mode
When in split mode, instruction relocation and data relocation are not equal.
So far we have implemented this mode by reserving a special pseudo-VSID for the
two cases and flushing all PTEs when entering split mode, which is slow.
Unfortunately, 32-bit Linux and Mac OS X use split mode extensively. So, to
avoid slowing things down too much, I came up with a different idea: mark
split mode with a bit in the VSID and then treat it like any other segment.
This means we can just flush the shadow segment cache but keep the PTEs
intact. I verified that this works with ppc32 Linux and Mac OS X 10.4
guests, and it does speed them up.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
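
[Editor's note] To illustrate the scheme, here is a minimal, self-contained userspace model (not kernel code) of the new VSID tagging: split-mode translations get dedicated high bits in the shadow VSID, so PTEs for each mode can coexist and an MSR change only has to refresh the segment cache. The tag_vsid() helper is hypothetical; the constants match the new defines introduced by this patch.

```c
#include <stdio.h>
#include <stdint.h>

/* Tag bits, as defined by the patch in kvm_book3s.h */
#define VSID_REAL_DR 0x2000000000000000ULL /* data relocation disabled */
#define VSID_REAL_IR 0x4000000000000000ULL /* instruction relocation disabled */
#define VSID_PR      0x8000000000000000ULL /* problem state (user mode) */

/* Hypothetical helper: derive the shadow VSID for a guest VSID under
 * the current MSR relocation/privilege state. */
static uint64_t tag_vsid(uint64_t gvsid, int msr_dr, int msr_ir, int msr_pr)
{
	uint64_t vsid = gvsid;

	/* Split mode: exactly one of IR/DR is set. */
	if (msr_dr && !msr_ir)
		vsid |= VSID_REAL_IR;	/* instruction fetches stay real */
	else if (msr_ir && !msr_dr)
		vsid |= VSID_REAL_DR;	/* data accesses stay real */
	if (msr_pr)
		vsid |= VSID_PR;
	return vsid;
}

int main(void)
{
	uint64_t gvsid = 0x123;

	/* The same guest VSID yields distinct shadow VSIDs per mode, so
	 * their PTEs never collide and never need a mass flush. */
	printf("normal:  %#llx\n", (unsigned long long)tag_vsid(gvsid, 1, 1, 0));
	printf("DR-only: %#llx\n", (unsigned long long)tag_vsid(gvsid, 1, 0, 0));
	printf("IR-only: %#llx\n", (unsigned long long)tag_vsid(gvsid, 0, 1, 0));
	return 0;
}
```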
Diffstat (limited to 'arch')
 arch/powerpc/include/asm/kvm_book3s.h |  9
 arch/powerpc/kvm/book3s.c             | 28
 arch/powerpc/kvm/book3s_32_mmu.c      | 21
 arch/powerpc/kvm/book3s_64_mmu.c      | 27
 4 files changed, 46 insertions(+), 39 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 5d3bd0cc4116..6f74d93725a0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -100,11 +100,10 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL_DR	0x7ffffffffff00000ULL
-#define VSID_REAL_IR	0x7fffffffffe00000ULL
-#define VSID_SPLIT_MASK	0x7fffffffffe00000ULL
-#define VSID_REAL	0x7fffffffffc00000ULL
-#define VSID_BAT	0x7fffffffffb00000ULL
+#define VSID_REAL	0x1fffffffffc00000ULL
+#define VSID_BAT	0x1fffffffffb00000ULL
+#define VSID_REAL_DR	0x2000000000000000ULL
+#define VSID_REAL_IR	0x4000000000000000ULL
 #define VSID_PR		0x8000000000000000ULL
 
 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index f66de7e518f7..397701d39ae7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -148,16 +148,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 		}
 	}
 
-	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
-	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
-		bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-		bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
-
-		/* Flush split mode PTEs */
-		if (dr != ir)
-			kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
-					      VSID_SPLIT_MASK);
-
+	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
 	}
@@ -535,6 +527,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	bool is_mmio = false;
 	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
 	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+	u64 vsid;
 
 	relocated = data ? dr : ir;
 
@@ -552,13 +545,20 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		pte.vpage |= VSID_REAL;
+		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
 		break;
 	case MSR_DR:
-		pte.vpage |= VSID_REAL_DR;
-		break;
 	case MSR_IR:
-		pte.vpage |= VSID_REAL_IR;
+		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+
+		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
+		else
+			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
+		pte.vpage |= vsid;
+
+		if (vsid == -1)
+			page_found = -EINVAL;
 		break;
 	}
 
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b745c90..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
+	ulong ea = esid << SID_SHIFT;
+	struct kvmppc_sr *sr;
+	u64 gvsid = esid;
+
+	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+		sr = find_sr(to_book3s(vcpu), ea);
+		if (sr->valid)
+			gvsid = sr->vsid;
+	}
+
 	/* In case we only have one of MSR_IR or MSR_DR set, let's put that in
 	   the real-mode context (and hope RM doesn't access high memory) */
 	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = (VSID_REAL >> 16) | esid;
+		*vsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = (VSID_REAL_IR >> 16) | esid;
+		*vsid = VSID_REAL_IR | gvsid;
 		break;
 	case MSR_DR:
-		*vsid = (VSID_REAL_DR >> 16) | esid;
+		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-	{
-		ulong ea = esid << SID_SHIFT;
-		struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
 		if (!sr->valid)
 			return -1;
 
 		*vsid = sr->vsid;
 		break;
-	}
 	default:
 		BUG();
 	}
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index a9241e90a68b..612de6e4d74b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -442,29 +442,32 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
 static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 					     u64 *vsid)
 {
+	ulong ea = esid << SID_SHIFT;
+	struct kvmppc_slb *slb;
+	u64 gvsid = esid;
+
+	if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
+		if (slb)
+			gvsid = slb->vsid;
+	}
+
 	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
 	case 0:
-		*vsid = (VSID_REAL >> 16) | esid;
+		*vsid = VSID_REAL | esid;
 		break;
 	case MSR_IR:
-		*vsid = (VSID_REAL_IR >> 16) | esid;
+		*vsid = VSID_REAL_IR | gvsid;
 		break;
 	case MSR_DR:
-		*vsid = (VSID_REAL_DR >> 16) | esid;
+		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-	{
-		ulong ea;
-		struct kvmppc_slb *slb;
-
-		ea = esid << SID_SHIFT;
-		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
-		if (slb)
-			*vsid = slb->vsid;
-		else
+		if (!slb)
 			return -ENOENT;
+		*vsid = gvsid;
 		break;
-	}
 	default:
 		BUG();
 		break;
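
[Editor's note] For reference, here is a compact standalone sketch of the reworked esid_to_vsid() logic that the 32-bit and 64-bit MMU hunks above now share. The lookup callback is a hypothetical stand-in for find_sr() / kvmppc_mmu_book3s_64_find_slbe(), and the MSR bit values follow the PowerPC MSR layout.

```c
#include <stdint.h>

#define MSR_DR 0x10ULL			/* data relocation enabled */
#define MSR_IR 0x20ULL			/* instruction relocation enabled */

#define VSID_REAL    0x1fffffffffc00000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL

/* Hypothetical stand-in for the guest segment lookup: returns the
 * guest segment's VSID for an ESID, or -1 if no mapping exists. */
typedef int64_t (*segment_lookup_fn)(uint64_t esid);

static int esid_to_vsid_model(uint64_t msr, uint64_t esid,
			      segment_lookup_fn lookup, uint64_t *vsid)
{
	int64_t found = -1;
	uint64_t gvsid = esid;	/* fall back to the raw ESID, as the patch does */

	if (msr & (MSR_DR | MSR_IR)) {
		found = lookup(esid);
		if (found >= 0)
			gvsid = (uint64_t)found;
	}

	switch (msr & (MSR_DR | MSR_IR)) {
	case 0:				/* full real mode */
		*vsid = VSID_REAL | esid;
		break;
	case MSR_IR:			/* split mode: instructions relocated */
		*vsid = VSID_REAL_IR | gvsid;
		break;
	case MSR_DR:			/* split mode: data relocated */
		*vsid = VSID_REAL_DR | gvsid;
		break;
	default:			/* fully relocated: mapping required */
		if (found < 0)
			return -1;
		*vsid = gvsid;
		break;
	}
	return 0;
}
```

Note how the split-mode cases now reuse the guest VSID, tagged with VSID_REAL_DR or VSID_REAL_IR, instead of collapsing everything into a reserved pseudo-VSID range; that is what lets the shadow PTEs survive mode switches, with only the segment cache flushed.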