author		Paul Mackerras <paulus@samba.org>	2013-08-06 14:14:33 +1000
committer	Alexander Graf <agraf@suse.de>		2013-08-28 16:41:14 +0200
commit		9d1ffdd8f34b1f89264effd10e75ea4d6272690e
tree		6521516a39e8675d6ef9daa22c875c51ab753c7b /arch/powerpc/kvm/book3s_pr.c
parent		7bfa9ad55d691f2b836b576769b11eca2cf50816
KVM: PPC: Book3S PR: Don't corrupt guest state when kernel uses VMX
Currently the code assumes that once we load up guest FP/VSX or VMX
state into the CPU, it stays valid in the CPU registers until we
explicitly flush it to the thread_struct. However, on POWER7,
copy_page() and memcpy() can use VMX. These functions do flush the
VMX state to the thread_struct before using VMX instructions, but if
this happens while we have guest state in the VMX registers, and we
then re-enter the guest, we don't reload the VMX state from the
thread_struct, leading to guest corruption. This has been observed
to cause guest processes to segfault.
To fix this, we check before re-entering the guest that all of the
bits corresponding to facilities owned by the guest, as expressed
in vcpu->arch.guest_owned_ext, are set in current->thread.regs->msr.
Any bits that have been cleared correspond to facilities that have
been used by kernel code and thus flushed to the thread_struct, so
for them we reload the state from the thread_struct.
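To see what the kvmppc_handle_lost_ext() added below computes, here is a
minimal user-space model of that check; the MSR bit values and the load_up
stubs are illustrative stand-ins, not the real ppc64 definitions:

	#include <stdio.h>

	/* Illustrative bit positions only; the real values are in asm/reg.h. */
	#define MSR_FP  0x2000UL
	#define MSR_VEC 0x0800UL

	/* Stand-ins for kvmppc_load_up_fpu()/kvmppc_load_up_altivec(). */
	static void load_up_fpu(void)     { printf("reload FP from thread_struct\n"); }
	static void load_up_altivec(void) { printf("reload VMX from thread_struct\n"); }

	int main(void)
	{
		/* The guest owns FP and VMX... */
		unsigned long guest_owned_ext = MSR_FP | MSR_VEC;
		/* ...but a kernel memcpy() used VMX, flushing the guest's vector
		 * state to the thread_struct and clearing MSR_VEC on the way. */
		unsigned long thread_msr = MSR_FP;

		/* The mask of facilities flushed behind the guest's back: */
		unsigned long lost_ext = guest_owned_ext & ~thread_msr;

		if (lost_ext & MSR_FP)
			load_up_fpu();
		if (lost_ext & MSR_VEC)
			load_up_altivec();  /* prints: reload VMX from thread_struct */
		thread_msr |= lost_ext;

		return 0;
	}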
We also need to check current->thread.regs->msr before calling
giveup_fpu() or giveup_altivec(), since if the relevant bit is
clear, the state has already been flushed to the thread_struct and
to flush it again would corrupt it.
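A small user-space sketch shows why an unguarded second flush corrupts the
saved copy; save_state() is a hypothetical stand-in for giveup_fpu() or
giveup_altivec(), and the int fields model the register file:

	#include <assert.h>

	/* regs models the live FP/VMX register file, saved the thread_struct
	 * copy, and msr_bit whether the registers still hold valid state. */
	static int regs = 42, saved, msr_bit = 1;

	static void save_state(void)	/* stand-in for giveup_fpu() */
	{
		saved = regs;	/* flush registers to the thread_struct */
		msr_bit = 0;	/* registers no longer hold live state */
	}

	int main(void)
	{
		save_state();	/* kernel code flushed the guest value, 42 */
		regs = 7;	/* ...then clobbered the registers itself */

		/* An unconditional second flush would do saved = regs and
		 * overwrite the good copy with 7.  The guarded version: */
		if (msr_bit)
			save_state();

		assert(saved == 42);	/* guest state survives */
		return 0;
	}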
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r--	arch/powerpc/kvm/book3s_pr.c	29
1 file changed, 25 insertions, 4 deletions
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19cbac6f9d03..983e5eda892f 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -468,7 +468,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 		 * both the traditional FP registers and the added VSX
 		 * registers into thread.fpr[].
 		 */
-		giveup_fpu(current);
+		if (current->thread.regs->msr & MSR_FP)
+			giveup_fpu(current);
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
@@ -483,7 +484,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 
 #ifdef CONFIG_ALTIVEC
 	if (msr & MSR_VEC) {
-		giveup_altivec(current);
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
 		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
 		vcpu->arch.vscr = t->vscr;
 	}
@@ -575,8 +577,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
 #endif
 
-	current->thread.regs->msr |= msr;
-
 	if (msr & MSR_FP) {
 		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
 			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
@@ -598,12 +598,32 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #endif
 	}
 
+	current->thread.regs->msr |= msr;
 	vcpu->arch.guest_owned_ext |= msr;
 	kvmppc_recalc_shadow_msr(vcpu);
 
 	return RESUME_GUEST;
 }
 
+/*
+ * Kernel code using FP or VMX could have flushed guest state to
+ * the thread_struct; if so, get it back now.
+ */
+static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
+{
+	unsigned long lost_ext;
+
+	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
+	if (!lost_ext)
+		return;
+
+	if (lost_ext & MSR_FP)
+		kvmppc_load_up_fpu();
+	if (lost_ext & MSR_VEC)
+		kvmppc_load_up_altivec();
+	current->thread.regs->msr |= lost_ext;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		       unsigned int exit_nr)
 {
@@ -892,6 +912,7 @@ program_interrupt:
 		} else {
 			kvmppc_fix_ee_before_entry();
 		}
+		kvmppc_handle_lost_ext(vcpu);
 	}
 
 	trace_kvm_book3s_reenter(r, vcpu);