From 07fec1c2e75ef2f55d7a211414b0d63e185e84f0 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 17 Apr 2014 12:53:13 +0200 Subject: KVM: PPC: E500: Ignore L1CSR1_ICFI,ICLFR The L1 instruction cache control register contains bits that indicate that we're still handling a request. Mask those out when we set the SPR so that a read doesn't assume we're still doing something. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_emulate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 89b7f821f6c4..95d886f461fb 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -222,6 +222,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va break; case SPRN_L1CSR1: vcpu_e500->l1csr1 = spr_val; + vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR); break; case SPRN_HID0: vcpu_e500->hid0 = spr_val; -- cgit v1.2.1 From 8f20a3ab27342171462781cef4637c18d3dbc5f8 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 17 Apr 2014 13:25:33 +0200 Subject: KVM: PPC: E500: Add dcbtls emulation The dcbtls instruction is able to lock data inside the L1 cache. We don't want to give the guest actual access to hardware cache locks, as that could influence other VMs on the same system. But we can tell the guest that its locking attempt failed. By implementing the instruction we at least don't give the guest a program exception which it definitely does not expect. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_emulate.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 95d886f461fb..002d51764143 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -19,6 +19,7 @@ #include "booke.h" #include "e500.h" +#define XOP_DCBTLS 166 #define XOP_MSGSND 206 #define XOP_MSGCLR 238 #define XOP_TLBIVAX 786 @@ -103,6 +104,15 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu, return emulated; } +static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu) +{ + struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); + + /* Always fail to lock the cache */ + vcpu_e500->l1csr0 |= L1CSR0_CUL; + return EMULATE_DONE; +} + int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { @@ -116,6 +126,10 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, case 31: switch (get_xop(inst)) { + case XOP_DCBTLS: + emulated = kvmppc_e500_emul_dcbtls(vcpu); + break; + #ifdef CONFIG_KVM_E500MC case XOP_MSGSND: emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); -- cgit v1.2.1 From e5ee5422f8867d8b8108f8e1f0f47dc59b043f5b Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 5 May 2014 08:39:44 +0530 Subject: KVM: PPC: BOOK3S: PR: Enable Little Endian PR guest This patch make sure we inherit the LE bit correctly in different case so that we can run Little Endian distro in PR mode Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 23 ++++++++++++++++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 83da1f868fd5..8231b83c493b 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -38,7 
+38,7 @@ static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) { - kvmppc_set_msr(vcpu, MSR_SF); + kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); } static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index c5c052a9729c..8c05cb5877a9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -249,7 +249,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) ulong smsr = vcpu->arch.shared->msr; /* Guest MSR values */ - smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE; + smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; /* Process MSR values */ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* External providers the guest reserved */ @@ -1110,6 +1110,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_HIOR: *val = get_reg_val(id, to_book3s(vcpu)->hior); break; + case KVM_REG_PPC_LPCR: + /* + * We are only interested in the LPCR_ILE bit + */ + if (vcpu->arch.intr_msr & MSR_LE) + *val = get_reg_val(id, LPCR_ILE); + else + *val = get_reg_val(id, 0); + break; default: r = -EINVAL; break; @@ -1118,6 +1127,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, return r; } +static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) +{ + if (new_lpcr & LPCR_ILE) + vcpu->arch.intr_msr |= MSR_LE; + else + vcpu->arch.intr_msr &= ~MSR_LE; +} + static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val) { @@ -1128,6 +1145,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, to_book3s(vcpu)->hior = set_reg_val(id, *val); to_book3s(vcpu)->hior_explicit = true; break; + case KVM_REG_PPC_LPCR: + kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); + break; default: r = -EINVAL; break; @@ -1180,6 +1200,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, vcpu->arch.pvr = 0x3C0301; if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) vcpu->arch.pvr = mfspr(SPRN_PVR); + vcpu->arch.intr_msr = MSR_SF; #else /* default to book3s_32 (750) */ vcpu->arch.pvr = 0x84202; -- cgit v1.2.1 From 7562c4fded33ca931e17f30a259f49ac20c89b7d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Sun, 4 May 2014 22:56:08 +0530 Subject: KVM: PPC: BOOK3S: PR: Fix WARN_ON with debug options on With debug option "sleep inside atomic section checking" enabled we get the below WARN_ON during a PR KVM boot. This is because upstream now have PREEMPT_COUNT enabled even if we have preempt disabled. Fix the warning by adding preempt_disable/enable around floating point and altivec enable. 
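In outline, the fix brackets each "enable facility, load guest state" sequence in a preempt-disabled region, mirroring the hunks in the diff further below (sketch only, kernel context and <linux/preempt.h> assumed):

        preempt_disable();
        enable_kernel_fp();                 /* touches per-CPU FP ownership */
        load_fp_state(&vcpu->arch.fp);      /* restore guest FP registers */
        t->fp_save_area = &vcpu->arch.fp;
        preempt_enable();

The trace that motivated the change follows.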
WARNING: at arch/powerpc/kernel/process.c:156 Modules linked in: kvm_pr kvm CPU: 1 PID: 3990 Comm: qemu-system-ppc Tainted: G W 3.15.0-rc1+ #4 task: c0000000eb85b3a0 ti: c0000000ec59c000 task.ti: c0000000ec59c000 NIP: c000000000015c84 LR: d000000003334644 CTR: c000000000015c00 REGS: c0000000ec59f140 TRAP: 0700 Tainted: G W (3.15.0-rc1+) MSR: 8000000000029032 CR: 42000024 XER: 20000000 CFAR: c000000000015c24 SOFTE: 1 GPR00: d000000003334644 c0000000ec59f3c0 c000000000e2fa40 c0000000e2f80000 GPR04: 0000000000000800 0000000000002000 0000000000000001 8000000000000000 GPR08: 0000000000000001 0000000000000001 0000000000002000 c000000000015c00 GPR12: d00000000333da18 c00000000fb80900 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 00003fffce4e0fa1 GPR20: 0000000000000010 0000000000000001 0000000000000002 00000000100b9a38 GPR24: 0000000000000002 0000000000000000 0000000000000000 0000000000000013 GPR28: 0000000000000000 c0000000eb85b3a0 0000000000002000 c0000000e2f80000 NIP [c000000000015c84] .enable_kernel_fp+0x84/0x90 LR [d000000003334644] .kvmppc_handle_ext+0x134/0x190 [kvm_pr] Call Trace: [c0000000ec59f3c0] [0000000000000010] 0x10 (unreliable) [c0000000ec59f430] [d000000003334644] .kvmppc_handle_ext+0x134/0x190 [kvm_pr] [c0000000ec59f4c0] [d00000000324b380] .kvmppc_set_msr+0x30/0x50 [kvm] [c0000000ec59f530] [d000000003337cac] .kvmppc_core_emulate_op_pr+0x16c/0x5e0 [kvm_pr] [c0000000ec59f5f0] [d00000000324a944] .kvmppc_emulate_instruction+0x284/0xa80 [kvm] [c0000000ec59f6c0] [d000000003336888] .kvmppc_handle_exit_pr+0x488/0xb70 [kvm_pr] [c0000000ec59f790] [d000000003338d34] kvm_start_lightweight+0xcc/0xdc [kvm_pr] [c0000000ec59f960] [d000000003336288] .kvmppc_vcpu_run_pr+0xc8/0x190 [kvm_pr] [c0000000ec59f9f0] [d00000000324c880] .kvmppc_vcpu_run+0x30/0x50 [kvm] [c0000000ec59fa60] [d000000003249e74] .kvm_arch_vcpu_ioctl_run+0x54/0x1b0 [kvm] [c0000000ec59faf0] [d000000003244948] .kvm_vcpu_ioctl+0x478/0x760 [kvm] [c0000000ec59fcb0] [c000000000224e34] .do_vfs_ioctl+0x4d4/0x790 [c0000000ec59fd90] [c000000000225148] .SyS_ioctl+0x58/0xb0 [c0000000ec59fe30] [c00000000000a1e4] syscall_exit+0x0/0x98 Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_pr.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 8c05cb5877a9..01a7156d055c 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -683,16 +683,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #endif if (msr & MSR_FP) { + preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); t->fp_save_area = &vcpu->arch.fp; + preempt_enable(); } if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC + preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); t->vr_save_area = &vcpu->arch.vr; + preempt_enable(); #endif } @@ -716,13 +720,17 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) return; if (lost_ext & MSR_FP) { + preempt_disable(); enable_kernel_fp(); load_fp_state(&vcpu->arch.fp); + preempt_enable(); } #ifdef CONFIG_ALTIVEC if (lost_ext & MSR_VEC) { + preempt_disable(); enable_kernel_altivec(); load_vr_state(&vcpu->arch.vr); + preempt_enable(); } #endif current->thread.regs->msr |= lost_ext; -- cgit v1.2.1 From 740f834eb2505e1883b54cfc1644af80834ab6b6 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 12:48:19 +0200 Subject: KVM: PPC: Book3S: PR: Fix C/R bit setting 
Commit 9308ab8e2d made C/R HTAB updates go byte-wise into the target HTAB. However, it didn't update the guest's copy of the HTAB, but instead the host local copy of it. Write to the guest's HTAB instead. Signed-off-by: Alexander Graf CC: Paul Mackerras Acked-by: Paul Mackerras --- arch/powerpc/kvm/book3s_32_mmu.c | 2 +- arch/powerpc/kvm/book3s_64_mmu.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 76a64ce6a5b6..60fc3f4de25f 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -270,7 +270,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, page */ if (found) { u32 pte_r = pteg[i+1]; - char __user *addr = (char __user *) &pteg[i+1]; + char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32)); /* * Use single-byte writes to update the HPTE, to diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 8231b83c493b..171e5caf2b88 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -342,14 +342,14 @@ do_second: * non-PAPR platforms such as mac99, and this is * what real hardware does. */ - char __user *addr = (char __user *) &pteg[i+1]; + char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); r |= HPTE_R_R; put_user(r >> 8, addr + 6); } if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { /* Set the dirty flag */ /* Use a single byte write */ - char __user *addr = (char __user *) &pteg[i+1]; + char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); r |= HPTE_R_C; put_user(r, addr + 7); } -- cgit v1.2.1 From 860540bc50a174c2dff5e11821375475a5da5f4e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 12:51:44 +0200 Subject: KVM: PPC: Book3S_32: PR: Access HTAB in big endian The HTAB is always big endian. We access the guest's HTAB using copy_from/to_user, but don't yet take care of the fact that we might be running on an LE host. Wrap all accesses to the guest HTAB with big endian accessors. 
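A userspace analogue of the access pattern, using glibc's <endian.h> (illustrative only; the in-kernel code uses be32_to_cpu() as shown in the diff below):

        #include <endian.h>
        #include <stdint.h>

        /* Guest HTAB words are stored big endian; convert on read so the
         * PTE matching logic works the same on BE and LE hosts. */
        static uint32_t pteg_word(const uint32_t *pteg, int i)
        {
                return be32toh(pteg[i]);
        }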
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_32_mmu.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 60fc3f4de25f..0e42b167da0b 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -208,6 +208,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, u32 sre; hva_t ptegp; u32 pteg[16]; + u32 pte0, pte1; u32 ptem = 0; int i; int found = 0; @@ -233,11 +234,13 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, } for (i=0; i<16; i+=2) { - if (ptem == pteg[i]) { + pte0 = be32_to_cpu(pteg[i]); + pte1 = be32_to_cpu(pteg[i + 1]); + if (ptem == pte0) { u8 pp; - pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); - pp = pteg[i+1] & 3; + pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF); + pp = pte1 & 3; if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) @@ -260,7 +263,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, } dprintk_pte("MMU: Found PTE -> %x %x - %x\n", - pteg[i], pteg[i+1], pp); + pte0, pte1, pp); found = 1; break; } @@ -269,7 +272,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, /* Update PTE C and A bits, so the guest's swapper knows we used the page */ if (found) { - u32 pte_r = pteg[i+1]; + u32 pte_r = pte1; char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32)); /* @@ -296,7 +299,8 @@ no_page_found: to_book3s(vcpu)->sdr1, ptegp); for (i=0; i<16; i+=2) { dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", - i, pteg[i], pteg[i+1], ptem); + i, be32_to_cpu(pteg[i]), + be32_to_cpu(pteg[i+1]), ptem); } } -- cgit v1.2.1 From 4e509af9f83debe296661d2e09a8a214850efe3c Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 12:54:54 +0200 Subject: KVM: PPC: Book3S_64 PR: Access HTAB in big endian The HTAB is always big endian. We access the guest's HTAB using copy_from/to_user, but don't yet take care of the fact that we might be running on an LE host. Wrap all accesses to the guest HTAB with big endian accessors. 
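The same idea for the 64-bit HTAB, as a userspace sketch (the valid-bit constant is an assumption made for the example, not taken from the patch):

        #include <endian.h>
        #include <stdint.h>

        #define HPTE_VALID_BIT 0x1ULL   /* assumed for the sketch */

        /* Each HPTE is a pair of big endian dwords in guest memory; swap
         * before inspecting any field on a little endian host. */
        static int hpte_is_valid(const uint64_t *pteg, int slot)
        {
                return (be64toh(pteg[slot]) & HPTE_VALID_BIT) != 0;
        }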
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 171e5caf2b88..b93c2456253d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -275,12 +275,15 @@ do_second: key = 4; for (i=0; i<16; i+=2) { + u64 pte0 = be64_to_cpu(pteg[i]); + u64 pte1 = be64_to_cpu(pteg[i + 1]); + /* Check all relevant fields of 1st dword */ - if ((pteg[i] & v_mask) == v_val) { + if ((pte0 & v_mask) == v_val) { /* If large page bit is set, check pgsize encoding */ if (slbe->large && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { - pgsize = decode_pagesize(slbe, pteg[i+1]); + pgsize = decode_pagesize(slbe, pte1); if (pgsize < 0) continue; } @@ -297,8 +300,8 @@ do_second: goto do_second; } - v = pteg[i]; - r = pteg[i+1]; + v = be64_to_cpu(pteg[i]); + r = be64_to_cpu(pteg[i+1]); pp = (r & HPTE_R_PP) | key; if (r & HPTE_R_PP0) pp |= 8; -- cgit v1.2.1 From 14a7d41dad9d3943e05995c59bfe7e0117d8e752 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 12:57:11 +0200 Subject: KVM: PPC: Book3S_64 PR: Access shadow slb in big endian The "shadow SLB" in the PACA is shared with the hypervisor, so it has to be big endian. We access the shadow SLB during world switch, so let's make sure we access it in big endian even when we're on a little endian host. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_slb.S | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 4f12e8f0c718..596140e5c889 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -17,29 +17,28 @@ * Authors: Alexander Graf */ -#ifdef __LITTLE_ENDIAN__ -#error Need to fix SLB shadow accesses in little endian mode -#endif - #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) #define UNBOLT_SLB_ENTRY(num) \ - ld r9, SHADOW_SLB_ESID(num)(r12); \ - /* Invalid? Skip. */; \ - rldicl. r0, r9, 37, 63; \ - beq slb_entry_skip_ ## num; \ - xoris r9, r9, SLB_ESID_V@h; \ - std r9, SHADOW_SLB_ESID(num)(r12); \ + li r11, SHADOW_SLB_ESID(num); \ + LDX_BE r9, r12, r11; \ + /* Invalid? Skip. */; \ + rldicl. r0, r9, 37, 63; \ + beq slb_entry_skip_ ## num; \ + xoris r9, r9, SLB_ESID_V@h; \ + STDX_BE r9, r12, r11; \ slb_entry_skip_ ## num: #define REBOLT_SLB_ENTRY(num) \ - ld r10, SHADOW_SLB_ESID(num)(r11); \ - cmpdi r10, 0; \ - beq slb_exit_skip_ ## num; \ - oris r10, r10, SLB_ESID_V@h; \ - ld r9, SHADOW_SLB_VSID(num)(r11); \ - slbmte r9, r10; \ - std r10, SHADOW_SLB_ESID(num)(r11); \ + li r8, SHADOW_SLB_ESID(num); \ + li r7, SHADOW_SLB_VSID(num); \ + LDX_BE r10, r11, r8; \ + cmpdi r10, 0; \ + beq slb_exit_skip_ ## num; \ + oris r10, r10, SLB_ESID_V@h; \ + LDX_BE r9, r11, r7; \ + slbmte r9, r10; \ + STDX_BE r10, r11, r8; \ slb_exit_skip_ ## num: /****************************************************************************** -- cgit v1.2.1 From 94810ba4edc8fc49c68650306928245f6c0c99fa Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:04:01 +0200 Subject: KVM: PPC: Book3S PR: Default to big endian guest The default MSR when user space does not define anything should be identical on little and big endian hosts, so remove MSR_LE from it. 
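The point reduces to masking the endian bit out of a host-derived default so that BE and LE hosts hand the guest the same starting value. A toy illustration with made-up constants, not the kernel's definitions:

        #include <stdint.h>

        #define TOY_MSR_LE     (1u << 0)                /* hypothetical bit */
        #define TOY_HOST_DFLT  (0x9032u | TOY_MSR_LE)   /* pretend LE host  */

        static uint32_t guest_default_msr(void)
        {
                return TOY_HOST_DFLT & ~TOY_MSR_LE;     /* host-independent */
        }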
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_pr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 01a7156d055c..d7b0ad2bffe4 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1216,7 +1216,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); vcpu->arch.slb_nr = 64; - vcpu->arch.shadow_msr = MSR_USER64; + vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; err = kvmppc_mmu_init(vcpu); if (err < 0) -- cgit v1.2.1 From 1692aa3faa5dff40eaa8ba8fb58aad9a91cac640 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:09:15 +0200 Subject: KVM: PPC: Book3S PR: PAPR: Access HTAB in big endian The HTAB on PPC is always in big endian. When we access it via hypercalls on behalf of the guest and we're running on a little endian host, we need to make sure we swap the bits accordingly. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_pr_papr.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index 5efa97b993d8..255e5b1da1e0 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -57,7 +57,7 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) for (i = 0; ; ++i) { if (i == 8) goto done; - if ((*hpte & HPTE_V_VALID) == 0) + if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0) break; hpte += 2; } @@ -67,8 +67,8 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) goto done; } - hpte[0] = kvmppc_get_gpr(vcpu, 6); - hpte[1] = kvmppc_get_gpr(vcpu, 7); + hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); + hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); pteg_addr += i * HPTE_SIZE; copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); kvmppc_set_gpr(vcpu, 4, pte_index | i); @@ -93,6 +93,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + pte[0] = be64_to_cpu(pte[0]); + pte[1] = be64_to_cpu(pte[1]); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || @@ -169,6 +171,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + pte[0] = be64_to_cpu(pte[0]); + pte[1] = be64_to_cpu(pte[1]); /* tsl = AVPN */ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; @@ -207,6 +211,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + pte[0] = be64_to_cpu(pte[0]); + pte[1] = be64_to_cpu(pte[1]); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || @@ -225,6 +231,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); + pte[0] = cpu_to_be64(pte[0]); + pte[1] = cpu_to_be64(pte[1]); copy_to_user((void __user *)pteg, pte, sizeof(pte)); ret = H_SUCCESS; -- cgit v1.2.1 From b59d9d26becb8b32596f2eed6ce5a81d61f4f513 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:10:33 +0200 Subject: KVM: PPC: Book3S PR: PAPR: Access RTAS in big endian When the guest does an RTAS hypercall it keeps all RTAS variables inside a big endian data structure. To make sure we don't have to bother about endianness inside the actual RTAS handlers, let's just convert the whole structure to host endian before we call our RTAS handlers and back to big endian when we return to the guest. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_rtas.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index 7a053157483b..edb14ba992b3 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c @@ -205,6 +205,32 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) return rc; } +static void kvmppc_rtas_swap_endian_in(struct rtas_args *args) +{ +#ifdef __LITTLE_ENDIAN__ + int i; + + args->token = be32_to_cpu(args->token); + args->nargs = be32_to_cpu(args->nargs); + args->nret = be32_to_cpu(args->nret); + for (i = 0; i < args->nargs; i++) + args->args[i] = be32_to_cpu(args->args[i]); +#endif +} + +static void kvmppc_rtas_swap_endian_out(struct rtas_args *args) +{ +#ifdef __LITTLE_ENDIAN__ + int i; + + for (i = 0; i < args->nret; i++) + args->args[i] = cpu_to_be32(args->args[i]); + args->token = cpu_to_be32(args->token); + args->nargs = cpu_to_be32(args->nargs); + args->nret = cpu_to_be32(args->nret); +#endif +} + int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) { struct rtas_token_definition *d; @@ -223,6 +249,8 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) if (rc) goto fail; + kvmppc_rtas_swap_endian_in(&args); + /* * args->rets is a pointer into args->args. Now that we've * copied args we need to fix it up to point into our copy, @@ -247,6 +275,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) if (rc == 0) { args.rets = orig_rets; + kvmppc_rtas_swap_endian_out(&args); rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); if (rc) goto fail; -- cgit v1.2.1 From 2743103f91e20d4c4d4f1a8d00821289c4c6ff62 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:39:16 +0200 Subject: KVM: PPC: PR: Fill pvinfo hcall instructions in big endian We expose a blob of hypercall instructions to user space that it gives to the guest via device tree again. That blob should contain a stream of instructions necessary to do a hypercall in big endian, as it just gets passed into the guest and old guests use them straight away. 
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/powerpc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3cf541a53e2a..a9bd0ff0e173 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1015,10 +1015,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) u32 inst_nop = 0x60000000; #ifdef CONFIG_KVM_BOOKE_HV u32 inst_sc1 = 0x44000022; - pvinfo->hcall[0] = inst_sc1; - pvinfo->hcall[1] = inst_nop; - pvinfo->hcall[2] = inst_nop; - pvinfo->hcall[3] = inst_nop; + pvinfo->hcall[0] = cpu_to_be32(inst_sc1); + pvinfo->hcall[1] = cpu_to_be32(inst_nop); + pvinfo->hcall[2] = cpu_to_be32(inst_nop); + pvinfo->hcall[3] = cpu_to_be32(inst_nop); #else u32 inst_lis = 0x3c000000; u32 inst_ori = 0x60000000; @@ -1034,10 +1034,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) * sc * nop */ - pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); - pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); - pvinfo->hcall[2] = inst_sc; - pvinfo->hcall[3] = inst_nop; + pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); + pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); + pvinfo->hcall[2] = cpu_to_be32(inst_sc); + pvinfo->hcall[3] = cpu_to_be32(inst_nop); #endif pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; -- cgit v1.2.1 From 5deb8e7ad8ac7e3fcdfa042acff617f461b361c2 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:46:24 +0200 Subject: KVM: PPC: Make shared struct aka magic page guest endian The shared (magic) page is a data structure that contains often used supervisor privileged SPRs accessible via memory to the user to reduce the number of exits we have to take to read/write them. When we actually share this structure with the guest we have to maintain it in guest endianness, because some of the patch tricks only work with native endian load/store operations. Since we only share the structure with either host or guest in little endian on book3s_64 pr mode, we don't have to worry about booke or book3s hv. For booke, the shared struct stays big endian. For book3s_64 hv we maintain the struct in host native endian, since it never gets shared with the guest. For book3s_64 pr we introduce a variable that tells us which endianness the shared struct is in and route every access to it through helper inline functions that evaluate this variable. 
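A userspace analogue of the accessor approach (hypothetical struct and helper names, glibc's <endian.h> assumed): every access to the shared page goes through a helper that consults the per-vcpu endianness flag.

        #include <endian.h>
        #include <stdbool.h>
        #include <stdint.h>

        struct toy_shared { uint64_t msr; };    /* stand-in for the real struct */

        static uint64_t shared_get_msr(const struct toy_shared *s, bool is_be)
        {
                return is_be ? be64toh(s->msr) : le64toh(s->msr);
        }

        static void shared_set_msr(struct toy_shared *s, bool is_be, uint64_t v)
        {
                s->msr = is_be ? htobe64(v) : htole64(v);
        }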
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s.c | 72 ++++++++++++------------ arch/powerpc/kvm/book3s_32_mmu.c | 21 +++---- arch/powerpc/kvm/book3s_32_mmu_host.c | 4 +- arch/powerpc/kvm/book3s_64_mmu.c | 19 ++++--- arch/powerpc/kvm/book3s_64_mmu_host.c | 4 +- arch/powerpc/kvm/book3s_emulate.c | 28 ++++----- arch/powerpc/kvm/book3s_exports.c | 1 + arch/powerpc/kvm/book3s_hv.c | 11 ++++ arch/powerpc/kvm/book3s_interrupts.S | 23 +++++++- arch/powerpc/kvm/book3s_paired_singles.c | 16 +++--- arch/powerpc/kvm/book3s_pr.c | 97 +++++++++++++++++++------------- arch/powerpc/kvm/book3s_pr_papr.c | 2 +- arch/powerpc/kvm/emulate.c | 24 ++++---- arch/powerpc/kvm/powerpc.c | 33 ++++++++++- arch/powerpc/kvm/trace_pr.h | 2 +- 15 files changed, 221 insertions(+), 136 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 94e597e6f15c..81abc5cef0fe 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, if (is_kvmppc_hv_enabled(vcpu->kvm)) return; if (pending_now) - vcpu->arch.shared->int_pending = 1; + kvmppc_set_int_pending(vcpu, 1); else if (old_pending) - vcpu->arch.shared->int_pending = 0; + kvmppc_set_int_pending(vcpu, 0); } static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) @@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) if (is_kvmppc_hv_enabled(vcpu->kvm)) return false; - crit_raw = vcpu->arch.shared->critical; + crit_raw = kvmppc_get_critical(vcpu); crit_r1 = kvmppc_get_gpr(vcpu, 1); /* Truncate crit indicators in 32 bit mode */ - if (!(vcpu->arch.shared->msr & MSR_SF)) { + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { crit_raw &= 0xffffffff; crit_r1 &= 0xffffffff; } @@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) /* Critical section when crit == r1 */ crit = (crit_raw == crit_r1); /* ... and we're in supervisor mode */ - crit = crit && !(vcpu->arch.shared->msr & MSR_PR); + crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); return crit; } void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) { - vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); - vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; + kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu)); + kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags); kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); vcpu->arch.mmu.reset_msr(vcpu); } @@ -225,12 +225,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) switch (priority) { case BOOK3S_IRQPRIO_DECREMENTER: - deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; + deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_DECREMENTER; break; case BOOK3S_IRQPRIO_EXTERNAL: case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: - deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; + deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; vec = BOOK3S_INTERRUPT_EXTERNAL; break; case BOOK3S_IRQPRIO_SYSTEM_RESET: @@ -343,7 +343,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing, { ulong mp_pa = vcpu->arch.magic_page_pa; - if (!(vcpu->arch.shared->msr & MSR_SF)) + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) mp_pa = (uint32_t)mp_pa; /* Magic page override */ @@ -367,7 +367,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn); static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, bool iswrite, struct kvmppc_pte *pte) { - int relocated = (vcpu->arch.shared->msr & (data ? 
MSR_DR : MSR_IR)); + int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR)); int r; if (relocated) { @@ -498,18 +498,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) regs->ctr = kvmppc_get_ctr(vcpu); regs->lr = kvmppc_get_lr(vcpu); regs->xer = kvmppc_get_xer(vcpu); - regs->msr = vcpu->arch.shared->msr; - regs->srr0 = vcpu->arch.shared->srr0; - regs->srr1 = vcpu->arch.shared->srr1; + regs->msr = kvmppc_get_msr(vcpu); + regs->srr0 = kvmppc_get_srr0(vcpu); + regs->srr1 = kvmppc_get_srr1(vcpu); regs->pid = vcpu->arch.pid; - regs->sprg0 = vcpu->arch.shared->sprg0; - regs->sprg1 = vcpu->arch.shared->sprg1; - regs->sprg2 = vcpu->arch.shared->sprg2; - regs->sprg3 = vcpu->arch.shared->sprg3; - regs->sprg4 = vcpu->arch.shared->sprg4; - regs->sprg5 = vcpu->arch.shared->sprg5; - regs->sprg6 = vcpu->arch.shared->sprg6; - regs->sprg7 = vcpu->arch.shared->sprg7; + regs->sprg0 = kvmppc_get_sprg0(vcpu); + regs->sprg1 = kvmppc_get_sprg1(vcpu); + regs->sprg2 = kvmppc_get_sprg2(vcpu); + regs->sprg3 = kvmppc_get_sprg3(vcpu); + regs->sprg4 = kvmppc_get_sprg4(vcpu); + regs->sprg5 = kvmppc_get_sprg5(vcpu); + regs->sprg6 = kvmppc_get_sprg6(vcpu); + regs->sprg7 = kvmppc_get_sprg7(vcpu); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) regs->gpr[i] = kvmppc_get_gpr(vcpu, i); @@ -527,16 +527,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) kvmppc_set_lr(vcpu, regs->lr); kvmppc_set_xer(vcpu, regs->xer); kvmppc_set_msr(vcpu, regs->msr); - vcpu->arch.shared->srr0 = regs->srr0; - vcpu->arch.shared->srr1 = regs->srr1; - vcpu->arch.shared->sprg0 = regs->sprg0; - vcpu->arch.shared->sprg1 = regs->sprg1; - vcpu->arch.shared->sprg2 = regs->sprg2; - vcpu->arch.shared->sprg3 = regs->sprg3; - vcpu->arch.shared->sprg4 = regs->sprg4; - vcpu->arch.shared->sprg5 = regs->sprg5; - vcpu->arch.shared->sprg6 = regs->sprg6; - vcpu->arch.shared->sprg7 = regs->sprg7; + kvmppc_set_srr0(vcpu, regs->srr0); + kvmppc_set_srr1(vcpu, regs->srr1); + kvmppc_set_sprg0(vcpu, regs->sprg0); + kvmppc_set_sprg1(vcpu, regs->sprg1); + kvmppc_set_sprg2(vcpu, regs->sprg2); + kvmppc_set_sprg3(vcpu, regs->sprg3); + kvmppc_set_sprg4(vcpu, regs->sprg4); + kvmppc_set_sprg5(vcpu, regs->sprg5); + kvmppc_set_sprg6(vcpu, regs->sprg6); + kvmppc_set_sprg7(vcpu, regs->sprg7); for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) kvmppc_set_gpr(vcpu, i, regs->gpr[i]); @@ -570,10 +570,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = 0; switch (reg->id) { case KVM_REG_PPC_DAR: - val = get_reg_val(reg->id, vcpu->arch.shared->dar); + val = get_reg_val(reg->id, kvmppc_get_dar(vcpu)); break; case KVM_REG_PPC_DSISR: - val = get_reg_val(reg->id, vcpu->arch.shared->dsisr); + val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu)); break; case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: i = reg->id - KVM_REG_PPC_FPR0; @@ -660,10 +660,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = 0; switch (reg->id) { case KVM_REG_PPC_DAR: - vcpu->arch.shared->dar = set_reg_val(reg->id, val); + kvmppc_set_dar(vcpu, set_reg_val(reg->id, val)); break; case KVM_REG_PPC_DSISR: - vcpu->arch.shared->dsisr = set_reg_val(reg->id, val); + kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val)); break; case KVM_REG_PPC_FPR0 ... 
KVM_REG_PPC_FPR31: i = reg->id - KVM_REG_PPC_FPR0; diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 0e42b167da0b..628d90ed417d 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -91,7 +91,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) { - return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf]; + return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); } static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, @@ -160,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, else bat = &vcpu_book3s->ibat[i]; - if (vcpu->arch.shared->msr & MSR_PR) { + if (kvmppc_get_msr(vcpu) & MSR_PR) { if (!bat->vp) continue; } else { @@ -242,8 +242,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF); pp = pte1 & 3; - if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || - (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) + if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) || + (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR))) pp |= 4; pte->may_write = false; @@ -320,7 +320,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && - !(vcpu->arch.shared->msr & MSR_PR)) { + !(kvmppc_get_msr(vcpu) & MSR_PR)) { pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); pte->raddr &= KVM_PAM; @@ -345,13 +345,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) { - return vcpu->arch.shared->sr[srnum]; + return kvmppc_get_sr(vcpu, srnum); } static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ulong value) { - vcpu->arch.shared->sr[srnum] = value; + kvmppc_set_sr(vcpu, srnum, value); kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); } @@ -371,8 +371,9 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, ulong ea = esid << SID_SHIFT; u32 sr; u64 gvsid = esid; + u64 msr = kvmppc_get_msr(vcpu); - if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { + if (msr & (MSR_DR|MSR_IR)) { sr = find_sr(vcpu, ea); if (sr_valid(sr)) gvsid = sr_vsid(sr); @@ -381,7 +382,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, /* In case we only have one of MSR_IR or MSR_DR set, let's put that in the real-mode context (and hope RM doesn't access high memory) */ - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { + switch (msr & (MSR_DR|MSR_IR)) { case 0: *vsid = VSID_REAL | esid; break; @@ -401,7 +402,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, BUG(); } - if (vcpu->arch.shared->msr & MSR_PR) + if (msr & MSR_PR) *vsid |= VSID_PR; return 0; diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 5fac89dfe4cd..678e75370495 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -92,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) struct kvmppc_sid_map *map; u16 sid_map_mask; - if (vcpu->arch.shared->msr & MSR_PR) + if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); @@ -279,7 
+279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) u16 sid_map_mask; static int backwards_map = 0; - if (vcpu->arch.shared->msr & MSR_PR) + if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; /* We might get collisions that trap in preceding order, so let's diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index b93c2456253d..278729f4df80 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && - !(vcpu->arch.shared->msr & MSR_PR)) { + !(kvmppc_get_msr(vcpu) & MSR_PR)) { gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); @@ -269,9 +269,9 @@ do_second: goto no_page_found; } - if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) + if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) key = 4; - else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) + else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) key = 4; for (i=0; i<16; i+=2) { @@ -482,7 +482,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) vcpu->arch.slb[i].origv = 0; } - if (vcpu->arch.shared->msr & MSR_IR) { + if (kvmppc_get_msr(vcpu) & MSR_IR) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); } @@ -566,7 +566,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid) { ulong mp_ea = vcpu->arch.magic_page_ea; - return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) && + return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) && (mp_ea >> SID_SHIFT) == esid; } #endif @@ -579,8 +579,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 gvsid = esid; ulong mp_ea = vcpu->arch.magic_page_ea; int pagesize = MMU_PAGE_64K; + u64 msr = kvmppc_get_msr(vcpu); - if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { + if (msr & (MSR_DR|MSR_IR)) { slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (slb) { gvsid = slb->vsid; @@ -593,7 +594,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, } } - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { + switch (msr & (MSR_DR|MSR_IR)) { case 0: gvsid = VSID_REAL | esid; break; @@ -626,7 +627,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, gvsid |= VSID_64K; #endif - if (vcpu->arch.shared->msr & MSR_PR) + if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; *vsid = gvsid; @@ -636,7 +637,7 @@ no_slb: /* Catch magic page case */ if (unlikely(mp_ea) && unlikely(esid == (mp_ea >> SID_SHIFT)) && - !(vcpu->arch.shared->msr & MSR_PR)) { + !(kvmppc_get_msr(vcpu) & MSR_PR)) { *vsid = VSID_REAL | esid; return 0; } diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 0d513af62bba..e2efb85c65a3 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -58,7 +58,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) struct kvmppc_sid_map *map; u16 sid_map_mask; - if (vcpu->arch.shared->msr & MSR_PR) + if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); @@ -230,7 +230,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) u16 sid_map_mask; static int backwards_map = 0; - if 
(vcpu->arch.shared->msr & MSR_PR) + if (kvmppc_get_msr(vcpu) & MSR_PR) gvsid |= VSID_PR; /* We might get collisions that trap in preceding order, so let's diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 99d40f8977e8..45d0a805c3ca 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -80,7 +80,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) return false; /* Limit user space to its own small SPR set */ - if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM) + if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) return false; return true; @@ -100,8 +100,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (get_xop(inst)) { case OP_19_XOP_RFID: case OP_19_XOP_RFI: - kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0); - kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); + kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu)); + kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu)); *advance = 0; break; @@ -113,16 +113,16 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR: - kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); + kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu)); break; case OP_31_XOP_MTMSRD: { ulong rs_val = kvmppc_get_gpr(vcpu, rs); if (inst & 0x10000) { - ulong new_msr = vcpu->arch.shared->msr; + ulong new_msr = kvmppc_get_msr(vcpu); new_msr &= ~(MSR_RI | MSR_EE); new_msr |= rs_val & (MSR_RI | MSR_EE); - vcpu->arch.shared->msr = new_msr; + kvmppc_set_msr_fast(vcpu, new_msr); } else kvmppc_set_msr(vcpu, rs_val); break; @@ -179,7 +179,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; - if ((vcpu->arch.shared->msr & MSR_PR) || + if ((kvmppc_get_msr(vcpu) & MSR_PR) || !vcpu->arch.papr_enabled) { emulated = EMULATE_FAIL; break; @@ -261,14 +261,14 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, ra_val = kvmppc_get_gpr(vcpu, ra); addr = (ra_val + rb_val) & ~31ULL; - if (!(vcpu->arch.shared->msr & MSR_SF)) + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) addr &= 0xffffffff; vaddr = addr; r = kvmppc_st(vcpu, &addr, 32, zeros, true); if ((r == -ENOENT) || (r == -EPERM)) { *advance = 0; - vcpu->arch.shared->dar = vaddr; + kvmppc_set_dar(vcpu, vaddr); vcpu->arch.fault_dar = vaddr; dsisr = DSISR_ISSTORE; @@ -277,7 +277,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, else if (r == -EPERM) dsisr |= DSISR_PROTFAULT; - vcpu->arch.shared->dsisr = dsisr; + kvmppc_set_dsisr(vcpu, dsisr); vcpu->arch.fault_dsisr = dsisr; kvmppc_book3s_queue_irqprio(vcpu, @@ -356,10 +356,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) to_book3s(vcpu)->sdr1 = spr_val; break; case SPRN_DSISR: - vcpu->arch.shared->dsisr = spr_val; + kvmppc_set_dsisr(vcpu, spr_val); break; case SPRN_DAR: - vcpu->arch.shared->dar = spr_val; + kvmppc_set_dar(vcpu, spr_val); break; case SPRN_HIOR: to_book3s(vcpu)->hior = spr_val; @@ -493,10 +493,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val *spr_val = to_book3s(vcpu)->sdr1; break; case SPRN_DSISR: - *spr_val = vcpu->arch.shared->dsisr; + *spr_val = kvmppc_get_dsisr(vcpu); break; case SPRN_DAR: - *spr_val = vcpu->arch.shared->dar; + *spr_val = kvmppc_get_dar(vcpu); break; case SPRN_HIOR: *spr_val = to_book3s(vcpu)->hior; diff --git a/arch/powerpc/kvm/book3s_exports.c 
b/arch/powerpc/kvm/book3s_exports.c index 20d4ea8e656d..0d013fbc2e13 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c @@ -18,6 +18,7 @@ */ #include +#include #include #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8227dba5af0f..030821a414a8 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1280,6 +1280,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, goto free_vcpu; vcpu->arch.shared = &vcpu->arch.shregs; +#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE + /* + * The shared struct is never shared on HV, + * so we can always use host endianness + */ +#ifdef __BIG_ENDIAN__ + vcpu->arch.shared_big_endian = true; +#else + vcpu->arch.shared_big_endian = false; +#endif +#endif vcpu->arch.mmcr[0] = MMCR0_FC; vcpu->arch.ctrl = CTRL_RUNLATCH; /* default to host PVR, since we can't spoof it */ diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index 3533c999194a..e2c29e381dc7 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S @@ -104,8 +104,27 @@ kvm_start_lightweight: stb r3, HSTATE_RESTORE_HID5(r13) /* Load up guest SPRG3 value, since it's user readable */ - ld r3, VCPU_SHARED(r4) - ld r3, VCPU_SHARED_SPRG3(r3) + lwz r3, VCPU_SHAREDBE(r4) + cmpwi r3, 0 + ld r5, VCPU_SHARED(r4) + beq sprg3_little_endian +sprg3_big_endian: +#ifdef __BIG_ENDIAN__ + ld r3, VCPU_SHARED_SPRG3(r5) +#else + addi r5, r5, VCPU_SHARED_SPRG3 + ldbrx r3, 0, r5 +#endif + b after_sprg3_load +sprg3_little_endian: +#ifdef __LITTLE_ENDIAN__ + ld r3, VCPU_SHARED_SPRG3(r5) +#else + addi r5, r5, VCPU_SHARED_SPRG3 + ldbrx r3, 0, r5 +#endif + +after_sprg3_load: mtspr SPRN_SPRG3, r3 #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index c1abd95063f4..6c8011fd57e6 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -165,16 +165,18 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) { - u64 dsisr; - struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; + u32 dsisr; + u64 msr = kvmppc_get_msr(vcpu); - shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); - shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); - shared->dar = eaddr; + msr = kvmppc_set_field(msr, 33, 36, 0); + msr = kvmppc_set_field(msr, 42, 47, 0); + kvmppc_set_msr(vcpu, msr); + kvmppc_set_dar(vcpu, eaddr); /* Page Fault */ dsisr = kvmppc_set_field(0, 33, 33, 1); if (is_store) - shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); + dsisr = kvmppc_set_field(dsisr, 38, 38, 1); + kvmppc_set_dsisr(vcpu, dsisr); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); } @@ -660,7 +662,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) if (!kvmppc_inst_is_paired_single(vcpu, inst)) return EMULATE_FAIL; - if (!(vcpu->arch.shared->msr & MSR_FP)) { + if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); return EMULATE_AGAIN; } diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d7b0ad2bffe4..d424ca053765 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -246,14 +246,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) static void 
kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) { - ulong smsr = vcpu->arch.shared->msr; + ulong guest_msr = kvmppc_get_msr(vcpu); + ulong smsr = guest_msr; /* Guest MSR values */ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; /* Process MSR values */ smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; /* External providers the guest reserved */ - smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext); + smsr |= (guest_msr & vcpu->arch.guest_owned_ext); /* 64-bit Process MSR values */ #ifdef CONFIG_PPC_BOOK3S_64 smsr |= MSR_ISF | MSR_HV; @@ -263,14 +264,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) { - ulong old_msr = vcpu->arch.shared->msr; + ulong old_msr = kvmppc_get_msr(vcpu); #ifdef EXIT_DEBUG printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); #endif msr &= to_book3s(vcpu)->msr_mask; - vcpu->arch.shared->msr = msr; + kvmppc_set_msr_fast(vcpu, msr); kvmppc_recalc_shadow_msr(vcpu); if (msr & MSR_POW) { @@ -281,11 +282,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) /* Unset POW bit after we woke up */ msr &= ~MSR_POW; - vcpu->arch.shared->msr = msr; + kvmppc_set_msr_fast(vcpu, msr); } } - if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) != + if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); @@ -317,7 +318,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) } /* Preload FPU if it's enabled */ - if (vcpu->arch.shared->msr & MSR_FP) + if (kvmppc_get_msr(vcpu) & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); } @@ -438,7 +439,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) { ulong mp_pa = vcpu->arch.magic_page_pa; - if (!(vcpu->arch.shared->msr & MSR_SF)) + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) mp_pa = (uint32_t)mp_pa; if (unlikely(mp_pa) && @@ -459,8 +460,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, int page_found = 0; struct kvmppc_pte pte; bool is_mmio = false; - bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false; - bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false; + bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; + bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; u64 vsid; relocated = data ? 
dr : ir; @@ -480,7 +481,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, pte.page_size = MMU_PAGE_64K; } - switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { + switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { case 0: pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); break; @@ -488,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, case MSR_IR: vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); - if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR) + if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); else pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); @@ -511,22 +512,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (page_found == -ENOENT) { /* Page not found in guest PTE entries */ - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr; - vcpu->arch.shared->msr |= - vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; + u64 ssrr1 = vcpu->arch.shadow_srr1; + u64 msr = kvmppc_get_msr(vcpu); + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); + kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); + kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EPERM) { /* Storage protection */ - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); - vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; - vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; - vcpu->arch.shared->msr |= - vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL; + u32 dsisr = vcpu->arch.fault_dsisr; + u64 ssrr1 = vcpu->arch.shadow_srr1; + u64 msr = kvmppc_get_msr(vcpu); + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); + dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; + kvmppc_set_dsisr(vcpu, dsisr); + kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); kvmppc_book3s_queue_irqprio(vcpu, vec); } else if (page_found == -EINVAL) { /* Page not found in guest SLB */ - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); } else if (!is_mmio && kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { @@ -614,11 +618,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu) ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); if (ret == -ENOENT) { - ulong msr = vcpu->arch.shared->msr; + ulong msr = kvmppc_get_msr(vcpu); msr = kvmppc_set_field(msr, 33, 33, 1); msr = kvmppc_set_field(msr, 34, 36, 0); - vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0); + msr = kvmppc_set_field(msr, 42, 47, 0); + kvmppc_set_msr_fast(vcpu, msr); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); return EMULATE_AGAIN; } @@ -651,7 +656,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) return RESUME_GUEST; - if (!(vcpu->arch.shared->msr & msr)) { + if (!(kvmppc_get_msr(vcpu) & msr)) { kvmppc_book3s_queue_irqprio(vcpu, exit_nr); return RESUME_GUEST; } @@ -792,7 +797,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); r = RESUME_GUEST; } else { - vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000; + u64 msr = kvmppc_get_msr(vcpu); + msr |= shadow_srr1 & 0x58000000; + kvmppc_set_msr_fast(vcpu, msr); kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } @@ -832,8 +839,8 @@ 
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); srcu_read_unlock(&vcpu->kvm->srcu, idx); } else { - vcpu->arch.shared->dar = dar; - vcpu->arch.shared->dsisr = fault_dsisr; + kvmppc_set_dar(vcpu, dar); + kvmppc_set_dsisr(vcpu, fault_dsisr); kvmppc_book3s_queue_irqprio(vcpu, exit_nr); r = RESUME_GUEST; } @@ -841,7 +848,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, } case BOOK3S_INTERRUPT_DATA_SEGMENT: if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { - vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); + kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_SEGMENT); } @@ -879,7 +886,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, program_interrupt: flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; - if (vcpu->arch.shared->msr & MSR_PR) { + if (kvmppc_get_msr(vcpu) & MSR_PR) { #ifdef EXIT_DEBUG printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); #endif @@ -921,7 +928,7 @@ program_interrupt: case BOOK3S_INTERRUPT_SYSCALL: if (vcpu->arch.papr_enabled && (kvmppc_get_last_sc(vcpu) == 0x44000022) && - !(vcpu->arch.shared->msr & MSR_PR)) { + !(kvmppc_get_msr(vcpu) & MSR_PR)) { /* SC 1 papr hypercalls */ ulong cmd = kvmppc_get_gpr(vcpu, 3); int i; @@ -953,7 +960,7 @@ program_interrupt: gprs[i] = kvmppc_get_gpr(vcpu, i); vcpu->arch.osi_needed = 1; r = RESUME_HOST_NV; - } else if (!(vcpu->arch.shared->msr & MSR_PR) && + } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { /* KVM PV hypercalls */ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); @@ -994,10 +1001,16 @@ program_interrupt: } case BOOK3S_INTERRUPT_ALIGNMENT: if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { - vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu, - kvmppc_get_last_inst(vcpu)); - vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu, - kvmppc_get_last_inst(vcpu)); + u32 last_inst = kvmppc_get_last_inst(vcpu); + u32 dsisr; + u64 dar; + + dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); + dar = kvmppc_alignment_dar(vcpu, last_inst); + + kvmppc_set_dsisr(vcpu, dsisr); + kvmppc_set_dar(vcpu, dar); + kvmppc_book3s_queue_irqprio(vcpu, exit_nr); } r = RESUME_GUEST; @@ -1062,7 +1075,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, } } else { for (i = 0; i < 16; i++) - sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i]; + sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); for (i = 0; i < 8; i++) { sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; @@ -1198,8 +1211,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, goto uninit_vcpu; /* the real shared page fills the last 4k of our page */ vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); - #ifdef CONFIG_PPC_BOOK3S_64 + /* Always start the shared struct in native endian mode */ +#ifdef __BIG_ENDIAN__ + vcpu->arch.shared_big_endian = true; +#else + vcpu->arch.shared_big_endian = false; +#endif + /* * Default to the same as the host if we're on sufficiently * recent machine that we have 1TB segments; @@ -1293,7 +1312,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #endif /* Preload FPU if it's enabled */ - if (vcpu->arch.shared->msr & MSR_FP) + if (kvmppc_get_msr(vcpu) & MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); kvmppc_fix_ee_before_entry(); diff --git 
a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index 255e5b1da1e0..52a63bfe3f07 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -278,7 +278,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) case H_PUT_TCE: return kvmppc_h_pr_put_tce(vcpu); case H_CEDE: - vcpu->arch.shared->msr |= MSR_EE; + kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE); kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); vcpu->stat.halt_wakeup++; diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index c2b887be2c29..da86d9ba3476 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -97,10 +97,10 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) switch (sprn) { case SPRN_SRR0: - vcpu->arch.shared->srr0 = spr_val; + kvmppc_set_srr0(vcpu, spr_val); break; case SPRN_SRR1: - vcpu->arch.shared->srr1 = spr_val; + kvmppc_set_srr1(vcpu, spr_val); break; /* XXX We need to context-switch the timebase for @@ -114,16 +114,16 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) break; case SPRN_SPRG0: - vcpu->arch.shared->sprg0 = spr_val; + kvmppc_set_sprg0(vcpu, spr_val); break; case SPRN_SPRG1: - vcpu->arch.shared->sprg1 = spr_val; + kvmppc_set_sprg1(vcpu, spr_val); break; case SPRN_SPRG2: - vcpu->arch.shared->sprg2 = spr_val; + kvmppc_set_sprg2(vcpu, spr_val); break; case SPRN_SPRG3: - vcpu->arch.shared->sprg3 = spr_val; + kvmppc_set_sprg3(vcpu, spr_val); break; /* PIR can legally be written, but we ignore it */ @@ -150,10 +150,10 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) switch (sprn) { case SPRN_SRR0: - spr_val = vcpu->arch.shared->srr0; + spr_val = kvmppc_get_srr0(vcpu); break; case SPRN_SRR1: - spr_val = vcpu->arch.shared->srr1; + spr_val = kvmppc_get_srr1(vcpu); break; case SPRN_PVR: spr_val = vcpu->arch.pvr; @@ -173,16 +173,16 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) break; case SPRN_SPRG0: - spr_val = vcpu->arch.shared->sprg0; + spr_val = kvmppc_get_sprg0(vcpu); break; case SPRN_SPRG1: - spr_val = vcpu->arch.shared->sprg1; + spr_val = kvmppc_get_sprg1(vcpu); break; case SPRN_SPRG2: - spr_val = vcpu->arch.shared->sprg2; + spr_val = kvmppc_get_sprg2(vcpu); break; case SPRN_SPRG3: - spr_val = vcpu->arch.shared->sprg3; + spr_val = kvmppc_get_sprg3(vcpu); break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. 
*/ diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index a9bd0ff0e173..b4e15bf3ff88 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -125,6 +125,27 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) +static void kvmppc_swab_shared(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; + int i; + + shared->sprg0 = swab64(shared->sprg0); + shared->sprg1 = swab64(shared->sprg1); + shared->sprg2 = swab64(shared->sprg2); + shared->sprg3 = swab64(shared->sprg3); + shared->srr0 = swab64(shared->srr0); + shared->srr1 = swab64(shared->srr1); + shared->dar = swab64(shared->dar); + shared->msr = swab64(shared->msr); + shared->dsisr = swab32(shared->dsisr); + shared->int_pending = swab32(shared->int_pending); + for (i = 0; i < ARRAY_SIZE(shared->sr); i++) + shared->sr[i] = swab32(shared->sr[i]); +} +#endif + int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); @@ -135,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; - if (!(vcpu->arch.shared->msr & MSR_SF)) { + if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; @@ -146,6 +167,16 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) switch (nr) { case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) + /* Book3S can be little endian, find it out here */ + int shared_big_endian = true; + if (vcpu->arch.intr_msr & MSR_LE) + shared_big_endian = false; + if (shared_big_endian != vcpu->arch.shared_big_endian) + kvmppc_swab_shared(vcpu); + vcpu->arch.shared_big_endian = shared_big_endian; +#endif + vcpu->arch.magic_page_pa = param1; vcpu->arch.magic_page_ea = param2; diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h index 8b22e4748344..e1357cd8dc1f 100644 --- a/arch/powerpc/kvm/trace_pr.h +++ b/arch/powerpc/kvm/trace_pr.h @@ -255,7 +255,7 @@ TRACE_EVENT(kvm_exit, __entry->exit_nr = exit_nr; __entry->pc = kvmppc_get_pc(vcpu); __entry->dar = kvmppc_get_fault_dar(vcpu); - __entry->msr = vcpu->arch.shared->msr; + __entry->msr = kvmppc_get_msr(vcpu); __entry->srr1 = vcpu->arch.shadow_srr1; __entry->last_inst = vcpu->arch.last_inst; ), -- cgit v1.2.1 From cd087eefe637d545345ea5f888d4ea4fe52e312c Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:52:01 +0200 Subject: KVM: PPC: Book3S PR: Do dcbz32 patching with big endian instructions When the host CPU we're running on doesn't support dcbz32 itself, but the guest wants to have dcbz only clear 32 bytes of data, we loop through every executable mapped page to search for dcbz instructions and patch them with a special privileged instruction that we emulate as dcbz32. The only guests that want to see dcbz act as 32byte are book3s_32 guests, so we don't have to worry about little endian instruction ordering. So let's just always search for big endian dcbz instructions, also when we're on a little endian host. 
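For illustration, the endianness-independent scan boils down to the following standalone sketch; ntohl()/htonl() are stand-ins for be32_to_cpu()/cpu_to_be32(), and the mask and dcbz encoding mirror the constants used in the diff below:

    #include <stdint.h>
    #include <arpa/inet.h>    /* ntohl/htonl as stand-ins for be32_to_cpu/cpu_to_be32 */

    #define DCBZ_MASK 0xff0007ffu
    #define INS_DCBZ  0x7c0007ecu    /* dcbz: primary opcode 31, extended opcode 1014 */

    /* Scan a page of big-endian instruction words and patch every dcbz so it traps. */
    static void patch_dcbz32_sketch(uint32_t *page, unsigned int nwords)
    {
        for (unsigned int i = 0; i < nwords; i++)
            if ((ntohl(page[i]) & DCBZ_MASK) == INS_DCBZ)
                page[i] &= htonl(0xfffffff7);    /* clear one bit -> reserved encoding */
    }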
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_32_mmu.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 628d90ed417d..93503bbdae43 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -131,7 +131,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu, pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", - kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, + kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg, sr_vsid(sre)); r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d424ca053765..6e55934dda0a 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -428,8 +428,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) /* patch dcbz into reserved instruction, so we trap */ for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) - if ((page[i] & 0xff0007ff) == INS_DCBZ) - page[i] &= 0xfffffff7; + if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) + page[i] &= cpu_to_be32(0xfffffff7); kunmap_atomic(page); put_page(hpage); -- cgit v1.2.1 From f24bc1ed45c13805c19f58cc4289b1be7efee19b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 24 Apr 2014 13:55:36 +0200 Subject: KVM: PPC: Book3S: Move little endian conflict to HV KVM With the previous patches applied, we can now successfully use PR KVM on little endian hosts which means we can now allow users to select it. However, HV KVM still needs some work, so let's keep the kconfig conflict on that one. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 141b2027189a..d6a53b95de94 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -6,7 +6,6 @@ source "virt/kvm/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" - depends on !CPU_LITTLE_ENDIAN ---help--- Say Y here to get to see options for using your Linux host to run other operating systems inside virtual machines (guests). @@ -76,6 +75,7 @@ config KVM_BOOK3S_64 config KVM_BOOK3S_64_HV tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host" depends on KVM_BOOK3S_64 + depends on !CPU_LITTLE_ENDIAN select KVM_BOOK3S_HV_POSSIBLE select MMU_NOTIFIER select CMA -- cgit v1.2.1 From f8f6eb0d189cf2724af5ebc8cad460c78fb1994e Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 22 Apr 2014 12:41:06 +0200 Subject: KVM: PPC: Book3S PR: Ignore PMU SPRs When we expose a POWER8 CPU into the guest, it will start accessing PMU SPRs that we don't emulate. Just ignore accesses to them. 
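Put differently, the listed PMU SPRs become write-discard / read-zero registers for the guest. A minimal sketch of that pattern (the enum values are stand-ins, not real SPR numbers):

    #include <stdbool.h>
    #include <stdint.h>

    enum { SPR_MMCR0, SPR_MMCR1, SPR_MMCR2, SPR_MMCRA, SPR_MMCRS };   /* stand-ins only */

    /* mtspr to an ignored SPR is dropped, mfspr reads back 0, and neither traps the guest. */
    static bool emulate_ignored_pmu_spr(int sprn, bool is_read, uint64_t *val)
    {
        switch (sprn) {
        case SPR_MMCR0: case SPR_MMCR1: case SPR_MMCR2:
        case SPR_MMCRA: case SPR_MMCRS:
            if (is_read)
                *val = 0;
            return true;    /* handled, no program interrupt injected */
        default:
            return false;   /* defer to the normal emulation path */
        }
    }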
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 45d0a805c3ca..52448ef499cb 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -455,6 +455,13 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: +#ifdef CONFIG_PPC_BOOK3S_64 + case SPRN_MMCRS: + case SPRN_MMCRA: + case SPRN_MMCR0: + case SPRN_MMCR1: + case SPRN_MMCR2: +#endif break; unprivileged: default: @@ -553,6 +560,13 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_WPAR_GEKKO: case SPRN_MSSSR0: case SPRN_DABR: +#ifdef CONFIG_PPC_BOOK3S_64 + case SPRN_MMCRS: + case SPRN_MMCRA: + case SPRN_MMCR0: + case SPRN_MMCR1: + case SPRN_MMCR2: +#endif *spr_val = 0; break; default: -- cgit v1.2.1 From a5948fa092a04dfd6b9ee31c99eb6896c158eb08 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Fri, 25 Apr 2014 16:07:21 +0200 Subject: KVM: PPC: Book3S PR: Emulate TIR register In parallel to the Processor ID Register (PIR) threaded POWER8 also adds a Thread ID Register (TIR). Since PR KVM doesn't emulate more than one thread per core, we can just always expose 0 here. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 52448ef499cb..0a1de29a4caf 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -566,6 +566,7 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_MMCR0: case SPRN_MMCR1: case SPRN_MMCR2: + case SPRN_TIR: #endif *spr_val = 0; break; -- cgit v1.2.1 From 616dff86028298dbc91174fb3d12b8ed8cd74955 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 29 Apr 2014 16:48:44 +0200 Subject: KVM: PPC: Book3S PR: Handle Facility interrupt and FSCR POWER8 introduced a new interrupt type called "Facility unavailable interrupt" which contains its status message in a new register called FSCR. Handle these exits and try to emulate instructions for unhandled facilities. Follow-on patches enable KVM to expose specific facilities into the guest. 
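The key piece is the FSCR Interrupt Cause field, which sits in the top byte of the 64-bit register. The decode/encode used when reflecting the interrupt into the guest can be sketched as follows (constant and helper names are illustrative):

    #include <stdint.h>

    #define FSCR_IC_SHIFT 56    /* Interrupt Cause field occupies the top byte of FSCR */

    /* Which facility triggered the Facility Unavailable interrupt? */
    static inline unsigned int fscr_interrupt_cause(uint64_t shadow_fscr)
    {
        return (unsigned int)(shadow_fscr >> FSCR_IC_SHIFT);
    }

    /* Store the cause before queueing the interrupt for the guest. */
    static inline uint64_t fscr_with_cause(uint64_t fscr, unsigned int fac)
    {
        fscr &= ~(0xffULL << FSCR_IC_SHIFT);
        return fscr | ((uint64_t)fac << FSCR_IC_SHIFT);
    }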
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s.c | 10 ++++++ arch/powerpc/kvm/book3s_emulate.c | 6 ++++ arch/powerpc/kvm/book3s_hv.c | 6 ---- arch/powerpc/kvm/book3s_pr.c | 68 +++++++++++++++++++++++++++++++++++++++ arch/powerpc/kvm/book3s_segment.S | 25 ++++++++++++++ 5 files changed, 109 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 81abc5cef0fe..79cfa2d10238 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -145,6 +145,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; + case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break; default: prio = BOOK3S_IRQPRIO_MAX; break; } @@ -275,6 +276,9 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: vec = BOOK3S_INTERRUPT_PERFMON; break; + case BOOK3S_IRQPRIO_FAC_UNAVAIL: + vec = BOOK3S_INTERRUPT_FAC_UNAVAIL; + break; default: deliver = 0; printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); @@ -627,6 +631,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ + case KVM_REG_PPC_FSCR: + val = get_reg_val(reg->id, vcpu->arch.fscr); + break; default: r = -EINVAL; break; @@ -716,6 +723,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) set_reg_val(reg->id, val)); break; #endif /* CONFIG_KVM_XICS */ + case KVM_REG_PPC_FSCR: + vcpu->arch.fscr = set_reg_val(reg->id, val); + break; default: r = -EINVAL; break; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 0a1de29a4caf..e8133e5e89c6 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -438,6 +438,9 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_GQR7: to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val; break; + case SPRN_FSCR: + vcpu->arch.fscr = spr_val; + break; case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: @@ -545,6 +548,9 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_GQR7: *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; break; + case SPRN_FSCR: + *spr_val = vcpu->arch.fscr; + break; case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 030821a414a8..0092e12b3e46 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -879,9 +879,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_IAMR: *val = get_reg_val(id, vcpu->arch.iamr); break; - case KVM_REG_PPC_FSCR: - *val = get_reg_val(id, vcpu->arch.fscr); - break; case KVM_REG_PPC_PSPB: *val = get_reg_val(id, vcpu->arch.pspb); break; @@ -1091,9 +1088,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_IAMR: vcpu->arch.iamr = set_reg_val(id, *val); break; - case KVM_REG_PPC_FSCR: - vcpu->arch.fscr = set_reg_val(id, *val); - break; case KVM_REG_PPC_PSPB: vcpu->arch.pspb = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 6e55934dda0a..ddc626eea2da 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -53,6 +53,7 @@ static int 
kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr); +static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); /* Some compatibility defines */ #ifdef CONFIG_PPC_BOOK3S_32 @@ -115,6 +116,9 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, svcpu->ctr = vcpu->arch.ctr; svcpu->lr = vcpu->arch.lr; svcpu->pc = vcpu->arch.pc; +#ifdef CONFIG_PPC_BOOK3S_64 + svcpu->shadow_fscr = vcpu->arch.shadow_fscr; +#endif svcpu->in_use = true; } @@ -158,6 +162,9 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, vcpu->arch.fault_dar = svcpu->fault_dar; vcpu->arch.fault_dsisr = svcpu->fault_dsisr; vcpu->arch.last_inst = svcpu->last_inst; +#ifdef CONFIG_PPC_BOOK3S_64 + vcpu->arch.shadow_fscr = svcpu->shadow_fscr; +#endif svcpu->in_use = false; out: @@ -610,6 +617,17 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) kvmppc_recalc_shadow_msr(vcpu); } +/* Give up facility (TAR / EBB / DSCR) */ +static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) +{ +#ifdef CONFIG_PPC_BOOK3S_64 + if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { + /* Facility not available to the guest, ignore giveup request*/ + return; + } +#endif +} + static int kvmppc_read_inst(struct kvm_vcpu *vcpu) { ulong srr0 = kvmppc_get_pc(vcpu); @@ -741,6 +759,50 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) current->thread.regs->msr |= lost_ext; } +#ifdef CONFIG_PPC_BOOK3S_64 + +static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) +{ + /* Inject the Interrupt Cause field and trigger a guest interrupt */ + vcpu->arch.fscr &= ~(0xffULL << 56); + vcpu->arch.fscr |= (fac << 56); + kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); +} + +static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) +{ + enum emulation_result er = EMULATE_FAIL; + + if (!(kvmppc_get_msr(vcpu) & MSR_PR)) + er = kvmppc_emulate_instruction(vcpu->run, vcpu); + + if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { + /* Couldn't emulate, trigger interrupt in guest */ + kvmppc_trigger_fac_interrupt(vcpu, fac); + } +} + +/* Enable facilities (TAR, EBB, DSCR) for the guest */ +static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) +{ + BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); + + if (!(vcpu->arch.fscr & (1ULL << fac))) { + /* Facility not enabled by the guest */ + kvmppc_trigger_fac_interrupt(vcpu, fac); + return RESUME_GUEST; + } + + switch (fac) { + default: + kvmppc_emulate_fac(vcpu, fac); + break; + } + + return RESUME_GUEST; +} +#endif + int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int exit_nr) { @@ -1015,6 +1077,12 @@ program_interrupt: } r = RESUME_GUEST; break; +#ifdef CONFIG_PPC_BOOK3S_64 + case BOOK3S_INTERRUPT_FAC_UNAVAIL: + kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); + r = RESUME_GUEST; + break; +#endif case BOOK3S_INTERRUPT_MACHINE_CHECK: case BOOK3S_INTERRUPT_TRACE: kvmppc_book3s_queue_irqprio(vcpu, exit_nr); diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 1e0cc2adfd40..acee37cde840 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -90,6 +90,15 @@ kvmppc_handler_trampoline_enter: LOAD_GUEST_SEGMENTS #ifdef CONFIG_PPC_BOOK3S_64 +BEGIN_FTR_SECTION + /* Save host FSCR */ + mfspr r8, SPRN_FSCR + std r8, HSTATE_HOST_FSCR(r13) + /* Set FSCR during guest execution */ + ld r9, SVCPU_SHADOW_FSCR(r13) + mtspr SPRN_FSCR, r9 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + /* Some guests may need to have dcbz set to 32 byte 
length. * * Usually we ensure that by patching the guest's instructions @@ -255,6 +264,10 @@ BEGIN_FTR_SECTION cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST beq- ld_last_inst END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +BEGIN_FTR_SECTION + cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL + beq- ld_last_inst +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #endif b no_ld_last_inst @@ -311,6 +324,18 @@ no_ld_last_inst: no_dcbz32_off: +BEGIN_FTR_SECTION + /* Save guest FSCR on a FAC_UNAVAIL interrupt */ + cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL + bne+ no_fscr_save + mfspr r7, SPRN_FSCR + std r7, SVCPU_SHADOW_FSCR(r13) +no_fscr_save: + /* Restore host FSCR */ + ld r8, HSTATE_HOST_FSCR(r13) + mtspr SPRN_FSCR, r8 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + #endif /* CONFIG_PPC_BOOK3S_64 */ /* -- cgit v1.2.1 From e14e7a1e537d6e18f9c511f25c25c5efb7799fb5 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 22 Apr 2014 12:26:58 +0200 Subject: KVM: PPC: Book3S PR: Expose TAR facility to guest POWER8 implements a new register called TAR. This register has to be enabled in FSCR and then from KVM's point of view is mere storage. This patch enables the guest to use TAR. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s.c | 6 ++++++ arch/powerpc/kvm/book3s_hv.c | 6 ------ arch/powerpc/kvm/book3s_pr.c | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+), 6 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 79cfa2d10238..4046a1a91a75 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -634,6 +634,9 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) case KVM_REG_PPC_FSCR: val = get_reg_val(reg->id, vcpu->arch.fscr); break; + case KVM_REG_PPC_TAR: + val = get_reg_val(reg->id, vcpu->arch.tar); + break; default: r = -EINVAL; break; @@ -726,6 +729,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) case KVM_REG_PPC_FSCR: vcpu->arch.fscr = set_reg_val(reg->id, val); break; + case KVM_REG_PPC_TAR: + vcpu->arch.tar = set_reg_val(reg->id, val); + break; default: r = -EINVAL; break; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 0092e12b3e46..ee1d8ee5f1a7 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -891,9 +891,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_BESCR: *val = get_reg_val(id, vcpu->arch.bescr); break; - case KVM_REG_PPC_TAR: - *val = get_reg_val(id, vcpu->arch.tar); - break; case KVM_REG_PPC_DPDES: *val = get_reg_val(id, vcpu->arch.vcore->dpdes); break; @@ -1100,9 +1097,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_BESCR: vcpu->arch.bescr = set_reg_val(id, *val); break; - case KVM_REG_PPC_TAR: - vcpu->arch.tar = set_reg_val(id, *val); - break; case KVM_REG_PPC_DPDES: vcpu->arch.vcore->dpdes = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index ddc626eea2da..7d27a9518f07 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -90,6 +90,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) #endif kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); + kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); vcpu->cpu = -1; } @@ -625,6 +626,14 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) /* Facility not available to the guest, ignore giveup request*/ return; } + + switch (fac) { + case FSCR_TAR_LG: + vcpu->arch.tar = mfspr(SPRN_TAR); 
+ mtspr(SPRN_TAR, current->thread.tar); + vcpu->arch.shadow_fscr &= ~FSCR_TAR; + break; + } #endif } @@ -794,6 +803,12 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) } switch (fac) { + case FSCR_TAR_LG: + /* TAR switching isn't lazy in Linux yet */ + current->thread.tar = mfspr(SPRN_TAR); + mtspr(SPRN_TAR, vcpu->arch.tar); + vcpu->arch.shadow_fscr |= FSCR_TAR; + break; default: kvmppc_emulate_fac(vcpu, fac); break; @@ -1393,6 +1408,9 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* Make sure we save the guest FPU/Altivec/VSX state */ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); + /* Make sure we save the guest TAR/EBB/DSCR state */ + kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); + out: vcpu->mode = OUTSIDE_GUEST_MODE; return ret; -- cgit v1.2.1 From 2e23f544135e7b5fc2f0bcb6fa935c4b4f5058b2 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 29 Apr 2014 13:36:21 +0200 Subject: KVM: PPC: Book3S PR: Expose EBB registers POWER8 introduces a new facility called the "Event Based Branch" facility. It contains of a few registers that indicate where a guest should branch to when a defined event occurs and it's in PR mode. We don't want to really enable EBB as it will create a big mess with !PR guest mode while hardware is in PR and we don't really emulate the PMU anyway. So instead, let's just leave it at emulation of all its registers. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s.c | 18 ++++++++++++++++++ arch/powerpc/kvm/book3s_emulate.c | 22 ++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv.c | 18 ------------------ 3 files changed, 40 insertions(+), 18 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 4046a1a91a75..52c654dbd41a 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -637,6 +637,15 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) case KVM_REG_PPC_TAR: val = get_reg_val(reg->id, vcpu->arch.tar); break; + case KVM_REG_PPC_EBBHR: + val = get_reg_val(reg->id, vcpu->arch.ebbhr); + break; + case KVM_REG_PPC_EBBRR: + val = get_reg_val(reg->id, vcpu->arch.ebbrr); + break; + case KVM_REG_PPC_BESCR: + val = get_reg_val(reg->id, vcpu->arch.bescr); + break; default: r = -EINVAL; break; @@ -732,6 +741,15 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) case KVM_REG_PPC_TAR: vcpu->arch.tar = set_reg_val(reg->id, val); break; + case KVM_REG_PPC_EBBHR: + vcpu->arch.ebbhr = set_reg_val(reg->id, val); + break; + case KVM_REG_PPC_EBBRR: + vcpu->arch.ebbrr = set_reg_val(reg->id, val); + break; + case KVM_REG_PPC_BESCR: + vcpu->arch.bescr = set_reg_val(reg->id, val); + break; default: r = -EINVAL; break; diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index e8133e5e89c6..e1165bae693a 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -441,6 +441,17 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_FSCR: vcpu->arch.fscr = spr_val; break; +#ifdef CONFIG_PPC_BOOK3S_64 + case SPRN_BESCR: + vcpu->arch.bescr = spr_val; + break; + case SPRN_EBBHR: + vcpu->arch.ebbhr = spr_val; + break; + case SPRN_EBBRR: + vcpu->arch.ebbrr = spr_val; + break; +#endif case SPRN_ICTC: case SPRN_THRM1: case SPRN_THRM2: @@ -551,6 +562,17 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_FSCR: *spr_val = vcpu->arch.fscr; break; +#ifdef 
CONFIG_PPC_BOOK3S_64 + case SPRN_BESCR: + *spr_val = vcpu->arch.bescr; + break; + case SPRN_EBBHR: + *spr_val = vcpu->arch.ebbhr; + break; + case SPRN_EBBRR: + *spr_val = vcpu->arch.ebbrr; + break; +#endif case SPRN_THRM1: case SPRN_THRM2: case SPRN_THRM3: diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ee1d8ee5f1a7..3a9456165224 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -882,15 +882,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_PSPB: *val = get_reg_val(id, vcpu->arch.pspb); break; - case KVM_REG_PPC_EBBHR: - *val = get_reg_val(id, vcpu->arch.ebbhr); - break; - case KVM_REG_PPC_EBBRR: - *val = get_reg_val(id, vcpu->arch.ebbrr); - break; - case KVM_REG_PPC_BESCR: - *val = get_reg_val(id, vcpu->arch.bescr); - break; case KVM_REG_PPC_DPDES: *val = get_reg_val(id, vcpu->arch.vcore->dpdes); break; @@ -1088,15 +1079,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_PSPB: vcpu->arch.pspb = set_reg_val(id, *val); break; - case KVM_REG_PPC_EBBHR: - vcpu->arch.ebbhr = set_reg_val(id, *val); - break; - case KVM_REG_PPC_EBBRR: - vcpu->arch.ebbrr = set_reg_val(id, *val); - break; - case KVM_REG_PPC_BESCR: - vcpu->arch.bescr = set_reg_val(id, *val); - break; case KVM_REG_PPC_DPDES: vcpu->arch.vcore->dpdes = set_reg_val(id, *val); break; -- cgit v1.2.1 From 9916d57e64a49d85bcffe272478f869b8fe1583a Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 29 Apr 2014 17:54:40 +0200 Subject: KVM: PPC: Book3S PR: Expose TM registers POWER8 introduces transactional memory which brings along a number of new registers and MSR bits. Implementing all of those is a pretty big headache, so for now let's at least emulate enough to make Linux's context switching code happy. 
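One wrinkle worth spelling out: the availability check differs per facility. TAR and EBB are gated by FSCR bits, while TM is gated by MSR[TM]. A standalone sketch of that check (the bit positions and the MSR_TM stand-in are illustrative, not copied from the headers):

    #include <stdbool.h>
    #include <stdint.h>

    enum { FAC_TAR_LG = 8, FAC_EBB_LG = 7, FAC_TM_LG = 5 };   /* illustrative stand-ins */
    #define MSR_TM_SKETCH (1ULL << 32)                        /* stand-in for MSR[TM] */

    static bool guest_facility_enabled(uint64_t fscr, uint64_t msr, int fac)
    {
        switch (fac) {
        case FAC_TAR_LG:
        case FAC_EBB_LG:
            return fscr & (1ULL << fac);    /* FSCR-gated facilities */
        case FAC_TM_LG:
            return msr & MSR_TM_SKETCH;     /* TM is enabled via the MSR, not FSCR */
        default:
            return false;
        }
    }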
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 22 ++++++++++++++++++++++ arch/powerpc/kvm/book3s_pr.c | 20 +++++++++++++++++++- 2 files changed, 41 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index e1165bae693a..9bdff159ad2a 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -451,6 +451,17 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) case SPRN_EBBRR: vcpu->arch.ebbrr = spr_val; break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case SPRN_TFHAR: + vcpu->arch.tfhar = spr_val; + break; + case SPRN_TEXASR: + vcpu->arch.texasr = spr_val; + break; + case SPRN_TFIAR: + vcpu->arch.tfiar = spr_val; + break; +#endif #endif case SPRN_ICTC: case SPRN_THRM1: @@ -572,6 +583,17 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val case SPRN_EBBRR: *spr_val = vcpu->arch.ebbrr; break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + case SPRN_TFHAR: + *spr_val = vcpu->arch.tfhar; + break; + case SPRN_TEXASR: + *spr_val = vcpu->arch.texasr; + break; + case SPRN_TFIAR: + *spr_val = vcpu->arch.tfiar; + break; +#endif #endif case SPRN_THRM1: case SPRN_THRM2: diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 7d27a9518f07..23367a7e44c3 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -794,9 +794,27 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) /* Enable facilities (TAR, EBB, DSCR) for the guest */ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) { + bool guest_fac_enabled; BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); - if (!(vcpu->arch.fscr & (1ULL << fac))) { + /* + * Not every facility is enabled by FSCR bits, check whether the + * guest has this facility enabled at all. + */ + switch (fac) { + case FSCR_TAR_LG: + case FSCR_EBB_LG: + guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); + break; + case FSCR_TM_LG: + guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; + break; + default: + guest_fac_enabled = false; + break; + } + + if (!guest_fac_enabled) { /* Facility not enabled by the guest */ kvmppc_trigger_fac_interrupt(vcpu, fac); return RESUME_GUEST; -- cgit v1.2.1 From 792fc49787cb7df13f1c38d3e25c863e1c3a6bb2 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 6 May 2014 21:24:18 +0530 Subject: KVM: PPC: BOOK3S: HV: Prefer CMA region for hash page table allocation Today when KVM tries to reserve memory for the hash page table it allocates from the normal page allocator first. If that fails it falls back to CMA's reserved region. One of the side effects of this is that we could end up exhausting the page allocator and get linux into OOM conditions while we still have plenty of space available in CMA. This patch addresses this issue by first trying hash page table allocation from CMA's reserved region before falling back to the normal page allocator. So if we run out of memory, we really are out of memory. 
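For clarity, the resulting allocation order can be sketched like this; cma_alloc()/buddy_alloc() are placeholder callbacks for kvm_alloc_hpt() and __get_free_pages(), and the shrink loop is simplified:

    typedef unsigned long (*hpt_alloc_fn)(long order);

    /* Illustrative ordering only: CMA first, then successively smaller buddy allocations. */
    static unsigned long alloc_hpt_sketch(long *order, long min_order,
                                          hpt_alloc_fn cma_alloc,
                                          hpt_alloc_fn buddy_alloc,
                                          int *used_cma)
    {
        unsigned long hpt = cma_alloc(*order);    /* 1) prefer the reserved CMA region */

        if (hpt) {
            *used_cma = 1;
            return hpt;
        }
        *used_cma = 0;
        for (; *order >= min_order; --*order) {   /* 2) fall back to the page allocator */
            hpt = buddy_alloc(*order);
            if (hpt)
                return hpt;
        }
        return 0;
    }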
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index fb25ebc0af0c..f32896ffd784 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -52,7 +52,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm); long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) { - unsigned long hpt; + unsigned long hpt = 0; struct revmap_entry *rev; struct page *page = NULL; long order = KVM_DEFAULT_HPT_ORDER; @@ -64,22 +64,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) } kvm->arch.hpt_cma_alloc = 0; - /* - * try first to allocate it from the kernel page allocator. - * We keep the CMA reserved for failed allocation. - */ - hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT | - __GFP_NOWARN, order - PAGE_SHIFT); - - /* Next try to allocate from the preallocated pool */ - if (!hpt) { - VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER); - page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); - if (page) { - hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); - kvm->arch.hpt_cma_alloc = 1; - } else - --order; + VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER); + page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); + if (page) { + hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); + kvm->arch.hpt_cma_alloc = 1; } /* Lastly try successively smaller sizes from the page allocator */ -- cgit v1.2.1 From 1f365bb0de12da4a9ef8e56ffba2218d9a026011 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 6 May 2014 23:31:36 +0530 Subject: KVM: PPC: BOOK3S: HV: Add mixed page-size support for guest On recent IBM Power CPUs, while the hashed page table is looked up using the page size from the segmentation hardware (i.e. the SLB), it is possible to have the HPT entry indicate a larger page size. Thus for example it is possible to put a 16MB page in a 64kB segment, but since the hash lookup is done using a 64kB page size, it may be necessary to put multiple entries in the HPT for a single 16MB page. This capability is called mixed page-size segment (MPSS). With MPSS, there are two relevant page sizes: the base page size, which is the size used in searching the HPT, and the actual page size, which is the size indicated in the HPT entry. [ Note that the actual page size is always >= base page size ]. We use "ibm,segment-page-sizes" device tree node to advertise the MPSS support to PAPR guest. The penc encoding indicates whether we support a specific combination of base page size and actual page size in the same segment. We also use the penc value in the LP encoding of HPTE entry. This patch exposes MPSS support to KVM guest by advertising the feature via "ibm,segment-page-sizes". It also adds the necessary changes to decode the base page size and the actual page size correctly from the HPTE entry. 
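To make the advertisement concrete: each base-page-size entry we export can carry a second, larger actual page size when the host's penc table has an encoding for that combination. A simplified stand-in for the exported segment-page-size structure:

    /* Simplified stand-in for the structure exported to user space; not the real layout. */
    struct seg_page_size_sketch {
        unsigned int page_shift;          /* base page size used for the hash lookup */
        struct {
            unsigned int page_shift;      /* actual page size stored in the HPTE */
            int pte_enc;                  /* LP encoding for this base/actual pair */
        } enc[2];
    };

    /* Advertise 16MB MPSS inside a smaller base-page segment when the host supports it. */
    static void add_16m_mpss_sketch(struct seg_page_size_sketch *sps, int host_penc_16m)
    {
        if (sps->page_shift != 24 && host_penc_16m != -1) {
            sps->enc[1].page_shift = 24;          /* 16MB actual pages */
            sps->enc[1].pte_enc = host_penc_16m;  /* penc taken from the host MMU table */
        }
    }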
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3a9456165224..aba05bbb3e74 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1930,6 +1930,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, * support pte_enc here */ (*sps)->enc[0].pte_enc = def->penc[linux_psize]; + /* + * Add 16MB MPSS support if host supports it + */ + if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) { + (*sps)->enc[1].page_shift = 24; + (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M]; + } (*sps)++; } -- cgit v1.2.1 From f3383cf80e417e86fcc84a2eb4c96bc52842d8d9 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Mon, 12 May 2014 01:08:32 +0200 Subject: KVM: PPC: Disable NX for old magic page using guests Old guests try to use the magic page, but map their trampoline code inside of an NX region. Since we can't fix those old kernels, try to detect whether the guest is sane or not. If not, just disable NX functionality in KVM so that old guests at least work at all. For newer guests, add a bit that we can set to keep NX functionality available. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu.c | 3 +++ arch/powerpc/kvm/powerpc.c | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 278729f4df80..774a253ca4e1 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -313,6 +313,9 @@ do_second: gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); gpte->page_size = pgsize; gpte->may_execute = ((r & HPTE_R_N) ? false : true); + if (unlikely(vcpu->arch.disable_kernel_nx) && + !(kvmppc_get_msr(vcpu) & MSR_PR)) + gpte->may_execute = true; gpte->may_read = false; gpte->may_write = false; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index b4e15bf3ff88..154f352c39ae 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -177,8 +177,18 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) vcpu->arch.shared_big_endian = shared_big_endian; #endif - vcpu->arch.magic_page_pa = param1; - vcpu->arch.magic_page_ea = param2; + if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { + /* + * Older versions of the Linux magic page code had + * a bug where they would map their trampoline code + * NX. If that's the case, remove !PR NX capability. + */ + vcpu->arch.disable_kernel_nx = true; + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + } + + vcpu->arch.magic_page_pa = param1 & ~0xfffULL; + vcpu->arch.magic_page_ea = param2 & ~0xfffULL; r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; -- cgit v1.2.1 From 7310f3a5b0ecc7ac8b57e70fe395968cbe0fc40a Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 12 May 2014 17:04:05 +0530 Subject: KVM: PPC: BOOK3S: Always use the saved DAR value Although it's optional, IBM POWER cpus always had DAR value set on alignment interrupt. So don't try to compute these values. 
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 9bdff159ad2a..61f38eb470b3 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -676,6 +676,12 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) { +#ifdef CONFIG_PPC_BOOK3S_64 + /* + * Linux's fix_alignment() assumes that DAR is valid, so can we + */ + return vcpu->arch.fault_dar; +#else ulong dar = 0; ulong ra = get_ra(inst); ulong rb = get_rb(inst); @@ -700,4 +706,5 @@ ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) } return dar; +#endif } -- cgit v1.2.1 From ddca156ae6bafc0c6af61805bfe4b37440448a4c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 12 May 2014 17:04:06 +0530 Subject: KVM: PPC: BOOK3S: Remove open coded make_dsisr in alignment handler Use make_dsisr instead of open coding it. This also have the added benefit of handling alignment interrupt on additional instructions. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 39 +-------------------------------------- 1 file changed, 1 insertion(+), 38 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 61f38eb470b3..c9924475368f 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -634,44 +634,7 @@ unprivileged: u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) { - u32 dsisr = 0; - - /* - * This is what the spec says about DSISR bits (not mentioned = 0): - * - * 12:13 [DS] Set to bits 30:31 - * 15:16 [X] Set to bits 29:30 - * 17 [X] Set to bit 25 - * [D/DS] Set to bit 5 - * 18:21 [X] Set to bits 21:24 - * [D/DS] Set to bits 1:4 - * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS) - * 27:31 Set to bits 11:15 (RA) - */ - - switch (get_op(inst)) { - /* D-form */ - case OP_LFS: - case OP_LFD: - case OP_STFD: - case OP_STFS: - dsisr |= (inst >> 12) & 0x4000; /* bit 17 */ - dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */ - break; - /* X-form */ - case 31: - dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */ - dsisr |= (inst << 8) & 0x04000; /* bit 17 */ - dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */ - break; - default: - printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); - break; - } - - dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */ - - return dsisr; + return make_dsisr(inst); } ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) -- cgit v1.2.1 From 42188365f92c2d6bc89c9441a5e9305c1354e8a6 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Tue, 13 May 2014 17:05:51 +0200 Subject: KVM: PPC: Graciously fail broken LE hypercalls There are LE Linux guests out there that don't handle hypercalls correctly. Instead of interpreting the instruction stream from device tree as big endian they assume it's a little endian instruction stream and fail. When we see an illegal instruction from such a byte reversed instruction stream, bail out graciously and just declare every hcall as error. 
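Concretely, such a guest executes the byte-swapped image of the hypercall's sc instruction, which shows up as an illegal primary opcode 0 word. Detecting it is a simple swab compare, roughly (hand-rolled swab32 for a standalone sketch):

    #include <stdbool.h>
    #include <stdint.h>

    #define INS_SC 0x44000002u    /* the sc instruction used by the KVM hypercall sequence */

    static inline uint32_t swab32_sketch(uint32_t x)
    {
        return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
               ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    /* True if an MSR_LE guest fetched the hypercall instruction with reversed bytes. */
    static bool is_byte_reversed_hcall(uint32_t inst, bool guest_msr_le)
    {
        return guest_msr_le && inst == swab32_sketch(INS_SC);
    }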
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_emulate.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index c9924475368f..3f295269af37 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c @@ -94,8 +94,25 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, int rs = get_rs(inst); int ra = get_ra(inst); int rb = get_rb(inst); + u32 inst_sc = 0x44000002; switch (get_op(inst)) { + case 0: + emulated = EMULATE_FAIL; + if ((kvmppc_get_msr(vcpu) & MSR_LE) && + (inst == swab32(inst_sc))) { + /* + * This is the byte reversed syscall instruction of our + * hypercall handler. Early versions of LE Linux didn't + * swap the instructions correctly and ended up in + * illegal instructions. + * Just always fail hypercalls on these broken systems. + */ + kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED); + kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); + emulated = EMULATE_DONE; + } + break; case 19: switch (get_xop(inst)) { case OP_19_XOP_RFID: -- cgit v1.2.1 From aae6559651dd9d779da79f30de02033557fe263b Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 22 May 2014 17:25:14 +0200 Subject: KVM: PPC: MPIC: Reset IRQ source private members When we reset the in-kernel MPIC controller, we forget to reset some hidden state such as destmask and output. This state is usually set when the guest writes to the IDR register for a specific IRQ line. To make sure we stay in sync and don't forget hidden state, treat reset of the IDR register as a simple write of the IDR register. That automatically updates all the hidden state as well. Reported-by: Paul Janzen Signed-off-by: Alexander Graf --- arch/powerpc/kvm/mpic.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c index efbd9962a209..b68d0dc9479a 100644 --- a/arch/powerpc/kvm/mpic.c +++ b/arch/powerpc/kvm/mpic.c @@ -126,6 +126,8 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr, u32 val, int idx); static int openpic_cpu_read_internal(void *opaque, gpa_t addr, u32 *ptr, int idx); +static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ, + uint32_t val); enum irq_type { IRQ_TYPE_NORMAL = 0, @@ -528,7 +530,6 @@ static void openpic_reset(struct openpic *opp) /* Initialise IRQ sources */ for (i = 0; i < opp->max_irq; i++) { opp->src[i].ivpr = opp->ivpr_reset; - opp->src[i].idr = opp->idr_reset; switch (opp->src[i].type) { case IRQ_TYPE_NORMAL: @@ -543,6 +544,8 @@ static void openpic_reset(struct openpic *opp) case IRQ_TYPE_FSLSPECIAL: break; } + + write_IRQreg_idr(opp, i, opp->idr_reset); } /* Initialise IRQ destinations */ for (i = 0; i < MAX_CPU; i++) { -- cgit v1.2.1 From f2e91042a807cbf9b0b0d9776bf37d1ef0bd7ebe Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 22 May 2014 17:40:15 +0200 Subject: KVM: PPC: Add CAP to indicate hcall fixes We worked around some nasty KVM magic page hcall breakages: 1) NX bit not honored, so ignore NX when we detect it 2) LE guests swizzle hypercall instruction Without these fixes in place, there's no way it would make sense to expose kvm hypercalls to a guest. Chances are immensely high it would trip over and break. So add a new CAP that gives user space a hint that we have workarounds for the bugs above in place. 
It can use those as hint to disable PV hypercalls when the guest CPU is anything POWER7 or higher and the host does not have fixes in place. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/powerpc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 154f352c39ae..bab20f410443 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -416,6 +416,7 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_SPAPR_TCE: case KVM_CAP_PPC_ALLOC_HTAB: case KVM_CAP_PPC_RTAS: + case KVM_CAP_PPC_FIXUP_HCALL: #ifdef CONFIG_KVM_XICS case KVM_CAP_IRQ_XICS: #endif -- cgit v1.2.1 From 55765483e1df8135102ae9ca77dccbca9a7a6184 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 May 2014 19:48:36 +1000 Subject: KVM: PPC: Book3S HV: Fix check for running inside guest in global_invalidates() The global_invalidates() function contains a check that is intended to tell whether we are currently executing in the context of a hypercall issued by the guest. The reason is that the optimization of using a local TLB invalidate instruction is only valid in that context. The check was testing local_paca->kvm_hstate.kvm_vcore, which gets set when entering the guest but no longer gets cleared when exiting the guest. To fix this, we use the kvm_vcpu field instead, which does get cleared when exiting the guest, by the kvmppc_release_hwthread() calls inside kvmppc_run_core(). The effect of having the check wrong was that when kvmppc_do_h_remove() got called from htab_write() on the destination machine during a migration, it cleared the current cpu's bit in kvm->arch.need_tlb_flush. This meant that when the guest started running in the destination VM, it may miss out on doing a complete TLB flush, and therefore may end up using stale TLB entries from a previous guest that used the same LPID value. This should make migration more reliable. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 1d6c56ad5b60..ac840c6dfa9b 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -42,13 +42,14 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags) /* * If there is only one vcore, and it's currently running, + * as indicated by local_paca->kvm_hstate.kvm_vcpu being set, * we can use tlbiel as long as we mark all other physical * cores as potentially having stale TLB entries for this lpid. * If we're not using MMU notifiers, we never take pages away * from the guest, so we can use tlbiel if requested. * Otherwise, don't use tlbiel. */ - if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore) + if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) global = 0; else if (kvm->arch.using_mmu_notifiers) global = 1; -- cgit v1.2.1 From 1066f7724c73fca9ddb978c7f5b7411c54032047 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 May 2014 19:48:37 +1000 Subject: KVM: PPC: Book3S HV: Put huge-page HPTEs in rmap chain for base address Currently, when a huge page is faulted in for a guest, we select the rmap chain to insert the HPTE into based on the guest physical address that the guest tried to access. 
Since there is an rmap chain for each system page, there are many rmap chains for the area covered by a huge page (e.g. 256 for 16MB pages when PAGE_SIZE = 64kB), and the huge-page HPTE could end up in any one of them. For consistency, and to make the huge-page HPTEs easier to find, we now put huge-page HPTEs in the rmap chain corresponding to the base address of the huge page. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f32896ffd784..4e22ecbcf93f 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -585,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, struct kvm *kvm = vcpu->kvm; unsigned long *hptep, hpte[3], r; unsigned long mmu_seq, psize, pte_size; + unsigned long gpa_base, gfn_base; unsigned long gpa, gfn, hva, pfn; struct kvm_memory_slot *memslot; unsigned long *rmap; @@ -623,7 +624,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, /* Translate the logical address and get the page */ psize = hpte_page_size(hpte[0], r); - gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1)); + gpa_base = r & HPTE_R_RPN & ~(psize - 1); + gfn_base = gpa_base >> PAGE_SHIFT; + gpa = gpa_base | (ea & (psize - 1)); gfn = gpa >> PAGE_SHIFT; memslot = gfn_to_memslot(kvm, gfn); @@ -635,6 +638,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, if (!kvm->arch.using_mmu_notifiers) return -EFAULT; /* should never get here */ + /* + * This should never happen, because of the slot_is_aligned() + * check in kvmppc_do_h_enter(). + */ + if (gfn_base < memslot->base_gfn) + return -EFAULT; + /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; smp_rmb(); @@ -727,7 +737,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, goto out_unlock; hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; - rmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; + /* Always put the HPTE in the rmap chain for the page base address */ + rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; lock_rmap(rmap); /* Check if we might have been invalidated; let the guest retry if so */ -- cgit v1.2.1 From 687414bebe30d59c766b682cf86b1c5fa92d7af9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Mon, 26 May 2014 19:48:38 +1000 Subject: KVM: PPC: Book3S HV: Fix dirty map for hugepages The dirty map that we construct for the KVM_GET_DIRTY_LOG ioctl has one bit per system page (4K/64K). Currently, we only set one bit in the map for each HPT entry with the Change bit set, even if the HPT is for a large page (e.g., 16MB). Userspace then considers only the first system page dirty, though in fact the guest may have modified anywhere in the large page. To fix this, we make kvm_test_clear_dirty() return the actual number of pages that are dirty (and rename it to kvm_test_clear_dirty_npages() to emphasize that that's what it returns). In kvmppc_hv_get_dirty_log() we then set that many bits in the dirty map. 
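The bitmap update then just marks every system page covered by the huge page, along the lines of the following sketch (the real code uses __set_bit_le(); bitmap endianness is ignored here):

    #define BITS_PER_LONG_SKETCH (8 * (int)sizeof(unsigned long))

    /* Set npages consecutive bits starting at page index i of the memslot's dirty map. */
    static void set_dirty_pages_sketch(unsigned long *map, unsigned long i, int npages)
    {
        for (; npages > 0; ++i, --npages)
            map[i / BITS_PER_LONG_SKETCH] |= 1UL << (i % BITS_PER_LONG_SKETCH);
    }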
Signed-off-by: Alexey Kardashevskiy Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 4e22ecbcf93f..96c90447d4bf 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1060,22 +1060,27 @@ void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); } -static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) +/* + * Returns the number of system pages that are dirty. + * This can be more than 1 if we find a huge-page HPTE. + */ +static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) { struct revmap_entry *rev = kvm->arch.revmap; unsigned long head, i, j; + unsigned long n; unsigned long *hptep; - int ret = 0; + int npages_dirty = 0; retry: lock_rmap(rmapp); if (*rmapp & KVMPPC_RMAP_CHANGED) { *rmapp &= ~KVMPPC_RMAP_CHANGED; - ret = 1; + npages_dirty = 1; } if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { unlock_rmap(rmapp); - return ret; + return npages_dirty; } i = head = *rmapp & KVMPPC_RMAP_INDEX; @@ -1106,13 +1111,16 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp) rev[i].guest_rpte |= HPTE_R_C; note_hpte_modification(kvm, &rev[i]); } - ret = 1; + n = hpte_page_size(hptep[0], hptep[1]); + n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (n > npages_dirty) + npages_dirty = n; } hptep[0] &= ~HPTE_V_HVLOCK; } while ((i = j) != head); unlock_rmap(rmapp); - return ret; + return npages_dirty; } static void harvest_vpa_dirty(struct kvmppc_vpa *vpa, @@ -1136,15 +1144,22 @@ static void harvest_vpa_dirty(struct kvmppc_vpa *vpa, long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) { - unsigned long i; + unsigned long i, j; unsigned long *rmapp; struct kvm_vcpu *vcpu; preempt_disable(); rmapp = memslot->arch.rmap; for (i = 0; i < memslot->npages; ++i) { - if (kvm_test_clear_dirty(kvm, rmapp) && map) - __set_bit_le(i, map); + int npages = kvm_test_clear_dirty_npages(kvm, rmapp); + /* + * Note that if npages > 0 then i must be a multiple of npages, + * since we always put huge-page HPTEs in the rmap chain + * corresponding to their page base address. + */ + if (npages && map) + for (j = i; npages; ++j, --npages) + __set_bit_le(j, map); ++rmapp; } -- cgit v1.2.1 From 6c576e74fd91b93ca1eedcd9eb5200171d2ba32b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 May 2014 19:48:39 +1000 Subject: KVM: PPC: Book3S HV: Make sure we don't miss dirty pages Current, when testing whether a page is dirty (when constructing the bitmap for the KVM_GET_DIRTY_LOG ioctl), we test the C (changed) bit in the HPT entries mapping the page, and if it is 0, we consider the page to be clean. However, the Power ISA doesn't require processors to set the C bit to 1 immediately when writing to a page, and in fact allows them to delay the writeback of the C bit until they receive a TLB invalidation for the page. Thus it is possible that the page could be dirty and we miss it. Now, if there are vcpus running, this is not serious since the collection of the dirty log is racy already - some vcpu could dirty the page just after we check it. But if there are no vcpus running we should return definitive results, in case we are in the final phase of migrating the guest. 
Also, if the permission bits in the HPTE don't allow writing, then we know that no CPU can set C. If the HPTE was previously writable and the page was modified, any C bit writeback would have been flushed out by the tlbie that we did when changing the HPTE to read-only. Otherwise we need to do a TLB invalidation even if the C bit is 0, and then check the C bit. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 47 +++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 10 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 96c90447d4bf..80561074078d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1060,6 +1060,11 @@ void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte) kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); } +static int vcpus_running(struct kvm *kvm) +{ + return atomic_read(&kvm->arch.vcpus_running) != 0; +} + /* * Returns the number of system pages that are dirty. * This can be more than 1 if we find a huge-page HPTE. @@ -1069,6 +1074,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) struct revmap_entry *rev = kvm->arch.revmap; unsigned long head, i, j; unsigned long n; + unsigned long v, r; unsigned long *hptep; int npages_dirty = 0; @@ -1088,7 +1094,22 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); j = rev[i].forw; - if (!(hptep[1] & HPTE_R_C)) + /* + * Checking the C (changed) bit here is racy since there + * is no guarantee about when the hardware writes it back. + * If the HPTE is not writable then it is stable since the + * page can't be written to, and we would have done a tlbie + * (which forces the hardware to complete any writeback) + * when making the HPTE read-only. + * If vcpus are running then this call is racy anyway + * since the page could get dirtied subsequently, so we + * expect there to be a further call which would pick up + * any delayed C bit writeback. + * Otherwise we need to do the tlbie even if C==0 in + * order to pick up any delayed writeback of C. 
+ */ + if (!(hptep[1] & HPTE_R_C) && + (!hpte_is_writable(hptep[1]) || vcpus_running(kvm))) continue; if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { @@ -1100,23 +1121,29 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) } /* Now check and modify the HPTE */ - if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) { - /* need to make it temporarily absent to clear C */ - hptep[0] |= HPTE_V_ABSENT; - kvmppc_invalidate_hpte(kvm, hptep, i); - hptep[1] &= ~HPTE_R_C; - eieio(); - hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; + if (!(hptep[0] & HPTE_V_VALID)) + continue; + + /* need to make it temporarily absent so C is stable */ + hptep[0] |= HPTE_V_ABSENT; + kvmppc_invalidate_hpte(kvm, hptep, i); + v = hptep[0]; + r = hptep[1]; + if (r & HPTE_R_C) { + hptep[1] = r & ~HPTE_R_C; if (!(rev[i].guest_rpte & HPTE_R_C)) { rev[i].guest_rpte |= HPTE_R_C; note_hpte_modification(kvm, &rev[i]); } - n = hpte_page_size(hptep[0], hptep[1]); + n = hpte_page_size(v, r); n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; if (n > npages_dirty) npages_dirty = n; + eieio(); } - hptep[0] &= ~HPTE_V_HVLOCK; + v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK); + v |= HPTE_V_VALID; + hptep[0] = v; } while ((i = j) != head); unlock_rmap(rmapp); -- cgit v1.2.1 From 9bc01a9bc77edac2ea6db62c5111a7f4335d4021 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 May 2014 19:48:40 +1000 Subject: KVM: PPC: Book3S HV: Work around POWER8 performance monitor bugs This adds workarounds for two hardware bugs in the POWER8 performance monitor unit (PMU), both related to interrupt generation. The effect of these bugs is that PMU interrupts can get lost, leading to tools such as perf reporting fewer counts and samples than they should. The first bug relates to the PMAO (perf. mon. alert occurred) bit in MMCR0; setting it should cause an interrupt, but doesn't. The other bug relates to the PMAE (perf. mon. alert enable) bit in MMCR0. Setting PMAE when a counter is negative and counter negative conditions are enabled to cause alerts should cause an alert, but doesn't. The workaround for the first bug is to create conditions where a counter will overflow, whenever we are about to restore a MMCR0 value that has PMAO set (and PMAO_SYNC clear). The workaround for the second bug is to freeze all counters using MMCR2 before reading MMCR0. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 59 +++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ffbb871c2bd8..60fe8ba318cf 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -86,6 +86,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) lbz r4, LPPACA_PMCINUSE(r3) cmpwi r4, 0 beq 23f /* skip if not */ +BEGIN_FTR_SECTION + ld r3, HSTATE_MMCR(r13) + andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r4, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) lwz r3, HSTATE_PMC(r13) lwz r4, HSTATE_PMC + 4(r13) lwz r5, HSTATE_PMC + 8(r13) @@ -726,6 +732,12 @@ skip_tm: sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ isync +BEGIN_FTR_SECTION + ld r3, VCPU_MMCR(r4) + andi. 
r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO + cmpwi r5, MMCR0_PMAO + beql kvmppc_fix_pmao +END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ lwz r6, VCPU_PMC + 8(r4) @@ -1324,6 +1336,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 25: /* Save PMU registers if requested */ /* r8 and cr0.eq are live here */ +BEGIN_FTR_SECTION + /* + * POWER8 seems to have a hardware bug where setting + * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE] + * when some counters are already negative doesn't seem + * to cause a performance monitor alert (and hence interrupt). + * The effect of this is that when saving the PMU state, + * if there is no PMU alert pending when we read MMCR0 + * before freezing the counters, but one becomes pending + * before we read the counters, we lose it. + * To work around this, we need a way to freeze the counters + * before reading MMCR0. Normally, freezing the counters + * is done by writing MMCR0 (to set MMCR0[FC]) which + * unavoidably writes MMCR0[PMA0] as well. On POWER8, + * we can also freeze the counters using MMCR2, by writing + * 1s to all the counter freeze condition bits (there are + * 9 bits each for 6 counters). + */ + li r3, -1 /* set all freeze bits */ + clrrdi r3, r3, 10 + mfspr r10, SPRN_MMCR2 + mtspr SPRN_MMCR2, r3 + isync +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3, 1 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mfspr r4, SPRN_MMCR0 /* save MMCR0 */ @@ -1347,6 +1383,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) std r4, VCPU_MMCR(r9) std r5, VCPU_MMCR + 8(r9) std r6, VCPU_MMCR + 16(r9) +BEGIN_FTR_SECTION + std r10, VCPU_MMCR + 24(r9) +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r7, VCPU_SIAR(r9) std r8, VCPU_SDAR(r9) mfspr r3, SPRN_PMC1 @@ -1370,12 +1409,10 @@ BEGIN_FTR_SECTION stw r11, VCPU_PMC + 28(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) BEGIN_FTR_SECTION - mfspr r4, SPRN_MMCR2 mfspr r5, SPRN_SIER mfspr r6, SPRN_SPMC1 mfspr r7, SPRN_SPMC2 mfspr r8, SPRN_MMCRS - std r4, VCPU_MMCR + 24(r9) std r5, VCPU_SIER(r9) stw r6, VCPU_PMC + 24(r9) stw r7, VCPU_PMC + 28(r9) @@ -2311,3 +2348,21 @@ kvmppc_msr_interrupt: li r0, 1 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG blr + +/* + * This works around a hardware bug on POWER8E processors, where + * writing a 1 to the MMCR0[PMAO] bit doesn't generate a + * performance monitor interrupt. Instead, when we need to have + * an interrupt pending, we have to arrange for a counter to overflow. + */ +kvmppc_fix_pmao: + li r3, 0 + mtspr SPRN_MMCR2, r3 + lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h + ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN + mtspr SPRN_MMCR0, r3 + lis r3, 0x7fff + ori r3, r3, 0xffff + mtspr SPRN_PMC6, r3 + isync + blr -- cgit v1.2.1 From 000a25ddb7106cdcb34e7f6c7547e5b2354b6645 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 May 2014 19:48:41 +1000 Subject: KVM: PPC: Book3S HV: Fix machine check delivery to guest The code that delivered a machine check to the guest after handling it in real mode failed to load up r11 before calling kvmppc_msr_interrupt, which needs the old MSR value in r11 so it can see the transactional state there. This adds the missing load. 
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 60fe8ba318cf..220aefbcb7ca 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2144,6 +2144,7 @@ machine_check_realmode: beq mc_cont /* If not, deliver a machine check. SRR0/1 are already set */ li r10, BOOK3S_INTERRUPT_MACHINE_CHECK + ld r11, VCPU_MSR(r9) bl kvmppc_msr_interrupt b fast_interrupt_c_return -- cgit v1.2.1 From 207438d4e21e05728a8a58b5e25b0f6553260068 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 15 May 2014 14:36:05 +0200 Subject: KVM: PPC: Book3S PR: Use SLB entry 0 We didn't make use of SLB entry 0 because ... of no good reason. SLB entry 0 will always be used by the Linux linear SLB entry, so the fact that slbia does not invalidate it doesn't matter as we overwrite SLB 0 on exit anyway. Just enable use of SLB entry 0 for our shadow SLB code. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_mmu_host.c | 11 ++++------- arch/powerpc/kvm/book3s_64_slb.S | 3 ++- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index e2efb85c65a3..0ac98392f363 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) int found_inval = -1; int r; - if (!svcpu->slb_max) - svcpu->slb_max = 1; - /* Are we overwriting? */ - for (i = 1; i < svcpu->slb_max; i++) { + for (i = 0; i < svcpu->slb_max; i++) { if (!(svcpu->slb[i].esid & SLB_ESID_V)) found_inval = i; else if ((svcpu->slb[i].esid & ESID_MASK) == esid) { @@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) } /* Found a spare entry that was invalidated before */ - if (found_inval > 0) { + if (found_inval >= 0) { r = found_inval; goto out; } @@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) ulong seg_mask = -seg_size; int i; - for (i = 1; i < svcpu->slb_max; i++) { + for (i = 0; i < svcpu->slb_max; i++) { if ((svcpu->slb[i].esid & SLB_ESID_V) && (svcpu->slb[i].esid & seg_mask) == ea) { /* Invalidate this entry */ @@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size) void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) { struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); - svcpu->slb_max = 1; + svcpu->slb_max = 0; svcpu->slb[0].esid = 0; svcpu_put(svcpu); } diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 596140e5c889..84c52c6b5837 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -138,7 +138,8 @@ slb_do_enter: /* Restore bolted entries from the shadow and fix it along the way */ - /* We don't store anything in entry 0, so we don't need to take care of it */ + li r0, r0 + slbmte r0, r0 slbia isync -- cgit v1.2.1 From d8d164a9850d486cc48081c18831680254688d0f Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 15 May 2014 14:38:03 +0200 Subject: KVM: PPC: Book3S PR: Rework SLB switching code On LPAR guest systems Linux enables the shadow SLB to indicate to the hypervisor a number of SLB entries that always have to be available. 
Today we go through this shadow SLB and disable all ESID's valid bits. However, pHyp doesn't like this approach very much and honors us with fancy machine checks. Fortunately the shadow SLB descriptor also has an entry that indicates the number of valid entries following. During the lifetime of a guest we can just swap that value to 0 and don't have to worry about the SLB restoration magic. While we're touching the code, let's also make it more readable (get rid of rldicl), allow it to deal with a dynamic number of bolted SLB entries and only do shadow SLB swizzling on LPAR systems. Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_64_slb.S | 83 ++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 45 deletions(-) (limited to 'arch/powerpc/kvm') diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index 84c52c6b5837..3589c4e3d49b 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S @@ -17,29 +17,9 @@ * Authors: Alexander Graf */ -#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) -#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) -#define UNBOLT_SLB_ENTRY(num) \ - li r11, SHADOW_SLB_ESID(num); \ - LDX_BE r9, r12, r11; \ - /* Invalid? Skip. */; \ - rldicl. r0, r9, 37, 63; \ - beq slb_entry_skip_ ## num; \ - xoris r9, r9, SLB_ESID_V@h; \ - STDX_BE r9, r12, r11; \ - slb_entry_skip_ ## num: - -#define REBOLT_SLB_ENTRY(num) \ - li r8, SHADOW_SLB_ESID(num); \ - li r7, SHADOW_SLB_VSID(num); \ - LDX_BE r10, r11, r8; \ - cmpdi r10, 0; \ - beq slb_exit_skip_ ## num; \ - oris r10, r10, SLB_ESID_V@h; \ - LDX_BE r9, r11, r7; \ - slbmte r9, r10; \ - STDX_BE r10, r11, r8; \ -slb_exit_skip_ ## num: +#define SHADOW_SLB_ENTRY_LEN 0x10 +#define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x) +#define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8) /****************************************************************************** * * @@ -63,20 +43,15 @@ slb_exit_skip_ ## num: * SVCPU[LR] = guest LR */ - /* Remove LPAR shadow entries */ +BEGIN_FW_FTR_SECTION -#if SLB_NUM_BOLTED == 3 + /* Declare SLB shadow as 0 entries big */ - ld r12, PACA_SLBSHADOWPTR(r13) + ld r11, PACA_SLBSHADOWPTR(r13) + li r8, 0 + stb r8, 3(r11) - /* Remove bolted entries */ - UNBOLT_SLB_ENTRY(0) - UNBOLT_SLB_ENTRY(1) - UNBOLT_SLB_ENTRY(2) - -#else -#error unknown number of bolted entries -#endif +END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) /* Flush SLB */ @@ -99,7 +74,7 @@ slb_loop_enter: ld r10, 0(r11) - rldicl. r0, r10, 37, 63 + andis. r9, r10, SLB_ESID_V@h beq slb_loop_enter_skip ld r9, 8(r11) @@ -136,24 +111,42 @@ slb_do_enter: * */ - /* Restore bolted entries from the shadow and fix it along the way */ + /* Remove all SLB entries that are in use. */ li r0, r0 slbmte r0, r0 slbia - isync -#if SLB_NUM_BOLTED == 3 + /* Restore bolted entries from the shadow */ ld r11, PACA_SLBSHADOWPTR(r13) - REBOLT_SLB_ENTRY(0) - REBOLT_SLB_ENTRY(1) - REBOLT_SLB_ENTRY(2) - -#else -#error unknown number of bolted entries -#endif +BEGIN_FW_FTR_SECTION + + /* Declare SLB shadow as SLB_NUM_BOLTED entries big */ + + li r8, SLB_NUM_BOLTED + stb r8, 3(r11) + +END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) + + /* Manually load all entries from shadow SLB */ + + li r8, SLBSHADOW_SAVEAREA + li r7, SLBSHADOW_SAVEAREA + 8 + + .rept SLB_NUM_BOLTED + LDX_BE r10, r11, r8 + cmpdi r10, 0 + beq 1f + LDX_BE r9, r11, r7 + slbmte r9, r10 +1: addi r7, r7, SHADOW_SLB_ENTRY_LEN + addi r8, r8, SHADOW_SLB_ENTRY_LEN + .endr + + isync + sync slb_do_exit: -- cgit v1.2.1
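
As a companion to the SLB switching rework above, here is a compact C sketch of the scheme. The struct layout, the SLB_NUM_BOLTED value of 3 (taken from the removed #if check), and the helper names are assumptions for illustration; the kernel keeps this logic in assembly and in its own shadow SLB definition.

#include <stddef.h>
#include <stdint.h>

#define SLB_NUM_BOLTED 3        /* assumed, as in the old #if check */

/* Assumed layout: the first word of the shadow buffer carries the count of
 * valid bolted entries; its low-order byte is what "stb r8, 3(r11)" updates. */
struct shadow_slb {
        uint32_t persistent;    /* number of entries the hypervisor must keep */
        uint32_t buffer_length;
        uint64_t reserved;
        struct { uint64_t esid, vsid; } save_area[SLB_NUM_BOLTED];
};

/* Stand-in for the slbmte instruction (inline assembly in the real code). */
static void slbmte(uint64_t vsid, uint64_t esid) { (void)vsid; (void)esid; }

/* Guest entry: instead of clearing SLB_ESID_V in each shadow entry (which
 * pHyp answers with machine checks), declare the shadow as holding zero
 * entries, then flush the SLB and load the guest's own entries. */
static void shadow_slb_guest_enter(struct shadow_slb *s)
{
        s->persistent = 0;
        /* slbia; load guest SLB entries ... */
}

/* Guest exit: declare SLB_NUM_BOLTED entries valid again and reload every
 * non-empty one by hand, mirroring the .rept loop in the patch. */
static void shadow_slb_guest_exit(struct shadow_slb *s)
{
        s->persistent = SLB_NUM_BOLTED;
        for (size_t i = 0; i < SLB_NUM_BOLTED; i++) {
                if (!s->save_area[i].esid)
                        continue;
                slbmte(s->save_area[i].vsid, s->save_area[i].esid);
        }
}

Only LPAR (pHyp) systems need the count swizzling, which is why the stores to the shadow count sit inside BEGIN_FW_FTR_SECTION / END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR) blocks in the patch, while the manual reload of the bolted entries happens unconditionally.
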