Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c        3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              55
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c        2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S   29
-rw-r--r--  arch/powerpc/kvm/e500.c                    3
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c           19
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c           4
-rw-r--r--  arch/powerpc/kvm/powerpc.c                 3
8 files changed, 53 insertions, 65 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3fc2ba784a71..fb37290a57b4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -70,7 +70,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
/* Lastly try successively smaller sizes from the page allocator */
- while (!hpt && order > PPC_MIN_HPT_ORDER) {
+ /* Only do this if userspace didn't specify a size via ioctl */
+ while (!hpt && order > PPC_MIN_HPT_ORDER && !htab_orderp) {
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
__GFP_NOWARN, order - PAGE_SHIFT);
if (!hpt)
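
For reference, a minimal C sketch of the policy this hunk enforces: the fallback loop only shrinks the allocation when no explicit size was requested, so an ioctl-specified HPT size either succeeds at that size or fails outright. The helper try_alloc_order() and the order values are illustrative stand-ins, not kernel API.

#include <stdlib.h>

/* Hypothetical stand-in for __get_free_pages() in this sketch. */
static void *try_alloc_order(unsigned int order)
{
	return malloc(1UL << order);
}

static void *alloc_hpt_sketch(const unsigned int *requested_order)
{
	unsigned int order = requested_order ? *requested_order : 26; /* illustrative default */
	void *hpt = try_alloc_order(order);

	/* Shrink only when the caller did not pin the size via the ioctl. */
	while (!hpt && order > 18 /* illustrative minimum */ && !requested_order) {
		order--;
		hpt = try_alloc_order(order);
	}
	return hpt;
}
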
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 228049786888..9c26c5a96ea2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -36,7 +36,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -75,12 +74,6 @@
static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER 0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER 3
-#endif
-
static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
- vcore->mpp_buffer_is_valid = false;
-
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- vcore->mpp_buffer = (void *)__get_free_pages(
- GFP_KERNEL|__GFP_ZERO,
- MPP_BUFFER_ORDER);
-
return vcore;
}
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
return 1;
}
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
- phys_addr_t phy_addr, mpp_addr;
-
- phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
- mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
- mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
- logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
- vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
- phys_addr_t phy_addr, mpp_addr;
-
- phy_addr = virt_to_phys(vc->mpp_buffer);
- mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
- /* We must abort any in-progress save operations to ensure
- * the table is valid so that prefetch engine knows when to
- * stop prefetching. */
- logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
- mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
/*
* A list of virtual cores for each physical CPU.
* These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
- if (vc->mpp_buffer_is_valid)
- kvmppc_start_restoring_l2_cache(vc);
-
__kvmppc_vcore_entry();
- if (vc->mpp_buffer)
- kvmppc_start_saving_l2_cache(vc);
-
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
{
long int i;
- for (i = 0; i < KVM_MAX_VCORES; ++i) {
- if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
- struct kvmppc_vcore *vc = kvm->arch.vcores[i];
- free_pages((unsigned long)vc->mpp_buffer,
- MPP_BUFFER_ORDER);
- }
+ for (i = 0; i < KVM_MAX_VCORES; ++i)
kfree(kvm->arch.vcores[i]);
- }
kvm->arch.online_vcores = 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 0bce4fffcb2e..91700518bbf3 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -472,6 +472,8 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
note_hpte_modification(kvm, rev);
unlock_hpte(hpte, 0);
+ if (v & HPTE_V_ABSENT)
+ v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
hpret[0] = v;
hpret[1] = r;
return H_SUCCESS;
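
HPTE_V_ABSENT is a software-use bit that HV KVM sets on entries whose backing page the host has paged out; from the guest's point of view such an entry is still valid, so the value H_REMOVE hands back now reports the architected valid bit instead. A minimal sketch of the fix-up, using illustrative bit values rather than the real HPTE_V_* masks:

#include <stdint.h>

#define SKETCH_V_VALID   (1ULL << 0)   /* illustrative, not the real mask */
#define SKETCH_V_ABSENT  (1ULL << 1)   /* illustrative, not the real mask */

/* Present a host-side "absent" entry to the guest as a valid one. */
static uint64_t report_hpte_v(uint64_t v)
{
	if (v & SKETCH_V_ABSENT)
		v = (v & ~SKETCH_V_ABSENT) | SKETCH_V_VALID;
	return v;
}
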
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b98889e9851d..b1dab8d1d885 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -150,6 +150,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq 11f
+ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
+ beq 15f /* Invoke the H_DOORBELL handler */
cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
beq cr2, 14f /* HMI check */
@@ -174,6 +176,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_HSRR1, r7
b hmi_exception_after_realmode
+15: mtspr SPRN_HSRR0, r8
+ mtspr SPRN_HSRR1, r7
+ ba 0xe80
+
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -2377,7 +2383,6 @@ machine_check_realmode:
mr r3, r9 /* get vcpu pointer */
bl kvmppc_realmode_machine_check
nop
- cmpdi r3, 0 /* Did we handle MCE ? */
ld r9, HSTATE_KVM_VCPU(r13)
li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
/*
@@ -2390,13 +2395,18 @@ machine_check_realmode:
* The old code used to return to host for unhandled errors which
* was causing guest to hang with soft lockups inside guest and
* makes it difficult to recover guest instance.
+ *
+ * If we receive a machine check with MSR(RI=0) then deliver it to the
+ * guest as a machine check, causing the guest to crash.
*/
- ld r10, VCPU_PC(r9)
ld r11, VCPU_MSR(r9)
+ andi. r10, r11, MSR_RI /* check for unrecoverable exception */
+ beq 1f /* Deliver a machine check to guest */
+ ld r10, VCPU_PC(r9)
+ cmpdi r3, 0 /* Did we handle MCE ? */
bne 2f /* Continue guest execution. */
/* If not, deliver a machine check. SRR0/1 are already set */
- li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
- ld r11, VCPU_MSR(r9)
+1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
bl kvmppc_msr_interrupt
2: b fast_interrupt_c_return
@@ -2436,14 +2446,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* hypervisor doorbell */
3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+
+ /*
+ * Clear the doorbell as we will invoke the handler
+ * explicitly in the guest exit path.
+ */
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
+ PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
- /* if not, clear it and return -1 */
- lis r6, (PPC_DBELL_SERVER << (63-36))@h
- PPC_MSGCLR(6)
+ /* if not, return -1 */
li r3, -1
blr
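
Two behavioural changes in this file: a hypervisor doorbell seen on the guest exit path is cleared early (msgclr) and then re-raised to the host's 0xe80 handler via HSRR0/HSRR1, and the machine check path now looks at MSR[RI] before deciding whether the guest can continue. A C-level sketch of that machine check decision, with hypothetical helper names and an illustrative MSR_RI value:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MSR_RI (1ULL << 1)   /* illustrative bit for MSR[RI] */

/* Hypothetical stand-ins for the real delivery/resume paths. */
static void deliver_guest_mce(void) { puts("deliver machine check to guest"); }
static void resume_guest(void)      { puts("continue guest execution"); }

/* handled = return value of kvmppc_realmode_machine_check (r3 above). */
static void guest_mce_exit_sketch(int handled, uint64_t guest_msr)
{
	if (!(guest_msr & SKETCH_MSR_RI) || !handled)
		deliver_guest_mce();   /* unrecoverable state, or host could not handle it */
	else
		resume_guest();
}
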
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index b29ce752c7d6..32fdab57d604 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -237,7 +237,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
struct kvm_book3e_206_tlb_entry *gtlbe)
{
struct vcpu_id_table *idt = vcpu_e500->idt;
- unsigned int pr, tid, ts, pid;
+ unsigned int pr, tid, ts;
+ int pid;
u32 val, eaddr;
unsigned long flags;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index ce7291c79f6c..990db69a1d0b 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -15,6 +15,7 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
+#include <asm/reg_booke.h>
#include "booke.h"
#include "e500.h"
@@ -22,6 +23,7 @@
#define XOP_DCBTLS 166
#define XOP_MSGSND 206
#define XOP_MSGCLR 238
+#define XOP_MFTMR 366
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
@@ -113,6 +115,19 @@ static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
+ int rt)
+{
+ /* Expose one thread per vcpu */
+ if (get_tmrn(inst) == TMRN_TMCFG0) {
+ kvmppc_set_gpr(vcpu, rt,
+ 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
+ return EMULATE_DONE;
+ }
+
+ return EMULATE_FAIL;
+}
+
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -165,6 +180,10 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
break;
+ case XOP_MFTMR:
+ emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
+ break;
+
case XOP_EHPRIV:
emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
advance);
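
TMCFG0 packs the total number of threads and the number of active threads into separate fields; emulating mftmr for it lets the guest see a one-thread-per-vcpu topology instead of faulting. A sketch of how such a value is composed, with an assumed field position (the real TMRN_TMCFG0_* definitions come from the Book E register header and may differ):

#include <stdint.h>

#define SKETCH_NATHRD_SHIFT 24   /* assumed position of the active-thread field */

/* Build a TMCFG0-style value advertising n threads, all active. */
static uint32_t tmcfg0_sketch(uint32_t nthreads)
{
	return nthreads | (nthreads << SKETCH_NATHRD_SHIFT);
}

/* tmcfg0_sketch(1) matches the 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT) above. */
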
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 805fee9beefa..34c43fff4adb 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -406,7 +406,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
unsigned long gfn_start, gfn_end;
- tsize_pages = 1 << (tsize - 2);
+ tsize_pages = 1UL << (tsize - 2);
gfn_start = gfn & ~(tsize_pages - 1);
gfn_end = gfn_start + tsize_pages;
@@ -447,7 +447,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
if (likely(!pfnmap)) {
- tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
+ tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
pfn = gfn_to_pfn_memslot(slot, gfn);
if (is_error_noslot_pfn(pfn)) {
if (printk_ratelimit())
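
Both hunks widen the shift: tsize_pages is an unsigned long, but 1 << n is evaluated at int width, so once the count approaches 31 the intermediate result is truncated (and the shift is formally undefined) even though the receiving variable is 64-bit. A standalone illustration of the corrected form:

#include <stdio.h>

int main(void)
{
	unsigned int tsize = 36;   /* illustrative large page-size code */

	/*
	 * With a plain "1" the shift would be done at int width and could
	 * not represent 2^34; promoting the constant to unsigned long makes
	 * the whole shift 64-bit, as in the hunks above.
	 */
	unsigned long tsize_pages = 1UL << (tsize - 2);

	printf("tsize_pages = %lu\n", tsize_pages);
	return 0;
}
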
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2e51289610e4..6fd2405c7f4a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -559,6 +559,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = num_online_cpus();
break;
+ case KVM_CAP_NR_MEMSLOTS:
+ r = KVM_USER_MEM_SLOTS;
+ break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
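
With KVM_CAP_NR_MEMSLOTS wired up, userspace can query the memory-slot limit instead of hard-coding it. A minimal query against /dev/kvm; on kernels without this patch the ioctl simply returns 0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}

	int slots = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
	printf("KVM_CAP_NR_MEMSLOTS: %d\n", slots);

	close(kvm);
	return 0;
}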