Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--   arch/arm/kvm/arm.c           79
-rw-r--r--   arch/arm/kvm/guest.c         92
-rw-r--r--   arch/arm/kvm/handle_exit.c    2
-rw-r--r--   arch/arm/kvm/mmu.c           24
-rw-r--r--   arch/arm/kvm/psci.c          11
5 files changed, 174 insertions(+), 34 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2a700e00528d..1d8248ea5669 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -17,6 +17,7 @@
*/
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@@ -137,6 +138,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out_free_stage2_pgd;
+ kvm_timer_init(kvm);
+
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
@@ -188,6 +191,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IRQCHIP:
r = vgic_present;
break;
+ case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
@@ -339,6 +343,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ /*
+ * The arch-generic KVM code expects the cpu field of a vcpu to be -1
+ * if the vcpu is no longer assigned to a cpu. This is used for the
+ * optimized make_all_cpus_request path.
+ */
+ vcpu->cpu = -1;
+
kvm_arm_set_running_vcpu(NULL);
}
@@ -462,6 +473,8 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
+ int ret;
+
if (likely(vcpu->arch.has_run_once))
return 0;
@@ -471,22 +484,12 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Initialize the VGIC before running a vcpu the first time on
* this VM.
*/
- if (irqchip_in_kernel(vcpu->kvm) &&
- unlikely(!vgic_initialized(vcpu->kvm))) {
- int ret = kvm_vgic_init(vcpu->kvm);
+ if (unlikely(!vgic_initialized(vcpu->kvm))) {
+ ret = kvm_vgic_init(vcpu->kvm);
if (ret)
return ret;
}
- /*
- * Handle the "start in power-off" case by calling into the
- * PSCI code.
- */
- if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
- *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
- kvm_psci_call(vcpu);
- }
-
return 0;
}
@@ -700,6 +703,24 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return -EINVAL;
}
+static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_init *init)
+{
+ int ret;
+
+ ret = kvm_vcpu_set_target(vcpu, init);
+ if (ret)
+ return ret;
+
+ /*
+ * Handle the "start in power-off" case by marking the VCPU as paused.
+ */
+ if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+ vcpu->arch.pause = true;
+
+ return 0;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -713,8 +734,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (copy_from_user(&init, argp, sizeof(init)))
return -EFAULT;
- return kvm_vcpu_set_target(vcpu, &init);
-
+ return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
@@ -772,7 +792,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
case KVM_ARM_DEVICE_VGIC_V2:
if (!vgic_present)
return -ENXIO;
- return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
+ return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
default:
return -ENODEV;
}
@@ -853,6 +873,33 @@ static struct notifier_block hyp_init_cpu_nb = {
.notifier_call = hyp_init_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd,
+ void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ cpu_init_hyp_mode(NULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyp_init_cpu_pm_nb = {
+ .notifier_call = hyp_init_cpu_pm_notifier,
+};
+
+static void __init hyp_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+}
+#else
+static inline void hyp_cpu_pm_init(void)
+{
+}
+#endif
+
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -1013,6 +1060,8 @@ int kvm_arch_init(void *opaque)
goto out_err;
}
+ hyp_cpu_pm_init();
+
kvm_coproc_table_init();
return 0;
out_err:
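
The reworked KVM_ARM_VCPU_INIT path above now honors KVM_ARM_VCPU_POWER_OFF by marking the VCPU paused instead of faking a PSCI CPU_OFF call on first run. A minimal userspace sketch of creating a secondary VCPU powered off, assuming vcpu_fd is an already-open VCPU file descriptor and a Cortex-A15 target:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: initialize a secondary VCPU in the powered-off state so the
 * guest can later start it via PSCI CPU_ON. */
static int init_vcpu_powered_off(int vcpu_fd)
{
	struct kvm_vcpu_init init;

	memset(&init, 0, sizeof(init));
	init.target = KVM_ARM_TARGET_CORTEX_A15;
	init.features[0] = 1 << KVM_ARM_VCPU_POWER_OFF;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
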
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 20f8d97904af..2786eae10c0d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,6 +109,83 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
+#ifndef CONFIG_KVM_ARM_TIMER
+
+#define NUM_TIMER_REGS 0
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ return 0;
+}
+
+static bool is_timer_reg(u64 index)
+{
+ return false;
+}
+
+int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
+{
+ return 0;
+}
+
+u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
+{
+ return 0;
+}
+
+#else
+
+#define NUM_TIMER_REGS 3
+
+static bool is_timer_reg(u64 index)
+{
+ switch (index) {
+ case KVM_REG_ARM_TIMER_CTL:
+ case KVM_REG_ARM_TIMER_CNT:
+ case KVM_REG_ARM_TIMER_CVAL:
+ return true;
+ }
+ return false;
+}
+
+static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+ if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
+ return -EFAULT;
+ uindices++;
+ if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
+ return -EFAULT;
+
+ return 0;
+}
+
+#endif
+
+static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+ int ret;
+
+ ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
+ if (ret != 0)
+ return ret;
+
+ return kvm_arm_timer_set_reg(vcpu, reg->id, val);
+}
+
+static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ void __user *uaddr = (void __user *)(long)reg->addr;
+ u64 val;
+
+ val = kvm_arm_timer_get_reg(vcpu, reg->id);
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+}
+
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(u32);
@@ -121,7 +198,8 @@ static unsigned long num_core_regs(void)
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
+ return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ + NUM_TIMER_REGS;
}
/**
@@ -133,6 +211,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
+ int ret;
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
if (put_user(core_reg | i, uindices))
@@ -140,6 +219,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
+ ret = copy_timer_indices(vcpu, uindices);
+ if (ret)
+ return ret;
+ uindices += NUM_TIMER_REGS;
+
return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
@@ -153,6 +237,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return get_timer_reg(vcpu, reg);
+
return kvm_arm_coproc_get_reg(vcpu, reg);
}
@@ -166,6 +253,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
+ if (is_timer_reg(reg->id))
+ return set_timer_reg(vcpu, reg);
+
return kvm_arm_coproc_set_reg(vcpu, reg);
}
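
With the timer registers exported through the ONE_REG interface above, userspace can save and restore them like any other register. A minimal sketch reading the guest's virtual counter, again assuming vcpu_fd is an open VCPU file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read the guest timer counter through KVM_GET_ONE_REG.
 * KVM_REG_ARM_TIMER_CNT is one of the three indices that
 * copy_timer_indices() now reports via KVM_GET_REG_LIST. */
static int read_guest_timer_cnt(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)cnt,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
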
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index a92079011a83..0de91fc6de0f 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -26,8 +26,6 @@
#include "trace.h"
-#include "trace.h"
-
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 580906989db1..7789857d1470 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -667,14 +667,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
} else {
/*
- * Pages belonging to VMAs not aligned to the PMD mapping
- * granularity cannot be mapped using block descriptors even
- * if the pages belong to a THP for the process, because the
- * stage-2 block descriptor will cover more than a single THP
- * and we loose atomicity for unmapping, updates, and splits
- * of the THP or other pages in the stage-2 block range.
+ * Pages belonging to memslots that don't have the same
+ * alignment for userspace and IPA cannot be mapped using
+ * block descriptors even if the pages belong to a THP for
+ * the process, because the stage-2 block descriptor will
+ * cover more than a single THP and we lose atomicity for
+ * unmapping, updates, and splits of the THP or other pages
+ * in the stage-2 block range.
*/
- if (vma->vm_start & ~PMD_MASK)
+ if ((memslot->userspace_addr & ~PMD_MASK) !=
+ ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
force_pte = true;
}
up_read(&current->mm->mmap_sem);
@@ -916,9 +918,9 @@ int kvm_mmu_init(void)
{
int err;
- hyp_idmap_start = virt_to_phys(__hyp_idmap_text_start);
- hyp_idmap_end = virt_to_phys(__hyp_idmap_text_end);
- hyp_idmap_vector = virt_to_phys(__kvm_hyp_init);
+ hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
+ hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
+ hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
/*
@@ -945,7 +947,7 @@ int kvm_mmu_init(void)
*/
kvm_flush_dcache_to_poc(init_bounce_page, len);
- phys_base = virt_to_phys(init_bounce_page);
+ phys_base = kvm_virt_to_phys(init_bounce_page);
hyp_idmap_vector += phys_base - hyp_idmap_start;
hyp_idmap_start = phys_base;
hyp_idmap_end = phys_base + len;
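
The new force_pte condition compares the offset of the memslot's userspace address with the offset of its IPA within a PMD-sized region (2 MiB on LPAE); block mappings are only safe when the two are congruent. A standalone sketch of the predicate, with PAGE_SHIFT and PMD_SHIFT values assumed for ARM LPAE:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* 4 KiB pages, assumption */
#define PMD_SHIFT	21			/* 2 MiB sections on LPAE, assumption */
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

/* Sketch of the check added above: forbid stage-2 block descriptors
 * when the hva and the IPA disagree on their offset within a PMD,
 * since one block would then span parts of two different THPs. */
static bool must_force_pte(uint64_t userspace_addr, uint64_t base_gfn)
{
	return (userspace_addr & ~PMD_MASK) !=
	       ((base_gfn << PAGE_SHIFT) & ~PMD_MASK);
}
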
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0881bf169fbc..448f60e8d23c 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -54,15 +54,15 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
}
}
- if (!vcpu)
+ /*
+ * Make sure the caller requested a valid CPU and that the CPU is
+ * turned off.
+ */
+ if (!vcpu || !vcpu->arch.pause)
return KVM_PSCI_RET_INVAL;
target_pc = *vcpu_reg(source_vcpu, 2);
- wq = kvm_arch_vcpu_wq(vcpu);
- if (!waitqueue_active(wq))
- return KVM_PSCI_RET_INVAL;
-
kvm_reset_vcpu(vcpu);
/* Gracefully handle Thumb2 entry point */
@@ -79,6 +79,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
vcpu->arch.pause = false;
smp_mb(); /* Make sure the above is visible */
+ wq = kvm_arch_vcpu_wq(vcpu);
wake_up_interruptible(wq);
return KVM_PSCI_RET_SUCCESS;
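
For reference, the guest-side counterpart of this handler is a PSCI 0.1 CPU_ON hypercall; per the convention visible above, r2 carries the entry point that kvm_psci_vcpu_on() reads as target_pc. A minimal guest-side sketch in ARM inline assembly, using the function ID macro from the KVM UAPI headers (building it requires a toolchain that accepts the hvc instruction):

#include <linux/kvm.h>	/* KVM_PSCI_FN_CPU_ON */

/* Sketch: ask the hypervisor to power on another VCPU. r0 holds the
 * PSCI function ID, r1 the target CPU ID, r2 the entry point; the
 * PSCI result comes back in r0. */
static long psci_cpu_on(unsigned long cpu_id, unsigned long entry_point)
{
	register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
	register unsigned long r1 asm("r1") = cpu_id;
	register unsigned long r2 asm("r2") = entry_point;

	asm volatile("hvc #0"
		     : "+r" (r0)
		     : "r" (r1), "r" (r2)
		     : "memory");

	return r0;
}
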