Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h  | 10
-rw-r--r--  arch/arm/include/asm/kvm_host.h     | 18
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h      | 13
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h     | 12
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c     | 24
-rw-r--r--  arch/arm/mm/dma-mapping.c           | 20
6 files changed, 44 insertions(+), 53 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 6b7644a383f6..40002416efec 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -271,6 +271,16 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
+static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
+ bool flag)
+{
+}
+
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
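[Editor's note] The two stubs above give 32-bit ARM a no-op view of the per-vCPU SMCCC_ARCH_WORKAROUND_2 (SSBD) flag that arm64 tracks for real. Below is a minimal user-space sketch of how such a flag could feed the firmware-register value defined later in this diff; only the WA2_* values come from the uapi hunk below, and the wa2_reg_value() helper is hypothetical, not taken from this patch.

#include <stdbool.h>
#include <stdio.h>

/* Values mirroring the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 uapi hunk below. */
#define WA2_NOT_AVAIL      0
#define WA2_UNKNOWN        1
#define WA2_AVAIL          2
#define WA2_NOT_REQUIRED   3
#define WA2_ENABLED        (1U << 4)

/*
 * Illustrative sketch (not from this diff): fold a per-vCPU "workaround 2
 * enabled" flag into the register value. On 32-bit ARM the flag accessor
 * above is a stub that always returns false, so the ENABLED bit would
 * never be reported.
 */
static unsigned int wa2_reg_value(unsigned int state, bool vcpu_flag)
{
        unsigned int val = state;

        if (state == WA2_AVAIL && vcpu_flag)
                val |= WA2_ENABLED;
        return val;
}

int main(void)
{
        printf("arm stub flag:            %#x\n", wa2_reg_value(WA2_UNKNOWN, false));
        printf("hypothetical arm64 vcpu:  %#x\n", wa2_reg_value(WA2_AVAIL, true));
        return 0;
}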
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f80418ddeb60..8a37c8e89777 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -15,7 +15,6 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
-#include <asm/smp_plat.h>
#include <kvm/arm_arch_timer.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -147,11 +146,10 @@ struct kvm_host_data {
typedef struct kvm_host_data kvm_host_data_t;
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
- int cpu)
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
- cpu_ctxt->cp15[c0_MPIDR] = cpu_logical_map(cpu);
+ cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
}
struct vcpu_reset_state {
@@ -362,7 +360,11 @@ static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN -1
+#define KVM_BP_HARDEN_WA_NEEDED 0
+#define KVM_BP_HARDEN_NOT_REQUIRED 1
+
+static inline int kvm_arm_harden_branch_predictor(void)
{
switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
@@ -370,10 +372,12 @@ static inline bool kvm_arm_harden_branch_predictor(void)
case ARM_CPU_PART_CORTEX_A12:
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_CORTEX_A17:
- return true;
+ return KVM_BP_HARDEN_WA_NEEDED;
#endif
+ case ARM_CPU_PART_CORTEX_A7:
+ return KVM_BP_HARDEN_NOT_REQUIRED;
default:
- return false;
+ return KVM_BP_HARDEN_UNKNOWN;
}
}
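[Editor's note] kvm_arm_harden_branch_predictor() now returns a tri-state instead of a bool, so callers can tell "not affected" (e.g. Cortex-A7) apart from "unknown". The self-contained sketch below maps that tri-state onto the SMCCC_ARCH_WORKAROUND_1 firmware-register values added in the uapi hunk further down; the wa1_reg_value() mapping is illustrative, not taken from this diff.

#include <stdio.h>

/* Tri-state returned by kvm_arm_harden_branch_predictor() after this patch. */
#define KVM_BP_HARDEN_UNKNOWN       -1
#define KVM_BP_HARDEN_WA_NEEDED      0
#define KVM_BP_HARDEN_NOT_REQUIRED   1

/* Register values from the KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 uapi hunk. */
#define WA1_NOT_AVAIL      0
#define WA1_AVAIL          1
#define WA1_NOT_REQUIRED   2

/* Illustrative mapping a caller could apply to the tri-state. */
static int wa1_reg_value(int bp_harden_state)
{
        switch (bp_harden_state) {
        case KVM_BP_HARDEN_WA_NEEDED:
                return WA1_AVAIL;          /* mitigation implemented and needed */
        case KVM_BP_HARDEN_NOT_REQUIRED:
                return WA1_NOT_REQUIRED;   /* e.g. Cortex-A7: not affected */
        case KVM_BP_HARDEN_UNKNOWN:
        default:
                return WA1_NOT_AVAIL;      /* no claim can be made */
        }
}

int main(void)
{
        printf("Cortex-A7  -> reg value %d\n", wa1_reg_value(KVM_BP_HARDEN_NOT_REQUIRED));
        printf("Cortex-A15 -> reg value %d\n", wa1_reg_value(KVM_BP_HARDEN_WA_NEEDED));
        return 0;
}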
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 71ac1c8d101c..40e9034db601 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -82,13 +82,14 @@
#define VFP_FPEXC __ACCESS_VFP(FPEXC)
/* AArch64 compatibility macros, only for the timer so far */
-#define read_sysreg_el0(r) read_sysreg(r##_el0)
-#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
+#define read_sysreg_el0(r) read_sysreg(r##_EL0)
+#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
+
+#define SYS_CNTP_CTL_EL0 CNTP_CTL
+#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
+#define SYS_CNTV_CTL_EL0 CNTV_CTL
+#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
-#define cntp_ctl_el0 CNTP_CTL
-#define cntp_cval_el0 CNTP_CVAL
-#define cntv_ctl_el0 CNTV_CTL
-#define cntv_cval_el0 CNTV_CVAL
#define cntvoff_el2 CNTVOFF
#define cnthctl_el2 CNTHCTL
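[Editor's note] The switch to read_sysreg_el0(r##_EL0) plus the SYS_CNT*_EL0 aliases lets shared timer code use AArch64-style register names on 32-bit ARM, where they resolve to the cp15 accessors. The small host-side demo below traces the macro chain with a stand-in read_sysreg() that simply prints which register token it ends up with; everything except the two patch macros is scaffolding for the demo.

#include <stdio.h>

/* Helper pair: fully expand a token, then turn it into a string literal. */
#define STR(x)         #x
#define EXPAND_STR(x)  STR(x)

/* Macros as introduced by this patch (sketch of arch/arm kvm_hyp.h). */
#define read_sysreg_el0(r)   read_sysreg(r##_EL0)
#define SYS_CNTV_CTL_EL0     CNTV_CTL

/* Stand-in for the real cp15 accessor: just name the register it was given. */
#define read_sysreg(r)       EXPAND_STR(r)

int main(void)
{
        /* Prints "CNTV_CTL": the AArch64-style name resolves to the cp15 one. */
        puts(read_sysreg_el0(SYS_CNTV_CTL));
        return 0;
}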
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 4602464ebdfb..a4217c1a5d01 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -214,6 +214,18 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
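[Editor's note] User space can query the new firmware registers through the standard ONE_REG interface. A minimal sketch, assuming a 32-bit ARM host whose kernel and uapi headers already carry this patch; most error handling is trimmed for brevity.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
                perror("/dev/kvm");
                return 1;
        }

        int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

        /* Initialise the vCPU with the host's preferred target first. */
        struct kvm_vcpu_init init;
        ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
        ioctl(vcpu, KVM_ARM_VCPU_INIT, &init);

        uint64_t val = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
                .addr = (uintptr_t)&val,
        };

        /* 0 = not available, 1 = available, 2 = not required (see above). */
        if (ioctl(vcpu, KVM_GET_ONE_REG, &reg) == 0)
                printf("SMCCC_ARCH_WORKAROUND_1 state: %llu\n",
                       (unsigned long long)val);
        else
                perror("KVM_GET_ONE_REG");

        close(vcpu);
        close(vm);
        close(kvm);
        return 0;
}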
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
index 1aea01ba1262..52b82559d99b 100644
--- a/arch/arm/mm/dma-mapping-nommu.c
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -35,18 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
unsigned long attrs)
{
- void *ret;
-
- /*
- * Try generic allocator first if we are advertised that
- * consistency is not required.
- */
-
- if (attrs & DMA_ATTR_NON_CONSISTENT)
- return dma_direct_alloc_pages(dev, size, dma_handle, gfp,
- attrs);
-
- ret = dma_alloc_from_global_coherent(size, dma_handle);
+ void *ret = dma_alloc_from_global_coherent(size, dma_handle);
/*
* dma_alloc_from_global_coherent() may fail because:
@@ -66,16 +55,9 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
unsigned long attrs)
{
- if (attrs & DMA_ATTR_NON_CONSISTENT) {
- dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
- } else {
- int ret = dma_release_from_global_coherent(get_order(size),
- cpu_addr);
-
- WARN_ON_ONCE(ret == 0);
- }
+ int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
- return;
+ WARN_ON_ONCE(ret == 0);
}
static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
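[Editor's note] With the DMA_ATTR_NON_CONSISTENT special case gone, the nommu free path always returns the buffer to the global coherent pool. For reference, this is how the function reads after the hunk above, reconstructed from the diff; it builds only inside the kernel tree.

static void arm_nommu_dma_free(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr,
                               unsigned long attrs)
{
        /* Release back to the global coherent pool; warn if it was not ours. */
        int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);

        WARN_ON_ONCE(ret == 0);
}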
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1fb5c0ca1ed8..4789c60a86e3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -216,25 +216,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
- unsigned long max_dma_pfn;
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then we must
- * indicate that DMA to this device is not supported.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) < max_pfn - 1) {
- if (warn) {
- dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
- mask);
- dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
- }
- return 0;
- }
-
- max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+ unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This