path: root/arch/powerpc
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-10-23 18:49:51 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-10-23 18:49:51 +0900
commit     a2c01ed5d46f0686c52272e09f7d2f5be9f573fd (patch)
tree       773a9e89b041cfa4ee8018af6d490c77bc6f8a7a /arch/powerpc
parent     d0ddf980d6efdcee6c7a85eb0f27baa6b60eeff6 (diff)
parent     8832317f662c06f5c06e638f57bfe89a71c9b266 (diff)
Merge tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8" from Paul

 - Handle irq_happened flag correctly in off-line loop from Paul

 - Validate rtas.entry before calling enter_rtas() from Vasant

* tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/rtas: Validate rtas.entry before calling enter_rtas()
  powerpc/powernv: Handle irq_happened flag correctly in off-line loop
  powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"
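The rtas fix guards the ppc_rtas() syscall against a NULL rtas.entry, which is what rtas.entry holds on platforms that never registered an RTAS firmware entry point. A minimal user-space sketch of that guard pattern follows; the struct rtas_t, enter_rtas() stub and ppc_rtas_sketch() names are stand-ins for illustration only, not the kernel's implementation (the real three-line change is in the rtas.c hunk below).

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's rtas descriptor and firmware call. */
struct rtas_t {
	unsigned long entry;	/* firmware entry point; 0 when RTAS is absent */
};

static struct rtas_t rtas;	/* zero-initialised, as on a non-RTAS system */

static int enter_rtas(void)
{
	/* Placeholder for the real branch into firmware. */
	return 0;
}

/* Guard modelled on the fix: fail early instead of jumping to address 0. */
static int ppc_rtas_sketch(void)
{
	if (!rtas.entry)
		return -EINVAL;
	return enter_rtas();
}

int main(void)
{
	printf("ppc_rtas_sketch() = %d (expected %d while rtas.entry is 0)\n",
	       ppc_rtas_sketch(), -EINVAL);
	return 0;
}

With rtas.entry left at zero, ppc_rtas_sketch() returns -EINVAL, mirroring how the patched syscall now fails early rather than branching to a bogus firmware address.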
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/cache.h       |  7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h    |  2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  | 17
-rw-r--r--  arch/powerpc/include/asm/reg.h         |  1
-rw-r--r--  arch/powerpc/kernel/rtas.c             |  3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c           | 55
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c   | 29
7 files changed, 28 insertions(+), 86 deletions(-)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 0dc42c5082b7..5f8229e24fe6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,7 +3,6 @@
#ifdef __KERNEL__
-#include <asm/reg.h>
/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
};
extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
- asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
#endif /* __powerpc64__ && ! __ASSEMBLY__ */
#if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 827a38d7a9db..887c259556df 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
u32 arch_compat;
ulong pcr;
ulong dpdes; /* doorbell state (POWER8) */
- void *mpp_buffer; /* Micro Partition Prefetch buffer */
- bool mpp_buffer_is_valid;
ulong conferring_threads;
};
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 790f5d1d9a46..7ab04fc59e24 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -141,7 +141,6 @@
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LDARX 0x7c0000a8
-#define PPC_INST_LOGMPP 0x7c0007e4
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
#define PPC_INST_LWARX 0x7c000028
@@ -285,20 +284,6 @@
#define __PPC_EH(eh) 0
#endif
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT (0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE (0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2 (0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3 (0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT (0x03ULL << 54)
-
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
__PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b) stringify_in_c(.long PPC_INST_LOGMPP | \
- __PPC_RB(b))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
___PPC_RT(t) | ___PPC_RA(a) | \
___PPC_RB(b) | __PPC_EH(eh))
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index aa1cc5f015ee..a908ada8e0a5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -226,7 +226,6 @@
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
#define SPRN_DAWR 0xB4
-#define SPRN_MPPR 0xB8 /* Micro Partition Prefetch Register */
#define SPRN_RPR 0xBA /* Relative Priority Register */
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 84bf934cf748..5a753fae8265 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (!rtas.entry)
+ return -EINVAL;
+
if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
return -EFAULT;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 228049786888..9c26c5a96ea2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -36,7 +36,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
@@ -75,12 +74,6 @@
static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER 0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER 3
-#endif
-
static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
- vcore->mpp_buffer_is_valid = false;
-
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- vcore->mpp_buffer = (void *)__get_free_pages(
- GFP_KERNEL|__GFP_ZERO,
- MPP_BUFFER_ORDER);
-
return vcore;
}
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
return 1;
}
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
- phys_addr_t phy_addr, mpp_addr;
-
- phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
- mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
- mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
- logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
- vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
- phys_addr_t phy_addr, mpp_addr;
-
- phy_addr = virt_to_phys(vc->mpp_buffer);
- mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
- /* We must abort any in-progress save operations to ensure
- * the table is valid so that prefetch engine knows when to
- * stop prefetching. */
- logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
- mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
/*
* A list of virtual cores for each physical CPU.
* These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
- if (vc->mpp_buffer_is_valid)
- kvmppc_start_restoring_l2_cache(vc);
-
__kvmppc_vcore_entry();
- if (vc->mpp_buffer)
- kvmppc_start_saving_l2_cache(vc);
-
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
{
long int i;
- for (i = 0; i < KVM_MAX_VCORES; ++i) {
- if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
- struct kvmppc_vcore *vc = kvm->arch.vcores[i];
- free_pages((unsigned long)vc->mpp_buffer,
- MPP_BUFFER_ORDER);
- }
+ for (i = 0; i < KVM_MAX_VCORES; ++i)
kfree(kvm->arch.vcores[i]);
- }
kvm->arch.online_vcores = 0;
}
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8f70ba681a78..ca264833ee64 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
* so clear LPCR:PECE1. We keep PECE2 enabled.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+ /*
+ * Hard-disable interrupts, and then clear irq_happened flags
+ * that we can safely ignore while off-line, since they
+ * are for things for which we do no processing when off-line
+ * (or in the case of HMI, all the processing we need to do
+ * is done in lower-level real-mode code).
+ */
+ hard_irq_disable();
+ local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
while (!generic_check_cpu_restart(cpu)) {
+ /*
+ * Clear IPI flag, since we don't handle IPIs while
+ * offline, except for those when changing micro-threading
+ * mode, which are handled explicitly below, and those
+ * for coming online, which are handled via
+ * generic_check_cpu_restart() calls.
+ */
+ kvmppc_set_host_ipi(cpu, 0);
ppc64_runlatch_off();
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
- if ((srr1 & wmask) == SRR1_WAKEEE) {
+ if (((srr1 & wmask) == SRR1_WAKEEE) ||
+ (local_paca->irq_happened & PACA_IRQ_EE)) {
icp_native_flush_interrupt();
- local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
- smp_mb();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
- kvmppc_set_host_ipi(cpu, 0);
}
+ local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+ smp_mb();
if (cpu_core_split_required())
continue;
- if (!generic_check_cpu_restart(cpu))
+ if (srr1 && !generic_check_cpu_restart(cpu))
DBG("CPU%d Unexpected exit while offline !\n", cpu);
}
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
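The smp.c hunk above is the "handle irq_happened flag correctly in off-line loop" fix: flags for events the off-line CPU does no processing for (decrementer, HMI) are cleared before the wait loop, and the per-wakeup flags (external interrupt, doorbell) are cleared after each wakeup has been handled so they are not still latched when the CPU comes back on-line. The stand-alone sketch below is only a user-space analogy of that flag discipline, using made-up EV_* bits in place of the kernel's PACA_IRQ_* flags and a simulated restart request; it is not the kernel code.

#include <stdio.h>

/* Hypothetical event bits, loosely modelled on the PACA_IRQ_* flags. */
#define EV_DEC      0x01	/* decrementer: ignored while off-line          */
#define EV_HMI      0x02	/* hypervisor maintenance: handled in real mode */
#define EV_EE       0x04	/* external interrupt: must be flushed          */
#define EV_DBELL    0x08	/* doorbell: must be cleared after handling     */
#define EV_RESTART  0x10	/* request to bring the CPU back on-line        */

static unsigned int irq_happened;

static int check_cpu_restart(void) { return irq_happened & EV_RESTART; }

int main(void)
{
	irq_happened = EV_DEC | EV_EE;		/* pretend events were latched */

	/* Before the wait loop: drop flags for events we do no processing
	 * for while off-line. */
	irq_happened &= ~(EV_DEC | EV_HMI);

	while (!check_cpu_restart()) {
		/* ... nap/sleep until a wakeup event in the real code ... */

		if (irq_happened & EV_EE)
			printf("flushing pending external interrupt\n");

		/* After handling the wakeup, clear the per-wakeup flags so
		 * they are not treated as pending once back on-line. */
		irq_happened &= ~(EV_EE | EV_DBELL);

		irq_happened |= EV_RESTART;	/* simulate the on-line request */
	}
	return 0;
}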