Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/85xx/c293pcie.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 17
-rw-r--r--  arch/powerpc/platforms/85xx/ge_imp3a.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc8536_ds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx.h | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 3
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 4
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 16
-rw-r--r--  arch/powerpc/platforms/85xx/p1010rdb.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_rdk.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/p1023_rds.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/qemu_e500.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8548.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/xes_mpc85xx.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Kconfig | 10
-rw-r--r--  arch/powerpc/platforms/embedded6xx/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/embedded6xx/prpmc2800.c | 156
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal-async.c | 203
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 525
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 313
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sensor.c | 64
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sysparam.c | 290
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 14
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 106
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 12
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 83
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 26
-rw-r--r--  arch/powerpc/platforms/pseries/suspend.c | 44
35 files changed, 1679 insertions(+), 238 deletions(-)
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 213d5b815827..84476b646005 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -68,6 +68,7 @@ define_machine(c293_pcie) {
.init_IRQ = c293_pcie_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 3b085c7ee539..b564b5e23f7c 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -107,6 +107,12 @@ void __init mpc85xx_qe_init(void)
qe_reset();
of_node_put(np);
+}
+
+void __init mpc85xx_qe_par_io_init(void)
+{
+ struct device_node *np;
+
np = of_find_node_by_name(NULL, "par_io");
if (np) {
struct device_node *ucc;
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index fbd871e69754..8e4b1e1a4911 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -26,11 +26,13 @@
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/ehv_pic.h>
+#include <asm/qe_ic.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "smp.h"
+#include "mpc85xx.h"
void __init corenet_gen_pic_init(void)
{
@@ -38,6 +40,8 @@ void __init corenet_gen_pic_init(void)
unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
MPIC_NO_RESET;
+ struct device_node *np;
+
if (ppc_md.get_irq == mpic_get_coreint_irq)
flags |= MPIC_ENABLE_COREINT;
@@ -45,6 +49,13 @@ void __init corenet_gen_pic_init(void)
BUG_ON(mpic == NULL);
mpic_init(mpic);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
+ if (np) {
+ qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
+ qe_ic_cascade_high_mpic);
+ of_node_put(np);
+ }
}
/*
@@ -57,6 +68,8 @@ void __init corenet_gen_setup_arch(void)
swiotlb_detect_4g();
pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
+
+ mpc85xx_qe_init();
}
static const struct of_device_id of_device_ids[] = {
@@ -81,6 +94,9 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qoriq-pcie-v3.0",
},
+ {
+ .compatible = "fsl,qe",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
@@ -163,6 +179,7 @@ define_machine(corenet_generic) {
.init_IRQ = corenet_gen_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_coreint_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index e6285ae6f423..11790e074c8a 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -215,6 +215,7 @@ define_machine(ge_imp3a) {
.show_cpuinfo = ge_imp3a_show_cpuinfo,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 15ce4b55f117..a378ba3519e9 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -76,6 +76,7 @@ define_machine(mpc8536_ds) {
.init_IRQ = mpc8536_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx.h b/arch/powerpc/platforms/85xx/mpc85xx.h
index fc51dd4092e5..39056f6befeb 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx.h
@@ -10,8 +10,10 @@ static inline void __init mpc85xx_cpm2_pic_init(void) {}
#ifdef CONFIG_QUICC_ENGINE
extern void mpc85xx_qe_init(void);
+extern void mpc85xx_qe_par_io_init(void);
#else
static inline void __init mpc85xx_qe_init(void) {}
+static inline void __init mpc85xx_qe_par_io_init(void) {}
#endif
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 7a31a0e1df29..b0753e222086 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -385,6 +385,7 @@ define_machine(mpc85xx_cds) {
#ifdef CONFIG_PCI
.restart = mpc85xx_cds_restart,
.pcibios_fixup_bus = mpc85xx_cds_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#else
.restart = fsl_rstcr_restart,
#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 9ebb91ed96a3..ffdf02121a7c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -209,6 +209,7 @@ define_machine(mpc8544_ds) {
.init_IRQ = mpc85xx_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -223,6 +224,7 @@ define_machine(mpc8572_ds) {
.init_IRQ = mpc85xx_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -237,6 +239,7 @@ define_machine(p2020_ds) {
.init_IRQ = mpc85xx_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 34f3c5eb3bee..a392e94a07fa 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -239,6 +239,7 @@ static void __init mpc85xx_mds_qe_init(void)
struct device_node *np;
mpc85xx_qe_init();
+ mpc85xx_qe_par_io_init();
mpc85xx_mds_reset_ucc_phys();
if (machine_is(p1021_mds)) {
@@ -391,6 +392,7 @@ define_machine(mpc8568_mds) {
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
@@ -412,6 +414,7 @@ define_machine(mpc8569_mds) {
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
@@ -434,6 +437,7 @@ define_machine(p1021_mds) {
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index e15bdd18fdb2..e358bed66d01 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -86,10 +86,6 @@ void __init mpc85xx_rdb_pic_init(void)
*/
static void __init mpc85xx_rdb_setup_arch(void)
{
-#ifdef CONFIG_QUICC_ENGINE
- struct device_node *np;
-#endif
-
if (ppc_md.progress)
ppc_md.progress("mpc85xx_rdb_setup_arch()", 0);
@@ -99,8 +95,10 @@ static void __init mpc85xx_rdb_setup_arch(void)
#ifdef CONFIG_QUICC_ENGINE
mpc85xx_qe_init();
+ mpc85xx_qe_par_io_init();
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(p1025_rdb)) {
+ struct device_node *np;
struct ccsr_guts __iomem *guts;
@@ -233,6 +231,7 @@ define_machine(p2020_rdb) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -247,6 +246,7 @@ define_machine(p1020_rdb) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -261,6 +261,7 @@ define_machine(p1021_rdb_pc) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -275,6 +276,7 @@ define_machine(p2020_rdb_pc) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -289,6 +291,7 @@ define_machine(p1025_rdb) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -303,6 +306,7 @@ define_machine(p1020_mbg_pc) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -317,6 +321,7 @@ define_machine(p1020_utm_pc) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -331,6 +336,7 @@ define_machine(p1020_rdb_pc) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -345,6 +351,7 @@ define_machine(p1020_rdb_pd) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -359,6 +366,7 @@ define_machine(p1024_rdb) {
.init_IRQ = mpc85xx_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index d6a3dd311494..ad1a3d438a9e 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -78,6 +78,7 @@ define_machine(p1010_rdb) {
.init_IRQ = p1010_rdb_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e611e79f23ce..6ac986d3f8a3 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -567,6 +567,7 @@ define_machine(p1022_ds) {
.init_IRQ = p1022_ds_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 8c9297112b30..7a180f0308d5 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -147,6 +147,7 @@ define_machine(p1022_rdk) {
.init_IRQ = p1022_rdk_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 2ae9d490c3d9..0e614007acfb 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -126,6 +126,7 @@ define_machine(p1023_rds) {
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
@@ -140,5 +141,6 @@ define_machine(p1023_rdb) {
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 5cefc5a9a144..7f2673293549 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -66,6 +66,7 @@ define_machine(qemu_e500) {
.init_IRQ = qemu_e500_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_coreint_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index f62121825914..b07214666d65 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -135,6 +135,7 @@ define_machine(sbc8548) {
.restart = fsl_rstcr_restart,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index c25ff10f05ee..1eadb6d0dc64 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -77,6 +77,7 @@ static void __init twr_p1025_setup_arch(void)
#ifdef CONFIG_QUICC_ENGINE
mpc85xx_qe_init();
+ mpc85xx_qe_par_io_init();
#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
if (machine_is(twr_p1025)) {
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index dcbf7e42dce7..1a9c1085855f 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -170,6 +170,7 @@ define_machine(xes_mpc8572) {
.init_IRQ = xes_mpc85xx_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -184,6 +185,7 @@ define_machine(xes_mpc8548) {
.init_IRQ = xes_mpc85xx_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
@@ -198,6 +200,7 @@ define_machine(xes_mpc8540) {
.init_IRQ = xes_mpc85xx_pic_init,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+ .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 6d3c7a9fd047..2a7024d8d8b1 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -34,7 +34,6 @@ config MPC7448HPC2
select TSI108_BRIDGE
select DEFAULT_UIMAGE
select PPC_UDBG_16550
- select TSI108_BRIDGE
help
Select MPC7448HPC2 if configuring for Freescale MPC7448HPC2 (Taiga)
platform
@@ -44,19 +43,10 @@ config PPC_HOLLY
depends on EMBEDDED6xx
select TSI108_BRIDGE
select PPC_UDBG_16550
- select TSI108_BRIDGE
help
Select PPC_HOLLY if configuring for an IBM 750GX/CL Eval
Board with TSI108/9 bridge (Hickory/Holly)
-config PPC_PRPMC2800
- bool "Motorola-PrPMC2800"
- depends on EMBEDDED6xx
- select MV64X60
- select NOT_COHERENT_CACHE
- help
- This option enables support for the Motorola PrPMC2800 board
-
config PPC_C2K
bool "SBS/GEFanuc C2K board"
depends on EMBEDDED6xx
diff --git a/arch/powerpc/platforms/embedded6xx/Makefile b/arch/powerpc/platforms/embedded6xx/Makefile
index cdd48d402b93..f126a2a09981 100644
--- a/arch/powerpc/platforms/embedded6xx/Makefile
+++ b/arch/powerpc/platforms/embedded6xx/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_MPC7448HPC2) += mpc7448_hpc2.o
obj-$(CONFIG_LINKSTATION) += linkstation.o ls_uart.o
obj-$(CONFIG_STORCENTER) += storcenter.o
obj-$(CONFIG_PPC_HOLLY) += holly.o
-obj-$(CONFIG_PPC_PRPMC2800) += prpmc2800.o
obj-$(CONFIG_PPC_C2K) += c2k.o
obj-$(CONFIG_USBGECKO_UDBG) += usbgecko_udbg.o
obj-$(CONFIG_GAMECUBE_COMMON) += flipper-pic.o
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
deleted file mode 100644
index d455f08bea53..000000000000
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Board setup routines for the Motorola PrPMC2800
- *
- * Author: Dale Farnsworth <dale@farnsworth.org>
- *
- * 2007 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/seq_file.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/time.h>
-
-#include <mm/mmu_decl.h>
-
-#include <sysdev/mv64x60.h>
-
-#define MV64x60_MPP_CNTL_0 0x0000
-#define MV64x60_MPP_CNTL_2 0x0008
-
-#define MV64x60_GPP_IO_CNTL 0x0000
-#define MV64x60_GPP_LEVEL_CNTL 0x0010
-#define MV64x60_GPP_VALUE_SET 0x0018
-
-#define PLATFORM_NAME_MAX 32
-
-static char prpmc2800_platform_name[PLATFORM_NAME_MAX];
-
-static void __iomem *mv64x60_mpp_reg_base;
-static void __iomem *mv64x60_gpp_reg_base;
-
-static void __init prpmc2800_setup_arch(void)
-{
- struct device_node *np;
- phys_addr_t paddr;
- const unsigned int *reg;
-
- /*
- * ioremap mpp and gpp registers in case they are later
- * needed by prpmc2800_reset_board().
- */
- np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-mpp");
- reg = of_get_property(np, "reg", NULL);
- paddr = of_translate_address(np, reg);
- of_node_put(np);
- mv64x60_mpp_reg_base = ioremap(paddr, reg[1]);
-
- np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
- reg = of_get_property(np, "reg", NULL);
- paddr = of_translate_address(np, reg);
- of_node_put(np);
- mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
-
-#ifdef CONFIG_PCI
- mv64x60_pci_init();
-#endif
-
- printk("Motorola %s\n", prpmc2800_platform_name);
-}
-
-static void prpmc2800_reset_board(void)
-{
- u32 temp;
-
- local_irq_disable();
-
- temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0);
- temp &= 0xFFFF0FFF;
- out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_0, temp);
-
- temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL);
- temp |= 0x00000004;
- out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp);
-
- temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL);
- temp |= 0x00000004;
- out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp);
-
- temp = in_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2);
- temp &= 0xFFFF0FFF;
- out_le32(mv64x60_mpp_reg_base + MV64x60_MPP_CNTL_2, temp);
-
- temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL);
- temp |= 0x00080000;
- out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_LEVEL_CNTL, temp);
-
- temp = in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL);
- temp |= 0x00080000;
- out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_IO_CNTL, temp);
-
- out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_VALUE_SET, 0x00080004);
-}
-
-static void prpmc2800_restart(char *cmd)
-{
- volatile ulong i = 10000000;
-
- prpmc2800_reset_board();
-
- while (i-- > 0);
- panic("restart failed\n");
-}
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define PPRPM2800_COHERENCY_SETTING "off"
-#else
-#define PPRPM2800_COHERENCY_SETTING "on"
-#endif
-
-void prpmc2800_show_cpuinfo(struct seq_file *m)
-{
- seq_printf(m, "Vendor\t\t: Motorola\n");
- seq_printf(m, "coherency\t: %s\n", PPRPM2800_COHERENCY_SETTING);
-}
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init prpmc2800_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
- unsigned long len = PLATFORM_NAME_MAX;
- void *m;
-
- if (!of_flat_dt_is_compatible(root, "motorola,PrPMC2800"))
- return 0;
-
- /* Update ppc_md.name with name from dt */
- m = of_get_flat_dt_prop(root, "model", &len);
- if (m)
- strncpy(prpmc2800_platform_name, m,
- min((int)len, PLATFORM_NAME_MAX - 1));
-
- _set_L2CR(_get_L2CR() | L2CR_L2E);
- return 1;
-}
-
-define_machine(prpmc2800){
- .name = prpmc2800_platform_name,
- .probe = prpmc2800_probe,
- .setup_arch = prpmc2800_setup_arch,
- .init_early = mv64x60_init_early,
- .show_cpuinfo = prpmc2800_show_cpuinfo,
- .init_IRQ = mv64x60_init_irq,
- .get_irq = mv64x60_get_irq,
- .restart = prpmc2800_restart,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 8d767fde5a6a..f324ea099503 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,6 +1,6 @@
-obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
+obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
-obj-y += rng.o
+obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
new file mode 100644
index 000000000000..cd0c1354d404
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -0,0 +1,203 @@
+/*
+ * PowerNV OPAL asynchronous completion interfaces
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
+#include <asm/opal.h>
+
+#define N_ASYNC_COMPLETIONS 64
+
+static DECLARE_BITMAP(opal_async_complete_map, N_ASYNC_COMPLETIONS) = {~0UL};
+static DECLARE_BITMAP(opal_async_token_map, N_ASYNC_COMPLETIONS);
+static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait);
+static DEFINE_SPINLOCK(opal_async_comp_lock);
+static struct semaphore opal_async_sem;
+static struct opal_msg *opal_async_responses;
+static unsigned int opal_max_async_tokens;
+
+int __opal_async_get_token(void)
+{
+ unsigned long flags;
+ int token;
+
+ spin_lock_irqsave(&opal_async_comp_lock, flags);
+ token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
+ if (token >= opal_max_async_tokens) {
+ token = -EBUSY;
+ goto out;
+ }
+
+ if (__test_and_set_bit(token, opal_async_token_map)) {
+ token = -EBUSY;
+ goto out;
+ }
+
+ __clear_bit(token, opal_async_complete_map);
+
+out:
+ spin_unlock_irqrestore(&opal_async_comp_lock, flags);
+ return token;
+}
+
+int opal_async_get_token_interruptible(void)
+{
+ int token;
+
+ /* Wait until a token is available */
+ if (down_interruptible(&opal_async_sem))
+ return -ERESTARTSYS;
+
+ token = __opal_async_get_token();
+ if (token < 0)
+ up(&opal_async_sem);
+
+ return token;
+}
+
+int __opal_async_release_token(int token)
+{
+ unsigned long flags;
+
+ if (token < 0 || token >= opal_max_async_tokens) {
+ pr_err("%s: Passed token is out of range, token %d\n",
+ __func__, token);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&opal_async_comp_lock, flags);
+ __set_bit(token, opal_async_complete_map);
+ __clear_bit(token, opal_async_token_map);
+ spin_unlock_irqrestore(&opal_async_comp_lock, flags);
+
+ return 0;
+}
+
+int opal_async_release_token(int token)
+{
+ int ret;
+
+ ret = __opal_async_release_token(token);
+ if (ret)
+ return ret;
+
+ up(&opal_async_sem);
+
+ return 0;
+}
+
+int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
+{
+ if (token >= opal_max_async_tokens) {
+ pr_err("%s: Invalid token passed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!msg) {
+ pr_err("%s: Invalid message pointer passed\n", __func__);
+ return -EINVAL;
+ }
+
+ wait_event(opal_async_wait, test_bit(token, opal_async_complete_map));
+ memcpy(msg, &opal_async_responses[token], sizeof(*msg));
+
+ return 0;
+}
+
+static int opal_async_comp_event(struct notifier_block *nb,
+ unsigned long msg_type, void *msg)
+{
+ struct opal_msg *comp_msg = msg;
+ unsigned long flags;
+
+ if (msg_type != OPAL_MSG_ASYNC_COMP)
+ return 0;
+
+ memcpy(&opal_async_responses[comp_msg->params[0]], comp_msg,
+ sizeof(*comp_msg));
+ spin_lock_irqsave(&opal_async_comp_lock, flags);
+ __set_bit(comp_msg->params[0], opal_async_complete_map);
+ spin_unlock_irqrestore(&opal_async_comp_lock, flags);
+
+ wake_up(&opal_async_wait);
+
+ return 0;
+}
+
+static struct notifier_block opal_async_comp_nb = {
+ .notifier_call = opal_async_comp_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int __init opal_async_comp_init(void)
+{
+ struct device_node *opal_node;
+ const __be32 *async;
+ int err;
+
+ opal_node = of_find_node_by_path("/ibm,opal");
+ if (!opal_node) {
+ pr_err("%s: Opal node not found\n", __func__);
+ err = -ENOENT;
+ goto out;
+ }
+
+ async = of_get_property(opal_node, "opal-msg-async-num", NULL);
+ if (!async) {
+ pr_err("%s: %s has no opal-msg-async-num\n",
+ __func__, opal_node->full_name);
+ err = -ENOENT;
+ goto out_opal_node;
+ }
+
+ opal_max_async_tokens = be32_to_cpup(async);
+ if (opal_max_async_tokens > N_ASYNC_COMPLETIONS)
+ opal_max_async_tokens = N_ASYNC_COMPLETIONS;
+
+ err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP,
+ &opal_async_comp_nb);
+ if (err) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, err);
+ goto out_opal_node;
+ }
+
+ opal_async_responses = kzalloc(
+ sizeof(*opal_async_responses) * opal_max_async_tokens,
+ GFP_KERNEL);
+ if (!opal_async_responses) {
+ pr_err("%s: Out of memory, failed to do asynchronous "
+ "completion init\n", __func__);
+ err = -ENOMEM;
+ goto out_opal_node;
+ }
+
+ /* Initialize to one less than the maximum number of tokens available,
+ * so that one token is always held in reserve for an emergency,
+ * synchronous caller of __opal_async_get_token().
+ */
+ sema_init(&opal_async_sem, opal_max_async_tokens - 1);
+
+out_opal_node:
+ of_node_put(opal_node);
+out:
+ return err;
+}
+subsys_initcall(opal_async_comp_init);
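The intended calling pattern for the token API above is the one used later in this series by opal-sensor.c and opal-sysparam.c. A minimal sketch follows; opal_example_async_call() and opal_do_foo() are hypothetical placeholders for any OPAL call that completes asynchronously, not part of this patch.

/*
 * Illustrative sketch only: mirrors the convention used by
 * opal_get_sensor_data() and opal_get_sys_param() in this series.
 * opal_do_foo() is a hypothetical OPAL call that returns
 * OPAL_ASYNC_COMPLETION and later finishes via an OPAL_MSG_ASYNC_COMP
 * message carrying our token.
 */
#include <asm/opal.h>

static int opal_example_async_call(void)
{
	struct opal_msg msg;
	int rc, token;

	/* Sleep (interruptibly) until a completion token is free */
	token = opal_async_get_token_interruptible();
	if (token < 0)
		return token;

	/* Kick off the firmware operation, tagged with our token */
	rc = opal_do_foo(token /*, args... */);
	if (rc != OPAL_ASYNC_COMPLETION)
		goto out;

	/* Block until opal_async_comp_event() marks the token complete */
	rc = opal_async_wait_response(token, &msg);
	if (rc)
		goto out;

	/* By convention, params[1] carries the OPAL return code */
	rc = msg.params[1];
out:
	opal_async_release_token(token);
	return rc;
}

Note that the semaphore is initialised to one less than the number of tokens, so a synchronous caller can still obtain a token directly through __opal_async_get_token() in an emergency.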
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
new file mode 100644
index 000000000000..0c767c561dc9
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -0,0 +1,525 @@
+/*
+ * PowerNV OPAL Dump Interface
+ *
+ * Copyright 2013,2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+
+#include <asm/opal.h>
+
+#define DUMP_TYPE_FSP 0x01
+
+struct dump_obj {
+ struct kobject kobj;
+ struct bin_attribute dump_attr;
+ uint32_t id; /* becomes object name */
+ uint32_t type;
+ uint32_t size;
+ char *buffer;
+};
+#define to_dump_obj(x) container_of(x, struct dump_obj, kobj)
+
+struct dump_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr,
+ const char *buf, size_t count);
+};
+#define to_dump_attr(x) container_of(x, struct dump_attribute, attr)
+
+static ssize_t dump_id_show(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%x\n", dump_obj->id);
+}
+
+static const char* dump_type_to_string(uint32_t type)
+{
+ switch (type) {
+ case 0x01: return "SP Dump";
+ case 0x02: return "System/Platform Dump";
+ case 0x03: return "SMA Dump";
+ default: return "unknown";
+ }
+}
+
+static ssize_t dump_type_show(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ char *buf)
+{
+
+ return sprintf(buf, "0x%x %s\n", dump_obj->type,
+ dump_type_to_string(dump_obj->type));
+}
+
+static ssize_t dump_ack_show(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "ack - acknowledge dump\n");
+}
+
+/*
+ * Send acknowledgement to OPAL
+ */
+static int64_t dump_send_ack(uint32_t dump_id)
+{
+ int rc;
+
+ rc = opal_dump_ack(dump_id);
+ if (rc)
+ pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n",
+ __func__, dump_id, rc);
+ return rc;
+}
+
+static void delay_release_kobj(void *kobj)
+{
+ kobject_put((struct kobject *)kobj);
+}
+
+static ssize_t dump_ack_store(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ dump_send_ack(dump_obj->id);
+ sysfs_schedule_callback(&dump_obj->kobj, delay_release_kobj,
+ &dump_obj->kobj, THIS_MODULE);
+ return count;
+}
+
+/* Attributes of a dump
+ * The binary attribute of the dump itself is dynamic
+ * due to the dynamic size of the dump
+ */
+static struct dump_attribute id_attribute =
+ __ATTR(id, 0666, dump_id_show, NULL);
+static struct dump_attribute type_attribute =
+ __ATTR(type, 0666, dump_type_show, NULL);
+static struct dump_attribute ack_attribute =
+ __ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store);
+
+static ssize_t init_dump_show(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "1 - initiate dump\n");
+}
+
+static int64_t dump_fips_init(uint8_t type)
+{
+ int rc;
+
+ rc = opal_dump_init(type);
+ if (rc)
+ pr_warn("%s: Failed to initiate FipS dump (%d)\n",
+ __func__, rc);
+ return rc;
+}
+
+static ssize_t init_dump_store(struct dump_obj *dump_obj,
+ struct dump_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ dump_fips_init(DUMP_TYPE_FSP);
+ pr_info("%s: Initiated FSP dump\n", __func__);
+ return count;
+}
+
+static struct dump_attribute initiate_attribute =
+ __ATTR(initiate_dump, 0600, init_dump_show, init_dump_store);
+
+static struct attribute *initiate_attrs[] = {
+ &initiate_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group initiate_attr_group = {
+ .attrs = initiate_attrs,
+};
+
+static struct kset *dump_kset;
+
+static ssize_t dump_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct dump_attribute *attribute;
+ struct dump_obj *dump;
+
+ attribute = to_dump_attr(attr);
+ dump = to_dump_obj(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(dump, attribute, buf);
+}
+
+static ssize_t dump_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct dump_attribute *attribute;
+ struct dump_obj *dump;
+
+ attribute = to_dump_attr(attr);
+ dump = to_dump_obj(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(dump, attribute, buf, len);
+}
+
+static const struct sysfs_ops dump_sysfs_ops = {
+ .show = dump_attr_show,
+ .store = dump_attr_store,
+};
+
+static void dump_release(struct kobject *kobj)
+{
+ struct dump_obj *dump;
+
+ dump = to_dump_obj(kobj);
+ vfree(dump->buffer);
+ kfree(dump);
+}
+
+static struct attribute *dump_default_attrs[] = {
+ &id_attribute.attr,
+ &type_attribute.attr,
+ &ack_attribute.attr,
+ NULL,
+};
+
+static struct kobj_type dump_ktype = {
+ .sysfs_ops = &dump_sysfs_ops,
+ .release = &dump_release,
+ .default_attrs = dump_default_attrs,
+};
+
+static void free_dump_sg_list(struct opal_sg_list *list)
+{
+ struct opal_sg_list *sg1;
+ while (list) {
+ sg1 = list->next;
+ kfree(list);
+ list = sg1;
+ }
+ list = NULL;
+}
+
+static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
+{
+ struct opal_sg_list *sg1, *list = NULL;
+ void *addr;
+ int64_t size;
+
+ addr = dump->buffer;
+ size = dump->size;
+
+ sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sg1)
+ goto nomem;
+
+ list = sg1;
+ sg1->num_entries = 0;
+ while (size > 0) {
+ /* Translate virtual address to physical address */
+ sg1->entry[sg1->num_entries].data =
+ (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
+
+ if (size > PAGE_SIZE)
+ sg1->entry[sg1->num_entries].length = PAGE_SIZE;
+ else
+ sg1->entry[sg1->num_entries].length = size;
+
+ sg1->num_entries++;
+ if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
+ sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sg1->next)
+ goto nomem;
+
+ sg1 = sg1->next;
+ sg1->num_entries = 0;
+ }
+ addr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ return list;
+
+nomem:
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ free_dump_sg_list(list);
+ return NULL;
+}
+
+static void sglist_to_phy_addr(struct opal_sg_list *list)
+{
+ struct opal_sg_list *sg, *next;
+
+ for (sg = list; sg; sg = next) {
+ next = sg->next;
+ /* Don't translate NULL pointer for last entry */
+ if (sg->next)
+ sg->next = (struct opal_sg_list *)__pa(sg->next);
+ else
+ sg->next = NULL;
+
+ /* Convert num_entries to length */
+ sg->num_entries =
+ sg->num_entries * sizeof(struct opal_sg_entry) + 16;
+ }
+}
+
+static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+{
+ int rc;
+ *type = 0xffffffff;
+
+ rc = opal_dump_info2(id, size, type);
+
+ if (rc == OPAL_PARAMETER)
+ rc = opal_dump_info(id, size);
+
+ if (rc)
+ pr_warn("%s: Failed to get dump info (%d)\n",
+ __func__, rc);
+ return rc;
+}
+
+static int64_t dump_read_data(struct dump_obj *dump)
+{
+ struct opal_sg_list *list;
+ uint64_t addr;
+ int64_t rc;
+
+ /* Allocate memory */
+ dump->buffer = vzalloc(PAGE_ALIGN(dump->size));
+ if (!dump->buffer) {
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Generate SG list */
+ list = dump_data_to_sglist(dump);
+ if (!list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Translate sg list addr to real address */
+ sglist_to_phy_addr(list);
+
+ /* First entry address */
+ addr = __pa(list);
+
+ /* Fetch data */
+ rc = OPAL_BUSY_EVENT;
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_dump_read(dump->id, addr);
+ if (rc == OPAL_BUSY_EVENT) {
+ opal_poll_events(NULL);
+ msleep(20);
+ }
+ }
+
+ if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL)
+ pr_warn("%s: Extract dump failed for ID 0x%x\n",
+ __func__, dump->id);
+
+ /* Free SG list */
+ free_dump_sg_list(list);
+
+out:
+ return rc;
+}
+
+static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ ssize_t rc;
+
+ struct dump_obj *dump = to_dump_obj(kobj);
+
+ if (!dump->buffer) {
+ rc = dump_read_data(dump);
+
+ if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
+ vfree(dump->buffer);
+ dump->buffer = NULL;
+
+ return -EIO;
+ }
+ if (rc == OPAL_PARTIAL) {
+ /* On a partial read, we just return EIO
+ * and rely on userspace to ask us to try
+ * again.
+ */
+ pr_info("%s: Platform dump partially read.ID = 0x%x\n",
+ __func__, dump->id);
+ return -EIO;
+ }
+ }
+
+ memcpy(buffer, dump->buffer + pos, count);
+
+ /* You may think we could free the dump buffer now and retrieve
+ * it again later if needed, but due to a current firmware limitation
+ * that's not possible. So once the dump has been read into userspace,
+ * we keep it around until userspace acknowledges it.
+ */
+
+ return count;
+}
+
+static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
+ uint32_t type)
+{
+ struct dump_obj *dump;
+ int rc;
+
+ dump = kzalloc(sizeof(*dump), GFP_KERNEL);
+ if (!dump)
+ return NULL;
+
+ dump->kobj.kset = dump_kset;
+
+ kobject_init(&dump->kobj, &dump_ktype);
+
+ sysfs_bin_attr_init(&dump->dump_attr);
+
+ dump->dump_attr.attr.name = "dump";
+ dump->dump_attr.attr.mode = 0400;
+ dump->dump_attr.size = size;
+ dump->dump_attr.read = dump_attr_read;
+
+ dump->id = id;
+ dump->size = size;
+ dump->type = type;
+
+ rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
+ if (rc) {
+ kobject_put(&dump->kobj);
+ return NULL;
+ }
+
+ rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
+ if (rc) {
+ kobject_put(&dump->kobj);
+ return NULL;
+ }
+
+ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+ __func__, dump->id, dump->size);
+
+ kobject_uevent(&dump->kobj, KOBJ_ADD);
+
+ return dump;
+}
+
+static int process_dump(void)
+{
+ int rc;
+ uint32_t dump_id, dump_size, dump_type;
+ struct dump_obj *dump;
+ char name[22];
+
+ rc = dump_read_info(&dump_id, &dump_size, &dump_type);
+ if (rc != OPAL_SUCCESS)
+ return rc;
+
+ sprintf(name, "0x%x-0x%x", dump_type, dump_id);
+
+ /* we may get notified twice, let's handle
+ * that gracefully and not create two conflicting
+ * entries.
+ */
+ if (kset_find_obj(dump_kset, name))
+ return 0;
+
+ dump = create_dump_obj(dump_id, dump_size, dump_type);
+ if (!dump)
+ return -1;
+
+ return 0;
+}
+
+static void dump_work_fn(struct work_struct *work)
+{
+ process_dump();
+}
+
+static DECLARE_WORK(dump_work, dump_work_fn);
+
+static void schedule_process_dump(void)
+{
+ schedule_work(&dump_work);
+}
+
+/*
+ * New dump available notification
+ *
+ * Once we get notification, we add sysfs entries for it.
+ * We only fetch the dump on demand, and create sysfs asynchronously.
+ */
+static int dump_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ if (events & OPAL_EVENT_DUMP_AVAIL)
+ schedule_process_dump();
+
+ return 0;
+}
+
+static struct notifier_block dump_nb = {
+ .notifier_call = dump_event,
+ .next = NULL,
+ .priority = 0
+};
+
+void __init opal_platform_dump_init(void)
+{
+ int rc;
+
+ dump_kset = kset_create_and_add("dump", NULL, opal_kobj);
+ if (!dump_kset) {
+ pr_warn("%s: Failed to create dump kset\n", __func__);
+ return;
+ }
+
+ rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group);
+ if (rc) {
+ pr_warn("%s: Failed to create initiate dump attr group\n",
+ __func__);
+ kobject_put(&dump_kset->kobj);
+ return;
+ }
+
+ rc = opal_notifier_register(&dump_nb);
+ if (rc) {
+ pr_warn("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, rc);
+ return;
+ }
+
+ opal_dump_resend_notification();
+}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
new file mode 100644
index 000000000000..1d7355bc9db0
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -0,0 +1,313 @@
+/*
+ * Error log support on PowerNV.
+ *
+ * Copyright 2013,2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/fcntl.h>
+#include <linux/kobject.h>
+#include <asm/uaccess.h>
+#include <asm/opal.h>
+
+struct elog_obj {
+ struct kobject kobj;
+ struct bin_attribute raw_attr;
+ uint64_t id;
+ uint64_t type;
+ size_t size;
+ char *buffer;
+};
+#define to_elog_obj(x) container_of(x, struct elog_obj, kobj)
+
+struct elog_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct elog_obj *elog, struct elog_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct elog_obj *elog, struct elog_attribute *attr,
+ const char *buf, size_t count);
+};
+#define to_elog_attr(x) container_of(x, struct elog_attribute, attr)
+
+static ssize_t elog_id_show(struct elog_obj *elog_obj,
+ struct elog_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%llx\n", elog_obj->id);
+}
+
+static const char *elog_type_to_string(uint64_t type)
+{
+ switch (type) {
+ case 0: return "PEL";
+ default: return "unknown";
+ }
+}
+
+static ssize_t elog_type_show(struct elog_obj *elog_obj,
+ struct elog_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "0x%llx %s\n",
+ elog_obj->type,
+ elog_type_to_string(elog_obj->type));
+}
+
+static ssize_t elog_ack_show(struct elog_obj *elog_obj,
+ struct elog_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "ack - acknowledge log message\n");
+}
+
+static void delay_release_kobj(void *kobj)
+{
+ kobject_put((struct kobject *)kobj);
+}
+
+static ssize_t elog_ack_store(struct elog_obj *elog_obj,
+ struct elog_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ opal_send_ack_elog(elog_obj->id);
+ sysfs_schedule_callback(&elog_obj->kobj, delay_release_kobj,
+ &elog_obj->kobj, THIS_MODULE);
+ return count;
+}
+
+static struct elog_attribute id_attribute =
+ __ATTR(id, 0666, elog_id_show, NULL);
+static struct elog_attribute type_attribute =
+ __ATTR(type, 0666, elog_type_show, NULL);
+static struct elog_attribute ack_attribute =
+ __ATTR(acknowledge, 0660, elog_ack_show, elog_ack_store);
+
+static struct kset *elog_kset;
+
+static ssize_t elog_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct elog_attribute *attribute;
+ struct elog_obj *elog;
+
+ attribute = to_elog_attr(attr);
+ elog = to_elog_obj(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(elog, attribute, buf);
+}
+
+static ssize_t elog_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct elog_attribute *attribute;
+ struct elog_obj *elog;
+
+ attribute = to_elog_attr(attr);
+ elog = to_elog_obj(kobj);
+
+ if (!attribute->store)
+ return -EIO;
+
+ return attribute->store(elog, attribute, buf, len);
+}
+
+static const struct sysfs_ops elog_sysfs_ops = {
+ .show = elog_attr_show,
+ .store = elog_attr_store,
+};
+
+static void elog_release(struct kobject *kobj)
+{
+ struct elog_obj *elog;
+
+ elog = to_elog_obj(kobj);
+ kfree(elog->buffer);
+ kfree(elog);
+}
+
+static struct attribute *elog_default_attrs[] = {
+ &id_attribute.attr,
+ &type_attribute.attr,
+ &ack_attribute.attr,
+ NULL,
+};
+
+static struct kobj_type elog_ktype = {
+ .sysfs_ops = &elog_sysfs_ops,
+ .release = &elog_release,
+ .default_attrs = elog_default_attrs,
+};
+
+/* Maximum size of a single log on FSP is 16KB */
+#define OPAL_MAX_ERRLOG_SIZE 16384
+
+static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ int opal_rc;
+
+ struct elog_obj *elog = to_elog_obj(kobj);
+
+ /* We may have had an error reading before, so let's retry */
+ if (!elog->buffer) {
+ elog->buffer = kzalloc(elog->size, GFP_KERNEL);
+ if (!elog->buffer)
+ return -EIO;
+
+ opal_rc = opal_read_elog(__pa(elog->buffer),
+ elog->size, elog->id);
+ if (opal_rc != OPAL_SUCCESS) {
+ pr_err("ELOG: log read failed for log-id=%llx\n",
+ elog->id);
+ kfree(elog->buffer);
+ elog->buffer = NULL;
+ return -EIO;
+ }
+ }
+
+ memcpy(buffer, elog->buffer + pos, count);
+
+ return count;
+}
+
+static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
+{
+ struct elog_obj *elog;
+ int rc;
+
+ elog = kzalloc(sizeof(*elog), GFP_KERNEL);
+ if (!elog)
+ return NULL;
+
+ elog->kobj.kset = elog_kset;
+
+ kobject_init(&elog->kobj, &elog_ktype);
+
+ sysfs_bin_attr_init(&elog->raw_attr);
+
+ elog->raw_attr.attr.name = "raw";
+ elog->raw_attr.attr.mode = 0400;
+ elog->raw_attr.size = size;
+ elog->raw_attr.read = raw_attr_read;
+
+ elog->id = id;
+ elog->size = size;
+ elog->type = type;
+
+ elog->buffer = kzalloc(elog->size, GFP_KERNEL);
+
+ if (elog->buffer) {
+ rc = opal_read_elog(__pa(elog->buffer),
+ elog->size, elog->id);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("ELOG: log read failed for log-id=%llx\n",
+ elog->id);
+ kfree(elog->buffer);
+ elog->buffer = NULL;
+ }
+ }
+
+ rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
+ if (rc) {
+ kobject_put(&elog->kobj);
+ return NULL;
+ }
+
+ rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
+ if (rc) {
+ kobject_put(&elog->kobj);
+ return NULL;
+ }
+
+ kobject_uevent(&elog->kobj, KOBJ_ADD);
+
+ return elog;
+}
+
+static void elog_work_fn(struct work_struct *work)
+{
+ size_t elog_size;
+ uint64_t log_id;
+ uint64_t elog_type;
+ int rc;
+ char name[2+16+1];
+
+ rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("ELOG: Opal log read failed\n");
+ return;
+ }
+
+ BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+
+ if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
+ elog_size = OPAL_MAX_ERRLOG_SIZE;
+
+ sprintf(name, "0x%llx", log_id);
+
+ /* we may get notified twice, let's handle
+ * that gracefully and not create two conflicting
+ * entries.
+ */
+ if (kset_find_obj(elog_kset, name))
+ return;
+
+ create_elog_obj(log_id, elog_size, elog_type);
+}
+
+static DECLARE_WORK(elog_work, elog_work_fn);
+
+static int elog_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ /* check for error log event */
+ if (events & OPAL_EVENT_ERROR_LOG_AVAIL)
+ schedule_work(&elog_work);
+ return 0;
+}
+
+static struct notifier_block elog_nb = {
+ .notifier_call = elog_event,
+ .next = NULL,
+ .priority = 0
+};
+
+int __init opal_elog_init(void)
+{
+ int rc = 0;
+
+ elog_kset = kset_create_and_add("elog", NULL, opal_kobj);
+ if (!elog_kset) {
+ pr_warn("%s: failed to create elog kset\n", __func__);
+ return -1;
+ }
+
+ rc = opal_notifier_register(&elog_nb);
+ if (rc) {
+ pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* We are now ready to pull error logs from opal. */
+ opal_resend_pending_logs();
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
new file mode 100644
index 000000000000..663cc9c65613
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -0,0 +1,64 @@
+/*
+ * PowerNV sensor code
+ *
+ * Copyright (C) 2013 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <asm/opal.h>
+
+static DEFINE_MUTEX(opal_sensor_mutex);
+
+/*
+ * Return sensor data to the calling driver for the requested sensor handle.
+ * A handle is an opaque firmware identifier that the driver reads from the
+ * device tree.
+ */
+int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
+{
+ int ret, token;
+ struct opal_msg msg;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ pr_err("%s: Couldn't get the token, returning\n", __func__);
+ ret = token;
+ goto out;
+ }
+
+ mutex_lock(&opal_sensor_mutex);
+ ret = opal_sensor_read(sensor_hndl, token, sensor_data);
+ if (ret != OPAL_ASYNC_COMPLETION)
+ goto out_token;
+
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
+
+ ret = msg.params[1];
+
+out_token:
+ mutex_unlock(&opal_sensor_mutex);
+ opal_async_release_token(token);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(opal_get_sensor_data);
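A consumer of opal_get_sensor_data() only needs the opaque handle published in the device tree. A minimal, hypothetical sketch is below; the "sensor-data" property name and node layout are assumptions, not defined by this patch.

/*
 * Hypothetical consumer sketch: the "sensor-data" property name and the
 * shape of the device-tree node are assumptions for illustration only.
 */
#include <linux/of.h>
#include <linux/printk.h>
#include <asm/opal.h>

static int example_read_sensor(struct device_node *np)
{
	u32 handle, value;
	int rc;

	/* Firmware publishes an opaque sensor handle in the device tree */
	rc = of_property_read_u32(np, "sensor-data", &handle);
	if (rc)
		return rc;

	rc = opal_get_sensor_data(handle, &value);
	if (rc)
		return rc;

	pr_info("sensor 0x%x reads %u\n", handle, value);
	return 0;
}

opal_get_sensor_data() already serialises firmware access with its own mutex, so a caller like this needs no additional locking around the call.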
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
new file mode 100644
index 000000000000..0bd249a26f30
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -0,0 +1,290 @@
+/*
+ * PowerNV system parameter code
+ *
+ * Copyright (C) 2013 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/gfp.h>
+#include <linux/stat.h>
+#include <asm/opal.h>
+
+#define MAX_PARAM_DATA_LEN 64
+
+static DEFINE_MUTEX(opal_sysparam_mutex);
+static struct kobject *sysparam_kobj;
+static void *param_data_buf;
+
+struct param_attr {
+ struct list_head list;
+ u32 param_id;
+ u32 param_size;
+ struct kobj_attribute kobj_attr;
+};
+
+static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+{
+ struct opal_msg msg;
+ int ret, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("%s: Couldn't get the token, returning\n",
+ __func__);
+ ret = token;
+ goto out;
+ }
+
+ ret = opal_get_param(token, param_id, (u64)buffer, length);
+ if (ret != OPAL_ASYNC_COMPLETION)
+ goto out_token;
+
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
+
+ ret = msg.params[1];
+
+out_token:
+ opal_async_release_token(token);
+out:
+ return ret;
+}
+
+static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
+{
+ struct opal_msg msg;
+ int ret, token;
+
+ token = opal_async_get_token_interruptible();
+ if (token < 0) {
+ if (token != -ERESTARTSYS)
+ pr_err("%s: Couldn't get the token, returning\n",
+ __func__);
+ ret = token;
+ goto out;
+ }
+
+ ret = opal_set_param(token, param_id, (u64)buffer, length);
+
+ if (ret != OPAL_ASYNC_COMPLETION)
+ goto out_token;
+
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
+
+ ret = msg.params[1];
+
+out_token:
+ opal_async_release_token(token);
+out:
+ return ret;
+}
+
+static ssize_t sys_param_show(struct kobject *kobj,
+ struct kobj_attribute *kobj_attr, char *buf)
+{
+ struct param_attr *attr = container_of(kobj_attr, struct param_attr,
+ kobj_attr);
+ int ret;
+
+ mutex_lock(&opal_sysparam_mutex);
+ ret = opal_get_sys_param(attr->param_id, attr->param_size,
+ param_data_buf);
+ if (ret)
+ goto out;
+
+ memcpy(buf, param_data_buf, attr->param_size);
+
+out:
+ mutex_unlock(&opal_sysparam_mutex);
+ return ret ? ret : attr->param_size;
+}
+
+static ssize_t sys_param_store(struct kobject *kobj,
+ struct kobj_attribute *kobj_attr, const char *buf, size_t count)
+{
+ struct param_attr *attr = container_of(kobj_attr, struct param_attr,
+ kobj_attr);
+ int ret;
+
+ mutex_lock(&opal_sysparam_mutex);
+ memcpy(param_data_buf, buf, count);
+ ret = opal_set_sys_param(attr->param_id, attr->param_size,
+ param_data_buf);
+ mutex_unlock(&opal_sysparam_mutex);
+ return ret ? ret : count;
+}
+
+void __init opal_sys_param_init(void)
+{
+ struct device_node *sysparam;
+ struct param_attr *attr;
+ u32 *id, *size;
+ int count, i;
+ u8 *perm;
+
+ if (!opal_kobj) {
+ pr_warn("SYSPARAM: opal kobject is not available\n");
+ goto out;
+ }
+
+ sysparam_kobj = kobject_create_and_add("sysparams", opal_kobj);
+ if (!sysparam_kobj) {
+ pr_err("SYSPARAM: Failed to create sysparam kobject\n");
+ goto out;
+ }
+
+ /* Allocate big enough buffer for any get/set transactions */
+ param_data_buf = kzalloc(MAX_PARAM_DATA_LEN, GFP_KERNEL);
+ if (!param_data_buf) {
+ pr_err("SYSPARAM: Failed to allocate memory for param data "
+ "buf\n");
+ goto out_kobj_put;
+ }
+
+ sysparam = of_find_node_by_path("/ibm,opal/sysparams");
+ if (!sysparam) {
+ pr_err("SYSPARAM: Opal sysparam node not found\n");
+ goto out_param_buf;
+ }
+
+ if (!of_device_is_compatible(sysparam, "ibm,opal-sysparams")) {
+ pr_err("SYSPARAM: Opal sysparam node not compatible\n");
+ goto out_node_put;
+ }
+
+ /* Number of parameters exposed through DT */
+ count = of_property_count_strings(sysparam, "param-name");
+ if (count < 0) {
+ pr_err("SYSPARAM: No string found of property param-name in "
+ "the node %s\n", sysparam->name);
+ goto out_node_put;
+ }
+
+ id = kzalloc(sizeof(*id) * count, GFP_KERNEL);
+ if (!id) {
+ pr_err("SYSPARAM: Failed to allocate memory to read parameter "
+ "id\n");
+ goto out_node_put;
+ }
+
+ size = kzalloc(sizeof(*size) * count, GFP_KERNEL);
+ if (!size) {
+ pr_err("SYSPARAM: Failed to allocate memory to read parameter "
+ "size\n");
+ goto out_free_id;
+ }
+
+ perm = kzalloc(sizeof(*perm) * count, GFP_KERNEL);
+ if (!perm) {
+ pr_err("SYSPARAM: Failed to allocate memory to read supported "
+ "action on the parameter");
+ goto out_free_size;
+ }
+
+ if (of_property_read_u32_array(sysparam, "param-id", id, count)) {
+ pr_err("SYSPARAM: Missing property param-id in the DT\n");
+ goto out_free_perm;
+ }
+
+ if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
+ pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+ goto out_free_perm;
+ }
+
+
+ if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
+ pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+ goto out_free_perm;
+ }
+
+ attr = kzalloc(sizeof(*attr) * count, GFP_KERNEL);
+ if (!attr) {
+ pr_err("SYSPARAM: Failed to allocate memory for parameter "
+ "attributes\n");
+ goto out_free_perm;
+ }
+
+ /* For each of the parameters, populate the parameter attributes */
+ for (i = 0; i < count; i++) {
+ sysfs_attr_init(&attr[i].kobj_attr.attr);
+ attr[i].param_id = id[i];
+ attr[i].param_size = size[i];
+ if (of_property_read_string_index(sysparam, "param-name", i,
+ &attr[i].kobj_attr.attr.name))
+ continue;
+
+ /* If the parameter is read-only or read-write */
+ switch (perm[i] & 3) {
+ case OPAL_SYSPARAM_READ:
+ attr[i].kobj_attr.attr.mode = S_IRUGO;
+ break;
+ case OPAL_SYSPARAM_WRITE:
+ attr[i].kobj_attr.attr.mode = S_IWUGO;
+ break;
+ case OPAL_SYSPARAM_RW:
+ attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO;
+ break;
+ default:
+ break;
+ }
+
+ attr[i].kobj_attr.show = sys_param_show;
+ attr[i].kobj_attr.store = sys_param_store;
+
+ if (sysfs_create_file(sysparam_kobj, &attr[i].kobj_attr.attr)) {
+ pr_err("SYSPARAM: Failed to create sysfs file %s\n",
+ attr[i].kobj_attr.attr.name);
+ goto out_free_attr;
+ }
+ }
+
+ kfree(perm);
+ kfree(size);
+ kfree(id);
+ of_node_put(sysparam);
+ return;
+
+out_free_attr:
+ kfree(attr);
+out_free_perm:
+ kfree(perm);
+out_free_size:
+ kfree(size);
+out_free_id:
+ kfree(id);
+out_node_put:
+ of_node_put(sysparam);
+out_param_buf:
+ kfree(param_data_buf);
+out_kobj_put:
+ kobject_put(sysparam_kobj);
+out:
+ return;
+}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 3e8829c40fbb..75c89df8d71e 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -123,9 +123,23 @@ OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE);
OPAL_CALL(opal_lpc_read, OPAL_LPC_READ);
OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE);
OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
+OPAL_CALL(opal_read_elog, OPAL_ELOG_READ);
+OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK);
+OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE);
+OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND);
+OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE);
OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE);
OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE);
OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE);
+OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT);
+OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO);
+OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2);
+OPAL_CALL(opal_dump_read, OPAL_DUMP_READ);
+OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK);
OPAL_CALL(opal_get_msg, OPAL_GET_MSG);
OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION);
+OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND);
OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT);
+OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
+OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
+OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 65499adaecff..e92f2f67640f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -21,6 +21,7 @@
#include <linux/sched.h>
#include <linux/kobject.h>
#include <linux/delay.h>
+#include <linux/memblock.h>
#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/mce.h>
@@ -33,8 +34,18 @@ struct kobject *opal_kobj;
struct opal {
u64 base;
u64 entry;
+ u64 size;
} opal;
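+/* Firmware-supplied address range for which a machine check fixup exists */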
+struct mcheck_recoverable_range {
+ u64 start_addr;
+ u64 end_addr;
+ u64 recover_addr;
+};
+
+static struct mcheck_recoverable_range *mc_recoverable_range;
+static int mc_recoverable_range_len;
+
static struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
extern u64 opal_mc_secondary_handler[];
@@ -49,25 +60,29 @@ static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
int __init early_init_dt_scan_opal(unsigned long node,
const char *uname, int depth, void *data)
{
- const void *basep, *entryp;
- unsigned long basesz, entrysz;
+ const void *basep, *entryp, *sizep;
+ unsigned long basesz, entrysz, runtimesz;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
+ sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
- if (!basep || !entryp)
+ if (!basep || !entryp || !sizep)
return 1;
opal.base = of_read_number(basep, basesz/4);
opal.entry = of_read_number(entryp, entrysz/4);
+ opal.size = of_read_number(sizep, runtimesz/4);
pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
opal.base, basep, basesz);
pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
opal.entry, entryp, entrysz);
+ pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%ld)\n",
+ opal.size, sizep, runtimesz);
powerpc_firmware_features |= FW_FEATURE_OPAL;
if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
@@ -84,6 +99,53 @@ int __init early_init_dt_scan_opal(unsigned long node,
return 1;
}
+int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ unsigned long i, size;
+ const __be32 *prop;
+
+ if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &size);
+
+ if (!prop)
+ return 1;
+
+ pr_debug("Found machine check recoverable ranges.\n");
+
+ /*
+ * Allocate a buffer to hold the MC recoverable ranges. We would be
+ * accessing them in real mode, hence it needs to be within
+ * RMO region.
+ */
+ mc_recoverable_range = __va(memblock_alloc_base(size, __alignof__(u64),
+ ppc64_rma_size));
+ memset(mc_recoverable_range, 0, size);
+
+ /*
+ * Each recoverable range entry is a (start address, len, recovery
+ * address) triple: 2 cells, 1 cell and 2 cells respectively, i.e.
+ * 5 cells per entry.
+ */
+ for (i = 0; i < size / (sizeof(*prop) * 5); i++) {
+ mc_recoverable_range[i].start_addr =
+ of_read_number(prop + (i * 5) + 0, 2);
+ mc_recoverable_range[i].end_addr =
+ mc_recoverable_range[i].start_addr +
+ of_read_number(prop + (i * 5) + 2, 1);
+ mc_recoverable_range[i].recover_addr =
+ of_read_number(prop + (i * 5) + 3, 2);
+
+ pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
+ mc_recoverable_range[i].start_addr,
+ mc_recoverable_range[i].end_addr,
+ mc_recoverable_range[i].recover_addr);
+ }
+ mc_recoverable_range_len = i;
+ return 1;
+}
+
static int __init opal_register_exception_handlers(void)
{
#ifdef __BIG_ENDIAN__
@@ -401,6 +463,38 @@ int opal_machine_check(struct pt_regs *regs)
return 0;
}
+static uint64_t find_recovery_address(uint64_t nip)
+{
+ int i;
+
+ for (i = 0; i < mc_recoverable_range_len; i++)
+ if ((nip >= mc_recoverable_range[i].start_addr) &&
+ (nip < mc_recoverable_range[i].end_addr))
+ return mc_recoverable_range[i].recover_addr;
+ return 0;
+}
+
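+/*
+ * If the machine check hit inside OPAL firmware, look up the matching
+ * recoverable range and redirect the interrupted NIP to its fixup address.
+ */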
+bool opal_mce_check_early_recovery(struct pt_regs *regs)
+{
+ uint64_t recover_addr = 0;
+
+ if (!opal.base || !opal.size)
+ goto out;
+
+ if ((regs->nip >= opal.base) &&
+ (regs->nip <= (opal.base + opal.size)))
+ recover_addr = find_recovery_address(regs->nip);
+
+ /*
+ * Setup regs->nip to rfi into fixup address.
+ */
+ if (recover_addr)
+ regs->nip = recover_addr;
+
+out:
+ return !!recover_addr;
+}
+
static irqreturn_t opal_interrupt(int irq, void *data)
{
__be64 events;
@@ -472,8 +566,14 @@ static int __init opal_init(void)
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
if (rc == 0) {
+ /* Setup error log interface */
+ rc = opal_elog_init();
/* Setup code update interface */
opal_flash_init();
+ /* Setup platform dump extract interface */
+ opal_platform_dump_init();
+ /* Setup system parameters interface */
+ opal_sys_param_init();
}
return 0;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 81a7a0a79be7..61cf8fa9c61b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -187,6 +187,7 @@ static void __init pnv_setup_machdep_opal(void)
ppc_md.power_off = pnv_power_off;
ppc_md.halt = pnv_halt;
ppc_md.machine_check_exception = opal_machine_check;
+ ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
}
#ifdef CONFIG_PPC_POWERNV_RTAS
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 80b1d57c306a..2cb8b776c84a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -111,6 +111,18 @@ config CMM
will be reused for other LPARs. The interface allows firmware to
balance memory across many LPARs.
+config HV_PERF_CTRS
+ bool "Hypervisor supplied PMU events (24x7 & GPCI)"
+ default y
+ depends on PERF_EVENTS && PPC_PSERIES
+ help
+ Enable access to hypervisor supplied counters in perf. Currently,
+ this enables code that uses the hcall GetPerfCounterInfo and 24x7
+ interfaces to retrieve counters. GPCI exists on Power 6 and later
+ systems. 24x7 is available on Power 8 systems.
+
+ If unsure, select Y.
+
config DTL
bool "Dispatch Trace Log"
depends on PPC_SPLPAR && DEBUG_FS
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 0ea99e3d4815..9b8e05078a63 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -420,4 +420,4 @@ static int __init pseries_cpu_hotplug_init(void)
return 0;
}
-arch_initcall(pseries_cpu_hotplug_init);
+machine_arch_initcall(pseries, pseries_cpu_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 9590dbb756f2..573b488fc48b 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
@@ -75,13 +76,27 @@ unsigned long memory_block_size_bytes(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
+static int pseries_remove_memory(u64 start, u64 size)
{
- unsigned long start, start_pfn;
- struct zone *zone;
int ret;
- unsigned long section;
- unsigned long sections_to_remove;
+
+ /* Remove htab bolted mappings for this section of memory */
+ start = (unsigned long)__va(start);
+ ret = remove_section_mapping(start, start + size);
+
+ /* Ensure all vmalloc mappings are flushed in case they also
+ * hit that section of memory
+ */
+ vm_unmap_aliases();
+
+ return ret;
+}
+
+static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
+{
+ unsigned long block_sz, start_pfn;
+ int sections_per_block;
+ int i, nid;
start_pfn = base >> PAGE_SHIFT;
@@ -90,45 +105,21 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
return 0;
}
- zone = page_zone(pfn_to_page(start_pfn));
+ block_sz = memory_block_size_bytes();
+ sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ nid = memory_add_physaddr_to_nid(base);
- /*
- * Remove section mappings and sysfs entries for the
- * section of the memory we are removing.
- *
- * NOTE: Ideally, this should be done in generic code like
- * remove_memory(). But remove_memory() gets called by writing
- * to sysfs "state" file and we can't remove sysfs entries
- * while writing to it. So we have to defer it to here.
- */
- sections_to_remove = (memblock_size >> PAGE_SHIFT) / PAGES_PER_SECTION;
- for (section = 0; section < sections_to_remove; section++) {
- unsigned long pfn = start_pfn + section * PAGES_PER_SECTION;
- ret = __remove_pages(zone, pfn, PAGES_PER_SECTION);
- if (ret)
- return ret;
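+ /* Tear the block down one memory section at a time */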
+ for (i = 0; i < sections_per_block; i++) {
+ remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ base += MIN_MEMORY_BLOCK_SIZE;
}
- /*
- * Update memory regions for memory remove
- */
+ /* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
-
- /*
- * Remove htab bolted mappings for this section of memory
- */
- start = (unsigned long)__va(base);
- ret = remove_section_mapping(start, start + memblock_size);
-
- /* Ensure all vmalloc mappings are flushed in case they also
- * hit that section of memory
- */
- vm_unmap_aliases();
-
- return ret;
+ return 0;
}
-static int pseries_remove_memory(struct device_node *np)
+static int pseries_remove_mem_node(struct device_node *np)
{
const char *type;
const unsigned int *regs;
@@ -153,8 +144,8 @@ static int pseries_remove_memory(struct device_node *np)
base = *(unsigned long *)regs;
lmb_size = regs[3];
- ret = pseries_remove_memblock(base, lmb_size);
- return ret;
+ pseries_remove_memblock(base, lmb_size);
+ return 0;
}
#else
static inline int pseries_remove_memblock(unsigned long base,
@@ -162,13 +153,13 @@ static inline int pseries_remove_memblock(unsigned long base,
{
return -EOPNOTSUPP;
}
-static inline int pseries_remove_memory(struct device_node *np)
+static inline int pseries_remove_mem_node(struct device_node *np)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
-static int pseries_add_memory(struct device_node *np)
+static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
const unsigned int *regs;
@@ -254,10 +245,10 @@ static int pseries_memory_notifier(struct notifier_block *nb,
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
- err = pseries_add_memory(node);
+ err = pseries_add_mem_node(node);
break;
case OF_RECONFIG_DETACH_NODE:
- err = pseries_remove_memory(node);
+ err = pseries_remove_mem_node(node);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
pr = (struct of_prop_reconfig *)node;
@@ -277,6 +268,10 @@ static int __init pseries_memory_hotplug_init(void)
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_mem_nb);
+#ifdef CONFIG_MEMORY_HOTREMOVE
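+ /* Arch hook: remove bolted HPTE mappings when memory is hot-removed */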
+ ppc_md.remove_memory = pseries_remove_memory;
+#endif
+
return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index cde4e0a095ae..bde7ebad3949 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -290,13 +290,6 @@ void post_mobility_fixup(void)
int rc;
int activate_fw_token;
- rc = pseries_devicetree_update(MIGRATION_SCOPE);
- if (rc) {
- printk(KERN_ERR "Initial post-mobility device tree update "
- "failed: %d\n", rc);
- return;
- }
-
activate_fw_token = rtas_token("ibm,activate-firmware");
if (activate_fw_token == RTAS_UNKNOWN_SERVICE) {
printk(KERN_ERR "Could not make post-mobility "
@@ -304,16 +297,17 @@ void post_mobility_fixup(void)
return;
}
- rc = rtas_call(activate_fw_token, 0, 1, NULL);
- if (!rc) {
- rc = pseries_devicetree_update(MIGRATION_SCOPE);
- if (rc)
- printk(KERN_ERR "Secondary post-mobility device tree "
- "update failed: %d\n", rc);
- } else {
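+ /* ibm,activate-firmware may report busy/extended delay; retry until done */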
+ do {
+ rc = rtas_call(activate_fw_token, 0, 1, NULL);
+ } while (rtas_busy_delay(rc));
+
+ if (rc)
printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
- return;
- }
+
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
return;
}
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 16a255255d30..b87b97849d4c 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -26,6 +26,7 @@
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
+#include "../../kernel/cacheinfo.h"
static u64 stream_id;
static struct device suspend_dev;
@@ -79,6 +80,23 @@ static int pseries_suspend_cpu(void)
}
/**
+ * pseries_suspend_enable_irqs
+ *
+ * Post suspend configuration updates
+ *
+ **/
+static void pseries_suspend_enable_irqs(void)
+{
+ /*
+ * Update configuration which can be modified based on device tree
+ * changes during resume.
+ */
+ cacheinfo_cpu_offline(smp_processor_id());
+ post_mobility_fixup();
+ cacheinfo_cpu_online(smp_processor_id());
+}
+
+/**
* pseries_suspend_enter - Final phase of hibernation
*
* Return value:
@@ -174,7 +192,30 @@ out:
return rc;
}
-static DEVICE_ATTR(hibernate, S_IWUSR, NULL, store_hibernate);
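+/* Who performs the device tree update after resume (reported by show_hibernate) */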
+#define USER_DT_UPDATE 0
+#define KERN_DT_UPDATE 1
+
+/**
+ * show_hibernate - Report device tree update responsibility
+ * @dev: subsys root device
+ * @attr: device attribute struct
+ * @buf: buffer
+ *
+ * Report whether a device tree update is performed by the kernel after a
+ * resume, or if drmgr must coordinate the update from user space.
+ *
+ * Return value:
+ * 0 if drmgr is to initiate update, and 1 otherwise
+ **/
+static ssize_t show_hibernate(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", KERN_DT_UPDATE);
+}
+
+static DEVICE_ATTR(hibernate, S_IWUSR | S_IRUGO,
+ show_hibernate, store_hibernate);
static struct bus_type suspend_subsys = {
.name = "power",
@@ -235,6 +276,7 @@ static int __init pseries_suspend_init(void)
return rc;
ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
+ ppc_md.suspend_enable_irqs = pseries_suspend_enable_irqs;
suspend_set_ops(&pseries_suspend_ops);
return 0;
}