Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--  arch/ppc64/kernel/LparData.c       | 13
-rw-r--r--  arch/ppc64/kernel/Makefile         |  5
-rw-r--r--  arch/ppc64/kernel/head.S           | 48
-rw-r--r--  arch/ppc64/kernel/lparmap.c        | 31
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c  | 12
-rw-r--r--  arch/ppc64/kernel/misc.S           |  6
-rw-r--r--  arch/ppc64/kernel/mpic.c           |  4
-rw-r--r--  arch/ppc64/kernel/mpic.h           |  2
-rw-r--r--  arch/ppc64/kernel/pci.c            | 20
-rw-r--r--  arch/ppc64/kernel/pmac_setup.c     | 12
-rw-r--r--  arch/ppc64/kernel/prom.c           |  2
-rw-r--r--  arch/ppc64/kernel/prom_init.c      |  2
-rw-r--r--  arch/ppc64/kernel/setup.c          |  5
-rw-r--r--  arch/ppc64/kernel/smp.c            | 15
-rw-r--r--  arch/ppc64/kernel/udbg.c           |  2
-rw-r--r--  arch/ppc64/kernel/xics.c           | 31
16 files changed, 117 insertions(+), 93 deletions(-)
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index 6ffcf67dd507..1c11031c838e 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -32,18 +32,17 @@
/* The HvReleaseData is the root of the information shared between
* the hypervisor and Linux.
*/
-
struct HvReleaseData hvReleaseData = {
.xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
.xSize = sizeof(struct HvReleaseData),
.xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
.xSlicNacaAddr = &naca, /* 64-bit Naca address */
- .xMsNucDataOffset = 0x4800, /* offset of LparMap within loadarea (see head.S) */
- .xTagsMode = 1, /* tags inactive */
- .xAddressSize = 0, /* 64 bit */
- .xNoSharedProcs = 0, /* shared processors */
- .xNoHMT = 0, /* HMT allowed */
- .xRsvd2 = 6, /* TEMP: This allows non-GA driver */
+ .xMsNucDataOffset = LPARMAP_PHYS,
+ .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
+ /* 64 bit */
+ /* shared processors */
+ /* HMT allowed */
+ | 6, /* TEMP: This allows non-GA driver */
.xVrmIndex = 4, /* We are v5r2m0 */
.xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
.xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index d9b2660ef221..2ecccb6b4f8c 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -73,3 +73,8 @@ obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_KPROBES) += kprobes.o
CFLAGS_ioctl32.o += -Ifs/
+
+ifeq ($(CONFIG_PPC_ISERIES),y)
+arch/ppc64/kernel/head.o: arch/ppc64/kernel/lparmap.s
+AFLAGS_head.o += -Iarch/ppc64/kernel
+endif
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 93ebcac0d5a2..accaa052d31f 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -38,6 +38,7 @@
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
+#include <asm/iSeries/LparMap.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
@@ -522,36 +523,9 @@ __end_interrupts:
#ifdef CONFIG_PPC_ISERIES
.globl naca
naca:
- .llong itVpdAreas
-
- /*
- * The iSeries LPAR map is at this fixed address
- * so that the HvReleaseData structure can address
- * it with a 32-bit offset.
- *
- * The VSID values below are dependent on the
- * VSID generation algorithm. See include/asm/mmu_context.h.
- */
-
- . = 0x4800
-
- .llong 2 /* # ESIDs to be mapped by hypervisor */
- .llong 1 /* # memory ranges to be mapped by hypervisor */
- .llong STAB0_PAGE /* Page # of segment table within load area */
- .llong 0 /* Reserved */
- .llong 0 /* Reserved */
- .llong 0 /* Reserved */
- .llong 0 /* Reserved */
- .llong 0 /* Reserved */
- .llong (KERNELBASE>>SID_SHIFT)
- .llong 0x408f92c94 /* KERNELBASE VSID */
- /* We have to list the bolted VMALLOC segment here, too, so that it
- * will be restored on shared processor switch */
- .llong (VMALLOCBASE>>SID_SHIFT)
- .llong 0xf09b89af5 /* VMALLOCBASE VSID */
- .llong 8192 /* # pages to map (32 MB) */
- .llong 0 /* Offset from start of loadarea to start of map */
- .llong 0x408f92c940000 /* VPN of first page to map */
+ .llong itVpdAreas
+ .llong 0 /* xRamDisk */
+ .llong 0 /* xRamDiskSize */
. = 0x6100
@@ -706,6 +680,11 @@ hardware_interrupt_iSeries_masked:
.globl fwnmi_data_area
fwnmi_data_area:
+#ifdef CONFIG_PPC_ISERIES
+ . = LPARMAP_PHYS
+#include "lparmap.s"
+#endif /* CONFIG_PPC_ISERIES */
+
/*
* Vectors for the FWNMI option. Share common code.
*/
@@ -2098,7 +2077,7 @@ _GLOBAL(hmt_start_secondary)
blr
#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_KEXEC) || (defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES))
_GLOBAL(smp_release_cpus)
/* All secondary cpus are spinning on a common
* spinloop, release them all now so they can start
@@ -2131,13 +2110,6 @@ empty_zero_page:
swapper_pg_dir:
.space 4096
-#ifdef CONFIG_SMP
-/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
- .globl stab_array
-stab_array:
- .space 4096 * 48
-#endif
-
/*
* This space gets a copy of optional info passed to us by the bootstrap
* Used to pass parameters into the kernel like root=/dev/sda1, etc.
diff --git a/arch/ppc64/kernel/lparmap.c b/arch/ppc64/kernel/lparmap.c
new file mode 100644
index 000000000000..b81de286df5e
--- /dev/null
+++ b/arch/ppc64/kernel/lparmap.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2005 Stephen Rothwell IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/iSeries/LparMap.h>
+
+const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
+ .xNumberEsids = HvEsidsToMap,
+ .xNumberRanges = HvRangesToMap,
+ .xSegmentTableOffs = STAB0_PAGE,
+
+ .xEsids = {
+ { .xKernelEsid = GET_ESID(KERNELBASE),
+ .xKernelVsid = KERNEL_VSID(KERNELBASE), },
+ { .xKernelEsid = GET_ESID(VMALLOCBASE),
+ .xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
+ },
+
+ .xRanges = {
+ { .xPages = HvPagesToMap,
+ .xOffset = 0,
+ .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+ },
+ },
+};
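
An aside on the new .xVPN initializer: it is meant to reproduce the value that was hard-coded in head.S before this change (0x408f92c940000, removed in the head.S hunk below). A minimal standalone check of the arithmetic, assuming the conventional ppc64 constants SID_SHIFT = 28 and PAGE_SHIFT = 12 for 4K pages; both constants are assumptions here, not values taken from this diff:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int SID_SHIFT = 28, PAGE_SHIFT = 12;	/* assumed ppc64 values */
	const uint64_t kernel_vsid = 0x408f92c94ULL;	/* old hand-coded KERNELBASE VSID */

	/* A VPN is the VSID shifted past the in-segment page index; KERNELBASE
	 * is segment-aligned, so the index of its first page is 0. */
	uint64_t vpn = kernel_vsid << (SID_SHIFT - PAGE_SHIFT);

	assert(vpn == 0x408f92c940000ULL);	/* value removed from head.S below */
	return 0;
}
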
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index fdb2fc649d72..4775f12a013c 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -185,7 +185,7 @@ void kexec_copy_flush(struct kimage *image)
void kexec_smp_down(void *arg)
{
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(1);
local_irq_disable();
kexec_smp_wait();
@@ -232,7 +232,7 @@ static void kexec_prepare_cpus(void)
/* after we tell the others to go down */
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
put_cpu();
@@ -243,15 +243,19 @@ static void kexec_prepare_cpus(void)
static void kexec_prepare_cpus(void)
{
+ extern void smp_release_cpus(void);
/*
* move the secondarys to us so that we can copy
* the new kernel 0-0x100 safely
*
* do this if kexec in setup.c ?
+ *
+ * We need to release the cpus if we are ever going from an
+ * UP to an SMP kernel.
*/
- smp_relase_cpus();
+ smp_release_cpus();
if (ppc_md.cpu_irq_down)
- ppc_md.cpu_irq_down();
+ ppc_md.cpu_irq_down(0);
local_irq_disable();
}
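
The new integer argument tells the interrupt-controller teardown whether it is running on a CPU being brought down by the kexec IPI (1) or on the CPU that will perform the kexec (0). A condensed, illustrative sketch of the call flow; the wrapper function below is hypothetical, and ppc_md.cpu_irq_down is assumed to point at xics_teardown_cpu() or mpic_teardown_this_cpu() depending on the platform:

#include <asm/machdep.h>

/* Illustrative only: how the 'secondary' flag flows from the kexec code
 * above into the xics/mpic teardown routines changed below. */
static void kexec_irq_down(int secondary)
{
	if (ppc_md.cpu_irq_down)	/* assumed: xics_teardown_cpu() or
					 * mpic_teardown_this_cpu() */
		ppc_md.cpu_irq_down(secondary);
}

/* kexec_smp_down() runs on the other CPUs via the kexec IPI and passes 1,
 * so xics_teardown_cpu() EOIs that IPI and drops the CPU from the global
 * interrupt queue; kexec_prepare_cpus() passes 0 for the CPU doing the
 * kexec, which stays in the queue, since some machines need at least one
 * CPU left in the GIQ (see the xics.c hunk below). */
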
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 59f4f9973818..a05b50b738e9 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -1129,6 +1129,9 @@ _GLOBAL(sys_call_table32)
.llong .compat_sys_waitid
.llong .sys32_ioprio_set
.llong .sys32_ioprio_get
+ .llong .sys_inotify_init /* 275 */
+ .llong .sys_inotify_add_watch
+ .llong .sys_inotify_rm_watch
.balign 8
_GLOBAL(sys_call_table)
@@ -1407,3 +1410,6 @@ _GLOBAL(sys_call_table)
.llong .sys_waitid
.llong .sys_ioprio_set
.llong .sys_ioprio_get
+ .llong .sys_inotify_init /* 275 */
+ .llong .sys_inotify_add_watch
+ .llong .sys_inotify_rm_watch
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c
index e8fbab1df37f..cc262a05ddb4 100644
--- a/arch/ppc64/kernel/mpic.c
+++ b/arch/ppc64/kernel/mpic.c
@@ -794,10 +794,10 @@ void mpic_setup_this_cpu(void)
/*
* XXX: someone who knows mpic should check this.
- * do we need to eoi the ipi here (see xics comments)?
+ * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
* or can we reset the mpic in the new kernel?
*/
-void mpic_teardown_this_cpu(void)
+void mpic_teardown_this_cpu(int secondary)
{
struct mpic *mpic = mpic_primary;
unsigned long flags;
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h
index 99fbbc9a084c..ca78a7f10528 100644
--- a/arch/ppc64/kernel/mpic.h
+++ b/arch/ppc64/kernel/mpic.h
@@ -256,7 +256,7 @@ extern unsigned int mpic_irq_get_priority(unsigned int irq);
extern void mpic_setup_this_cpu(void);
/* Clean up for kexec (or cpu offline or ...) */
-extern void mpic_teardown_this_cpu(void);
+extern void mpic_teardown_this_cpu(int secondary);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ae6f579d3fa0..d0d55c7908ef 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -108,8 +108,28 @@ void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region
region->end = res->end - offset;
}
+void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region)
+{
+ unsigned long offset = 0;
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+
+ if (!hose)
+ return;
+
+ if (res->flags & IORESOURCE_IO)
+ offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
+ if (res->flags & IORESOURCE_MEM)
+ offset = hose->pci_mem_offset;
+
+ res->start = region->start + offset;
+ res->end = region->end + offset;
+}
+
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
+EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif
/*
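
The new pcibios_bus_to_resource() is the inverse of pcibios_resource_to_bus() above: it adds the host bridge's I/O or memory offset back onto a bus-relative region. A hypothetical driver-side usage sketch, not part of this commit; the device pointer and region values are made up for illustration:

#include <linux/pci.h>

/* Illustrative only: translate a bus-side BAR window into a CPU-visible
 * resource using the helper added above. */
static void example_bus_to_resource(struct pci_dev *dev)
{
	struct pci_bus_region region = {
		.start = 0x80000000,	/* bus address read from config space */
		.end   = 0x8000ffff,
	};
	struct resource res = { .flags = IORESOURCE_MEM };

	pcibios_bus_to_resource(dev, &res, &region);

	/* res.start/res.end now include hose->pci_mem_offset for the host
	 * bridge behind which dev sits. */
}
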
diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c
index 3013cdb5f933..e40877fa67cd 100644
--- a/arch/ppc64/kernel/pmac_setup.c
+++ b/arch/ppc64/kernel/pmac_setup.c
@@ -97,7 +97,7 @@ EXPORT_SYMBOL(smu_cmdbuf_abs);
extern void udbg_init_scc(struct device_node *np);
-void __pmac pmac_show_cpuinfo(struct seq_file *m)
+static void __pmac pmac_show_cpuinfo(struct seq_file *m)
{
struct device_node *np;
char *pp;
@@ -144,7 +144,7 @@ void __pmac pmac_show_cpuinfo(struct seq_file *m)
}
-void __init pmac_setup_arch(void)
+static void __init pmac_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
@@ -230,7 +230,7 @@ void __pmac note_bootable_part(dev_t dev, int part, int goodness)
}
}
-void __pmac pmac_restart(char *cmd)
+static void __pmac pmac_restart(char *cmd)
{
switch(sys_ctrler) {
#ifdef CONFIG_ADB_PMU
@@ -249,7 +249,7 @@ void __pmac pmac_restart(char *cmd)
}
}
-void __pmac pmac_power_off(void)
+static void __pmac pmac_power_off(void)
{
switch(sys_ctrler) {
#ifdef CONFIG_ADB_PMU
@@ -267,7 +267,7 @@ void __pmac pmac_power_off(void)
}
}
-void __pmac pmac_halt(void)
+static void __pmac pmac_halt(void)
{
pmac_power_off();
}
@@ -327,7 +327,7 @@ static void __init init_boot_display(void)
/*
* Early initialization.
*/
-void __init pmac_init_early(void)
+static void __init pmac_init_early(void)
{
DBG(" -> pmac_init_early\n");
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 47727a6f7346..5aca01ddd81f 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -916,6 +916,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
}
}
+#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) {
@@ -929,6 +930,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
}
+#endif /* CONFIG_ALTIVEC */
/*
* Check for an SMT capable CPU and set the CPU feature. We do
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index e248a7950aeb..dbbe6c79d8da 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1803,7 +1803,7 @@ static void __init fixup_device_tree(void)
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
- if (u3_rev != 0x35)
+ if (u3_rev != 0x35 && u3_rev != 0x37)
return;
/* does it need fixup ? */
if (prom_getproplen(i2c, "interrupts") > 0)
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c
index d1b33f0b26cb..687e85595208 100644
--- a/arch/ppc64/kernel/setup.c
+++ b/arch/ppc64/kernel/setup.c
@@ -694,7 +694,6 @@ void machine_restart(char *cmd)
local_irq_disable();
while (1) ;
}
-EXPORT_SYMBOL(machine_restart);
void machine_power_off(void)
{
@@ -707,7 +706,6 @@ void machine_power_off(void)
local_irq_disable();
while (1) ;
}
-EXPORT_SYMBOL(machine_power_off);
void machine_halt(void)
{
@@ -720,7 +718,6 @@ void machine_halt(void)
local_irq_disable();
while (1) ;
}
-EXPORT_SYMBOL(machine_halt);
static int ppc64_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
@@ -1071,6 +1068,8 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init();
emergency_stack_init();
+ stabs_alloc();
+
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c
index 2fcddfcb594d..793b562da653 100644
--- a/arch/ppc64/kernel/smp.c
+++ b/arch/ppc64/kernel/smp.c
@@ -65,8 +65,6 @@ struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
-extern unsigned char stab_array[];
-
void smp_call_function_interrupt(void);
int smt_enabled_at_boot = 1;
@@ -492,19 +490,6 @@ int __devinit __cpu_up(unsigned int cpu)
paca[cpu].default_decr = tb_ticks_per_jiffy;
- if (!cpu_has_feature(CPU_FTR_SLB)) {
- void *tmp;
-
- /* maximum of 48 CPUs on machines with a segment table */
- if (cpu >= 48)
- BUG();
-
- tmp = &stab_array[PAGE_SIZE * cpu];
- memset(tmp, 0, PAGE_SIZE);
- paca[cpu].stab_addr = (unsigned long)tmp;
- paca[cpu].stab_real = virt_to_abs(tmp);
- }
-
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
*/
diff --git a/arch/ppc64/kernel/udbg.c b/arch/ppc64/kernel/udbg.c
index d4ccd6f1ef47..c0da45540f0f 100644
--- a/arch/ppc64/kernel/udbg.c
+++ b/arch/ppc64/kernel/udbg.c
@@ -141,7 +141,7 @@ void udbg_init_scc(struct device_node *np)
#endif /* CONFIG_PPC_PMAC */
-#if CONFIG_PPC_PMAC
+#ifdef CONFIG_PPC_PMAC
static void udbg_real_putc(unsigned char c)
{
while ((real_readb(sccc) & SCC_TXRDY) == 0)
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c
index 677c4450984a..d9dc6f28d050 100644
--- a/arch/ppc64/kernel/xics.c
+++ b/arch/ppc64/kernel/xics.c
@@ -647,29 +647,30 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
}
}
-void xics_teardown_cpu(void)
+void xics_teardown_cpu(int secondary)
{
int cpu = smp_processor_id();
- int status;
ops->cppr_info(cpu, 0x00);
iosync();
/*
- * we need to EOI the IPI if we got here from kexec down IPI
- *
- * xics doesn't care if we duplicate an EOI as long as we
- * don't EOI and raise priority.
- *
- * probably need to check all the other interrupts too
- * should we be flagging idle loop instead?
- * or creating some task to be scheduled?
+ * Some machines need to have at least one cpu in the GIQ,
+ * so leave the master cpu in the group.
*/
- ops->xirr_info_set(cpu, XICS_IPI);
-
- status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
- WARN_ON(status != 0);
+ if (secondary) {
+ /*
+ * we need to EOI the IPI if we got here from kexec down IPI
+ *
+ * probably need to check all the other interrupts too
+ * should we be flagging idle loop instead?
+ * or creating some task to be scheduled?
+ */
+ ops->xirr_info_set(cpu, XICS_IPI);
+ rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
+ (1UL << interrupt_server_size) - 1 -
+ default_distrib_server, 0);
+ }
}
#ifdef CONFIG_HOTPLUG_CPU