author    David Woodhouse <dwmw2@infradead.org>  2006-10-21 16:46:04 +0100
committer David Woodhouse <dwmw2@infradead.org>  2006-10-21 16:46:04 +0100
commit    513b046c96cc2fbce730a3474f6f7ff0c4fdd05c (patch)
tree      e8006368b6f643067486f92405a404757807d6da /arch/powerpc/platforms
parent    82810b7b6cc7a74c68881a13b0eb66c7a6370fcc (diff)
parent    c7a3bd177f248d01ee18a01d22048c80e071c331 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/82xx/Kconfig               |  21
-rw-r--r--  arch/powerpc/platforms/82xx/Makefile              |   5
-rw-r--r--  arch/powerpc/platforms/82xx/m82xx_pci.h           |  19
-rw-r--r--  arch/powerpc/platforms/82xx/mpc82xx.c             | 110
-rw-r--r--  arch/powerpc/platforms/82xx/mpc82xx_ads.c         | 658
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads.h              |  65
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig               |  26
-rw-r--r--  arch/powerpc/platforms/83xx/Makefile              |   2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.c         | 215
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.h         |  19
-rw-r--r--  arch/powerpc/platforms/83xx/mpc834x_itx.c         |   1
-rw-r--r--  arch/powerpc/platforms/83xx/mpc8360e_pb.c         | 238
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.c         |   7
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ads.h         |   1
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c         |  10
-rw-r--r--  arch/powerpc/platforms/86xx/mpc86xx_hpcn.c        |   7
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig               |   5
-rw-r--r--  arch/powerpc/platforms/cell/cbe_regs.c            |   2
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c           | 244
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.h           |  97
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c               |   8
-rw-r--r--  arch/powerpc/platforms/cell/ras.c                 |   1
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c          |  20
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c            | 208
-rw-r--r--  arch/powerpc/platforms/cell/spufs/Makefile        |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c       |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c          | 355
-rw-r--r--  arch/powerpc/platforms/cell/spufs/gang.c          |  81
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c         | 232
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c           |  48
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c         | 450
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h         |  29
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c        |   9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c      |   9
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c               |   9
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c |   6
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c              |  17
-rw-r--r--  arch/powerpc/platforms/iseries/irq.h              |   2
-rw-r--r--  arch/powerpc/platforms/iseries/lpevents.c         |   4
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c               |   4
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c              |   8
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c            |  16
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c              |   6
-rw-r--r--  arch/powerpc/platforms/iseries/smp.h              |   6
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c          |   2
-rw-r--r--  arch/powerpc/platforms/maple/pci.c                | 187
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c               |  26
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c             |   1
-rw-r--r--  arch/powerpc/platforms/pasemi/time.c              |   1
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c         |   2
-rw-r--r--  arch/powerpc/platforms/powermac/pfunc_base.c      |   2
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c             |  19
-rw-r--r--  arch/powerpc/platforms/powermac/pic.h             |   4
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c             |  15
-rw-r--r--  arch/powerpc/platforms/powermac/udbg_scc.c        |  14
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c            |   3
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c              |  14
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c            |   9
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c             |  22
-rw-r--r--  arch/powerpc/platforms/pseries/xics.h             |   3
60 files changed, 2872 insertions(+), 740 deletions(-)
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
new file mode 100644
index 000000000000..47d841ecf2e2
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -0,0 +1,21 @@
+menu "Platform support"
+ depends on PPC_82xx
+
+choice
+ prompt "Machine Type"
+ default MPC82xx_ADS
+
+config MPC82xx_ADS
+ bool "Freescale MPC82xx ADS"
+ select DEFAULT_UIMAGE
+ select PQ2ADS
+ select 8272
+ select 8260
+ select CPM2
+ select FSL_SOC
+ help
+ This option enables support for the MPC8272 ADS board
+
+endchoice
+
+endmenu
diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile
new file mode 100644
index 000000000000..d9fd4c84d2e0
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the PowerPC 82xx linux kernel.
+#
+obj-$(CONFIG_PPC_82xx) += mpc82xx.o
+obj-$(CONFIG_MPC82xx_ADS) += mpc82xx_ads.o
diff --git a/arch/powerpc/platforms/82xx/m82xx_pci.h b/arch/powerpc/platforms/82xx/m82xx_pci.h
new file mode 100644
index 000000000000..9cd8893b5a32
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/m82xx_pci.h
@@ -0,0 +1,19 @@
+#ifndef _PPC_KERNEL_M82XX_PCI_H
+#define _PPC_KERNEL_M82XX_PCI_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/m8260_pci.h>
+
+#define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET)
+
+#ifndef _IO_BASE
+#define _IO_BASE isa_io_base
+#endif
+
+#endif /* _PPC_KERNEL_M82XX_PCI_H */
diff --git a/arch/powerpc/platforms/82xx/mpc82xx.c b/arch/powerpc/platforms/82xx/mpc82xx.c
new file mode 100644
index 000000000000..0f5b30dc60da
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/mpc82xx.c
@@ -0,0 +1,110 @@
+/*
+ * MPC82xx setup and early boot code plus other random bits.
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Copyright (c) 2006 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+#include <linux/fs_uart_pd.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc8260.h>
+#include <asm/irq.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/cpm2.h>
+#include <asm/udbg.h>
+#include <asm/i8259.h>
+#include <linux/fs_enet_pd.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/cpm2_pic.h>
+
+#include "pq2ads_pd.h"
+
+static int __init get_freq(char *name, unsigned long *val)
+{
+ struct device_node *cpu;
+ unsigned int *fp;
+ int found = 0;
+
+ /* The cpu node should have timebase and clock frequency properties */
+ cpu = of_find_node_by_type(NULL, "cpu");
+
+ if (cpu) {
+ fp = (unsigned int *)get_property(cpu, name, NULL);
+ if (fp) {
+ found = 1;
+ *val = *fp++;
+ }
+
+ of_node_put(cpu);
+ }
+
+ return found;
+}
+
+void __init m82xx_calibrate_decr(void)
+{
+ ppc_tb_freq = 125000000;
+ if (!get_freq("bus-frequency", &ppc_tb_freq)) {
+ printk(KERN_ERR "WARNING: Estimating decrementer frequency "
+ "(not found)\n");
+ }
+ ppc_tb_freq /= 4;
+ ppc_proc_freq = 1000000000;
+ if (!get_freq("clock-frequency", &ppc_proc_freq))
+ printk(KERN_ERR "WARNING: Estimating processor frequency"
+ "(not found)\n");
+}
+
+void mpc82xx_ads_show_cpuinfo(struct seq_file *m)
+{
+ uint pvid, svid, phid1;
+ uint memsize = total_memory;
+
+ pvid = mfspr(SPRN_PVR);
+ svid = mfspr(SPRN_SVR);
+
+ seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
+ seq_printf(m, "Machine\t\t: %s\n", CPUINFO_MACHINE);
+ seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
+ seq_printf(m, "SVR\t\t: 0x%x\n", svid);
+
+ /* Display cpu Pll setting */
+ phid1 = mfspr(SPRN_HID1);
+ seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
+
+ /* Display the amount of memory */
+ seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
+}
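
A note on the decrementer setup above: m82xx_calibrate_decr() reads the cpu node's bus-frequency property (falling back to 125 MHz when it is missing) and divides by 4 to get the timebase rate. A minimal standalone sketch of that relationship follows; the 100 MHz bus value is only an assumed example for the worked numbers.

/*
 * Standalone illustration (not kernel code) of the timebase
 * derivation used in m82xx_calibrate_decr() above: the
 * decrementer/timebase runs at bus-frequency / 4.
 */
#include <stdio.h>

int main(void)
{
	unsigned long bus_freq = 100000000UL;	/* assumed 100 MHz bus */
	unsigned long tb_freq = bus_freq / 4;	/* timebase ticks at bus/4 */

	printf("timebase: %lu Hz (%.1f ns per tick)\n",
	       tb_freq, 1e9 / tb_freq);
	return 0;
}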
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
new file mode 100644
index 000000000000..bb9acbb98176
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -0,0 +1,658 @@
+/*
+ * MPC82xx_ads setup and early boot code plus other random bits.
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ * m82xx_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
+ *
+ * Copyright (c) 2006 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+#include <linux/fs_uart_pd.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc8260.h>
+#include <asm/irq.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/cpm2.h>
+#include <asm/udbg.h>
+#include <asm/i8259.h>
+#include <linux/fs_enet_pd.h>
+
+#include <sysdev/fsl_soc.h>
+#include <../sysdev/cpm2_pic.h>
+
+#include "pq2ads_pd.h"
+
+#ifdef CONFIG_PCI
+static uint pci_clk_frq;
+static struct {
+ unsigned long *pci_int_stat_reg;
+ unsigned long *pci_int_mask_reg;
+} pci_regs;
+
+static unsigned long pci_int_base;
+static struct irq_host *pci_pic_host;
+static struct device_node *pci_pic_node;
+#endif
+
+static void __init mpc82xx_ads_pic_init(void)
+{
+ struct device_node *np = of_find_compatible_node(NULL, "cpm-pic", "CPM2");
+ struct resource r;
+ cpm2_map_t *cpm_reg;
+
+ if (np == NULL) {
+ printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
+ return;
+ }
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_ERR "PIC init: invalid resource\n");
+ of_node_put(np);
+ return;
+ }
+ cpm2_pic_init(np);
+ of_node_put(np);
+
+ /* Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ cpm_reg = (cpm2_map_t *) ioremap(get_immrbase(), sizeof(cpm2_map_t));
+ cpm_reg->im_intctl.ic_siprr = 0x05309770;
+ iounmap(cpm_reg);
+#ifdef CONFIG_PCI
+ /* Initialize stuff for the 82xx CPLD IC and install demux */
+ m82xx_pci_init_irq();
+#endif
+}
+
+static void init_fcc1_ioports(struct fs_platform_info *fpi)
+{
+ struct io_port *io;
+ u32 tempval;
+ cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
+ struct device_node *np;
+ struct resource r;
+ u32 *bcsr;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np) {
+ printk(KERN_INFO "No memory node in device tree\n");
+ return;
+ }
+ if (of_address_to_resource(np, 1, &r)) {
+ printk(KERN_INFO "No memory reg property [1] in devicetree\n");
+ return;
+ }
+ of_node_put(np);
+ bcsr = ioremap(r.start + 4, sizeof(u32));
+ io = &immap->im_ioport;
+
+ /* Enable the PHY */
+ clrbits32(bcsr, BCSR1_FETHIEN);
+ setbits32(bcsr, BCSR1_FETH_RST);
+
+ /* FCC1 pins are on port A/C. */
+ /* Configure port A and C pins for FCC1 Ethernet. */
+
+ tempval = in_be32(&io->iop_pdira);
+ tempval &= ~PA1_DIRA0;
+ tempval |= PA1_DIRA1;
+ out_be32(&io->iop_pdira, tempval);
+
+ tempval = in_be32(&io->iop_psora);
+ tempval &= ~PA1_PSORA0;
+ tempval |= PA1_PSORA1;
+ out_be32(&io->iop_psora, tempval);
+
+ setbits32(&io->iop_ppara, PA1_DIRA0 | PA1_DIRA1);
+
+ /* Alter clocks */
+ tempval = PC_CLK(fpi->clk_tx - 8) | PC_CLK(fpi->clk_rx - 8);
+
+ clrbits32(&io->iop_psorc, tempval);
+ clrbits32(&io->iop_pdirc, tempval);
+ setbits32(&io->iop_pparc, tempval);
+
+ cpm2_clk_setup(CPM_CLK_FCC1, fpi->clk_rx, CPM_CLK_RX);
+ cpm2_clk_setup(CPM_CLK_FCC1, fpi->clk_tx, CPM_CLK_TX);
+
+ iounmap(bcsr);
+ iounmap(immap);
+}
+
+static void init_fcc2_ioports(struct fs_platform_info *fpi)
+{
+ cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
+ struct device_node *np;
+ struct resource r;
+ u32 *bcsr;
+
+ struct io_port *io;
+ u32 tempval;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np) {
+ printk(KERN_INFO "No memory node in device tree\n");
+ return;
+ }
+ if (of_address_to_resource(np, 1, &r)) {
+ printk(KERN_INFO "No memory reg property [1] in devicetree\n");
+ return;
+ }
+ of_node_put(np);
+ io = &immap->im_ioport;
+ bcsr = ioremap(r.start + 12, sizeof(u32));
+
+ /* Enable the PHY */
+ clrbits32(bcsr, BCSR3_FETHIEN2);
+ setbits32(bcsr, BCSR3_FETH2_RST);
+
+ /* FCC2 are port B/C. */
+ /* Configure port A and C pins for FCC2 Ethernet. */
+
+ tempval = in_be32(&io->iop_pdirb);
+ tempval &= ~PB2_DIRB0;
+ tempval |= PB2_DIRB1;
+ out_be32(&io->iop_pdirb, tempval);
+
+ tempval = in_be32(&io->iop_psorb);
+ tempval &= ~PB2_PSORB0;
+ tempval |= PB2_PSORB1;
+ out_be32(&io->iop_psorb, tempval);
+
+ setbits32(&io->iop_pparb, PB2_DIRB0 | PB2_DIRB1);
+
+ tempval = PC_CLK(fpi->clk_tx - 8) | PC_CLK(fpi->clk_rx - 8);
+
+ /* Alter clocks */
+ clrbits32(&io->iop_psorc, tempval);
+ clrbits32(&io->iop_pdirc, tempval);
+ setbits32(&io->iop_pparc, tempval);
+
+ cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_rx, CPM_CLK_RX);
+ cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_tx, CPM_CLK_TX);
+
+ iounmap(bcsr);
+ iounmap(immap);
+}
+
+void init_fcc_ioports(struct fs_platform_info *fpi)
+{
+ int fcc_no = fs_get_fcc_index(fpi->fs_no);
+
+ switch (fcc_no) {
+ case 0:
+ init_fcc1_ioports(fpi);
+ break;
+ case 1:
+ init_fcc2_ioports(fpi);
+ break;
+ default:
+ printk(KERN_ERR "init_fcc_ioports: invalid FCC number\n");
+ return;
+ }
+}
+
+static void init_scc1_uart_ioports(struct fs_uart_platform_info *data)
+{
+ cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
+
+ /* SCC1 is only on port D */
+ setbits32(&immap->im_ioport.iop_ppard, 0x00000003);
+ clrbits32(&immap->im_ioport.iop_psord, 0x00000001);
+ setbits32(&immap->im_ioport.iop_psord, 0x00000002);
+ clrbits32(&immap->im_ioport.iop_pdird, 0x00000001);
+ setbits32(&immap->im_ioport.iop_pdird, 0x00000002);
+
+ clrbits32(&immap->im_cpmux.cmx_scr, (0x00000007 << (4 - data->clk_tx)));
+ clrbits32(&immap->im_cpmux.cmx_scr, (0x00000038 << (4 - data->clk_rx)));
+ setbits32(&immap->im_cpmux.cmx_scr,
+ ((data->clk_tx - 1) << (4 - data->clk_tx)));
+ setbits32(&immap->im_cpmux.cmx_scr,
+ ((data->clk_rx - 1) << (4 - data->clk_rx)));
+
+ iounmap(immap);
+}
+
+static void init_scc4_uart_ioports(struct fs_uart_platform_info *data)
+{
+ cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
+
+ setbits32(&immap->im_ioport.iop_ppard, 0x00000600);
+ clrbits32(&immap->im_ioport.iop_psord, 0x00000600);
+ clrbits32(&immap->im_ioport.iop_pdird, 0x00000200);
+ setbits32(&immap->im_ioport.iop_pdird, 0x00000400);
+
+ clrbits32(&immap->im_cpmux.cmx_scr, (0x00000007 << (4 - data->clk_tx)));
+ clrbits32(&immap->im_cpmux.cmx_scr, (0x00000038 << (4 - data->clk_rx)));
+ setbits32(&immap->im_cpmux.cmx_scr,
+ ((data->clk_tx - 1) << (4 - data->clk_tx)));
+ setbits32(&immap->im_cpmux.cmx_scr,
+ ((data->clk_rx - 1) << (4 - data->clk_rx)));
+
+ iounmap(immap);
+}
+
+void init_scc_ioports(struct fs_uart_platform_info *data)
+{
+ int scc_no = fs_get_scc_index(data->fs_no);
+
+ switch (scc_no) {
+ case 0:
+ init_scc1_uart_ioports(data);
+ data->brg = data->clk_rx;
+ break;
+ case 3:
+ init_scc4_uart_ioports(data);
+ data->brg = data->clk_rx;
+ break;
+ default:
+ printk(KERN_ERR "init_scc_ioports: invalid SCC number\n");
+ return;
+ }
+}
+
+void __init m82xx_board_setup(void)
+{
+ cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
+ struct device_node *np;
+ struct resource r;
+ u32 *bcsr;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np) {
+ printk(KERN_INFO "No memory node in device tree\n");
+ return;
+ }
+ if (of_address_to_resource(np, 1, &r)) {
+ printk(KERN_INFO "No memory reg property [1] in devicetree\n");
+ return;
+ }
+ of_node_put(np);
+ bcsr = ioremap(r.start + 4, sizeof(u32));
+ /* Enable the 2nd UART port */
+ clrbits32(bcsr, BCSR1_RS232_EN2);
+
+#ifdef CONFIG_SERIAL_CPM_SCC1
+ clrbits32((u32 *) & immap->im_scc[0].scc_sccm,
+ UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32((u32 *) & immap->im_scc[0].scc_gsmrl,
+ SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#endif
+
+#ifdef CONFIG_SERIAL_CPM_SCC2
+ clrbits32((u32 *) & immap->im_scc[1].scc_sccm,
+ UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32((u32 *) & immap->im_scc[1].scc_gsmrl,
+ SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#endif
+
+#ifdef CONFIG_SERIAL_CPM_SCC3
+ clrbits32((u32 *) & immap->im_scc[2].scc_sccm,
+ UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32((u32 *) & immap->im_scc[2].scc_gsmrl,
+ SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#endif
+
+#ifdef CONFIG_SERIAL_CPM_SCC4
+ clrbits32((u32 *) & immap->im_scc[3].scc_sccm,
+ UART_SCCM_TX | UART_SCCM_RX);
+ clrbits32((u32 *) & immap->im_scc[3].scc_gsmrl,
+ SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+#endif
+
+ iounmap(bcsr);
+ iounmap(immap);
+}
+
+#ifdef CONFIG_PCI
+static void m82xx_pci_mask_irq(unsigned int irq)
+{
+ int bit = irq - pci_int_base;
+
+ *pci_regs.pci_int_mask_reg |= (1 << (31 - bit));
+ return;
+}
+
+static void m82xx_pci_unmask_irq(unsigned int irq)
+{
+ int bit = irq - pci_int_base;
+
+ *pci_regs.pci_int_mask_reg &= ~(1 << (31 - bit));
+ return;
+}
+
+static void m82xx_pci_mask_and_ack(unsigned int irq)
+{
+ int bit = irq - pci_int_base;
+
+ *pci_regs.pci_int_mask_reg |= (1 << (31 - bit));
+ return;
+}
+
+static void m82xx_pci_end_irq(unsigned int irq)
+{
+ int bit = irq - pci_int_base;
+
+ *pci_regs.pci_int_mask_reg &= ~(1 << (31 - bit));
+ return;
+}
+
+struct hw_interrupt_type m82xx_pci_ic = {
+ .typename = "MPC82xx ADS PCI",
+ .name = "MPC82xx ADS PCI",
+ .enable = m82xx_pci_unmask_irq,
+ .disable = m82xx_pci_mask_irq,
+ .ack = m82xx_pci_mask_and_ack,
+ .end = m82xx_pci_end_irq,
+ .mask = m82xx_pci_mask_irq,
+ .mask_ack = m82xx_pci_mask_and_ack,
+ .unmask = m82xx_pci_unmask_irq,
+ .eoi = m82xx_pci_end_irq,
+};
+
+static void
+m82xx_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned long stat, mask, pend;
+ int bit;
+
+ for (;;) {
+ stat = *pci_regs.pci_int_stat_reg;
+ mask = *pci_regs.pci_int_mask_reg;
+ pend = stat & ~mask & 0xf0000000;
+ if (!pend)
+ break;
+ for (bit = 0; pend != 0; ++bit, pend <<= 1) {
+ if (pend & 0x80000000)
+ __do_IRQ(pci_int_base + bit);
+ }
+ }
+}
+
+static int pci_pic_host_match(struct irq_host *h, struct device_node *node)
+{
+ return node == pci_pic_node;
+}
+
+static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ get_irq_desc(virq)->status |= IRQ_LEVEL;
+ set_irq_chip(virq, &m82xx_pci_ic);
+ return 0;
+}
+
+static void pci_host_unmap(struct irq_host *h, unsigned int virq)
+{
+ /* remove chip and handler */
+ set_irq_chip(virq, NULL);
+}
+
+static struct irq_host_ops pci_pic_host_ops = {
+ .match = pci_pic_host_match,
+ .map = pci_pic_host_map,
+ .unmap = pci_host_unmap,
+};
+
+void m82xx_pci_init_irq(void)
+{
+ int irq;
+ cpm2_map_t *immap;
+ struct device_node *np;
+ struct resource r;
+ const u32 *regs;
+ unsigned int size;
+ const u32 *irq_map;
+ int i;
+ unsigned int irq_max, irq_min;
+
+ if ((np = of_find_node_by_type(NULL, "soc")) == NULL) {
+ printk(KERN_INFO "No SOC node in device tree\n");
+ return;
+ }
+ memset(&r, 0, sizeof(r));
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_INFO "No SOC reg property in device tree\n");
+ return;
+ }
+ immap = ioremap(r.start, sizeof(*immap));
+ of_node_put(np);
+
+ /* install the demultiplexer for the PCI cascade interrupt */
+ np = of_find_node_by_type(NULL, "pci");
+ if (!np) {
+ printk(KERN_INFO "No pci node on device tree\n");
+ iounmap(immap);
+ return;
+ }
+ irq_map = get_property(np, "interrupt-map", &size);
+ if ((!irq_map) || (size <= 7)) {
+ printk(KERN_INFO "No interrupt-map property of pci node\n");
+ iounmap(immap);
+ return;
+ }
+ size /= sizeof(irq_map[0]);
+ for (i = 0, irq_max = 0, irq_min = 512; i < size; i += 7, irq_map += 7) {
+ if (irq_map[5] < irq_min)
+ irq_min = irq_map[5];
+ if (irq_map[5] > irq_max)
+ irq_max = irq_map[5];
+ }
+ pci_int_base = irq_min;
+ irq = irq_of_parse_and_map(np, 0);
+ set_irq_chained_handler(irq, m82xx_pci_irq_demux);
+ of_node_put(np);
+ np = of_find_node_by_type(NULL, "pci-pic");
+ if (!np) {
+ printk(KERN_INFO "No pci pic node on device tree\n");
+ iounmap(immap);
+ return;
+ }
+ pci_pic_node = of_node_get(np);
+ /* PCI interrupt controller registers: status and mask */
+ regs = get_property(np, "reg", &size);
+ if ((!regs) || (size <= 2)) {
+ printk(KERN_INFO "No reg property in pci pic node\n");
+ iounmap(immap);
+ return;
+ }
+ pci_regs.pci_int_stat_reg =
+ ioremap(regs[0], sizeof(*pci_regs.pci_int_stat_reg));
+ pci_regs.pci_int_mask_reg =
+ ioremap(regs[1], sizeof(*pci_regs.pci_int_mask_reg));
+ of_node_put(np);
+ /* configure chip select for PCI interrupt controller */
+ immap->im_memctl.memc_br3 = regs[0] | 0x00001801;
+ immap->im_memctl.memc_or3 = 0xffff8010;
+ /* make PCI IRQ level sensitive */
+ immap->im_intctl.ic_siexr &= ~(1 << (14 - (irq - SIU_INT_IRQ1)));
+
+ /* mask all PCI interrupts */
+ *pci_regs.pci_int_mask_reg |= 0xfff00000;
+ iounmap(immap);
+ pci_pic_host =
+ irq_alloc_host(IRQ_HOST_MAP_LINEAR, irq_max - irq_min + 1,
+ &pci_pic_host_ops, irq_max + 1);
+ return;
+}
+
+static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
+{
+ if (bus == 0 && PCI_SLOT(devfn) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static void
+__init mpc82xx_pcibios_fixup(void)
+{
+ struct pci_dev *dev = NULL;
+
+ for_each_pci_dev(dev) {
+ pci_read_irq_line(dev);
+ }
+}
+
+void __init add_bridge(struct device_node *np)
+{
+ int len;
+ struct pci_controller *hose;
+ struct resource r;
+ const int *bus_range;
+ const void *ptr;
+
+ memset(&r, 0, sizeof(r));
+ if (of_address_to_resource(np, 0, &r)) {
+ printk(KERN_INFO "No PCI reg property in device tree\n");
+ return;
+ }
+ if (!(ptr = get_property(np, "clock-frequency", NULL))) {
+ printk(KERN_INFO "No clock-frequency property in PCI node");
+ return;
+ }
+ pci_clk_frq = *(uint *) ptr;
+ of_node_put(np);
+ bus_range = get_property(np, "bus-range", &len);
+ if (bus_range == NULL || len < 2 * sizeof(int)) {
+ printk(KERN_WARNING "Can't get bus-range for %s, assume"
+ " bus 0\n", np->full_name);
+ }
+
+ pci_assign_all_buses = 1;
+
+ hose = pcibios_alloc_controller();
+
+ if (!hose)
+ return;
+
+ hose->arch_data = np;
+ hose->set_cfg_type = 1;
+
+ hose->first_busno = bus_range ? bus_range[0] : 0;
+ hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->bus_offset = 0;
+
+ hose->set_cfg_type = 1;
+
+ setup_indirect_pci(hose,
+ r.start + offsetof(pci_cpm2_t, pci_cfg_addr),
+ r.start + offsetof(pci_cpm2_t, pci_cfg_data));
+
+ pci_process_bridge_OF_ranges(hose, np, 1);
+}
+#endif
+
+/*
+ * Setup the architecture
+ */
+static void __init mpc82xx_ads_setup_arch(void)
+{
+#ifdef CONFIG_PCI
+ struct device_node *np;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc82xx_ads_setup_arch()", 0);
+ cpm2_reset();
+
+ /* Map I/O region to a 256MB BAT */
+
+ m82xx_board_setup();
+
+#ifdef CONFIG_PCI
+ ppc_md.pci_exclude_device = m82xx_pci_exclude_device;
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ of_node_put(np);
+ ppc_md.pci_map_irq = NULL;
+ ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
+ ppc_md.pcibios_fixup_bus = NULL;
+#endif
+
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc82xx_ads_setup_arch(), finish", 0);
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init mpc82xx_ads_probe(void)
+{
+ /* We always match for now, eventually we should look at
+ * the flat dev tree to ensure this is the board we are
+ * supposed to run on
+ */
+ return 1;
+}
+
+#define RMR_CSRE 0x00000001
+static void m82xx_restart(char *cmd)
+{
+ __volatile__ unsigned char dummy;
+
+ local_irq_disable();
+ ((cpm2_map_t *) cpm2_immr)->im_clkrst.car_rmr |= RMR_CSRE;
+
+ /* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
+ mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
+ dummy = ((cpm2_map_t *) cpm2_immr)->im_clkrst.res[0];
+ printk("Restart failed\n");
+ while (1) ;
+}
+
+static void m82xx_halt(void)
+{
+ local_irq_disable();
+ while (1) ;
+}
+
+define_machine(mpc82xx_ads)
+{
+ .name = "MPC82xx ADS",
+ .probe = mpc82xx_ads_probe,
+ .setup_arch = mpc82xx_ads_setup_arch,
+ .init_IRQ = mpc82xx_ads_pic_init,
+ .show_cpuinfo = mpc82xx_ads_show_cpuinfo,
+ .get_irq = cpm2_get_irq,
+ .calibrate_decr = m82xx_calibrate_decr,
+ .restart = m82xx_restart,.halt = m82xx_halt,
+};
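
The chained handler m82xx_pci_irq_demux() above computes pending = status & ~mask and walks the set bits downward from bit 31, dispatching one interrupt per bit. A self-contained sketch of that demux pattern follows; handle_one() merely stands in for __do_IRQ(), and the status/mask values are invented for the demonstration.

/*
 * Standalone sketch of the PCI cascade demux loop above.
 * uint32_t keeps the left shift well-defined on 64-bit hosts.
 */
#include <stdint.h>
#include <stdio.h>

static void handle_one(unsigned int irq)
{
	printf("dispatch irq %u\n", irq);	/* stand-in for __do_IRQ() */
}

static void demux(uint32_t stat, uint32_t mask, unsigned int irq_base)
{
	uint32_t pend = stat & ~mask & 0xf0000000u;	/* unmasked, pending */
	unsigned int bit;

	for (bit = 0; pend != 0; ++bit, pend <<= 1)
		if (pend & 0x80000000u)
			handle_one(irq_base + bit);
}

int main(void)
{
	/* bits 31 and 29 pending, bit 29 masked -> only irq_base+0 fires */
	demux(0xa0000000u, 0x20000000u, 0);
	return 0;
}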
diff --git a/arch/powerpc/platforms/82xx/pq2ads.h b/arch/powerpc/platforms/82xx/pq2ads.h
new file mode 100644
index 000000000000..fb2f92bcd770
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/pq2ads.h
@@ -0,0 +1,65 @@
+/*
+ * PQ2/mpc8260 board-specific stuff
+ *
+ * A collection of structures, addresses, and values associated with
+ * the Freescale MPC8260ADS/MPC8266ADS-PCI boards.
+ * Copied from the RPX-Classic and SBS8260 stuff.
+ *
+ * Author: Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Originally written by Dan Malek for Motorola MPC8260 family
+ *
+ * Copyright (c) 2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2006 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef __KERNEL__
+#ifndef __MACH_ADS8260_DEFS
+#define __MACH_ADS8260_DEFS
+
+#include <asm/ppcboot.h>
+
+/* For our show_cpuinfo hooks. */
+#define CPUINFO_VENDOR "Freescale Semiconductor"
+#define CPUINFO_MACHINE "PQ2 ADS PowerPC"
+
+/* Backward-compatibility stuff for the drivers */
+#define CPM_MAP_ADDR ((uint)0xf0000000)
+#define CPM_IRQ_OFFSET 0
+
+/* The ADS8260 has 16, 32-bit wide control/status registers, accessed
+ * only on word boundaries.
+ * Not all are used (yet), or are interesting to us (yet).
+ */
+
+/* Things of interest in the CSR.
+ */
+#define BCSR0_LED0 ((uint)0x02000000) /* 0 == on */
+#define BCSR0_LED1 ((uint)0x01000000) /* 0 == on */
+#define BCSR1_FETHIEN ((uint)0x08000000) /* 0 == enable*/
+#define BCSR1_FETH_RST ((uint)0x04000000) /* 0 == reset */
+#define BCSR1_RS232_EN1 ((uint)0x02000000) /* 0 ==enable */
+#define BCSR1_RS232_EN2 ((uint)0x01000000) /* 0 ==enable */
+#define BCSR3_FETHIEN2 ((uint)0x10000000) /* 0 == enable*/
+#define BCSR3_FETH2_RS ((uint)0x80000000) /* 0 == reset */
+
+/* cpm serial driver works with constants below */
+
+#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
+#define SIU_INT_SMC2i ((uint)0x05+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
+#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
+
+void m82xx_pci_init_irq(void);
+void mpc82xx_ads_show_cpuinfo(struct seq_file*);
+void m82xx_calibrate_decr(void);
+
+#endif /* __MACH_ADS8260_DEFS */
+#endif /* __KERNEL__ */
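
The BCSR bits defined above are mostly active-low (e.g. BCSR1_FETHIEN is "0 == enable"), which is why the board code clears them with clrbits32() to enable and sets them to release. Below is a userspace model of those read-modify-write helpers, purely for illustration; the real kernel versions operate on ioremap()ed big-endian MMIO via in_be32()/out_be32().

/*
 * Illustrative model of the clrbits32()/setbits32() helpers used with
 * the BCSR definitions above.  Works on a plain variable, not MMIO.
 */
#include <stdint.h>

static inline void clrbits32_model(uint32_t *reg, uint32_t bits)
{
	*reg &= ~bits;		/* drive the selected bits low */
}

static inline void setbits32_model(uint32_t *reg, uint32_t bits)
{
	*reg |= bits;		/* drive the selected bits high */
}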
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 5fe7b7faf45f..7edb6b461382 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -5,6 +5,13 @@ choice
prompt "Machine Type"
default MPC834x_SYS
+config MPC832x_MDS
+ bool "Freescale MPC832x MDS"
+ select DEFAULT_UIMAGE
+ select QUICC_ENGINE
+ help
+ This option enables support for the MPC832x MDS evaluation board.
+
config MPC834x_SYS
bool "Freescale MPC834x SYS"
select DEFAULT_UIMAGE
@@ -25,12 +32,31 @@ config MPC834x_ITX
Be aware that PCI initialization is the bootloader's
responsibility.
+config MPC8360E_PB
+ bool "Freescale MPC8360E PB"
+ select DEFAULT_UIMAGE
+ select QUICC_ENGINE
+ help
+ This option enables support for the MPC836x EMDS Processor Board.
+
endchoice
+config PPC_MPC832x
+ bool
+ select PPC_UDBG_16550
+ select PPC_INDIRECT_PCI
+ default y if MPC832x_MDS
+
config MPC834x
bool
select PPC_UDBG_16550
select PPC_INDIRECT_PCI
default y if MPC834x_SYS || MPC834x_ITX
+config PPC_MPC836x
+ bool
+ select PPC_UDBG_16550
+ select PPC_INDIRECT_PCI
+ default y if MPC8360E_PB
+
endmenu
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile
index 9387a110d28a..f1aa7e24a938 100644
--- a/arch/powerpc/platforms/83xx/Makefile
+++ b/arch/powerpc/platforms/83xx/Makefile
@@ -5,3 +5,5 @@ obj-y := misc.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MPC834x_SYS) += mpc834x_sys.o
obj-$(CONFIG_MPC834x_ITX) += mpc834x_itx.o
+obj-$(CONFIG_MPC8360E_PB) += mpc8360e_pb.o
+obj-$(CONFIG_MPC832x_MDS) += mpc832x_mds.o
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
new file mode 100644
index 000000000000..54dea9d42dc9
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * MPC832xE MDS board specific routines.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+
+#include "mpc83xx.h"
+#include "mpc832x_mds.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+static u8 *bcsr_regs = NULL;
+
+u8 *get_bcsr(void)
+{
+ return bcsr_regs;
+}
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init mpc832x_sys_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc832x_sys_setup_arch()", 0);
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != 0) {
+ unsigned int *fp =
+ (int *)get_property(np, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 50000000 / HZ;
+ of_node_put(np);
+ }
+
+ /* Map BCSR area */
+ np = of_find_node_by_name(NULL, "bcsr");
+ if (np != 0) {
+ struct resource res;
+
+ of_address_to_resource(np, 0, &res);
+ bcsr_regs = ioremap(res.start, res.end - res.start +1);
+ of_node_put(np);
+ }
+
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_exclude_device = mpc83xx_exclude_device;
+#endif
+
+#ifdef CONFIG_QUICC_ENGINE
+ qe_reset();
+
+ if ((np = of_find_node_by_name(np, "par_io")) != NULL) {
+ par_io_init(np);
+ of_node_put(np);
+
+ for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
+ par_io_of_config(np);
+ }
+
+ if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
+ != NULL){
+ /* Reset the Ethernet PHY */
+ bcsr_regs[9] &= ~0x20;
+ udelay(1000);
+ bcsr_regs[9] |= 0x20;
+ iounmap(bcsr_regs);
+ of_node_put(np);
+ }
+
+#endif /* CONFIG_QUICC_ENGINE */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+}
+
+void __init mpc832x_sys_init_IRQ(void)
+{
+
+ struct device_node *np;
+
+ np = of_find_node_by_type(NULL, "ipic");
+ if (!np)
+ return;
+
+ ipic_init(np, 0);
+
+ /* Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ ipic_set_default_priority();
+ of_node_put(np);
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_node_by_type(NULL, "qeic");
+ if (!np)
+ return;
+
+ qe_ic_init(np, 0);
+ of_node_put(np);
+#endif /* CONFIG_QUICC_ENGINE */
+}
+
+#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
+extern ulong ds1374_get_rtc_time(void);
+extern int ds1374_set_rtc_time(ulong);
+
+static int __init mpc832x_rtc_hookup(void)
+{
+ struct timespec tv;
+
+ ppc_md.get_rtc_time = ds1374_get_rtc_time;
+ ppc_md.set_rtc_time = ds1374_set_rtc_time;
+
+ tv.tv_nsec = 0;
+ tv.tv_sec = (ppc_md.get_rtc_time) ();
+ do_settimeofday(&tv);
+
+ return 0;
+}
+
+late_initcall(mpc832x_rtc_hookup);
+#endif
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init mpc832x_sys_probe(void)
+{
+ char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "model", NULL);
+
+ if (model == NULL)
+ return 0;
+ if (strcmp(model, "MPC8323EMDS"))
+ return 0;
+
+ DBG("%s found\n", model);
+
+ return 1;
+}
+
+define_machine(mpc832x_mds) {
+ .name = "MPC832x MDS",
+ .probe = mpc832x_sys_probe,
+ .setup_arch = mpc832x_sys_setup_arch,
+ .init_IRQ = mpc832x_sys_init_IRQ,
+ .get_irq = ipic_get_irq,
+ .restart = mpc83xx_restart,
+ .time_init = mpc83xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.h b/arch/powerpc/platforms/83xx/mpc832x_mds.h
new file mode 100644
index 000000000000..a49588904f8a
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Description:
+ * MPC832x MDS board specific header.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MACH_MPC832x_MDS_H__
+#define __MACH_MPC832x_MDS_H__
+
+extern u8 *get_bcsr(void);
+
+#endif /* __MACH_MPC832x_MDS_H__ */
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 8c676d763bb0..5446bab08eca 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/arch/powerpc/platforms/83xx/mpc8360e_pb.c b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
new file mode 100644
index 000000000000..1a523c81c06e
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
+ *
+ * Author: Li Yang <LeoLi@freescale.com>
+ * Yin Olivia <Hong-hua.Yin@freescale.com>
+ *
+ * Description:
+ * MPC8360E MDS PB board specific routines.
+ *
+ * Changelog:
+ * Jun 21, 2006 Initial version
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+
+#include <asm/of_device.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/qe.h>
+#include <asm/qe_ic.h>
+
+#include "mpc83xx.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+static u8 *bcsr_regs = NULL;
+
+u8 *get_bcsr(void)
+{
+ return bcsr_regs;
+}
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init mpc8360_sys_setup_arch(void)
+{
+ struct device_node *np;
+
+ if (ppc_md.progress)
+ ppc_md.progress("mpc8360_sys_setup_arch()", 0);
+
+ np = of_find_node_by_type(NULL, "cpu");
+ if (np != 0) {
+ const unsigned int *fp =
+ get_property(np, "clock-frequency", NULL);
+ if (fp != 0)
+ loops_per_jiffy = *fp / HZ;
+ else
+ loops_per_jiffy = 50000000 / HZ;
+ of_node_put(np);
+ }
+
+ /* Map BCSR area */
+ np = of_find_node_by_name(NULL, "bcsr");
+ if (np != 0) {
+ struct resource res;
+
+ of_address_to_resource(np, 0, &res);
+ bcsr_regs = ioremap(res.start, res.end - res.start +1);
+ of_node_put(np);
+ }
+
+#ifdef CONFIG_PCI
+ for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+ add_bridge(np);
+
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_exclude_device = mpc83xx_exclude_device;
+#endif
+
+#ifdef CONFIG_QUICC_ENGINE
+ qe_reset();
+
+ if ((np = of_find_node_by_name(np, "par_io")) != NULL) {
+ par_io_init(np);
+ of_node_put(np);
+
+ for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
+ par_io_of_config(np);
+ }
+
+ if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
+ != NULL){
+ /* Reset the Ethernet PHY */
+ bcsr_regs[9] &= ~0x20;
+ udelay(1000);
+ bcsr_regs[9] |= 0x20;
+ iounmap(bcsr_regs);
+ of_node_put(np);
+ }
+
+#endif /* CONFIG_QUICC_ENGINE */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+}
+
+static int __init mpc8360_declare_of_platform_devices(void)
+{
+ struct device_node *np;
+
+ for (np = NULL; (np = of_find_compatible_node(np, "network",
+ "ucc_geth")) != NULL;) {
+ int ucc_num;
+ char bus_id[BUS_ID_SIZE];
+
+ ucc_num = *((uint *) get_property(np, "device-id", NULL)) - 1;
+ snprintf(bus_id, BUS_ID_SIZE, "ucc_geth.%u", ucc_num);
+ of_platform_device_create(np, bus_id, NULL);
+ }
+
+ return 0;
+}
+device_initcall(mpc8360_declare_of_platform_devices);
+
+void __init mpc8360_sys_init_IRQ(void)
+{
+
+ struct device_node *np;
+
+ np = of_find_node_by_type(NULL, "ipic");
+ if (!np)
+ return;
+
+ ipic_init(np, 0);
+
+ /* Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ ipic_set_default_priority();
+ of_node_put(np);
+
+#ifdef CONFIG_QUICC_ENGINE
+ np = of_find_node_by_type(NULL, "qeic");
+ if (!np)
+ return;
+
+ qe_ic_init(np, 0);
+ of_node_put(np);
+#endif /* CONFIG_QUICC_ENGINE */
+}
+
+#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
+extern ulong ds1374_get_rtc_time(void);
+extern int ds1374_set_rtc_time(ulong);
+
+static int __init mpc8360_rtc_hookup(void)
+{
+ struct timespec tv;
+
+ ppc_md.get_rtc_time = ds1374_get_rtc_time;
+ ppc_md.set_rtc_time = ds1374_set_rtc_time;
+
+ tv.tv_nsec = 0;
+ tv.tv_sec = (ppc_md.get_rtc_time) ();
+ do_settimeofday(&tv);
+
+ return 0;
+}
+
+late_initcall(mpc8360_rtc_hookup);
+#endif
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init mpc8360_sys_probe(void)
+{
+ char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "model", NULL);
+ if (model == NULL)
+ return 0;
+ if (strcmp(model, "MPC8360EPB"))
+ return 0;
+
+ DBG("MPC8360EMDS-PB found\n");
+
+ return 1;
+}
+
+define_machine(mpc8360_sys) {
+ .name = "MPC8360E PB",
+ .probe = mpc8360_sys_probe,
+ .setup_arch = mpc8360_sys_setup_arch,
+ .init_IRQ = mpc8360_sys_init_IRQ,
+ .get_irq = ipic_get_irq,
+ .restart = mpc83xx_restart,
+ .time_init = mpc83xx_time_init,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
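
mpc8360_declare_of_platform_devices() above builds platform bus ids of the form ucc_geth.N from the 1-based "device-id" property of each matching node. A standalone sketch of that naming follows; BUS_ID_SIZE and the sample property value are assumptions for the demonstration.

/*
 * Userspace sketch of the bus_id construction above: a 1-based
 * device-id becomes a 0-based suffix on "ucc_geth.%u".
 */
#include <stdio.h>

#define BUS_ID_SIZE 20	/* assumed, mirrors the kernel constant of the era */

int main(void)
{
	unsigned int device_id = 2;	/* assumed "device-id" property value */
	char bus_id[BUS_ID_SIZE];

	snprintf(bus_id, sizeof(bus_id), "ucc_geth.%u", device_id - 1);
	printf("%s\n", bus_id);		/* -> "ucc_geth.1" */
	return 0;
}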
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 28070e7ae507..d3e669d69c73 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -66,13 +66,12 @@ mpc85xx_pcibios_fixup(void)
#ifdef CONFIG_CPM2
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
{
int cascade_irq;
- while ((cascade_irq = cpm2_get_irq(regs)) >= 0) {
- generic_handle_irq(cascade_irq, regs);
+ while ((cascade_irq = cpm2_get_irq()) >= 0) {
+ generic_handle_irq(cascade_irq);
}
desc->chip->eoi(irq);
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.h b/arch/powerpc/platforms/85xx/mpc85xx_ads.h
index effcbf78f851..46c3532992aa 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.h
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.h
@@ -18,7 +18,6 @@
#ifndef __MACH_MPC85XXADS_H
#define __MACH_MPC85XXADS_H
-#include <linux/config.h>
#include <linux/initrd.h>
#include <sysdev/fsl_soc.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 4c1fede6470e..953cd5dd3f54 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -11,7 +11,6 @@
* option) any later version.
*/
-#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -133,13 +132,12 @@ mpc85xx_cds_pcibios_fixup(void)
#ifdef CONFIG_PPC_I8259
#warning The i8259 PIC support is currently broken
-static void mpc85xx_8259_cascade(unsigned int irq, struct
- irq_desc *desc, struct pt_regs *regs)
+static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cascade_irq = i8259_irq(regs);
+ unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq, regs);
+ generic_handle_irq(cascade_irq);
desc->chip->eoi(irq);
}
@@ -151,8 +149,10 @@ void __init mpc85xx_cds_pic_init(void)
struct mpic *mpic;
struct resource r;
struct device_node *np = NULL;
+#ifdef CONFIG_PPC_I8259
struct device_node *cascade_node = NULL;
int cascade_irq;
+#endif
np = of_find_node_by_type(np, "open-pic");
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index b637e8157f7b..1a1c226ad4d9 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -53,12 +53,11 @@ unsigned long pci_dram_offset = 0;
#ifdef CONFIG_PCI
-static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cascade_irq = i8259_irq(regs);
+ unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq, regs);
+ generic_handle_irq(cascade_irq);
desc->chip->eoi(irq);
}
#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 0c8c7b6ab897..3e430b489bb7 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -16,11 +16,6 @@ config SPU_BASE
bool
default n
-config SPUFS_MMAP
- bool
- depends on SPU_FS && SPARSEMEM
- default y
-
config CBE_RAS
bool "RAS features for bare metal Cell BE"
default y
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 3f3859d12e00..2f194ba29899 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -6,8 +6,6 @@
* (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*/
-
-#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/types.h>
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 6b57a47c5d37..a914c12b4060 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -21,6 +21,12 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * TODO:
+ * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
+ * vs node numbers in the setup code
+ * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
+ * a non-active node to the active node)
*/
#include <linux/interrupt.h>
@@ -44,24 +50,25 @@ struct iic {
u8 target_id;
u8 eoi_stack[16];
int eoi_ptr;
- struct irq_host *host;
+ struct device_node *node;
};
static DEFINE_PER_CPU(struct iic, iic);
#define IIC_NODE_COUNT 2
-static struct irq_host *iic_hosts[IIC_NODE_COUNT];
+static struct irq_host *iic_host;
/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
unsigned char unit = bits.source & 0xf;
+ unsigned char node = bits.source >> 4;
+ unsigned char class = bits.class & 3;
+ /* Decode IPIs */
if (bits.flags & CBE_IIC_IRQ_IPI)
- return IIC_IRQ_IPI0 | (bits.prio >> 4);
- else if (bits.class <= 3)
- return (bits.class << 4) | unit;
+ return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
else
- return IIC_IRQ_INVALID;
+ return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}
static void iic_mask(unsigned int irq)
@@ -86,21 +93,69 @@ static struct irq_chip iic_chip = {
.eoi = iic_eoi,
};
+
+static void iic_ioexc_eoi(unsigned int irq)
+{
+}
+
+static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
+ unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
+ unsigned long bits, ack;
+ int cascade;
+
+ for (;;) {
+ bits = in_be64(&node_iic->iic_is);
+ if (bits == 0)
+ break;
+ /* pre-ack edge interrupts */
+ ack = bits & IIC_ISR_EDGE_MASK;
+ if (ack)
+ out_be64(&node_iic->iic_is, ack);
+ /* handle them */
+ for (cascade = 63; cascade >= 0; cascade--)
+ if (bits & (0x8000000000000000UL >> cascade)) {
+ unsigned int cirq =
+ irq_linear_revmap(iic_host,
+ base | cascade);
+ if (cirq != NO_IRQ)
+ generic_handle_irq(cirq);
+ }
+ /* post-ack level interrupts */
+ ack = bits & ~IIC_ISR_EDGE_MASK;
+ if (ack)
+ out_be64(&node_iic->iic_is, ack);
+ }
+ desc->chip->eoi(irq);
+}
+
+
+static struct irq_chip iic_ioexc_chip = {
+ .typename = " CELL-IOEX",
+ .mask = iic_mask,
+ .unmask = iic_unmask,
+ .eoi = iic_ioexc_eoi,
+};
+
/* Get an IRQ number from the pending state register of the IIC */
-static unsigned int iic_get_irq(struct pt_regs *regs)
+static unsigned int iic_get_irq(void)
{
struct cbe_iic_pending_bits pending;
struct iic *iic;
+ unsigned int virq;
iic = &__get_cpu_var(iic);
*(unsigned long *) &pending =
in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
+ if (!(pending.flags & CBE_IIC_IRQ_VALID))
+ return NO_IRQ;
+ virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
+ if (virq == NO_IRQ)
+ return NO_IRQ;
iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
BUG_ON(iic->eoi_ptr > 15);
- if (pending.flags & CBE_IIC_IRQ_VALID)
- return irq_linear_revmap(iic->host,
- iic_pending_to_hwnum(pending));
- return NO_IRQ;
+ return virq;
}
#ifdef CONFIG_SMP
@@ -108,12 +163,7 @@ static unsigned int iic_get_irq(struct pt_regs *regs)
/* Use the highest interrupt priorities for IPI */
static inline int iic_ipi_to_irq(int ipi)
{
- return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi;
-}
-
-static inline int iic_irq_to_ipi(int irq)
-{
- return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0);
+ return IIC_IRQ_TYPE_IPI + 0xf - ipi;
}
void iic_setup_cpu(void)
@@ -123,7 +173,7 @@ void iic_setup_cpu(void)
void iic_cause_IPI(int cpu, int mesg)
{
- out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4);
+ out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
}
u8 iic_get_target_id(int cpu)
@@ -134,49 +184,33 @@ EXPORT_SYMBOL_GPL(iic_get_target_id);
struct irq_host *iic_get_irq_host(int node)
{
- if (node < 0 || node >= IIC_NODE_COUNT)
- return NULL;
- return iic_hosts[node];
+ return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);
-static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
int ipi = (int)(long)dev_id;
- smp_message_recv(ipi, regs);
+ smp_message_recv(ipi);
return IRQ_HANDLED;
}
-
static void iic_request_ipi(int ipi, const char *name)
{
- int node, virq;
+ int virq;
- for (node = 0; node < IIC_NODE_COUNT; node++) {
- char *rname;
- if (iic_hosts[node] == NULL)
- continue;
- virq = irq_create_mapping(iic_hosts[node],
- iic_ipi_to_irq(ipi));
- if (virq == NO_IRQ) {
- printk(KERN_ERR
- "iic: failed to map IPI %s on node %d\n",
- name, node);
- continue;
- }
- rname = kzalloc(strlen(name) + 16, GFP_KERNEL);
- if (rname)
- sprintf(rname, "%s node %d", name, node);
- else
- rname = (char *)name;
- if (request_irq(virq, iic_ipi_action, IRQF_DISABLED,
- rname, (void *)(long)ipi))
- printk(KERN_ERR
- "iic: failed to request IPI %s on node %d\n",
- name, node);
+ virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
+ if (virq == NO_IRQ) {
+ printk(KERN_ERR
+ "iic: failed to map IPI %s\n", name);
+ return;
}
+ if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
+ (void *)(long)ipi))
+ printk(KERN_ERR
+ "iic: failed to request IPI %s\n", name);
}
void iic_request_IPIs(void)
@@ -193,16 +227,24 @@ void iic_request_IPIs(void)
static int iic_host_match(struct irq_host *h, struct device_node *node)
{
- return h->host_data != NULL && node == h->host_data;
+ return device_is_compatible(node,
+ "IBM,CBEA-Internal-Interrupt-Controller");
}
static int iic_host_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t hw)
{
- if (hw < IIC_IRQ_IPI0)
- set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
- else
+ switch (hw & IIC_IRQ_TYPE_MASK) {
+ case IIC_IRQ_TYPE_IPI:
set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
+ break;
+ case IIC_IRQ_TYPE_IOEXC:
+ set_irq_chip_and_handler(virq, &iic_ioexc_chip,
+ handle_fasteoi_irq);
+ break;
+ default:
+ set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+ }
return 0;
}
@@ -211,11 +253,39 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
- /* Currently, we don't translate anything. That needs to be fixed as
- * we get better defined device-trees. iic interrupts have to be
- * explicitely mapped by whoever needs them
- */
- return -ENODEV;
+ unsigned int node, ext, unit, class;
+ const u32 *val;
+
+ if (!device_is_compatible(ct,
+ "IBM,CBEA-Internal-Interrupt-Controller"))
+ return -ENODEV;
+ if (intsize != 1)
+ return -ENODEV;
+ val = get_property(ct, "#interrupt-cells", NULL);
+ if (val == NULL || *val != 1)
+ return -ENODEV;
+
+ node = intspec[0] >> 24;
+ ext = (intspec[0] >> 16) & 0xff;
+ class = (intspec[0] >> 8) & 0xff;
+ unit = intspec[0] & 0xff;
+
+ /* Check if node is in supported range */
+ if (node > 1)
+ return -EINVAL;
+
+ /* Build up interrupt number, special case for IO exceptions */
+ *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
+ if (unit == IIC_UNIT_IIC && class == 1)
+ *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
+ else
+ *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
+ (class << IIC_IRQ_CLASS_SHIFT) | unit;
+
+ /* Dummy flags, ignored by iic code */
+ *out_flags = IRQ_TYPE_EDGE_RISING;
+
+ return 0;
}
static struct irq_host_ops iic_host_ops = {
@@ -225,7 +295,7 @@ static struct irq_host_ops iic_host_ops = {
};
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
- struct irq_host *host)
+ struct device_node *node)
{
/* XXX FIXME: should locate the linux CPU number from the HW cpu
* number properly. We are lucky for now
@@ -237,19 +307,19 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
iic->eoi_stack[0] = 0xff;
- iic->host = host;
+ iic->node = of_node_get(node);
out_be64(&iic->regs->prio, 0);
- printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n",
- hw_cpu, addr, iic->regs, iic->target_id);
+ printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
+ hw_cpu, iic->target_id, node->full_name);
}
static int __init setup_iic(void)
{
struct device_node *dn;
struct resource r0, r1;
- struct irq_host *host;
- int found = 0;
+ unsigned int node, cascade, found = 0;
+ struct cbe_iic_regs __iomem *node_iic;
const u32 *np;
for (dn = NULL;
@@ -269,19 +339,37 @@ static int __init setup_iic(void)
of_node_put(dn);
return -ENODEV;
}
- host = NULL;
- if (found < IIC_NODE_COUNT) {
- host = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
- IIC_SOURCE_COUNT,
- &iic_host_ops,
- IIC_IRQ_INVALID);
- iic_hosts[found] = host;
- BUG_ON(iic_hosts[found] == NULL);
- iic_hosts[found]->host_data = of_node_get(dn);
- found++;
- }
- init_one_iic(np[0], r0.start, host);
- init_one_iic(np[1], r1.start, host);
+ found++;
+ init_one_iic(np[0], r0.start, dn);
+ init_one_iic(np[1], r1.start, dn);
+
+ /* Setup cascade for IO exceptions. XXX cleanup tricks to get
+ * node vs CPU etc...
+ * Note that we configure the IIC_IRR here with a hard coded
+ * priority of 1. We might want to improve that later.
+ */
+ node = np[0] >> 1;
+ node_iic = cbe_get_cpu_iic_regs(np[0]);
+ cascade = node << IIC_IRQ_NODE_SHIFT;
+ cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
+ cascade |= IIC_UNIT_IIC;
+ cascade = irq_create_mapping(iic_host, cascade);
+ if (cascade == NO_IRQ)
+ continue;
+ /*
+ * irq_data is a generic pointer that gets passed back
+ * to us later, so the forced cast is fine.
+ */
+ set_irq_data(cascade, (void __force *)node_iic);
+ set_irq_chained_handler(cascade , iic_ioexc_cascade);
+ out_be64(&node_iic->iic_ir,
+ (1 << 12) /* priority */ |
+ (node << 4) /* dest node */ |
+ IIC_UNIT_THREAD_0 /* route them to thread 0 */);
+ /* Flush pending (make sure it triggers if there is
+ * anything pending
+ */
+ out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
}
if (found)
@@ -292,6 +380,12 @@ static int __init setup_iic(void)
void __init iic_init_IRQ(void)
{
+ /* Setup an irq host data structure */
+ iic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
+ &iic_host_ops, IIC_IRQ_INVALID);
+ BUG_ON(iic_host == NULL);
+ irq_set_default_host(iic_host);
+
/* Discover and initialize iics */
if (setup_iic() < 0)
panic("IIC: Failed to initialize !\n");
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 5560a92ec3ab..9ba1d3c17b4b 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -2,48 +2,76 @@
#define ASM_CELL_PIC_H
#ifdef __KERNEL__
/*
- * Mapping of IIC pending bits into per-node
- * interrupt numbers.
+ * Mapping of IIC pending bits into per-node interrupt numbers.
*
- * IRQ FF CC SS PP FF CC SS PP Description
+ * Interrupt numbers are in the range 0...0x1ff where the top bit
+ * (0x100) represents the source node. Only 2 nodes are supported with
+ * the current code though it's trivial to extend that if necessary using
+ * higher level bits
*
- * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge
- * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge
- * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0
- * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1
- * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2
- * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI
+ * The bottom 8 bits are split into 2 type bits and 6 data bits that
+ * depend on the type:
*
- * F flags
- * C class
- * S source
- * P Priority
- * + node number
- * * don't care
+ * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source
+ * 01 (0x40 | data) : IO exception. data is the exception number as
+ * defined by bit numbers in IIC_SR
+ * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority)
+ * and node is always 0 (IPIs are per-cpu, their source is
+ * not relevant)
+ * 11 (0xc0 | data) : reserved
*
- * A node consists of a Cell Broadband Engine and an optional
- * south bridge device providing a maximum of 64 IRQs.
- * The south bridge may be connected to either IOIF0
- * or IOIF1.
- * Each SPE is represented as three IRQ lines, one per
- * interrupt class.
- * 16 IRQ numbers are reserved for inter processor
- * interruptions, although these are only used in the
- * range of the first node.
+ * In addition, interrupt number 0x80000000 is defined as always invalid
+ * (that is, the node field is expected to never extend to more than 23 bits)
*
- * This scheme needs 128 IRQ numbers per BIF node ID,
- * which means that with the total of 512 lines
- * available, we can have a maximum of four nodes.
*/
enum {
- IIC_IRQ_INVALID = 0xff,
- IIC_IRQ_MAX = 0x3f,
- IIC_IRQ_EXT_IOIF0 = 0x20,
- IIC_IRQ_EXT_IOIF1 = 0x2b,
- IIC_IRQ_IPI0 = 0x40,
- IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */
- IIC_SOURCE_COUNT = 0x50,
+ IIC_IRQ_INVALID = 0x80000000u,
+ IIC_IRQ_NODE_MASK = 0x100,
+ IIC_IRQ_NODE_SHIFT = 8,
+ IIC_IRQ_MAX = 0x1ff,
+ IIC_IRQ_TYPE_MASK = 0xc0,
+ IIC_IRQ_TYPE_NORMAL = 0x00,
+ IIC_IRQ_TYPE_IOEXC = 0x40,
+ IIC_IRQ_TYPE_IPI = 0x80,
+ IIC_IRQ_CLASS_SHIFT = 4,
+ IIC_IRQ_CLASS_0 = 0x00,
+ IIC_IRQ_CLASS_1 = 0x10,
+ IIC_IRQ_CLASS_2 = 0x20,
+ IIC_SOURCE_COUNT = 0x200,
+
+ /* The various source/dest units are defined here. Avoid using these
+ * definitions if you can; they are mostly here for reference.
+ */
+ IIC_UNIT_SPU_0 = 0x4,
+ IIC_UNIT_SPU_1 = 0x7,
+ IIC_UNIT_SPU_2 = 0x3,
+ IIC_UNIT_SPU_3 = 0x8,
+ IIC_UNIT_SPU_4 = 0x2,
+ IIC_UNIT_SPU_5 = 0x9,
+ IIC_UNIT_SPU_6 = 0x1,
+ IIC_UNIT_SPU_7 = 0xa,
+ IIC_UNIT_IOC_0 = 0x0,
+ IIC_UNIT_IOC_1 = 0xb,
+ IIC_UNIT_THREAD_0 = 0xe, /* target only */
+ IIC_UNIT_THREAD_1 = 0xf, /* target only */
+ IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */
+
+ /* Base numbers for the external interrupts */
+ IIC_IRQ_EXT_IOIF0 =
+ IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0,
+ IIC_IRQ_EXT_IOIF1 =
+ IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1,
+
+ /* Base numbers for the IIC_ISR interrupts */
+ IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63,
+ IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62,
+ IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61,
+ IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60,
+ IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59,
+
+ /* Which bits in IIC_ISR are edge sensitive */
+ IIC_ISR_EDGE_MASK = 0x4ul,
};
extern void iic_init_IRQ(void);
@@ -52,7 +80,6 @@ extern void iic_request_IPIs(void);
extern void iic_setup_cpu(void);
extern u8 iic_get_target_id(int cpu);
-extern struct irq_host *iic_get_irq_host(int node);
extern void spider_init_IRQ(void);
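For reference (illustration only, not part of the patch): under the new encoding above, an interrupt number is simply node, class and unit OR'd together at their shift positions, which is what setup_iic() and spider_find_cascade_and_node() build inline. iic_compose_irq() is a hypothetical helper used here only to make the layout explicit.

	/* Hypothetical helper -- not in the patch -- showing the new IIC number layout */
	static inline unsigned int iic_compose_irq(unsigned int node,
						   unsigned int class,
						   unsigned int unit)
	{
		return (node << IIC_IRQ_NODE_SHIFT) |	/* 0x100 per node */
		       (class << IIC_IRQ_CLASS_SHIFT) |	/* 0x10 per class */
		       unit;				/* source/dest unit, 0x0..0xf */
	}

	/* e.g. iic_compose_irq(1, 2, IIC_UNIT_IOC_0) == 0x120: node 1, class 2, IOIF0 */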
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index d2b20eba5b87..aca4c3db0dde 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -345,8 +345,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
/* node 0 */
iommu = &cell_iommus[0];
- iommu->mapped_base = ioremap(0x20000511000, 0x1000);
- iommu->mapped_mmio_base = ioremap(0x20000510000, 0x1000);
+ iommu->mapped_base = ioremap(0x20000511000ul, 0x1000);
+ iommu->mapped_mmio_base = ioremap(0x20000510000ul, 0x1000);
enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
@@ -358,8 +358,8 @@ static int cell_map_iommu_hardcoded(int num_nodes)
/* node 1 */
iommu = &cell_iommus[1];
- iommu->mapped_base = ioremap(0x30000511000, 0x1000);
- iommu->mapped_mmio_base = ioremap(0x30000510000, 0x1000);
+ iommu->mapped_base = ioremap(0x30000511000ul, 0x1000);
+ iommu->mapped_mmio_base = ioremap(0x30000510000ul, 0x1000);
enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base);
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 033ad6e2827b..0984c7071695 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -1,6 +1,5 @@
#define DEBUG
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 742a03282b44..21a9ebd4978e 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -213,8 +213,7 @@ static struct irq_host_ops spider_host_ops = {
.xlate = spider_host_xlate,
};
-static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
struct spider_pic *pic = desc->handler_data;
unsigned int cs, virq;
@@ -225,7 +224,7 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc,
else
virq = irq_linear_revmap(pic->host, cs);
if (virq != NO_IRQ)
- generic_handle_irq(virq, regs);
+ generic_handle_irq(virq);
desc->chip->eoi(irq);
}
@@ -243,9 +242,7 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
const u32 *imap, *tmp;
int imaplen, intsize, unit;
struct device_node *iic;
- struct irq_host *iic_host;
-#if 0 /* Enable that when we have a way to retreive the node as well */
/* First, we check wether we have a real "interrupts" in the device
* tree in case the device-tree is ever fixed
*/
@@ -253,9 +250,8 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
if (of_irq_map_one(pic->of_node, 0, &oirq) == 0) {
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
- goto bail;
+ return virq;
}
-#endif
/* Now do the horrible hacks */
tmp = get_property(pic->of_node, "#interrupt-cells", NULL);
@@ -289,11 +285,11 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
* the iic host from the iic OF node, but that way I'm still compatible
* with really really old old firmwares for which we don't have a node
*/
- iic_host = iic_get_irq_host(pic->node_id);
- if (iic_host == NULL)
- return NO_IRQ;
/* Manufacture an IIC interrupt number of class 2 */
- virq = irq_create_mapping(iic_host, 0x20 | unit);
+ virq = irq_create_mapping(NULL,
+ (pic->node_id << IIC_IRQ_NODE_SHIFT) |
+ (2 << IIC_IRQ_CLASS_SHIFT) |
+ unit);
if (virq == NO_IRQ)
printk(KERN_ERR "spider_pic: failed to map cascade !");
return virq;
@@ -370,7 +366,7 @@ void __init spider_init_IRQ(void)
} else if (device_is_compatible(dn, "sti,platform-spider-pic")
&& (chip < 2)) {
static long hard_coded_pics[] =
- { 0x24000008000, 0x34000008000 };
+ { 0x24000008000ul, 0x34000008000ul };
r.start = hard_coded_pics[chip];
} else
continue;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 0f5c8ebc7fc3..d0fb959e3ef1 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,11 +25,13 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
+#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
@@ -46,21 +48,21 @@ EXPORT_SYMBOL_GPL(spu_priv1_ops);
static int __spu_trap_invalid_dma(struct spu *spu)
{
pr_debug("%s\n", __FUNCTION__);
- force_sig(SIGBUS, /* info, */ current);
+ spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
return 0;
}
static int __spu_trap_dma_align(struct spu *spu)
{
pr_debug("%s\n", __FUNCTION__);
- force_sig(SIGBUS, /* info, */ current);
+ spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
return 0;
}
static int __spu_trap_error(struct spu *spu)
{
pr_debug("%s\n", __FUNCTION__);
- force_sig(SIGILL, /* info, */ current);
+ spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
return 0;
}
@@ -145,7 +147,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
}
static irqreturn_t
-spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_0(int irq, void *data)
{
struct spu *spu;
@@ -184,7 +186,7 @@ spu_irq_class_0_bottom(struct spu *spu)
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
static irqreturn_t
-spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_1(int irq, void *data)
{
struct spu *spu;
unsigned long stat, mask, dar, dsisr;
@@ -222,7 +224,7 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
static irqreturn_t
-spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_2(int irq, void *data)
{
struct spu *spu;
unsigned long stat;
@@ -317,7 +319,7 @@ static void spu_free_irqs(struct spu *spu)
free_irq(spu->irqs[2], spu);
}
-static LIST_HEAD(spu_list);
+static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);
static void spu_init_channels(struct spu *spu)
@@ -354,32 +356,42 @@ static void spu_init_channels(struct spu *spu)
}
}
-struct spu *spu_alloc(void)
+struct spu *spu_alloc_node(int node)
{
- struct spu *spu;
+ struct spu *spu = NULL;
mutex_lock(&spu_mutex);
- if (!list_empty(&spu_list)) {
- spu = list_entry(spu_list.next, struct spu, list);
+ if (!list_empty(&spu_list[node])) {
+ spu = list_entry(spu_list[node].next, struct spu, list);
list_del_init(&spu->list);
- pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
- } else {
- pr_debug("No SPU left\n");
- spu = NULL;
+ pr_debug("Got SPU %x %d %d\n",
+ spu->isrc, spu->number, spu->node);
+ spu_init_channels(spu);
}
mutex_unlock(&spu_mutex);
- if (spu)
- spu_init_channels(spu);
+ return spu;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_node);
+
+struct spu *spu_alloc(void)
+{
+ struct spu *spu = NULL;
+ int node;
+
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ spu = spu_alloc_node(node);
+ if (spu)
+ break;
+ }
return spu;
}
-EXPORT_SYMBOL_GPL(spu_alloc);
void spu_free(struct spu *spu)
{
mutex_lock(&spu_mutex);
- list_add_tail(&spu->list, &spu_list);
+ list_add_tail(&spu->list, &spu_list[spu->node]);
mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);
@@ -566,32 +578,31 @@ static void spu_unmap(struct spu *spu)
}
/* This function shall be abstracted for HV platforms */
-static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
- struct irq_host *host;
unsigned int isrc;
const u32 *tmp;
- host = iic_get_irq_host(spu->node);
- if (host == NULL)
- return -ENODEV;
-
- /* Get the interrupt source from the device-tree */
+ /* Get the interrupt source unit from the device-tree */
tmp = get_property(np, "isrc", NULL);
if (!tmp)
return -ENODEV;
- spu->isrc = isrc = tmp[0];
+ isrc = tmp[0];
+
+ /* Add the node number */
+ isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
+ spu->isrc = isrc;
/* Now map interrupts of all 3 classes */
- spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc);
- spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc);
- spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc);
+ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
+ spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
+ spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
/* Right now, we only fail if class 2 failed */
return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}
-static int __init spu_map_device(struct spu *spu, struct device_node *node)
+static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
const char *prop;
int ret;
@@ -636,6 +647,88 @@ out:
return ret;
}
+static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
+{
+ struct of_irq oirq;
+ int ret;
+ int i;
+
+ for (i=0; i < 3; i++) {
+ ret = of_irq_map_one(np, i, &oirq);
+ if (ret)
+ goto err;
+
+ ret = -EINVAL;
+ spu->irqs[i] = irq_create_of_mapping(oirq.controller,
+ oirq.specifier, oirq.size);
+ if (spu->irqs[i] == NO_IRQ)
+ goto err;
+ }
+ return 0;
+
+err:
+ pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
+ for (; i >= 0; i--) {
+ if (spu->irqs[i] != NO_IRQ)
+ irq_dispose_mapping(spu->irqs[i]);
+ }
+ return ret;
+}
+
+static int spu_map_resource(struct device_node *node, int nr,
+ void __iomem** virt, unsigned long *phys)
+{
+ struct resource resource = { };
+ int ret;
+
+ ret = of_address_to_resource(node, nr, &resource);
+ if (ret)
+ goto out;
+
+ if (phys)
+ *phys = resource.start;
+ *virt = ioremap(resource.start, resource.end - resource.start);
+ if (!*virt)
+ ret = -EINVAL;
+
+out:
+ return ret;
+}
+
+static int __init spu_map_device(struct spu *spu, struct device_node *node)
+{
+ int ret = -ENODEV;
+ spu->name = get_property(node, "name", NULL);
+ if (!spu->name)
+ goto out;
+
+ ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
+ &spu->local_store_phys);
+ if (ret)
+ goto out;
+ ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
+ &spu->problem_phys);
+ if (ret)
+ goto out_unmap;
+ ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
+ NULL);
+ if (ret)
+ goto out_unmap;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
+ NULL);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ spu_unmap(spu);
+out:
+ pr_debug("failed to map spe %s: %d\n", spu->name, ret);
+ return ret;
+}
+
struct sysdev_class spu_sysdev_class = {
set_kset_name("spu")
};
@@ -688,16 +781,28 @@ static int __init create_spu(struct device_node *spe)
if (!spu)
goto out;
- ret = spu_map_device(spu, spe);
- if (ret)
- goto out_free;
-
spu->node = find_spu_node_id(spe);
+ if (spu->node >= MAX_NUMNODES) {
+ printk(KERN_WARNING "SPE %s on node %d ignored,"
+ " node number too big\n", spe->full_name, spu->node);
+ printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
+ return -ENODEV;
+ }
spu->nid = of_node_to_nid(spe);
if (spu->nid == -1)
spu->nid = 0;
+
+ ret = spu_map_device(spu, spe);
+ /* try old method */
+ if (ret)
+ ret = spu_map_device_old(spu, spe);
+ if (ret)
+ goto out_free;
+
ret = spu_map_interrupts(spu, spe);
if (ret)
+ ret = spu_map_interrupts_old(spu, spe);
+ if (ret)
goto out_unmap;
spin_lock_init(&spu->register_lock);
spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
@@ -707,13 +812,13 @@ static int __init create_spu(struct device_node *spe)
spu->number = number++;
ret = spu_request_irqs(spu);
if (ret)
- goto out_unmap;
+ goto out_unlock;
ret = spu_create_sysdev(spu);
if (ret)
goto out_free_irqs;
- list_add(&spu->list, &spu_list);
+ list_add(&spu->list, &spu_list[spu->node]);
mutex_unlock(&spu_mutex);
pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
@@ -723,9 +828,9 @@ static int __init create_spu(struct device_node *spe)
out_free_irqs:
spu_free_irqs(spu);
-
-out_unmap:
+out_unlock:
mutex_unlock(&spu_mutex);
+out_unmap:
spu_unmap(spu);
out_free:
kfree(spu);
@@ -746,9 +851,13 @@ static void destroy_spu(struct spu *spu)
static void cleanup_spu_base(void)
{
struct spu *spu, *tmp;
+ int node;
+
mutex_lock(&spu_mutex);
- list_for_each_entry_safe(spu, tmp, &spu_list, list)
- destroy_spu(spu);
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
+ destroy_spu(spu);
+ }
mutex_unlock(&spu_mutex);
sysdev_class_unregister(&spu_sysdev_class);
}
@@ -757,13 +866,16 @@ module_exit(cleanup_spu_base);
static int __init init_spu_base(void)
{
struct device_node *node;
- int ret;
+ int i, ret;
/* create sysdev class for spus */
ret = sysdev_class_register(&spu_sysdev_class);
if (ret)
return ret;
+ for (i = 0; i < MAX_NUMNODES; i++)
+ INIT_LIST_HEAD(&spu_list[i]);
+
ret = -ENODEV;
for (node = of_find_node_by_type(NULL, "spe");
node; node = of_find_node_by_type(node, "spe")) {
@@ -775,18 +887,6 @@ static int __init init_spu_base(void)
break;
}
}
- /* in some old firmware versions, the spe is called 'spc', so we
- look for that as well */
- for (node = of_find_node_by_type(NULL, "spc");
- node; node = of_find_node_by_type(node, "spc")) {
- ret = create_spu(node);
- if (ret) {
- printk(KERN_WARNING "%s: Error initializing %s\n",
- __FUNCTION__, node->name);
- cleanup_spu_base();
- break;
- }
- }
return ret;
}
module_init(init_spu_base);
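Illustration only (not part of the patch): with the per-node SPU lists above, a caller that prefers a NUMA-local SPU can ask for the local node first and fall back to the generic allocator; this is the pattern the reworked scheduler in sched.c further down uses via spu_get_idle().

	/* Sketch of the intended usage; error handling and 'spu' use are elided */
	int node = cpu_to_node(raw_smp_processor_id());
	struct spu *spu;

	spu = spu_alloc_node(node);	/* try the local node first */
	if (!spu)
		spu = spu_alloc();	/* fall back to any node */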
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index bb5dc634272c..ecdfbb35f82e 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -2,7 +2,7 @@ obj-y += switch.o
obj-$(CONFIG_SPU_FS) += spufs.o
spufs-y += inode.o file.o context.o syscalls.o
-spufs-y += sched.o backing_ops.o hw_ops.o run.o
+spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS := spu-
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 36439c5e9f2d..034cf6af53a2 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -27,7 +27,7 @@
#include <asm/spu_csa.h>
#include "spufs.h"
-struct spu_context *alloc_spu_context(void)
+struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
struct spu_context *ctx;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
@@ -51,6 +51,8 @@ struct spu_context *alloc_spu_context(void)
ctx->state = SPU_STATE_SAVED;
ctx->ops = &spu_backing_ops;
ctx->owner = get_task_mm(current);
+ if (gang)
+ spu_gang_add_ctx(gang, ctx);
goto out;
out_free:
kfree(ctx);
@@ -67,6 +69,8 @@ void destroy_spu_context(struct kref *kref)
spu_deactivate(ctx);
up_write(&ctx->state_sema);
spu_fini_csa(&ctx->csa);
+ if (ctx->gang)
+ spu_gang_remove_ctx(ctx->gang, ctx);
kfree(ctx);
}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 51fd197ab5dd..0de8e114e6b6 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -36,6 +36,8 @@
#include "spufs.h"
+#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)
+
static int
spufs_mem_open(struct inode *inode, struct file *file)
@@ -88,7 +90,6 @@ spufs_mem_write(struct file *file, const char __user *buffer,
return ret;
}
-#ifdef CONFIG_SPUFS_MMAP
static struct page *
spufs_mem_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
@@ -101,12 +102,16 @@ spufs_mem_mmap_nopage(struct vm_area_struct *vma,
spu_acquire(ctx);
- if (ctx->state == SPU_STATE_SAVED)
+ if (ctx->state == SPU_STATE_SAVED) {
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ & ~(_PAGE_NO_CACHE | _PAGE_GUARDED));
page = vmalloc_to_page(ctx->csa.lscsa->ls + offset);
- else
+ } else {
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ | _PAGE_NO_CACHE | _PAGE_GUARDED);
page = pfn_to_page((ctx->spu->local_store_phys + offset)
>> PAGE_SHIFT);
-
+ }
spu_release(ctx);
if (type)
@@ -133,22 +138,19 @@ spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_mem_mmap_vmops;
return 0;
}
-#endif
static struct file_operations spufs_mem_fops = {
.open = spufs_mem_open,
.read = spufs_mem_read,
.write = spufs_mem_write,
.llseek = generic_file_llseek,
-#ifdef CONFIG_SPUFS_MMAP
.mmap = spufs_mem_mmap,
-#endif
};
-#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
unsigned long address,
- int *type, unsigned long ps_offs)
+ int *type, unsigned long ps_offs,
+ unsigned long ps_size)
{
struct page *page = NOPAGE_SIGBUS;
int fault_type = VM_FAULT_SIGBUS;
@@ -158,7 +160,7 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
int ret;
offset += vma->vm_pgoff << PAGE_SHIFT;
- if (offset >= 0x4000)
+ if (offset >= ps_size)
goto out;
ret = spu_acquire_runnable(ctx);
@@ -179,10 +181,11 @@ static struct page *spufs_ps_nopage(struct vm_area_struct *vma,
return page;
}
+#if SPUFS_MMAP_4K
static struct page *spufs_cntl_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- return spufs_ps_nopage(vma, address, type, 0x4000);
+ return spufs_ps_nopage(vma, address, type, 0x4000, 0x1000);
}
static struct vm_operations_struct spufs_cntl_mmap_vmops = {
@@ -191,17 +194,12 @@ static struct vm_operations_struct spufs_cntl_mmap_vmops = {
/*
* mmap support for problem state control area [0x4000 - 0x4fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
*/
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -209,42 +207,49 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_cntl_mmap_vmops;
return 0;
}
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_cntl_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
-static int spufs_cntl_open(struct inode *inode, struct file *file)
+static u64 spufs_cntl_get(void *data)
{
- struct spufs_inode_info *i = SPUFS_I(inode);
- struct spu_context *ctx = i->i_ctx;
+ struct spu_context *ctx = data;
+ u64 val;
- file->private_data = ctx;
- file->f_mapping = inode->i_mapping;
- ctx->cntl = inode->i_mapping;
- return 0;
+ spu_acquire(ctx);
+ val = ctx->ops->status_read(ctx);
+ spu_release(ctx);
+
+ return val;
}
-static ssize_t
-spufs_cntl_read(struct file *file, char __user *buffer,
- size_t size, loff_t *pos)
+static void spufs_cntl_set(void *data, u64 val)
{
- /* FIXME: read from spu status */
- return -EINVAL;
+ struct spu_context *ctx = data;
+
+ spu_acquire(ctx);
+ ctx->ops->runcntl_write(ctx, val);
+ spu_release(ctx);
}
-static ssize_t
-spufs_cntl_write(struct file *file, const char __user *buffer,
- size_t size, loff_t *pos)
+static int spufs_cntl_open(struct inode *inode, struct file *file)
{
- /* FIXME: write to runctl bit */
- return -EINVAL;
+ struct spufs_inode_info *i = SPUFS_I(inode);
+ struct spu_context *ctx = i->i_ctx;
+
+ file->private_data = ctx;
+ file->f_mapping = inode->i_mapping;
+ ctx->cntl = inode->i_mapping;
+ return simple_attr_open(inode, file, spufs_cntl_get,
+ spufs_cntl_set, "0x%08lx");
}
static struct file_operations spufs_cntl_fops = {
.open = spufs_cntl_open,
- .read = spufs_cntl_read,
- .write = spufs_cntl_write,
-#ifdef CONFIG_SPUFS_MMAP
+ .release = simple_attr_close,
+ .read = simple_attr_read,
+ .write = simple_attr_write,
.mmap = spufs_cntl_mmap,
-#endif
};
static int
@@ -356,27 +361,54 @@ static int spufs_pipe_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
}
+/*
+ * Read as many bytes from the mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_data;
- int ret;
+ u32 mbox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ udata = (void __user *)buf;
+
spu_acquire(ctx);
- ret = ctx->ops->mbox_read(ctx, &mbox_data);
+ for (count = 0; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = ctx->ops->mbox_read(ctx, &mbox_data);
+ if (ret == 0)
+ break;
+
+ /*
+ * at the end of the mapped area, we can fault
+ * but still need to return the data we have
+ * read successfully so far.
+ */
+ ret = __put_user(mbox_data, udata);
+ if (ret) {
+ if (!count)
+ count = -EFAULT;
+ break;
+ }
+ }
spu_release(ctx);
- if (!ret)
- return -EAGAIN;
-
- if (copy_to_user(buf, &mbox_data, sizeof mbox_data))
- return -EFAULT;
+ if (!count)
+ count = -EAGAIN;
- return 4;
+ return count;
}
static struct file_operations spufs_mbox_fops = {
@@ -432,36 +464,70 @@ void spufs_ibox_callback(struct spu *spu)
kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
}
+/*
+ * Read as many bytes from the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - no more data available in the mailbox
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * any data is available, but return when we have been able to
+ * read something.
+ */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 ibox_data;
- ssize_t ret;
+ u32 ibox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, buf, len))
+ return -EFAULT;
+
+ udata = (void __user *)buf;
+
spu_acquire(ctx);
- ret = 0;
+ /* wait only for the first element */
+ count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_ibox_read(ctx, &ibox_data))
- ret = -EAGAIN;
+ count = -EAGAIN;
} else {
- ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
+ count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
}
+ if (count)
+ goto out;
- spu_release(ctx);
+ /* if we can't write at all, return -EFAULT */
+ count = __put_user(ibox_data, udata);
+ if (count)
+ goto out;
- if (ret)
- return ret;
+ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = ctx->ops->ibox_read(ctx, &ibox_data);
+ if (ret == 0)
+ break;
+ /*
+ * at the end of the mapped area, we can fault
+ * but still need to return the data we have
+ * read successfully so far.
+ */
+ ret = __put_user(ibox_data, udata);
+ if (ret)
+ break;
+ }
- ret = 4;
- if (copy_to_user(buf, &ibox_data, sizeof ibox_data))
- ret = -EFAULT;
+out:
+ spu_release(ctx);
- return ret;
+ return count;
}
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
@@ -534,32 +600,67 @@ void spufs_wbox_callback(struct spu *spu)
kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
}
+/*
+ * Write as many bytes to the interrupt mailbox as possible, until
+ * one of the conditions becomes true:
+ *
+ * - the mailbox is full
+ * - end of the user provided buffer
+ * - end of the mapped area
+ *
+ * If the file is opened without O_NONBLOCK, we wait here until
+ * space is available, but return when we have been able to
+ * write something.
+ */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 wbox_data;
- int ret;
+ u32 wbox_data, __user *udata;
+ ssize_t count;
if (len < 4)
return -EINVAL;
- if (copy_from_user(&wbox_data, buf, sizeof wbox_data))
+ udata = (void __user *)buf;
+ if (!access_ok(VERIFY_READ, buf, len))
+ return -EFAULT;
+
+ if (__get_user(wbox_data, udata))
return -EFAULT;
spu_acquire(ctx);
- ret = 0;
+ /*
+ * make sure we can at least write one element, by waiting
+ * in case of !O_NONBLOCK
+ */
+ count = 0;
if (file->f_flags & O_NONBLOCK) {
if (!spu_wbox_write(ctx, wbox_data))
- ret = -EAGAIN;
+ count = -EAGAIN;
} else {
- ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
+ count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
}
- spu_release(ctx);
+ if (count)
+ goto out;
+
+ /* write as much as possible */
+ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
+ int ret;
+ ret = __get_user(wbox_data, udata);
+ if (ret)
+ break;
+
+ ret = spu_wbox_write(ctx, wbox_data);
+ if (ret == 0)
+ break;
+ }
- return ret ? ret : sizeof wbox_data;
+out:
+ spu_release(ctx);
+ return count;
}
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
@@ -657,11 +758,19 @@ static ssize_t spufs_signal1_write(struct file *file, const char __user *buf,
return 4;
}
-#ifdef CONFIG_SPUFS_MMAP
static struct page *spufs_signal1_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- return spufs_ps_nopage(vma, address, type, 0x14000);
+#if PAGE_SIZE == 0x1000
+ return spufs_ps_nopage(vma, address, type, 0x14000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+ /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+ * signal 1 and 2 area
+ */
+ return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
}
static struct vm_operations_struct spufs_signal1_mmap_vmops = {
@@ -680,15 +789,12 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_signal1_mmap_vmops;
return 0;
}
-#endif
static struct file_operations spufs_signal1_fops = {
.open = spufs_signal1_open,
.read = spufs_signal1_read,
.write = spufs_signal1_write,
-#ifdef CONFIG_SPUFS_MMAP
.mmap = spufs_signal1_mmap,
-#endif
};
static int spufs_signal2_open(struct inode *inode, struct file *file)
@@ -743,11 +849,20 @@ static ssize_t spufs_signal2_write(struct file *file, const char __user *buf,
return 4;
}
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
static struct page *spufs_signal2_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- return spufs_ps_nopage(vma, address, type, 0x1c000);
+#if PAGE_SIZE == 0x1000
+ return spufs_ps_nopage(vma, address, type, 0x1c000, 0x1000);
+#elif PAGE_SIZE == 0x10000
+ /* For 64k pages, both signal1 and signal2 can be used to mmap the whole
+ * signal 1 and 2 area
+ */
+ return spufs_ps_nopage(vma, address, type, 0x10000, 0x10000);
+#else
+#error unsupported page size
+#endif
}
static struct vm_operations_struct spufs_signal2_mmap_vmops = {
@@ -767,15 +882,15 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_signal2_mmap_vmops;
return 0;
}
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_signal2_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
static struct file_operations spufs_signal2_fops = {
.open = spufs_signal2_open,
.read = spufs_signal2_read,
.write = spufs_signal2_write,
-#ifdef CONFIG_SPUFS_MMAP
.mmap = spufs_signal2_mmap,
-#endif
};
static void spufs_signal1_type_set(void *data, u64 val)
@@ -824,11 +939,11 @@ static u64 spufs_signal2_type_get(void *data)
DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get,
spufs_signal2_type_set, "%llu");
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
static struct page *spufs_mss_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- return spufs_ps_nopage(vma, address, type, 0x0000);
+ return spufs_ps_nopage(vma, address, type, 0x0000, 0x1000);
}
static struct vm_operations_struct spufs_mss_mmap_vmops = {
@@ -837,17 +952,12 @@ static struct vm_operations_struct spufs_mss_mmap_vmops = {
/*
* mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
*/
static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
{
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -855,7 +965,9 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_mss_mmap_vmops;
return 0;
}
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_mss_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
static int spufs_mss_open(struct inode *inode, struct file *file)
{
@@ -867,17 +979,54 @@ static int spufs_mss_open(struct inode *inode, struct file *file)
static struct file_operations spufs_mss_fops = {
.open = spufs_mss_open,
-#ifdef CONFIG_SPUFS_MMAP
.mmap = spufs_mss_mmap,
-#endif
+};
+
+static struct page *spufs_psmap_mmap_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ return spufs_ps_nopage(vma, address, type, 0x0000, 0x20000);
+}
+
+static struct vm_operations_struct spufs_psmap_mmap_vmops = {
+ .nopage = spufs_psmap_mmap_nopage,
+};
+
+/*
+ * mmap support for full problem state area [0x00000 - 0x1ffff].
+ */
+static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
+ | _PAGE_NO_CACHE | _PAGE_GUARDED);
+
+ vma->vm_ops = &spufs_psmap_mmap_vmops;
+ return 0;
+}
+
+static int spufs_psmap_open(struct inode *inode, struct file *file)
+{
+ struct spufs_inode_info *i = SPUFS_I(inode);
+
+ file->private_data = i->i_ctx;
+ return nonseekable_open(inode, file);
+}
+
+static struct file_operations spufs_psmap_fops = {
+ .open = spufs_psmap_open,
+ .mmap = spufs_psmap_mmap,
};
-#ifdef CONFIG_SPUFS_MMAP
+#if SPUFS_MMAP_4K
static struct page *spufs_mfc_mmap_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- return spufs_ps_nopage(vma, address, type, 0x3000);
+ return spufs_ps_nopage(vma, address, type, 0x3000, 0x1000);
}
static struct vm_operations_struct spufs_mfc_mmap_vmops = {
@@ -886,17 +1035,12 @@ static struct vm_operations_struct spufs_mfc_mmap_vmops = {
/*
* mmap support for problem state MFC DMA area [0x0000 - 0x0fff].
- * Mapping this area requires that the application have CAP_SYS_RAWIO,
- * as these registers require special care when read/writing.
*/
static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
| _PAGE_NO_CACHE | _PAGE_GUARDED);
@@ -904,7 +1048,9 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_ops = &spufs_mfc_mmap_vmops;
return 0;
}
-#endif
+#else /* SPUFS_MMAP_4K */
+#define spufs_mfc_mmap NULL
+#endif /* !SPUFS_MMAP_4K */
static int spufs_mfc_open(struct inode *inode, struct file *file)
{
@@ -1194,9 +1340,7 @@ static struct file_operations spufs_mfc_fops = {
.flush = spufs_mfc_flush,
.fsync = spufs_mfc_fsync,
.fasync = spufs_mfc_fasync,
-#ifdef CONFIG_SPUFS_MMAP
.mmap = spufs_mfc_mmap,
-#endif
};
static void spufs_npc_set(void *data, u64 val)
@@ -1344,6 +1488,21 @@ static u64 spufs_id_get(void *data)
}
DEFINE_SIMPLE_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n")
+static u64 spufs_object_id_get(void *data)
+{
+ struct spu_context *ctx = data;
+ return ctx->object_id;
+}
+
+static void spufs_object_id_set(void *data, u64 id)
+{
+ struct spu_context *ctx = data;
+ ctx->object_id = id;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get,
+ spufs_object_id_set, "0x%llx\n");
+
struct tree_descr spufs_dir_contents[] = {
{ "mem", &spufs_mem_fops, 0666, },
{ "regs", &spufs_regs_fops, 0666, },
@@ -1367,6 +1526,8 @@ struct tree_descr spufs_dir_contents[] = {
{ "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, },
{ "event_mask", &spufs_event_mask_ops, 0666, },
{ "srr0", &spufs_srr0_ops, 0666, },
+ { "psmap", &spufs_psmap_fops, 0666, },
{ "phys-id", &spufs_id_ops, 0666, },
+ { "object-id", &spufs_object_id_ops, 0666, },
{},
};
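Illustration only (not part of the patch): with the reworked mailbox read path above, a single read() on the mbox file can now return several 32-bit entries at once rather than exactly one, so user space should treat the byte count as a multiple of 4. The spufs path below is a placeholder for an already-created context directory.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t word[8];
		ssize_t n;
		int fd = open("/spu/example/mbox", O_RDONLY);	/* example path */

		if (fd < 0)
			return 1;
		n = read(fd, word, sizeof(word));	/* multiple of 4 bytes; fails with EAGAIN if empty */
		if (n > 0)
			printf("got %zd mailbox words\n", n / 4);
		close(fd);
		return 0;
	}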
diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c
new file mode 100644
index 000000000000..212ea78f9051
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/gang.c
@@ -0,0 +1,81 @@
+/*
+ * SPU file system
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
+ *
+ * Author: Arnd Bergmann <arndb@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "spufs.h"
+
+struct spu_gang *alloc_spu_gang(void)
+{
+ struct spu_gang *gang;
+
+ gang = kzalloc(sizeof *gang, GFP_KERNEL);
+ if (!gang)
+ goto out;
+
+ kref_init(&gang->kref);
+ mutex_init(&gang->mutex);
+ INIT_LIST_HEAD(&gang->list);
+
+out:
+ return gang;
+}
+
+static void destroy_spu_gang(struct kref *kref)
+{
+ struct spu_gang *gang;
+ gang = container_of(kref, struct spu_gang, kref);
+ WARN_ON(gang->contexts || !list_empty(&gang->list));
+ kfree(gang);
+}
+
+struct spu_gang *get_spu_gang(struct spu_gang *gang)
+{
+ kref_get(&gang->kref);
+ return gang;
+}
+
+int put_spu_gang(struct spu_gang *gang)
+{
+ return kref_put(&gang->kref, &destroy_spu_gang);
+}
+
+void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx)
+{
+ mutex_lock(&gang->mutex);
+ ctx->gang = get_spu_gang(gang);
+ list_add(&ctx->gang_list, &gang->list);
+ gang->contexts++;
+ mutex_unlock(&gang->mutex);
+}
+
+void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx)
+{
+ mutex_lock(&gang->mutex);
+ WARN_ON(ctx->gang != gang);
+ list_del_init(&ctx->gang_list);
+ gang->contexts--;
+ mutex_unlock(&gang->mutex);
+
+ put_spu_gang(gang);
+}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 3950ddccb2c8..427d00a4f6a0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -50,6 +50,10 @@ spufs_alloc_inode(struct super_block *sb)
ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL);
if (!ei)
return NULL;
+
+ ei->i_gang = NULL;
+ ei->i_ctx = NULL;
+
return &ei->vfs_inode;
}
@@ -128,14 +132,19 @@ out:
static void
spufs_delete_inode(struct inode *inode)
{
- if (SPUFS_I(inode)->i_ctx)
- put_spu_context(SPUFS_I(inode)->i_ctx);
+ struct spufs_inode_info *ei = SPUFS_I(inode);
+
+ if (ei->i_ctx)
+ put_spu_context(ei->i_ctx);
+ if (ei->i_gang)
+ put_spu_gang(ei->i_gang);
clear_inode(inode);
}
static void spufs_prune_dir(struct dentry *dir)
{
struct dentry *dentry, *tmp;
+
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
spin_lock(&dcache_lock);
@@ -156,13 +165,13 @@ static void spufs_prune_dir(struct dentry *dir)
mutex_unlock(&dir->d_inode->i_mutex);
}
-/* Caller must hold root->i_mutex */
-static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry)
+/* Caller must hold parent->i_mutex */
+static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
/* remove all entries */
- spufs_prune_dir(dir_dentry);
+ spufs_prune_dir(dir);
- return simple_rmdir(root, dir_dentry);
+ return simple_rmdir(parent, dir);
}
static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files,
@@ -191,17 +200,17 @@ out:
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct spu_context *ctx;
- struct inode *dir;
- struct dentry *dentry;
+ struct inode *parent;
+ struct dentry *dir;
int ret;
- dentry = file->f_dentry;
- dir = dentry->d_parent->d_inode;
- ctx = SPUFS_I(dentry->d_inode)->i_ctx;
+ dir = file->f_dentry;
+ parent = dir->d_parent->d_inode;
+ ctx = SPUFS_I(dir->d_inode)->i_ctx;
- mutex_lock(&dir->i_mutex);
- ret = spufs_rmdir(dir, dentry);
- mutex_unlock(&dir->i_mutex);
+ mutex_lock(&parent->i_mutex);
+ ret = spufs_rmdir(parent, dir);
+ mutex_unlock(&parent->i_mutex);
WARN_ON(ret);
/* We have to give up the mm_struct */
@@ -224,7 +233,8 @@ struct file_operations spufs_context_fops = {
};
static int
-spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
+ int mode)
{
int ret;
struct inode *inode;
@@ -239,11 +249,13 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
inode->i_gid = dir->i_gid;
inode->i_mode &= S_ISGID;
}
- ctx = alloc_spu_context();
+ ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
if (!ctx)
goto out_iput;
+ ctx->flags = flags;
+
inode->i_op = &spufs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
@@ -289,24 +301,177 @@ out:
return ret;
}
+static int spufs_create_context(struct inode *inode,
+ struct dentry *dentry,
+ struct vfsmount *mnt, int flags, int mode)
+{
+ int ret;
+
+ ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
+ if (ret)
+ goto out_unlock;
+
+ /*
+ * get references for dget and mntget, will be released
+ * in error path of *_open().
+ */
+ ret = spufs_context_open(dget(dentry), mntget(mnt));
+ if (ret < 0) {
+ WARN_ON(spufs_rmdir(inode, dentry));
+ mutex_unlock(&inode->i_mutex);
+ spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
+ goto out;
+ }
+
+out_unlock:
+ mutex_unlock(&inode->i_mutex);
+out:
+ dput(dentry);
+ return ret;
+}
+
+static int spufs_rmgang(struct inode *root, struct dentry *dir)
+{
+ /* FIXME: this fails if the dir is not empty,
+ which causes a leak of gangs. */
+ return simple_rmdir(root, dir);
+}
+
+static int spufs_gang_close(struct inode *inode, struct file *file)
+{
+ struct inode *parent;
+ struct dentry *dir;
+ int ret;
+
+ dir = file->f_dentry;
+ parent = dir->d_parent->d_inode;
+
+ ret = spufs_rmgang(parent, dir);
+ WARN_ON(ret);
+
+ return dcache_dir_close(inode, file);
+}
+
+struct file_operations spufs_gang_fops = {
+ .open = dcache_dir_open,
+ .release = spufs_gang_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .readdir = dcache_readdir,
+ .fsync = simple_sync_file,
+};
+
+static int
+spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
+{
+ int ret;
+ struct inode *inode;
+ struct spu_gang *gang;
+
+ ret = -ENOSPC;
+ inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
+ if (!inode)
+ goto out;
+
+ ret = 0;
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ inode->i_mode &= S_ISGID;
+ }
+ gang = alloc_spu_gang();
+ SPUFS_I(inode)->i_ctx = NULL;
+ SPUFS_I(inode)->i_gang = gang;
+ if (!gang)
+ goto out_iput;
+
+ inode->i_op = &spufs_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ dir->i_nlink++;
+ dentry->d_inode->i_nlink++;
+ return ret;
+
+out_iput:
+ iput(inode);
+out:
+ return ret;
+}
+
+static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
+{
+ int ret;
+ struct file *filp;
+
+ ret = get_unused_fd();
+ if (ret < 0) {
+ dput(dentry);
+ mntput(mnt);
+ goto out;
+ }
+
+ filp = dentry_open(dentry, mnt, O_RDONLY);
+ if (IS_ERR(filp)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(filp);
+ goto out;
+ }
+
+ filp->f_op = &spufs_gang_fops;
+ fd_install(ret, filp);
+out:
+ return ret;
+}
+
+static int spufs_create_gang(struct inode *inode,
+ struct dentry *dentry,
+ struct vfsmount *mnt, int mode)
+{
+ int ret;
+
+ ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
+ if (ret)
+ goto out;
+
+ /*
+ * get references for dget and mntget, will be released
+ * in error path of *_open().
+ */
+ ret = spufs_gang_open(dget(dentry), mntget(mnt));
+ if (ret < 0)
+ WARN_ON(spufs_rmgang(inode, dentry));
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ dput(dentry);
+ return ret;
+}
+
+
static struct file_system_type spufs_type;
-long spufs_create_thread(struct nameidata *nd,
- unsigned int flags, mode_t mode)
+long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode)
{
struct dentry *dentry;
int ret;
- /* need to be at the root of spufs */
ret = -EINVAL;
- if (nd->dentry->d_sb->s_type != &spufs_type ||
- nd->dentry != nd->dentry->d_sb->s_root)
+ /* check if we are on spufs */
+ if (nd->dentry->d_sb->s_type != &spufs_type)
goto out;
- /* all flags are reserved */
- if (flags)
+ /* don't accept undefined flags */
+ if (flags & (~SPU_CREATE_FLAG_ALL))
goto out;
+ /* only threads can be underneath a gang */
+ if (nd->dentry != nd->dentry->d_sb->s_root) {
+ if ((flags & SPU_CREATE_GANG) ||
+ !SPUFS_I(nd->dentry->d_inode)->i_gang)
+ goto out;
+ }
+
dentry = lookup_create(nd, 1);
ret = PTR_ERR(dentry);
if (IS_ERR(dentry))
@@ -317,22 +482,13 @@ long spufs_create_thread(struct nameidata *nd,
goto out_dput;
mode &= ~current->fs->umask;
- ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO);
- if (ret)
- goto out_dput;
- /*
- * get references for dget and mntget, will be released
- * in error path of *_open().
- */
- ret = spufs_context_open(dget(dentry), mntget(nd->mnt));
- if (ret < 0) {
- WARN_ON(spufs_rmdir(nd->dentry->d_inode, dentry));
- mutex_unlock(&nd->dentry->d_inode->i_mutex);
- spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
- dput(dentry);
- goto out;
- }
+ if (flags & SPU_CREATE_GANG)
+ return spufs_create_gang(nd->dentry->d_inode,
+ dentry, nd->mnt, mode);
+ else
+ return spufs_create_context(nd->dentry->d_inode,
+ dentry, nd->mnt, flags, mode);
out_dput:
dput(dentry);
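Illustration only (not part of the patch): the spufs_create() rework above means a gang is created with SPU_CREATE_GANG at (or below) the spufs root, and contexts are then created underneath it. A user-space sketch via the raw syscall; the spu_create_call() wrapper name and the /spu mount point are assumptions, and SPU_CREATE_GANG is taken from the corresponding spu header.

	#include <sys/syscall.h>
	#include <sys/stat.h>
	#include <unistd.h>

	static long spu_create_call(const char *path, unsigned int flags, mode_t mode)
	{
		return syscall(__NR_spu_create, path, flags, mode);	/* powerpc only */
	}

	int main(void)
	{
		/* create a gang directory, then a context (thread) inside it */
		int gang = spu_create_call("/spu/mygang", SPU_CREATE_GANG, 0755);
		int ctx  = spu_create_call("/spu/mygang/thread0", 0, 0755);

		return (gang < 0 || ctx < 0);
	}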
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 483c8b76232c..63df8cf4ba16 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -14,6 +14,26 @@ void spufs_stop_callback(struct spu *spu)
wake_up_all(&ctx->stop_wq);
}
+void spufs_dma_callback(struct spu *spu, int type)
+{
+ struct spu_context *ctx = spu->ctx;
+
+ if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
+ ctx->event_return |= type;
+ wake_up_all(&ctx->stop_wq);
+ } else {
+ switch (type) {
+ case SPE_EVENT_DMA_ALIGNMENT:
+ case SPE_EVENT_INVALID_DMA:
+ force_sig(SIGBUS, /* info, */ current);
+ break;
+ case SPE_EVENT_SPE_ERROR:
+ force_sig(SIGILL, /* info */ current);
+ break;
+ }
+ }
+}
+
static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
{
struct spu *spu;
@@ -28,8 +48,7 @@ static inline int spu_stopped(struct spu_context *ctx, u32 * stat)
return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0;
}
-static inline int spu_run_init(struct spu_context *ctx, u32 * npc,
- u32 * status)
+static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
{
int ret;
@@ -72,7 +91,7 @@ static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc,
SPU_STATUS_STOPPED_BY_HALT)) {
return *status;
}
- if ((ret = spu_run_init(ctx, npc, status)) != 0)
+ if ((ret = spu_run_init(ctx, npc)) != 0)
return ret;
return 0;
}
@@ -177,46 +196,49 @@ static inline int spu_process_events(struct spu_context *ctx)
}
long spufs_run_spu(struct file *file, struct spu_context *ctx,
- u32 * npc, u32 * status)
+ u32 *npc, u32 *event)
{
int ret;
+ u32 status;
if (down_interruptible(&ctx->run_sema))
return -ERESTARTSYS;
- ret = spu_run_init(ctx, npc, status);
+ ctx->event_return = 0;
+ ret = spu_run_init(ctx, npc);
if (ret)
goto out;
do {
- ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
+ ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
if (unlikely(ret))
break;
- if ((*status & SPU_STATUS_STOPPED_BY_STOP) &&
- (*status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
+ if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
+ (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
ret = spu_process_callback(ctx);
if (ret)
break;
- *status &= ~SPU_STATUS_STOPPED_BY_STOP;
+ status &= ~SPU_STATUS_STOPPED_BY_STOP;
}
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
- ret = spu_reacquire_runnable(ctx, npc, status);
+ ret = spu_reacquire_runnable(ctx, npc, &status);
if (ret)
goto out;
continue;
}
ret = spu_process_events(ctx);
- } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP |
+ } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
SPU_STATUS_STOPPED_BY_HALT)));
ctx->ops->runcntl_stop(ctx);
- ret = spu_run_fini(ctx, npc, status);
+ ret = spu_run_fini(ctx, npc, &status);
if (!ret)
- ret = *status;
+ ret = status;
spu_yield(ctx);
out:
+ *event = ctx->event_return;
up(&ctx->run_sema);
return ret;
}
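Illustration only (not part of the patch): when the context was created with SPU_CREATE_EVENTS_ENABLED, the DMA/error conditions handled by spufs_dma_callback() above come back through the event word filled in by spufs_run_spu() instead of a signal, so a runner loop can test the SPE_EVENT_* bits after the run syscall returns. spu_run_call() and ctx_fd are hypothetical placeholders for the spu_run syscall wrapper and an open context.

	/* Sketch of the user-visible side of the event_return plumbing */
	uint32_t npc = 0, event = 0;
	int status = spu_run_call(ctx_fd, &npc, &event);

	if (event & SPE_EVENT_INVALID_DMA)
		fprintf(stderr, "SPE reported an invalid DMA, status 0x%x\n", status);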
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 1350294484b6..bd6fe4b7a84b 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -3,11 +3,7 @@
* Copyright (C) IBM 2005
* Author: Mark Nutter <mnutter@us.ibm.com>
*
- * SPU scheduler, based on Linux thread priority. For now use
- * a simple "cooperative" yield model with no preemption. SPU
- * scheduling will eventually be preemptive: When a thread with
- * a higher static priority gets ready to run, then an active SPU
- * context will be preempted and returned to the waitq.
+ * 2006-03-31 NUMA domains added.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,6 +33,9 @@
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
+#include <linux/numa.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -49,128 +48,59 @@
#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
- atomic_t nr_blocked;
unsigned long bitmap[SPU_BITMAP_SIZE];
wait_queue_head_t waitq[MAX_PRIO];
+ struct list_head active_list[MAX_NUMNODES];
+ struct mutex active_mutex[MAX_NUMNODES];
};
-/* spu_runqueue - This is the main runqueue data structure for SPUs. */
-struct spu_runqueue {
- struct semaphore sem;
- unsigned long nr_active;
- unsigned long nr_idle;
- unsigned long nr_switches;
- struct list_head active_list;
- struct list_head idle_list;
- struct spu_prio_array prio;
-};
-
-static struct spu_runqueue *spu_runqueues = NULL;
-
-static inline struct spu_runqueue *spu_rq(void)
-{
- /* Future: make this a per-NODE array,
- * and use cpu_to_node(smp_processor_id())
- */
- return spu_runqueues;
-}
+static struct spu_prio_array *spu_prio;
-static inline struct spu *del_idle(struct spu_runqueue *rq)
+static inline int node_allowed(int node)
{
- struct spu *spu;
+ cpumask_t mask;
- BUG_ON(rq->nr_idle <= 0);
- BUG_ON(list_empty(&rq->idle_list));
- /* Future: Move SPU out of low-power SRI state. */
- spu = list_entry(rq->idle_list.next, struct spu, sched_list);
- list_del_init(&spu->sched_list);
- rq->nr_idle--;
- return spu;
+ if (!nr_cpus_node(node))
+ return 0;
+ mask = node_to_cpumask(node);
+ if (!cpus_intersects(mask, current->cpus_allowed))
+ return 0;
+ return 1;
}
-static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
- BUG_ON(rq->nr_active <= 0);
- BUG_ON(list_empty(&rq->active_list));
- list_del_init(&spu->sched_list);
- rq->nr_active--;
-}
+ int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
-static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
-{
- /* Future: Put SPU into low-power SRI state. */
- list_add_tail(&spu->sched_list, &rq->idle_list);
- rq->nr_idle++;
+ /* Global TLBIE broadcast required with SPEs. */
+ __cpus_setall(&mm->cpu_vm_mask, nr);
}
-static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
-{
- rq->nr_active++;
- rq->nr_switches++;
- list_add_tail(&spu->sched_list, &rq->active_list);
-}
+static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
-static void prio_wakeup(struct spu_runqueue *rq)
+static void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
- if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
- int best = sched_find_first_bit(rq->prio.bitmap);
- if (best < MAX_PRIO) {
- wait_queue_head_t *wq = &rq->prio.waitq[best];
- wake_up_interruptible_nr(wq, 1);
- }
- }
+ blocking_notifier_call_chain(&spu_switch_notifier,
+ ctx ? ctx->object_id : 0, spu);
}
-static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
- u64 flags)
+int spu_switch_event_register(struct notifier_block * n)
{
- int prio = current->prio;
- wait_queue_head_t *wq = &rq->prio.waitq[prio];
- DEFINE_WAIT(wait);
-
- __set_bit(prio, rq->prio.bitmap);
- atomic_inc(&rq->prio.nr_blocked);
- prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
- if (!signal_pending(current)) {
- up(&rq->sem);
- up_write(&ctx->state_sema);
- pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
- current->pid, current->prio);
- schedule();
- down_write(&ctx->state_sema);
- down(&rq->sem);
- }
- finish_wait(wq, &wait);
- atomic_dec(&rq->prio.nr_blocked);
- if (!waitqueue_active(wq))
- __clear_bit(prio, rq->prio.bitmap);
+ return blocking_notifier_chain_register(&spu_switch_notifier, n);
}
-static inline int is_best_prio(struct spu_runqueue *rq)
+int spu_switch_event_unregister(struct notifier_block * n)
{
- int best_prio;
-
- best_prio = sched_find_first_bit(rq->prio.bitmap);
- return (current->prio < best_prio) ? 1 : 0;
+ return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
-static inline void mm_needs_global_tlbie(struct mm_struct *mm)
-{
- /* Global TLBIE broadcast required with SPEs. */
-#if (NR_CPUS > 1)
- __cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
-#else
- __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
-#endif
-}
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
- spu->number);
+ pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
+ spu->number, spu->node);
spu->ctx = ctx;
spu->flags = 0;
- ctx->flags = 0;
ctx->spu = spu;
ctx->ops = &spu_hw_ops;
spu->pid = current->pid;
@@ -181,16 +111,20 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
spu->wbox_callback = spufs_wbox_callback;
spu->stop_callback = spufs_stop_callback;
spu->mfc_callback = spufs_mfc_callback;
+ spu->dma_callback = spufs_dma_callback;
mb();
spu_unmap_mappings(ctx);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
+ spu_cpu_affinity_set(spu, raw_smp_processor_id());
+ spu_switch_notify(spu, ctx);
}
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
- spu->pid, spu->number);
+ pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
+ spu->pid, spu->number, spu->node);
+ spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
spu->timestamp = jiffies;
@@ -199,173 +133,158 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
spu->wbox_callback = NULL;
spu->stop_callback = NULL;
spu->mfc_callback = NULL;
+ spu->dma_callback = NULL;
spu->mm = NULL;
spu->pid = 0;
spu->prio = MAX_PRIO;
ctx->ops = &spu_backing_ops;
ctx->spu = NULL;
- ctx->flags = 0;
spu->flags = 0;
spu->ctx = NULL;
}
-static void spu_reaper(void *data)
+static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
+ int prio)
{
- struct spu_context *ctx = data;
- struct spu *spu;
-
- down_write(&ctx->state_sema);
- spu = ctx->spu;
- if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
- if (atomic_read(&spu->rq->prio.nr_blocked)) {
- pr_debug("%s: spu=%d\n", __func__, spu->number);
- ctx->ops->runcntl_stop(ctx);
- spu_deactivate(ctx);
- wake_up_all(&ctx->stop_wq);
- } else {
- clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
- }
- }
- up_write(&ctx->state_sema);
- put_spu_context(ctx);
+ prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
+ set_bit(prio, spu_prio->bitmap);
}
-static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu)
+static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
+ int prio)
{
- struct spu_context *ctx = get_spu_context(spu->ctx);
- unsigned long now = jiffies;
- unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;
-
- set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
- INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
- if (time_after(now, expire))
- schedule_work(&ctx->reap_work);
- else
- schedule_delayed_work(&ctx->reap_work, expire - now);
-}
+ u64 flags;
-static void check_preempt_active(struct spu_runqueue *rq)
-{
- struct list_head *p;
- struct spu *worst = NULL;
-
- list_for_each(p, &rq->active_list) {
- struct spu *spu = list_entry(p, struct spu, sched_list);
- struct spu_context *ctx = spu->ctx;
- if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
- if (!worst || (spu->prio > worst->prio)) {
- worst = spu;
- }
- }
- }
- if (worst && (current->prio < worst->prio))
- schedule_spu_reaper(rq, worst);
+ __set_current_state(TASK_RUNNING);
+
+ spin_lock_irqsave(&wq->lock, flags);
+
+ remove_wait_queue_locked(wq, wait);
+ if (list_empty(&wq->task_list))
+ clear_bit(prio, spu_prio->bitmap);
+
+ spin_unlock_irqrestore(&wq->lock, flags);
}
-static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx, u64 flags)
{
- struct spu_runqueue *rq;
- struct spu *spu = NULL;
+ int prio = current->prio;
+ wait_queue_head_t *wq = &spu_prio->waitq[prio];
+ DEFINE_WAIT(wait);
- rq = spu_rq();
- down(&rq->sem);
- for (;;) {
- if (rq->nr_idle > 0) {
- if (is_best_prio(rq)) {
- /* Fall through. */
- spu = del_idle(rq);
- break;
- } else {
- prio_wakeup(rq);
- up(&rq->sem);
- yield();
- if (signal_pending(current)) {
- return NULL;
- }
- rq = spu_rq();
- down(&rq->sem);
- continue;
- }
- } else {
- check_preempt_active(rq);
- prio_wait(rq, ctx, flags);
- if (signal_pending(current)) {
- prio_wakeup(rq);
- spu = NULL;
- break;
- }
- continue;
- }
+ if (ctx->spu)
+ return;
+
+ spu_add_wq(wq, &wait, prio);
+
+ if (!signal_pending(current)) {
+ up_write(&ctx->state_sema);
+ pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
+ current->pid, current->prio);
+ schedule();
+ down_write(&ctx->state_sema);
}
- up(&rq->sem);
- return spu;
+
+ spu_del_wq(wq, &wait, prio);
}
-static void put_idle_spu(struct spu *spu)
+static void spu_prio_wakeup(void)
{
- struct spu_runqueue *rq = spu->rq;
-
- down(&rq->sem);
- add_idle(rq, spu);
- prio_wakeup(rq);
- up(&rq->sem);
+ int best = sched_find_first_bit(spu_prio->bitmap);
+ if (best < MAX_PRIO) {
+ wait_queue_head_t *wq = &spu_prio->waitq[best];
+ wake_up_interruptible_nr(wq, 1);
+ }
}
static int get_active_spu(struct spu *spu)
{
- struct spu_runqueue *rq = spu->rq;
- struct list_head *p;
+ int node = spu->node;
struct spu *tmp;
int rc = 0;
- down(&rq->sem);
- list_for_each(p, &rq->active_list) {
- tmp = list_entry(p, struct spu, sched_list);
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
if (tmp == spu) {
- del_active(rq, spu);
+ list_del_init(&spu->list);
rc = 1;
break;
}
}
- up(&rq->sem);
+ mutex_unlock(&spu_prio->active_mutex[node]);
return rc;
}
static void put_active_spu(struct spu *spu)
{
- struct spu_runqueue *rq = spu->rq;
+ int node = spu->node;
+
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_add_tail(&spu->list, &spu_prio->active_list[node]);
+ mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
+static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+{
+ struct spu *spu = NULL;
+ int node = cpu_to_node(raw_smp_processor_id());
+ int n;
- down(&rq->sem);
- add_active(rq, spu);
- up(&rq->sem);
+ for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ node = (node < MAX_NUMNODES) ? node : 0;
+ if (!node_allowed(node))
+ continue;
+ spu = spu_alloc_node(node);
+ if (spu)
+ break;
+ }
+ return spu;
}
-/* Lock order:
- * spu_activate() & spu_deactivate() require the
- * caller to have down_write(&ctx->state_sema).
+static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+{
+ /* Future: spu_get_idle() if possible,
+ * otherwise try to preempt an active
+ * context.
+ */
+ return spu_get_idle(ctx, flags);
+}
+
+/* The three externally callable interfaces
+ * for the scheduler begin here.
*
- * The rq->sem is briefly held (inside or outside a
- * given ctx lock) for list management, but is never
- * held during save/restore.
+ * spu_activate - bind a context to SPU, waiting as needed.
+ * spu_deactivate - unbind a context from its SPU.
+ * spu_yield - yield an SPU if others are waiting.
*/
int spu_activate(struct spu_context *ctx, u64 flags)
{
struct spu *spu;
+ int ret = 0;
- if (ctx->spu)
- return 0;
- spu = get_idle_spu(ctx, flags);
- if (!spu)
- return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
- bind_context(spu, ctx);
- /*
- * We're likely to wait for interrupts on the same
- * CPU that we are now on, so send them here.
- */
- spu_cpu_affinity_set(spu, raw_smp_processor_id());
- put_active_spu(spu);
- return 0;
+ for (;;) {
+ if (ctx->spu)
+ return 0;
+ spu = spu_get(ctx, flags);
+ if (spu != NULL) {
+ if (ctx->spu != NULL) {
+ spu_free(spu);
+ spu_prio_wakeup();
+ break;
+ }
+ bind_context(spu, ctx);
+ put_active_spu(spu);
+ break;
+ }
+ spu_prio_wait(ctx, flags);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ spu_prio_wakeup();
+ break;
+ }
+ }
+ return ret;
}
void spu_deactivate(struct spu_context *ctx)
@@ -378,8 +297,10 @@ void spu_deactivate(struct spu_context *ctx)
return;
needs_idle = get_active_spu(spu);
unbind_context(spu, ctx);
- if (needs_idle)
- put_idle_spu(spu);
+ if (needs_idle) {
+ spu_free(spu);
+ spu_prio_wakeup();
+ }
}
void spu_yield(struct spu_context *ctx)
@@ -387,77 +308,60 @@ void spu_yield(struct spu_context *ctx)
struct spu *spu;
int need_yield = 0;
- down_write(&ctx->state_sema);
- spu = ctx->spu;
- if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
- pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
- spu_deactivate(ctx);
- ctx->state = SPU_STATE_SAVED;
- need_yield = 1;
- } else if (spu) {
- spu->prio = MAX_PRIO;
+ if (down_write_trylock(&ctx->state_sema)) {
+ if ((spu = ctx->spu) != NULL) {
+ int best = sched_find_first_bit(spu_prio->bitmap);
+ if (best < MAX_PRIO) {
+ pr_debug("%s: yielding SPU %d NODE %d\n",
+ __FUNCTION__, spu->number, spu->node);
+ spu_deactivate(ctx);
+ ctx->state = SPU_STATE_SAVED;
+ need_yield = 1;
+ } else {
+ spu->prio = MAX_PRIO;
+ }
+ }
+ up_write(&ctx->state_sema);
}
- up_write(&ctx->state_sema);
if (unlikely(need_yield))
yield();
}
int __init spu_sched_init(void)
{
- struct spu_runqueue *rq;
- struct spu *spu;
int i;
- rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
- if (!rq) {
- printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
+ spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
+ if (!spu_prio) {
+ printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
__FUNCTION__);
return 1;
}
- memset(rq, 0, sizeof(struct spu_runqueue));
- init_MUTEX(&rq->sem);
- INIT_LIST_HEAD(&rq->active_list);
- INIT_LIST_HEAD(&rq->idle_list);
- rq->nr_active = 0;
- rq->nr_idle = 0;
- rq->nr_switches = 0;
- atomic_set(&rq->prio.nr_blocked, 0);
for (i = 0; i < MAX_PRIO; i++) {
- init_waitqueue_head(&rq->prio.waitq[i]);
- __clear_bit(i, rq->prio.bitmap);
+ init_waitqueue_head(&spu_prio->waitq[i]);
+ __clear_bit(i, spu_prio->bitmap);
}
- __set_bit(MAX_PRIO, rq->prio.bitmap);
- for (;;) {
- spu = spu_alloc();
- if (!spu)
- break;
- pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
- add_idle(rq, spu);
- spu->rq = rq;
- spu->timestamp = jiffies;
- }
- if (!rq->nr_idle) {
- printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
- kfree(rq);
- return 1;
+ __set_bit(MAX_PRIO, spu_prio->bitmap);
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ mutex_init(&spu_prio->active_mutex[i]);
+ INIT_LIST_HEAD(&spu_prio->active_list[i]);
}
return 0;
}
void __exit spu_sched_exit(void)
{
- struct spu_runqueue *rq = spu_rq();
- struct spu *spu;
-
- if (!rq) {
- printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
- return;
- }
- while (rq->nr_idle > 0) {
- spu = del_idle(rq);
- if (!spu)
- break;
- spu_free(spu);
+ struct spu *spu, *tmp;
+ int node;
+
+ for (node = 0; node < MAX_NUMNODES; node++) {
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_for_each_entry_safe(spu, tmp, &spu_prio->active_list[node],
+ list) {
+ list_del_init(&spu->list);
+ spu_free(spu);
+ }
+ mutex_unlock(&spu_prio->active_mutex[node]);
}
- kfree(rq);
+ kfree(spu_prio);
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 4485738e2102..a0f55ca2d488 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -39,6 +39,8 @@ struct spu_context_ops;
#define SPU_CONTEXT_PREEMPT 0UL
+struct spu_gang;
+
struct spu_context {
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
@@ -48,6 +50,7 @@ struct spu_context {
struct address_space *cntl; /* 'control' area mappings. */
struct address_space *signal1; /* 'signal1' area mappings. */
struct address_space *signal2; /* 'signal2' area mappings. */
+ u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
struct rw_semaphore state_sema;
@@ -66,7 +69,18 @@ struct spu_context {
u32 tagwait;
struct spu_context_ops *ops;
struct work_struct reap_work;
- u64 flags;
+ unsigned long flags;
+ unsigned long event_return;
+
+ struct list_head gang_list;
+ struct spu_gang *gang;
+};
+
+struct spu_gang {
+ struct list_head list;
+ struct mutex mutex;
+ struct kref kref;
+ int contexts;
};
struct mfc_dma_command {
@@ -114,6 +128,7 @@ extern struct spu_context_ops spu_backing_ops;
struct spufs_inode_info {
struct spu_context *i_ctx;
+ struct spu_gang *i_gang;
struct inode vfs_inode;
};
#define SPUFS_I(inode) \
@@ -124,12 +139,19 @@ extern struct tree_descr spufs_dir_contents[];
/* system call implementation */
long spufs_run_spu(struct file *file,
struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create_thread(struct nameidata *nd,
+long spufs_create(struct nameidata *nd,
unsigned int flags, mode_t mode);
extern struct file_operations spufs_context_fops;
+/* gang management */
+struct spu_gang *alloc_spu_gang(void);
+struct spu_gang *get_spu_gang(struct spu_gang *gang);
+int put_spu_gang(struct spu_gang *gang);
+void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
+void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
+
/* context management */
-struct spu_context * alloc_spu_context(void);
+struct spu_context * alloc_spu_context(struct spu_gang *gang);
void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx);
int put_spu_context(struct spu_context *ctx);
@@ -183,5 +205,6 @@ void spufs_ibox_callback(struct spu *spu);
void spufs_wbox_callback(struct spu *spu);
void spufs_stop_callback(struct spu *spu);
void spufs_mfc_callback(struct spu *spu);
+void spufs_dma_callback(struct spu *spu, int type);
#endif
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 9d9d82dd32ba..0f782ca662ba 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1779,6 +1779,15 @@ static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
*/
out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
eieio();
+ /*
+ * FIXME: this is to restart a DMA that we were processing
+ * before the save. better remember the fault information
+ * in the csa instead.
+ */
+ if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
+ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+ eieio();
+ }
}
static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index e6565a949ddc..a6d1ae4dc2a3 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -38,7 +38,7 @@ static long do_spu_run(struct file *filp,
u32 npc, status;
ret = -EFAULT;
- if (get_user(npc, unpc) || get_user(status, ustatus))
+ if (get_user(npc, unpc))
goto out;
/* check if this file was created by spu_create */
@@ -49,7 +49,10 @@ static long do_spu_run(struct file *filp,
i = SPUFS_I(filp->f_dentry->d_inode);
ret = spufs_run_spu(filp, i->i_ctx, &npc, &status);
- if (put_user(npc, unpc) || put_user(status, ustatus))
+ if (put_user(npc, unpc))
+ ret = -EFAULT;
+
+ if (ustatus && put_user(status, ustatus))
ret = -EFAULT;
out:
return ret;
@@ -87,7 +90,7 @@ asmlinkage long sys_spu_create(const char __user *pathname,
ret = path_lookup(tmp, LOOKUP_PARENT|
LOOKUP_OPEN|LOOKUP_CREATE, &nd);
if (!ret) {
- ret = spufs_create_thread(&nd, flags, mode);
+ ret = spufs_create(&nd, flags, mode);
path_release(&nd);
}
putname(tmp);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 488dbd9b51ae..cae3d13229b9 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -70,7 +70,7 @@ unsigned long event_scan_interval;
* has to include <linux/interrupt.h> (to get irqreturn_t), which
* causes all sorts of problems. -- paulus
*/
-extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
+extern irqreturn_t xmon_irq(int, void *);
extern unsigned long loops_per_jiffy;
@@ -335,12 +335,11 @@ chrp_event_scan(unsigned long unused)
jiffies + event_scan_interval);
}
-static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cascade_irq = i8259_irq(regs);
+ unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq, regs);
+ generic_handle_irq(cascade_irq);
desc->chip->eoi(irq);
}
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index e4f2b9df5e17..bdb475c65cba 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -18,7 +18,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -62,8 +61,7 @@ pci_dram_offset = MPC7448_HPC2_PCI_MEM_OFFSET;
extern int tsi108_setup_pci(struct device_node *dev);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
extern void tsi108_pci_int_init(void);
-extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs);
+extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
int mpc7448_hpc2_exclude_device(u_char bus, u_char devfn)
{
@@ -201,7 +199,7 @@ static void __init mpc7448_hpc2_init_IRQ(void)
tsi_pic = of_find_node_by_type(NULL, "open-pic");
if (tsi_pic) {
unsigned int size;
- void *prop = get_property(tsi_pic, "reg", &size);
+ const void *prop = get_property(tsi_pic, "reg", &size);
mpic_paddr = of_translate_address(tsi_pic, prop);
}
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index e32446877e78..5225abfafd9b 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -43,10 +43,7 @@
#include "irq.h"
#include "pci.h"
#include "call_pci.h"
-
-#if defined(CONFIG_SMP)
-extern void iSeries_smp_message_recv(struct pt_regs *);
-#endif
+#include "smp.h"
#ifdef CONFIG_PCI
@@ -88,7 +85,7 @@ static DEFINE_SPINLOCK(pending_irqs_lock);
static int num_pending_irqs;
static int pending_irqs[NR_IRQS];
-static void int_received(struct pci_event *event, struct pt_regs *regs)
+static void int_received(struct pci_event *event)
{
int irq;
@@ -146,11 +143,11 @@ static void int_received(struct pci_event *event, struct pt_regs *regs)
}
}
-static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
+static void pci_event_handler(struct HvLpEvent *event)
{
if (event && (event->xType == HvLpEvent_Type_PciIo)) {
if (hvlpevent_is_int(event))
- int_received((struct pci_event *)event, regs);
+ int_received((struct pci_event *)event);
else
printk(KERN_ERR
"pci_event_handler: unexpected ack received\n");
@@ -308,18 +305,18 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
/*
* Get the next pending IRQ.
*/
-unsigned int iSeries_get_irq(struct pt_regs *regs)
+unsigned int iSeries_get_irq(void)
{
int irq = NO_IRQ_IGNORE;
#ifdef CONFIG_SMP
if (get_lppaca()->int_dword.fields.ipi_cnt) {
get_lppaca()->int_dword.fields.ipi_cnt = 0;
- iSeries_smp_message_recv(regs);
+ iSeries_smp_message_recv();
}
#endif /* CONFIG_SMP */
if (hvlpevent_is_pending())
- process_hvlpevents(regs);
+ process_hvlpevents();
#ifdef CONFIG_PCI
if (num_pending_irqs) {
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
index 1ee8985140e5..69f1b437fc7b 100644
--- a/arch/powerpc/platforms/iseries/irq.h
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -4,6 +4,6 @@
extern void iSeries_init_IRQ(void);
extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
extern void iSeries_activate_IRQs(void);
-extern unsigned int iSeries_get_irq(struct pt_regs *);
+extern unsigned int iSeries_get_irq(void);
#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
index 98c1c2440aad..e3e929e1b460 100644
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -116,7 +116,7 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
hvlpevent_invalidate(event);
}
-void process_hvlpevents(struct pt_regs *regs)
+void process_hvlpevents(void)
{
struct HvLpEvent * event;
@@ -144,7 +144,7 @@ void process_hvlpevents(struct pt_regs *regs)
__get_cpu_var(hvlpevent_counts)[event->xType]++;
if (event->xType < HvLpEvent_Type_NumTypes &&
lpEventHandler[event->xType])
- lpEventHandler[event->xType](event, regs);
+ lpEventHandler[event->xType](event);
else
printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 1983b640bac1..b5737d68d6c4 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -513,7 +513,7 @@ static void handle_ack(struct io_mf_lp_event *event)
* parse it enough to know if it is an interrupt or an
* acknowledge.
*/
-static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
+static void hv_handler(struct HvLpEvent *event)
{
if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
if (hvlpevent_is_ack(event))
@@ -847,7 +847,7 @@ static int mf_get_boot_rtc(struct rtc_time *tm)
/* We need to poll here as we are not yet taking interrupts */
while (rtc_data.busy) {
if (hvlpevent_is_pending())
- process_hvlpevents(NULL);
+ process_hvlpevents();
}
return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 3eb12065df23..4aa165e010d9 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -262,14 +262,6 @@ void __init iSeries_pci_final_fixup(void)
mf_display_src(0xC9000200);
}
-void pcibios_fixup_bus(struct pci_bus *PciBus)
-{
-}
-
-void pcibios_fixup_resources(struct pci_dev *pdev)
-{
-}
-
/*
* Look down the chain to find the matching Device Device
*/
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f1953066ff8..a0ff7ba7d666 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -649,15 +649,21 @@ static void iseries_dedicated_idle(void)
void __init iSeries_init_IRQ(void) { }
#endif
+/*
+ * iSeries has no legacy IO, anything calling this function has to
+ * fail or bad things will happen
+ */
+static int iseries_check_legacy_ioport(unsigned int baseport)
+{
+ return -ENODEV;
+}
+
static int __init iseries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
return 0;
- powerpc_firmware_features |= FW_FEATURE_ISERIES;
- powerpc_firmware_features |= FW_FEATURE_LPAR;
-
hpte_init_iSeries();
return 1;
@@ -680,6 +686,7 @@ define_machine(iseries) {
.calibrate_decr = generic_calibrate_decr,
.progress = iSeries_progress,
.probe = iseries_probe,
+ .check_legacy_ioport = iseries_check_legacy_ioport,
/* XXX Implement enable_pmcs for iSeries */
};
@@ -687,6 +694,9 @@ void * __init iSeries_early_setup(void)
{
unsigned long phys_mem_size;
+ powerpc_firmware_features |= FW_FEATURE_ISERIES;
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+
iSeries_fixup_klimit();
/*
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 2eb095edb472..aee5908df700 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -43,9 +43,11 @@
#include <asm/cputable.h>
#include <asm/system.h>
+#include "smp.h"
+
static unsigned long iSeries_smp_message[NR_CPUS];
-void iSeries_smp_message_recv(struct pt_regs *regs)
+void iSeries_smp_message_recv(void)
{
int cpu = smp_processor_id();
int msg;
@@ -55,7 +57,7 @@ void iSeries_smp_message_recv(struct pt_regs *regs)
for (msg = 0; msg < 4; msg++)
if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
- smp_message_recv(msg, regs);
+ smp_message_recv(msg);
}
static inline void smp_iSeries_do_message(int cpu, int msg)
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h
new file mode 100644
index 000000000000..d501f7de01e7
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/smp.h
@@ -0,0 +1,6 @@
+#ifndef _PLATFORMS_ISERIES_SMP_H
+#define _PLATFORMS_ISERIES_SMP_H
+
+extern void iSeries_smp_message_recv(void);
+
+#endif /* _PLATFORMS_ISERIES_SMP_H */
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index 9baa4ee82592..04e07e5da0c1 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -378,7 +378,7 @@ void vio_set_hostlp(void)
}
EXPORT_SYMBOL(vio_set_hostlp);
-static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
+static void vio_handleEvent(struct HvLpEvent *event)
{
HvLpIndex remoteLp;
int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 1b827618e05f..63b4d1bff359 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -8,7 +8,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#define DEBUG
+#undef DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -16,6 +16,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/irq.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -33,7 +34,7 @@
#define DBG(x...)
#endif
-static struct pci_controller *u3_agp, *u3_ht;
+static struct pci_controller *u3_agp, *u3_ht, *u4_pcie;
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
@@ -287,6 +288,114 @@ static struct pci_ops u3_ht_pci_ops =
u3_ht_write_config
};
+static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off)
+{
+ return (1 << PCI_SLOT(devfn)) |
+ (PCI_FUNC(devfn) << 8) |
+ ((off >> 8) << 28) |
+ (off & 0xfcu);
+}
+
+static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn,
+ unsigned int off)
+{
+ return (bus << 16) |
+ (devfn << 8) |
+ ((off >> 8) << 28) |
+ (off & 0xfcu) | 1u;
+}
+
+static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
+ u8 bus, u8 dev_fn, int offset)
+{
+ unsigned int caddr;
+
+ if (bus == hose->first_busno)
+ caddr = u4_pcie_cfa0(dev_fn, offset);
+ else
+ caddr = u4_pcie_cfa1(bus, dev_fn, offset);
+
+ /* Uninorth will return garbage if we don't read back the value ! */
+ do {
+ out_le32(hose->cfg_addr, caddr);
+ } while (in_le32(hose->cfg_addr) != caddr);
+
+ offset &= 0x03;
+ return hose->cfg_data + offset;
+}
+
+static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose;
+ volatile void __iomem *addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ *val = in_8(addr);
+ break;
+ case 2:
+ *val = in_le16(addr);
+ break;
+ default:
+ *val = in_le32(addr);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct pci_controller *hose;
+ volatile void __iomem *addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ out_8(addr, val);
+ (void) in_8(addr);
+ break;
+ case 2:
+ out_le16(addr, val);
+ (void) in_le16(addr);
+ break;
+ default:
+ out_le32(addr, val);
+ (void) in_le32(addr);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops u4_pcie_pci_ops =
+{
+ u4_pcie_read_config,
+ u4_pcie_write_config
+};
+
static void __init setup_u3_agp(struct pci_controller* hose)
{
/* On G5, we move AGP up to high bus number so we don't need
@@ -307,6 +416,26 @@ static void __init setup_u3_agp(struct pci_controller* hose)
u3_agp = hose;
}
+static void __init setup_u4_pcie(struct pci_controller* hose)
+{
+ /* We currently only implement the "non-atomic" config space, to
+ * be optimised later.
+ */
+ hose->ops = &u4_pcie_pci_ops;
+ hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
+ hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
+
+ /* The bus contains a bridge from root -> device, we need to
+ * make it visible on bus 0 so that we pick the right type
+ * of config cycles. If we didn't, we would have to force all
+ * config cycles to be type 1. So we override the "bus-range"
+ * property here
+ */
+ hose->first_busno = 0x00;
+ hose->last_busno = 0xff;
+ u4_pcie = hose;
+}
+
static void __init setup_u3_ht(struct pci_controller* hose)
{
hose->ops = &u3_ht_pci_ops;
@@ -354,6 +483,10 @@ static int __init add_bridge(struct device_node *dev)
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
+ } else if (device_is_compatible(dev, "u4-pcie")) {
+ setup_u4_pcie(hose);
+ disp_name = "U4-PCIE";
+ primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
disp_name, hose->first_busno, hose->last_busno);
@@ -361,7 +494,6 @@ static int __init add_bridge(struct device_node *dev)
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
- pci_setup_phb_io(hose, primary);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
@@ -376,8 +508,30 @@ void __init maple_pcibios_fixup(void)
DBG(" -> maple_pcibios_fixup\n");
- for_each_pci_dev(dev)
+ for_each_pci_dev(dev) {
+ /* Fixup IRQ for PCIe host */
+ if (u4_pcie != NULL && dev->bus->number == 0 &&
+ pci_bus_to_host(dev->bus) == u4_pcie) {
+ printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
+ dev->irq = irq_create_mapping(NULL, 1);
+ if (dev->irq != NO_IRQ)
+ set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
+ continue;
+ }
+
+ /* Hide AMD8111 IDE interrupt when in legacy mode so
+ * the driver calls pci_get_legacy_ide_irq()
+ */
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
+ (dev->class & 5) != 5) {
+ dev->irq = NO_IRQ;
+ continue;
+ }
+
+ /* For all others, map the interrupt from the device-tree */
pci_read_irq_line(dev);
+ }
DBG(" <- maple_pcibios_fixup\n");
}
@@ -388,8 +542,10 @@ static void __init maple_fixup_phb_resources(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base;
+
hose->io_resource.start += offset;
hose->io_resource.end += offset;
+
printk(KERN_INFO "PCI Host %d, io start: %llx; io end: %llx\n",
hose->global_number,
(unsigned long long)hose->io_resource.start,
@@ -431,6 +587,19 @@ void __init maple_pci_init(void)
if (ht && add_bridge(ht) != 0)
of_node_put(ht);
+ /*
+ * We need to call pci_setup_phb_io for the HT bridge first
+ * so it gets the I/O port numbers starting at 0, and we
+ * need to call it for the AGP bridge after that so it gets
+ * small positive I/O port numbers.
+ */
+ if (u3_ht)
+ pci_setup_phb_io(u3_ht, 1);
+ if (u3_agp)
+ pci_setup_phb_io(u3_agp, 0);
+ if (u4_pcie)
+ pci_setup_phb_io(u4_pcie, 0);
+
/* Fixup the IO resources on our host bridges as the common code
* does it only for childs of the host bridges
*/
@@ -465,8 +634,11 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
return defirq;
np = pci_device_to_OF_node(pdev);
- if (np == NULL)
+ if (np == NULL) {
+ printk("Failed to locate OF node for IDE %s\n",
+ pci_name(pdev));
return defirq;
+ }
irq = irq_of_parse_and_map(np, channel & 0x1);
if (irq == NO_IRQ) {
printk("Failed to map onboard IDE interrupt for channel %d\n",
@@ -479,6 +651,9 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
/* XXX: To remove once all firmwares are ok */
static void fixup_maple_ide(struct pci_dev* dev)
{
+ if (!machine_is(maple))
+ return;
+
#if 0 /* Enable this to enable IDE port 0 */
{
u8 v;
@@ -495,7 +670,7 @@ static void fixup_maple_ide(struct pci_dev* dev)
dev->resource[4].start = 0xcc00;
dev->resource[4].end = 0xcc10;
#endif
-#if 1 /* Enable this to fixup IDE sense/polarity of irqs in IO-APICs */
+#if 0 /* Enable this to fixup IDE sense/polarity of irqs in IO-APICs */
{
struct pci_dev *apicdev;
u32 v;
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 4679c5230413..39020c1fa13d 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -35,17 +35,17 @@
#define CONFIG_OFFSET_VALID(off) ((off) < 4096)
-static unsigned long pa_pxp_cfg_addr(struct pci_controller *hose,
+static void volatile __iomem *pa_pxp_cfg_addr(struct pci_controller *hose,
u8 bus, u8 devfn, int offset)
{
- return ((unsigned long)hose->cfg_data) + PA_PXP_CFA(bus, devfn, offset);
+ return hose->cfg_data + PA_PXP_CFA(bus, devfn, offset);
}
static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
- unsigned long addr;
+ void volatile __iomem *addr;
hose = pci_bus_to_host(bus);
if (!hose)
@@ -62,13 +62,13 @@ static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
*/
switch (len) {
case 1:
- *val = in_8((u8 *)addr);
+ *val = in_8(addr);
break;
case 2:
- *val = in_le16((u16 *)addr);
+ *val = in_le16(addr);
break;
default:
- *val = in_le32((u32 *)addr);
+ *val = in_le32(addr);
break;
}
@@ -79,7 +79,7 @@ static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
- unsigned long addr;
+ void volatile __iomem *addr;
hose = pci_bus_to_host(bus);
if (!hose)
@@ -96,16 +96,16 @@ static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn,
*/
switch (len) {
case 1:
- out_8((u8 *)addr, val);
- (void) in_8((u8 *)addr);
+ out_8(addr, val);
+ (void) in_8(addr);
break;
case 2:
- out_le16((u16 *)addr, val);
- (void) in_le16((u16 *)addr);
+ out_le16(addr, val);
+ (void) in_le16(addr);
break;
default:
- out_le32((u32 *)addr, val);
- (void) in_le32((u32 *)addr);
+ out_le32(addr, val);
+ (void) in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 628482671c15..106896c3b60a 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -22,7 +22,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/arch/powerpc/platforms/pasemi/time.c b/arch/powerpc/platforms/pasemi/time.c
index 9bd410b8fec6..fa54351ac268 100644
--- a/arch/powerpc/platforms/pasemi/time.c
+++ b/arch/powerpc/platforms/pasemi/time.c
@@ -17,7 +17,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/config.h>
#include <linux/time.h>
#include <asm/time.h>
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index c2c7cf75dd5f..bfc4829162f1 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -342,7 +342,7 @@ static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr)
}
/* Interrupt handler */
-static irqreturn_t kw_i2c_irq(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t kw_i2c_irq(int irq, void *dev_id)
{
struct pmac_i2c_host_kw *host = dev_id;
unsigned long flags;
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index ee3b223ab17a..5c6c15c5f9a3 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -15,7 +15,7 @@
#define DBG(fmt...)
#endif
-static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs)
+static irqreturn_t macio_gpio_irq(int irq, void *data)
{
pmf_do_irq(data);
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 39f7ddb554ea..39db12890214 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -42,7 +42,7 @@
* has to include <linux/interrupt.h> (to get irqreturn_t), which
* causes all sorts of problems. -- paulus
*/
-extern irqreturn_t xmon_irq(int, void *, struct pt_regs *);
+extern irqreturn_t xmon_irq(int, void *);
#ifdef CONFIG_PPC32
struct pmac_irq_hw {
@@ -210,7 +210,7 @@ static struct irq_chip pmac_pic = {
.retrigger = pmac_retrigger,
};
-static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
+static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
unsigned long flags;
int irq, bits;
@@ -227,7 +227,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
continue;
irq += __ilog2(bits);
spin_unlock_irqrestore(&pmac_pic_lock, flags);
- __do_IRQ(irq, regs);
+ __do_IRQ(irq);
spin_lock_irqsave(&pmac_pic_lock, flags);
rc = IRQ_HANDLED;
}
@@ -235,18 +235,18 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs)
return rc;
}
-static unsigned int pmac_pic_get_irq(struct pt_regs *regs)
+static unsigned int pmac_pic_get_irq(void)
{
int irq;
unsigned long bits = 0;
unsigned long flags;
#ifdef CONFIG_SMP
- void psurge_smp_message_recv(struct pt_regs *);
+ void psurge_smp_message_recv(void);
/* IPI's are a hack on the powersurge -- Cort */
if ( smp_processor_id() != 0 ) {
- psurge_smp_message_recv(regs);
+ psurge_smp_message_recv();
return NO_IRQ_IGNORE; /* ignore, already handled */
}
#endif /* CONFIG_SMP */
@@ -440,14 +440,13 @@ static void __init pmac_pic_probe_oldstyle(void)
}
#endif /* CONFIG_PPC32 */
-static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc)
{
struct mpic *mpic = desc->handler_data;
- unsigned int cascade_irq = mpic_get_one_irq(mpic, regs);
+ unsigned int cascade_irq = mpic_get_one_irq(mpic);
if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq, regs);
+ generic_handle_irq(cascade_irq);
desc->chip->eoi(irq);
}
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h
index 664103dfeef9..c44c89f5e532 100644
--- a/arch/powerpc/platforms/powermac/pic.h
+++ b/arch/powerpc/platforms/powermac/pic.h
@@ -5,7 +5,7 @@
extern struct hw_interrupt_type pmac_pic;
-void pmac_pic_init(void);
-int pmac_get_irq(struct pt_regs *regs);
+extern void pmac_pic_init(void);
+extern int pmac_get_irq(void);
#endif /* __PPC_PLATFORMS_PMAC_PIC_H */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 1949b657b092..eeb2ae5ffc58 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -160,7 +160,7 @@ static inline void psurge_clr_ipi(int cpu)
*/
static unsigned long psurge_smp_message[NR_CPUS];
-void psurge_smp_message_recv(struct pt_regs *regs)
+void psurge_smp_message_recv(void)
{
int cpu = smp_processor_id();
int msg;
@@ -174,12 +174,12 @@ void psurge_smp_message_recv(struct pt_regs *regs)
/* make sure there is a message there */
for (msg = 0; msg < 4; msg++)
if (test_and_clear_bit(msg, &psurge_smp_message[cpu]))
- smp_message_recv(msg, regs);
+ smp_message_recv(msg);
}
-irqreturn_t psurge_primary_intr(int irq, void *d, struct pt_regs *regs)
+irqreturn_t psurge_primary_intr(int irq, void *d)
{
- psurge_smp_message_recv(regs);
+ psurge_smp_message_recv();
return IRQ_HANDLED;
}
@@ -328,6 +328,7 @@ static void __init smp_psurge_kick_cpu(int nr)
{
unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
unsigned long a;
+ int i;
/* may need to flush here if secondary bats aren't setup */
for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
@@ -340,7 +341,11 @@ static void __init smp_psurge_kick_cpu(int nr)
mb();
psurge_set_ipi(nr);
- udelay(10);
+ /*
+ * We can't use udelay here because the timebase is now frozen.
+ */
+ for (i = 0; i < 2000; ++i)
+ barrier();
psurge_clr_ipi(nr);
if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index ce1a235855f7..379db05b0082 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -111,8 +111,6 @@ void udbg_scc_init(int force_scc)
pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
-
- /* Setup for 57600 8N1 */
if (ch == ch_a)
addr += 0x20;
sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
@@ -125,9 +123,21 @@ void udbg_scc_init(int force_scc)
x = in_8(sccc);
out_8(sccc, 0x09); /* reset A or B side */
out_8(sccc, 0xc0);
+
+ /* If SCC was the OF output port, read the BRG value, else
+ * Setup for 57600 8N1
+ */
+ if (ch_def != NULL) {
+ out_8(sccc, 13);
+ scc_inittab[1] = in_8(sccc);
+ out_8(sccc, 12);
+ scc_inittab[3] = in_8(sccc);
+ }
+
for (i = 0; i < sizeof(scc_inittab); ++i)
out_8(sccc, scc_inittab[i]);
+
udbg_putc = udbg_scc_putc;
udbg_getc = udbg_scc_getc;
udbg_getc_poll = udbg_scc_getc_poll;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index bbf2e34dc358..d24ba547e53f 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -267,7 +267,8 @@ static void iommu_table_setparms(struct pci_controller *phb,
struct iommu_table *tbl)
{
struct device_node *node;
- const unsigned long *basep, *sizep;
+ const unsigned long *basep;
+ const u32 *sizep;
node = (struct device_node *)phb->arch_data;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 311ed1993fc0..b1d3d161249e 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -65,16 +65,14 @@ static int ras_check_exception_token;
#define EPOW_SENSOR_INDEX 0
#define RAS_VECTOR_OFFSET 0x500
-static irqreturn_t ras_epow_interrupt(int irq, void *dev_id,
- struct pt_regs * regs);
-static irqreturn_t ras_error_interrupt(int irq, void *dev_id,
- struct pt_regs * regs);
+static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
+static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
/* #define DEBUG */
static void request_ras_irqs(struct device_node *np,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ irq_handler_t handler,
const char *name)
{
int i, index, count = 0;
@@ -166,8 +164,7 @@ __initcall(init_ras_IRQ);
* to examine the type of power failure and take appropriate action where
* the time horizon permits something useful to be done.
*/
-static irqreturn_t
-ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
int status = 0xdeadbeef;
int state = 0;
@@ -210,8 +207,7 @@ ras_epow_interrupt(int irq, void *dev_id, struct pt_regs * regs)
* For nonrecoverable errors, an error is logged and we stop all processing
* as quickly as possible in order to prevent propagation of the failure.
*/
-static irqreturn_t
-ras_error_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
struct rtas_error_log *rtas_elog;
int status = 0xdeadbeef;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 43dbf737698c..89a8119f988d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -121,12 +121,11 @@ static void __init fwnmi_init(void)
fwnmi_active = 1;
}
-void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs)
+void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
{
- unsigned int cascade_irq = i8259_irq(regs);
+ unsigned int cascade_irq = i8259_irq();
if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq, regs);
+ generic_handle_irq(cascade_irq);
desc->chip->eoi(irq);
}
@@ -180,7 +179,7 @@ static void __init pseries_mpic_init_IRQ(void)
cascade_irq = irq_of_parse_and_map(cascade, 0);
if (cascade == NO_IRQ) {
- printk(KERN_ERR "xics: failed to map cascade interrupt");
+ printk(KERN_ERR "mpic: failed to map cascade interrupt");
return;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 253972e5479f..d071abe78ab1 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -308,14 +308,14 @@ static inline unsigned int xics_remap_irq(unsigned int vec)
return NO_IRQ;
}
-static unsigned int xics_get_irq_direct(struct pt_regs *regs)
+static unsigned int xics_get_irq_direct(void)
{
unsigned int cpu = smp_processor_id();
return xics_remap_irq(direct_xirr_info_get(cpu));
}
-static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
+static unsigned int xics_get_irq_lpar(void)
{
unsigned int cpu = smp_processor_id();
@@ -324,7 +324,7 @@ static unsigned int xics_get_irq_lpar(struct pt_regs *regs)
#ifdef CONFIG_SMP
-static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
+static irqreturn_t xics_ipi_dispatch(int cpu)
{
WARN_ON(cpu_is_offline(cpu));
@@ -332,47 +332,47 @@ static irqreturn_t xics_ipi_dispatch(int cpu, struct pt_regs *regs)
if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
+ smp_message_recv(PPC_MSG_CALL_FUNCTION);
}
if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_RESCHEDULE, regs);
+ smp_message_recv(PPC_MSG_RESCHEDULE);
}
#if 0
if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
+ smp_message_recv(PPC_MSG_MIGRATE_TASK);
}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
+ smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
}
#endif
}
return IRQ_HANDLED;
}
-static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id)
{
int cpu = smp_processor_id();
direct_qirr_info(cpu, 0xff);
- return xics_ipi_dispatch(cpu, regs);
+ return xics_ipi_dispatch(cpu);
}
-static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id)
{
int cpu = smp_processor_id();
lpar_qirr_info(cpu, 0xff);
- return xics_ipi_dispatch(cpu, regs);
+ return xics_ipi_dispatch(cpu);
}
void xics_cause_IPI(int cpu)
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
index 6ee1055b0ffb..db0ec3ba3ae2 100644
--- a/arch/powerpc/platforms/pseries/xics.h
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -31,7 +31,6 @@ struct xics_ipi_struct {
extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
struct irq_desc;
-extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
- struct pt_regs *regs);
+extern void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc);
#endif /* _POWERPC_KERNEL_XICS_H */