author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 15:20:36 -0700
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/parisc/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/Makefile          |   24
-rw-r--r--  arch/parisc/kernel/asm-offsets.c     |  299
-rw-r--r--  arch/parisc/kernel/binfmt_elf32.c    |  126
-rw-r--r--  arch/parisc/kernel/cache.c           |  366
-rw-r--r--  arch/parisc/kernel/drivers.c         |  765
-rw-r--r--  arch/parisc/kernel/entry.S           | 2426
-rw-r--r--  arch/parisc/kernel/firmware.c        | 1405
-rw-r--r--  arch/parisc/kernel/hardware.c        | 1366
-rw-r--r--  arch/parisc/kernel/head.S            |  386
-rw-r--r--  arch/parisc/kernel/hpmc.S            |  304
-rw-r--r--  arch/parisc/kernel/init_task.c       |   76
-rw-r--r--  arch/parisc/kernel/inventory.c       |  612
-rw-r--r--  arch/parisc/kernel/ioctl32.c         |  625
-rw-r--r--  arch/parisc/kernel/irq.c             |  343
-rw-r--r--  arch/parisc/kernel/module.c          |  822
-rw-r--r--  arch/parisc/kernel/pa7300lc.c        |   49
-rw-r--r--  arch/parisc/kernel/pacache.S         | 1086
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c    |  187
-rw-r--r--  arch/parisc/kernel/pci-dma.c         |  578
-rw-r--r--  arch/parisc/kernel/pci.c             |  346
-rw-r--r--  arch/parisc/kernel/pdc_chassis.c     |  245
-rw-r--r--  arch/parisc/kernel/pdc_cons.c        |  189
-rw-r--r--  arch/parisc/kernel/perf.c            |  841
-rw-r--r--  arch/parisc/kernel/perf_asm.S        | 1691
-rw-r--r--  arch/parisc/kernel/perf_images.h     | 3138
-rw-r--r--  arch/parisc/kernel/process.c         |  396
-rw-r--r--  arch/parisc/kernel/processor.c       |  400
-rw-r--r--  arch/parisc/kernel/ptrace.c          |  423
-rw-r--r--  arch/parisc/kernel/real2.S           |  304
-rw-r--r--  arch/parisc/kernel/semaphore.c       |  102
-rw-r--r--  arch/parisc/kernel/setup.c           |  368
-rw-r--r--  arch/parisc/kernel/signal.c          |  664
-rw-r--r--  arch/parisc/kernel/signal32.c        |  400
-rw-r--r--  arch/parisc/kernel/signal32.h        |   43
-rw-r--r--  arch/parisc/kernel/smp.c             |  723
-rw-r--r--  arch/parisc/kernel/sys32.h           |   48
-rw-r--r--  arch/parisc/kernel/sys_parisc.c      |  253
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c    |  720
-rw-r--r--  arch/parisc/kernel/syscall.S         |  703
-rw-r--r--  arch/parisc/kernel/syscall_table.S   |  372
-rw-r--r--  arch/parisc/kernel/time.c            |  243
-rw-r--r--  arch/parisc/kernel/topology.c        |   37
-rw-r--r--  arch/parisc/kernel/traps.c           |  834
-rw-r--r--  arch/parisc/kernel/unaligned.c       |  816
-rw-r--r--  arch/parisc/kernel/unwind.c          |  393
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S     |  207
46 files changed, 26744 insertions(+), 0 deletions(-)
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
new file mode 100644
index 000000000000..171f9c239f60
--- /dev/null
+++ b/arch/parisc/kernel/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for arch/parisc/kernel
+#
+
+extra-y := init_task.o head.o vmlinux.lds
+
+AFLAGS_entry.o := -traditional
+AFLAGS_pacache.o := -traditional
+CFLAGS_ioctl32.o := -Ifs/
+
+obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
+ pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
+ ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+ signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
+ process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
+ topology.o
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PA11) += pci-dma.o
+obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o ioctl32.o signal32.o
+# only supported for PCX-W/U in 64-bit mode at the moment
+obj-$(CONFIG_64BIT) += perf.o perf_asm.o
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
new file mode 100644
index 000000000000..1ad44f92d6e4
--- /dev/null
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -0,0 +1,299 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/version.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/uaccess.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#ifdef __LP64__
+#define FRAME_SIZE 128
+#else
+#define FRAME_SIZE 64
+#endif
+
+#define align(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
+
+int main(void)
+{
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
+ DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+ BLANK();
+ DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
+ DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
+ DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
+ DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
+ DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
+ DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
+ DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
+ DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
+ DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
+ DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
+ DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
+ DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
+ DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
+ DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
+ DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
+ DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
+ DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
+ DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
+ DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
+ DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
+ DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
+ DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
+ DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
+ DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
+ DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
+ DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
+ DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
+ DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
+ DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
+ DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
+ DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
+ DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
+ DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
+ DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
+ DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
+ DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
+ DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
+ DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
+ DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
+ DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
+ DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
+ DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
+ DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
+ DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
+ DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
+ DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
+ DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
+ DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
+ DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
+ DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
+ DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
+ DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
+ DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
+ DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
+ DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
+ DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
+ DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
+ DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
+ DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
+ DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
+ DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
+ DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
+ DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
+ DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
+ DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
+ DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
+ DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
+ DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
+ DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
+ DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
+ DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
+ DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
+ DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
+ DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
+ DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
+ DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
+ DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
+ DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
+ DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
+ DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
+ DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
+ DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
+ DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
+ DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
+ DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
+ BLANK();
+ DEFINE(TASK_SZ, sizeof(struct task_struct));
+ DEFINE(TASK_SZ_ALGN, align(sizeof(struct task_struct), 64));
+ BLANK();
+ DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
+ DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
+ DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
+ DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
+ DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
+ DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
+ DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
+ DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
+ DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
+ DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
+ DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
+ DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
+ DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
+ DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
+ DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
+ DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
+ DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
+ DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
+ DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
+ DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
+ DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
+ DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
+ DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
+ DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
+ DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
+ DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
+ DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
+ DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
+ DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
+ DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
+ DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
+ DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
+ DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
+ DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
+ DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
+ DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
+ DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
+ DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
+ DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
+ DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
+ DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
+ DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
+ DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
+ DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
+ DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
+ DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
+ DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
+ DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
+ DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
+ DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
+ DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
+ DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
+ DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
+ DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
+ DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
+ DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
+ DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
+ DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
+ DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
+ DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
+ DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
+ DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
+ DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
+ DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
+ DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
+ DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
+ DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
+ DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
+ DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
+ DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
+ DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
+ DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
+ DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
+ DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
+ DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
+ DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
+ DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
+ DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
+ DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
+ DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
+ DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
+ DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
+ DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
+ DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_SZ_ALGN, align(sizeof(struct pt_regs), 64));
+ BLANK();
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+ DEFINE(THREAD_SZ, sizeof(struct thread_info));
+ DEFINE(THREAD_SZ_ALGN, align(sizeof(struct thread_info), 64));
+ BLANK();
+ DEFINE(IRQSTAT_SIRQ_PEND, offsetof(irq_cpustat_t, __softirq_pending));
+ DEFINE(IRQSTAT_SZ, sizeof(irq_cpustat_t));
+ BLANK();
+ DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
+ DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
+ DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
+ DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
+ DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
+ DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
+ DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
+ DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
+ DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
+ DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
+ DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
+ DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
+ DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
+ DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
+ DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
+ DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
+ DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
+ DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
+ DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
+ DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
+ DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
+ DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
+ BLANK();
+ DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT);
+ DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT);
+ BLANK();
+ DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
+ DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
+ DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
+ DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
+ DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
+ DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
+ DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
+ DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
+ DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
+ DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
+ DEFINE(ASM_PT_INITIAL, PT_INITIAL);
+ DEFINE(ASM_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+ DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
+ DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+ DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ return 0;
+}
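
Context, not part of the commit: asm-offsets.c is never linked into the kernel. It is compiled with -S only, and each DEFINE()/BLANK() plants a "->SYMBOL value expression" marker in the generated assembly, which a sed pass in the build then rewrites into #define lines in the offsets header that entry.S and pacache.S include. A stand-alone sketch of the same mechanism (the struct and DEMO_* names are invented for illustration):

/* Sketch only.  Build with "gcc -S offsets_demo.c" and inspect
 * offsets_demo.s: each DEFINE() emits a "->SYMBOL <value> <expr>"
 * marker line instead of real instructions, so the .s output must
 * never be assembled.  On some targets the value carries an
 * immediate prefix such as '$', which the kernel's sed rules strip. */
#include <stddef.h>

#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo {
        int  a;
        long b;             /* offset depends on the target ABI */
};

int main(void)
{
        DEFINE(DEMO_A,  offsetof(struct demo, a));
        DEFINE(DEMO_B,  offsetof(struct demo, b));
        DEFINE(DEMO_SZ, sizeof(struct demo));
        return 0;
}

The resulting header lets hand-written assembly such as entry.S refer to structure members (TASK_PT_GR1 and friends) without hard-coding offsets that would silently break whenever the C structures change.
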
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
new file mode 100644
index 000000000000..d1833f164bbe
--- /dev/null
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -0,0 +1,126 @@
+/*
+ * Support for 32-bit Linux/Parisc ELF binaries on 64 bit kernels
+ *
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2000 Hewlett Packard Co.
+ *
+ * Heavily inspired from various other efforts to do the same thing
+ * (ia64,sparc64/mips64)
+ */
+
+/* Make sure include/asm-parisc/elf.h does the right thing */
+
+#define ELF_CLASS ELFCLASS32
+
+#define ELF_CORE_COPY_REGS(dst, pt) \
+ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
+ { int i; \
+ for (i = 0; i < 32; i++) dst[i] = (elf_greg_t) pt->gr[i]; \
+ for (i = 0; i < 8; i++) dst[32 + i] = (elf_greg_t) pt->sr[i]; \
+ } \
+ dst[40] = (elf_greg_t) pt->iaoq[0]; dst[41] = (elf_greg_t) pt->iaoq[1]; \
+ dst[42] = (elf_greg_t) pt->iasq[0]; dst[43] = (elf_greg_t) pt->iasq[1]; \
+ dst[44] = (elf_greg_t) pt->sar; dst[45] = (elf_greg_t) pt->iir; \
+ dst[46] = (elf_greg_t) pt->isr; dst[47] = (elf_greg_t) pt->ior; \
+ dst[48] = (elf_greg_t) mfctl(22); dst[49] = (elf_greg_t) mfctl(0); \
+ dst[50] = (elf_greg_t) mfctl(24); dst[51] = (elf_greg_t) mfctl(25); \
+ dst[52] = (elf_greg_t) mfctl(26); dst[53] = (elf_greg_t) mfctl(27); \
+ dst[54] = (elf_greg_t) mfctl(28); dst[55] = (elf_greg_t) mfctl(29); \
+ dst[56] = (elf_greg_t) mfctl(30); dst[57] = (elf_greg_t) mfctl(31); \
+ dst[58] = (elf_greg_t) mfctl( 8); dst[59] = (elf_greg_t) mfctl( 9); \
+ dst[60] = (elf_greg_t) mfctl(12); dst[61] = (elf_greg_t) mfctl(13); \
+ dst[62] = (elf_greg_t) mfctl(10); dst[63] = (elf_greg_t) mfctl(15);
+
+
+typedef unsigned int elf_greg_t;
+
+#include <linux/spinlock.h>
+#include <asm/processor.h>
+#include <linux/module.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h> /* struct compat_timeval */
+
+#define elf_prstatus elf_prstatus32
+struct elf_prstatus32
+{
+ struct elf_siginfo pr_info; /* Info associated with signal */
+ short pr_cursig; /* Current signal */
+ unsigned int pr_sigpend; /* Set of pending signals */
+ unsigned int pr_sighold; /* Set of held signals */
+ pid_t pr_pid;
+ pid_t pr_ppid;
+ pid_t pr_pgrp;
+ pid_t pr_sid;
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_cutime; /* Cumulative user time */
+ struct compat_timeval pr_cstime; /* Cumulative system time */
+ elf_gregset_t pr_reg; /* GP registers */
+ int pr_fpvalid; /* True if math co-processor being used. */
+};
+
+#define elf_prpsinfo elf_prpsinfo32
+struct elf_prpsinfo32
+{
+ char pr_state; /* numeric process state */
+ char pr_sname; /* char for pr_state */
+ char pr_zomb; /* zombie */
+ char pr_nice; /* nice val */
+ unsigned int pr_flag; /* flags */
+ u16 pr_uid;
+ u16 pr_gid;
+ pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /* Lots missing */
+ char pr_fname[16]; /* filename of executable */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+};
+
+#define elf_addr_t unsigned int
+#define init_elf_binfmt init_elf32_binfmt
+
+#define ELF_PLATFORM ("PARISC32\0")
+
+/*
+ * We should probably use this macro to set a flag somewhere to indicate
+ * this is a 32 on 64 process. We could use PER_LINUX_32BIT, or we
+ * could set a processor dependent flag in the thread_struct.
+ */
+
+#define SET_PERSONALITY(ex, ibcs2) \
+ current->personality = PER_LINUX32; \
+ current->thread.map_base = DEFAULT_MAP_BASE32; \
+ current->thread.task_size = DEFAULT_TASK_SIZE32 \
+
+#undef cputime_to_timeval
+#define cputime_to_timeval cputime_to_compat_timeval
+static __inline__ void
+cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
+{
+ unsigned long jiffies = cputime_to_jiffies(cputime);
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
+
+#include "../../../fs/binfmt_elf.c"
+
+/* Set up a separate execution domain for ELF32 binaries running
+ * on an ELF64 kernel */
+
+static struct exec_domain parisc32_exec_domain = {
+ .name = "Linux/ELF32",
+ .pers_low = PER_LINUX32,
+ .pers_high = PER_LINUX32,
+};
+
+static int __init parisc32_exec_init(void)
+{
+ /* steal the identity signal mappings from the default domain */
+ parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
+ parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+
+ register_exec_domain(&parisc32_exec_domain);
+
+ return 0;
+}
+
+__initcall(parisc32_exec_init);
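
Context, not part of the commit: binfmt_elf32.c is almost entirely glue. It overrides a handful of macros and types (ELF_CLASS, elf_prstatus, elf_addr_t, SET_PERSONALITY, ...) and then textually includes the generic fs/binfmt_elf.c, so a 64-bit kernel compiles a second, 32-bit-flavoured copy of the ELF loader; "#define init_elf_binfmt init_elf32_binfmt" renames its initcall so both loaders coexist, and a separate PER_LINUX32 exec domain is registered for the compat processes. A minimal sketch of the same override-then-include pattern, with invented file and symbol names:

/* generic_sum.c -- hypothetical stand-in for fs/binfmt_elf.c:
 * generic code parameterised by macros the includer may override. */
#ifndef SUM_TYPE
#define SUM_TYPE long                  /* native-width default */
#endif
#ifndef SUM_NAME
#define SUM_NAME sum_native
#endif

SUM_TYPE SUM_NAME(const SUM_TYPE *v, int n)
{
        SUM_TYPE s = 0;
        while (n--)
                s += *v++;
        return s;
}

/* sum32.c -- the "compat" translation unit: override the knobs,
 * then pull in the generic implementation, as binfmt_elf32.c does. */
#define SUM_TYPE int
#define SUM_NAME sum_compat32
#include "generic_sum.c"
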
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
new file mode 100644
index 000000000000..f46a07a79218
--- /dev/null
+++ b/arch/parisc/kernel/cache.c
@@ -0,0 +1,366 @@
+/* $Id: cache.c,v 1.4 2000/01/25 00:11:38 prumpf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 Helge Deller (07-13-1999)
+ * Copyright (C) 1999 SuSE GmbH Nuernberg
+ * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
+ *
+ * Cache and TLB management
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+
+#include <asm/pdc.h>
+#include <asm/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+
+int split_tlb;
+int dcache_stride;
+int icache_stride;
+EXPORT_SYMBOL(dcache_stride);
+
+
+#if defined(CONFIG_SMP)
+/* On some machines (e.g. ones with the Merced bus), there can be
+ * only a single PxTLB broadcast at a time; this must be guaranteed
+ * by software. We put a spinlock around all TLB flushes to
+ * ensure this.
+ */
+DEFINE_SPINLOCK(pa_tlb_lock);
+EXPORT_SYMBOL(pa_tlb_lock);
+#endif
+
+struct pdc_cache_info cache_info;
+#ifndef CONFIG_PA20
+static struct pdc_btlb_info btlb_info;
+#endif
+
+#ifdef CONFIG_SMP
+void
+flush_data_cache(void)
+{
+ on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
+}
+void
+flush_instruction_cache(void)
+{
+ on_each_cpu((void (*)(void *))flush_instruction_cache_local, NULL, 1, 1);
+}
+#endif
+
+void
+flush_cache_all_local(void)
+{
+ flush_instruction_cache_local();
+ flush_data_cache_local();
+}
+EXPORT_SYMBOL(flush_cache_all_local);
+
+/* flushes EVERYTHING (tlb & cache) */
+
+void
+flush_all_caches(void)
+{
+ flush_cache_all();
+ flush_tlb_all();
+}
+EXPORT_SYMBOL(flush_all_caches);
+
+void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ struct page *page = pte_page(pte);
+
+ if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
+ test_bit(PG_dcache_dirty, &page->flags)) {
+
+ flush_kernel_dcache_page(page_address(page));
+ clear_bit(PG_dcache_dirty, &page->flags);
+ }
+}
+
+void
+show_cache_info(struct seq_file *m)
+{
+ seq_printf(m, "I-cache\t\t: %ld KB\n",
+ cache_info.ic_size/1024 );
+ seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %d-way associative)\n",
+ cache_info.dc_size/1024,
+ (cache_info.dc_conf.cc_wt ? "WT":"WB"),
+ (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
+ (cache_info.dc_conf.cc_assoc)
+ );
+
+ seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
+ cache_info.it_size,
+ cache_info.dt_size,
+ cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
+ );
+
+#ifndef CONFIG_PA20
+ /* BTLB - Block TLB */
+ if (btlb_info.max_size==0) {
+ seq_printf(m, "BTLB\t\t: not supported\n" );
+ } else {
+ seq_printf(m,
+ "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
+ "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
+ "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
+ btlb_info.max_size, (int)4096,
+ btlb_info.max_size>>8,
+ btlb_info.fixed_range_info.num_i,
+ btlb_info.fixed_range_info.num_d,
+ btlb_info.fixed_range_info.num_comb,
+ btlb_info.variable_range_info.num_i,
+ btlb_info.variable_range_info.num_d,
+ btlb_info.variable_range_info.num_comb
+ );
+ }
+#endif
+}
+
+void __init
+parisc_cache_init(void)
+{
+ if (pdc_cache_info(&cache_info) < 0)
+ panic("parisc_cache_init: pdc_cache_info failed");
+
+#if 0
+ printk("ic_size %lx dc_size %lx it_size %lx\n",
+ cache_info.ic_size,
+ cache_info.dc_size,
+ cache_info.it_size);
+
+ printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+ cache_info.dc_base,
+ cache_info.dc_stride,
+ cache_info.dc_count,
+ cache_info.dc_loop);
+
+ printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+ *(unsigned long *) (&cache_info.dc_conf),
+ cache_info.dc_conf.cc_alias,
+ cache_info.dc_conf.cc_block,
+ cache_info.dc_conf.cc_line,
+ cache_info.dc_conf.cc_shift);
+ printk(" wt %d sh %d cst %d assoc %d\n",
+ cache_info.dc_conf.cc_wt,
+ cache_info.dc_conf.cc_sh,
+ cache_info.dc_conf.cc_cst,
+ cache_info.dc_conf.cc_assoc);
+
+ printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
+ cache_info.ic_base,
+ cache_info.ic_stride,
+ cache_info.ic_count,
+ cache_info.ic_loop);
+
+ printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
+ *(unsigned long *) (&cache_info.ic_conf),
+ cache_info.ic_conf.cc_alias,
+ cache_info.ic_conf.cc_block,
+ cache_info.ic_conf.cc_line,
+ cache_info.ic_conf.cc_shift);
+ printk(" wt %d sh %d cst %d assoc %d\n",
+ cache_info.ic_conf.cc_wt,
+ cache_info.ic_conf.cc_sh,
+ cache_info.ic_conf.cc_cst,
+ cache_info.ic_conf.cc_assoc);
+
+ printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.dt_conf.tc_sh,
+ cache_info.dt_conf.tc_page,
+ cache_info.dt_conf.tc_cst,
+ cache_info.dt_conf.tc_aid,
+ cache_info.dt_conf.tc_pad1);
+
+ printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d \n",
+ cache_info.it_conf.tc_sh,
+ cache_info.it_conf.tc_page,
+ cache_info.it_conf.tc_cst,
+ cache_info.it_conf.tc_aid,
+ cache_info.it_conf.tc_pad1);
+#endif
+
+ split_tlb = 0;
+ if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
+ if (cache_info.dt_conf.tc_sh == 2)
+ printk(KERN_WARNING "Unexpected TLB configuration. "
+ "Will flush I/D separately (could be optimized).\n");
+
+ split_tlb = 1;
+ }
+
+ /* "New and Improved" version from Jim Hull
+ * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
+ */
+#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
+ dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
+ icache_stride = CAFL_STRIDE(cache_info.ic_conf);
+#undef CAFL_STRIDE
+
+#ifndef CONFIG_PA20
+ if (pdc_btlb_info(&btlb_info) < 0) {
+ memset(&btlb_info, 0, sizeof btlb_info);
+ }
+#endif
+
+ if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
+ PDC_MODEL_NVA_UNSUPPORTED) {
+ printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
+#if 0
+ panic("SMP kernel required to avoid non-equivalent aliasing");
+#endif
+ }
+}
+
+void disable_sr_hashing(void)
+{
+ int srhash_type;
+
+ switch (boot_cpu_data.cpu_type) {
+ case pcx: /* We shouldn't get this far. setup.c should prevent it. */
+ BUG();
+ return;
+
+ case pcxs:
+ case pcxt:
+ case pcxt_:
+ srhash_type = SRHASH_PCXST;
+ break;
+
+ case pcxl:
+ srhash_type = SRHASH_PCXL;
+ break;
+
+ case pcxl2: /* pcxl2 doesn't support space register hashing */
+ return;
+
+ default: /* Currently all PA2.0 machines use the same ins. sequence */
+ srhash_type = SRHASH_PA20;
+ break;
+ }
+
+ disable_sr_hashing_asm(srhash_type);
+}
+
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping = page_mapping(page);
+ struct vm_area_struct *mpnt;
+ struct prio_tree_iter iter;
+ unsigned long offset;
+ unsigned long addr;
+ pgoff_t pgoff;
+ pte_t *pte;
+ unsigned long pfn = page_to_pfn(page);
+
+
+ if (mapping && !mapping_mapped(mapping)) {
+ set_bit(PG_dcache_dirty, &page->flags);
+ return;
+ }
+
+ flush_kernel_dcache_page(page_address(page));
+
+ if (!mapping)
+ return;
+
+ pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+ /* We have carefully arranged in arch_get_unmapped_area() that
+ * *any* mappings of a file are always congruently mapped (whether
+ * declared as MAP_PRIVATE or MAP_SHARED), so we only need
+ * to flush one address here for them all to become coherent */
+
+ flush_dcache_mmap_lock(mapping);
+ vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+ addr = mpnt->vm_start + offset;
+
+ /* Flush instructions produce non access tlb misses.
+ * On PA, we nullify these instructions rather than
+ * taking a page fault if the pte doesn't exist.
+ * This is just for speed. If the page translation
+ * isn't there, there's no point exciting the
+ * nadtlb handler into a nullification frenzy */
+
+
+ if(!(pte = translation_exists(mpnt, addr)))
+ continue;
+
+ /* make sure we really have this page: the private
+ * mappings may cover this area but have COW'd this
+ * particular page */
+ if(pte_pfn(*pte) != pfn)
+ continue;
+
+ __flush_cache_page(mpnt, addr);
+
+ break;
+ }
+ flush_dcache_mmap_unlock(mapping);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+/* Defined in arch/parisc/kernel/pacache.S */
+EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+EXPORT_SYMBOL(flush_data_cache_local);
+EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+
+void clear_user_page_asm(void *page, unsigned long vaddr)
+{
+ /* This function is implemented in assembly in pacache.S */
+ extern void __clear_user_page_asm(void *page, unsigned long vaddr);
+
+ purge_tlb_start();
+ __clear_user_page_asm(page, vaddr);
+ purge_tlb_end();
+}
+
+#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+int parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+
+void parisc_setup_cache_timing(void)
+{
+ unsigned long rangetime, alltime;
+ extern char _text; /* start of kernel code, defined by linker */
+ extern char _end; /* end of BSS, defined by linker */
+ unsigned long size;
+
+ alltime = mfctl(16);
+ flush_data_cache();
+ alltime = mfctl(16) - alltime;
+
+ size = (unsigned long)(&_end - _text);
+ rangetime = mfctl(16);
+ flush_kernel_dcache_range((unsigned long)&_text, size);
+ rangetime = mfctl(16) - rangetime;
+
+ printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
+ alltime, size, rangetime);
+
+ /* Racy, but if we see an intermediate value, it's ok too... */
+ parisc_cache_flush_threshold = size * alltime / rangetime;
+
+ parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
+ if (!parisc_cache_flush_threshold)
+ parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+
+ printk("Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+}
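
Context, not part of the commit: parisc_setup_cache_timing() above calibrates, at boot, the size beyond which flushing the whole data cache is cheaper than range-flushing line by line. It times one full flush_data_cache() and one flush_kernel_dcache_range() over the kernel image and scales. A minimal sketch of that arithmetic with made-up cycle counts (the numbers and the helper name below are illustrative only):

#include <stdio.h>

/* Break-even rule used by parisc_setup_cache_timing(), sketched with
 * invented measurements. */
static unsigned long long flush_threshold(unsigned long long size,
                                          unsigned long long alltime,
                                          unsigned long long rangetime)
{
        /* 'size' bytes cost 'rangetime' cycles to range-flush; a full
         * cache flush always costs 'alltime' cycles, so the full flush
         * wins for regions larger than size * alltime / rangetime. */
        return size * alltime / rangetime;
}

int main(void)
{
        /* assumed measurements: whole-cache flush = 200,000 cycles,
         * range flush of a 4 MB kernel image = 1,600,000 cycles */
        unsigned long long t = flush_threshold(4ULL << 20, 200000, 1600000);
        printf("break-even flush size: %llu bytes (~%llu KB)\n", t, t >> 10);
        return 0;
}

With these assumed numbers the threshold comes out at 524288 bytes (512 KB); the kernel code then rounds the result up to a cache-line multiple and falls back to FLUSH_THRESHOLD if the measurement degenerates to zero.
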
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
new file mode 100644
index 000000000000..ebf186656afb
--- /dev/null
+++ b/arch/parisc/kernel/drivers.c
@@ -0,0 +1,765 @@
+/*
+ * drivers.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard
+ * Copyright (c) 2001 Helge Deller <deller@gmx.de>
+ * Copyright (c) 2001,2002 Ryan Bradetich
+ * Copyright (c) 2004-2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * The file handles registering devices and drivers, then matching them.
+ * It's the closest we get to a dating agency.
+ *
+ * If you're thinking about modifying this file, here are some gotchas to
+ * bear in mind:
+ * - 715/Mirage device paths have a dummy device between Lasi and its children
+ * - The EISA adapter may show up as a sibling or child of Wax
+ * - Dino has an optionally functional serial port. If firmware enables it,
+ * it shows up as a child of Dino. If firmware disables it, the buswalk
+ * finds it and it shows up as a child of Cujo
+ * - Dino has both parisc and pci devices as children
+ * - parisc devices are discovered in a random order, including children
+ * before parents in some cases.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/pdc.h>
+#include <asm/parisc-device.h>
+
+/* See comments in include/asm-parisc/pci.h */
+struct hppa_dma_ops *hppa_dma_ops;
+EXPORT_SYMBOL(hppa_dma_ops);
+
+static struct device root = {
+ .bus_id = "parisc",
+};
+
+#define for_each_padev(padev) \
+ for (padev = next_dev(&root); padev != NULL; \
+ padev = next_dev(&padev->dev))
+
+#define check_dev(padev) \
+ (padev->id.hw_type != HPHW_FAULTY) ? padev : next_dev(&padev->dev)
+
+/**
+ * next_dev - enumerates registered devices
+ * @dev: the previous device returned from next_dev
+ *
+ * next_dev does a depth-first search of the tree, returning parents
+ * before children. Returns NULL when there are no more devices.
+ */
+static struct parisc_device *next_dev(struct device *dev)
+{
+ if (!list_empty(&dev->children)) {
+ dev = list_to_dev(dev->children.next);
+ return check_dev(to_parisc_device(dev));
+ }
+
+ while (dev != &root) {
+ if (dev->node.next != &dev->parent->children) {
+ dev = list_to_dev(dev->node.next);
+ return to_parisc_device(dev);
+ }
+ dev = dev->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * match_device - Report whether this driver can handle this device
+ * @driver: the PA-RISC driver to try
+ * @dev: the PA-RISC device to try
+ */
+static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+{
+ const struct parisc_device_id *ids;
+
+ for (ids = driver->id_table; ids->sversion; ids++) {
+ if ((ids->sversion != SVERSION_ANY_ID) &&
+ (ids->sversion != dev->id.sversion))
+ continue;
+
+ if ((ids->hw_type != HWTYPE_ANY_ID) &&
+ (ids->hw_type != dev->id.hw_type))
+ continue;
+
+ if ((ids->hversion != HVERSION_ANY_ID) &&
+ (ids->hversion != dev->id.hversion))
+ continue;
+
+ return 1;
+ }
+ return 0;
+}
+
+static void claim_device(struct parisc_driver *driver, struct parisc_device *dev)
+{
+ dev->driver = driver;
+ request_mem_region(dev->hpa, 0x1000, driver->name);
+}
+
+static int parisc_driver_probe(struct device *dev)
+{
+ int rc;
+ struct parisc_device *pa_dev = to_parisc_device(dev);
+ struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+
+ rc = pa_drv->probe(pa_dev);
+
+ if(!rc)
+ claim_device(pa_drv, pa_dev);
+
+ return rc;
+}
+
+static int parisc_driver_remove(struct device *dev)
+{
+ struct parisc_device *pa_dev = to_parisc_device(dev);
+ struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+ if (pa_drv->remove)
+ pa_drv->remove(pa_dev);
+ release_mem_region(pa_dev->hpa, 0x1000);
+
+ return 0;
+}
+
+
+/**
+ * register_parisc_driver - Register this driver if it can handle a device
+ * @driver: the PA-RISC driver to try
+ */
+int register_parisc_driver(struct parisc_driver *driver)
+{
+ /* FIXME: we need this because apparently the sti
+ * driver can be registered twice */
+ if(driver->drv.name) {
+ printk(KERN_WARNING
+ "BUG: skipping previously registered driver %s\n",
+ driver->name);
+ return 1;
+ }
+
+ if (!driver->probe) {
+ printk(KERN_WARNING
+ "BUG: driver %s has no probe routine\n",
+ driver->name);
+ return 1;
+ }
+
+ driver->drv.bus = &parisc_bus_type;
+
+ /* We install our own probe and remove routines */
+ WARN_ON(driver->drv.probe != NULL);
+ WARN_ON(driver->drv.remove != NULL);
+
+ driver->drv.probe = parisc_driver_probe;
+ driver->drv.remove = parisc_driver_remove;
+ driver->drv.name = driver->name;
+
+ return driver_register(&driver->drv);
+}
+EXPORT_SYMBOL(register_parisc_driver);
+
+/**
+ * count_parisc_driver - count # of devices this driver would match
+ * @driver: the PA-RISC driver to try
+ *
+ * Use by IOMMU support to "guess" the right size IOPdir.
+ * Formula is something like memsize/(num_iommu * entry_size).
+ */
+int count_parisc_driver(struct parisc_driver *driver)
+{
+ struct parisc_device *device;
+ int cnt = 0;
+
+ for_each_padev(device) {
+ if (match_device(driver, device))
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+
+/**
+ * unregister_parisc_driver - Unregister this driver from the list of drivers
+ * @driver: the PA-RISC driver to unregister
+ */
+int unregister_parisc_driver(struct parisc_driver *driver)
+{
+ driver_unregister(&driver->drv);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_parisc_driver);
+
+static struct parisc_device *find_device_by_addr(unsigned long hpa)
+{
+ struct parisc_device *dev;
+ for_each_padev(dev) {
+ if (dev->hpa == hpa)
+ return dev;
+ }
+ return NULL;
+}
+
+/**
+ * find_pa_parent_type - Find a parent of a specific type
+ * @dev: The device to start searching from
+ * @type: The device type to search for.
+ *
+ * Walks up the device tree looking for a device of the specified type.
+ * If it finds it, it returns it. If not, it returns NULL.
+ */
+const struct parisc_device *
+find_pa_parent_type(const struct parisc_device *padev, int type)
+{
+ const struct device *dev = &padev->dev;
+ while (dev != &root) {
+ struct parisc_device *candidate = to_parisc_device(dev);
+ if (candidate->id.hw_type == type)
+ return candidate;
+ dev = dev->parent;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_PCI
+static inline int is_pci_dev(struct device *dev)
+{
+ return dev->bus == &pci_bus_type;
+}
+#else
+static inline int is_pci_dev(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+/*
+ * get_node_path fills in @path with the firmware path to the device.
+ * Note that if @node is a parisc device, we don't fill in the 'mod' field.
+ * This is because both callers pass the parent and fill in the mod
+ * themselves. If @node is a PCI device, we do fill it in, even though this
+ * is inconsistent.
+ */
+static void get_node_path(struct device *dev, struct hardware_path *path)
+{
+ int i = 5;
+ memset(&path->bc, -1, 6);
+
+ if (is_pci_dev(dev)) {
+ unsigned int devfn = to_pci_dev(dev)->devfn;
+ path->mod = PCI_FUNC(devfn);
+ path->bc[i--] = PCI_SLOT(devfn);
+ dev = dev->parent;
+ }
+
+ while (dev != &root) {
+ if (is_pci_dev(dev)) {
+ unsigned int devfn = to_pci_dev(dev)->devfn;
+ path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);
+ } else if (dev->bus == &parisc_bus_type) {
+ path->bc[i--] = to_parisc_device(dev)->hw_path;
+ }
+ dev = dev->parent;
+ }
+}
+
+static char *print_hwpath(struct hardware_path *path, char *output)
+{
+ int i;
+ for (i = 0; i < 6; i++) {
+ if (path->bc[i] == -1)
+ continue;
+ output += sprintf(output, "%u/", (unsigned char) path->bc[i]);
+ }
+ output += sprintf(output, "%u", (unsigned char) path->mod);
+ return output;
+}
+
+/**
+ * print_pa_hwpath - Returns hardware path for PA devices
+ * dev: The device to return the path for
+ * output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PA device. This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pa_hwpath(struct parisc_device *dev, char *output)
+{
+ struct hardware_path path;
+
+ get_node_path(dev->dev.parent, &path);
+ path.mod = dev->hw_path;
+ return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pa_hwpath);
+
+#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
+/**
+ * get_pci_node_path - Determines the hardware path for a PCI device
+ * @pdev: The device to return the path for
+ * @path: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the hardware_path structure with the route to
+ * the specified PCI device. This structure is suitable for passing to
+ * PDC calls.
+ */
+void get_pci_node_path(struct pci_dev *pdev, struct hardware_path *path)
+{
+ get_node_path(&pdev->dev, path);
+}
+EXPORT_SYMBOL(get_pci_node_path);
+
+/**
+ * print_pci_hwpath - Returns hardware path for PCI devices
+ * dev: The device to return the path for
+ * output: Pointer to a previously-allocated array to place the path in.
+ *
+ * This function fills in the output array with a human-readable path
+ * to a PCI device. This string is compatible with that used by PDC, and
+ * may be printed on the outside of the box.
+ */
+char *print_pci_hwpath(struct pci_dev *dev, char *output)
+{
+ struct hardware_path path;
+
+ get_pci_node_path(dev, &path);
+ return print_hwpath(&path, output);
+}
+EXPORT_SYMBOL(print_pci_hwpath);
+
+#endif /* defined(CONFIG_PCI) || defined(CONFIG_ISA) */
+
+static void setup_bus_id(struct parisc_device *padev)
+{
+ struct hardware_path path;
+ char *output = padev->dev.bus_id;
+ int i;
+
+ get_node_path(padev->dev.parent, &path);
+
+ for (i = 0; i < 6; i++) {
+ if (path.bc[i] == -1)
+ continue;
+ output += sprintf(output, "%u:", (unsigned char) path.bc[i]);
+ }
+ sprintf(output, "%u", (unsigned char) padev->hw_path);
+}
+
+struct parisc_device * create_tree_node(char id, struct device *parent)
+{
+ struct parisc_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ memset(dev, 0, sizeof(*dev));
+ dev->hw_path = id;
+ dev->id.hw_type = HPHW_FAULTY;
+
+ dev->dev.parent = parent;
+ setup_bus_id(dev);
+
+ dev->dev.bus = &parisc_bus_type;
+ dev->dma_mask = 0xffffffffUL; /* PARISC devices are 32-bit */
+
+ /* make the generic dma mask a pointer to the parisc one */
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.coherent_dma_mask = dev->dma_mask;
+ device_register(&dev->dev);
+
+ return dev;
+}
+
+/**
+ * alloc_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @id: the element of the module path for this entry
+ *
+ * Checks all the children of @parent for a matching @id. If none
+ * found, it allocates a new device and returns it.
+ */
+static struct parisc_device * alloc_tree_node(struct device *parent, char id)
+{
+ struct device *dev;
+
+ list_for_each_entry(dev, &parent->children, node) {
+ struct parisc_device *padev = to_parisc_device(dev);
+ if (padev->hw_path == id)
+ return padev;
+ }
+
+ return create_tree_node(id, parent);
+}
+
+static struct parisc_device *create_parisc_device(struct hardware_path *modpath)
+{
+ int i;
+ struct device *parent = &root;
+ for (i = 0; i < 6; i++) {
+ if (modpath->bc[i] == -1)
+ continue;
+ parent = &alloc_tree_node(parent, modpath->bc[i])->dev;
+ }
+ return alloc_tree_node(parent, modpath->mod);
+}
+
+struct parisc_device *
+alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
+{
+ int status;
+ unsigned long bytecnt;
+ u8 iodc_data[32];
+ struct parisc_device *dev;
+ const char *name;
+
+ /* Check to make sure this device has not already been added - Ryan */
+ if (find_device_by_addr(hpa) != NULL)
+ return NULL;
+
+ status = pdc_iodc_read(&bytecnt, hpa, 0, &iodc_data, 32);
+ if (status != PDC_OK)
+ return NULL;
+
+ dev = create_parisc_device(mod_path);
+ if (dev->id.hw_type != HPHW_FAULTY) {
+ char p[64];
+ print_pa_hwpath(dev, p);
+ printk("Two devices have hardware path %s. Please file a bug with HP.\n"
+ "In the meantime, you could try rearranging your cards.\n", p);
+ return NULL;
+ }
+
+ dev->id.hw_type = iodc_data[3] & 0x1f;
+ dev->id.hversion = (iodc_data[0] << 4) | ((iodc_data[1] & 0xf0) >> 4);
+ dev->id.hversion_rev = iodc_data[1] & 0x0f;
+ dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) |
+ (iodc_data[5] << 8) | iodc_data[6];
+ dev->hpa = hpa;
+ name = parisc_hardware_description(&dev->id);
+ if (name) {
+ strlcpy(dev->name, name, sizeof(dev->name));
+ }
+
+ return dev;
+}
+
+static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+{
+ return match_device(to_parisc_driver(drv), to_parisc_device(dev));
+}
+
+#define pa_dev_attr(name, field, format_string) \
+static ssize_t name##_show(struct device *dev, char *buf) \
+{ \
+ struct parisc_device *padev = to_parisc_device(dev); \
+ return sprintf(buf, format_string, padev->field); \
+}
+
+#define pa_dev_attr_id(field, format) pa_dev_attr(field, id.field, format)
+
+pa_dev_attr(irq, irq, "%u\n");
+pa_dev_attr_id(hw_type, "0x%02x\n");
+pa_dev_attr(rev, id.hversion_rev, "0x%x\n");
+pa_dev_attr_id(hversion, "0x%03x\n");
+pa_dev_attr_id(sversion, "0x%05x\n");
+
+static struct device_attribute parisc_device_attrs[] = {
+ __ATTR_RO(irq),
+ __ATTR_RO(hw_type),
+ __ATTR_RO(rev),
+ __ATTR_RO(hversion),
+ __ATTR_RO(sversion),
+ __ATTR_NULL,
+};
+
+struct bus_type parisc_bus_type = {
+ .name = "parisc",
+ .match = parisc_generic_match,
+ .dev_attrs = parisc_device_attrs,
+};
+
+/**
+ * register_parisc_device - Locate a driver to manage this device.
+ * @dev: The parisc device.
+ *
+ * Search the driver list for a driver that is willing to manage
+ * this device.
+ */
+int register_parisc_device(struct parisc_device *dev)
+{
+ if (!dev)
+ return 0;
+
+ if (dev->driver)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * match_pci_device - Matches a pci device against a given hardware path
+ * entry.
+ * @dev: the generic device (known to be contained by a pci_dev).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_pci_device(struct device *dev, int index,
+ struct hardware_path *modpath)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int id;
+
+ if (index == 5) {
+ /* we are at the end of the path, and on the actual device */
+ unsigned int devfn = pdev->devfn;
+ return ((modpath->bc[5] == PCI_SLOT(devfn)) &&
+ (modpath->mod == PCI_FUNC(devfn)));
+ }
+
+ id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+ return (modpath->bc[index] == id);
+}
+
+/**
+ * match_parisc_device - Matches a parisc device against a given hardware
+ * path entry.
+ * @dev: the generic device (known to be contained by a parisc_device).
+ * @index: the current BC index
+ * @modpath: the hardware path.
+ * @return: true if the device matches the hardware path.
+ */
+static int match_parisc_device(struct device *dev, int index,
+ struct hardware_path *modpath)
+{
+ struct parisc_device *curr = to_parisc_device(dev);
+ char id = (index == 6) ? modpath->mod : modpath->bc[index];
+
+ return (curr->hw_path == id);
+}
+
+/**
+ * parse_tree_node - returns a device entry in the iotree
+ * @parent: the parent node in the tree
+ * @index: the current BC index
+ * @modpath: the hardware_path struct to match a device against
+ * @return: The corresponding device if found, NULL otherwise.
+ *
+ * Checks all the children of @parent for a matching @id. If none
+ * found, it returns NULL.
+ */
+static struct device *
+parse_tree_node(struct device *parent, int index, struct hardware_path *modpath)
+{
+ struct device *device;
+
+ list_for_each_entry(device, &parent->children, node) {
+ if (device->bus == &parisc_bus_type) {
+ if (match_parisc_device(device, index, modpath))
+ return device;
+ } else if (is_pci_dev(device)) {
+ if (match_pci_device(device, index, modpath))
+ return device;
+ } else if (device->bus == NULL) {
+ /* we are on a bus bridge */
+ struct device *new = parse_tree_node(device, index, modpath);
+ if (new)
+ return new;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * hwpath_to_device - Finds the generic device corresponding to a given hardware path.
+ * @modpath: the hardware path.
+ * @return: The target device, NULL if not found.
+ */
+struct device *hwpath_to_device(struct hardware_path *modpath)
+{
+ int i;
+ struct device *parent = &root;
+ for (i = 0; i < 6; i++) {
+ if (modpath->bc[i] == -1)
+ continue;
+ parent = parse_tree_node(parent, i, modpath);
+ if (!parent)
+ return NULL;
+ }
+ if (is_pci_dev(parent)) /* pci devices already parse MOD */
+ return parent;
+ else
+ return parse_tree_node(parent, 6, modpath);
+}
+EXPORT_SYMBOL(hwpath_to_device);
+
+/**
+ * device_to_hwpath - Populates the hwpath corresponding to the given device.
+ * @param dev the target device
+ * @param path pointer to a previously allocated hwpath struct to be filled in
+ */
+void device_to_hwpath(struct device *dev, struct hardware_path *path)
+{
+ struct parisc_device *padev;
+ if (dev->bus == &parisc_bus_type) {
+ padev = to_parisc_device(dev);
+ get_node_path(dev->parent, path);
+ path->mod = padev->hw_path;
+ } else if (is_pci_dev(dev)) {
+ get_node_path(dev, path);
+ }
+}
+EXPORT_SYMBOL(device_to_hwpath);
+
+#define BC_PORT_MASK 0x8
+#define BC_LOWER_PORT 0x8
+
+#define BUS_CONVERTER(dev) \
+ ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT))
+
+#define IS_LOWER_PORT(dev) \
+ ((gsc_readl(dev->hpa + offsetof(struct bc_module, io_status)) \
+ & BC_PORT_MASK) == BC_LOWER_PORT)
+
+#define MAX_NATIVE_DEVICES 64
+#define NATIVE_DEVICE_OFFSET 0x1000
+
+#define FLEX_MASK F_EXTEND(0xfffc0000)
+#define IO_IO_LOW offsetof(struct bc_module, io_io_low)
+#define IO_IO_HIGH offsetof(struct bc_module, io_io_high)
+#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_LOW)
+#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_HIGH)
+
+static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+ struct device *parent);
+
+void walk_lower_bus(struct parisc_device *dev)
+{
+ unsigned long io_io_low, io_io_high;
+
+ if(!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev))
+ return;
+
+ if(dev->id.hw_type == HPHW_IOA) {
+ io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16);
+ io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET;
+ } else {
+ io_io_low = (READ_IO_IO_LOW(dev) + ~FLEX_MASK) & FLEX_MASK;
+ io_io_high = (READ_IO_IO_HIGH(dev)+ ~FLEX_MASK) & FLEX_MASK;
+ }
+
+ walk_native_bus(io_io_low, io_io_high, &dev->dev);
+}
+
+/**
+ * walk_native_bus -- Probe a bus for devices
+ * @io_io_low: Base address of this bus.
+ * @io_io_high: Last address of this bus.
+ * @parent: The parent bus device.
+ *
+ * A native bus (eg Runway or GSC) may have up to 64 devices on it,
+ * spaced at intervals of 0x1000 bytes. PDC may not inform us of these
+ * devices, so we have to probe for them. Unfortunately, we may find
+ * devices which are not physically connected (such as extra serial &
+ * keyboard ports). This problem is not yet solved.
+ */
+static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
+ struct device *parent)
+{
+ int i, devices_found = 0;
+ unsigned long hpa = io_io_low;
+ struct hardware_path path;
+
+ get_node_path(parent, &path);
+ do {
+ for(i = 0; i < MAX_NATIVE_DEVICES; i++, hpa += NATIVE_DEVICE_OFFSET) {
+ struct parisc_device *dev;
+
+ /* Was the device already added by Firmware? */
+ dev = find_device_by_addr(hpa);
+ if (!dev) {
+ path.mod = i;
+ dev = alloc_pa_dev(hpa, &path);
+ if (!dev)
+ continue;
+
+ register_parisc_device(dev);
+ devices_found++;
+ }
+ walk_lower_bus(dev);
+ }
+ } while(!devices_found && hpa < io_io_high);
+}
+
+#define CENTRAL_BUS_ADDR F_EXTEND(0xfff80000)
+
+/**
+ * walk_central_bus - Find devices attached to the central bus
+ *
+ * PDC doesn't tell us about all devices in the system. This routine
+ * finds devices connected to the central bus.
+ */
+void walk_central_bus(void)
+{
+ walk_native_bus(CENTRAL_BUS_ADDR,
+ CENTRAL_BUS_ADDR + (MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET),
+ &root);
+}
+
+static void print_parisc_device(struct parisc_device *dev)
+{
+ char hw_path[64];
+ static int count;
+
+ print_pa_hwpath(dev, hw_path);
+ printk(KERN_INFO "%d. %s at 0x%lx [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+ ++count, dev->name, dev->hpa, hw_path, dev->id.hw_type,
+ dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+
+ if (dev->num_addrs) {
+ int k;
+ printk(", additional addresses: ");
+ for (k = 0; k < dev->num_addrs; k++)
+ printk("0x%lx ", dev->addr[k]);
+ }
+ printk("\n");
+}
+
+/**
+ * init_parisc_bus - Some preparation to be done before inventory
+ */
+void init_parisc_bus(void)
+{
+ bus_register(&parisc_bus_type);
+ device_register(&root);
+ get_device(&root);
+}
+
+/**
+ * print_parisc_devices - Print out a list of devices found in this system
+ */
+void print_parisc_devices(void)
+{
+ struct parisc_device *dev;
+ for_each_padev(dev) {
+ print_parisc_device(dev);
+ }
+}
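
Context, not part of the commit: a sketch of what a client of the bus code above looks like against this 2.6.12-era API. A driver supplies a name, an id_table terminated by an entry whose sversion is 0, and a probe routine; match_device() compares each table entry against the ids firmware reported, and on a successful probe parisc_driver_probe() claims 0x1000 bytes of register space at dev->hpa. The sversion value and all "example" names below are invented:

/* Illustrative sketch only. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>

static int example_probe(struct parisc_device *dev)
{
        /* returning 0 tells the bus layer the device is ours */
        printk(KERN_INFO "example: found device at 0x%lx\n", dev->hpa);
        return 0;
}

static struct parisc_device_id example_ids[] = {
        /* match any hardware type and hversion, one (made-up) sversion */
        { .hw_type  = HWTYPE_ANY_ID,
          .hversion = HVERSION_ANY_ID,
          .sversion = 0x00077 },
        { 0, }          /* sversion == 0 terminates the table */
};

static struct parisc_driver example_driver = {
        .name     = "example",
        .id_table = example_ids,
        .probe    = example_probe,
};

static int __init example_init(void)
{
        return register_parisc_driver(&example_driver);
}
module_init(example_init);

unregister_parisc_driver(&example_driver) is the matching teardown; parisc_driver_remove() then releases the memory region that claim_device() requested on probe.
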
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
new file mode 100644
index 000000000000..ee58d37dbb27
--- /dev/null
+++ b/arch/parisc/kernel/entry.S
@@ -0,0 +1,2426 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * kernel entry points (interruptions, system call wrappers)
+ * Copyright (C) 1999,2000 Philipp Rumpf
+ * Copyright (C) 1999 SuSE GmbH Nuernberg
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <asm/offsets.h>
+
+/* we have the following possibilities to act on an interruption:
+ * - handle in assembly and use shadowed registers only
+ * - save registers to kernel stack and handle in assembly or C */
+
+
+#include <asm/assembly.h> /* for LDREG/STREG defines */
+#include <asm/pgtable.h>
+#include <asm/psw.h>
+#include <asm/signal.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+
+#ifdef __LP64__
+#define CMPIB cmpib,*
+#define CMPB cmpb,*
+#define COND(x) *x
+
+ .level 2.0w
+#else
+#define CMPIB cmpib,
+#define CMPB cmpb,
+#define COND(x) x
+
+ .level 2.0
+#endif
+
+ .import pa_dbit_lock,data
+
+ /* space_to_prot macro creates a prot id from a space id */
+
+#if (SPACEID_SHIFT) == 0
+ .macro space_to_prot spc prot
+ depd,z \spc,62,31,\prot
+ .endm
+#else
+ .macro space_to_prot spc prot
+ extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
+ .endm
+#endif
+
+ /* Switch to virtual mapping, trashing only %r1 */
+ .macro virt_map
+ rsm PSW_SM_Q,%r0
+ tovirt_r1 %r29
+ mfsp %sr7, %r1
+ or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
+ mtsp %r1, %sr3
+ mtsp %r0, %sr4
+ mtsp %r0, %sr5
+ mtsp %r0, %sr6
+ mtsp %r0, %sr7
+ load32 KERNEL_PSW, %r1
+ mtctl %r1, %cr22
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ load32 4f, %r1
+ mtctl %r1, %cr18 /* Set IIAOQ tail */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* Set IIAOQ head */
+ rfir
+ nop
+4:
+ .endm
+
+ /*
+ * The "get_stack" macros are responsible for determining the
+ * kernel stack value.
+ *
+ * For Faults:
+ * If sr7 == 0
+ * Already using a kernel stack, so call the
+ * get_stack_use_r30 macro to push a pt_regs structure
+ * on the stack, and store registers there.
+ * else
+ * Need to set up a kernel stack, so call the
+ * get_stack_use_cr30 macro to set up a pointer
+ * to the pt_regs structure contained within the
+ * task pointer pointed to by cr30. Set the stack
+ * pointer to point to the end of the task structure.
+ *
+ * For Interrupts:
+ * If sr7 == 0
+ * Already using a kernel stack, check to see if r30
+ * is already pointing to the per processor interrupt
+ * stack. If it is, call the get_stack_use_r30 macro
+ * to push a pt_regs structure on the stack, and store
+ * registers there. Otherwise, call get_stack_use_cr31
+ * to get a pointer to the base of the interrupt stack
+ * and push a pt_regs structure on that stack.
+ * else
+ * Need to set up a kernel stack, so call the
+ * get_stack_use_cr30 macro to set up a pointer
+ * to the pt_regs structure contained within the
+ * task pointer pointed to by cr30. Set the stack
+ * pointer to point to the end of the task structure.
+ * N.B: We don't use the interrupt stack for the
+ * first interrupt from userland, because signals/
+ * resched's are processed when returning to userland,
+ * and we can sleep in those cases.
+ *
+ * Note that we use shadowed registers for temps until
+ * we can save %r26 and %r29. %r26 is used to preserve
+ * %r8 (a shadowed register) which temporarily contained
+ * either the fault type ("code") or the eirr. We need
+ * to use a non-shadowed register to carry the value over
+ * the rfir in virt_map. We use %r26 since this value winds
+ * up being passed as the argument to either do_cpu_irq_mask
+	 * or handle_interruption. %r29 is used to hold a pointer to
+	 * the register save area, and once again, it needs to
+ * be a non-shadowed register so that it survives the rfir.
+ *
+ * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
+ */
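+
+	/*
+	 * Illustration only (not assembled): the stack selection described
+	 * above corresponds roughly to the following C pseudocode, with the
+	 * tophys/tovirt address conversions and the (currently disabled)
+	 * interrupt stack case omitted:
+	 *
+	 *	if (sr7 == 0) {				get_stack_use_r30
+	 *		regs = (struct pt_regs *)r30;
+	 *		r30 += PT_SZ_ALGN;
+	 *	} else {				get_stack_use_cr30
+	 *		task = ((struct thread_info *)cr30)->task;
+	 *		regs = (void *)task + TASK_REGS;
+	 *		r30  = cr30 + THREAD_SZ_ALGN;
+	 *	}
+	 */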
+
+ .macro get_stack_use_cr30
+
+ /* we save the registers in the task struct */
+
+ mfctl %cr30, %r1
+ tophys %r1,%r9
+ LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
+ tophys %r1,%r9
+ ldo TASK_REGS(%r9),%r9
+ STREG %r30, PT_GR30(%r9)
+ STREG %r29,PT_GR29(%r9)
+ STREG %r26,PT_GR26(%r9)
+ copy %r9,%r29
+ mfctl %cr30, %r1
+ ldo THREAD_SZ_ALGN(%r1), %r30
+ .endm
+
+ .macro get_stack_use_r30
+
+ /* we put a struct pt_regs on the stack and save the registers there */
+
+ tophys %r30,%r9
+ STREG %r30,PT_GR30(%r9)
+ ldo PT_SZ_ALGN(%r30),%r30
+ STREG %r29,PT_GR29(%r9)
+ STREG %r26,PT_GR26(%r9)
+ copy %r9,%r29
+ .endm
+
+ .macro rest_stack
+ LDREG PT_GR1(%r29), %r1
+ LDREG PT_GR30(%r29),%r30
+ LDREG PT_GR29(%r29),%r29
+ .endm
+
+ /* default interruption handler
+ * (calls traps.c:handle_interruption) */
+ .macro def code
+ b intr_save
+ ldi \code, %r8
+ .align 32
+ .endm
+
+ /* Interrupt interruption handler
+ * (calls irq.c:do_cpu_irq_mask) */
+ .macro extint code
+ b intr_extint
+ mfsp %sr7,%r16
+ .align 32
+ .endm
+
+ .import os_hpmc, code
+
+ /* HPMC handler */
+ .macro hpmc code
+ nop /* must be a NOP, will be patched later */
+ load32 PA(os_hpmc), %r3
+ bv,n 0(%r3)
+ nop
+ .word 0 /* checksum (will be patched) */
+ .word PA(os_hpmc) /* address of handler */
+ .word 0 /* length of handler */
+ .endm
+
+ /*
+ * Performance Note: Instructions will be moved up into
+ * this part of the code later on, once we are sure
+ * that the tlb miss handlers are close to final form.
+ */
+
+ /* Register definitions for tlb miss handler macros */
+
+	va = r8		/* virtual address for which the trap occurred */
+	spc = r24	/* space for which the trap occurred */
+
+#ifndef __LP64__
+
+ /*
+ * itlb miss interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro itlb_11 code
+
+ mfctl %pcsq, spc
+ b itlb_miss_11
+ mfctl %pcoq, va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * itlb miss interruption handler (parisc 2.0)
+ */
+
+ .macro itlb_20 code
+ mfctl %pcsq, spc
+#ifdef __LP64__
+ b itlb_miss_20w
+#else
+ b itlb_miss_20
+#endif
+ mfctl %pcoq, va
+
+ .align 32
+ .endm
+
+#ifndef __LP64__
+ /*
+ * naitlb miss interruption handler (parisc 1.1 - 32 bit)
+ *
+ * Note: naitlb misses will be treated
+	 * as ordinary itlb misses for now.
+ * However, note that naitlb misses
+ * have the faulting address in the
+ * IOR/ISR.
+ */
+
+ .macro naitlb_11 code
+
+ mfctl %isr,spc
+ b itlb_miss_11
+ mfctl %ior,va
+ /* FIXME: If user causes a naitlb miss, the priv level may not be in
+ * lower bits of va, where the itlb miss handler is expecting them
+ */
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * naitlb miss interruption handler (parisc 2.0)
+ *
+ * Note: naitlb misses will be treated
+	 * as ordinary itlb misses for now.
+ * However, note that naitlb misses
+ * have the faulting address in the
+ * IOR/ISR.
+ */
+
+ .macro naitlb_20 code
+
+ mfctl %isr,spc
+#ifdef __LP64__
+ b itlb_miss_20w
+#else
+ b itlb_miss_20
+#endif
+ mfctl %ior,va
+ /* FIXME: If user causes a naitlb miss, the priv level may not be in
+ * lower bits of va, where the itlb miss handler is expecting them
+ */
+
+ .align 32
+ .endm
+
+#ifndef __LP64__
+ /*
+ * dtlb miss interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro dtlb_11 code
+
+ mfctl %isr, spc
+ b dtlb_miss_11
+ mfctl %ior, va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * dtlb miss interruption handler (parisc 2.0)
+ */
+
+ .macro dtlb_20 code
+
+ mfctl %isr, spc
+#ifdef __LP64__
+ b dtlb_miss_20w
+#else
+ b dtlb_miss_20
+#endif
+ mfctl %ior, va
+
+ .align 32
+ .endm
+
+#ifndef __LP64__
+ /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
+
+ .macro nadtlb_11 code
+
+ mfctl %isr,spc
+ b nadtlb_miss_11
+ mfctl %ior,va
+
+ .align 32
+ .endm
+#endif
+
+ /* nadtlb miss interruption handler (parisc 2.0) */
+
+ .macro nadtlb_20 code
+
+ mfctl %isr,spc
+#ifdef __LP64__
+ b nadtlb_miss_20w
+#else
+ b nadtlb_miss_20
+#endif
+ mfctl %ior,va
+
+ .align 32
+ .endm
+
+#ifndef __LP64__
+ /*
+ * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
+ */
+
+ .macro dbit_11 code
+
+ mfctl %isr,spc
+ b dbit_trap_11
+ mfctl %ior,va
+
+ .align 32
+ .endm
+#endif
+
+ /*
+ * dirty bit trap interruption handler (parisc 2.0)
+ */
+
+ .macro dbit_20 code
+
+ mfctl %isr,spc
+#ifdef __LP64__
+ b dbit_trap_20w
+#else
+ b dbit_trap_20
+#endif
+ mfctl %ior,va
+
+ .align 32
+ .endm
+
+ /* The following are simple 32 vs 64 bit instruction
+ * abstractions for the macros */
+ .macro EXTR reg1,start,length,reg2
+#ifdef __LP64__
+ extrd,u \reg1,32+\start,\length,\reg2
+#else
+ extrw,u \reg1,\start,\length,\reg2
+#endif
+ .endm
+
+ .macro DEP reg1,start,length,reg2
+#ifdef __LP64__
+ depd \reg1,32+\start,\length,\reg2
+#else
+ depw \reg1,\start,\length,\reg2
+#endif
+ .endm
+
+ .macro DEPI val,start,length,reg
+#ifdef __LP64__
+ depdi \val,32+\start,\length,\reg
+#else
+ depwi \val,\start,\length,\reg
+#endif
+ .endm
+
+	/* In LP64, the space register contains part of the upper 32 bits
+	 * of the faulting address.  We have to extract this and place it in the va,
+ * zeroing the corresponding bits in the space register */
+ .macro space_adjust spc,va,tmp
+#ifdef __LP64__
+ extrd,u \spc,63,SPACEID_SHIFT,\tmp
+ depd %r0,63,SPACEID_SHIFT,\spc
+ depd \tmp,31,SPACEID_SHIFT,\va
+#endif
+ .endm
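+
+	/*
+	 * Illustration only: on LP64 the adjustment above is roughly
+	 *
+	 *	mask = (1UL << SPACEID_SHIFT) - 1;
+	 *	va  |= (spc & mask) << 32;
+	 *	spc &= ~mask;
+	 *
+	 * i.e. the low SPACEID_SHIFT bits of the space id are really part
+	 * of the upper half of the 64-bit fault address.
+	 */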
+
+ .import swapper_pg_dir,code
+
+ /* Get the pgd. For faults on space zero (kernel space), this
+ * is simply swapper_pg_dir. For user space faults, the
+ * pgd is stored in %cr25 */
+ .macro get_pgd spc,reg
+ ldil L%PA(swapper_pg_dir),\reg
+ ldo R%PA(swapper_pg_dir)(\reg),\reg
+ or,COND(=) %r0,\spc,%r0
+ mfctl %cr25,\reg
+ .endm
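+
+	/*
+	 * Illustration only; in C this is approximately
+	 *
+	 *	if (spc == 0)
+	 *		reg = __pa(swapper_pg_dir);	(kernel space fault)
+	 *	else
+	 *		reg = mfctl(25);		(current user pgd)
+	 */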
+
+ /*
+ space_check(spc,tmp,fault)
+
+ spc - The space we saw the fault with.
+ tmp - The place to store the current space.
+ fault - Function to call on failure.
+
+ Only allow faults on different spaces from the
+ currently active one if we're the kernel
+
+ */
+ .macro space_check spc,tmp,fault
+ mfsp %sr7,\tmp
+ or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
+ * as kernel, so defeat the space
+ * check if it is */
+ copy \spc,\tmp
+ or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
+ cmpb,COND(<>),n \tmp,\spc,\fault
+ .endm
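+
+	/*
+	 * Illustration only; space_check is approximately
+	 *
+	 *	tmp = mfsp(7);			current space
+	 *	if (spc == 0)			gateway page may run as kernel,
+	 *		tmp = 0;		so defeat the check
+	 *	if (tmp != 0 && tmp != spc)	user fault on a foreign space
+	 *		goto fault;
+	 */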
+
+	/* Look up a PTE in a 2-Level scheme (faulting at each
+	 * level if the entry isn't present).
+ *
+ * NOTE: we use ldw even for LP64, since the short pointers
+ * can address up to 1TB
+ */
+ .macro L2_ptep pmd,pte,index,va,fault
+#if PT_NLEVELS == 3
+ EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+#else
+ EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+#endif
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ copy %r0,\pte
+ ldw,s \index(\pmd),\pmd
+ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
+ DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ copy \pmd,%r9
+#ifdef __LP64__
+ shld %r9,PxD_VALUE_SHIFT,\pmd
+#else
+ shlw %r9,PxD_VALUE_SHIFT,\pmd
+#endif
+ EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+ LDREG %r0(\pmd),\pte /* pmd is now pte */
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+ .endm
+
+ /* Look up PTE in a 3-Level scheme.
+ *
+ * Here we implement a Hybrid L2/L3 scheme: we allocate the
+ * first pmd adjacent to the pgd. This means that we can
+ * subtract a constant offset to get to it. The pmd and pgd
+ * sizes are arranged so that a single pmd covers 4GB (giving
+ * a full LP64 process access to 8TB) so our lookups are
+ * effectively L2 for the first 4GB of the kernel (i.e. for
+ * all ILP32 processes and all the kernel for machines with
+ * under 4GB of memory) */
+ .macro L3_ptep pgd,pte,index,va,fault
+ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ copy %r0,\pte
+ extrd,u,*= \va,31,32,%r0
+ ldw,s \index(\pgd),\pgd
+ extrd,u,*= \va,31,32,%r0
+ bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
+ extrd,u,*= \va,31,32,%r0
+ shld \pgd,PxD_VALUE_SHIFT,\index
+ extrd,u,*= \va,31,32,%r0
+ copy \index,\pgd
+ extrd,u,*<> \va,31,32,%r0
+ ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+ L2_ptep \pgd,\pte,\index,\va,\fault
+ .endm
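+
+	/*
+	 * Illustration only: the hybrid lookup above is roughly the
+	 * following C.  The helper names (pgd_index, pmd_base_of) are
+	 * just stand-ins for the extract/shift/present-bit instructions
+	 * above, not real kernel functions:
+	 *
+	 *	if (va >> 32) {				upper space: real 3-level walk;
+	 *		entry = pgd[pgd_index(va)];	fault if entry not present,
+	 *		pgd = pmd_base_of(entry);	else get the pmd it points to
+	 *	} else {				first 4GB: the pmd was allocated
+	 *		pgd += ASM_PGD_PMD_OFFSET;	adjacent to the pgd
+	 *	}
+	 *	L2_ptep(pgd, pte, index, va, fault);	then the 2-level walk
+	 */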
+
+ /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
+ * don't needlessly dirty the cache line if it was already set */
+ .macro update_ptep ptep,pte,tmp,tmp1
+ ldi _PAGE_ACCESSED,\tmp1
+ or \tmp1,\pte,\tmp
+ and,COND(<>) \tmp1,\pte,%r0
+ STREG \tmp,0(\ptep)
+ .endm
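+
+	/*
+	 * Equivalent C (illustration only):
+	 *
+	 *	if (!(pte & _PAGE_ACCESSED))
+	 *		*ptep = pte | _PAGE_ACCESSED;
+	 */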
+
+ /* Set the dirty bit (and accessed bit). No need to be
+ * clever, this is only used from the dirty fault */
+ .macro update_dirty ptep,pte,tmp
+ ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ or \tmp,\pte,\pte
+ STREG \pte,0(\ptep)
+ .endm
+
+ /* Convert the pte and prot to tlb insertion values. How
+ * this happens is quite subtle, read below */
+ .macro make_insert_tlb spc,pte,prot
+ space_to_prot \spc \prot /* create prot id from space */
+ /* The following is the real subtlety. This is depositing
+ * T <-> _PAGE_REFTRAP
+ * D <-> _PAGE_DIRTY
+ * B <-> _PAGE_DMB (memory break)
+ *
+ * Then incredible subtlety: The access rights are
+ * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
+ * See 3-14 of the parisc 2.0 manual
+ *
+ * Finally, _PAGE_READ goes in the top bit of PL1 (so we
+ * trigger an access rights trap in user space if the user
+	 * tries to read an unreadable page). */
+ depd \pte,8,7,\prot
+
+ /* PAGE_USER indicates the page can be read with user privileges,
+ * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
+	 * contains _PAGE_READ). */
+ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
+ depdi 7,11,3,\prot
+ /* If we're a gateway page, drop PL2 back to zero for promotion
+ * to kernel privilege (so we can execute the page as kernel).
+	 * Any privilege promotion page always denies read and write. */
+ extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
+ depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Get rid of prot bits and convert to page addr for iitlbt */
+
+ depd %r0,63,PAGE_SHIFT,\pte
+ extrd,u \pte,56,32,\pte
+ .endm
+
+ /* Identical macro to make_insert_tlb above, except it
+ * makes the tlb entry for the differently formatted pa11
+ * insertion instructions */
+ .macro make_insert_tlb_11 spc,pte,prot
+ zdep \spc,30,15,\prot
+ dep \pte,8,7,\prot
+ extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
+ depi 1,12,1,\prot
+ extru,= \pte,_PAGE_USER_BIT,1,%r0
+ depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
+ extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
+ depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Get rid of prot bits and convert to page addr for iitlba */
+
+ depi 0,31,12,\pte
+ extru \pte,24,25,\pte
+
+ .endm
+
+ /* This is for ILP32 PA2.0 only. The TLB insertion needs
+ * to extend into I/O space if the address is 0xfXXXXXXX
+ * so we extend the f's into the top word of the pte in
+ * this case */
+ .macro f_extend pte,tmp
+ extrd,s \pte,42,4,\tmp
+ addi,<> 1,\tmp,%r0
+ extrd,s \pte,63,25,\pte
+ .endm
+
+	/* The alias region is an 8MB-aligned, 16MB region used to clear
+	 * and copy user pages at addresses congruent with the user
+	 * virtual address.
+	 *
+	 * To use the alias page, set %r26 up with the "to" TLB
+	 * entry (identifying the physical page) and %r23 up with
+	 * the "from" TLB entry (or nothing if only a "to" entry is
+	 * needed, as for clear_user_page_asm). */
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
+ cmpib,COND(<>),n 0,\spc,\fault
+ ldil L%(TMPALIAS_MAP_START),\tmp
+#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
+ /* on LP64, ldi will sign extend into the upper 32 bits,
+ * which is behaviour we don't want */
+ depdi 0,31,32,\tmp
+#endif
+ copy \va,\tmp1
+ DEPI 0,31,23,\tmp1
+ cmpb,COND(<>),n \tmp,\tmp1,\fault
+ ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
+ depd,z \prot,8,7,\prot
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+ */
+#ifdef __LP64__
+ extrd,u,*= \va,41,1,%r0
+#else
+ extrw,u,= \va,9,1,%r0
+#endif
+ or,COND(tr) %r23,%r0,\pte
+ or %r26,%r0,\pte
+ .endm
+
+
+ /*
+ * Align fault_vector_20 on 4K boundary so that both
+ * fault_vector_11 and fault_vector_20 are on the
+ * same page. This is only necessary as long as we
+ * write protect the kernel text, which we may stop
+ * doing once we use large page translations to cover
+ * the static part of the kernel address space.
+ */
+
+ .export fault_vector_20
+
+ .text
+
+ .align 4096
+
+fault_vector_20:
+ /* First vector is invalid (0) */
+ .ascii "cows can fly"
+ .byte 0
+ .align 32
+
+ hpmc 1
+ def 2
+ def 3
+ extint 4
+ def 5
+ itlb_20 6
+ def 7
+ def 8
+ def 9
+ def 10
+ def 11
+ def 12
+ def 13
+ def 14
+ dtlb_20 15
+#if 0
+ naitlb_20 16
+#else
+ def 16
+#endif
+ nadtlb_20 17
+ def 18
+ def 19
+ dbit_20 20
+ def 21
+ def 22
+ def 23
+ def 24
+ def 25
+ def 26
+ def 27
+ def 28
+ def 29
+ def 30
+ def 31
+
+#ifndef __LP64__
+
+ .export fault_vector_11
+
+ .align 2048
+
+fault_vector_11:
+ /* First vector is invalid (0) */
+ .ascii "cows can fly"
+ .byte 0
+ .align 32
+
+ hpmc 1
+ def 2
+ def 3
+ extint 4
+ def 5
+ itlb_11 6
+ def 7
+ def 8
+ def 9
+ def 10
+ def 11
+ def 12
+ def 13
+ def 14
+ dtlb_11 15
+#if 0
+ naitlb_11 16
+#else
+ def 16
+#endif
+ nadtlb_11 17
+ def 18
+ def 19
+ dbit_11 20
+ def 21
+ def 22
+ def 23
+ def 24
+ def 25
+ def 26
+ def 27
+ def 28
+ def 29
+ def 30
+ def 31
+
+#endif
+
+ .import handle_interruption,code
+ .import do_cpu_irq_mask,code
+
+ /*
+ * r26 = function to be called
+ * r25 = argument to pass in
+ * r24 = flags for do_fork()
+ *
+ * Kernel threads don't ever return, so they don't need
+ * a true register context. We just save away the arguments
+ * for copy_thread/ret_from_kernel_thread to properly set up the child.
+ */
+
+#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
+#define CLONE_UNTRACED 0x00800000
+
+ .export __kernel_thread, code
+ .import do_fork
+__kernel_thread:
+ STREG %r2, -RP_OFFSET(%r30)
+
+ copy %r30, %r1
+ ldo PT_SZ_ALGN(%r30),%r30
+#ifdef __LP64__
+ /* Yo, function pointers in wide mode are little structs... -PB */
+ ldd 24(%r26), %r2
+	STREG %r2, PT_GR27(%r1)	/* Store child's %dp */
+ ldd 16(%r26), %r26
+
+ STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
+ copy %r0, %r22 /* user_tid */
+#endif
+ STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
+ STREG %r25, PT_GR25(%r1)
+ ldil L%CLONE_UNTRACED, %r26
+ ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
+ or %r26, %r24, %r26 /* will have kernel mappings. */
+ ldi 1, %r25 /* stack_start, signals kernel thread */
+ stw %r0, -52(%r30) /* user_tid */
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL do_fork, %r2
+ copy %r1, %r24 /* pt_regs */
+
+ /* Parent Returns here */
+
+ LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
+ ldo -PT_SZ_ALGN(%r30), %r30
+ bv %r0(%r2)
+ nop
+
+ /*
+ * Child Returns here
+ *
+ * copy_thread moved args from temp save area set up above
+ * into task save area.
+ */
+
+ .export ret_from_kernel_thread
+ret_from_kernel_thread:
+
+ /* Call schedule_tail first though */
+ BL schedule_tail, %r2
+ nop
+
+ LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
+ LDREG TASK_PT_GR25(%r1), %r26
+#ifdef __LP64__
+ LDREG TASK_PT_GR27(%r1), %r27
+ LDREG TASK_PT_GR22(%r1), %r22
+#endif
+ LDREG TASK_PT_GR26(%r1), %r1
+ ble 0(%sr7, %r1)
+ copy %r31, %r2
+
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+ loadgp /* Thread could have been in a module */
+#endif
+ b sys_exit
+ ldi 0, %r26
+
+ .import sys_execve, code
+ .export __execve, code
+__execve:
+ copy %r2, %r15
+ copy %r30, %r16
+ ldo PT_SZ_ALGN(%r30), %r30
+ STREG %r26, PT_GR26(%r16)
+ STREG %r25, PT_GR25(%r16)
+ STREG %r24, PT_GR24(%r16)
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL sys_execve, %r2
+ copy %r16, %r26
+
+ cmpib,=,n 0,%r28,intr_return /* forward */
+
+ /* yes, this will trap and die. */
+ copy %r15, %r2
+ copy %r16, %r30
+ bv %r0(%r2)
+ nop
+
+ .align 4
+
+ /*
+ * struct task_struct *_switch_to(struct task_struct *prev,
+ * struct task_struct *next)
+ *
+ * switch kernel stacks and return prev */
+ .export _switch_to, code
+_switch_to:
+ STREG %r2, -RP_OFFSET(%r30)
+
+ callee_save
+
+ load32 _switch_to_ret, %r2
+
+ STREG %r2, TASK_PT_KPC(%r26)
+ LDREG TASK_PT_KPC(%r25), %r2
+
+ STREG %r30, TASK_PT_KSP(%r26)
+ LDREG TASK_PT_KSP(%r25), %r30
+ LDREG TASK_THREAD_INFO(%r25), %r25
+ bv %r0(%r2)
+ mtctl %r25,%cr30
+
+_switch_to_ret:
+ mtctl %r0, %cr0 /* Needed for single stepping */
+ callee_rest
+
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ copy %r26, %r28
+
+ /*
+ * Common rfi return path for interruptions, kernel execve, and
+ * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
+ * return via this path if the signal was received when the process
+ * was running; if the process was blocked on a syscall then the
+ * normal syscall_exit path is used. All syscalls for traced
+	 * processes exit via intr_restore.
+	 *
+	 * XXX If any syscalls that change a process's space id ever exit
+	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
+ * adjust IASQ[0..1].
+ *
+ * Note that the following code uses a "relied upon translation".
+ * See the parisc ACD for details. The ssm is necessary due to a
+ * PCXT bug.
+ */
+
+ .align 4096
+
+ .export syscall_exit_rfi
+syscall_exit_rfi:
+ mfctl %cr30,%r16
+ LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
+ ldo TASK_REGS(%r16),%r16
+ /* Force iaoq to userspace, as the user has had access to our current
+ * context via sigcontext. Also Filter the PSW for the same reason.
+ */
+ LDREG PT_IAOQ0(%r16),%r19
+ depi 3,31,2,%r19
+ STREG %r19,PT_IAOQ0(%r16)
+ LDREG PT_IAOQ1(%r16),%r19
+ depi 3,31,2,%r19
+ STREG %r19,PT_IAOQ1(%r16)
+ LDREG PT_PSW(%r16),%r19
+ load32 USER_PSW_MASK,%r1
+#ifdef __LP64__
+ load32 USER_PSW_HI_MASK,%r20
+ depd %r20,31,32,%r1
+#endif
+ and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
+ load32 USER_PSW,%r1
+ or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
+ STREG %r19,PT_PSW(%r16)
+
+ /*
+ * If we aren't being traced, we never saved space registers
+ * (we don't store them in the sigcontext), so set them
+ * to "proper" values now (otherwise we'll wind up restoring
+ * whatever was last stored in the task structure, which might
+	 * be inconsistent if an interrupt occurred while on the gateway
+	 * page). Note that we may be "trashing" values the user put in
+	 * them, but we don't support the user changing them.
+ */
+
+ STREG %r0,PT_SR2(%r16)
+ mfsp %sr3,%r19
+ STREG %r19,PT_SR0(%r16)
+ STREG %r19,PT_SR1(%r16)
+ STREG %r19,PT_SR3(%r16)
+ STREG %r19,PT_SR4(%r16)
+ STREG %r19,PT_SR5(%r16)
+ STREG %r19,PT_SR6(%r16)
+ STREG %r19,PT_SR7(%r16)
+
+intr_return:
+	/* NOTE: Need to enable interrupts in case we schedule. */
+ ssm PSW_SM_I, %r0
+
+ /* Check for software interrupts */
+
+ .import irq_stat,data
+
+ load32 irq_stat,%r19
+#ifdef CONFIG_SMP
+ mfctl %cr30,%r1
+ ldw TI_CPU(%r1),%r1 /* get cpu # - int */
+ /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
+ ** irq_stat[] is defined using ____cacheline_aligned.
+ */
+#ifdef __LP64__
+ shld %r1, 6, %r20
+#else
+ shlw %r1, 5, %r20
+#endif
+ add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
+#endif /* CONFIG_SMP */
+
+ LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
+ cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
+
+intr_check_resched:
+
+ /* check for reschedule */
+ mfctl %cr30,%r1
+ LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
+ bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+
+intr_check_sig:
+ /* As above */
+ mfctl %cr30,%r1
+ LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
+ bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
+
+intr_restore:
+ copy %r16,%r29
+ ldo PT_FR31(%r29),%r1
+ rest_fp %r1
+ rest_general %r29
+
+ /* Create a "relied upon translation" PA 2.0 Arch. F-5 */
+ ssm 0,%r0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ tophys_r1 %r29
+ rsm (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
+
+ /* Restore space id's and special cr's from PT_REGS
+ * structure pointed to by r29 */
+ rest_specials %r29
+
+ /* Important: Note that rest_stack restores r29
+ * last (we are using it)! It also restores r1 and r30. */
+ rest_stack
+
+ rfi
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .import do_softirq,code
+intr_do_softirq:
+ bl do_softirq,%r2
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ nop
+#endif
+ b intr_check_resched
+ nop
+
+ .import schedule,code
+intr_do_resched:
+ /* Only do reschedule if we are returning to user space */
+ LDREG PT_IASQ0(%r16), %r20
+ CMPIB= 0,%r20,intr_restore /* backward */
+ nop
+ LDREG PT_IASQ1(%r16), %r20
+ CMPIB= 0,%r20,intr_restore /* backward */
+ nop
+
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ ldil L%intr_check_sig, %r2
+ b schedule
+ ldo R%intr_check_sig(%r2), %r2
+
+
+ .import do_signal,code
+intr_do_signal:
+ /*
+ This check is critical to having LWS
+ working. The IASQ is zero on the gateway
+ page and we cannot deliver any signals until
+ we get off the gateway page.
+
+ Only do signals if we are returning to user space
+ */
+ LDREG PT_IASQ0(%r16), %r20
+ CMPIB= 0,%r20,intr_restore /* backward */
+ nop
+ LDREG PT_IASQ1(%r16), %r20
+ CMPIB= 0,%r20,intr_restore /* backward */
+ nop
+
+ copy %r0, %r24 /* unsigned long in_syscall */
+ copy %r16, %r25 /* struct pt_regs *regs */
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ BL do_signal,%r2
+ copy %r0, %r26 /* sigset_t *oldset = NULL */
+
+ b intr_check_sig
+ nop
+
+ /*
+ * External interrupts.
+ */
+
+intr_extint:
+ CMPIB=,n 0,%r16,1f
+ get_stack_use_cr30
+ b,n 3f
+
+1:
+#if 0 /* Interrupt Stack support not working yet! */
+ mfctl %cr31,%r1
+ copy %r30,%r17
+ /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
+#ifdef __LP64__
+ depdi 0,63,15,%r17
+#else
+ depi 0,31,15,%r17
+#endif
+ CMPB=,n %r1,%r17,2f
+ get_stack_use_cr31
+ b,n 3f
+#endif
+2:
+ get_stack_use_r30
+
+3:
+ save_specials %r29
+ virt_map
+ save_general %r29
+
+ ldo PT_FR0(%r29), %r24
+ save_fp %r24
+
+ loadgp
+
+ copy %r29, %r26 /* arg0 is pt_regs */
+ copy %r29, %r16 /* save pt_regs */
+
+ ldil L%intr_return, %r2
+
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ b do_cpu_irq_mask
+ ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
+
+
+ /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
+
+ .export intr_save, code /* for os_hpmc */
+
+intr_save:
+ mfsp %sr7,%r16
+ CMPIB=,n 0,%r16,1f
+ get_stack_use_cr30
+ b 2f
+ copy %r8,%r26
+
+1:
+ get_stack_use_r30
+ copy %r8,%r26
+
+2:
+ save_specials %r29
+
+ /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
+
+ /*
+ * FIXME: 1) Use a #define for the hardwired "6" below (and in
+	 *    traps.c).
+ * 2) Once we start executing code above 4 Gb, we need
+ * to adjust iasq/iaoq here in the same way we
+ * adjust isr/ior below.
+ */
+
+ CMPIB=,n 6,%r26,skip_save_ior
+
+ /* save_specials left ipsw value in r8 for us to test */
+
+ mfctl %cr20, %r16 /* isr */
+ mfctl %cr21, %r17 /* ior */
+
+#ifdef __LP64__
+ /*
+ * If the interrupted code was running with W bit off (32 bit),
+ * clear the b bits (bits 0 & 1) in the ior.
+ */
+ extrd,u,*<> %r8,PSW_W_BIT,1,%r0
+ depdi 0,1,2,%r17
+
+ /*
+ * FIXME: This code has hardwired assumptions about the split
+ * between space bits and offset bits. This will change
+ * when we allow alternate page sizes.
+ */
+
+ /* adjust isr/ior. */
+
+ extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
+ depd %r1,31,7,%r17 /* deposit them into ior */
+ depdi 0,63,7,%r16 /* clear them from isr */
+#endif
+ STREG %r16, PT_ISR(%r29)
+ STREG %r17, PT_IOR(%r29)
+
+
+skip_save_ior:
+ virt_map
+ save_general %r29
+
+ ldo PT_FR0(%r29), %r25
+ save_fp %r25
+
+ loadgp
+
+ copy %r29, %r25 /* arg1 is pt_regs */
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ ldil L%intr_check_sig, %r2
+ copy %r25, %r16 /* save pt_regs */
+
+ b handle_interruption
+ ldo R%intr_check_sig(%r2), %r2
+
+
+ /*
+ * Note for all tlb miss handlers:
+ *
+ * cr24 contains a pointer to the kernel address space
+ * page directory.
+ *
+ * cr25 contains a pointer to the current user address
+ * space page directory.
+ *
+ * sr3 will contain the space id of the user address space
+ * of the current running thread while that thread is
+ * running in the kernel.
+ */
+
+ /*
+ * register number allocations. Note that these are all
+ * in the shadowed registers
+ */
+
+ t0 = r1 /* temporary register 0 */
+	va  = r8	/* virtual address for which the trap occurred */
+	t1  = r9	/* temporary register 1 */
+	pte = r16	/* pte/phys page # */
+	prot = r17	/* prot bits */
+	spc = r24	/* space for which the trap occurred */
+ ptp = r25 /* page directory/page table pointer */
+
+#ifdef __LP64__
+
+dtlb_miss_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
+
+ L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+dtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_miss_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,nadtlb_fault
+
+ L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_check_flush_20w:
+ bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
+
+ /* Insert a "flush only" translation */
+
+ depdi,z 7,7,3,prot
+ depdi 1,10,1,prot
+
+ /* Get rid of prot bits and convert to page addr for idtlbt */
+
+ depdi 0,63,12,pte
+ extrd,u pte,56,52,pte
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+#else
+
+dtlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,dtlb_fault
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+dtlb_check_alias_11:
+
+ /* Check to see if fault is in the temporary alias region */
+
+ cmpib,<>,n 0,spc,dtlb_fault /* forward */
+ ldil L%(TMPALIAS_MAP_START),t0
+ copy va,t1
+ depwi 0,31,23,t1
+ cmpb,<>,n t0,t1,dtlb_fault /* forward */
+ ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
+ depw,z prot,8,7,prot
+
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+ */
+
+ extrw,u,= va,9,1,r0
+ or,tr %r23,%r0,pte /* If "from" use "from" page */
+ or %r26,%r0,pte /* else "to", use "to" page */
+
+ idtlba pte,(va)
+ idtlbp prot,(va)
+
+ rfir
+ nop
+
+nadtlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,nadtlb_fault
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+nadtlb_check_flush_11:
+ bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
+
+ /* Insert a "flush only" translation */
+
+ zdepi 7,7,3,prot
+ depi 1,10,1,prot
+
+ /* Get rid of prot bits and convert to page addr for idtlba */
+
+ depi 0,31,12,pte
+ extru pte,24,25,pte
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+dtlb_miss_20:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
+
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+dtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,nadtlb_fault
+
+ L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ idtlbt pte,prot
+
+ rfir
+ nop
+
+nadtlb_check_flush_20:
+ bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
+
+ /* Insert a "flush only" translation */
+
+ depdi,z 7,7,3,prot
+ depdi 1,10,1,prot
+
+ /* Get rid of prot bits and convert to page addr for idtlbt */
+
+ depdi 0,63,12,pte
+ extrd,u pte,56,32,pte
+ idtlbt pte,prot
+
+ rfir
+ nop
+#endif
+
+nadtlb_emulate:
+
+ /*
+ * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
+ * probei instructions. We don't want to fault for these
+ * instructions (not only does it not make sense, it can cause
+ * deadlocks, since some flushes are done with the mmap
+ * semaphore held). If the translation doesn't exist, we can't
+ * insert a translation, so have to emulate the side effects
+ * of the instruction. Since we don't insert a translation
+ * we can get a lot of faults during a flush loop, so it makes
+ * sense to try to do it here with minimum overhead. We only
+ * emulate fdc,fic,pdc,probew,prober instructions whose base
+ * and index registers are not shadowed. We defer everything
+ * else to the "slow" path.
+ */
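+
+	/*
+	 * Rough C outline of the fast path below (illustration only; the
+	 * decode helpers are ad hoc names, not real functions -- the real
+	 * field extraction is done with extrw on the iir.  get_register
+	 * and set_register are defined later in this file; get_register
+	 * returns -1 for shadowed registers, which forces the slow
+	 * nadtlb_fault path):
+	 *
+	 *	iir = mfctl(19);
+	 *	if (is_fdc_fdce_pdc_fic(iir)) {
+	 *		if (m_bit(iir)) {		base register modification
+	 *			x = get_register(index_reg(iir));
+	 *			b = get_register(base_reg(iir));
+	 *			if (x == -1 || b == -1)
+	 *				goto nadtlb_fault;
+	 *			set_register(base_reg(iir), b + x);
+	 *		}
+	 *		nullify the insn and rfir;
+	 *	} else if (is_probe(iir)) {
+	 *		if (get_register(target_reg(iir)) == -1)
+	 *			goto nadtlb_fault;
+	 *		set_register(target_reg(iir), 0);
+	 *		nullify the insn and rfir;
+	 *	} else {
+	 *		goto nadtlb_fault;
+	 *	}
+	 */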
+
+ mfctl %cr19,%r9 /* Get iir */
+
+ /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+ Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
+
+ /* Checks for fdc,fdce,pdc,"fic,4f" only */
+ ldi 0x280,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_probe_check
+ bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
+ BL get_register,%r25
+ extrw,u %r9,15,5,%r8 /* Get index register # */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ copy %r1,%r24
+ BL get_register,%r25
+ extrw,u %r9,10,5,%r8 /* Get base register # */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
+
+nadtlb_nullify:
+ mfctl %cr22,%r8 /* Get ipsw */
+ ldil L%PSW_N,%r9
+ or %r8,%r9,%r8 /* Set PSW_N */
+ mtctl %r8,%cr22
+
+ rfir
+ nop
+
+ /*
+ When there is no translation for the probe address then we
+	   must nullify the insn and return zero in the target register.
+ This will indicate to the calling code that it does not have
+ write/read privileges to this address.
+
+ This should technically work for prober and probew in PA 1.1,
+ and also probe,r and probe,w in PA 2.0
+
+ WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+ THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
+
+ */
+nadtlb_probe_check:
+ ldi 0x80,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
+ BL get_register,%r25 /* Find the target register */
+ extrw,u %r9,31,5,%r8 /* Get target register */
+ CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ copy %r0,%r1 /* Write zero to target register */
+ b nadtlb_nullify /* Nullify return insn */
+ nop
+
+
+#ifdef __LP64__
+itlb_miss_20w:
+
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
+
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,itlb_fault
+
+ L3_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+#else
+
+itlb_miss_11:
+ get_pgd spc,ptp
+
+ space_check spc,t0,itlb_fault
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
+
+ mtsp t0, %sr1 /* Restore sr1 */
+
+ rfir
+ nop
+
+itlb_miss_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,itlb_fault
+
+ L2_ptep ptp,pte,t0,va,itlb_fault
+
+ update_ptep ptp,pte,t0,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t0
+
+ iitlbt pte,prot
+
+ rfir
+ nop
+
+#endif
+
+#ifdef __LP64__
+
+dbit_trap_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dbit_fault
+
+ L3_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nolock_20w
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_20w:
+ ldcw 0(t0),t1
+ cmpib,= 0,t1,dbit_spin_20w
+ nop
+
+dbit_nolock_20w:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ idtlbt pte,prot
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nounlock_20w
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_20w:
+#endif
+
+ rfir
+ nop
+#else
+
+dbit_trap_11:
+
+ get_pgd spc,ptp
+
+ space_check spc,t0,dbit_fault
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nolock_11
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_11:
+ ldcw 0(t0),t1
+ cmpib,= 0,t1,dbit_spin_11
+ nop
+
+dbit_nolock_11:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb_11 spc,pte,prot
+
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
+
+ idtlba pte,(%sr1,va)
+ idtlbp prot,(%sr1,va)
+
+ mtsp t1, %sr1 /* Restore sr1 */
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nounlock_11
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_11:
+#endif
+
+ rfir
+ nop
+
+dbit_trap_20:
+ get_pgd spc,ptp
+
+ space_check spc,t0,dbit_fault
+
+ L2_ptep ptp,pte,t0,va,dbit_fault
+
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nolock_20
+ load32 PA(pa_dbit_lock),t0
+
+dbit_spin_20:
+ ldcw 0(t0),t1
+ cmpib,= 0,t1,dbit_spin_20
+ nop
+
+dbit_nolock_20:
+#endif
+ update_dirty ptp,pte,t1
+
+ make_insert_tlb spc,pte,prot
+
+ f_extend pte,t1
+
+ idtlbt pte,prot
+
+#ifdef CONFIG_SMP
+ CMPIB=,n 0,spc,dbit_nounlock_20
+ ldi 1,t1
+ stw t1,0(t0)
+
+dbit_nounlock_20:
+#endif
+
+ rfir
+ nop
+#endif
+
+ .import handle_interruption,code
+
+kernel_bad_space:
+ b intr_save
+ ldi 31,%r8 /* Use an unused code */
+
+dbit_fault:
+ b intr_save
+ ldi 20,%r8
+
+itlb_fault:
+ b intr_save
+ ldi 6,%r8
+
+nadtlb_fault:
+ b intr_save
+ ldi 17,%r8
+
+dtlb_fault:
+ b intr_save
+ ldi 15,%r8
+
+ /* Register saving semantics for system calls:
+
+ %r1 clobbered by system call macro in userspace
+ %r2 saved in PT_REGS by gateway page
+ %r3 - %r18 preserved by C code (saved by signal code)
+ %r19 - %r20 saved in PT_REGS by gateway page
+ %r21 - %r22 non-standard syscall args
+ stored in kernel stack by gateway page
+ %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
+ %r27 - %r30 saved in PT_REGS by gateway page
+ %r31 syscall return pointer
+ */
+
+ /* Floating point registers (FIXME: what do we do with these?)
+
+ %fr0 - %fr3 status/exception, not preserved
+ %fr4 - %fr7 arguments
+ %fr8 - %fr11 not preserved by C code
+ %fr12 - %fr21 preserved by C code
+ %fr22 - %fr31 not preserved by C code
+ */
+
+ .macro reg_save regs
+ STREG %r3, PT_GR3(\regs)
+ STREG %r4, PT_GR4(\regs)
+ STREG %r5, PT_GR5(\regs)
+ STREG %r6, PT_GR6(\regs)
+ STREG %r7, PT_GR7(\regs)
+ STREG %r8, PT_GR8(\regs)
+ STREG %r9, PT_GR9(\regs)
+ STREG %r10,PT_GR10(\regs)
+ STREG %r11,PT_GR11(\regs)
+ STREG %r12,PT_GR12(\regs)
+ STREG %r13,PT_GR13(\regs)
+ STREG %r14,PT_GR14(\regs)
+ STREG %r15,PT_GR15(\regs)
+ STREG %r16,PT_GR16(\regs)
+ STREG %r17,PT_GR17(\regs)
+ STREG %r18,PT_GR18(\regs)
+ .endm
+
+ .macro reg_restore regs
+ LDREG PT_GR3(\regs), %r3
+ LDREG PT_GR4(\regs), %r4
+ LDREG PT_GR5(\regs), %r5
+ LDREG PT_GR6(\regs), %r6
+ LDREG PT_GR7(\regs), %r7
+ LDREG PT_GR8(\regs), %r8
+ LDREG PT_GR9(\regs), %r9
+ LDREG PT_GR10(\regs),%r10
+ LDREG PT_GR11(\regs),%r11
+ LDREG PT_GR12(\regs),%r12
+ LDREG PT_GR13(\regs),%r13
+ LDREG PT_GR14(\regs),%r14
+ LDREG PT_GR15(\regs),%r15
+ LDREG PT_GR16(\regs),%r16
+ LDREG PT_GR17(\regs),%r17
+ LDREG PT_GR18(\regs),%r18
+ .endm
+
+ .export sys_fork_wrapper
+ .export child_return
+sys_fork_wrapper:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+ ldo TASK_REGS(%r1),%r1
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ /* These are call-clobbered registers and therefore
+ also syscall-clobbered (we hope). */
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+
+ LDREG PT_GR30(%r1),%r25
+ copy %r1,%r24
+ BL sys_clone,%r2
+ ldi SIGCHLD,%r26
+
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+wrapper_exit:
+ ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+
+ LDREG PT_CR27(%r1), %r3
+ mtctl %r3, %cr27
+ reg_restore %r1
+
+ /* strace expects syscall # to be preserved in r20 */
+ ldi __NR_fork,%r20
+ bv %r0(%r2)
+ STREG %r20,PT_GR20(%r1)
+
+ /* Set the return value for the child */
+child_return:
+ BL schedule_tail, %r2
+ nop
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
+ LDREG TASK_PT_GR19(%r1),%r2
+ b wrapper_exit
+ copy %r0,%r28
+
+
+ .export sys_clone_wrapper
+sys_clone_wrapper:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+ BL sys_clone,%r2
+ copy %r1,%r24
+
+ b wrapper_exit
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+
+ .export sys_vfork_wrapper
+sys_vfork_wrapper:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_save %r1
+ mfctl %cr27, %r3
+ STREG %r3, PT_CR27(%r1)
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ STREG %r2,PT_GR19(%r1) /* save for child */
+ STREG %r30,PT_GR21(%r1)
+
+ BL sys_vfork,%r2
+ copy %r1,%r26
+
+ b wrapper_exit
+ LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+
+
+ .macro execve_wrapper execve
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+
+ /*
+ * Do we need to save/restore r3-r18 here?
+	 * I don't think so; why would the new thread need the old
+	 * thread's registers?
+ */
+
+ /* %arg0 - %arg3 are already saved for us. */
+
+ STREG %r2,-RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30),%r30
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ bl \execve,%r2
+ copy %r1,%arg0
+
+ ldo -FRAME_SIZE(%r30),%r30
+ LDREG -RP_OFFSET(%r30),%r2
+
+ /* If exec succeeded we need to load the args */
+
+ ldo -1024(%r0),%r1
+ cmpb,>>= %r28,%r1,error_\execve
+ copy %r2,%r19
+
+error_\execve:
+ bv %r0(%r19)
+ nop
+ .endm
+
+ .export sys_execve_wrapper
+ .import sys_execve
+
+sys_execve_wrapper:
+ execve_wrapper sys_execve
+
+#ifdef __LP64__
+ .export sys32_execve_wrapper
+ .import sys32_execve
+
+sys32_execve_wrapper:
+ execve_wrapper sys32_execve
+#endif
+
+ .export sys_rt_sigreturn_wrapper
+sys_rt_sigreturn_wrapper:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
+ ldo TASK_REGS(%r26),%r26 /* get pt regs */
+ /* Don't save regs, we are going to restore them from sigcontext. */
+ STREG %r2, -RP_OFFSET(%r30)
+#ifdef __LP64__
+ ldo FRAME_SIZE(%r30), %r30
+ BL sys_rt_sigreturn,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ BL sys_rt_sigreturn,%r2
+ ldo FRAME_SIZE(%r30), %r30
+#endif
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+
+ /* FIXME: I think we need to restore a few more things here. */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
+ reg_restore %r1
+
+ /* If the signal was received while the process was blocked on a
+ * syscall, then r2 will take us to syscall_exit; otherwise r2 will
+ * take us to syscall_exit_rfi and on to intr_return.
+ */
+ bv %r0(%r2)
+ LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
+
+ .export sys_sigaltstack_wrapper
+sys_sigaltstack_wrapper:
+ /* Get the user stack pointer */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r24 /* get pt regs */
+ LDREG TASK_PT_GR30(%r24),%r24
+ STREG %r2, -RP_OFFSET(%r30)
+#ifdef __LP64__
+ ldo FRAME_SIZE(%r30), %r30
+ b,l do_sigaltstack,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ bl do_sigaltstack,%r2
+ ldo FRAME_SIZE(%r30), %r30
+#endif
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ nop
+
+#ifdef __LP64__
+ .export sys32_sigaltstack_wrapper
+sys32_sigaltstack_wrapper:
+ /* Get the user stack pointer */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
+ LDREG TASK_PT_GR30(%r24),%r24
+ STREG %r2, -RP_OFFSET(%r30)
+ ldo FRAME_SIZE(%r30), %r30
+ b,l do_sigaltstack32,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+ bv %r0(%r2)
+ nop
+#endif
+
+ .export sys_rt_sigsuspend_wrapper
+sys_rt_sigsuspend_wrapper:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+ ldo TASK_REGS(%r1),%r24
+ reg_save %r24
+
+ STREG %r2, -RP_OFFSET(%r30)
+#ifdef __LP64__
+ ldo FRAME_SIZE(%r30), %r30
+ b,l sys_rt_sigsuspend,%r2
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ bl sys_rt_sigsuspend,%r2
+ ldo FRAME_SIZE(%r30), %r30
+#endif
+
+ ldo -FRAME_SIZE(%r30), %r30
+ LDREG -RP_OFFSET(%r30), %r2
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
+ ldo TASK_REGS(%r1),%r1
+ reg_restore %r1
+
+ bv %r0(%r2)
+ nop
+
+ .export syscall_exit
+syscall_exit:
+
+ /* NOTE: HP-UX syscalls also come through here
+ * after hpux_syscall_exit fixes up return
+ * values. */
+
+ /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
+ * via syscall_exit_rfi if the signal was received while the process
+ * was running.
+ */
+
+ /* save return value now */
+
+ mfctl %cr30, %r1
+ LDREG TI_TASK(%r1),%r1
+ STREG %r28,TASK_PT_GR28(%r1)
+
+#ifdef CONFIG_HPUX
+
+/* <linux/personality.h> cannot be easily included */
+#define PER_HPUX 0x10
+ LDREG TASK_PERSONALITY(%r1),%r19
+
+ /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
+ ldo -PER_HPUX(%r19), %r19
+ CMPIB<>,n 0,%r19,1f
+
+ /* Save other hpux returns if personality is PER_HPUX */
+ STREG %r22,TASK_PT_GR22(%r1)
+ STREG %r29,TASK_PT_GR29(%r1)
+1:
+
+#endif /* CONFIG_HPUX */
+
+ /* Seems to me that dp could be wrong here, if the syscall involved
+ * calling a module, and nothing got round to restoring dp on return.
+ */
+ loadgp
+
+syscall_check_bh:
+
+ /* Check for software interrupts */
+
+ .import irq_stat,data
+
+ load32 irq_stat,%r19
+
+#ifdef CONFIG_SMP
+ /* sched.h: int processor */
+ /* %r26 is used as scratch register to index into irq_stat[] */
+ ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
+
+ /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
+#ifdef __LP64__
+ shld %r26, 6, %r20
+#else
+ shlw %r26, 5, %r20
+#endif
+ add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
+#endif /* CONFIG_SMP */
+
+ LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
+ cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
+
+syscall_check_resched:
+
+ /* check for reschedule */
+
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
+ bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
+
+syscall_check_sig:
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
+ bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
+
+syscall_restore:
+ /* Are we being ptraced? */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+
+ LDREG TASK_PTRACE(%r1), %r19
+ bb,< %r19,31,syscall_restore_rfi
+ nop
+
+ ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
+ rest_fp %r19
+
+ LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
+ mtsar %r19
+
+ LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
+ LDREG TASK_PT_GR19(%r1),%r19
+ LDREG TASK_PT_GR20(%r1),%r20
+ LDREG TASK_PT_GR21(%r1),%r21
+ LDREG TASK_PT_GR22(%r1),%r22
+ LDREG TASK_PT_GR23(%r1),%r23
+ LDREG TASK_PT_GR24(%r1),%r24
+ LDREG TASK_PT_GR25(%r1),%r25
+ LDREG TASK_PT_GR26(%r1),%r26
+ LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
+ LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
+ LDREG TASK_PT_GR29(%r1),%r29
+ LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
+
+ /* NOTE: We use rsm/ssm pair to make this operation atomic */
+ rsm PSW_SM_I, %r0
+ LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
+ mfsp %sr3,%r1 /* Get users space id */
+ mtsp %r1,%sr7 /* Restore sr7 */
+ ssm PSW_SM_I, %r0
+
+ /* Set sr2 to zero for userspace syscalls to work. */
+ mtsp %r0,%sr2
+ mtsp %r1,%sr4 /* Restore sr4 */
+ mtsp %r1,%sr5 /* Restore sr5 */
+ mtsp %r1,%sr6 /* Restore sr6 */
+
+ depi 3,31,2,%r31 /* ensure return to user mode. */
+
+#ifdef __LP64__
+ /* decide whether to reset the wide mode bit
+ *
+ * For a syscall, the W bit is stored in the lowest bit
+ * of sp. Extract it and reset W if it is zero */
+ extrd,u,*<> %r30,63,1,%r1
+ rsm PSW_SM_W, %r0
+ /* now reset the lowest bit of sp if it was set */
+ xor %r30,%r1,%r30
+#endif
+ be,n 0(%sr3,%r31) /* return to user space */
+
+ /* We have to return via an RFI, so that PSW T and R bits can be set
+ * appropriately.
+ * This sets up pt_regs so we can return via intr_restore, which is not
+ * the most efficient way of doing things, but it works.
+ */
+syscall_restore_rfi:
+ ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
+ mtctl %r2,%cr0 /* for immediate trap */
+ LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
+ ldi 0x0b,%r20 /* Create new PSW */
+ depi -1,13,1,%r20 /* C, Q, D, and I bits */
+
+ /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
+ * set in include/linux/ptrace.h and converted to PA bitmap
+ * numbers in asm-offsets.c */
+
+ /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
+ extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
+ depi -1,27,1,%r20 /* R bit */
+
+ /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
+ extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
+ depi -1,7,1,%r20 /* T bit */
+
+ STREG %r20,TASK_PT_PSW(%r1)
+
+ /* Always store space registers, since sr3 can be changed (e.g. fork) */
+
+ mfsp %sr3,%r25
+ STREG %r25,TASK_PT_SR3(%r1)
+ STREG %r25,TASK_PT_SR4(%r1)
+ STREG %r25,TASK_PT_SR5(%r1)
+ STREG %r25,TASK_PT_SR6(%r1)
+ STREG %r25,TASK_PT_SR7(%r1)
+ STREG %r25,TASK_PT_IASQ0(%r1)
+ STREG %r25,TASK_PT_IASQ1(%r1)
+
+ /* XXX W bit??? */
+ /* Now if old D bit is clear, it means we didn't save all registers
+ * on syscall entry, so do that now. This only happens on TRACEME
+ * calls, or if someone attached to us while we were on a syscall.
+ * We could make this more efficient by not saving r3-r18, but
+ * then we wouldn't be able to use the common intr_restore path.
+ * It is only for traced processes anyway, so performance is not
+ * an issue.
+ */
+ bb,< %r2,30,pt_regs_ok /* Branch if D set */
+ ldo TASK_REGS(%r1),%r25
+ reg_save %r25 /* Save r3 to r18 */
+
+ /* Save the current sr */
+ mfsp %sr0,%r2
+ STREG %r2,TASK_PT_SR0(%r1)
+
+ /* Save the scratch sr */
+ mfsp %sr1,%r2
+ STREG %r2,TASK_PT_SR1(%r1)
+
+ /* sr2 should be set to zero for userspace syscalls */
+ STREG %r0,TASK_PT_SR2(%r1)
+
+pt_regs_ok:
+ LDREG TASK_PT_GR31(%r1),%r2
+ depi 3,31,2,%r2 /* ensure return to user mode. */
+ STREG %r2,TASK_PT_IAOQ0(%r1)
+ ldo 4(%r2),%r2
+ STREG %r2,TASK_PT_IAOQ1(%r1)
+ copy %r25,%r16
+ b intr_restore
+ nop
+
+ .import do_softirq,code
+syscall_do_softirq:
+ bl do_softirq,%r2
+ nop
+	/* NOTE: We enable the I-bit in case we schedule later,
+ * and we might be going back to userspace if we were
+ * traced. */
+ b syscall_check_resched
+ ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */
+
+ .import schedule,code
+syscall_do_resched:
+ BL schedule,%r2
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#else
+ nop
+#endif
+ b syscall_check_bh /* if resched, we start over again */
+ nop
+
+ .import do_signal,code
+syscall_do_signal:
+ /* Save callee-save registers (for sigcontext).
+ FIXME: After this point the process structure should be
+ consistent with all the relevant state of the process
+ before the syscall. We need to verify this. */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
+ reg_save %r25
+
+ ldi 1, %r24 /* unsigned long in_syscall */
+
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ BL do_signal,%r2
+ copy %r0, %r26 /* sigset_t *oldset = NULL */
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
+ reg_restore %r20
+
+ b,n syscall_check_sig
+
+ /*
+ * get_register is used by the non access tlb miss handlers to
+ * copy the value of the general register specified in r8 into
+ * r1. This routine can't be used for shadowed registers, since
+ * the rfir will restore the original value. So, for the shadowed
+ * registers we put a -1 into r1 to indicate that the register
+ * should not be used (the register being copied could also have
+ * a -1 in it, but that is OK, it just means that we will have
+ * to use the slow path instead).
+ */
+
+get_register:
+ blr %r8,%r0
+ nop
+ bv %r0(%r25) /* r0 */
+ copy %r0,%r1
+ bv %r0(%r25) /* r1 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r2 */
+ copy %r2,%r1
+ bv %r0(%r25) /* r3 */
+ copy %r3,%r1
+ bv %r0(%r25) /* r4 */
+ copy %r4,%r1
+ bv %r0(%r25) /* r5 */
+ copy %r5,%r1
+ bv %r0(%r25) /* r6 */
+ copy %r6,%r1
+ bv %r0(%r25) /* r7 */
+ copy %r7,%r1
+ bv %r0(%r25) /* r8 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r9 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r10 */
+ copy %r10,%r1
+ bv %r0(%r25) /* r11 */
+ copy %r11,%r1
+ bv %r0(%r25) /* r12 */
+ copy %r12,%r1
+ bv %r0(%r25) /* r13 */
+ copy %r13,%r1
+ bv %r0(%r25) /* r14 */
+ copy %r14,%r1
+ bv %r0(%r25) /* r15 */
+ copy %r15,%r1
+ bv %r0(%r25) /* r16 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r17 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r18 */
+ copy %r18,%r1
+ bv %r0(%r25) /* r19 */
+ copy %r19,%r1
+ bv %r0(%r25) /* r20 */
+ copy %r20,%r1
+ bv %r0(%r25) /* r21 */
+ copy %r21,%r1
+ bv %r0(%r25) /* r22 */
+ copy %r22,%r1
+ bv %r0(%r25) /* r23 */
+ copy %r23,%r1
+ bv %r0(%r25) /* r24 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r25 - shadowed */
+ ldi -1,%r1
+ bv %r0(%r25) /* r26 */
+ copy %r26,%r1
+ bv %r0(%r25) /* r27 */
+ copy %r27,%r1
+ bv %r0(%r25) /* r28 */
+ copy %r28,%r1
+ bv %r0(%r25) /* r29 */
+ copy %r29,%r1
+ bv %r0(%r25) /* r30 */
+ copy %r30,%r1
+ bv %r0(%r25) /* r31 */
+ copy %r31,%r1
+
+ /*
+ * set_register is used by the non access tlb miss handlers to
+ * copy the value of r1 into the general register specified in
+ * r8.
+ */
+
+set_register:
+ blr %r8,%r0
+ nop
+ bv %r0(%r25) /* r0 (silly, but it is a place holder) */
+ copy %r1,%r0
+ bv %r0(%r25) /* r1 */
+ copy %r1,%r1
+ bv %r0(%r25) /* r2 */
+ copy %r1,%r2
+ bv %r0(%r25) /* r3 */
+ copy %r1,%r3
+ bv %r0(%r25) /* r4 */
+ copy %r1,%r4
+ bv %r0(%r25) /* r5 */
+ copy %r1,%r5
+ bv %r0(%r25) /* r6 */
+ copy %r1,%r6
+ bv %r0(%r25) /* r7 */
+ copy %r1,%r7
+ bv %r0(%r25) /* r8 */
+ copy %r1,%r8
+ bv %r0(%r25) /* r9 */
+ copy %r1,%r9
+ bv %r0(%r25) /* r10 */
+ copy %r1,%r10
+ bv %r0(%r25) /* r11 */
+ copy %r1,%r11
+ bv %r0(%r25) /* r12 */
+ copy %r1,%r12
+ bv %r0(%r25) /* r13 */
+ copy %r1,%r13
+ bv %r0(%r25) /* r14 */
+ copy %r1,%r14
+ bv %r0(%r25) /* r15 */
+ copy %r1,%r15
+ bv %r0(%r25) /* r16 */
+ copy %r1,%r16
+ bv %r0(%r25) /* r17 */
+ copy %r1,%r17
+ bv %r0(%r25) /* r18 */
+ copy %r1,%r18
+ bv %r0(%r25) /* r19 */
+ copy %r1,%r19
+ bv %r0(%r25) /* r20 */
+ copy %r1,%r20
+ bv %r0(%r25) /* r21 */
+ copy %r1,%r21
+ bv %r0(%r25) /* r22 */
+ copy %r1,%r22
+ bv %r0(%r25) /* r23 */
+ copy %r1,%r23
+ bv %r0(%r25) /* r24 */
+ copy %r1,%r24
+ bv %r0(%r25) /* r25 */
+ copy %r1,%r25
+ bv %r0(%r25) /* r26 */
+ copy %r1,%r26
+ bv %r0(%r25) /* r27 */
+ copy %r1,%r27
+ bv %r0(%r25) /* r28 */
+ copy %r1,%r28
+ bv %r0(%r25) /* r29 */
+ copy %r1,%r29
+ bv %r0(%r25) /* r30 */
+ copy %r1,%r30
+ bv %r0(%r25) /* r31 */
+ copy %r1,%r31
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
new file mode 100644
index 000000000000..f244fb200db1
--- /dev/null
+++ b/arch/parisc/kernel/firmware.c
@@ -0,0 +1,1405 @@
+/*
+ * arch/parisc/kernel/firmware.c - safe PDC access routines
+ *
+ * PDC == Processor Dependent Code
+ *
+ * See http://www.parisc-linux.org/documentation/index.html
+ * for documentation describing the entry points and calling
+ * conventions defined below.
+ *
+ * Copyright 1999 SuSE GmbH Nuernberg (Philipp Rumpf, prumpf@tux.org)
+ * Copyright 1999 The Puffin Group, (Alex deVries, David Kennedy)
+ * Copyright 2003 Grant Grundler <grundler parisc-linux org>
+ * Copyright 2003,2004 Ryan Bradetich <rbrad@parisc-linux.org>
+ * Copyright 2004 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+/* I think it would be in everyone's best interest to follow these
+ * guidelines when writing PDC wrappers:
+ *
+ * - the name of the pdc wrapper should match one of the macros
+ * used for the first two arguments
+ * - don't use caps for random parts of the name
+ * - use the static PDC result buffers and "copyout" to structs
+ * supplied by the caller to encapsulate alignment restrictions
+ * - hold pdc_lock while in PDC or using static result buffers
+ * - use __pa() to convert virtual (kernel) pointers to physical
+ * ones.
+ * - the name of the struct used for pdc return values should equal
+ * one of the macros used for the first two arguments to the
+ * corresponding PDC call
+ * - keep the order of arguments
+ * - don't be smart (setting trailing NUL bytes for strings, returning
+ * something useful even if the call failed) unless you are sure
+ * it's not going to affect functionality or performance
+ *
+ * Example:
+ * int pdc_cache_info(struct pdc_cache_info *cache_info )
+ * {
+ * int retval;
+ *
+ * spin_lock_irq(&pdc_lock);
+ * retval = mem_pdc_call(PDC_CACHE,PDC_CACHE_INFO,__pa(cache_info),0);
+ * convert_to_wide(pdc_result);
+ * memcpy(cache_info, pdc_result, sizeof(*cache_info));
+ * spin_unlock_irq(&pdc_lock);
+ *
+ * return retval;
+ * }
+ * prumpf 991016
+ */
+
+#include <stdarg.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/system.h>
+#include <asm/processor.h> /* for boot_cpu_data */
+
+static DEFINE_SPINLOCK(pdc_lock);
+static unsigned long pdc_result[32] __attribute__ ((aligned (8)));
+static unsigned long pdc_result2[32] __attribute__ ((aligned (8)));
+
+#ifdef __LP64__
+#define WIDE_FIRMWARE 0x1
+#define NARROW_FIRMWARE 0x2
+
+/* The firmware width flag is initially set to narrow (1) so that
+ * set_firmware_width() can determine the actual firmware width. */
+int parisc_narrow_firmware = 1;
+#endif
+
+/* on all currently-supported platforms, IODC I/O calls are always
+ * 32-bit calls, and MEM_PDC calls are always the same width as the OS.
+ * This means Cxxx boxes can't run wide kernels right now. -PB
+ *
+ * CONFIG_PDC_NARROW has been added to allow 64-bit kernels to run on
+ * systems with 32-bit MEM_PDC calls. This will allow wide kernels to
+ * run on Cxxx boxes now. -RB
+ *
+ * Note that some PAT boxes may have 64-bit IODC I/O...
+ */
+
+#ifdef __LP64__
+long real64_call(unsigned long function, ...);
+#endif
+long real32_call(unsigned long function, ...);
+
+#ifdef __LP64__
+# define MEM_PDC (unsigned long)(PAGE0->mem_pdc_hi) << 32 | PAGE0->mem_pdc
+# define mem_pdc_call(args...) unlikely(parisc_narrow_firmware) ? real32_call(MEM_PDC, args) : real64_call(MEM_PDC, args)
+#else
+# define MEM_PDC (unsigned long)PAGE0->mem_pdc
+# define mem_pdc_call(args...) real32_call(MEM_PDC, args)
+#endif
+
+
+/**
+ * f_extend - Convert PDC addresses to kernel addresses.
+ * @address: Address returned from PDC.
+ *
+ * This function is used to convert PDC addresses into kernel addresses
+ * when the PDC address size and kernel address size are different.
+ */
+static unsigned long f_extend(unsigned long address)
+{
+#ifdef __LP64__
+ if(unlikely(parisc_narrow_firmware)) {
+ if((address & 0xff000000) == 0xf0000000)
+ return 0xf0f0f0f000000000UL | (u32)address;
+
+ if((address & 0xf0000000) == 0xf0000000)
+ return 0xffffffff00000000UL | (u32)address;
+ }
+#endif
+ return address;
+}
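As a worked check of the rule above, here is a standalone sketch (not part of this patch; the function name and the example addresses are chosen purely for illustration) showing how narrow-firmware f-space addresses get widened while everything else passes through unchanged:

/* Standalone sketch only -- assumes narrow firmware. */
#include <assert.h>
#include <stdint.h>

static uint64_t f_extend_sketch(uint64_t address)
{
	if ((address & 0xff000000) == 0xf0000000)
		return 0xf0f0f0f000000000ULL | (uint32_t)address;
	if ((address & 0xf0000000) == 0xf0000000)
		return 0xffffffff00000000ULL | (uint32_t)address;
	return address;
}

int main(void)
{
	assert(f_extend_sketch(0xf0105000) == 0xf0f0f0f0f0105000ULL);
	assert(f_extend_sketch(0xf4105000) == 0xfffffffff4105000ULL);
	assert(f_extend_sketch(0x12345678) == 0x12345678);
	return 0;
}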
+
+/**
+ * convert_to_wide - Widen the 32-bit values in a PDC return buffer.
+ * @addr: The return buffer filled by PDC.
+ *
+ * When a wide kernel runs on narrow firmware, PDC fills the return buffer
+ * with 32-bit values; this function widens them in place to the 64-bit
+ * values the kernel expects.
+ */
+static void convert_to_wide(unsigned long *addr)
+{
+#ifdef __LP64__
+ int i;
+ unsigned int *p = (unsigned int *)addr;
+
+ if(unlikely(parisc_narrow_firmware)) {
+ for(i = 31; i >= 0; --i)
+ addr[i] = p[i];
+ }
+#endif
+}
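The loop above must run from index 31 down to 0 because the widening is done in place: each 64-bit slot it writes overlaps two of the 32-bit source words, and walking downward guarantees a source word is only overwritten after it has been read. A standalone sketch of the same in-place widening (names hypothetical, not part of this patch):

/* Standalone sketch only -- mirrors the in-place cast used above. */
#include <stdio.h>

static void widen_in_place(unsigned long *addr, int n)
{
	unsigned int *p = (unsigned int *)addr;
	int i;

	for (i = n - 1; i >= 0; --i)	/* top-down, or p[] gets clobbered */
		addr[i] = p[i];
}

int main(void)
{
	unsigned long buf[4];
	unsigned int *narrow = (unsigned int *)buf;

	narrow[0] = 10; narrow[1] = 20; narrow[2] = 30; narrow[3] = 40;
	widen_in_place(buf, 4);
	printf("%lu %lu %lu %lu\n", buf[0], buf[1], buf[2], buf[3]);	/* 10 20 30 40 */
	return 0;
}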
+
+/**
+ * set_firmware_width - Determine if the firmware is wide or narrow.
+ *
+ * This function must be called before any pdc_* function that uses the convert_to_wide
+ * function.
+ */
+void __init set_firmware_width(void)
+{
+#ifdef __LP64__
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ if(pdc_result[0] != NARROW_FIRMWARE)
+ parisc_narrow_firmware = 0;
+ spin_unlock_irq(&pdc_lock);
+#endif
+}
+
+/**
+ * pdc_emergency_unlock - Unlock the linux pdc lock
+ *
+ * This call unlocks the linux pdc lock in case we need some PDC functions
+ * (like pdc_add_valid) during kernel stack dump.
+ */
+void pdc_emergency_unlock(void)
+{
+ /* Spinlock DEBUG code freaks out if we unconditionally unlock */
+ if (spin_is_locked(&pdc_lock))
+ spin_unlock(&pdc_lock);
+}
+
+
+/**
+ * pdc_add_valid - Verify address can be accessed without causing a HPMC.
+ * @address: Address to be verified.
+ *
+ * This PDC call attempts to read from the specified address and verifies
+ * if the address is valid.
+ *
+ * The return value is PDC_OK (0) in case accessing this address is valid.
+ */
+int pdc_add_valid(unsigned long address)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_ADD_VALID, PDC_ADD_VALID_VERIFY, address);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_add_valid);
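A hypothetical caller sketch (kernel context; the helper name is made up for illustration): probing an address through pdc_add_valid() lets the kernel report an error instead of taking an HPMC on a bad access.

/* Hypothetical caller -- assumes the usual kernel/PDC headers. */
static int address_is_accessible_example(unsigned long addr)
{
	/* PDC_OK (0) means the address can be read without an HPMC */
	return pdc_add_valid(addr) == PDC_OK;
}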
+
+/**
+ * pdc_chassis_info - Return chassis information.
+ * @chassis_info: The return buffer for the chassis information.
+ * @led_info: The LED information buffer address.
+ * @len: The size of the LED information buffer.
+ *
+ * An HVERSION dependent call for returning the chassis information.
+ */
+int __init pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ memcpy(&pdc_result, chassis_info, sizeof(*chassis_info));
+ memcpy(&pdc_result2, led_info, len);
+ retval = mem_pdc_call(PDC_CHASSIS, PDC_RETURN_CHASSIS_INFO,
+ __pa(pdc_result), __pa(pdc_result2), len);
+ memcpy(chassis_info, pdc_result, sizeof(*chassis_info));
+ memcpy(led_info, pdc_result2, len);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_chassis_send_log - Sends a PDC PAT CHASSIS log message.
+ * @state: The PDC PAT chassis state to log.
+ * @data: The log data; must be correctly formatted or expect a system crash.
+ *
+ * Returns -1 on error, 0 on success. Other values are PDC errors.
+ */
+#ifdef __LP64__
+int pdc_pat_chassis_send_log(unsigned long state, unsigned long data)
+{
+ int retval = 0;
+
+ if (!is_pdc_pat())
+ return -1;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_CHASSIS_LOG, PDC_PAT_CHASSIS_WRITE_LOG, __pa(&state), __pa(&data));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+#endif
+
+/**
+ * pdc_chassis_disp - Updates the chassis display.
+ * @disp: The display value to show.
+ *
+ * Returns -1 on error, 0 on success. Works on old PDC only (E class, others?)
+ */
+int pdc_chassis_disp(unsigned long disp)
+{
+ int retval = 0;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_CHASSIS, PDC_CHASSIS_DISP, disp);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_coproc_cfg - To identify coprocessors attached to the processor.
+ * @pdc_coproc_info: Return buffer address.
+ *
+ * This PDC call returns the presence and status of all the coprocessors
+ * attached to the processor.
+ */
+int __init pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_COPROC, PDC_COPROC_CFG, __pa(pdc_result));
+ convert_to_wide(pdc_result);
+ pdc_coproc_info->ccr_functional = pdc_result[0];
+ pdc_coproc_info->ccr_present = pdc_result[1];
+ pdc_coproc_info->revision = pdc_result[17];
+ pdc_coproc_info->model = pdc_result[18];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_iodc_read - Read data from the module's IODC.
+ * @actcnt: The actual number of bytes transferred.
+ * @hpa: The HPA of the module for the iodc read.
+ * @index: The iodc entry point.
+ * @iodc_data: A memory buffer for the IODC data.
+ * @iodc_data_size: Size of the memory buffer.
+ *
+ * This PDC call reads from the IODC of the module specified by the hpa
+ * argument.
+ */
+int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
+ void *iodc_data, unsigned int iodc_data_size)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_IODC, PDC_IODC_READ, __pa(pdc_result), hpa,
+ index, __pa(pdc_result2), iodc_data_size);
+ convert_to_wide(pdc_result);
+ *actcnt = pdc_result[0];
+ memcpy(iodc_data, pdc_result2, iodc_data_size);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_iodc_read);
+
+/**
+ * pdc_system_map_find_mods - Locate unarchitected modules.
+ * @pdc_mod_info: Return buffer address.
+ * @mod_path: pointer to dev path structure.
+ * @mod_index: fixed address module index.
+ *
+ * To locate and identify modules which reside at fixed I/O addresses, which
+ * do not self-identify via architected bus walks.
+ */
+int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
+ struct pdc_module_path *mod_path, long mod_index)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_MODULE, __pa(pdc_result),
+ __pa(pdc_result2), mod_index);
+ convert_to_wide(pdc_result);
+ memcpy(pdc_mod_info, pdc_result, sizeof(*pdc_mod_info));
+ memcpy(mod_path, pdc_result2, sizeof(*mod_path));
+ spin_unlock_irq(&pdc_lock);
+
+ pdc_mod_info->mod_addr = f_extend(pdc_mod_info->mod_addr);
+ return retval;
+}
+
+/**
+ * pdc_system_map_find_addrs - Retrieve additional address ranges.
+ * @pdc_addr_info: Return buffer address.
+ * @mod_index: Fixed address module index.
+ * @addr_index: Address range index.
+ *
+ * Retrieve additional information about subsequent address ranges for modules
+ * with multiple address ranges.
+ */
+int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
+ long mod_index, long addr_index)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_SYSTEM_MAP, PDC_FIND_ADDRESS, __pa(pdc_result),
+ mod_index, addr_index);
+ convert_to_wide(pdc_result);
+ memcpy(pdc_addr_info, pdc_result, sizeof(*pdc_addr_info));
+ spin_unlock_irq(&pdc_lock);
+
+ pdc_addr_info->mod_addr = f_extend(pdc_addr_info->mod_addr);
+ return retval;
+}
+
+/**
+ * pdc_model_info - Return model information about the processor.
+ * @model: The return buffer.
+ *
+ * Returns the version numbers, identifiers, and capabilities from the processor module.
+ */
+int pdc_model_info(struct pdc_model *model)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_INFO, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(model, pdc_result, sizeof(*model));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_model_sysmodel - Get the system model name.
+ * @name: A char array of at least 81 characters.
+ *
+ * Get system model name from PDC ROM (e.g. 9000/715 or 9000/778/B160L)
+ */
+int pdc_model_sysmodel(char *name)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_SYSMODEL, __pa(pdc_result),
+ OS_ID_HPUX, __pa(name));
+ convert_to_wide(pdc_result);
+
+ if (retval == PDC_OK) {
+ name[pdc_result[0]] = '\0'; /* add trailing '\0' */
+ } else {
+ name[0] = 0;
+ }
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
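A hypothetical caller sketch (kernel context; the function name is made up), sized to the 81-character requirement noted above:

/* Hypothetical caller -- assumes <linux/kernel.h> and <asm/pdc.h>. */
static void print_sysmodel_example(void)
{
	char model[81];		/* at least 81 chars, per the comment above */

	if (pdc_model_sysmodel(model) == PDC_OK)
		printk(KERN_INFO "system model: %s\n", model);
}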
+
+/**
+ * pdc_model_versions - Identify the version number of each processor.
+ * @versions: The return buffer.
+ * @id: The id of the processor to check.
+ *
+ * Returns the version number for each processor component.
+ *
+ * This comment was here before, but I do not know what it means :( -RB
+ * id: 0 = cpu revision, 1 = boot-rom-version
+ */
+int pdc_model_versions(unsigned long *versions, int id)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_VERSIONS, __pa(pdc_result), id);
+ convert_to_wide(pdc_result);
+ *versions = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_model_cpuid - Returns the CPU_ID.
+ * @cpu_id: The return buffer.
+ *
+ * Returns the CPU_ID value which uniquely identifies the cpu portion of
+ * the processor module.
+ */
+int pdc_model_cpuid(unsigned long *cpu_id)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CPU_ID, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ *cpu_id = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_model_capabilities - Returns the platform capabilities.
+ * @capabilities: The return buffer.
+ *
+ * Returns information about platform support for 32- and/or 64-bit
+ * OSes, IO-PDIR coherency, and virtual aliasing.
+ */
+int pdc_model_capabilities(unsigned long *capabilities)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ pdc_result[0] = 0; /* preset zero (call may not be implemented!) */
+ retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_CAPABILITIES, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ *capabilities = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_cache_info - Return cache and TLB information.
+ * @cache_info: The return buffer.
+ *
+ * Returns information about the processor's cache and TLB.
+ */
+int pdc_cache_info(struct pdc_cache_info *cache_info)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_CACHE, PDC_CACHE_INFO, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(cache_info, pdc_result, sizeof(*cache_info));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+#ifndef CONFIG_PA20
+/**
+ * pdc_btlb_info - Return block TLB information.
+ * @btlb: The return buffer.
+ *
+ * Returns information about the hardware Block TLB.
+ */
+int pdc_btlb_info(struct pdc_btlb_info *btlb)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
+ memcpy(btlb, pdc_result, sizeof(*btlb));
+ spin_unlock_irq(&pdc_lock);
+
+ if(retval < 0) {
+ btlb->max_size = 0;
+ }
+ return retval;
+}
+
+/**
+ * pdc_mem_map_hpa - Find fixed module information.
+ * @address: The return buffer
+ * @mod_path: pointer to dev path structure.
+ *
+ * This call was developed for S700 workstations to allow the kernel to find
+ * the I/O devices (Core I/O). In the future (Kittyhawk and beyond) this
+ * call will be replaced (on workstations) by the architected PDC_SYSTEM_MAP
+ * call.
+ *
+ * This call is supported by all existing S700 workstations (up to Gecko).
+ */
+int pdc_mem_map_hpa(struct pdc_memory_map *address,
+ struct pdc_module_path *mod_path)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ memcpy(pdc_result2, mod_path, sizeof(*mod_path));
+ retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
+ __pa(pdc_result2));
+ memcpy(address, pdc_result, sizeof(*address));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+#endif /* !CONFIG_PA20 */
+
+/**
+ * pdc_lan_station_id - Get the LAN address.
+ * @lan_addr: The return buffer.
+ * @hpa: The network device HPA.
+ *
+ * Get the LAN station address when it is not directly available from the LAN hardware.
+ */
+int pdc_lan_station_id(char *lan_addr, unsigned long hpa)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_LAN_STATION_ID, PDC_LAN_STATION_ID_READ,
+ __pa(pdc_result), hpa);
+ if (retval < 0) {
+ /* FIXME: else read MAC from NVRAM */
+ memset(lan_addr, 0, PDC_LAN_STATION_ID_SIZE);
+ } else {
+ memcpy(lan_addr, pdc_result, PDC_LAN_STATION_ID_SIZE);
+ }
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_lan_station_id);
+
+/**
+ * pdc_stable_read - Read data from Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be copied.
+ * @count: The number of bytes to transfer; must be a multiple of 4.
+ *
+ * This PDC call reads from the Stable Storage address supplied in staddr
+ * and copies count bytes to the memory address memaddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_read(unsigned long staddr, void *memaddr, unsigned long count)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_READ, staddr,
+ __pa(pdc_result), count);
+ convert_to_wide(pdc_result);
+ memcpy(memaddr, pdc_result, count);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_read);
+
+/**
+ * pdc_stable_write - Write data to Stable Storage.
+ * @staddr: Stable Storage address to access.
+ * @memaddr: The memory address where Stable Storage data shall be read from.
+ * @count: The number of bytes to transfer; must be a multiple of 4.
+ *
+ * This PDC call reads count bytes from the supplied memaddr address,
+ * and copies count bytes to the Stable Storage address staddr.
+ * The call will fail if staddr+count > PDC_STABLE size.
+ */
+int pdc_stable_write(unsigned long staddr, void *memaddr, unsigned long count)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ memcpy(pdc_result, memaddr, count);
+ convert_to_wide(pdc_result);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_WRITE, staddr,
+ __pa(pdc_result), count);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_write);
+
+/**
+ * pdc_stable_get_size - Get Stable Storage size in bytes.
+ * @size: pointer where the size will be stored.
+ *
+ * This PDC call returns the number of bytes in the processor's Stable
+ * Storage, which is the number of contiguous bytes implemented in Stable
+ * Storage starting from staddr=0. The size is an unsigned 64-bit integer
+ * which is a multiple of four.
+ */
+int pdc_stable_get_size(unsigned long *size)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_RETURN_SIZE, __pa(pdc_result));
+ *size = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_get_size);
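A hypothetical caller sketch (kernel context; names made up) combining the Stable Storage helpers above: query the size first, then read with a count that is a multiple of four:

/* Hypothetical caller -- assumes <linux/kernel.h> and <asm/pdc.h>. */
static int stable_dump_example(void)
{
	unsigned long size;
	unsigned char buf[16];		/* count must be a multiple of 4 */

	if (pdc_stable_get_size(&size) != PDC_OK || size < sizeof(buf))
		return -1;
	if (pdc_stable_read(0, buf, sizeof(buf)) != PDC_OK)
		return -1;

	printk(KERN_INFO "Stable Storage holds %lu bytes\n", size);
	return 0;
}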
+
+/**
+ * pdc_stable_verify_contents - Checks that Stable Storage contents are valid.
+ *
+ * This PDC call is meant to be used to check the integrity of the current
+ * contents of Stable Storage.
+ */
+int pdc_stable_verify_contents(void)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_VERIFY_CONTENTS);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_verify_contents);
+
+/**
+ * pdc_stable_initialize - Sets Stable Storage contents to zero and initializes
+ * the validity indicator.
+ *
+ * This PDC call will erase all contents of Stable Storage. Use with care!
+ */
+int pdc_stable_initialize(void)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_STABLE, PDC_STABLE_INITIALIZE);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_stable_initialize);
+
+/**
+ * pdc_get_initiator - Get the SCSI Interface Card params (SCSI ID, SDTR, SE or LVD)
+ * @hwpath: fully bc.mod style path to the device.
+ * @initiator: the array to return the result into
+ *
+ * Get the SCSI operational parameters from PDC.
+ * Needed since HPUX never used BIOS or symbios card NVRAM.
+ * Most ncr/sym cards won't have an entry and just use whatever
+ * capabilities the card has (e.g. Ultra, LVD). But there are
+ * several cases where it's useful:
+ * o set the SCSI id for multi-initiator clusters,
+ * o cable too long (i.e. SE SCSI at 10 MHz won't support a 6 m length),
+ * o bus width exported is less than what the interface chip supports.
+ */
+int pdc_get_initiator(struct hardware_path *hwpath, struct pdc_initiator *initiator)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+
+/* BCJ-XXXX series boxes. E.G. "9000/785/C3000" */
+#define IS_SPROCKETS() (strlen(boot_cpu_data.pdc.sys_model_name) == 14 && \
+ strncmp(boot_cpu_data.pdc.sys_model_name, "9000/785", 8) == 0)
+
+ retval = mem_pdc_call(PDC_INITIATOR, PDC_GET_INITIATOR,
+ __pa(pdc_result), __pa(hwpath));
+ if (retval < PDC_OK)
+ goto out;
+
+ if (pdc_result[0] < 16) {
+ initiator->host_id = pdc_result[0];
+ } else {
+ initiator->host_id = -1;
+ }
+
+ /*
+ * Sprockets and Piranha return 20 or 40 (MT/s). Prelude returns
+ * 1, 2, 5 or 10 for 5, 10, 20 or 40 MT/s, respectively
+ */
+ switch (pdc_result[1]) {
+ case 1: initiator->factor = 50; break;
+ case 2: initiator->factor = 25; break;
+ case 5: initiator->factor = 12; break;
+ case 25: initiator->factor = 10; break;
+ case 20: initiator->factor = 12; break;
+ case 40: initiator->factor = 10; break;
+ default: initiator->factor = -1; break;
+ }
+
+ if (IS_SPROCKETS()) {
+ initiator->width = pdc_result[4];
+ initiator->mode = pdc_result[5];
+ } else {
+ initiator->width = -1;
+ initiator->mode = -1;
+ }
+
+ out:
+ spin_unlock_irq(&pdc_lock);
+ return (retval >= PDC_OK);
+}
+EXPORT_SYMBOL(pdc_get_initiator);
+
+
+/**
+ * pdc_pci_irt_size - Get the number of entries in the interrupt routing table.
+ * @num_entries: The return value.
+ * @hpa: The HPA for the device.
+ *
+ * This PDC function returns the number of entries in the interrupt
+ * routing table of the device at the given HPA.
+ * Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
+ */
+int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL_SIZE,
+ __pa(pdc_result), hpa);
+ convert_to_wide(pdc_result);
+ *num_entries = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pci_irt - Get the PCI interrupt routing table.
+ * @num_entries: The number of entries in the table.
+ * @hpa: The Hard Physical Address of the device.
+ * @tbl: The return buffer for the interrupt routing table.
+ *
+ * Get the PCI interrupt routing table for the device at the given HPA.
+ * Similar to PDC_PAT stuff - but added for Forte/Allegro boxes
+ */
+int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl)
+{
+ int retval;
+
+ BUG_ON((unsigned long)tbl & 0x7);
+
+ spin_lock_irq(&pdc_lock);
+ pdc_result[0] = num_entries;
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_GET_INT_TBL,
+ __pa(pdc_result), hpa, __pa(tbl));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+
+#if 0 /* UNTESTED CODE - left here in case someone needs it */
+
+/**
+ * pdc_pci_config_read - Read PCI config space.
+ * @hpa: Token from PDC to indicate which PCI device.
+ * @cfg_addr: Configuration space address to read from.
+ *
+ * Read PCI Configuration space *before* linux PCI subsystem is running.
+ */
+unsigned int pdc_pci_config_read(void *hpa, unsigned long cfg_addr)
+{
+ int retval;
+ spin_lock_irq(&pdc_lock);
+ pdc_result[0] = 0;
+ pdc_result[1] = 0;
+ retval = mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_READ_CONFIG,
+ __pa(pdc_result), hpa, cfg_addr&~3UL, 4UL);
+ spin_unlock_irq(&pdc_lock);
+ return retval ? ~0 : (unsigned int) pdc_result[0];
+}
+
+
+/**
+ * pdc_pci_config_write - Write PCI config space.
+ * @hpa: Token from PDC to indicate which PCI device.
+ * @cfg_addr: Configuration space address to write to.
+ * @val: Value we want in the 32-bit register.
+ *
+ * Write PCI Configuration space *before* linux PCI subsystem is running.
+ */
+void pdc_pci_config_write(void *hpa, unsigned long cfg_addr, unsigned int val)
+{
+	spin_lock_irq(&pdc_lock);
+	pdc_result[0] = 0;
+	mem_pdc_call(PDC_PCI_INDEX, PDC_PCI_WRITE_CONFIG,
+			__pa(pdc_result), hpa,
+			cfg_addr&~3UL, 4UL, (unsigned long) val);
+	spin_unlock_irq(&pdc_lock);
+}
+#endif /* UNTESTED CODE */
+
+/**
+ * pdc_tod_read - Read the Time-Of-Day clock.
+ * @tod: The return buffer.
+ *
+ * Read the Time-Of-Day clock
+ */
+int pdc_tod_read(struct pdc_tod *tod)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(pdc_result), 0);
+ convert_to_wide(pdc_result);
+ memcpy(tod, pdc_result, sizeof(*tod));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_tod_read);
+
+/**
+ * pdc_tod_set - Set the Time-Of-Day clock.
+ * @sec: The number of seconds since epoch.
+ * @usec: The number of micro seconds.
+ *
+ * Set the Time-Of-Day clock.
+ */
+int pdc_tod_set(unsigned long sec, unsigned long usec)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_TOD, PDC_TOD_WRITE, sec, usec);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_tod_set);
+
+#ifdef __LP64__
+int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
+ struct pdc_memory_table *tbl, unsigned long entries)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_MEM, PDC_MEM_TABLE, __pa(pdc_result), __pa(pdc_result2), entries);
+ convert_to_wide(pdc_result);
+ memcpy(r_addr, pdc_result, sizeof(*r_addr));
+ memcpy(tbl, pdc_result2, entries * sizeof(*tbl));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+#endif /* __LP64__ */
+
+/* FIXME: Is this pdc used? I could not find type reference to ftc_bitmap
+ * so I guessed at unsigned long. Someone who knows what this does, can fix
+ * it later. :)
+ */
+int pdc_do_firm_test_reset(unsigned long ftc_bitmap)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_FIRM_TEST_RESET,
+ PDC_FIRM_TEST_MAGIC, ftc_bitmap);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/*
+ * pdc_do_reset - Reset the system.
+ *
+ * Reset the system.
+ */
+int pdc_do_reset(void)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_BROADCAST_RESET, PDC_DO_RESET);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/*
+ * pdc_soft_power_info - Query the soft power switch register.
+ * @power_reg: The return buffer for the address of the soft power register.
+ *
+ * Returns the absolute address of the soft power switch register.
+ */
+int __init pdc_soft_power_info(unsigned long *power_reg)
+{
+ int retval;
+
+ *power_reg = (unsigned long) (-1);
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_INFO, __pa(pdc_result), 0);
+ if (retval == PDC_OK) {
+ convert_to_wide(pdc_result);
+ *power_reg = f_extend(pdc_result[0]);
+ }
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/*
+ * pdc_soft_power_button - Control the soft power button behaviour
+ * @sw_control: 0 for hardware control, 1 for software control
+ *
+ * This PDC function places the soft power button under software or
+ * hardware control.
+ * Under software control the OS decides when to allow the system to
+ * shut down. Under hardware control pressing the power button
+ * powers off the system immediately.
+ */
+int pdc_soft_power_button(int sw_control)
+{
+ int retval;
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_SOFT_POWER, PDC_SOFT_POWER_ENABLE, __pa(pdc_result), sw_control);
+ spin_unlock_irq(&pdc_lock);
+ return retval;
+}
+
+/*
+ * pdc_io_reset - Hack to avoid overlapping range registers of Bridges devices.
+ * Primarily a problem on T600 (which parisc-linux doesn't support) but
+ * who knows what other platform firmware might do with this OS "hook".
+ */
+void pdc_io_reset(void)
+{
+ spin_lock_irq(&pdc_lock);
+ mem_pdc_call(PDC_IO, PDC_IO_RESET, 0);
+ spin_unlock_irq(&pdc_lock);
+}
+
+/*
+ * pdc_io_reset_devices - Hack to stop the USB controller.
+ *
+ * If PDC used the USB controller, it may still be running and its
+ * in-flight DMA will crash the machine during IOMMU setup. This PDC
+ * call stops the USB controller.
+ * Normally called after pdc_io_reset().
+ */
+void pdc_io_reset_devices(void)
+{
+ spin_lock_irq(&pdc_lock);
+ mem_pdc_call(PDC_IO, PDC_IO_RESET_DEVICES, 0);
+ spin_unlock_irq(&pdc_lock);
+}
+
+
+/**
+ * pdc_iodc_putc - Console character print using IODC.
+ * @c: the character to output.
+ *
+ * Note that only these special chars are architected for console IODC io:
+ * BEL, BS, CR, and LF. Others are passed through.
+ * Since the HP console requires CR+LF to perform a 'newline', we translate
+ * "\n" to "\r\n".
+ */
+void pdc_iodc_putc(unsigned char c)
+{
+ /* XXX Should we spinlock posx usage */
+ static int posx; /* for simple TAB-Simulation... */
+ static int __attribute__((aligned(8))) iodc_retbuf[32];
+ static char __attribute__((aligned(64))) iodc_dbuf[4096];
+ unsigned int n;
+	unsigned long flags;
+
+ switch (c) {
+ case '\n':
+ iodc_dbuf[0] = '\r';
+ iodc_dbuf[1] = '\n';
+ n = 2;
+ posx = 0;
+ break;
+ case '\t':
+ pdc_iodc_putc(' ');
+ while (posx & 7) /* expand TAB */
+ pdc_iodc_putc(' ');
+ return; /* return since IODC can't handle this */
+ case '\b':
+ posx-=2; /* BS */
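+		/* fall through: the BS character itself is still printed below */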
+ default:
+ iodc_dbuf[0] = c;
+ n = 1;
+ posx++;
+ break;
+ }
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ real32_call(PAGE0->mem_cons.iodc_io,
+ (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
+ PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
+ __pa(iodc_retbuf), 0, __pa(iodc_dbuf), n, 0);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
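A hypothetical helper sketch (the name is made up): because pdc_iodc_putc() already performs the "\n" -> "\r\n" translation and TAB expansion, a string writer only needs to feed it one character at a time:

/* Hypothetical helper -- builds only on pdc_iodc_putc() above. */
static void pdc_iodc_puts_example(const char *s)
{
	while (*s)
		pdc_iodc_putc(*s++);
}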
+
+/**
+ * pdc_iodc_outc - Console character print using IODC (without conversions).
+ * @c: the character to output.
+ *
+ * Write the character directly to the IODC console.
+ */
+void pdc_iodc_outc(unsigned char c)
+{
+	unsigned int n;
+	unsigned long flags;
+
+	/* fill buffer with one character and print it */
+ static int __attribute__((aligned(8))) iodc_retbuf[32];
+ static char __attribute__((aligned(64))) iodc_dbuf[4096];
+
+ n = 1;
+ iodc_dbuf[0] = c;
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ real32_call(PAGE0->mem_cons.iodc_io,
+ (unsigned long)PAGE0->mem_cons.hpa, ENTRY_IO_COUT,
+ PAGE0->mem_cons.spa, __pa(PAGE0->mem_cons.dp.layers),
+ __pa(iodc_retbuf), 0, __pa(iodc_dbuf), n, 0);
+ spin_unlock_irqrestore(&pdc_lock, flags);
+}
+
+/**
+ * pdc_iodc_getc - Read a character (non-blocking) from the PDC console.
+ *
+ * Read a character (non-blocking) from the PDC console, returns -1 if
+ * key is not present.
+ */
+int pdc_iodc_getc(void)
+{
+	unsigned long flags;
+ static int __attribute__((aligned(8))) iodc_retbuf[32];
+ static char __attribute__((aligned(64))) iodc_dbuf[4096];
+ int ch;
+ int status;
+
+ /* Bail if no console input device. */
+ if (!PAGE0->mem_kbd.iodc_io)
+ return 0;
+
+ /* wait for a keyboard (rs232)-input */
+ spin_lock_irqsave(&pdc_lock, flags);
+ real32_call(PAGE0->mem_kbd.iodc_io,
+ (unsigned long)PAGE0->mem_kbd.hpa, ENTRY_IO_CIN,
+ PAGE0->mem_kbd.spa, __pa(PAGE0->mem_kbd.dp.layers),
+ __pa(iodc_retbuf), 0, __pa(iodc_dbuf), 1, 0);
+
+ ch = *iodc_dbuf;
+ status = *iodc_retbuf;
+ spin_unlock_irqrestore(&pdc_lock, flags);
+
+ if (status == 0)
+ return -1;
+
+ return ch;
+}
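A hypothetical polling sketch (kernel context; the name is made up): pdc_iodc_getc() never blocks and returns -1 when no key is pending, so draining pending input is a simple loop:

/* Hypothetical caller -- assumes <linux/kernel.h>. */
static void drain_console_input_example(void)
{
	int c;

	while ((c = pdc_iodc_getc()) != -1)
		printk(KERN_DEBUG "console key: %#x\n", c);
}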
+
+int pdc_sti_call(unsigned long func, unsigned long flags,
+ unsigned long inptr, unsigned long outputr,
+ unsigned long glob_cfg)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = real32_call(func, flags, inptr, outputr, glob_cfg);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(pdc_sti_call);
+
+#ifdef __LP64__
+/**
+ * pdc_pat_cell_get_number - Returns the cell number.
+ * @cell_info: The return buffer.
+ *
+ * This PDC call returns the cell number of the cell from which the call
+ * is made.
+ */
+int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_GET_NUMBER, __pa(pdc_result));
+ memcpy(cell_info, pdc_result, sizeof(*cell_info));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_cell_module - Retrieve the cell's module information.
+ * @actcnt: The number of bytes written to mem_addr.
+ * @ploc: The physical location.
+ * @mod: The module index.
+ * @view_type: The view of the address type.
+ * @mem_addr: The return buffer.
+ *
+ * This PDC call returns information about each module attached to the cell
+ * at the specified location.
+ */
+int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
+ unsigned long view_type, void *mem_addr)
+{
+ int retval;
+ static struct pdc_pat_cell_mod_maddr_block result __attribute__ ((aligned (8)));
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_CELL, PDC_PAT_CELL_MODULE, __pa(pdc_result),
+ ploc, mod, view_type, __pa(&result));
+ if(!retval) {
+ *actcnt = pdc_result[0];
+ memcpy(mem_addr, &result, *actcnt);
+ }
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_cpu_get_number - Retrieve the cpu number.
+ * @cpu_info: The return buffer.
+ * @hpa: The Hard Physical Address of the CPU.
+ *
+ * Retrieve the cpu number for the cpu at the specified HPA.
+ */
+int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_NUMBER,
+ __pa(&pdc_result), hpa);
+ memcpy(cpu_info, pdc_result, sizeof(*cpu_info));
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_get_irt_size - Retrieve the number of entries in the cell's interrupt table.
+ * @num_entries: The return value.
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the number of entries in the specified cell's
+ * interrupt table.
+ */
+int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE,
+ __pa(pdc_result), cell_num);
+ *num_entries = pdc_result[0];
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_get_irt - Retrieve the cell's interrupt table.
+ * @r_addr: The return buffer.
+ * @cell_num: The target cell.
+ *
+ * This PDC function returns the actual interrupt table for the specified cell.
+ */
+int pdc_pat_get_irt(void *r_addr, unsigned long cell_num)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_GET_PCI_ROUTING_TABLE,
+ __pa(r_addr), cell_num);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_pd_get_addr_map - Retrieve information about memory address ranges.
+ * @actual_len: The return buffer for the number of bytes retrieved.
+ * @mem_addr: Pointer to the memory buffer.
+ * @count: The number of bytes to read from the buffer.
+ * @offset: The offset with respect to the beginning of the buffer.
+ *
+ */
+int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
+ unsigned long count, unsigned long offset)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_PD, PDC_PAT_PD_GET_ADDR_MAP, __pa(pdc_result),
+ __pa(pdc_result2), count, offset);
+ *actual_len = pdc_result[0];
+ memcpy(mem_addr, pdc_result2, *actual_len);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_read - Read PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the read request is being made.
+ * @pci_size: Size of read in bytes. Valid values are 1, 2, and 4.
+ * @mem_addr: Pointer to return memory buffer.
+ *
+ */
+int pdc_pat_io_pci_cfg_read(unsigned long pci_addr, int pci_size, u32 *mem_addr)
+{
+ int retval;
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_READ,
+ __pa(pdc_result), pci_addr, pci_size);
+ switch(pci_size) {
+		case 1: *(u8 *) mem_addr = (u8)  pdc_result[0]; break;
+		case 2: *(u16 *)mem_addr = (u16) pdc_result[0]; break;
+		case 4: *(u32 *)mem_addr = (u32) pdc_result[0]; break;
+ }
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+
+/**
+ * pdc_pat_io_pci_cfg_write - Write PCI configuration space.
+ * @pci_addr: PCI configuration space address for which the write request is being made.
+ * @pci_size: Size of write in bytes. Valid values are 1, 2, and 4.
+ * @val: 1, 2, or 4 byte value in the low order end of the argument, to be
+ *	written to PCI Config space.
+ *
+ */
+int pdc_pat_io_pci_cfg_write(unsigned long pci_addr, int pci_size, u32 val)
+{
+ int retval;
+
+ spin_lock_irq(&pdc_lock);
+ retval = mem_pdc_call(PDC_PAT_IO, PDC_PAT_IO_PCI_CONFIG_WRITE,
+ pci_addr, pci_size, val);
+ spin_unlock_irq(&pdc_lock);
+
+ return retval;
+}
+#endif /* __LP64__ */
+
+
+/***************** 32-bit real-mode calls ***********/
+/* The struct below is used to overlay real_stack (real2.S),
+ * preparing a 32-bit call frame.
+ * real32_call_asm() then uses this stack in narrow real mode.
+ */
+
+struct narrow_stack {
+ /* use int, not long which is 64 bits */
+ unsigned int arg13;
+ unsigned int arg12;
+ unsigned int arg11;
+ unsigned int arg10;
+ unsigned int arg9;
+ unsigned int arg8;
+ unsigned int arg7;
+ unsigned int arg6;
+ unsigned int arg5;
+ unsigned int arg4;
+ unsigned int arg3;
+ unsigned int arg2;
+ unsigned int arg1;
+ unsigned int arg0;
+ unsigned int frame_marker[8];
+ unsigned int sp;
+ /* in reality, there's nearly 8k of stack after this */
+};
+
+long real32_call(unsigned long fn, ...)
+{
+ va_list args;
+ extern struct narrow_stack real_stack;
+ extern unsigned long real32_call_asm(unsigned int *,
+ unsigned int *,
+ unsigned int);
+
+ va_start(args, fn);
+ real_stack.arg0 = va_arg(args, unsigned int);
+ real_stack.arg1 = va_arg(args, unsigned int);
+ real_stack.arg2 = va_arg(args, unsigned int);
+ real_stack.arg3 = va_arg(args, unsigned int);
+ real_stack.arg4 = va_arg(args, unsigned int);
+ real_stack.arg5 = va_arg(args, unsigned int);
+ real_stack.arg6 = va_arg(args, unsigned int);
+ real_stack.arg7 = va_arg(args, unsigned int);
+ real_stack.arg8 = va_arg(args, unsigned int);
+ real_stack.arg9 = va_arg(args, unsigned int);
+ real_stack.arg10 = va_arg(args, unsigned int);
+ real_stack.arg11 = va_arg(args, unsigned int);
+ real_stack.arg12 = va_arg(args, unsigned int);
+ real_stack.arg13 = va_arg(args, unsigned int);
+ va_end(args);
+
+ return real32_call_asm(&real_stack.sp, &real_stack.arg0, fn);
+}
+
+#ifdef __LP64__
+/***************** 64-bit real-mode calls ***********/
+
+struct wide_stack {
+ unsigned long arg0;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned long arg6;
+ unsigned long arg7;
+ unsigned long arg8;
+ unsigned long arg9;
+ unsigned long arg10;
+ unsigned long arg11;
+ unsigned long arg12;
+ unsigned long arg13;
+ unsigned long frame_marker[2]; /* rp, previous sp */
+ unsigned long sp;
+ /* in reality, there's nearly 8k of stack after this */
+};
+
+long real64_call(unsigned long fn, ...)
+{
+ va_list args;
+ extern struct wide_stack real64_stack;
+ extern unsigned long real64_call_asm(unsigned long *,
+ unsigned long *,
+ unsigned long);
+
+ va_start(args, fn);
+ real64_stack.arg0 = va_arg(args, unsigned long);
+ real64_stack.arg1 = va_arg(args, unsigned long);
+ real64_stack.arg2 = va_arg(args, unsigned long);
+ real64_stack.arg3 = va_arg(args, unsigned long);
+ real64_stack.arg4 = va_arg(args, unsigned long);
+ real64_stack.arg5 = va_arg(args, unsigned long);
+ real64_stack.arg6 = va_arg(args, unsigned long);
+ real64_stack.arg7 = va_arg(args, unsigned long);
+ real64_stack.arg8 = va_arg(args, unsigned long);
+ real64_stack.arg9 = va_arg(args, unsigned long);
+ real64_stack.arg10 = va_arg(args, unsigned long);
+ real64_stack.arg11 = va_arg(args, unsigned long);
+ real64_stack.arg12 = va_arg(args, unsigned long);
+ real64_stack.arg13 = va_arg(args, unsigned long);
+ va_end(args);
+
+ return real64_call_asm(&real64_stack.sp, &real64_stack.arg0, fn);
+}
+
+#endif /* __LP64__ */
+
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
new file mode 100644
index 000000000000..2071b5bba15c
--- /dev/null
+++ b/arch/parisc/kernel/hardware.c
@@ -0,0 +1,1366 @@
+/*
+ * Hardware descriptions for HP 9000 based hardware, including
+ * system types, SCSI controllers, DMA controllers, HPPB controllers
+ * and lots more.
+ *
+ * Based on the document "PA-RISC 1.1 I/O Firmware Architecture
+ * Reference Specification", March 7, 1999, version 0.96. This
+ * is available at http://parisc-linux.org/documentation/
+ *
+ * Copyright 1999 by Alex deVries <alex@onefishtwo.ca>
+ * and copyright 1999 The Puffin Group Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+
+#include <asm/hardware.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+/*
+ * HP PARISC Hardware Database
+ * Access to this database is only possible during bootup
+ * so don't reference this table after starting the init process
+ */
+
+static struct hp_hardware hp_hardware_list[] __initdata = {
+ {HPHW_NPROC,0x01,0x4,0x0,"Indigo (840, 930)"},
+ {HPHW_NPROC,0x8,0x4,0x01,"Firefox(825,925)"},
+ {HPHW_NPROC,0xA,0x4,0x01,"Top Gun (835,834,935,635)"},
+ {HPHW_NPROC,0xB,0x4,0x01,"Technical Shogun (845, 645)"},
+ {HPHW_NPROC,0xF,0x4,0x01,"Commercial Shogun (949)"},
+ {HPHW_NPROC,0xC,0x4,0x01,"Cheetah (850, 950)"},
+ {HPHW_NPROC,0x80,0x4,0x01,"Cheetah (950S)"},
+ {HPHW_NPROC,0x81,0x4,0x01,"Jaguar (855, 955)"},
+ {HPHW_NPROC,0x82,0x4,0x01,"Cougar (860, 960)"},
+ {HPHW_NPROC,0x83,0x4,0x13,"Panther (865, 870, 980)"},
+ {HPHW_NPROC,0x100,0x4,0x01,"Burgundy (810)"},
+ {HPHW_NPROC,0x101,0x4,0x01,"SilverFox Low (822, 922)"},
+ {HPHW_NPROC,0x102,0x4,0x01,"SilverFox High (832, 932)"},
+ {HPHW_NPROC,0x103,0x4,0x01,"Lego, SilverLite (815, 808, 920)"},
+ {HPHW_NPROC,0x104,0x4,0x03,"SilverBullet Low (842, 948)"},
+ {HPHW_NPROC,0x105,0x4,0x03,"SilverBullet High (852, 958)"},
+ {HPHW_NPROC,0x106,0x4,0x81,"Oboe"},
+ {HPHW_NPROC,0x180,0x4,0x12,"Dragon"},
+ {HPHW_NPROC,0x181,0x4,0x13,"Chimera (890, 990, 992)"},
+ {HPHW_NPROC,0x182,0x4,0x91,"TNT 100 (891,T500)"},
+ {HPHW_NPROC,0x183,0x4,0x91,"TNT 120 (892,T520)"},
+ {HPHW_NPROC,0x184,0x4,0x91,"Jade 180 U (893,T540)"},
+ {HPHW_NPROC,0x1FF,0x4,0x91,"Hitachi X Processor"},
+ {HPHW_NPROC,0x200,0x4,0x81,"Cobra (720)"},
+ {HPHW_NPROC,0x201,0x4,0x81,"Coral (750)"},
+ {HPHW_NPROC,0x202,0x4,0x81,"King Cobra (730)"},
+ {HPHW_NPROC,0x203,0x4,0x81,"Hardball (735/99)"},
+ {HPHW_NPROC,0x204,0x4,0x81,"Coral II (755/99)"},
+ {HPHW_NPROC,0x205,0x4,0x81,"Coral II (755/125)"},
+ {HPHW_NPROC,0x205,0x4,0x91,"Snake Eagle "},
+ {HPHW_NPROC,0x206,0x4,0x81,"Snake Cheetah (735/130)"},
+ {HPHW_NPROC,0x280,0x4,0x81,"Nova Low (817, 827, 957, 957LX)"},
+ {HPHW_NPROC,0x281,0x4,0x81,"Nova High (837, 847, 857, 967, 967LX)"},
+ {HPHW_NPROC,0x282,0x4,0x81,"Nova8 (807, 917, 917LX, 927,927LX, 937, 937LX, 947,947LX)"},
+ {HPHW_NPROC,0x283,0x4,0x81,"Nova64 (867, 877, 977)"},
+ {HPHW_NPROC,0x284,0x4,0x81,"TNova (887, 897, 987)"},
+ {HPHW_NPROC,0x285,0x4,0x81,"TNova64"},
+ {HPHW_NPROC,0x286,0x4,0x91,"Hydra64 (Nova)"},
+ {HPHW_NPROC,0x287,0x4,0x91,"Hydra96 (Nova)"},
+ {HPHW_NPROC,0x288,0x4,0x81,"TNova96"},
+ {HPHW_NPROC,0x300,0x4,0x81,"Bushmaster (710)"},
+ {HPHW_NPROC,0x302,0x4,0x81,"Flounder (705)"},
+ {HPHW_NPROC,0x310,0x4,0x81,"Scorpio (715/50)"},
+ {HPHW_NPROC,0x311,0x4,0x81,"Scorpio Jr.(715/33)"},
+ {HPHW_NPROC,0x312,0x4,0x81,"Strider-50 (715S/50)"},
+ {HPHW_NPROC,0x313,0x4,0x81,"Strider-33 (715S/33)"},
+ {HPHW_NPROC,0x314,0x4,0x81,"Trailways-50 (715T/50)"},
+ {HPHW_NPROC,0x315,0x4,0x81,"Trailways-33 (715T/33)"},
+ {HPHW_NPROC,0x316,0x4,0x81,"Scorpio Sr.(715/75)"},
+ {HPHW_NPROC,0x317,0x4,0x81,"Scorpio 100 (715/100)"},
+ {HPHW_NPROC,0x318,0x4,0x81,"Spectra (725/50)"},
+ {HPHW_NPROC,0x319,0x4,0x81,"Spectra (725/75)"},
+ {HPHW_NPROC,0x320,0x4,0x81,"Spectra (725/100)"},
+ {HPHW_NPROC,0x401,0x4,0x81,"Pace (745i, 747i)"},
+ {HPHW_NPROC,0x402,0x4,0x81,"Sidewinder (742i)"},
+ {HPHW_NPROC,0x403,0x4,0x81,"Fast Pace"},
+ {HPHW_NPROC,0x480,0x4,0x81,"Orville (E23)"},
+ {HPHW_NPROC,0x481,0x4,0x81,"Wilbur (E25)"},
+ {HPHW_NPROC,0x482,0x4,0x81,"WB-80 (E35)"},
+ {HPHW_NPROC,0x483,0x4,0x81,"WB-96 (E45)"},
+ {HPHW_NPROC,0x484,0x4,0x81,"UL Proc L-100 (811/D210,D310)"},
+ {HPHW_NPROC,0x485,0x4,0x81,"UL Proc L-75 (801/D200)"},
+ {HPHW_NPROC,0x501,0x4,0x81,"Merlin L2 132 (9000/778/B132L)"},
+ {HPHW_NPROC,0x502,0x4,0x81,"Merlin L2 160 (9000/778/B160L)"},
+ {HPHW_NPROC,0x503,0x4,0x81,"Merlin L2+ 132 (9000/778/B132L)"},
+ {HPHW_NPROC,0x504,0x4,0x81,"Merlin L2+ 180 (9000/778/B180L)"},
+ {HPHW_NPROC,0x505,0x4,0x81,"Raven L2 132 (9000/778/C132L)"},
+ {HPHW_NPROC,0x506,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+ {HPHW_NPROC,0x507,0x4,0x81,"Raven L2 180 (9000/779/C180L)"},
+ {HPHW_NPROC,0x508,0x4,0x81,"Raven L2 160 (9000/779/C160L)"},
+ {HPHW_NPROC,0x509,0x4,0x81,"712/132 L2 Upgrade"},
+ {HPHW_NPROC,0x50A,0x4,0x81,"712/160 L2 Upgrade"},
+ {HPHW_NPROC,0x50B,0x4,0x81,"715/132 L2 Upgrade"},
+ {HPHW_NPROC,0x50C,0x4,0x81,"715/160 L2 Upgrade"},
+ {HPHW_NPROC,0x50D,0x4,0x81,"Rocky2 L2 120"},
+ {HPHW_NPROC,0x50E,0x4,0x81,"Rocky2 L2 150"},
+ {HPHW_NPROC,0x50F,0x4,0x81,"Anole L2 132 (744)"},
+ {HPHW_NPROC,0x510,0x4,0x81,"Anole L2 165 (744)"},
+ {HPHW_NPROC,0x511,0x4,0x81,"Kiji L2 132"},
+ {HPHW_NPROC,0x512,0x4,0x81,"UL L2 132 (803/D220,D320)"},
+ {HPHW_NPROC,0x513,0x4,0x81,"UL L2 160 (813/D220,D320)"},
+ {HPHW_NPROC,0x514,0x4,0x81,"Merlin Jr L2 132"},
+ {HPHW_NPROC,0x515,0x4,0x81,"Staccato L2 132"},
+ {HPHW_NPROC,0x516,0x4,0x81,"Staccato L2 180 (A Class 180)"},
+ {HPHW_NPROC,0x580,0x4,0x81,"KittyHawk DC2-100 (K100)"},
+ {HPHW_NPROC,0x581,0x4,0x91,"KittyHawk DC3-120 (K210)"},
+ {HPHW_NPROC,0x582,0x4,0x91,"KittyHawk DC3 100 (K400)"},
+ {HPHW_NPROC,0x583,0x4,0x91,"KittyHawk DC3 120 (K410)"},
+ {HPHW_NPROC,0x584,0x4,0x91,"LighteningHawk T120"},
+ {HPHW_NPROC,0x585,0x4,0x91,"SkyHawk 100"},
+ {HPHW_NPROC,0x586,0x4,0x91,"SkyHawk 120"},
+ {HPHW_NPROC,0x587,0x4,0x81,"UL Proc 1-way T'120"},
+ {HPHW_NPROC,0x588,0x4,0x91,"UL Proc 2-way T'120"},
+ {HPHW_NPROC,0x589,0x4,0x81,"UL Proc 1-way T'100 (821/D250,D350)"},
+ {HPHW_NPROC,0x58A,0x4,0x91,"UL Proc 2-way T'100 (831/D250,D350)"},
+ {HPHW_NPROC,0x58B,0x4,0x91,"KittyHawk DC2 100 (K200)"},
+ {HPHW_NPROC,0x58C,0x4,0x91,"ThunderHawk DC3- 120 1M (K220)"},
+ {HPHW_NPROC,0x58D,0x4,0x91,"ThunderHawk DC3 120 1M (K420)"},
+ {HPHW_NPROC,0x58E,0x4,0x81,"Raven 120 T'"},
+ {HPHW_NPROC,0x58F,0x4,0x91,"Mohawk 160 U 1M DC3 (K450)"},
+ {HPHW_NPROC,0x590,0x4,0x91,"Mohawk 180 U 1M DC3 (K460)"},
+ {HPHW_NPROC,0x591,0x4,0x91,"Mohawk 200 U 1M DC3"},
+ {HPHW_NPROC,0x592,0x4,0x81,"Raven 100 T'"},
+ {HPHW_NPROC,0x593,0x4,0x91,"FireHawk 160 U"},
+ {HPHW_NPROC,0x594,0x4,0x91,"FireHawk 180 U"},
+ {HPHW_NPROC,0x595,0x4,0x91,"FireHawk 220 U"},
+ {HPHW_NPROC,0x596,0x4,0x91,"FireHawk 240 U"},
+ {HPHW_NPROC,0x597,0x4,0x91,"SPP2000 processor"},
+ {HPHW_NPROC,0x598,0x4,0x81,"Raven U 230 (9000/780/C230)"},
+ {HPHW_NPROC,0x599,0x4,0x81,"Raven U 240 (9000/780/C240)"},
+ {HPHW_NPROC,0x59A,0x4,0x91,"Unlisted but reserved"},
+ {HPHW_NPROC,0x59A,0x4,0x81,"Unlisted but reserved"},
+ {HPHW_NPROC,0x59B,0x4,0x81,"Raven U 160 (9000/780/C160)"},
+ {HPHW_NPROC,0x59C,0x4,0x81,"Raven U 180 (9000/780/C180)"},
+ {HPHW_NPROC,0x59D,0x4,0x81,"Raven U 200 (9000/780/C200)"},
+ {HPHW_NPROC,0x59E,0x4,0x91,"ThunderHawk T' 120"},
+ {HPHW_NPROC,0x59F,0x4,0x91,"Raven U 180+ (9000/780)"},
+ {HPHW_NPROC,0x5A0,0x4,0x81,"UL 1w T120 1MB/1MB (841/D260,D360)"},
+ {HPHW_NPROC,0x5A1,0x4,0x91,"UL 2w T120 1MB/1MB (851/D260,D360)"},
+ {HPHW_NPROC,0x5A2,0x4,0x81,"UL 1w U160 512K/512K (861/D270,D370)"},
+ {HPHW_NPROC,0x5A3,0x4,0x91,"UL 2w U160 512K/512K (871/D270,D370)"},
+ {HPHW_NPROC,0x5A4,0x4,0x91,"Mohawk 160 U 1M DC3- (K250)"},
+ {HPHW_NPROC,0x5A5,0x4,0x91,"Mohawk 180 U 1M DC3- (K260)"},
+ {HPHW_NPROC,0x5A6,0x4,0x91,"Mohawk 200 U 1M DC3-"},
+ {HPHW_NPROC,0x5A7,0x4,0x81,"UL proc 1-way U160 1M/1M"},
+ {HPHW_NPROC,0x5A8,0x4,0x91,"UL proc 2-way U160 1M/1M"},
+ {HPHW_NPROC,0x5A9,0x4,0x81,"UL proc 1-way U180 1M/1M"},
+ {HPHW_NPROC,0x5AA,0x4,0x91,"UL proc 2-way U180 1M/1M"},
+ {HPHW_NPROC,0x5AB,0x4,0x91,"Obsolete"},
+ {HPHW_NPROC,0x5AB,0x4,0x81,"Obsolete"},
+ {HPHW_NPROC,0x5AC,0x4,0x91,"Obsolete"},
+ {HPHW_NPROC,0x5AC,0x4,0x81,"Obsolete"},
+ {HPHW_NPROC,0x5AD,0x4,0x91,"BraveHawk 180MHz DC3-"},
+ {HPHW_NPROC,0x5AE,0x4,0x91,"BraveHawk 200MHz DC3- (898/K370)"},
+ {HPHW_NPROC,0x5AF,0x4,0x91,"BraveHawk 220MHz DC3-"},
+ {HPHW_NPROC,0x5B0,0x4,0x91,"BraveHawk 180MHz DC3"},
+ {HPHW_NPROC,0x5B1,0x4,0x91,"BraveHawk 200MHz DC3 (899/K570)"},
+ {HPHW_NPROC,0x5B2,0x4,0x91,"BraveHawk 220MHz DC3"},
+ {HPHW_NPROC,0x5B3,0x4,0x91,"FireHawk 200"},
+ {HPHW_NPROC,0x5B4,0x4,0x91,"SPP2500"},
+ {HPHW_NPROC,0x5B5,0x4,0x91,"SummitHawk U+"},
+ {HPHW_NPROC,0x5B6,0x4,0x91,"DragonHawk U+ 240 DC3"},
+ {HPHW_NPROC,0x5B7,0x4,0x91,"DragonHawk U+ 240 DC3-"},
+ {HPHW_NPROC,0x5B8,0x4,0x91,"SPP2250 240 MHz"},
+ {HPHW_NPROC,0x5B9,0x4,0x81,"UL 1w U+/240 (350/550)"},
+ {HPHW_NPROC,0x5BA,0x4,0x91,"UL 2w U+/240 (350/550)"},
+ {HPHW_NPROC,0x5BB,0x4,0x81,"AllegroHigh W"},
+ {HPHW_NPROC,0x5BC,0x4,0x91,"AllegroLow W"},
+ {HPHW_NPROC,0x5BD,0x4,0x91,"Forte W 2-way"},
+ {HPHW_NPROC,0x5BE,0x4,0x91,"Prelude W"},
+ {HPHW_NPROC,0x5BF,0x4,0x91,"Forte W 4-way"},
+ {HPHW_NPROC,0x5C0,0x4,0x91,"M2250"},
+ {HPHW_NPROC,0x5C1,0x4,0x91,"M2500"},
+ {HPHW_NPROC,0x5C2,0x4,0x91,"Sonata 440"},
+ {HPHW_NPROC,0x5C3,0x4,0x91,"Sonata 360"},
+ {HPHW_NPROC,0x5C4,0x4,0x91,"Rhapsody 440"},
+ {HPHW_NPROC,0x5C5,0x4,0x91,"Rhapsody 360"},
+ {HPHW_NPROC,0x5C6,0x4,0x91,"Raven W 360 (9000/780)"},
+ {HPHW_NPROC,0x5C7,0x4,0x91,"Halfdome W 440"},
+ {HPHW_NPROC,0x5C8,0x4,0x81,"Lego 360 processor"},
+ {HPHW_NPROC,0x5C9,0x4,0x91,"Rhapsody DC- 440"},
+ {HPHW_NPROC,0x5CA,0x4,0x91,"Rhapsody DC- 360"},
+ {HPHW_NPROC,0x5CB,0x4,0x91,"Crescendo 440"},
+ {HPHW_NPROC,0x5CC,0x4,0x91,"Prelude W 440"},
+ {HPHW_NPROC,0x5CD,0x4,0x91,"SPP2600"},
+ {HPHW_NPROC,0x5CE,0x4,0x91,"M2600"},
+ {HPHW_NPROC,0x5CF,0x4,0x81,"Allegro W+"},
+ {HPHW_NPROC,0x5D0,0x4,0x81,"Kazoo W+"},
+ {HPHW_NPROC,0x5D1,0x4,0x91,"Forte W+ 2w"},
+ {HPHW_NPROC,0x5D2,0x4,0x91,"Forte W+ 4w"},
+ {HPHW_NPROC,0x5D3,0x4,0x91,"Prelude W+ 540"},
+ {HPHW_NPROC,0x5D4,0x4,0x91,"Duet W+"},
+ {HPHW_NPROC,0x5D5,0x4,0x91,"Crescendo 550"},
+ {HPHW_NPROC,0x5D6,0x4,0x81,"Crescendo DC- 440"},
+ {HPHW_NPROC,0x5D7,0x4,0x91,"Keystone W+"},
+ {HPHW_NPROC,0x5D8,0x4,0x91,"Rhapsody wave 2 W+ DC-"},
+ {HPHW_NPROC,0x5D9,0x4,0x91,"Rhapsody wave 2 W+"},
+ {HPHW_NPROC,0x5DA,0x4,0x91,"Marcato W+ DC-"},
+ {HPHW_NPROC,0x5DB,0x4,0x91,"Marcato W+"},
+ {HPHW_NPROC,0x5DC,0x4,0x91,"Allegro W2"},
+ {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
+ {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
+ {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
+ {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
+ {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
+ {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
+ {HPHW_NPROC,0x5E3,0x4,0x91,"Crescendo 750 W2"},
+ {HPHW_NPROC,0x5E4,0x4,0x91,"Keystone/Matterhorn W2 750"},
+ {HPHW_NPROC,0x5E5,0x4,0x91,"PowerBar W+"},
+ {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
+ {HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
+ {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
+ {HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
+ {HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
+ {HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
+ {HPHW_NPROC,0x602,0x4,0x81,"Gecko 100 (712/100)"},
+ {HPHW_NPROC,0x603,0x4,0x81,"Anole 64 (743/64)"},
+ {HPHW_NPROC,0x604,0x4,0x81,"Anole 100 (743/100)"},
+ {HPHW_NPROC,0x605,0x4,0x81,"Gecko 120 (712/120)"},
+ {HPHW_NPROC,0x606,0x4,0x81,"Gila 80"},
+ {HPHW_NPROC,0x607,0x4,0x81,"Gila 100"},
+ {HPHW_NPROC,0x608,0x4,0x81,"Gila 120"},
+ {HPHW_NPROC,0x609,0x4,0x81,"Scorpio-L 80"},
+ {HPHW_NPROC,0x60A,0x4,0x81,"Mirage Jr (715/64)"},
+ {HPHW_NPROC,0x60B,0x4,0x81,"Mirage 100"},
+ {HPHW_NPROC,0x60C,0x4,0x81,"Mirage 100+"},
+ {HPHW_NPROC,0x60D,0x4,0x81,"Electra 100"},
+ {HPHW_NPROC,0x60E,0x4,0x81,"Electra 120"},
+ {HPHW_NPROC,0x610,0x4,0x81,"Scorpio-L 100"},
+ {HPHW_NPROC,0x611,0x4,0x81,"Scorpio-L 120"},
+ {HPHW_NPROC,0x612,0x4,0x81,"Spectra-L 80"},
+ {HPHW_NPROC,0x613,0x4,0x81,"Spectra-L 100"},
+ {HPHW_NPROC,0x614,0x4,0x81,"Spectra-L 120"},
+ {HPHW_NPROC,0x615,0x4,0x81,"Piranha 100"},
+ {HPHW_NPROC,0x616,0x4,0x81,"Piranha 120"},
+ {HPHW_NPROC,0x617,0x4,0x81,"Jason 50"},
+ {HPHW_NPROC,0x618,0x4,0x81,"Jason 100"},
+ {HPHW_NPROC,0x619,0x4,0x81,"Mirage 80"},
+ {HPHW_NPROC,0x61A,0x4,0x81,"SAIC L-80"},
+ {HPHW_NPROC,0x61B,0x4,0x81,"Rocky1 L-60"},
+ {HPHW_NPROC,0x61C,0x4,0x81,"Anole T (743/T)"},
+ {HPHW_NPROC,0x67E,0x4,0x81,"Hitachi Tiny 80"},
+ {HPHW_NPROC,0x67F,0x4,0x81,"Hitachi Tiny 64"},
+ {HPHW_NPROC,0x700,0x4,0x91,"NEC Aska Processor"},
+ {HPHW_NPROC,0x880,0x4,0x91,"Orca Mako"},
+ {HPHW_NPROC,0x881,0x4,0x91,"Everest Mako"},
+ {HPHW_NPROC,0x882,0x4,0x91,"Rainier/Medel Mako Slow"},
+ {HPHW_NPROC,0x883,0x4,0x91,"Rainier/Medel Mako Fast"},
+ {HPHW_NPROC,0x884,0x4,0x91,"Mt. Hamilton"},
+ {HPHW_NPROC,0x885,0x4,0x91,"Mt. Hamilton DC-"},
+ {HPHW_NPROC,0x886,0x4,0x91,"Storm Peak Slow DC-"},
+ {HPHW_NPROC,0x887,0x4,0x91,"Storm Peak Slow"},
+ {HPHW_NPROC,0x888,0x4,0x91,"Storm Peak Fast DC-"},
+ {HPHW_NPROC,0x889,0x4,0x91,"Storm Peak Fast"},
+ {HPHW_NPROC,0x88A,0x4,0x91,"Crestone Peak"},
+ {HPHW_A_DIRECT, 0x004, 0x0000D, 0x00, "Arrakis MUX"},
+ {HPHW_A_DIRECT, 0x005, 0x0000D, 0x00, "Dyun Kiuh MUX"},
+ {HPHW_A_DIRECT, 0x006, 0x0000D, 0x00, "Baat Kiuh AP/MUX (40299B)"},
+ {HPHW_A_DIRECT, 0x007, 0x0000D, 0x00, "Dino AP"},
+ {HPHW_A_DIRECT, 0x009, 0x0000D, 0x00, "Solaris Direct Connect MUX (J2092A)"},
+ {HPHW_A_DIRECT, 0x00A, 0x0000D, 0x00, "Solaris RS-422/423 MUX (J2093A)"},
+ {HPHW_A_DIRECT, 0x00B, 0x0000D, 0x00, "Solaris RS-422/423 Quadriloops MUX"},
+ {HPHW_A_DIRECT, 0x00C, 0x0000D, 0x00, "Solaris Modem MUX (J2094A)"},
+ {HPHW_A_DIRECT, 0x00D, 0x0000D, 0x00, "Twins Direct Connect MUX"},
+ {HPHW_A_DIRECT, 0x00E, 0x0000D, 0x00, "Twins Modem MUX"},
+ {HPHW_A_DIRECT, 0x00F, 0x0000D, 0x00, "Nautilus RS-485"},
+ {HPHW_A_DIRECT, 0x010, 0x0000D, 0x00, "UltraLight CAP/MUX"},
+ {HPHW_A_DIRECT, 0x015, 0x0000D, 0x00, "Eole CAP/MUX"},
+ {HPHW_A_DIRECT, 0x024, 0x0000D, 0x00, "Sahp Kiuh AP/MUX"},
+ {HPHW_A_DIRECT, 0x034, 0x0000D, 0x00, "Sahp Kiuh Low AP/MUX"},
+ {HPHW_A_DIRECT, 0x044, 0x0000D, 0x00, "Sahp Baat Kiuh AP/MUX"},
+ {HPHW_A_DIRECT, 0x004, 0x0000E, 0x80, "Burgundy RS-232"},
+ {HPHW_A_DIRECT, 0x005, 0x0000E, 0x80, "Silverfox RS-232"},
+ {HPHW_A_DIRECT, 0x006, 0x0000E, 0x80, "Lego RS-232"},
+ {HPHW_A_DIRECT, 0x004, 0x0000F, 0x00, "Peacock Graphics"},
+ {HPHW_A_DIRECT, 0x004, 0x00014, 0x80, "Burgundy HIL"},
+ {HPHW_A_DIRECT, 0x005, 0x00014, 0x80, "Peacock HIL"},
+ {HPHW_A_DIRECT, 0x004, 0x00015, 0x80, "Leonardo"},
+ {HPHW_A_DIRECT, 0x004, 0x00016, 0x80, "HP-PB HRM"},
+ {HPHW_A_DIRECT, 0x004, 0x00017, 0x80, "HP-PB HRC"},
+ {HPHW_A_DIRECT, 0x004, 0x0003A, 0x80, "Skunk Centronics (28655A)"},
+ {HPHW_A_DIRECT, 0x024, 0x0003A, 0x80, "Sahp Kiuh Centronics"},
+ {HPHW_A_DIRECT, 0x044, 0x0003A, 0x80, "Sahp Baat Kiuh Centronics"},
+ {HPHW_A_DIRECT, 0x004, 0x0004E, 0x80, "AT&T DataKit (AMSO)"},
+ {HPHW_A_DIRECT, 0x004, 0x0009B, 0x80, "Test&Meas GSC HPIB"},
+ {HPHW_A_DIRECT, 0x004, 0x000A8, 0x00, "Rocky2-120 Front Keyboard"},
+ {HPHW_A_DIRECT, 0x005, 0x000A8, 0x00, "Rocky2-150 Front Keyboard"},
+ {HPHW_A_DIRECT, 0x004, 0x00101, 0x80, "Hitachi Console Module"},
+ {HPHW_A_DIRECT, 0x004, 0x00102, 0x80, "Hitachi Boot Module"},
+ {HPHW_A_DIRECT, 0x004, 0x00203, 0x80, "MELCO HBMLA MLAIT"},
+ {HPHW_A_DIRECT, 0x004, 0x00208, 0x80, "MELCO HBDPC"},
+ {HPHW_A_DIRECT, 0x004, 0x00300, 0x00, "DCI TWINAX TERM IO MUX"},
+ {HPHW_A_DMA, 0x004, 0x00039, 0x80, "Skunk SCSI (28655A)"},
+ {HPHW_A_DMA, 0x005, 0x00039, 0x80, "KittyHawk CSY Core SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00039, 0x80, "Diablo SCSI"},
+ {HPHW_A_DMA, 0x024, 0x00039, 0x80, "Sahp Kiuh SCSI"},
+ {HPHW_A_DMA, 0x034, 0x00039, 0x80, "Sahp Kiuh Low SCSI"},
+ {HPHW_A_DMA, 0x044, 0x00039, 0x80, "Sahp Baat Kiuh SCSI"},
+ {HPHW_A_DMA, 0x004, 0x0003B, 0x80, "Wizard SCSI"},
+ {HPHW_A_DMA, 0x005, 0x0003B, 0x80, "KittyHawk CSY Core FW-SCSI"},
+ {HPHW_A_DMA, 0x006, 0x0003B, 0x80, "Symbios EPIC FW-SCSI"},
+ {HPHW_A_DMA, 0x004, 0x00040, 0x80, "HP-PB Shazam HPIB (28650A)"},
+ {HPHW_A_DMA, 0x005, 0x00040, 0x80, "Burgundy HPIB"},
+ {HPHW_A_DMA, 0x004, 0x00041, 0x80, "HP-PB HP-FL"},
+ {HPHW_A_DMA, 0x004, 0x00042, 0x80, "HP-PB LoQuix HPIB (28650B)"},
+ {HPHW_A_DMA, 0x004, 0x00043, 0x80, "HP-PB Crypt LoQuix"},
+ {HPHW_A_DMA, 0x004, 0x00044, 0x80, "HP-PB Shazam GPIO (28651A)"},
+ {HPHW_A_DMA, 0x004, 0x00045, 0x80, "HP-PB LoQuix GPIO"},
+ {HPHW_A_DMA, 0x004, 0x00046, 0x80, "2-Port X.25 NIO_ACC (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x00047, 0x80, "4-Port X.25 NIO_ACC (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x0004B, 0x80, "LGB Control"},
+ {HPHW_A_DMA, 0x004, 0x0004C, 0x80, "Martian RTI (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x0004D, 0x80, "ACC Mux (AMSO)"},
+ {HPHW_A_DMA, 0x004, 0x00050, 0x80, "Lanbrusca 802.3 (36967A)"},
+ {HPHW_A_DMA, 0x004, 0x00056, 0x80, "HP-PB LoQuix FDDI"},
+ {HPHW_A_DMA, 0x004, 0x00057, 0x80, "HP-PB LoQuix FDDI (28670A)"},
+ {HPHW_A_DMA, 0x004, 0x0005E, 0x00, "Gecko Add-on Token Ring"},
+ {HPHW_A_DMA, 0x012, 0x00089, 0x80, "Barracuda Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x013, 0x00089, 0x80, "Bluefish Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00089, 0x80, "Shrike Add-on FW-SCSI"},
+ {HPHW_A_DMA, 0x015, 0x00089, 0x80, "KittyHawk GSY Core FW-SCSI"},
+ {HPHW_A_DMA, 0x017, 0x00089, 0x80, "Shrike Jade Add-on FW-SCSI (A3644A)"},
+ {HPHW_A_DMA, 0x01F, 0x00089, 0x80, "SkyHawk 100/120 FW-SCSI"},
+ {HPHW_A_DMA, 0x027, 0x00089, 0x80, "Piranha 100 FW-SCSI"},
+ {HPHW_A_DMA, 0x032, 0x00089, 0x80, "Raven T' Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03B, 0x00089, 0x80, "Raven U/L2 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03C, 0x00089, 0x80, "Merlin 132 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x03D, 0x00089, 0x80, "Merlin 160 Core FW-SCSI"},
+ {HPHW_A_DMA, 0x044, 0x00089, 0x80, "Mohawk Core FW-SCSI"},
+ {HPHW_A_DMA, 0x051, 0x00089, 0x80, "Firehawk FW-SCSI"},
+ {HPHW_A_DMA, 0x058, 0x00089, 0x80, "FireHawk 200 FW-SCSI"},
+ {HPHW_A_DMA, 0x05C, 0x00089, 0x80, "SummitHawk 230 Ultra-SCSI"},
+ {HPHW_A_DMA, 0x014, 0x00091, 0x80, "Baby Hugo Add-on Net FC (A3406A)"},
+ {HPHW_A_DMA, 0x020, 0x00091, 0x80, "Baby Jade Add-on Net FC (A3638A)"},
+ {HPHW_A_DMA, 0x004, 0x00092, 0x80, "GSC+ YLIASTER ATM"},
+ {HPHW_A_DMA, 0x004, 0x00095, 0x80, "Hamlyn GSC+ Network Card"},
+ {HPHW_A_DMA, 0x004, 0x00098, 0x80, "Lo-fat Emulator"},
+ {HPHW_A_DMA, 0x004, 0x0009A, 0x80, "GSC+ Venus ATM"},
+ {HPHW_A_DMA, 0x005, 0x0009A, 0x80, "GSC+ Samorobrive ATM"},
+ {HPHW_A_DMA, 0x004, 0x0009D, 0x80, "HP HSC-PCI Cards"},
+ {HPHW_A_DMA, 0x004, 0x0009E, 0x80, "Alaxis GSC+ 155Mb ATM"},
+ {HPHW_A_DMA, 0x005, 0x0009E, 0x80, "Alaxis GSC+ 622Mb ATM"},
+ {HPHW_A_DMA, 0x05C, 0x0009F, 0x80, "SummitHawk 230 USB"},
+ {HPHW_A_DMA, 0x05C, 0x000A0, 0x80, "SummitHawk 230 100BaseT"},
+ {HPHW_A_DMA, 0x015, 0x000A7, 0x80, "Baby Hugo Add-on mass FC (A3404A)"},
+ {HPHW_A_DMA, 0x018, 0x000A7, 0x80, "Mombasa GS Add-on mass FC (A3591)"},
+ {HPHW_A_DMA, 0x021, 0x000A7, 0x80, "Baby Jade Add-on mass FC (A3636A)"},
+ {HPHW_A_DMA, 0x004, 0x00201, 0x80, "MELCO HCMAP"},
+ {HPHW_A_DMA, 0x004, 0x00202, 0x80, "MELCO HBMLA MLAMA"},
+ {HPHW_A_DMA, 0x004, 0x00205, 0x80, "MELCO HBRFU"},
+ {HPHW_A_DMA, 0x004, 0x00380, 0x80, "Interphase NIO-FC"},
+ {HPHW_A_DMA, 0x004, 0x00381, 0x80, "Interphase NIO-ATM"},
+ {HPHW_A_DMA, 0x004, 0x00382, 0x80, "Interphase NIO-100BaseTX"},
+ {HPHW_BA, 0x004, 0x00070, 0x0, "Cobra Core BA"},
+ {HPHW_BA, 0x005, 0x00070, 0x0, "Coral Core BA"},
+ {HPHW_BA, 0x006, 0x00070, 0x0, "Bushmaster Core BA"},
+ {HPHW_BA, 0x007, 0x00070, 0x0, "Scorpio Core BA"},
+ {HPHW_BA, 0x008, 0x00070, 0x0, "Flounder Core BA"},
+ {HPHW_BA, 0x009, 0x00070, 0x0, "Outfield Core BA"},
+ {HPHW_BA, 0x00A, 0x00070, 0x0, "CoralII Core BA"},
+ {HPHW_BA, 0x00B, 0x00070, 0x0, "Scorpio Jr. Core BA"},
+ {HPHW_BA, 0x00C, 0x00070, 0x0, "Strider-50 Core BA"},
+ {HPHW_BA, 0x00D, 0x00070, 0x0, "Strider-33 Core BA"},
+ {HPHW_BA, 0x00E, 0x00070, 0x0, "Trailways-50 Core BA"},
+ {HPHW_BA, 0x00F, 0x00070, 0x0, "Trailways-33 Core BA"},
+ {HPHW_BA, 0x010, 0x00070, 0x0, "Pace Core BA"},
+ {HPHW_BA, 0x011, 0x00070, 0x0, "Sidewinder Core BA"},
+ {HPHW_BA, 0x019, 0x00070, 0x0, "Scorpio Sr. Core BA"},
+ {HPHW_BA, 0x020, 0x00070, 0x0, "Scorpio 100 Core BA"},
+ {HPHW_BA, 0x021, 0x00070, 0x0, "Spectra 50 Core BA"},
+ {HPHW_BA, 0x022, 0x00070, 0x0, "Spectra 75 Core BA"},
+ {HPHW_BA, 0x023, 0x00070, 0x0, "Spectra 100 Core BA"},
+ {HPHW_BA, 0x024, 0x00070, 0x0, "Fast Pace Core BA"},
+ {HPHW_BA, 0x026, 0x00070, 0x0, "CoralII Jaguar Core BA"},
+ {HPHW_BA, 0x004, 0x00076, 0x0, "Cobra EISA BA"},
+ {HPHW_BA, 0x005, 0x00076, 0x0, "Coral EISA BA"},
+ {HPHW_BA, 0x007, 0x00076, 0x0, "Scorpio EISA BA"},
+ {HPHW_BA, 0x00A, 0x00076, 0x0, "CoralII EISA BA"},
+ {HPHW_BA, 0x00B, 0x00076, 0x0, "Scorpio Jr. EISA BA"},
+ {HPHW_BA, 0x00C, 0x00076, 0x0, "Strider-50 Core EISA"},
+ {HPHW_BA, 0x00D, 0x00076, 0x0, "Strider-33 Core EISA"},
+ {HPHW_BA, 0x00E, 0x00076, 0x0, "Trailways-50 Core EISA"},
+ {HPHW_BA, 0x00F, 0x00076, 0x0, "Trailways-33 Core EISA"},
+ {HPHW_BA, 0x010, 0x00076, 0x0, "Pace Core EISA"},
+ {HPHW_BA, 0x019, 0x00076, 0x0, "Scorpio Sr. EISA BA"},
+ {HPHW_BA, 0x020, 0x00076, 0x0, "Scorpio 100 EISA BA"},
+ {HPHW_BA, 0x021, 0x00076, 0x0, "Spectra 50 EISA BA"},
+ {HPHW_BA, 0x022, 0x00076, 0x0, "Spectra 75 EISA BA"},
+ {HPHW_BA, 0x023, 0x00076, 0x0, "Spectra 100 EISA BA"},
+ {HPHW_BA, 0x026, 0x00076, 0x0, "CoralII Jaguar EISA BA"},
+ {HPHW_BA, 0x010, 0x00078, 0x0, "Pace VME BA"},
+ {HPHW_BA, 0x011, 0x00078, 0x0, "Sidewinder VME BA"},
+ {HPHW_BA, 0x01A, 0x00078, 0x0, "Anole 64 VME BA"},
+ {HPHW_BA, 0x01B, 0x00078, 0x0, "Anole 100 VME BA"},
+ {HPHW_BA, 0x024, 0x00078, 0x0, "Fast Pace VME BA"},
+ {HPHW_BA, 0x034, 0x00078, 0x0, "Anole T VME BA"},
+ {HPHW_BA, 0x04A, 0x00078, 0x0, "Anole L2 132 VME BA"},
+ {HPHW_BA, 0x04C, 0x00078, 0x0, "Anole L2 165 VME BA"},
+ {HPHW_BA, 0x011, 0x00081, 0x0, "WB-96 Core BA"},
+ {HPHW_BA, 0x012, 0x00081, 0x0, "Orville UX Core BA"},
+ {HPHW_BA, 0x013, 0x00081, 0x0, "Wilbur UX Core BA"},
+ {HPHW_BA, 0x014, 0x00081, 0x0, "WB-80 Core BA"},
+ {HPHW_BA, 0x015, 0x00081, 0x0, "KittyHawk GSY Core BA"},
+ {HPHW_BA, 0x016, 0x00081, 0x0, "Gecko Core BA"},
+ {HPHW_BA, 0x018, 0x00081, 0x0, "Gecko Optional BA"},
+ {HPHW_BA, 0x01A, 0x00081, 0x0, "Anole 64 Core BA"},
+ {HPHW_BA, 0x01B, 0x00081, 0x0, "Anole 100 Core BA"},
+ {HPHW_BA, 0x01C, 0x00081, 0x0, "Gecko 80 Core BA"},
+ {HPHW_BA, 0x01D, 0x00081, 0x0, "Gecko 100 Core BA"},
+ {HPHW_BA, 0x01F, 0x00081, 0x0, "SkyHawk 100/120 Core BA"},
+ {HPHW_BA, 0x027, 0x00081, 0x0, "Piranha 100 Core BA"},
+ {HPHW_BA, 0x028, 0x00081, 0x0, "Mirage Jr Core BA"},
+ {HPHW_BA, 0x029, 0x00081, 0x0, "Mirage Core BA"},
+ {HPHW_BA, 0x02A, 0x00081, 0x0, "Electra Core BA"},
+ {HPHW_BA, 0x02B, 0x00081, 0x0, "Mirage 80 Core BA"},
+ {HPHW_BA, 0x02C, 0x00081, 0x0, "Mirage 100+ Core BA"},
+ {HPHW_BA, 0x02E, 0x00081, 0x0, "UL 350 Lasi Core BA"},
+ {HPHW_BA, 0x02F, 0x00081, 0x0, "UL 550 Lasi Core BA"},
+ {HPHW_BA, 0x032, 0x00081, 0x0, "Raven T' Core BA"},
+ {HPHW_BA, 0x033, 0x00081, 0x0, "Anole T Core BA"},
+ {HPHW_BA, 0x034, 0x00081, 0x0, "SAIC L-80 Core BA"},
+ {HPHW_BA, 0x035, 0x00081, 0x0, "PCX-L2 712/132 Core BA"},
+ {HPHW_BA, 0x036, 0x00081, 0x0, "PCX-L2 712/160 Core BA"},
+ {HPHW_BA, 0x03B, 0x00081, 0x0, "Raven U/L2 Core BA"},
+ {HPHW_BA, 0x03C, 0x00081, 0x0, "Merlin 132 Core BA"},
+ {HPHW_BA, 0x03D, 0x00081, 0x0, "Merlin 160 Core BA"},
+ {HPHW_BA, 0x03E, 0x00081, 0x0, "Merlin+ 132 Core BA"},
+ {HPHW_BA, 0x03F, 0x00081, 0x0, "Merlin+ 180 Core BA"},
+ {HPHW_BA, 0x044, 0x00081, 0x0, "Mohawk Core BA"},
+ {HPHW_BA, 0x045, 0x00081, 0x0, "Rocky1 Core BA"},
+ {HPHW_BA, 0x046, 0x00081, 0x0, "Rocky2 120 Core BA"},
+ {HPHW_BA, 0x047, 0x00081, 0x0, "Rocky2 150 Core BA"},
+ {HPHW_BA, 0x04B, 0x00081, 0x0, "Anole L2 132 Core BA"},
+ {HPHW_BA, 0x04D, 0x00081, 0x0, "Anole L2 165 Core BA"},
+ {HPHW_BA, 0x04E, 0x00081, 0x0, "Kiji L2 132 Core BA"},
+ {HPHW_BA, 0x050, 0x00081, 0x0, "Merlin Jr 132 Core BA"},
+ {HPHW_BA, 0x051, 0x00081, 0x0, "Firehawk Core BA"},
+ {HPHW_BA, 0x056, 0x00081, 0x0, "Raven+ w SE FWSCSI Core BA"},
+ {HPHW_BA, 0x057, 0x00081, 0x0, "Raven+ w Diff FWSCSI Core BA"},
+ {HPHW_BA, 0x058, 0x00081, 0x0, "FireHawk 200 Core BA"},
+ {HPHW_BA, 0x05C, 0x00081, 0x0, "SummitHawk 230 Core BA"},
+ {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 132 Core BA"},
+ {HPHW_BA, 0x05E, 0x00081, 0x0, "Staccato 180 Core BA"},
+ {HPHW_BA, 0x05F, 0x00081, 0x0, "Staccato 180 Lasi"},
+ {HPHW_BA, 0x800, 0x00081, 0x0, "Hitachi Tiny 64 Core BA"},
+ {HPHW_BA, 0x801, 0x00081, 0x0, "Hitachi Tiny 80 Core BA"},
+ {HPHW_BA, 0x004, 0x0008B, 0x0, "Anole Optional PCMCIA BA"},
+ {HPHW_BA, 0x004, 0x0008E, 0x0, "GSC ITR Wax BA"},
+ {HPHW_BA, 0x00C, 0x0008E, 0x0, "Gecko Optional Wax BA"},
+ {HPHW_BA, 0x010, 0x0008E, 0x0, "Pace Wax BA"},
+ {HPHW_BA, 0x011, 0x0008E, 0x0, "SuperPace Wax BA"},
+ {HPHW_BA, 0x012, 0x0008E, 0x0, "Mirage Jr Wax BA"},
+ {HPHW_BA, 0x013, 0x0008E, 0x0, "Mirage Wax BA"},
+ {HPHW_BA, 0x014, 0x0008E, 0x0, "Electra Wax BA"},
+ {HPHW_BA, 0x017, 0x0008E, 0x0, "Raven Backplane Wax BA"},
+ {HPHW_BA, 0x01E, 0x0008E, 0x0, "Raven T' Wax BA"},
+ {HPHW_BA, 0x01F, 0x0008E, 0x0, "SkyHawk Wax BA"},
+ {HPHW_BA, 0x023, 0x0008E, 0x0, "Rocky1 Wax BA"},
+ {HPHW_BA, 0x02B, 0x0008E, 0x0, "Mirage 80 Wax BA"},
+ {HPHW_BA, 0x02C, 0x0008E, 0x0, "Mirage 100+ Wax BA"},
+ {HPHW_BA, 0x030, 0x0008E, 0x0, "UL 350 Core Wax BA"},
+ {HPHW_BA, 0x031, 0x0008E, 0x0, "UL 550 Core Wax BA"},
+ {HPHW_BA, 0x034, 0x0008E, 0x0, "SAIC L-80 Wax BA"},
+ {HPHW_BA, 0x03A, 0x0008E, 0x0, "Merlin+ Wax BA"},
+ {HPHW_BA, 0x040, 0x0008E, 0x0, "Merlin 132 Wax BA"},
+ {HPHW_BA, 0x041, 0x0008E, 0x0, "Merlin 160 Wax BA"},
+ {HPHW_BA, 0x043, 0x0008E, 0x0, "Merlin 132/160 Wax BA"},
+ {HPHW_BA, 0x052, 0x0008E, 0x0, "Raven+ Hi Power Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x054, 0x0008E, 0x0, "Raven+ Lo Power Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x059, 0x0008E, 0x0, "FireHawk 200 Wax BA"},
+ {HPHW_BA, 0x05A, 0x0008E, 0x0, "Raven+ L2 Backplane w/EISA Wax BA"},
+ {HPHW_BA, 0x05D, 0x0008E, 0x0, "SummitHawk Wax BA"},
+ {HPHW_BA, 0x800, 0x0008E, 0x0, "Hitachi Tiny 64 Wax BA"},
+ {HPHW_BA, 0x801, 0x0008E, 0x0, "Hitachi Tiny 80 Wax BA"},
+ {HPHW_BA, 0x011, 0x00090, 0x0, "SuperPace Wax EISA BA"},
+ {HPHW_BA, 0x017, 0x00090, 0x0, "Raven Backplane Wax EISA BA"},
+ {HPHW_BA, 0x01E, 0x00090, 0x0, "Raven T' Wax EISA BA"},
+ {HPHW_BA, 0x01F, 0x00090, 0x0, "SkyHawk 100/120 Wax EISA BA"},
+ {HPHW_BA, 0x027, 0x00090, 0x0, "Piranha 100 Wax EISA BA"},
+ {HPHW_BA, 0x028, 0x00090, 0x0, "Mirage Jr Wax EISA BA"},
+ {HPHW_BA, 0x029, 0x00090, 0x0, "Mirage Wax EISA BA"},
+ {HPHW_BA, 0x02A, 0x00090, 0x0, "Electra Wax EISA BA"},
+ {HPHW_BA, 0x02B, 0x00090, 0x0, "Mirage 80 Wax EISA BA"},
+ {HPHW_BA, 0x02C, 0x00090, 0x0, "Mirage 100+ Wax EISA BA"},
+ {HPHW_BA, 0x030, 0x00090, 0x0, "UL 350 Wax EISA BA"},
+ {HPHW_BA, 0x031, 0x00090, 0x0, "UL 550 Wax EISA BA"},
+ {HPHW_BA, 0x034, 0x00090, 0x0, "SAIC L-80 Wax EISA BA"},
+ {HPHW_BA, 0x03A, 0x00090, 0x0, "Merlin+ Wax EISA BA"},
+ {HPHW_BA, 0x040, 0x00090, 0x0, "Merlin 132 Wax EISA BA"},
+ {HPHW_BA, 0x041, 0x00090, 0x0, "Merlin 160 Wax EISA BA"},
+ {HPHW_BA, 0x043, 0x00090, 0x0, "Merlin 132/160 Wax EISA BA"},
+ {HPHW_BA, 0x052, 0x00090, 0x0, "Raven Hi Power Backplane Wax EISA BA"},
+ {HPHW_BA, 0x054, 0x00090, 0x0, "Raven Lo Power Backplane Wax EISA BA"},
+ {HPHW_BA, 0x059, 0x00090, 0x0, "FireHawk 200 Wax EISA BA"},
+ {HPHW_BA, 0x05A, 0x00090, 0x0, "Raven L2 Backplane Wax EISA BA"},
+ {HPHW_BA, 0x05D, 0x00090, 0x0, "SummitHawk Wax EISA BA"},
+ {HPHW_BA, 0x800, 0x00090, 0x0, "Hitachi Tiny 64 Wax EISA BA"},
+ {HPHW_BA, 0x801, 0x00090, 0x0, "Hitachi Tiny 80 Wax EISA BA"},
+ {HPHW_BA, 0x01A, 0x00093, 0x0, "Anole 64 TIMI BA"},
+ {HPHW_BA, 0x01B, 0x00093, 0x0, "Anole 100 TIMI BA"},
+ {HPHW_BA, 0x034, 0x00093, 0x0, "Anole T TIMI BA"},
+ {HPHW_BA, 0x04A, 0x00093, 0x0, "Anole L2 132 TIMI BA"},
+ {HPHW_BA, 0x04C, 0x00093, 0x0, "Anole L2 165 TIMI BA"},
+ {HPHW_BA, 0x582, 0x000A5, 0x00, "Epic PCI Bridge"},
+ {HPHW_BCPORT, 0x504, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
+ {HPHW_BCPORT, 0x505, 0x00000, 0x00, "Phantom PseudoBC GSC+ Port"},
+ {HPHW_BCPORT, 0x503, 0x0000C, 0x00, "Java BC GSC+ Port"},
+ {HPHW_BCPORT, 0x57F, 0x0000C, 0x00, "Hitachi Ghostview GSC+ Port"},
+ {HPHW_BCPORT, 0x501, 0x0000C, 0x00, "U2-IOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x502, 0x0000C, 0x00, "Uturn-IOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x780, 0x0000C, 0x00, "Astro BC Ropes Port"},
+ {HPHW_BCPORT, 0x506, 0x0000C, 0x00, "NEC-IOS BC HSC Port"},
+ {HPHW_BCPORT, 0x004, 0x0000C, 0x00, "Cheetah BC SMB Port"},
+ {HPHW_BCPORT, 0x006, 0x0000C, 0x00, "Cheetah BC MID_BUS Port"},
+ {HPHW_BCPORT, 0x005, 0x0000C, 0x00, "Condor BC MID_BUS Port"},
+ {HPHW_BCPORT, 0x100, 0x0000C, 0x00, "Condor BC HP-PB Port"},
+ {HPHW_BCPORT, 0x184, 0x0000C, 0x00, "Summit BC Port"},
+ {HPHW_BCPORT, 0x101, 0x0000C, 0x00, "Summit BC HP-PB Port"},
+ {HPHW_BCPORT, 0x102, 0x0000C, 0x00, "HP-PB Port (prefetch)"},
+ {HPHW_BCPORT, 0x500, 0x0000C, 0x00, "Gecko BOA BC GSC+ Port"},
+ {HPHW_BCPORT, 0x103, 0x0000C, 0x00, "Gecko BOA BC HP-PB Port"},
+ {HPHW_BCPORT, 0x507, 0x0000C, 0x00, "Keyaki BC GSC+ Port"},
+ {HPHW_BCPORT, 0x508, 0x0000C, 0x00, "Keyaki-DX BC GSC+ Port"},
+ {HPHW_BCPORT, 0x584, 0x0000C, 0x10, "DEW BC Runway Port"},
+ {HPHW_BCPORT, 0x800, 0x0000C, 0x10, "DEW BC Merced Port"},
+ {HPHW_BCPORT, 0x801, 0x0000C, 0x10, "SMC Bus Interface Merced Bus0"},
+ {HPHW_BCPORT, 0x802, 0x0000C, 0x10, "SMC Bus Interface Merced Bus1"},
+ {HPHW_BCPORT, 0x803, 0x0000C, 0x10, "IKE I/O BC Merced Port"},
+ {HPHW_BCPORT, 0x781, 0x0000C, 0x00, "IKE I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x804, 0x0000C, 0x10, "REO I/O BC Merced Port"},
+ {HPHW_BCPORT, 0x782, 0x0000C, 0x00, "REO I/O BC Ropes Port"},
+ {HPHW_BCPORT, 0x784, 0x0000C, 0x00, "Pluto I/O BC Ropes Port"},
+ {HPHW_BRIDGE, 0x680, 0x0000A, 0x00, "Dino PCI Bridge"},
+ {HPHW_BRIDGE, 0x682, 0x0000A, 0x00, "Cujo PCI Bridge"},
+ {HPHW_BRIDGE, 0x782, 0x0000A, 0x00, "Elroy PCI Bridge"},
+ {HPHW_BRIDGE, 0x583, 0x000A5, 0x00, "Saga PCI Bridge"},
+ {HPHW_BRIDGE, 0x783, 0x0000A, 0x00, "Mercury PCI Bridge"},
+ {HPHW_BRIDGE, 0x784, 0x0000A, 0x00, "Quicksilver AGP Bridge"},
+ {HPHW_B_DMA, 0x004, 0x00018, 0x00, "Parallel I/O"},
+ {HPHW_B_DMA, 0x004, 0x00019, 0x00, "Parallel RDB"},
+ {HPHW_B_DMA, 0x004, 0x00020, 0x80, "MID_BUS PSI"},
+ {HPHW_B_DMA, 0x004, 0x0002F, 0x80, "HP-PB Transit PSI (36960A)"},
+ {HPHW_B_DMA, 0x008, 0x00051, 0x80, "HP-PB Transit 802.3"},
+ {HPHW_B_DMA, 0x004, 0x00052, 0x80, "Miura LAN/Console (J2146A)"},
+ {HPHW_B_DMA, 0x008, 0x00058, 0x80, "HP-PB Transit 802.4"},
+ {HPHW_B_DMA, 0x005, 0x00060, 0x80, "KittyHawk CSY Core LAN/Console"},
+ {HPHW_B_DMA, 0x014, 0x00060, 0x80, "Diablo LAN/Console"},
+ {HPHW_B_DMA, 0x054, 0x00060, 0x80, "Countach LAN/Console"},
+ {HPHW_B_DMA, 0x004, 0x00094, 0x80, "KittyHawk GSC+ Exerciser"},
+ {HPHW_B_DMA, 0x004, 0x00100, 0x80, "HP-PB HF Interface"},
+ {HPHW_B_DMA, 0x000, 0x00206, 0x80, "MELCO HMPHA"},
+ {HPHW_B_DMA, 0x005, 0x00206, 0x80, "MELCO HMPHA_10"},
+ {HPHW_B_DMA, 0x006, 0x00206, 0x80, "MELCO HMQHA"},
+ {HPHW_B_DMA, 0x007, 0x00206, 0x80, "MELCO HMQHA_10"},
+ {HPHW_B_DMA, 0x004, 0x00207, 0x80, "MELCO HNDWA MDWS-70"},
+ {HPHW_CIO, 0x004, 0x00010, 0x00, "VLSI CIO"},
+ {HPHW_CIO, 0x005, 0x00010, 0x00, "Silverfox CIO"},
+ {HPHW_CIO, 0x006, 0x00010, 0x00, "Emerald CIO"},
+ {HPHW_CIO, 0x008, 0x00010, 0x00, "Discrete CIO"},
+ {HPHW_CONSOLE, 0x004, 0x0001C, 0x00, "Cheetah console"},
+ {HPHW_CONSOLE, 0x005, 0x0001C, 0x00, "Emerald console"},
+ {HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
+ {HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
+ {HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
+ {HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
+ {HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
+ {HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
+ {HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
+ {HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
+ {HPHW_FIO, 0x004, 0x00071, 0x0, "Cobra Core SCSI"},
+ {HPHW_FIO, 0x005, 0x00071, 0x0, "Coral Core SCSI"},
+ {HPHW_FIO, 0x006, 0x00071, 0x0, "Bushmaster Core SCSI"},
+ {HPHW_FIO, 0x007, 0x00071, 0x0, "Scorpio Core SCSI"},
+ {HPHW_FIO, 0x008, 0x00071, 0x0, "Flounder Core SCSI"},
+ {HPHW_FIO, 0x009, 0x00071, 0x0, "Outfield Core SCSI"},
+ {HPHW_FIO, 0x00A, 0x00071, 0x0, "CoralII Core SCSI"},
+ {HPHW_FIO, 0x00B, 0x00071, 0x0, "Scorpio Jr. Core SCSI"},
+ {HPHW_FIO, 0x00C, 0x00071, 0x0, "Strider-50 Core SCSI"},
+ {HPHW_FIO, 0x00D, 0x00071, 0x0, "Strider-33 Core SCSI"},
+ {HPHW_FIO, 0x00E, 0x00071, 0x0, "Trailways-50 Core SCSI"},
+ {HPHW_FIO, 0x00F, 0x00071, 0x0, "Trailways-33 Core SCSI"},
+ {HPHW_FIO, 0x010, 0x00071, 0x0, "Pace Core SCSI"},
+ {HPHW_FIO, 0x011, 0x00071, 0x0, "Sidewinder Core SCSI"},
+ {HPHW_FIO, 0x019, 0x00071, 0x0, "Scorpio Sr. Core SCSI"},
+ {HPHW_FIO, 0x020, 0x00071, 0x0, "Scorpio 100 Core SCSI"},
+ {HPHW_FIO, 0x021, 0x00071, 0x0, "Spectra 50 Core SCSI"},
+ {HPHW_FIO, 0x022, 0x00071, 0x0, "Spectra 75 Core SCSI"},
+ {HPHW_FIO, 0x023, 0x00071, 0x0, "Spectra 100 Core SCSI"},
+ {HPHW_FIO, 0x024, 0x00071, 0x0, "Fast Pace Core SCSI"},
+ {HPHW_FIO, 0x026, 0x00071, 0x0, "CoralII Jaguar Core SCSI"},
+ {HPHW_FIO, 0x004, 0x00072, 0x0, "Cobra Core LAN (802.3)"},
+ {HPHW_FIO, 0x005, 0x00072, 0x0, "Coral Core LAN (802.3)"},
+ {HPHW_FIO, 0x006, 0x00072, 0x0, "Bushmaster Core LAN (802.3)"},
+ {HPHW_FIO, 0x007, 0x00072, 0x0, "Scorpio Core LAN (802.3)"},
+ {HPHW_FIO, 0x008, 0x00072, 0x0, "Flounder Core LAN (802.3)"},
+ {HPHW_FIO, 0x009, 0x00072, 0x0, "Outfield Core LAN (802.3)"},
+ {HPHW_FIO, 0x00A, 0x00072, 0x0, "CoralII Core LAN (802.3)"},
+ {HPHW_FIO, 0x00B, 0x00072, 0x0, "Scorpio Jr. Core LAN (802.3)"},
+ {HPHW_FIO, 0x00C, 0x00072, 0x0, "Strider-50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00D, 0x00072, 0x0, "Strider-33 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00E, 0x00072, 0x0, "Trailways-50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x00F, 0x00072, 0x0, "Trailways-33 Core LAN (802.3)"},
+ {HPHW_FIO, 0x010, 0x00072, 0x0, "Pace Core LAN (802.3)"},
+ {HPHW_FIO, 0x011, 0x00072, 0x0, "Sidewinder Core LAN (802.3)"},
+ {HPHW_FIO, 0x019, 0x00072, 0x0, "Scorpio Sr. Core LAN (802.3)"},
+ {HPHW_FIO, 0x020, 0x00072, 0x0, "Scorpio 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x021, 0x00072, 0x0, "Spectra 50 Core LAN (802.3)"},
+ {HPHW_FIO, 0x022, 0x00072, 0x0, "Spectra 75 Core LAN (802.3)"},
+ {HPHW_FIO, 0x023, 0x00072, 0x0, "Spectra 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x024, 0x00072, 0x0, "Fast Pace Core LAN (802.3)"},
+ {HPHW_FIO, 0x026, 0x00072, 0x0, "CoralII Jaguar Core LAN (802.3)"},
+ {HPHW_FIO, 0x004, 0x00073, 0x0, "Cobra Core HIL"},
+ {HPHW_FIO, 0x005, 0x00073, 0x0, "Coral Core HIL"},
+ {HPHW_FIO, 0x006, 0x00073, 0x0, "Bushmaster Core HIL"},
+ {HPHW_FIO, 0x007, 0x00073, 0x0, "Scorpio Core HIL"},
+ {HPHW_FIO, 0x008, 0x00073, 0x0, "Flounder Core HIL"},
+ {HPHW_FIO, 0x009, 0x00073, 0x0, "Outfield Core HIL"},
+ {HPHW_FIO, 0x00A, 0x00073, 0x0, "CoralII Core HIL"},
+ {HPHW_FIO, 0x00B, 0x00073, 0x0, "Scorpio Jr. Core HIL"},
+ {HPHW_FIO, 0x00C, 0x00073, 0x0, "Strider-50 Core HIL"},
+ {HPHW_FIO, 0x00D, 0x00073, 0x0, "Strider-33 Core HIL"},
+ {HPHW_FIO, 0x00E, 0x00073, 0x0, "Trailways-50 Core HIL"},
+ {HPHW_FIO, 0x00F, 0x00073, 0x0, "Trailways-33 Core HIL"},
+ {HPHW_FIO, 0x010, 0x00073, 0x0, "Pace Core HIL"},
+ {HPHW_FIO, 0x011, 0x00073, 0xcc, "SuperPace Wax HIL"},
+ {HPHW_FIO, 0x012, 0x00073, 0x0, "Mirage Jr Wax HIL"},
+ {HPHW_FIO, 0x013, 0x00073, 0x0, "Mirage 100 Wax HIL"},
+ {HPHW_FIO, 0x014, 0x00073, 0x0, "Electra Wax HIL"},
+ {HPHW_FIO, 0x017, 0x00073, 0x0, "Raven Backplane Wax HIL"},
+ {HPHW_FIO, 0x019, 0x00073, 0x0, "Scorpio Sr. Core HIL"},
+ {HPHW_FIO, 0x01E, 0x00073, 0x0, "Raven T' Wax HIL"},
+ {HPHW_FIO, 0x01F, 0x00073, 0x0, "SkyHawk 100/120 Wax HIL"},
+ {HPHW_FIO, 0x020, 0x00073, 0x0, "Scorpio 100 Core HIL"},
+ {HPHW_FIO, 0x021, 0x00073, 0x0, "Spectra 50 Core HIL"},
+ {HPHW_FIO, 0x022, 0x00073, 0x0, "Spectra 75 Core HIL"},
+ {HPHW_FIO, 0x023, 0x00073, 0x0, "Spectra 100 Core HIL"},
+ {HPHW_FIO, 0x024, 0x00073, 0x0, "Fast Pace Core HIL"},
+ {HPHW_FIO, 0x026, 0x00073, 0x0, "CoralII Jaguar Core HIL"},
+ {HPHW_FIO, 0x02B, 0x00073, 0x0, "Mirage 80 Wax HIL"},
+ {HPHW_FIO, 0x02C, 0x00073, 0x0, "Mirage 100+ Wax HIL"},
+ {HPHW_FIO, 0x03A, 0x00073, 0x0, "Merlin+ Wax HIL"},
+ {HPHW_FIO, 0x040, 0x00073, 0x0, "Merlin 132 Wax HIL"},
+ {HPHW_FIO, 0x041, 0x00073, 0x0, "Merlin 160 Wax HIL"},
+ {HPHW_FIO, 0x043, 0x00073, 0x0, "Merlin 132/160 Wax HIL"},
+ {HPHW_FIO, 0x052, 0x00073, 0x0, "Raven+ Hi Power Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x053, 0x00073, 0x0, "Raven+ Hi Power Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x054, 0x00073, 0x0, "Raven+ Lo Power Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x055, 0x00073, 0x0, "Raven+ Lo Power Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x059, 0x00073, 0x0, "FireHawk 200 Wax HIL"},
+ {HPHW_FIO, 0x05A, 0x00073, 0x0, "Raven+ L2 Backplane w/EISA Wax HIL"},
+ {HPHW_FIO, 0x05B, 0x00073, 0x0, "Raven+ L2 Backplane wo/EISA Wax HIL"},
+ {HPHW_FIO, 0x05D, 0x00073, 0x0, "SummitHawk Wax HIL"},
+ {HPHW_FIO, 0x800, 0x00073, 0x0, "Hitachi Tiny 64 Wax HIL"},
+ {HPHW_FIO, 0x801, 0x00073, 0x0, "Hitachi Tiny 80 Wax HIL"},
+ {HPHW_FIO, 0x004, 0x00074, 0x0, "Cobra Core Centronics"},
+ {HPHW_FIO, 0x005, 0x00074, 0x0, "Coral Core Centronics"},
+ {HPHW_FIO, 0x006, 0x00074, 0x0, "Bushmaster Core Centronics"},
+ {HPHW_FIO, 0x007, 0x00074, 0x0, "Scorpio Core Centronics"},
+ {HPHW_FIO, 0x008, 0x00074, 0x0, "Flounder Core Centronics"},
+ {HPHW_FIO, 0x009, 0x00074, 0x0, "Outfield Core Centronics"},
+ {HPHW_FIO, 0x00A, 0x00074, 0x0, "CoralII Core Centronics"},
+ {HPHW_FIO, 0x00B, 0x00074, 0x0, "Scorpio Jr. Core Centronics"},
+ {HPHW_FIO, 0x00C, 0x00074, 0x0, "Strider-50 Core Centronics"},
+ {HPHW_FIO, 0x00D, 0x00074, 0x0, "Strider-33 Core Centronics"},
+ {HPHW_FIO, 0x00E, 0x00074, 0x0, "Trailways-50 Core Centronics"},
+ {HPHW_FIO, 0x00F, 0x00074, 0x0, "Trailways-33 Core Centronics"},
+ {HPHW_FIO, 0x010, 0x00074, 0x0, "Pace Core Centronics"},
+ {HPHW_FIO, 0x011, 0x00074, 0x0, "Sidewinder Core Centronics"},
+ {HPHW_FIO, 0x015, 0x00074, 0x0, "KittyHawk GSY Core Centronics"},
+ {HPHW_FIO, 0x016, 0x00074, 0x0, "Gecko Core Centronics"},
+ {HPHW_FIO, 0x019, 0x00074, 0x0, "Scorpio Sr. Core Centronics"},
+ {HPHW_FIO, 0x01A, 0x00074, 0x0, "Anole 64 Core Centronics"},
+ {HPHW_FIO, 0x01B, 0x00074, 0x0, "Anole 100 Core Centronics"},
+ {HPHW_FIO, 0x01C, 0x00074, 0x0, "Gecko 80 Core Centronics"},
+ {HPHW_FIO, 0x01D, 0x00074, 0x0, "Gecko 100 Core Centronics"},
+ {HPHW_FIO, 0x01F, 0x00074, 0x0, "SkyHawk 100/120 Core Centronics"},
+ {HPHW_FIO, 0x020, 0x00074, 0x0, "Scorpio 100 Core Centronics"},
+ {HPHW_FIO, 0x021, 0x00074, 0x0, "Spectra 50 Core Centronics"},
+ {HPHW_FIO, 0x022, 0x00074, 0x0, "Spectra 75 Core Centronics"},
+ {HPHW_FIO, 0x023, 0x00074, 0x0, "Spectra 100 Core Centronics"},
+ {HPHW_FIO, 0x024, 0x00074, 0x0, "Fast Pace Core Centronics"},
+ {HPHW_FIO, 0x026, 0x00074, 0x0, "CoralII Jaguar Core Centronics"},
+ {HPHW_FIO, 0x027, 0x00074, 0x0, "Piranha 100 Core Centronics"},
+ {HPHW_FIO, 0x028, 0x00074, 0x0, "Mirage Jr Core Centronics"},
+ {HPHW_FIO, 0x029, 0x00074, 0x0, "Mirage Core Centronics"},
+ {HPHW_FIO, 0x02A, 0x00074, 0x0, "Electra Core Centronics"},
+ {HPHW_FIO, 0x02B, 0x00074, 0x0, "Mirage 80 Core Centronics"},
+ {HPHW_FIO, 0x02C, 0x00074, 0x0, "Mirage 100+ Core Centronics"},
+ {HPHW_FIO, 0x02E, 0x00074, 0x0, "UL 350 Core Centronics"},
+ {HPHW_FIO, 0x02F, 0x00074, 0x0, "UL 550 Core Centronics"},
+ {HPHW_FIO, 0x032, 0x00074, 0x0, "Raven T' Core Centronics"},
+ {HPHW_FIO, 0x033, 0x00074, 0x0, "Anole T Core Centronics"},
+ {HPHW_FIO, 0x034, 0x00074, 0x0, "SAIC L-80 Core Centronics"},
+ {HPHW_FIO, 0x035, 0x00074, 0x0, "PCX-L2 712/132 Core Centronics"},
+ {HPHW_FIO, 0x036, 0x00074, 0x0, "PCX-L2 712/160 Core Centronics"},
+ {HPHW_FIO, 0x03B, 0x00074, 0x0, "Raven U/L2 Core Centronics"},
+ {HPHW_FIO, 0x03C, 0x00074, 0x0, "Merlin 132 Core Centronics"},
+ {HPHW_FIO, 0x03D, 0x00074, 0x0, "Merlin 160 Core Centronics"},
+ {HPHW_FIO, 0x03E, 0x00074, 0x0, "Merlin+ 132 Core Centronics"},
+ {HPHW_FIO, 0x03F, 0x00074, 0x0, "Merlin+ 180 Core Centronics"},
+ {HPHW_FIO, 0x044, 0x00074, 0x0, "Mohawk Core Centronics"},
+ {HPHW_FIO, 0x045, 0x00074, 0x0, "Rocky1 Core Centronics"},
+ {HPHW_FIO, 0x046, 0x00074, 0x0, "Rocky2 120 Core Centronics"},
+ {HPHW_FIO, 0x047, 0x00074, 0x0, "Rocky2 150 Core Centronics"},
+ {HPHW_FIO, 0x04B, 0x00074, 0x0, "Anole L2 132 Core Centronics"},
+ {HPHW_FIO, 0x04D, 0x00074, 0x0, "Anole L2 165 Core Centronics"},
+ {HPHW_FIO, 0x050, 0x00074, 0x0, "Merlin Jr 132 Core Centronics"},
+ {HPHW_FIO, 0x051, 0x00074, 0x0, "Firehawk Core Centronics"},
+ {HPHW_FIO, 0x056, 0x00074, 0x0, "Raven+ w SE FWSCSI Core Centronics"},
+ {HPHW_FIO, 0x057, 0x00074, 0x0, "Raven+ w Diff FWSCSI Core Centronics"},
+ {HPHW_FIO, 0x058, 0x00074, 0x0, "FireHawk 200 Core Centronics"},
+ {HPHW_FIO, 0x05C, 0x00074, 0x0, "SummitHawk 230 Core Centronics"},
+ {HPHW_FIO, 0x800, 0x00074, 0x0, "Hitachi Tiny 64 Core Centronics"},
+ {HPHW_FIO, 0x801, 0x00074, 0x0, "Hitachi Tiny 80 Core Centronics"},
+ {HPHW_FIO, 0x004, 0x00075, 0x0, "Cobra Core RS-232"},
+ {HPHW_FIO, 0x005, 0x00075, 0x0, "Coral Core RS-232"},
+ {HPHW_FIO, 0x006, 0x00075, 0x0, "Bushmaster Core RS-232"},
+ {HPHW_FIO, 0x007, 0x00075, 0x0, "Scorpio Core RS-232"},
+ {HPHW_FIO, 0x008, 0x00075, 0x0, "Flounder Core RS-232"},
+ {HPHW_FIO, 0x009, 0x00075, 0x0, "Outfield Core RS-232"},
+ {HPHW_FIO, 0x00A, 0x00075, 0x0, "CoralII Core RS-232"},
+ {HPHW_FIO, 0x00B, 0x00075, 0x0, "Scorpio Jr. Core RS-232"},
+ {HPHW_FIO, 0x00C, 0x00075, 0x0, "Strider-50 Core RS-232"},
+ {HPHW_FIO, 0x00D, 0x00075, 0x0, "Strider-33 Core RS-232"},
+ {HPHW_FIO, 0x00E, 0x00075, 0x0, "Trailways-50 Core RS-232"},
+ {HPHW_FIO, 0x00F, 0x00075, 0x0, "Trailways-33 Core RS-232"},
+ {HPHW_FIO, 0x010, 0x00075, 0x0, "Pace Core RS-232"},
+ {HPHW_FIO, 0x011, 0x00075, 0x0, "Sidewinder Core RS-232"},
+ {HPHW_FIO, 0x019, 0x00075, 0x0, "Scorpio Sr. Core RS-232"},
+ {HPHW_FIO, 0x020, 0x00075, 0x0, "Scorpio 100 Core RS-232"},
+ {HPHW_FIO, 0x021, 0x00075, 0x0, "Spectra 50 Core RS-232"},
+ {HPHW_FIO, 0x022, 0x00075, 0x0, "Spectra 75 Core RS-232"},
+ {HPHW_FIO, 0x023, 0x00075, 0x0, "Spectra 100 Core RS-232"},
+ {HPHW_FIO, 0x024, 0x00075, 0x0, "Fast Pace Core RS-232"},
+ {HPHW_FIO, 0x026, 0x00075, 0x0, "CoralII Jaguar Core RS-232"},
+ {HPHW_FIO, 0x004, 0x00077, 0x0, "Coral SGC Graphics"},
+ {HPHW_FIO, 0x005, 0x00077, 0x0, "Hyperdrive Optional Graphics"},
+ {HPHW_FIO, 0x006, 0x00077, 0x0, "Stinger Optional Graphics"},
+ {HPHW_FIO, 0x007, 0x00077, 0x0, "Scorpio Builtin Graphics"},
+ {HPHW_FIO, 0x008, 0x00077, 0x0, "Anole Hyperdrive Optional Graphics"},
+ {HPHW_FIO, 0x009, 0x00077, 0x0, "Thunder II graphics EISA form"},
+ {HPHW_FIO, 0x00A, 0x00077, 0x0, "Thunder II graphics GSA form"},
+ {HPHW_FIO, 0x00B, 0x00077, 0x0, "Scorpio Jr Builtin Graphics"},
+ {HPHW_FIO, 0x00C, 0x00077, 0x0, "Strider-50 SSC Graphics"},
+ {HPHW_FIO, 0x00D, 0x00077, 0x0, "Strider-33 SSC Graphics"},
+ {HPHW_FIO, 0x00E, 0x00077, 0x0, "Trailways-50 SSC Graphics"},
+ {HPHW_FIO, 0x00F, 0x00077, 0x0, "Trailways-33 SSC Graphics"},
+ {HPHW_FIO, 0x010, 0x00077, 0x0, "Pace SGC Graphics"},
+ {HPHW_FIO, 0x011, 0x00077, 0x0, "Mohawk Opt. 2D Graphics (Kid)"},
+ {HPHW_FIO, 0x012, 0x00077, 0x0, "Raven Opt. 2D Graphics (Goat)"},
+ {HPHW_FIO, 0x016, 0x00077, 0x0, "Lego 24 SCG Graphics"},
+ {HPHW_FIO, 0x017, 0x00077, 0x0, "Lego 24Z SCG Graphics"},
+ {HPHW_FIO, 0x018, 0x00077, 0x0, "Lego 48Z SCG Graphics"},
+ {HPHW_FIO, 0x019, 0x00077, 0x0, "Scorpio Sr Builtin Graphics"},
+ {HPHW_FIO, 0x020, 0x00077, 0x0, "Scorpio 100 Builtin Graphics"},
+ {HPHW_FIO, 0x021, 0x00077, 0x0, "Spectra 50 Builtin Graphics"},
+ {HPHW_FIO, 0x022, 0x00077, 0x0, "Spectra 75 Builtin Graphics"},
+ {HPHW_FIO, 0x023, 0x00077, 0x0, "Spectra 100 Builtin Graphics"},
+ {HPHW_FIO, 0x024, 0x00077, 0x0, "Fast Pace SGC Graphics"},
+ {HPHW_FIO, 0x006, 0x0007A, 0x0, "Bushmaster Audio"},
+ {HPHW_FIO, 0x008, 0x0007A, 0x0, "Flounder Audio"},
+ {HPHW_FIO, 0x004, 0x0007B, 0x0, "UL Optional Audio"},
+ {HPHW_FIO, 0x007, 0x0007B, 0x0, "Scorpio Audio"},
+ {HPHW_FIO, 0x00B, 0x0007B, 0x0, "Scorpio Jr. Audio"},
+ {HPHW_FIO, 0x00C, 0x0007B, 0x0, "Strider-50 Audio"},
+ {HPHW_FIO, 0x00D, 0x0007B, 0x0, "Strider-33 Audio"},
+ {HPHW_FIO, 0x00E, 0x0007B, 0x0, "Trailways-50 Audio"},
+ {HPHW_FIO, 0x00F, 0x0007B, 0x0, "Trailways-33 Audio"},
+ {HPHW_FIO, 0x015, 0x0007B, 0x0, "KittyHawk GSY Core Audio"},
+ {HPHW_FIO, 0x016, 0x0007B, 0x0, "Gecko Audio"},
+ {HPHW_FIO, 0x019, 0x0007B, 0x0, "Scorpio Sr. Audio"},
+ {HPHW_FIO, 0x01A, 0x0007B, 0x0, "Anole 64 Audio"},
+ {HPHW_FIO, 0x01B, 0x0007B, 0x0, "Anole 100 Audio"},
+ {HPHW_FIO, 0x01C, 0x0007B, 0x0, "Gecko 80 Audio"},
+ {HPHW_FIO, 0x01D, 0x0007B, 0x0, "Gecko 100 Audio"},
+ {HPHW_FIO, 0x01F, 0x0007B, 0x0, "SkyHawk 100/120 Audio"},
+ {HPHW_FIO, 0x020, 0x0007B, 0x0, "Scorpio 100 Audio"},
+ {HPHW_FIO, 0x021, 0x0007B, 0x0, "Spectra 50 Audio"},
+ {HPHW_FIO, 0x022, 0x0007B, 0x0, "Spectra 75 Audio"},
+ {HPHW_FIO, 0x023, 0x0007B, 0x0, "Spectra 100 Audio"},
+ {HPHW_FIO, 0x028, 0x0007B, 0x0, "Mirage Jr Audio"},
+ {HPHW_FIO, 0x029, 0x0007B, 0x0, "Mirage Audio"},
+ {HPHW_FIO, 0x02A, 0x0007B, 0x0, "Electra Audio"},
+ {HPHW_FIO, 0x02B, 0x0007B, 0x0, "Mirage 80 Audio"},
+ {HPHW_FIO, 0x02C, 0x0007B, 0x0, "Mirage 100+ Audio"},
+ {HPHW_FIO, 0x032, 0x0007B, 0x0, "Raven T' Audio"},
+ {HPHW_FIO, 0x034, 0x0007B, 0x0, "SAIC L-80 Audio"},
+ {HPHW_FIO, 0x035, 0x0007B, 0x0, "PCX-L2 712/132 Core Audio"},
+ {HPHW_FIO, 0x036, 0x0007B, 0x0, "PCX-L2 712/160 Core Audio"},
+ {HPHW_FIO, 0x03B, 0x0007B, 0x0, "Raven U/L2 Core Audio"},
+ {HPHW_FIO, 0x03C, 0x0007B, 0x0, "Merlin 132 Core Audio"},
+ {HPHW_FIO, 0x03D, 0x0007B, 0x0, "Merlin 160 Core Audio"},
+ {HPHW_FIO, 0x03E, 0x0007B, 0x0, "Merlin+ 132 Core Audio"},
+ {HPHW_FIO, 0x03F, 0x0007B, 0x0, "Merlin+ 180 Core Audio"},
+ {HPHW_FIO, 0x044, 0x0007B, 0x0, "Mohawk Core Audio"},
+ {HPHW_FIO, 0x046, 0x0007B, 0x0, "Rocky2 120 Core Audio"},
+ {HPHW_FIO, 0x047, 0x0007B, 0x0, "Rocky2 150 Core Audio"},
+ {HPHW_FIO, 0x04B, 0x0007B, 0x0, "Anole L2 132 Core Audio"},
+ {HPHW_FIO, 0x04D, 0x0007B, 0x0, "Anole L2 165 Core Audio"},
+ {HPHW_FIO, 0x04E, 0x0007B, 0x0, "Kiji L2 132 Core Audio"},
+ {HPHW_FIO, 0x050, 0x0007B, 0x0, "Merlin Jr 132 Core Audio"},
+ {HPHW_FIO, 0x051, 0x0007B, 0x0, "Firehawk Audio"},
+ {HPHW_FIO, 0x056, 0x0007B, 0x0, "Raven+ w SE FWSCSI Core Audio"},
+ {HPHW_FIO, 0x057, 0x0007B, 0x0, "Raven+ w Diff FWSCSI Core Audio"},
+ {HPHW_FIO, 0x058, 0x0007B, 0x0, "FireHawk 200 Audio"},
+ {HPHW_FIO, 0x05C, 0x0007B, 0x0, "SummitHawk 230 Core Audio"},
+ {HPHW_FIO, 0x800, 0x0007B, 0x0, "Hitachi Tiny 64 Audio"},
+ {HPHW_FIO, 0x801, 0x0007B, 0x0, "Hitachi Tiny 80 Audio"},
+ {HPHW_FIO, 0x009, 0x0007C, 0x0, "Outfield FW SCSI"},
+ {HPHW_FIO, 0x00A, 0x0007C, 0x0, "CoralII FW SCSI"},
+ {HPHW_FIO, 0x026, 0x0007C, 0x0, "CoralII Jaguar FW SCSI"},
+ {HPHW_FIO, 0x009, 0x0007D, 0x0, "Outfield FDDI"},
+ {HPHW_FIO, 0x00A, 0x0007D, 0x0, "CoralII FDDI"},
+ {HPHW_FIO, 0x026, 0x0007D, 0x0, "CoralII Jaguar FDDI"},
+ {HPHW_FIO, 0x010, 0x0007E, 0x0, "Pace Audio"},
+ {HPHW_FIO, 0x024, 0x0007E, 0x0, "Fast Pace Audio"},
+ {HPHW_FIO, 0x009, 0x0007F, 0x0, "Outfield Audio"},
+ {HPHW_FIO, 0x00A, 0x0007F, 0x0, "CoralII Audio"},
+ {HPHW_FIO, 0x026, 0x0007F, 0x0, "CoralII Jaguar Audio"},
+ {HPHW_FIO, 0x010, 0x00080, 0x0, "Pace Core HPIB"},
+ {HPHW_FIO, 0x024, 0x00080, 0x0, "Fast Pace Core HPIB"},
+ {HPHW_FIO, 0x015, 0x00082, 0x0, "KittyHawk GSY Core SCSI"},
+ {HPHW_FIO, 0x016, 0x00082, 0x0, "Gecko Core SCSI"},
+ {HPHW_FIO, 0x01A, 0x00082, 0x0, "Anole 64 Core SCSI"},
+ {HPHW_FIO, 0x01B, 0x00082, 0x0, "Anole 100 Core SCSI"},
+ {HPHW_FIO, 0x01C, 0x00082, 0x0, "Gecko 80 Core SCSI"},
+ {HPHW_FIO, 0x01D, 0x00082, 0x0, "Gecko 100 Core SCSI"},
+ {HPHW_FIO, 0x01F, 0x00082, 0x0, "SkyHawk 100/120 Core SCSI"},
+ {HPHW_FIO, 0x027, 0x00082, 0x0, "Piranha 100 Core SCSI"},
+ {HPHW_FIO, 0x028, 0x00082, 0x0, "Mirage Jr Core SCSI"},
+ {HPHW_FIO, 0x029, 0x00082, 0x0, "Mirage Core SCSI"},
+ {HPHW_FIO, 0x02A, 0x00082, 0x0, "Electra Core SCSI"},
+ {HPHW_FIO, 0x02B, 0x00082, 0x0, "Mirage 80 Core SCSI"},
+ {HPHW_FIO, 0x02C, 0x00082, 0x0, "Mirage 100+ Core SCSI"},
+ {HPHW_FIO, 0x02E, 0x00082, 0x0, "UL 350 Core SCSI"},
+ {HPHW_FIO, 0x02F, 0x00082, 0x0, "UL 550 Core SCSI"},
+ {HPHW_FIO, 0x032, 0x00082, 0x0, "Raven T' Core SCSI"},
+ {HPHW_FIO, 0x033, 0x00082, 0x0, "Anole T Core SCSI"},
+ {HPHW_FIO, 0x034, 0x00082, 0x0, "SAIC L-80 Core SCSI"},
+ {HPHW_FIO, 0x035, 0x00082, 0x0, "PCX-L2 712/132 Core SCSI"},
+ {HPHW_FIO, 0x036, 0x00082, 0x0, "PCX-L2 712/160 Core SCSI"},
+ {HPHW_FIO, 0x03B, 0x00082, 0x0, "Raven U/L2 Core SCSI"},
+ {HPHW_FIO, 0x03C, 0x00082, 0x0, "Merlin 132 Core SCSI"},
+ {HPHW_FIO, 0x03D, 0x00082, 0x0, "Merlin 160 Core SCSI"},
+ {HPHW_FIO, 0x03E, 0x00082, 0x0, "Merlin+ 132 Core SCSI"},
+ {HPHW_FIO, 0x03F, 0x00082, 0x0, "Merlin+ 180 Core SCSI"},
+ {HPHW_FIO, 0x044, 0x00082, 0x0, "Mohawk Core SCSI"},
+ {HPHW_FIO, 0x045, 0x00082, 0x0, "Rocky1 Core SCSI"},
+ {HPHW_FIO, 0x046, 0x00082, 0x0, "Rocky2 120 Core SCSI"},
+ {HPHW_FIO, 0x047, 0x00082, 0x0, "Rocky2 150 Core SCSI"},
+ {HPHW_FIO, 0x04B, 0x00082, 0x0, "Anole L2 132 Core SCSI"},
+ {HPHW_FIO, 0x04D, 0x00082, 0x0, "Anole L2 165 Core SCSI"},
+ {HPHW_FIO, 0x04E, 0x00082, 0x0, "Kiji L2 132 Core SCSI"},
+ {HPHW_FIO, 0x050, 0x00082, 0x0, "Merlin Jr 132 Core SCSI"},
+ {HPHW_FIO, 0x051, 0x00082, 0x0, "Firehawk Core SCSI"},
+ {HPHW_FIO, 0x056, 0x00082, 0x0, "Raven+ w SE FWSCSI Core SCSI"},
+ {HPHW_FIO, 0x057, 0x00082, 0x0, "Raven+ w Diff FWSCSI Core SCSI"},
+ {HPHW_FIO, 0x058, 0x00082, 0x0, "FireHawk 200 Core SCSI"},
+ {HPHW_FIO, 0x05C, 0x00082, 0x0, "SummitHawk 230 Core SCSI"},
+ {HPHW_FIO, 0x05E, 0x00082, 0x0, "Staccato 132 Core SCSI"},
+ {HPHW_FIO, 0x05F, 0x00082, 0x0, "Staccato 180 Core SCSI"},
+ {HPHW_FIO, 0x800, 0x00082, 0x0, "Hitachi Tiny 64 Core SCSI"},
+ {HPHW_FIO, 0x801, 0x00082, 0x0, "Hitachi Tiny 80 Core SCSI"},
+ {HPHW_FIO, 0x016, 0x00083, 0x0, "Gecko Core PC Floppy"},
+ {HPHW_FIO, 0x01C, 0x00083, 0x0, "Gecko 80 Core PC Floppy"},
+ {HPHW_FIO, 0x01D, 0x00083, 0x0, "Gecko 100 Core PC Floppy"},
+ {HPHW_FIO, 0x051, 0x00083, 0x0, "Firehawk Core PC Floppy"},
+ {HPHW_FIO, 0x058, 0x00083, 0x0, "FireHawk 200 Core PC Floppy"},
+ {HPHW_FIO, 0x027, 0x00083, 0x0, "Piranha 100 Core PC Floppy"},
+ {HPHW_FIO, 0x028, 0x00083, 0x0, "Mirage Jr Core PC Floppy"},
+ {HPHW_FIO, 0x029, 0x00083, 0x0, "Mirage Core PC Floppy"},
+ {HPHW_FIO, 0x02A, 0x00083, 0x0, "Electra Core PC Floppy"},
+ {HPHW_FIO, 0x02B, 0x00083, 0x0, "Mirage 80 Core PC Floppy"},
+ {HPHW_FIO, 0x02C, 0x00083, 0x0, "Mirage 100+ Core PC Floppy"},
+ {HPHW_FIO, 0x02E, 0x00083, 0x0, "UL 350 Core PC Floppy"},
+ {HPHW_FIO, 0x02F, 0x00083, 0x0, "UL 550 Core PC Floppy"},
+ {HPHW_FIO, 0x032, 0x00083, 0x0, "Raven T' Core PC Floppy"},
+ {HPHW_FIO, 0x034, 0x00083, 0x0, "SAIC L-80 Core PC Floppy"},
+ {HPHW_FIO, 0x035, 0x00083, 0x0, "PCX-L2 712/132 Core Floppy"},
+ {HPHW_FIO, 0x036, 0x00083, 0x0, "PCX-L2 712/160 Core Floppy"},
+ {HPHW_FIO, 0x03B, 0x00083, 0x0, "Raven U/L2 Core PC Floppy"},
+ {HPHW_FIO, 0x03C, 0x00083, 0x0, "Merlin 132 Core PC Floppy"},
+ {HPHW_FIO, 0x03D, 0x00083, 0x0, "Merlin 160 Core PC Floppy"},
+ {HPHW_FIO, 0x03E, 0x00083, 0x0, "Merlin+ 132 Core PC Floppy"},
+ {HPHW_FIO, 0x03F, 0x00083, 0x0, "Merlin+ 180 Core PC Floppy"},
+ {HPHW_FIO, 0x045, 0x00083, 0x0, "Rocky1 Core PC Floppy"},
+ {HPHW_FIO, 0x046, 0x00083, 0x0, "Rocky2 120 Core PC Floppy"},
+ {HPHW_FIO, 0x047, 0x00083, 0x0, "Rocky2 150 Core PC Floppy"},
+ {HPHW_FIO, 0x04E, 0x00083, 0x0, "Kiji L2 132 Core PC Floppy"},
+ {HPHW_FIO, 0x050, 0x00083, 0x0, "Merlin Jr 132 Core PC Floppy"},
+ {HPHW_FIO, 0x056, 0x00083, 0x0, "Raven+ w SE FWSCSI Core PC Floppy"},
+ {HPHW_FIO, 0x057, 0x00083, 0x0, "Raven+ w Diff FWSCSI Core PC Floppy"},
+ {HPHW_FIO, 0x800, 0x00083, 0x0, "Hitachi Tiny 64 Core PC Floppy"},
+ {HPHW_FIO, 0x801, 0x00083, 0x0, "Hitachi Tiny 80 Core PC Floppy"},
+ {HPHW_FIO, 0x015, 0x00084, 0x0, "KittyHawk GSY Core PS/2 Port"},
+ {HPHW_FIO, 0x016, 0x00084, 0x0, "Gecko Core PS/2 Port"},
+ {HPHW_FIO, 0x018, 0x00084, 0x0, "Gecko Optional PS/2 Port"},
+ {HPHW_FIO, 0x01A, 0x00084, 0x0, "Anole 64 Core PS/2 Port"},
+ {HPHW_FIO, 0x01B, 0x00084, 0x0, "Anole 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x01C, 0x00084, 0x0, "Gecko 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x01D, 0x00084, 0x0, "Gecko 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x01F, 0x00084, 0x0, "SkyHawk 100/120 Core PS/2 Port"},
+ {HPHW_FIO, 0x027, 0x00084, 0x0, "Piranha 100 Core PS/2 Port"},
+ {HPHW_FIO, 0x028, 0x00084, 0x0, "Mirage Jr Core PS/2 Port"},
+ {HPHW_FIO, 0x029, 0x00084, 0x0, "Mirage Core PS/2 Port"},
+ {HPHW_FIO, 0x02A, 0x00084, 0x0, "Electra Core PS/2 Port"},
+ {HPHW_FIO, 0x02B, 0x00084, 0x0, "Mirage 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x02C, 0x00084, 0x0, "Mirage 100+ Core PS/2 Port"},
+ {HPHW_FIO, 0x02E, 0x00084, 0x0, "UL 350 Core PS/2 Port"},
+ {HPHW_FIO, 0x02F, 0x00084, 0x0, "UL 550 Core PS/2 Port"},
+ {HPHW_FIO, 0x032, 0x00084, 0x0, "Raven T' Core PS/2 Port"},
+ {HPHW_FIO, 0x033, 0x00084, 0x0, "Anole T Core PS/2 Port"},
+ {HPHW_FIO, 0x034, 0x00084, 0x0, "SAIC L-80 Core PS/2 Port"},
+ {HPHW_FIO, 0x035, 0x00084, 0x0, "PCX-L2 712/132 Core PS/2 Port"},
+ {HPHW_FIO, 0x036, 0x00084, 0x0, "PCX-L2 712/160 Core PS/2 Port"},
+ {HPHW_FIO, 0x03B, 0x00084, 0x0, "Raven U/L2 Core PS/2 Port"},
+ {HPHW_FIO, 0x03C, 0x00084, 0x0, "Merlin 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x03D, 0x00084, 0x0, "Merlin 160 Core PS/2 Port"},
+ {HPHW_FIO, 0x03E, 0x00084, 0x0, "Merlin+ 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x03F, 0x00084, 0x0, "Merlin+ 180 Core PS/2 Port"},
+ {HPHW_FIO, 0x044, 0x00084, 0x0, "Mohawk Core PS/2 Port"},
+ {HPHW_FIO, 0x045, 0x00084, 0x0, "Rocky1 Core PS/2 Port"},
+ {HPHW_FIO, 0x046, 0x00084, 0x0, "Rocky2 120 Core PS/2 Port"},
+ {HPHW_FIO, 0x047, 0x00084, 0x0, "Rocky2 150 Core PS/2 Port"},
+ {HPHW_FIO, 0x048, 0x00084, 0x0, "Rocky2 120 Dino PS/2 Port"},
+ {HPHW_FIO, 0x049, 0x00084, 0x0, "Rocky2 150 Dino PS/2 Port"},
+ {HPHW_FIO, 0x04B, 0x00084, 0x0, "Anole L2 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x04D, 0x00084, 0x0, "Anole L2 165 Core PS/2 Port"},
+ {HPHW_FIO, 0x04E, 0x00084, 0x0, "Kiji L2 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x050, 0x00084, 0x0, "Merlin Jr 132 Core PS/2 Port"},
+ {HPHW_FIO, 0x051, 0x00084, 0x0, "Firehawk Core PS/2 Port"},
+ {HPHW_FIO, 0x056, 0x00084, 0x0, "Raven+ w SE FWSCSI Core PS/2 Port"},
+ {HPHW_FIO, 0x057, 0x00084, 0x0, "Raven+ w Diff FWSCSI Core PS/2 Port"},
+ {HPHW_FIO, 0x058, 0x00084, 0x0, "FireHawk 200 Core PS/2 Port"},
+ {HPHW_FIO, 0x05C, 0x00084, 0x0, "SummitHawk 230 Core PS/2 Port"},
+ {HPHW_FIO, 0x800, 0x00084, 0x0, "Hitachi Tiny 64 Core PS/2 Port"},
+ {HPHW_FIO, 0x801, 0x00084, 0x0, "Hitachi Tiny 80 Core PS/2 Port"},
+ {HPHW_FIO, 0x004, 0x00085, 0x0, "Solo GSC Optional Graphics"},
+ {HPHW_FIO, 0x005, 0x00085, 0x0, "Duet GSC Optional Graphics"},
+ {HPHW_FIO, 0x008, 0x00085, 0x0, "Anole Artist Optional Graphics"},
+ {HPHW_FIO, 0x010, 0x00085, 0x0, "Mirage 80 GSC Builtin Graphics"},
+ {HPHW_FIO, 0x011, 0x00085, 0x0, "Mirage 100+ GSC Builtin Graphics"},
+ {HPHW_FIO, 0x012, 0x00085, 0x0, "Mirage Jr GSC Builtin Graphics"},
+ {HPHW_FIO, 0x013, 0x00085, 0x0, "Mirage GSC Builtin Graphics"},
+ {HPHW_FIO, 0x014, 0x00085, 0x0, "Electra GSC Builtin Graphics"},
+ {HPHW_FIO, 0x016, 0x00085, 0x0, "Gecko GSC Core Graphics"},
+ {HPHW_FIO, 0x017, 0x00085, 0x0, "Gecko GSC Optional Graphics"},
+ {HPHW_FIO, 0x01A, 0x00085, 0x0, "Anole 64 Artist Builtin Graphics"},
+ {HPHW_FIO, 0x01B, 0x00085, 0x0, "Anole 100 Artist Builtin Graphics"},
+ {HPHW_FIO, 0x01C, 0x00085, 0x0, "Gecko 80 GSC Core Graphics"},
+ {HPHW_FIO, 0x01D, 0x00085, 0x0, "Gecko 100 GSC Core Graphics"},
+ {HPHW_FIO, 0x032, 0x00085, 0x0, "Raven T' GSC Core Graphics"},
+ {HPHW_FIO, 0x033, 0x00085, 0x0, "Anole T Artist Builtin Graphics"},
+ {HPHW_FIO, 0x034, 0x00085, 0x0, "SAIC L-80 GSC Core Graphics"},
+ {HPHW_FIO, 0x035, 0x00085, 0x0, "PCX-L2 712/132 Core Graphics"},
+ {HPHW_FIO, 0x036, 0x00085, 0x0, "PCX-L2 712/160 Core Graphics"},
+ {HPHW_FIO, 0x03B, 0x00085, 0x0, "Raven U/L2 Core Graphics"},
+ {HPHW_FIO, 0x03C, 0x00085, 0x0, "Merlin 132 Core Graphics"},
+ {HPHW_FIO, 0x03D, 0x00085, 0x0, "Merlin 160 Core Graphics"},
+ {HPHW_FIO, 0x03E, 0x00085, 0x0, "Merlin+ 132 Core Graphics"},
+ {HPHW_FIO, 0x03F, 0x00085, 0x0, "Merlin+ 180 Core Graphics"},
+ {HPHW_FIO, 0x045, 0x00085, 0x0, "Rocky1 Core Graphics"},
+ {HPHW_FIO, 0x046, 0x00085, 0x0, "Rocky2 120 Core Graphics"},
+ {HPHW_FIO, 0x047, 0x00085, 0x0, "Rocky2 150 Core Graphics"},
+ {HPHW_FIO, 0x04B, 0x00085, 0x0, "Anole L2 132 Core Graphics"},
+ {HPHW_FIO, 0x04D, 0x00085, 0x0, "Anole L2 165 Core Graphics"},
+ {HPHW_FIO, 0x04E, 0x00085, 0x0, "Kiji L2 132 Core Graphics"},
+ {HPHW_FIO, 0x050, 0x00085, 0x0, "Merlin Jr 132 Core Graphics"},
+ {HPHW_FIO, 0x056, 0x00085, 0x0, "Raven+ w SE FWSCSI Core Graphics"},
+ {HPHW_FIO, 0x057, 0x00085, 0x0, "Raven+ w Diff FWSCSI Core Graphics"},
+ {HPHW_FIO, 0x800, 0x00085, 0x0, "Hitachi Tiny 64 Core Graphics"},
+ {HPHW_FIO, 0x801, 0x00085, 0x0, "Hitachi Tiny 80 Core Graphics"},
+ {HPHW_FIO, 0x004, 0x00086, 0x0, "GSC IBM Token Ring"},
+ {HPHW_FIO, 0x015, 0x00087, 0x0, "Gecko Optional ISDN"},
+ {HPHW_FIO, 0x016, 0x00087, 0x0, "Gecko Core ISDN"},
+ {HPHW_FIO, 0x01C, 0x00087, 0x0, "Gecko 80 Core ISDN"},
+ {HPHW_FIO, 0x01D, 0x00087, 0x0, "Gecko 100 Core ISDN"},
+ {HPHW_FIO, 0x010, 0x00088, 0x0, "Pace VME Networking"},
+ {HPHW_FIO, 0x011, 0x00088, 0x0, "Sidewinder VME Networking"},
+ {HPHW_FIO, 0x01A, 0x00088, 0x0, "Anole 64 VME Networking"},
+ {HPHW_FIO, 0x01B, 0x00088, 0x0, "Anole 100 VME Networking"},
+ {HPHW_FIO, 0x024, 0x00088, 0x0, "Fast Pace VME Networking"},
+ {HPHW_FIO, 0x034, 0x00088, 0x0, "Anole T VME Networking"},
+ {HPHW_FIO, 0x04A, 0x00088, 0x0, "Anole L2 132 VME Networking"},
+ {HPHW_FIO, 0x04C, 0x00088, 0x0, "Anole L2 165 VME Networking"},
+ {HPHW_FIO, 0x011, 0x0008A, 0x0, "WB-96 Core LAN (802.3)"},
+ {HPHW_FIO, 0x012, 0x0008A, 0x0, "Orville Core LAN (802.3)"},
+ {HPHW_FIO, 0x013, 0x0008A, 0x0, "Wilbur Core LAN (802.3)"},
+ {HPHW_FIO, 0x014, 0x0008A, 0x0, "WB-80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x015, 0x0008A, 0x0, "KittyHawk GSY Core LAN (802.3)"},
+ {HPHW_FIO, 0x016, 0x0008A, 0x0, "Gecko Core LAN (802.3)"},
+ {HPHW_FIO, 0x018, 0x0008A, 0x0, "Gecko Optional LAN (802.3)"},
+ {HPHW_FIO, 0x01A, 0x0008A, 0x0, "Anole 64 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01B, 0x0008A, 0x0, "Anole 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01C, 0x0008A, 0x0, "Gecko 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01D, 0x0008A, 0x0, "Gecko 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x01F, 0x0008A, 0x0, "SkyHawk 100/120 Core LAN (802.3)"},
+ {HPHW_FIO, 0x027, 0x0008A, 0x0, "Piranha 100 Core LAN (802.3)"},
+ {HPHW_FIO, 0x028, 0x0008A, 0x0, "Mirage Jr Core LAN (802.3)"},
+ {HPHW_FIO, 0x029, 0x0008A, 0x0, "Mirage Core LAN (802.3)"},
+ {HPHW_FIO, 0x02A, 0x0008A, 0x0, "Electra Core LAN (802.3)"},
+ {HPHW_FIO, 0x02B, 0x0008A, 0x0, "Mirage 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x02C, 0x0008A, 0x0, "Mirage 100+ Core LAN (802.3)"},
+ {HPHW_FIO, 0x02E, 0x0008A, 0x0, "UL 350 Core LAN (802.3)"},
+ {HPHW_FIO, 0x02F, 0x0008A, 0x0, "UL 550 Core LAN (802.3)"},
+ {HPHW_FIO, 0x032, 0x0008A, 0x0, "Raven T' Core LAN (802.3)"},
+ {HPHW_FIO, 0x033, 0x0008A, 0x0, "Anole T Core LAN (802.3)"},
+ {HPHW_FIO, 0x034, 0x0008A, 0x0, "SAIC L-80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x035, 0x0008A, 0x0, "PCX-L2 712/132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x036, 0x0008A, 0x0, "PCX-L2 712/160 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03B, 0x0008A, 0x0, "Raven U/L2 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03C, 0x0008A, 0x0, "Merlin 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x03D, 0x0008A, 0x0, "Merlin 160 Core LAN (802.3)"},
+ {HPHW_FIO, 0x044, 0x0008A, 0x0, "Mohawk Core LAN (802.3)"},
+ {HPHW_FIO, 0x045, 0x0008A, 0x0, "Rocky1 Core LAN (802.3)"},
+ {HPHW_FIO, 0x046, 0x0008A, 0x0, "Rocky2 120 Core LAN (802.3)"},
+ {HPHW_FIO, 0x047, 0x0008A, 0x0, "Rocky2 150 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04B, 0x0008A, 0x0, "Anole L2 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04D, 0x0008A, 0x0, "Anole L2 165 Core LAN (802.3)"},
+ {HPHW_FIO, 0x04E, 0x0008A, 0x0, "Kiji L2 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x050, 0x0008A, 0x0, "Merlin Jr 132 Core LAN (802.3)"},
+ {HPHW_FIO, 0x058, 0x0008A, 0x0, "FireHawk 200 Core LAN (802.3)"},
+ {HPHW_FIO, 0x800, 0x0008A, 0x0, "Hitachi Tiny 64 Core LAN (802.3)"},
+ {HPHW_FIO, 0x801, 0x0008A, 0x0, "Hitachi Tiny 80 Core LAN (802.3)"},
+ {HPHW_FIO, 0x004, 0x0008C, 0x0, "SkyHawk 100/120 Wax RS-232"},
+ {HPHW_FIO, 0x005, 0x0008C, 0x0, "SAIC L-80 Wax RS-232"},
+ {HPHW_FIO, 0x006, 0x0008C, 0x0, "Raven U/L2 Dino RS-232"},
+ {HPHW_FIO, 0x007, 0x0008C, 0x0, "Dino RS-232"},
+ {HPHW_FIO, 0x008, 0x0008C, 0x0, "Merlin 132 Dino RS-232"},
+ {HPHW_FIO, 0x009, 0x0008C, 0x0, "Merlin 160 Dino RS-232"},
+ {HPHW_FIO, 0x00A, 0x0008C, 0x0, "Merlin Jr 132 Dino RS-232"},
+ {HPHW_FIO, 0x010, 0x0008C, 0x0, "Mirage 80 Wax RS-232"},
+ {HPHW_FIO, 0x011, 0x0008C, 0x0, "Mirage 100+ Wax RS-232"},
+ {HPHW_FIO, 0x012, 0x0008C, 0x0, "Mirage Jr Wax RS-232"},
+ {HPHW_FIO, 0x013, 0x0008C, 0x0, "Mirage Wax RS-232"},
+ {HPHW_FIO, 0x014, 0x0008C, 0x0, "Electra Wax RS-232"},
+ {HPHW_FIO, 0x015, 0x0008C, 0x0, "KittyHawk GSY Core RS-232"},
+ {HPHW_FIO, 0x016, 0x0008C, 0x0, "Gecko Core RS-232"},
+ {HPHW_FIO, 0x017, 0x0008C, 0x0, "Raven Backplane RS-232"},
+ {HPHW_FIO, 0x018, 0x0008C, 0x0, "Gecko Optional RS-232"},
+ {HPHW_FIO, 0x019, 0x0008C, 0x0, "Merlin+ 180 Dino RS-232"},
+ {HPHW_FIO, 0x01A, 0x0008C, 0x0, "Anole 64 Core RS-232"},
+ {HPHW_FIO, 0x01B, 0x0008C, 0x0, "Anole 100 Core RS-232"},
+ {HPHW_FIO, 0x01C, 0x0008C, 0x0, "Gecko 80 Core RS-232"},
+ {HPHW_FIO, 0x01D, 0x0008C, 0x0, "Gecko 100 Core RS-232"},
+ {HPHW_FIO, 0x01E, 0x0008C, 0x0, "Raven T' Wax RS-232"},
+ {HPHW_FIO, 0x01F, 0x0008C, 0x0, "SkyHawk 100/120 Core RS-232"},
+ {HPHW_FIO, 0x020, 0x0008C, 0x0, "Anole 64 Timi RS-232"},
+ {HPHW_FIO, 0x021, 0x0008C, 0x0, "Anole 100 Timi RS-232"},
+ {HPHW_FIO, 0x022, 0x0008C, 0x0, "Merlin+ 132 Dino RS-232"},
+ {HPHW_FIO, 0x023, 0x0008C, 0x0, "Rocky1 Wax RS-232"},
+ {HPHW_FIO, 0x025, 0x0008C, 0x0, "Armyknife Optional RS-232"},
+ {HPHW_FIO, 0x026, 0x0008C, 0x0, "Piranha 100 Wax RS-232"},
+ {HPHW_FIO, 0x027, 0x0008C, 0x0, "Piranha 100 Core RS-232"},
+ {HPHW_FIO, 0x028, 0x0008C, 0x0, "Mirage Jr Core RS-232"},
+ {HPHW_FIO, 0x029, 0x0008C, 0x0, "Mirage Core RS-232"},
+ {HPHW_FIO, 0x02A, 0x0008C, 0x0, "Electra Core RS-232"},
+ {HPHW_FIO, 0x02B, 0x0008C, 0x0, "Mirage 80 Core RS-232"},
+ {HPHW_FIO, 0x02C, 0x0008C, 0x0, "Mirage 100+ Core RS-232"},
+ {HPHW_FIO, 0x02E, 0x0008C, 0x0, "UL 350 Lasi Core RS-232"},
+ {HPHW_FIO, 0x02F, 0x0008C, 0x0, "UL 550 Lasi Core RS-232"},
+ {HPHW_FIO, 0x030, 0x0008C, 0x0, "UL 350 Wax Core RS-232"},
+ {HPHW_FIO, 0x031, 0x0008C, 0x0, "UL 550 Wax Core RS-232"},
+ {HPHW_FIO, 0x032, 0x0008C, 0x0, "Raven T' Lasi Core RS-232"},
+ {HPHW_FIO, 0x033, 0x0008C, 0x0, "Anole T Core RS-232"},
+ {HPHW_FIO, 0x034, 0x0008C, 0x0, "SAIC L-80 Core RS-232"},
+ {HPHW_FIO, 0x035, 0x0008C, 0x0, "PCX-L2 712/132 Core RS-232"},
+ {HPHW_FIO, 0x036, 0x0008C, 0x0, "PCX-L2 712/160 Core RS-232"},
+ {HPHW_FIO, 0x03A, 0x0008C, 0x0, "Merlin+ Wax RS-232"},
+ {HPHW_FIO, 0x03B, 0x0008C, 0x0, "Raven U/L2 Core RS-232"},
+ {HPHW_FIO, 0x03C, 0x0008C, 0x0, "Merlin 132 Core RS-232"},
+ {HPHW_FIO, 0x03D, 0x0008C, 0x0, "Merlin 160 Core RS-232"},
+ {HPHW_FIO, 0x03E, 0x0008C, 0x0, "Merlin+ 132 Core RS-232"},
+ {HPHW_FIO, 0x03F, 0x0008C, 0x0, "Merlin+ 180 Core RS-232"},
+ {HPHW_FIO, 0x040, 0x0008C, 0x0, "Merlin 132 Wax RS-232"},
+ {HPHW_FIO, 0x041, 0x0008C, 0x0, "Merlin 160 Wax RS-232"},
+ {HPHW_FIO, 0x043, 0x0008C, 0x0, "Merlin 132/160 Wax RS-232"},
+ {HPHW_FIO, 0x044, 0x0008C, 0x0, "Mohawk Core RS-232"},
+ {HPHW_FIO, 0x045, 0x0008C, 0x0, "Rocky1 Core RS-232"},
+ {HPHW_FIO, 0x046, 0x0008C, 0x0, "Rocky2 120 Core RS-232"},
+ {HPHW_FIO, 0x047, 0x0008C, 0x0, "Rocky2 150 Core RS-232"},
+ {HPHW_FIO, 0x048, 0x0008C, 0x0, "Rocky2 120 Dino RS-232"},
+ {HPHW_FIO, 0x049, 0x0008C, 0x0, "Rocky2 150 Dino RS-232"},
+ {HPHW_FIO, 0x04A, 0x0008C, 0x0, "Anole L2 132 TIMI RS-232"},
+ {HPHW_FIO, 0x04B, 0x0008C, 0x0, "Anole L2 132 Core RS-232"},
+ {HPHW_FIO, 0x04C, 0x0008C, 0x0, "Anole L2 165 TIMI RS-232"},
+ {HPHW_FIO, 0x04D, 0x0008C, 0x0, "Anole L2 165 Core RS-232"},
+ {HPHW_FIO, 0x04E, 0x0008C, 0x0, "Kiji L2 132 Core RS-232"},
+ {HPHW_FIO, 0x04F, 0x0008C, 0x0, "Kiji L2 132 Dino RS-232"},
+ {HPHW_FIO, 0x050, 0x0008C, 0x0, "Merlin Jr 132 Core RS-232"},
+ {HPHW_FIO, 0x051, 0x0008C, 0x0, "Firehawk Core RS-232"},
+ {HPHW_FIO, 0x052, 0x0008C, 0x0, "Raven+ Hi Power Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x053, 0x0008C, 0x0, "Raven+ Hi Power Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x054, 0x0008C, 0x0, "Raven+ Lo Power Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x055, 0x0008C, 0x0, "Raven+ Lo Power Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x056, 0x0008C, 0x0, "Raven+ w SE FWSCSI Core RS-232"},
+ {HPHW_FIO, 0x057, 0x0008C, 0x0, "Raven+ w Diff FWSCSI Core RS-232"},
+ {HPHW_FIO, 0x058, 0x0008C, 0x0, "FireHawk 200 Core RS-232"},
+ {HPHW_FIO, 0x059, 0x0008C, 0x0, "FireHawk 200 Wax RS-232"},
+ {HPHW_FIO, 0x05A, 0x0008C, 0x0, "Raven+ L2 Backplane w EISA RS-232"},
+ {HPHW_FIO, 0x05B, 0x0008C, 0x0, "Raven+ L2 Backplane w/o EISA RS-232"},
+ {HPHW_FIO, 0x05D, 0x0008C, 0x0, "SummitHawk Dino RS-232"},
+ {HPHW_FIO, 0x05E, 0x0008C, 0x0, "Staccato 132 Core RS-232"},
+ {HPHW_FIO, 0x05F, 0x0008C, 0x0, "Staccato 180 Core RS-232"},
+ {HPHW_FIO, 0x800, 0x0008C, 0x0, "Hitachi Tiny 64 Core RS-232"},
+ {HPHW_FIO, 0x801, 0x0008C, 0x0, "Hitachi Tiny 80 Core RS-232"},
+ {HPHW_FIO, 0x015, 0x0008D, 0x0, "Gecko Optional RJ-16"},
+ {HPHW_FIO, 0x016, 0x0008D, 0x0, "Gecko Core RJ-16"},
+ {HPHW_FIO, 0x01C, 0x0008D, 0x0, "Gecko 80 Core RJ-16"},
+ {HPHW_FIO, 0x01D, 0x0008D, 0x0, "Gecko 100 Core RJ-16"},
+ {HPHW_FIO, 0x004, 0x0008F, 0x0, "Anole Boot Rom"},
+ {HPHW_FIO, 0x005, 0x0008F, 0x0, "Rocky1 Boot Rom"},
+ {HPHW_FIO, 0x006, 0x0008F, 0x0, "Rocky2 120 Boot Rom"},
+ {HPHW_FIO, 0x007, 0x0008F, 0x0, "Rocky2 150 Boot Rom"},
+ {HPHW_FIO, 0x01B, 0x0008F, 0x0, "Anole 100 Boot Rom"},
+ {HPHW_FIO, 0x006, 0x00096, 0x0, "Raven U/L2 Dino PS/2 Port"},
+ {HPHW_FIO, 0x007, 0x00096, 0x0, "Dino PS/2 Port"},
+ {HPHW_FIO, 0x008, 0x00096, 0x0, "Merlin 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x009, 0x00096, 0x0, "Merlin 160 Dino PS/2 Port"},
+ {HPHW_FIO, 0x00A, 0x00096, 0x0, "Merlin Jr 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x019, 0x00096, 0x0, "Merlin+ 180 Dino PS/2 Port"},
+ {HPHW_FIO, 0x022, 0x00096, 0x0, "Merlin+ 132 Dino PS/2 Port"},
+ {HPHW_FIO, 0x004, 0x00097, 0x0, "Cascade EISA 100VG LAN"},
+ {HPHW_FIO, 0x023, 0x00099, 0x0, "Rocky1 Wax HPIB"},
+ {HPHW_FIO, 0x048, 0x00099, 0x0, "Rocky2 120 Clark/Dino HPIB"},
+ {HPHW_FIO, 0x049, 0x00099, 0x0, "Rocky2 150 Clark/Dino HPIB"},
+ {HPHW_FIO, 0x004, 0x000A1, 0x0, "SPP2000 Console TTY"},
+ {HPHW_FIO, 0x004, 0x000A2, 0x0, "Forte Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x005, 0x000A2, 0x0, "AllegroLow PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x006, 0x000A2, 0x0, "AllegroHigh Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x007, 0x000A2, 0x0, "PCI Plug-in LAN"},
+ {HPHW_FIO, 0x00A, 0x000A2, 0x0, "Lego 360 Core PCI 10/100BT LAN"},
+ {HPHW_FIO, 0x03E, 0x000A2, 0x0, "Merlin+ 132 Core PCI LAN"},
+ {HPHW_FIO, 0x03F, 0x000A2, 0x0, "Merlin+ 180 Core PCI LAN"},
+ {HPHW_FIO, 0x056, 0x000A2, 0x0, "Raven+ w SE FWSCSI Core PCI LAN"},
+ {HPHW_FIO, 0x057, 0x000A2, 0x0, "Raven+ w Diff FWSCSI Core PCI LAN"},
+ {HPHW_FIO, 0x05E, 0x000A2, 0x0, "Staccato 132 PCI LAN"},
+ {HPHW_FIO, 0x05F, 0x000A2, 0x0, "Staccato 180 PCI LAN"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI SE UltraSCSI"},
+ {HPHW_FIO, 0x004, 0x000A3, 0x0, "Forte Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x005, 0x000A3, 0x0, "AllegroLow Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x006, 0x000A3, 0x0, "AllegroHigh Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x007, 0x000A3, 0x0, "PCI Plug-in Disk"},
+ {HPHW_FIO, 0x008, 0x000A3, 0x0, "A5158A S FC Tachlite HBA"},
+ {HPHW_FIO, 0x009, 0x000A3, 0x0, "A5157A D FC HBA"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI LVD Ultra2 SCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI NSE UltraSCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI WSE UltraSCSI"},
+ {HPHW_FIO, 0x00A, 0x000A3, 0x0, "Lego 360 Core PCI IDE/ATAPI CD-ROM"},
+ {HPHW_FIO, 0x03E, 0x000A3, 0x0, "Merlin+ 132 Core SE FWSCSI PCI Disk"},
+ {HPHW_FIO, 0x03F, 0x000A3, 0x0, "Merlin+ 180 Core SE FWSCSI PCI Disk"},
+ {HPHW_FIO, 0x056, 0x000A3, 0x0, "Raven+ w SE FWSCSI Core PCI Disk"},
+ {HPHW_FIO, 0x057, 0x000A3, 0x0, "Raven+ w Diff FWSCSI Core PCI Disk"},
+ {HPHW_FIO, 0x004, 0x000A4, 0x0, "SPP2000 Core BA"},
+ {HPHW_FIO, 0x004, 0x000A6, 0x0, "Sonic Ethernet 802.3 Card"},
+ {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x004, 0x000A9, 0x00, "Forte Core PCI USB KB"},
+ {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x005, 0x000A9, 0x00, "AllegroLow Core PCI USB KB"},
+ {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x006, 0x000A9, 0x00, "AllegroHigh Core PCI USB KB"},
+ {HPHW_FIO, 0x007, 0x000A9, 0x0, "Miscellaneous PCI Plug-in"},
+ {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI SuperIO RS-232"},
+ {HPHW_FIO, 0x00A, 0x000A9, 0x0, "Lego 360 Core PCI USB KB"},
+ {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
+ {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
+ {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
+ {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
+ {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
+ {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
+ {HPHW_IOA, 0x581, 0x0000B, 0x10, "Uturn-IOA BC Runway Port"},
+ {HPHW_IOA, 0x582, 0x0000B, 0x10, "Astro BC Runway Port"},
+ {HPHW_IOA, 0x700, 0x0000B, 0x00, "NEC-IOS BC System Bus Port"},
+ {HPHW_IOA, 0x880, 0x0000C, 0x10, "Pluto BC McKinley Port"},
+ {HPHW_MEMORY, 0x002, 0x00008, 0x00, "MID_BUS"},
+ {HPHW_MEMORY, 0x063, 0x00009, 0x00, "712/132 L2 Upgrade"},
+ {HPHW_MEMORY, 0x064, 0x00009, 0x00, "712/160 L2 Upgrade"},
+ {HPHW_MEMORY, 0x065, 0x00009, 0x00, "715/132 L2 Upgrade"},
+ {HPHW_MEMORY, 0x066, 0x00009, 0x00, "715/160 L2 Upgrade"},
+ {HPHW_MEMORY, 0x0AF, 0x00009, 0x00, "Everest Mako Memory"},
+ {HPHW_OTHER, 0x004, 0x00030, 0x00, "Master"},
+ {HPHW_OTHER, 0x004, 0x00034, 0x00, "Slave"},
+ {HPHW_OTHER, 0x004, 0x00038, 0x00, "EDU"},
+ {HPHW_OTHER, 0x004, 0x00049, 0x00, "LGB Control"},
+ {HPHW_MC, 0x004, 0x000C0, 0x00, "BMC IPMI Mgmt Ctlr"},
+ {HPHW_FAULTY, 0, } /* Special Marker for last entry */
+};
+
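+/*
+ * A note on the table above (summarising the lookup code that follows, not
+ * taken from PDC documentation): entries are matched on the (hw_type,
+ * hversion, sversion) triple, and the list is terminated by the HPHW_FAULTY
+ * marker rather than an element count.  For example, a device reporting
+ * hw_type HPHW_FIO, hversion 0x016 and sversion 0x0008A resolves to
+ * "Gecko Core LAN (802.3)" via parisc_hardware_description() below.
+ */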
+
+static struct hp_cpu_type_mask {
+ unsigned short model;
+ unsigned short mask;
+ enum cpu_type cpu;
+} hp_cpu_type_mask_list[] __initdata = {
+
+ { 0x0000, 0x0ff0, pcx }, /* 0x0000 - 0x000f */
+ { 0x0048, 0x0ff0, pcxl }, /* 0x0040 - 0x004f */
+ { 0x0080, 0x0ff0, pcx }, /* 0x0080 - 0x008f */
+ { 0x0100, 0x0ff0, pcx }, /* 0x0100 - 0x010f */
+ { 0x0182, 0x0ffe, pcx }, /* 0x0182 - 0x0183 */
+ { 0x0182, 0x0ffe, pcxt }, /* 0x0182 - 0x0183 */
+ { 0x0184, 0x0fff, pcxu }, /* 0x0184 - 0x0184 */
+ { 0x0200, 0x0ffe, pcxs }, /* 0x0200 - 0x0201 */
+ { 0x0202, 0x0fff, pcxs }, /* 0x0202 - 0x0202 */
+ { 0x0203, 0x0fff, pcxt }, /* 0x0203 - 0x0203 */
+ { 0x0204, 0x0ffc, pcxt }, /* 0x0204 - 0x0207 */
+ { 0x0280, 0x0ffc, pcxs }, /* 0x0280 - 0x0283 */
+ { 0x0284, 0x0ffc, pcxt }, /* 0x0284 - 0x0287 */
+ { 0x0288, 0x0fff, pcxt }, /* 0x0288 - 0x0288 */
+ { 0x0300, 0x0ffc, pcxs }, /* 0x0300 - 0x0303 */
+ { 0x0310, 0x0ff0, pcxt }, /* 0x0310 - 0x031f */
+ { 0x0320, 0x0ff0, pcxt }, /* 0x0320 - 0x032f */
+ { 0x0400, 0x0ff0, pcxt }, /* 0x0400 - 0x040f */
+ { 0x0480, 0x0ff0, pcxl }, /* 0x0480 - 0x048f */
+ { 0x0500, 0x0ff0, pcxl2 }, /* 0x0500 - 0x050f */
+ { 0x0510, 0x0ff0, pcxl2 }, /* 0x0510 - 0x051f */
+ { 0x0580, 0x0ff8, pcxt_ }, /* 0x0580 - 0x0587 */
+ { 0x0588, 0x0ffc, pcxt_ }, /* 0x0588 - 0x058b */
+ { 0x058c, 0x0ffe, pcxt_ }, /* 0x058c - 0x058d */
+ { 0x058e, 0x0fff, pcxt_ }, /* 0x058e - 0x058e */
+ { 0x058f, 0x0fff, pcxu }, /* 0x058f - 0x058f */
+ { 0x0590, 0x0ffe, pcxu }, /* 0x0590 - 0x0591 */
+ { 0x0592, 0x0fff, pcxt_ }, /* 0x0592 - 0x0592 */
+ { 0x0593, 0x0fff, pcxu }, /* 0x0593 - 0x0593 */
+ { 0x0594, 0x0ffc, pcxu }, /* 0x0594 - 0x0597 */
+ { 0x0598, 0x0ffe, pcxu_ }, /* 0x0598 - 0x0599 */
+ { 0x059a, 0x0ffe, pcxu }, /* 0x059a - 0x059b */
+ { 0x059c, 0x0fff, pcxu }, /* 0x059c - 0x059c */
+ { 0x059d, 0x0fff, pcxu_ }, /* 0x059d - 0x059d */
+ { 0x059e, 0x0fff, pcxt_ }, /* 0x059e - 0x059e */
+ { 0x059f, 0x0fff, pcxu }, /* 0x059f - 0x059f */
+ { 0x05a0, 0x0ffe, pcxt_ }, /* 0x05a0 - 0x05a1 */
+ { 0x05a2, 0x0ffe, pcxu }, /* 0x05a2 - 0x05a3 */
+ { 0x05a4, 0x0ffc, pcxu }, /* 0x05a4 - 0x05a7 */
+ { 0x05a8, 0x0ffc, pcxu }, /* 0x05a8 - 0x05ab */
+ { 0x05ad, 0x0fff, pcxu_ }, /* 0x05ad - 0x05ad */
+ { 0x05ae, 0x0ffe, pcxu_ }, /* 0x05ae - 0x05af */
+ { 0x05b0, 0x0ffe, pcxu_ }, /* 0x05b0 - 0x05b1 */
+ { 0x05b2, 0x0fff, pcxu_ }, /* 0x05b2 - 0x05b2 */
+ { 0x05b3, 0x0fff, pcxu }, /* 0x05b3 - 0x05b3 */
+ { 0x05b4, 0x0fff, pcxw }, /* 0x05b4 - 0x05b4 */
+ { 0x05b5, 0x0fff, pcxu_ }, /* 0x05b5 - 0x05b5 */
+ { 0x05b6, 0x0ffe, pcxu_ }, /* 0x05b6 - 0x05b7 */
+ { 0x05b8, 0x0ffe, pcxu_ }, /* 0x05b8 - 0x05b9 */
+ { 0x05ba, 0x0fff, pcxu_ }, /* 0x05ba - 0x05ba */
+ { 0x05bb, 0x0fff, pcxw }, /* 0x05bb - 0x05bb */
+ { 0x05bc, 0x0ffc, pcxw }, /* 0x05bc - 0x05bf */
+ { 0x05c0, 0x0ffc, pcxw }, /* 0x05c0 - 0x05c3 */
+ { 0x05c4, 0x0ffe, pcxw }, /* 0x05c4 - 0x05c5 */
+ { 0x05c6, 0x0fff, pcxw }, /* 0x05c6 - 0x05c6 */
+ { 0x05c7, 0x0fff, pcxw_ }, /* 0x05c7 - 0x05c7 */
+ { 0x05c8, 0x0ffc, pcxw }, /* 0x05c8 - 0x05cb */
+ { 0x05cc, 0x0ffe, pcxw }, /* 0x05cc - 0x05cd */
+ { 0x05ce, 0x0ffe, pcxw_ }, /* 0x05ce - 0x05cf */
+ { 0x05d0, 0x0ffc, pcxw_ }, /* 0x05d0 - 0x05d3 */
+ { 0x05d4, 0x0ffe, pcxw_ }, /* 0x05d4 - 0x05d5 */
+ { 0x05d6, 0x0fff, pcxw }, /* 0x05d6 - 0x05d6 */
+ { 0x05d7, 0x0fff, pcxw_ }, /* 0x05d7 - 0x05d7 */
+ { 0x05d8, 0x0ffc, pcxw_ }, /* 0x05d8 - 0x05db */
+ { 0x05dc, 0x0ffe, pcxw2 }, /* 0x05dc - 0x05dd */
+ { 0x05de, 0x0fff, pcxw_ }, /* 0x05de - 0x05de */
+ { 0x05df, 0x0fff, pcxw2 }, /* 0x05df - 0x05df */
+ { 0x05e0, 0x0ffc, pcxw2 }, /* 0x05e0 - 0x05e3 */
+ { 0x05e4, 0x0fff, pcxw2 }, /* 0x05e4 - 0x05e4 */
+ { 0x05e5, 0x0fff, pcxw_ }, /* 0x05e5 - 0x05e5 */
+ { 0x05e6, 0x0ffe, pcxw2 }, /* 0x05e6 - 0x05e7 */
+ { 0x05e8, 0x0ff8, pcxw2 }, /* 0x05e8 - 0x05ef */
+ { 0x05f0, 0x0ff0, pcxw2 }, /* 0x05f0 - 0x05ff */
+ { 0x0600, 0x0fe0, pcxl }, /* 0x0600 - 0x061f */
+ { 0x0880, 0x0ff0, mako }, /* 0x0880 - 0x088f */
+ { 0x0000, 0x0000, pcx } /* terminate table */
+};
+
+char *cpu_name_version[][2] = {
+ [pcx] = { "PA7000 (PCX)", "1.0" },
+ [pcxs] = { "PA7000 (PCX-S)", "1.1a" },
+ [pcxt] = { "PA7100 (PCX-T)", "1.1b" },
+ [pcxt_] = { "PA7200 (PCX-T')", "1.1c" },
+ [pcxl] = { "PA7100LC (PCX-L)", "1.1d" },
+ [pcxl2] = { "PA7300LC (PCX-L2)", "1.1e" },
+ [pcxu] = { "PA8000 (PCX-U)", "2.0" },
+ [pcxu_] = { "PA8200 (PCX-U+)", "2.0" },
+ [pcxw] = { "PA8500 (PCX-W)", "2.0" },
+ [pcxw_] = { "PA8600 (PCX-W+)", "2.0" },
+ [pcxw2] = { "PA8700 (PCX-W2)", "2.0" },
+ [mako] = { "PA8800 (Mako)", "2.0" }
+};
+
+const char * __init
+parisc_hardware_description(struct parisc_device_id *id)
+{
+ struct hp_hardware *listptr;
+
+ for (listptr = hp_hardware_list; listptr->hw_type != HPHW_FAULTY; listptr++) {
+ if ((listptr->hw_type == id->hw_type) &&
+ (listptr->hversion == id->hversion) &&
+ (listptr->sversion == id->sversion)){
+ return listptr->name;
+ }
+ }
+
+ /*
+ * ok, the above hardware table isn't complete, and we haven't found
+ * our device in this table. So let's now try to find a generic name
+ * to describe the given hardware...
+ */
+ switch (id->hw_type) {
+ case HPHW_NPROC:
+ return "Unknown machine";
+
+ case HPHW_A_DIRECT:
+ switch (id->sversion) {
+ case 0x0D: return "MUX port";
+ case 0x0E: return "RS-232 port";
+ }
+ break;
+
+ case HPHW_MEMORY:
+ return "Memory";
+
+ }
+
+ return "unknown device";
+}
+
+
+/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
+enum cpu_type __init
+parisc_get_cpu_type(unsigned long hversion)
+{
+ struct hp_cpu_type_mask *ptr;
+ unsigned short model = ((unsigned short) (hversion)) >> 4;
+
+ for (ptr = hp_cpu_type_mask_list; 0 != ptr->mask; ptr++) {
+ if (ptr->model == (model & ptr->mask))
+ return ptr->cpu;
+ }
+ panic("could not identify CPU type\n");
+
+ return pcx; /* not reached: */
+}
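+
+/* Worked example (the hversion value is purely illustrative): an hversion of
+ * 0x5c50 yields model 0x5c5 after the shift above; masked with 0x0ffe that is
+ * 0x5c4, which matches the { 0x05c4, 0x0ffe, pcxw } table entry, so the CPU
+ * is reported as a PA8500 (PCX-W).
+ */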
+
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
new file mode 100644
index 000000000000..ddf7e914f15e
--- /dev/null
+++ b/arch/parisc/kernel/head.S
@@ -0,0 +1,386 @@
+/* This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999 by Helge Deller
+ * Copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
+ * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
+ * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
+ * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
+ *
+ * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/autoconf.h> /* for CONFIG_SMP */
+
+#include <asm/offsets.h>
+#include <asm/psw.h>
+#include <asm/pdc.h>
+
+#include <asm/assembly.h>
+#include <asm/pgtable.h>
+
+ .level LEVEL
+
+ .data
+
+ .export boot_args
+boot_args:
+ .word 0 /* arg0 */
+ .word 0 /* arg1 */
+ .word 0 /* arg2 */
+ .word 0 /* arg3 */
+
+ .text
+ .align 4
+ .import init_thread_union,data
+ .import fault_vector_20,code /* IVA parisc 2.0 32 bit */
+#ifndef __LP64__
+ .import fault_vector_11,code /* IVA parisc 1.1 32 bit */
+ .import $global$ /* forward declaration */
+#endif /*!LP64*/
+ .export stext
+	.export _stext,data		/* Kernel wants it this way! */
+_stext:
+stext:
+ .proc
+ .callinfo
+
+ /* Make sure sr4-sr7 are set to zero for the kernel address space */
+ mtsp %r0,%sr4
+ mtsp %r0,%sr5
+ mtsp %r0,%sr6
+ mtsp %r0,%sr7
+
+ /* Clear BSS (shouldn't the boot loader do this?) */
+
+ .import __bss_start,data
+ .import __bss_stop,data
+
+ load32 PA(__bss_start),%r3
+ load32 PA(__bss_stop),%r4
+$bss_loop:
+ cmpb,<<,n %r3,%r4,$bss_loop
+ stw,ma %r0,4(%r3)
+
+ /* Save away the arguments the boot loader passed in (32 bit args) */
+ load32 PA(boot_args),%r1
+ stw,ma %arg0,4(%r1)
+ stw,ma %arg1,4(%r1)
+ stw,ma %arg2,4(%r1)
+ stw,ma %arg3,4(%r1)
+
+ /* Initialize startup VM. Just map first 8/16 MB of memory */
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+#ifdef __LP64__
+ /* Set pmd in pgd */
+ load32 PA(pmd0),%r5
+ shrd %r5,PxD_VALUE_SHIFT,%r3
+ ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+ stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
+ ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
+#else
+ /* 2-level page table, so pmd == pgd */
+ ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+ /* Fill in pmd with enough pte directories */
+ load32 PA(pg0),%r1
+ SHRREG %r1,PxD_VALUE_SHIFT,%r3
+ ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
+
+ ldi ASM_PT_INITIAL,%r1
+
+1:
+ stw %r3,0(%r4)
+ ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
+ addib,> -1,%r1,1b
+#ifdef __LP64__
+ ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
+#else
+ ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
+#endif
+
+
+ /* Now initialize the PTEs themselves */
+ ldo _PAGE_KERNEL(%r0),%r3 /* Hardwired 0 phys addr start */
+ load32 PA(pg0),%r1
+
+$pgt_fill_loop:
+ STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
+ ldo ASM_PAGE_SIZE(%r3),%r3
+ bb,>= %r3,31-KERNEL_INITIAL_ORDER,$pgt_fill_loop
+ nop
+
+ /* Load the return address...er...crash 'n burn */
+ copy %r0,%r2
+
+ /* And the RFI Target address too */
+ load32 start_kernel,%r11
+
+ /* And the initial task pointer */
+ load32 init_thread_union,%r6
+ mtctl %r6,%cr30
+
+ /* And the stack pointer too */
+ ldo THREAD_SZ_ALGN(%r6),%sp
+
+ /* And the interrupt stack */
+ load32 interrupt_stack,%r6
+ mtctl %r6,%cr31
+
+#ifdef CONFIG_SMP
+	/* Set the smp rendezvous address into page zero.
+ ** It would be safer to do this in init_smp_config() but
+ ** it's just way easier to deal with here because
+ ** of 64-bit function ptrs and the address is local to this file.
+ */
+ load32 PA(smp_slave_stext),%r10
+ stw %r10,0x10(%r0) /* MEM_RENDEZ */
+ stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
+
+ /* FALLTHROUGH */
+ .procend
+
+ /*
+ ** Code Common to both Monarch and Slave processors.
+ ** Entry:
+ **
+ ** 1.1:
+ ** %r11 must contain RFI target address.
+ ** %r25/%r26 args to pass to target function
+ ** %r2 in case rfi target decides it didn't like something
+ **
+ ** 2.0w:
+ ** %r3 PDCE_PROC address
+ ** %r11 RFI target address
+ **
+ ** Caller must init: SR4-7, %sp, %r10, %cr24/25,
+ */
+common_stext:
+ .proc
+ .callinfo
+#else
+ /* Clear PDC entry point - we won't use it */
+ stw %r0,0x10(%r0) /* MEM_RENDEZ */
+ stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
+#endif /*CONFIG_SMP*/
+
+#ifdef __LP64__
+ tophys_r1 %sp
+
+ /* Save the rfi target address */
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+ std %r11, TASK_PT_GR11(%r10)
+	/* Switch to wide mode; Superdome doesn't support narrow PDC
+ ** calls.
+ */
+1: mfia %rp /* clear upper part of pcoq */
+ ldo 2f-1b(%rp),%rp
+ depdi 0,31,32,%rp
+ bv (%rp)
+ ssm PSW_SM_W,%r0
+
+ /* Set Wide mode as the "Default" (eg for traps)
+ ** First trap occurs *right* after (or part of) rfi for slave CPUs.
+ ** Someday, palo might not do this for the Monarch either.
+ */
+2:
+#define MEM_PDC_LO 0x388
+#define MEM_PDC_HI 0x35C
+ ldw MEM_PDC_LO(%r0),%r3
+ ldw MEM_PDC_HI(%r0),%r6
+ depd %r6, 31, 32, %r3 /* move to upper word */
+
+ ldo PDC_PSW(%r0),%arg0 /* 21 */
+ ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
+ ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
+ load32 PA(stext_pdc_ret), %rp
+ bv (%r3)
+ copy %r0,%arg3
+
+stext_pdc_ret:
+ /* restore rfi target address*/
+ ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
+ tophys_r1 %r10
+ ldd TASK_PT_GR11(%r10), %r11
+ tovirt_r1 %sp
+#endif
+
+ /* PARANOID: clear user scratch/user space SR's */
+ mtsp %r0,%sr0
+ mtsp %r0,%sr1
+ mtsp %r0,%sr2
+ mtsp %r0,%sr3
+
+ /* Initialize Protection Registers */
+ mtctl %r0,%cr8
+ mtctl %r0,%cr9
+ mtctl %r0,%cr12
+ mtctl %r0,%cr13
+
+ /* Prepare to RFI! Man all the cannons! */
+
+ /* Initialize the global data pointer */
+ loadgp
+
+	/* Set up our interrupt table. HPMCs might not work after this!
+	 *
+	 * We need to install the correct iva for PA1.1 or PA2.0. The
+	 * following short sequence of instructions can determine this
+	 * (without being illegal on a PA1.1 machine): it writes 32 to
+	 * %cr11 (SAR) and reads it back. SAR is six bits wide on PA2.0
+	 * but only five bits wide on PA1.1, so the value reads back
+	 * non-zero only on a PA2.0 machine.
+	 */
+#ifndef __LP64__
+ ldi 32,%r10
+ mtctl %r10,%cr11
+ .level 2.0
+ mfctl,w %cr11,%r10
+ .level 1.1
+ comib,<>,n 0,%r10,$is_pa20
+ ldil L%PA(fault_vector_11),%r10
+ b $install_iva
+ ldo R%PA(fault_vector_11)(%r10),%r10
+
+$is_pa20:
+ .level LEVEL /* restore 1.1 || 2.0w */
+#endif /*!LP64*/
+ load32 PA(fault_vector_20),%r10
+
+$install_iva:
+ mtctl %r10,%cr14
+
+#ifdef __LP64__
+ b aligned_rfi
+ nop
+
+ .align 256
+aligned_rfi:
+ ssm 0,0
+ nop /* 1 */
+ nop /* 2 */
+ nop /* 3 */
+ nop /* 4 */
+ nop /* 5 */
+ nop /* 6 */
+ nop /* 7 */
+ nop /* 8 */
+#endif
+
+#ifdef __LP64__ /* move to psw.h? */
+#define PSW_BITS PSW_Q+PSW_I+PSW_D+PSW_P+PSW_R
+#else
+#define PSW_BITS PSW_SM_Q
+#endif
+
+$rfi:
+ /* turn off troublesome PSW bits */
+ rsm PSW_BITS,%r0
+
+ /* kernel PSW:
+ * - no interruptions except HPMC and TOC (which are handled by PDC)
+ * - Q bit set (IODC / PDC interruptions)
+ * - big-endian
+ * - virtually mapped
+ */
+ load32 KERNEL_PSW,%r10
+ mtctl %r10,%ipsw
+
+ /* Set the space pointers for the post-RFI world
+ ** Clear the two-level IIA Space Queue, effectively setting
+ ** Kernel space.
+ */
+ mtctl %r0,%cr17 /* Clear IIASQ tail */
+ mtctl %r0,%cr17 /* Clear IIASQ head */
+
+ /* Load RFI target into PC queue */
+ mtctl %r11,%cr18 /* IIAOQ head */
+ ldo 4(%r11),%r11
+ mtctl %r11,%cr18 /* IIAOQ tail */
+
+ /* Jump to hyperspace */
+ rfi
+ nop
+
+ .procend
+
+#ifdef CONFIG_SMP
+
+ .import smp_init_current_idle_task,data
+ .import smp_callin,code
+
+#ifndef __LP64__
+smp_callin_rtn:
+ .proc
+ .callinfo
+ break 1,1 /* Break if returned from start_secondary */
+ nop
+ nop
+ .procend
+#endif /*!LP64*/
+
+/***************************************************************************
+* smp_slave_stext is executed by all non-monarch Processors when the Monarch
+* pokes the slave CPUs in smp.c:smp_boot_cpus().
+*
+* Once here, register values are initialized in order to branch to virtual
+* mode. Once all available/eligible CPUs are in virtual mode, all are
+* released and start out by executing their own idle task.
+*****************************************************************************/
+smp_slave_stext:
+ .proc
+ .callinfo
+
+ /*
+ ** Initialize Space registers
+ */
+ mtsp %r0,%sr4
+ mtsp %r0,%sr5
+ mtsp %r0,%sr6
+ mtsp %r0,%sr7
+
+ /* Initialize the SP - monarch sets up smp_init_current_idle_task */
+ load32 PA(smp_init_current_idle_task),%sp
+ LDREG 0(%sp),%sp /* load task address */
+ tophys_r1 %sp
+ LDREG TASK_THREAD_INFO(%sp),%sp
+ mtctl %sp,%cr30 /* store in cr30 */
+ ldo THREAD_SZ_ALGN(%sp),%sp
+
+ /* point CPU to kernel page tables */
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+#ifdef __LP64__
+ /* Setup PDCE_PROC entry */
+ copy %arg0,%r3
+#else
+ /* Load RFI *return* address in case smp_callin bails */
+ load32 smp_callin_rtn,%r2
+#endif
+
+ /* Load RFI target address. */
+ load32 smp_callin,%r11
+
+ /* ok...common code can handle the rest */
+ b common_stext
+ nop
+
+ .procend
+#endif /* CONFIG_SMP */
+#ifndef __LP64__
+ .data
+
+ .align 4
+ .export $global$,data
+
+ .type $global$,@object
+ .size $global$,4
+$global$:
+ .word 0
+#endif /*!LP64*/
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
new file mode 100644
index 000000000000..c412c0adc4a9
--- /dev/null
+++ b/arch/parisc/kernel/hpmc.S
@@ -0,0 +1,304 @@
+/*
+ * HPMC (High Priority Machine Check) handler.
+ *
+ * Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
+ * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
+ * Copyright (C) 2000 Hewlett-Packard (John Marvin)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+ * This HPMC handler retrieves the HPMC pim data, resets IO and
+ * returns to the default trap handler with code set to 1 (HPMC).
+ * The default trap handler calls handle interruption, which
+ * does a stack and register dump. This at least allows kernel
+ * developers to get back to C code in virtual mode, where they
+ * have the option to examine and print values from memory that
+ * would help in debugging an HPMC caused by a software bug.
+ *
+ * There is more to do here:
+ *
+ * 1) On MP systems we need to synchronize processors
+ * before calling pdc/iodc.
+ * 2) We should be checking the system state and not
+ * returning to the fault handler if things are really
+ * bad.
+ *
+ */
+
+ .level 1.1
+ .data
+
+#include <asm/assembly.h>
+#include <asm/pdc.h>
+
+ /*
+ * stack for os_hpmc, the HPMC handler.
+ * buffer for IODC procedures (for the HPMC handler).
+ *
+ * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc.
+ */
+
+ .align 4096
+hpmc_stack:
+ .block 16384
+
+#define HPMC_IODC_BUF_SIZE 0x8000
+
+ .align 4096
+hpmc_iodc_buf:
+ .block HPMC_IODC_BUF_SIZE
+
+ .align 8
+hpmc_raddr:
+ .block 128
+
+#define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */
+
+ .export hpmc_pim_data, data
+ .align 8
+hpmc_pim_data:
+ .block HPMC_PIM_DATA_SIZE
+
+ .text
+
+ .export os_hpmc, code
+ .import intr_save, code
+
+os_hpmc:
+
+ /*
+ * registers modified:
+ *
+ * Using callee saves registers without saving them. The
+ * original values are in the pim dump if we need them.
+ *
+ * r2 (rp) return pointer
+ * r3 address of PDCE_PROC
+ * r4 scratch
+ * r5 scratch
+ * r23 (arg3) procedure arg
+ * r24 (arg2) procedure arg
+ * r25 (arg1) procedure arg
+ * r26 (arg0) procedure arg
+ * r30 (sp) stack pointer
+ *
+ * registers read:
+ *
+ * r26 contains address of PDCE_PROC on entry
+ * r28 (ret0) return value from procedure
+ */
+
+ copy arg0, %r3 /* save address of PDCE_PROC */
+
+ /*
+ * disable nested HPMCs
+ *
+ * Increment os_hpmc checksum to invalidate it.
+ * Do this before turning the PSW M bit off.
+ */
+
+ mfctl %cr14, %r4
+ ldw 52(%r4),%r5
+ addi 1,%r5,%r5
+ stw %r5,52(%r4)
+
+ /* MP_FIXME: synchronize all processors. */
+
+ /* Setup stack pointer. */
+
+ load32 PA(hpmc_stack),sp
+
+ ldo 128(sp),sp /* leave room for arguments */
+
+ /*
+ * Most PDC routines require that the M bit be off.
+ * So turn on the Q bit and turn off the M bit.
+ */
+
+ ldo 8(%r0),%r4 /* PSW Q on, PSW M off */
+ mtctl %r4,ipsw
+ mtctl %r0,pcsq
+ mtctl %r0,pcsq
+ load32 PA(os_hpmc_1),%r4
+ mtctl %r4,pcoq
+ ldo 4(%r4),%r4
+ mtctl %r4,pcoq
+ rfi
+ nop
+
+os_hpmc_1:
+
+ /* Call PDC_PIM to get HPMC pim info */
+
+ /*
+ * Note that on some newer boxes, PDC_PIM must be called
+ * before PDC_IO if you want IO to be reset. PDC_PIM sets
+ * a flag that PDC_IO examines.
+ */
+
+ ldo PDC_PIM(%r0), arg0
+ ldo PDC_PIM_HPMC(%r0),arg1 /* Transfer HPMC data */
+ load32 PA(hpmc_raddr),arg2
+ load32 PA(hpmc_pim_data),arg3
+ load32 HPMC_PIM_DATA_SIZE,%r4
+ stw %r4,-52(sp)
+
+ ldil L%PA(os_hpmc_2), rp
+ bv (r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_2)(rp), rp
+
+os_hpmc_2:
+ comib,<> 0,ret0, os_hpmc_fail
+
+ /* Reset IO by calling the hversion dependent PDC_IO routine */
+
+ ldo PDC_IO(%r0),arg0
+ ldo 0(%r0),arg1 /* log IO errors */
+ ldo 0(%r0),arg2 /* reserved */
+ ldo 0(%r0),arg3 /* reserved */
+ stw %r0,-52(sp) /* reserved */
+
+ ldil L%PA(os_hpmc_3),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_3)(rp),rp
+
+os_hpmc_3:
+
+ /* FIXME? Check for errors from PDC_IO (-1 might be OK) */
+
+ /*
+ * Initialize the IODC console device (HPA,SPA, path etc.
+	 * Initialize the IODC console device (HPA, SPA, path, etc.
+	 * are stored on page 0).
+
+ /*
+ * Load IODC into hpmc_iodc_buf by calling PDC_IODC.
+ * Note that PDC_IODC handles flushing the appropriate
+ * data and instruction cache lines.
+ */
+
+ ldo PDC_IODC(%r0),arg0
+ ldo PDC_IODC_READ(%r0),arg1
+ load32 PA(hpmc_raddr),arg2
+ ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg3 /* console hpa */
+ ldo PDC_IODC_RI_INIT(%r0),%r4
+ stw %r4,-52(sp)
+ load32 PA(hpmc_iodc_buf),%r4
+ stw %r4,-56(sp)
+ load32 HPMC_IODC_BUF_SIZE,%r4
+ stw %r4,-60(sp)
+
+ ldil L%PA(os_hpmc_4),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_4)(rp),rp
+
+os_hpmc_4:
+ comib,<> 0,ret0,os_hpmc_fail
+
+ /* Call the entry init (just loaded by PDC_IODC) */
+
+ ldw BOOT_CONSOLE_HPA_OFFSET(%r0),arg0 /* console hpa */
+ ldo ENTRY_INIT_MOD_DEV(%r0), arg1
+ ldw BOOT_CONSOLE_SPA_OFFSET(%r0),arg2 /* console spa */
+ depi 0,31,11,arg2 /* clear bits 21-31 */
+ ldo BOOT_CONSOLE_PATH_OFFSET(%r0),arg3 /* console path */
+ load32 PA(hpmc_raddr),%r4
+ stw %r4, -52(sp)
+ stw %r0, -56(sp) /* HV */
+ stw %r0, -60(sp) /* HV */
+ stw %r0, -64(sp) /* HV */
+ stw %r0, -68(sp) /* lang, must be zero */
+
+ load32 PA(hpmc_iodc_buf),%r5
+ ldil L%PA(os_hpmc_5),rp
+ bv (%r5)
+ ldo R%PA(os_hpmc_5)(rp),rp
+
+os_hpmc_5:
+ comib,<> 0,ret0,os_hpmc_fail
+
+ /* Prepare to call intr_save */
+
+ /*
+ * Load kernel page directory (load into user also, since
+ * we don't intend to ever return to user land anyway)
+ */
+
+ load32 PA(swapper_pg_dir),%r4
+ mtctl %r4,%cr24 /* Initialize kernel root pointer */
+ mtctl %r4,%cr25 /* Initialize user root pointer */
+
+ /* Clear sr4-sr7 */
+
+ mtsp %r0, %sr4
+ mtsp %r0, %sr5
+ mtsp %r0, %sr6
+ mtsp %r0, %sr7
+
+ tovirt_r1 %r30 /* make sp virtual */
+
+ rsm 8,%r0 /* Clear Q bit */
+ ldi 1,%r8 /* Set trap code to "1" for HPMC */
+ load32 PA(intr_save),%r1
+ be 0(%sr7,%r1)
+ nop
+
+os_hpmc_fail:
+
+ /*
+ * Reset the system
+ *
+ * Some systems may lockup from a broadcast reset, so try the
+ * hversion PDC_BROADCAST_RESET() first.
+ * MP_FIXME: reset all processors if more than one central bus.
+ */
+
+ /* PDC_BROADCAST_RESET() */
+
+ ldo PDC_BROADCAST_RESET(%r0),arg0
+ ldo 0(%r0),arg1 /* do reset */
+
+ ldil L%PA(os_hpmc_6),rp
+ bv (%r3) /* call pdce_proc */
+ ldo R%PA(os_hpmc_6)(rp),rp
+
+os_hpmc_6:
+
+ /*
+ * possible return values:
+ * -1 non-existent procedure
+ * -2 non-existent option
+ * -16 unaligned stack
+ *
+ * If call returned, do a broadcast reset.
+ */
+
+ ldil L%0xfffc0000,%r4 /* IO_BROADCAST */
+ ldo 5(%r0),%r5
+ stw %r5,48(%r4) /* CMD_RESET to IO_COMMAND offset */
+
+ b .
+ nop
+
+ /* this label used to compute os_hpmc checksum */
+
+ .export os_hpmc_end, code
+
+os_hpmc_end:
+
+ nop
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
new file mode 100644
index 000000000000..7e898fd64415
--- /dev/null
+++ b/arch/parisc/kernel/init_task.c
@@ -0,0 +1,76 @@
+/*
+ * Static declaration of "init" task data structure.
+ *
+ * Copyright (C) 2000 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2001 Helge Deller <deller @ parisc-linux.org>
+ * Copyright (C) 2002 Matthew Wilcox <willy with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 16384-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ * "init_task" linker map entry.
+unsigned char interrupt_stack[ISTACK_SIZE] __attribute__ ((section("init_istack"), aligned(4096)));
+union thread_union init_thread_union
+ __attribute__((aligned(128))) __attribute__((__section__(".data.init_task"))) =
+ { INIT_THREAD_INFO(init_task) };
+
+#ifdef __LP64__
+/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
+ * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
+ * guarantee that global objects will be laid out in memory in the same order
+ * as the order of declaration, so put these in different sections and use
+ * the linker script to order them. */
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pmd"))) = { {0}, };
+
+#endif
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pgd"))) = { {0}, };
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((aligned(PAGE_SIZE))) __attribute__ ((__section__ (".data.vm0.pte"))) = { {0}, };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+EXPORT_SYMBOL(init_task);
+
+__asm__(".data");
+struct task_struct init_task = INIT_TASK(init_task);
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
new file mode 100644
index 000000000000..1a1c66422736
--- /dev/null
+++ b/arch/parisc/kernel/inventory.c
@@ -0,0 +1,612 @@
+/*
+ * inventory.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
+ * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
+ *
+ * These are the routines to discover what hardware exists in this box.
+ * This task is complicated by there being 3 different ways of
+ * performing an inventory, depending largely on the age of the box.
+ * The recommended way to do this is to check to see whether the machine
+ * is a `Snake' first, then try System Map, then try PAT. We try System
+ * Map before checking for a Snake -- this probably doesn't cause any
+ * problems, but...
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mmzone.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/parisc-device.h>
+
+/*
+** Debug options
+** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
+*/
+#undef DEBUG_PAT
+
+int pdc_type = PDC_TYPE_ILLEGAL;
+
+void __init setup_pdc(void)
+{
+ long status;
+ unsigned int bus_id;
+ struct pdc_system_map_mod_info module_result;
+ struct pdc_module_path module_path;
+ struct pdc_model model;
+#ifdef __LP64__
+ struct pdc_pat_cell_num cell_info;
+#endif
+
+ /* Determine the pdc "type" used on this machine */
+
+ printk(KERN_INFO "Determining PDC firmware type: ");
+
+ status = pdc_system_map_find_mods(&module_result, &module_path, 0);
+ if (status == PDC_OK) {
+ pdc_type = PDC_TYPE_SYSTEM_MAP;
+ printk("System Map.\n");
+ return;
+ }
+
+ /*
+ * If the machine doesn't support PDC_SYSTEM_MAP then either it
+ * is a pdc pat box, or it is an older box. All 64 bit capable
+ * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
+ */
+
+ /*
+ * TODO: We should test for 64 bit capability and give a
+ * clearer message.
+ */
+
+#ifdef __LP64__
+ status = pdc_pat_cell_get_number(&cell_info);
+ if (status == PDC_OK) {
+ pdc_type = PDC_TYPE_PAT;
+ printk("64 bit PAT.\n");
+ return;
+ }
+#endif
+
+ /* Check the CPU's bus ID. There's probably a better test. */
+
+ status = pdc_model_info(&model);
+
+ bus_id = (model.hversion >> (4 + 7)) & 0x1f;
+
+ switch (bus_id) {
+ case 0x4: /* 720, 730, 750, 735, 755 */
+ case 0x6: /* 705, 710 */
+ case 0x7: /* 715, 725 */
+ case 0x8: /* 745, 747, 742 */
+	case 0xA:		/* 712 and similar */
+ case 0xC: /* 715/64, at least */
+
+ pdc_type = PDC_TYPE_SNAKE;
+ printk("Snake.\n");
+ return;
+
+ default: /* Everything else */
+
+ printk("Unsupported.\n");
+ panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
+ }
+}
+
+#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
+
+static void __init
+set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
+ unsigned long pages4k)
+{
+ /* Rather than aligning and potentially throwing away
+ * memory, we'll assume that any ranges are already
+ * nicely aligned with any reasonable page size, and
+ * panic if they are not (it's more likely that the
+ * pdc info is bad in this case).
+ */
+
+ if ( ((start & (PAGE_SIZE - 1)) != 0)
+ || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) ) {
+
+ panic("Memory range doesn't align with page size!\n");
+ }
+
+ pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
+ pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
+}
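+
+/* Illustrative arithmetic: with 4 KB kernel pages PDC_PAGE_ADJ_SHIFT is 0 and
+ * pages4k passes through unchanged; with a (hypothetical) 16 KB PAGE_SIZE the
+ * shift is 2, so 1024 firmware pages (4 MB) become 256 kernel pages.
+ */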
+
+static void __init pagezero_memconfig(void)
+{
+ unsigned long npages;
+
+ /* Use the 32 bit information from page zero to create a single
+ * entry in the pmem_ranges[] table.
+ *
+ * We currently don't support machines with contiguous memory
+	 * >= 4 GB, which report that memory using 64-bit-only fields
+ * on page zero. It's not worth doing until it can be tested,
+ * and it is not clear we can support those machines for other
+ * reasons.
+ *
+ * If that support is done in the future, this is where it
+ * should be done.
+ */
+
+ npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
+ set_pmem_entry(pmem_ranges,0UL,npages);
+ npmem_ranges = 1;
+}
+
+#ifdef __LP64__
+
+/* All of the PDC PAT specific code is 64-bit only */
+
+/*
+** The module object is filled via PDC_PAT_CELL[Return Cell Module].
+** If a module is found, register module will get the IODC bytes via
+** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
+**
+** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
+** only for SBAs and LBAs. This view will cause an invalid
+** argument error for all other cell module types.
+**
+*/
+
+static int __init
+pat_query_module(ulong pcell_loc, ulong mod_index)
+{
+ pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+ unsigned long bytecnt;
+ unsigned long temp; /* 64-bit scratch value */
+ long status; /* PDC return value status */
+ struct parisc_device *dev;
+
+ /* return cell module (PA or Processor view) */
+ status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+ PA_VIEW, &pa_pdc_cell);
+
+ if (status != PDC_OK) {
+ /* no more cell modules or error */
+ return status;
+ }
+
+ temp = pa_pdc_cell.cba;
+ dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
+ if (!dev) {
+ return PDC_NE_MOD;
+ }
+
+ /* alloc_pa_dev sets dev->hpa */
+
+ /*
+ ** save parameters in the parisc_device
+ ** (The idea being the device driver will call pdc_pat_cell_module()
+ ** and store the results in its own data structure.)
+ */
+ dev->pcell_loc = pcell_loc;
+ dev->mod_index = mod_index;
+
+ /* save generic info returned from the call */
+ /* REVISIT: who is the consumer of this? not sure yet... */
+ dev->mod_info = pa_pdc_cell.mod_info; /* pass to PAT_GET_ENTITY() */
+ dev->pmod_loc = pa_pdc_cell.mod_location;
+
+ register_parisc_device(dev); /* advertise device */
+
+#ifdef DEBUG_PAT
+ pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
+ /* dump what we see so far... */
+ switch (PAT_GET_ENTITY(dev->mod_info)) {
+ unsigned long i;
+
+ case PAT_ENTITY_PROC:
+ printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
+ pa_pdc_cell.mod[0]);
+ break;
+
+ case PAT_ENTITY_MEM:
+ printk(KERN_DEBUG
+ "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
+ pa_pdc_cell.mod[0], pa_pdc_cell.mod[1],
+ pa_pdc_cell.mod[2]);
+ break;
+ case PAT_ENTITY_CA:
+ printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
+ break;
+
+ case PAT_ENTITY_PBC:
+ printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
+ goto print_ranges;
+
+ case PAT_ENTITY_SBA:
+ printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
+ goto print_ranges;
+
+ case PAT_ENTITY_LBA:
+ printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
+
+ print_ranges:
+ pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
+ IO_VIEW, &io_pdc_cell);
+ printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell.mod[1]);
+ for (i = 0; i < pa_pdc_cell.mod[1]; i++) {
+ printk(KERN_DEBUG
+ " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
+ i, pa_pdc_cell.mod[2 + i * 3], /* type */
+ pa_pdc_cell.mod[3 + i * 3], /* start */
+ pa_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ printk(KERN_DEBUG
+ " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
+ i, io_pdc_cell.mod[2 + i * 3], /* type */
+ io_pdc_cell.mod[3 + i * 3], /* start */
+ io_pdc_cell.mod[4 + i * 3]); /* finish (ie end) */
+ }
+ printk(KERN_DEBUG "\n");
+ break;
+ }
+#endif /* DEBUG_PAT */
+ return PDC_OK;
+}
+
+
+/* pat pdc can return information about a variety of different
+ * types of memory (e.g. firmware,i/o, etc) but we only care about
+ * the usable physical ram right now. Since the firmware specific
+ * information is allocated on the stack, we'll be generous, in
+ * case there is a lot of other information we don't care about.
+ */
+
+#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
+
+static void __init pat_memconfig(void)
+{
+ unsigned long actual_len;
+ struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
+ struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
+ long status;
+ int entries;
+ unsigned long length;
+ int i;
+
+ length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
+
+ if ((status != PDC_OK)
+ || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
+
+ /* The above pdc call shouldn't fail, but, just in
+ * case, just use the PAGE0 info.
+ */
+
+ printk("\n\n\n");
+ printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
+ "All memory may not be used!\n\n\n");
+ pagezero_memconfig();
+ return;
+ }
+
+ entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
+
+ if (entries > PAT_MAX_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory may not be used!\n");
+ }
+
+ /* Copy information into the firmware independent pmem_ranges
+ * array, skipping types we don't care about. Notice we said
+ * "may" above. We'll use all the entries that were returned.
+ */
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
+ || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
+ || (mtbl_ptr->pages == 0)
+ || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
+ && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
+
+ continue;
+ }
+
+ if (npmem_ranges == MAX_PHYSMEM_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory will not be used!\n");
+ break;
+ }
+
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
+}
+
+static int __init pat_inventory(void)
+{
+ int status;
+ ulong mod_index = 0;
+ struct pdc_pat_cell_num cell_info;
+
+ /*
+	** Note: Prelude (and its successors: Lclass, A400/500) only
+ ** implement PDC_PAT_CELL sub-options 0 and 2.
+ */
+ status = pdc_pat_cell_get_number(&cell_info);
+ if (status != PDC_OK) {
+ return 0;
+ }
+
+#ifdef DEBUG_PAT
+ printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
+ cell_info.cell_loc);
+#endif
+
+ while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
+ mod_index++;
+ }
+
+ return mod_index;
+}
+
+/* We only look for extended memory ranges on a 64 bit capable box */
+static void __init sprockets_memconfig(void)
+{
+ struct pdc_memory_table_raddr r_addr;
+ struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
+ struct pdc_memory_table *mtbl_ptr;
+ physmem_range_t *pmem_ptr;
+ long status;
+ int entries;
+ int i;
+
+ status = pdc_mem_mem_table(&r_addr,mem_table,
+ (unsigned long)MAX_PHYSMEM_RANGES);
+
+ if (status != PDC_OK) {
+
+ /* The above pdc call only works on boxes with sprockets
+ * firmware (newer B,C,J class). Other non PAT PDC machines
+		 * do support more than 3.75 GB of memory, but we don't
+ * support them yet.
+ */
+
+ pagezero_memconfig();
+ return;
+ }
+
+ if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
+ printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
+ printk(KERN_WARNING "Some memory will not be used!\n");
+ }
+
+ entries = (int)r_addr.entries_returned;
+
+ npmem_ranges = 0;
+ mtbl_ptr = mem_table;
+ pmem_ptr = pmem_ranges; /* Global firmware independent table */
+ for (i = 0; i < entries; i++,mtbl_ptr++) {
+ set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
+ npmem_ranges++;
+ }
+}
+
+#else /* !__LP64__ */
+
+#define pat_inventory() do { } while (0)
+#define pat_memconfig() do { } while (0)
+#define sprockets_memconfig() pagezero_memconfig()
+
+#endif /* !__LP64__ */
+
+
+#ifndef CONFIG_PA20
+
+/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
+
+static struct parisc_device * __init
+legacy_create_device(struct pdc_memory_map *r_addr,
+ struct pdc_module_path *module_path)
+{
+ struct parisc_device *dev;
+ int status = pdc_mem_map_hpa(r_addr, module_path);
+ if (status != PDC_OK)
+ return NULL;
+
+ dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
+ if (dev == NULL)
+ return NULL;
+
+ register_parisc_device(dev);
+ return dev;
+}
+
+/**
+ * snake_inventory
+ *
+ * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
+ * To use it, we initialise the mod_path.bc to 0xff and try all values of
+ * mod to get the HPA for the top-level devices. Bus adapters may have
+ * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
+ * module, then trying all possible functions.
+ */
+static void __init snake_inventory(void)
+{
+ int mod;
+ for (mod = 0; mod < 16; mod++) {
+ struct parisc_device *dev;
+ struct pdc_module_path module_path;
+ struct pdc_memory_map r_addr;
+ unsigned int func;
+
+ memset(module_path.path.bc, 0xff, 6);
+ module_path.path.mod = mod;
+ dev = legacy_create_device(&r_addr, &module_path);
+ if ((!dev) || (dev->id.hw_type != HPHW_BA))
+ continue;
+
+ memset(module_path.path.bc, 0xff, 4);
+ module_path.path.bc[4] = mod;
+
+ for (func = 0; func < 16; func++) {
+ module_path.path.bc[5] = 0;
+ module_path.path.mod = func;
+ legacy_create_device(&r_addr, &module_path);
+ }
+ }
+}
+
+#else /* CONFIG_PA20 */
+#define snake_inventory() do { } while (0)
+#endif /* CONFIG_PA20 */
+
+/* Common 32/64 bit based code goes here */
+
+/**
+ * add_system_map_addresses - Add additional addresses to the parisc device.
+ * @dev: The parisc device.
+ * @num_addrs: The number of addresses to add.
+ * @module_instance: The system_map module instance.
+ *
+ * This function adds any additional addresses reported by the system_map
+ * firmware to the parisc device.
+ */
+static void __init
+add_system_map_addresses(struct parisc_device *dev, int num_addrs,
+ int module_instance)
+{
+ int i;
+ long status;
+ struct pdc_system_map_addr_info addr_result;
+
+ dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
+ if(!dev->addr) {
+ printk(KERN_ERR "%s %s(): memory allocation failure\n",
+ __FILE__, __FUNCTION__);
+ return;
+ }
+
+ for(i = 1; i <= num_addrs; ++i) {
+ status = pdc_system_map_find_addrs(&addr_result,
+ module_instance, i);
+ if(PDC_OK == status) {
+ dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
+ dev->num_addrs++;
+ } else {
+ printk(KERN_WARNING
+ "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
+ status, i);
+ }
+ }
+}
+
+/**
+ * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
+ *
+ * This function attempts to retrieve and register all the devices firmware
+ * knows about via the SYSTEM_MAP PDC call.
+ */
+static void __init system_map_inventory(void)
+{
+ int i;
+ long status = PDC_OK;
+
+ for (i = 0; i < 256; i++) {
+ struct parisc_device *dev;
+ struct pdc_system_map_mod_info module_result;
+ struct pdc_module_path module_path;
+
+ status = pdc_system_map_find_mods(&module_result,
+ &module_path, i);
+ if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
+ break;
+ if (status != PDC_OK)
+ continue;
+
+ dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
+ if (!dev)
+ continue;
+
+ register_parisc_device(dev);
+
+ /* if available, get the additional addresses for a module */
+ if (!module_result.add_addrs)
+ continue;
+
+ add_system_map_addresses(dev, module_result.add_addrs, i);
+ }
+
+ walk_central_bus();
+ return;
+}
+
+void __init do_memory_inventory(void)
+{
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ pat_memconfig();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ sprockets_memconfig();
+ break;
+
+ case PDC_TYPE_SNAKE:
+ pagezero_memconfig();
+ return;
+
+ default:
+ panic("Unknown PDC type!\n");
+ }
+
+ if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
+ printk(KERN_WARNING "Bad memory configuration returned!\n");
+ printk(KERN_WARNING "Some memory may not be used!\n");
+ pagezero_memconfig();
+ }
+}
+
+void __init do_device_inventory(void)
+{
+ printk(KERN_INFO "Searching for devices...\n");
+
+ init_parisc_bus();
+
+ switch (pdc_type) {
+
+ case PDC_TYPE_PAT:
+ pat_inventory();
+ break;
+
+ case PDC_TYPE_SYSTEM_MAP:
+ system_map_inventory();
+ break;
+
+ case PDC_TYPE_SNAKE:
+ snake_inventory();
+ break;
+
+ default:
+ panic("Unknown PDC type!\n");
+ }
+ printk(KERN_INFO "Found devices:\n");
+ print_parisc_devices();
+}
diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c
new file mode 100644
index 000000000000..1d3824b670d1
--- /dev/null
+++ b/arch/parisc/kernel/ioctl32.c
@@ -0,0 +1,625 @@
+/* $Id: ioctl32.c,v 1.5 2002/10/18 00:21:43 varenet Exp $
+ * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
+ *
+ * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
+ * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * ioctls.
+ */
+
+#include <linux/syscalls.h>
+
+#define INCLUDES
+#include "compat_ioctl.c"
+
+#include <asm/perf.h>
+#include <asm/ioctls.h>
+
+#define CODE
+#include "compat_ioctl.c"
+
+/* Use this to get at 32-bit user passed pointers.
+ See sys_sparc32.c for description about these. */
+#define A(__x) ((unsigned long)(__x))
+/* The same for use with copy_from_user() and copy_to_user(). */
+#define B(__x) ((void *)(unsigned long)(__x))
+
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+/* This really belongs in include/linux/drm.h -DaveM */
+#include "../../../drivers/char/drm/drm.h"
+
+typedef struct drm32_version {
+ int version_major; /* Major version */
+ int version_minor; /* Minor version */
+	int version_patchlevel;	/* Patch level */
+ int name_len; /* Length of name buffer */
+ u32 name; /* Name of driver */
+ int date_len; /* Length of date buffer */
+ u32 date; /* User-space buffer to hold date */
+ int desc_len; /* Length of desc buffer */
+ u32 desc; /* User-space buffer to hold desc */
+} drm32_version_t;
+#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
+
+static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_version_t *uversion = (drm32_version_t *)arg;
+ char *name_ptr, *date_ptr, *desc_ptr;
+ u32 tmp1, tmp2, tmp3;
+ drm_version_t kversion;
+ mm_segment_t old_fs;
+ int ret;
+
+ memset(&kversion, 0, sizeof(kversion));
+ if (get_user(kversion.name_len, &uversion->name_len) ||
+ get_user(kversion.date_len, &uversion->date_len) ||
+ get_user(kversion.desc_len, &uversion->desc_len) ||
+ get_user(tmp1, &uversion->name) ||
+ get_user(tmp2, &uversion->date) ||
+ get_user(tmp3, &uversion->desc))
+ return -EFAULT;
+
+ name_ptr = (char *) A(tmp1);
+ date_ptr = (char *) A(tmp2);
+ desc_ptr = (char *) A(tmp3);
+
+ ret = -ENOMEM;
+ if (kversion.name_len && name_ptr) {
+ kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
+ if (!kversion.name)
+ goto out;
+ }
+ if (kversion.date_len && date_ptr) {
+ kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
+ if (!kversion.date)
+ goto out;
+ }
+ if (kversion.desc_len && desc_ptr) {
+ kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
+ if (!kversion.desc)
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if ((kversion.name &&
+ copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
+ (kversion.date &&
+ copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
+ (kversion.desc &&
+ copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
+ ret = -EFAULT;
+ if (put_user(kversion.version_major, &uversion->version_major) ||
+ put_user(kversion.version_minor, &uversion->version_minor) ||
+ put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
+ put_user(kversion.name_len, &uversion->name_len) ||
+ put_user(kversion.date_len, &uversion->date_len) ||
+ put_user(kversion.desc_len, &uversion->desc_len))
+ ret = -EFAULT;
+ }
+
+out:
+ if (kversion.name)
+ kfree(kversion.name);
+ if (kversion.date)
+ kfree(kversion.date);
+ if (kversion.desc)
+ kfree(kversion.desc);
+ return ret;
+}
+
+typedef struct drm32_unique {
+ int unique_len; /* Length of unique */
+ u32 unique; /* Unique name for driver instantiation */
+} drm32_unique_t;
+#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
+#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
+
+static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_unique_t *uarg = (drm32_unique_t *)arg;
+ drm_unique_t karg;
+ mm_segment_t old_fs;
+ char *uptr;
+ u32 tmp;
+ int ret;
+
+ if (get_user(karg.unique_len, &uarg->unique_len))
+ return -EFAULT;
+ karg.unique = NULL;
+
+ if (get_user(tmp, &uarg->unique))
+ return -EFAULT;
+
+ uptr = (char *) A(tmp);
+
+ if (uptr) {
+ karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
+ if (!karg.unique)
+ return -ENOMEM;
+ if (cmd == DRM32_IOCTL_SET_UNIQUE &&
+ copy_from_user(karg.unique, uptr, karg.unique_len)) {
+ kfree(karg.unique);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ if (cmd == DRM32_IOCTL_GET_UNIQUE)
+ ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
+ else
+ ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (cmd == DRM32_IOCTL_GET_UNIQUE &&
+ uptr != NULL &&
+ copy_to_user(uptr, karg.unique, karg.unique_len))
+ ret = -EFAULT;
+ if (put_user(karg.unique_len, &uarg->unique_len))
+ ret = -EFAULT;
+ }
+
+ if (karg.unique != NULL)
+ kfree(karg.unique);
+
+ return ret;
+}
+
+typedef struct drm32_map {
+ u32 offset; /* Requested physical address (0 for SAREA)*/
+ u32 size; /* Requested physical size (bytes) */
+ drm_map_type_t type; /* Type of memory to map */
+ drm_map_flags_t flags; /* Flags */
+ u32 handle; /* User-space: "Handle" to pass to mmap */
+ /* Kernel-space: kernel-virtual address */
+ int mtrr; /* MTRR slot used */
+ /* Private data */
+} drm32_map_t;
+#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
+
+static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_map_t *uarg = (drm32_map_t *) arg;
+ drm_map_t karg;
+ mm_segment_t old_fs;
+ u32 tmp;
+ int ret;
+
+ ret = get_user(karg.offset, &uarg->offset);
+ ret |= get_user(karg.size, &uarg->size);
+ ret |= get_user(karg.type, &uarg->type);
+ ret |= get_user(karg.flags, &uarg->flags);
+ ret |= get_user(tmp, &uarg->handle);
+ ret |= get_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ return -EFAULT;
+
+ karg.handle = (void *) A(tmp);
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ ret = put_user(karg.offset, &uarg->offset);
+ ret |= put_user(karg.size, &uarg->size);
+ ret |= put_user(karg.type, &uarg->type);
+ ret |= put_user(karg.flags, &uarg->flags);
+ tmp = (u32) (long)karg.handle;
+ ret |= put_user(tmp, &uarg->handle);
+ ret |= put_user(karg.mtrr, &uarg->mtrr);
+ if (ret)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+typedef struct drm32_buf_info {
+ int count; /* Entries in list */
+ u32 list; /* (drm_buf_desc_t *) */
+} drm32_buf_info_t;
+#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
+
+static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
+ drm_buf_desc_t *ulist;
+ drm_buf_info_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
+ return -EFAULT;
+
+ ulist = (drm_buf_desc_t *) A(tmp);
+
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
+ if (!karg.list)
+		return -ENOMEM;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (karg.count <= orig_count &&
+ (copy_to_user(ulist, karg.list,
+ karg.count * sizeof(drm_buf_desc_t))))
+ ret = -EFAULT;
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ kfree(karg.list);
+
+ return ret;
+}
+
+typedef struct drm32_buf_free {
+ int count;
+ u32 list; /* (int *) */
+} drm32_buf_free_t;
+#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
+
+static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
+ drm_buf_free_t karg;
+ mm_segment_t old_fs;
+ int *ulist;
+ int ret;
+ u32 tmp;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->list))
+ return -EFAULT;
+
+ ulist = (int *) A(tmp);
+
+ karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
+ goto out;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+out:
+ kfree(karg.list);
+
+ return ret;
+}
+
+typedef struct drm32_buf_pub {
+ int idx; /* Index into master buflist */
+ int total; /* Buffer size */
+ int used; /* Amount of buffer in use (for DMA) */
+ u32 address; /* Address of buffer (void *) */
+} drm32_buf_pub_t;
+
+typedef struct drm32_buf_map {
+ int count; /* Length of buflist */
+ u32 virtual; /* Mmaped area in user-virtual (void *) */
+ u32 list; /* Buffer information (drm_buf_pub_t *) */
+} drm32_buf_map_t;
+#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
+
+static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
+ drm32_buf_pub_t *ulist;
+ drm_buf_map_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret, i;
+ u32 tmp1, tmp2;
+
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp1, &uarg->virtual) ||
+ get_user(tmp2, &uarg->list))
+ return -EFAULT;
+
+ karg.virtual = (void *) A(tmp1);
+ ulist = (drm32_buf_pub_t *) A(tmp2);
+
+ orig_count = karg.count;
+
+ karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
+ if (!karg.list)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ for (i = 0; i < karg.count; i++) {
+ if (get_user(karg.list[i].idx, &ulist[i].idx) ||
+ get_user(karg.list[i].total, &ulist[i].total) ||
+ get_user(karg.list[i].used, &ulist[i].used) ||
+ get_user(tmp1, &ulist[i].address))
+ goto out;
+
+ karg.list[i].address = (void *) A(tmp1);
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ for (i = 0; i < orig_count; i++) {
+ tmp1 = (u32) (long) karg.list[i].address;
+ if (put_user(karg.list[i].idx, &ulist[i].idx) ||
+ put_user(karg.list[i].total, &ulist[i].total) ||
+ put_user(karg.list[i].used, &ulist[i].used) ||
+ put_user(tmp1, &ulist[i].address)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+out:
+ kfree(karg.list);
+ return ret;
+}
+
+typedef struct drm32_dma {
+ /* Indices here refer to the offset into
+ buflist in drm_buf_get_t. */
+ int context; /* Context handle */
+ int send_count; /* Number of buffers to send */
+ u32 send_indices; /* List of handles to buffers (int *) */
+ u32 send_sizes; /* Lengths of data to send (int *) */
+ drm_dma_flags_t flags; /* Flags */
+ int request_count; /* Number of buffers requested */
+ int request_size; /* Desired size for buffers */
+ u32 request_indices; /* Buffer information (int *) */
+ u32 request_sizes; /* (int *) */
+ int granted_count; /* Number of buffers granted */
+} drm32_dma_t;
+#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
+
+/* RED PEN The DRM layer blindly dereferences the send/request
+ * index/size arrays even though they are userland
+ * pointers. -DaveM
+ */
+static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_dma_t *uarg = (drm32_dma_t *) arg;
+ int *u_si, *u_ss, *u_ri, *u_rs;
+ drm_dma_t karg;
+ mm_segment_t old_fs;
+ int ret;
+ u32 tmp1, tmp2, tmp3, tmp4;
+
+ karg.send_indices = karg.send_sizes = NULL;
+ karg.request_indices = karg.request_sizes = NULL;
+
+ if (get_user(karg.context, &uarg->context) ||
+ get_user(karg.send_count, &uarg->send_count) ||
+ get_user(tmp1, &uarg->send_indices) ||
+ get_user(tmp2, &uarg->send_sizes) ||
+ get_user(karg.flags, &uarg->flags) ||
+ get_user(karg.request_count, &uarg->request_count) ||
+ get_user(karg.request_size, &uarg->request_size) ||
+ get_user(tmp3, &uarg->request_indices) ||
+ get_user(tmp4, &uarg->request_sizes) ||
+ get_user(karg.granted_count, &uarg->granted_count))
+ return -EFAULT;
+
+ u_si = (int *) A(tmp1);
+ u_ss = (int *) A(tmp2);
+ u_ri = (int *) A(tmp3);
+ u_rs = (int *) A(tmp4);
+
+ if (karg.send_count) {
+ karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+ karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.send_indices || !karg.send_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.send_indices, u_si,
+ (karg.send_count * sizeof(int))) ||
+ copy_from_user(karg.send_sizes, u_ss,
+ (karg.send_count * sizeof(int))))
+ goto out;
+ }
+
+ if (karg.request_count) {
+ karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+ karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
+
+ ret = -ENOMEM;
+ if (!karg.request_indices || !karg.request_sizes)
+ goto out;
+
+ ret = -EFAULT;
+ if (copy_from_user(karg.request_indices, u_ri,
+ (karg.request_count * sizeof(int))) ||
+ copy_from_user(karg.request_sizes, u_rs,
+ (karg.request_count * sizeof(int))))
+ goto out;
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (put_user(karg.context, &uarg->context) ||
+ put_user(karg.send_count, &uarg->send_count) ||
+ put_user(karg.flags, &uarg->flags) ||
+ put_user(karg.request_count, &uarg->request_count) ||
+ put_user(karg.request_size, &uarg->request_size) ||
+ put_user(karg.granted_count, &uarg->granted_count))
+ ret = -EFAULT;
+
+ if (karg.send_count) {
+ if (copy_to_user(u_si, karg.send_indices,
+ (karg.send_count * sizeof(int))) ||
+ copy_to_user(u_ss, karg.send_sizes,
+ (karg.send_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ if (karg.request_count) {
+ if (copy_to_user(u_ri, karg.request_indices,
+ (karg.request_count * sizeof(int))) ||
+ copy_to_user(u_rs, karg.request_sizes,
+ (karg.request_count * sizeof(int))))
+ ret = -EFAULT;
+ }
+ }
+
+out:
+ if (karg.send_indices)
+ kfree(karg.send_indices);
+ if (karg.send_sizes)
+ kfree(karg.send_sizes);
+ if (karg.request_indices)
+ kfree(karg.request_indices);
+ if (karg.request_sizes)
+ kfree(karg.request_sizes);
+
+ return ret;
+}
+
+typedef struct drm32_ctx_res {
+ int count;
+ u32 contexts; /* (drm_ctx_t *) */
+} drm32_ctx_res_t;
+#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
+
+static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
+ drm_ctx_t *ulist;
+ drm_ctx_res_t karg;
+ mm_segment_t old_fs;
+ int orig_count, ret;
+ u32 tmp;
+
+ karg.contexts = NULL;
+ if (get_user(karg.count, &uarg->count) ||
+ get_user(tmp, &uarg->contexts))
+ return -EFAULT;
+
+ ulist = (drm_ctx_t *) A(tmp);
+
+ orig_count = karg.count;
+ if (karg.count && ulist) {
+ karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
+ if (!karg.contexts)
+ return -ENOMEM;
+ if (copy_from_user(karg.contexts, ulist,
+ (karg.count * sizeof(drm_ctx_t)))) {
+ kfree(karg.contexts);
+ return -EFAULT;
+ }
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
+ set_fs(old_fs);
+
+ if (!ret) {
+ if (orig_count) {
+ if (copy_to_user(ulist, karg.contexts,
+ (orig_count * sizeof(drm_ctx_t))))
+ ret = -EFAULT;
+ }
+ if (put_user(karg.count, &uarg->count))
+ ret = -EFAULT;
+ }
+
+ if (karg.contexts)
+ kfree(karg.contexts);
+
+ return ret;
+}
+
+#endif
+
+#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
+#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)
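+/* A COMPATIBLE_IOCTL entry forwards the 32-bit ioctl to sys_ioctl unchanged;
+ * a HANDLE_IOCTL entry installs the named translation handler instead.
+ */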
+
+#define IOCTL_TABLE_START struct ioctl_trans ioctl_start[] = {
+#define IOCTL_TABLE_END };
+
+IOCTL_TABLE_START
+#include <linux/compat_ioctl.h>
+
+#define DECLARES
+#include "compat_ioctl.c"
+
+/* Might be moved to compat_ioctl.h with some ifdefs... */
+COMPATIBLE_IOCTL(TIOCSTART)
+COMPATIBLE_IOCTL(TIOCSTOP)
+COMPATIBLE_IOCTL(TIOCSLTC)
+
+/* PA-specific ioctls */
+COMPATIBLE_IOCTL(PA_PERF_ON)
+COMPATIBLE_IOCTL(PA_PERF_OFF)
+COMPATIBLE_IOCTL(PA_PERF_VERSION)
+
+/* And these ioctls need translation */
+HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
+HANDLE_IOCTL(SIOCGPPPVER, dev_ifsioc)
+
+#if defined(CONFIG_GEN_RTC)
+COMPATIBLE_IOCTL(RTC_AIE_ON)
+COMPATIBLE_IOCTL(RTC_AIE_OFF)
+COMPATIBLE_IOCTL(RTC_UIE_ON)
+COMPATIBLE_IOCTL(RTC_UIE_OFF)
+COMPATIBLE_IOCTL(RTC_PIE_ON)
+COMPATIBLE_IOCTL(RTC_PIE_OFF)
+COMPATIBLE_IOCTL(RTC_WIE_ON)
+COMPATIBLE_IOCTL(RTC_WIE_OFF)
+COMPATIBLE_IOCTL(RTC_ALM_SET) /* struct rtc_time only has ints */
+COMPATIBLE_IOCTL(RTC_ALM_READ) /* struct rtc_time only has ints */
+COMPATIBLE_IOCTL(RTC_RD_TIME) /* struct rtc_time only has ints */
+COMPATIBLE_IOCTL(RTC_SET_TIME) /* struct rtc_time only has ints */
+HANDLE_IOCTL(RTC_IRQP_READ, w_long)
+COMPATIBLE_IOCTL(RTC_IRQP_SET)
+HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
+COMPATIBLE_IOCTL(RTC_EPOCH_SET)
+#endif
+
+#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version);
+HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique);
+HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique);
+HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap);
+HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs);
+HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs);
+HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs);
+HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma);
+HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx);
+#endif /* DRM */
+IOCTL_TABLE_END
+
+int ioctl_table_size = ARRAY_SIZE(ioctl_start);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
new file mode 100644
index 000000000000..006385dbee66
--- /dev/null
+++ b/arch/parisc/kernel/irq.c
@@ -0,0 +1,343 @@
+/*
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
+ * Copyright (C) 1999-2000 Grant Grundler
+ * Copyright (c) 2005 Matthew Wilcox
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#undef PARISC_IRQ_CR16_COUNTS
+
+extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
+extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
+
+#define EIEM_MASK(irq)       (1UL<<(CPU_IRQ_MAX - (irq)))
+
+/* Bits in EIEM correlate with cpu_irq_action[].
+** Numbered *Big Endian*! (i.e. bit 0 is the MSB)
+*/
+static volatile unsigned long cpu_eiem = 0;
+
+static void cpu_set_eiem(void *info)
+{
+ set_eiem((unsigned long) info);
+}
+
+static inline void cpu_disable_irq(unsigned int irq)
+{
+ unsigned long eirr_bit = EIEM_MASK(irq);
+
+ cpu_eiem &= ~eirr_bit;
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+}
+
+static void cpu_enable_irq(unsigned int irq)
+{
+ unsigned long eirr_bit = EIEM_MASK(irq);
+
+ mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
+ cpu_eiem |= eirr_bit;
+ on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+}
+
+static unsigned int cpu_startup_irq(unsigned int irq)
+{
+ cpu_enable_irq(irq);
+ return 0;
+}
+
+void no_ack_irq(unsigned int irq) { }
+void no_end_irq(unsigned int irq) { }
+
+static struct hw_interrupt_type cpu_interrupt_type = {
+ .typename = "CPU",
+ .startup = cpu_startup_irq,
+ .shutdown = cpu_disable_irq,
+ .enable = cpu_enable_irq,
+ .disable = cpu_disable_irq,
+ .ack = no_ack_irq,
+ .end = no_end_irq,
+// .set_affinity = cpu_set_affinity_irq,
+};
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, j;
+ unsigned long flags;
+
+ if (i == 0) {
+ seq_puts(p, " ");
+ for_each_online_cpu(j)
+ seq_printf(p, " CPU%d", j);
+
+#ifdef PARISC_IRQ_CR16_COUNTS
+ seq_printf(p, " [min/avg/max] (CPU cycle counts)");
+#endif
+ seq_putc(p, '\n');
+ }
+
+ if (i < NR_IRQS) {
+ struct irqaction *action;
+
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto skip;
+ seq_printf(p, "%3d: ", i);
+#ifdef CONFIG_SMP
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#else
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#endif
+
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+#ifndef PARISC_IRQ_CR16_COUNTS
+ seq_printf(p, " %s", action->name);
+
+ while ((action = action->next))
+ seq_printf(p, ", %s", action->name);
+#else
+ for ( ;action; action = action->next) {
+ unsigned int k, avg, min, max;
+
+ min = max = action->cr16_hist[0];
+
+ for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
+ int hist = action->cr16_hist[k];
+
+ if (hist) {
+ avg += hist;
+ } else
+ break;
+
+ if (hist > max) max = hist;
+ if (hist < min) min = hist;
+ }
+
+			if (k)
+				avg /= k;
+ seq_printf(p, " %s[%d/%d/%d]", action->name,
+ min,avg,max);
+ }
+#endif
+
+ seq_putc(p, '\n');
+ skip:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ }
+
+ return 0;
+}
+
+
+
+/*
+** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
+** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
+**
+** To use txn_XXX() interfaces, get a Virtual IRQ first.
+** Then use that to get the Transaction address and data.
+*/
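+
+/*
+** Illustration only (hypothetical driver code, not part of this file):
+**
+**	irq  = txn_alloc_irq(8);	   get a Virtual IRQ (8-bit EIM here)
+**	addr = txn_alloc_addr(irq);	   Transaction address (processor HPA)
+**	data = txn_alloc_data(irq);	   Transaction data (EIRR bit)
+**	cpu_claim_irq(irq, &my_type, dev); my_type/dev are made-up names
+*/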
+
+int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data)
+{
+ if (irq_desc[irq].action)
+ return -EBUSY;
+ if (irq_desc[irq].handler != &cpu_interrupt_type)
+ return -EBUSY;
+
+ if (type) {
+ irq_desc[irq].handler = type;
+ irq_desc[irq].handler_data = data;
+ cpu_interrupt_type.enable(irq);
+ }
+ return 0;
+}
+
+int txn_claim_irq(int irq)
+{
+ return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
+}
+
+/*
+ * The bits_wide parameter accommodates the limitations of the HW/SW which
+ * use these bits:
+ * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
+ * V-class (EPIC): 6 bits
+ * N/L/A-class (iosapic): 8 bits
+ * PCI 2.2 MSI: 16 bits
+ * Some PCI devices: 32 bits (Symbios SCSI/ATM/HyperFabric)
+ *
+ * On the service provider side:
+ * o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register)
+ * o PA 2.0 wide mode 6-bits (per processor)
+ * o IA64 8-bits (0-256 total)
+ *
+ * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
+ * by the processor...and the N/L-class I/O subsystem supports more bits than
+ * PA2.0 has. The first case is the problem.
+ */
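+/* Illustration only: a legacy GSC device (5-bit EIM register) would call
+ * txn_alloc_irq(5) and can only be given an IRQ in the range
+ * CPU_IRQ_BASE+1 .. CPU_IRQ_BASE+31, which is what the
+ * (irq - CPU_IRQ_BASE) >= (1 << bits_wide) test below enforces.
+ */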
+int txn_alloc_irq(unsigned int bits_wide)
+{
+ int irq;
+
+	/* never return irq 0 because that's the interval timer */
+ for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
+ if (cpu_claim_irq(irq, NULL, NULL) < 0)
+ continue;
+ if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
+ continue;
+ return irq;
+ }
+
+ /* unlikely, but be prepared */
+ return -1;
+}
+
+unsigned long txn_alloc_addr(unsigned int virt_irq)
+{
+ static int next_cpu = -1;
+
+ next_cpu++; /* assign to "next" CPU we want this bugger on */
+
+ /* validate entry */
+ while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
+ !cpu_online(next_cpu)))
+ next_cpu++;
+
+ if (next_cpu >= NR_CPUS)
+ next_cpu = 0; /* nothing else, assign monarch */
+
+ return cpu_data[next_cpu].txn_addr;
+}
+
+
+unsigned int txn_alloc_data(unsigned int virt_irq)
+{
+ return virt_irq - CPU_IRQ_BASE;
+}
+
+/* ONLY called from entry.S:intr_extint() */
+void do_cpu_irq_mask(struct pt_regs *regs)
+{
+ unsigned long eirr_val;
+
+ irq_enter();
+
+ /*
+ * Only allow interrupt processing to be interrupted by the
+ * timer tick
+ */
+ set_eiem(EIEM_MASK(TIMER_IRQ));
+
+ /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
+ * 2) We loop here on EIRR contents in order to avoid
+ * nested interrupts or having to take another interrupt
+ * when we could have just handled it right away.
+ */
+ for (;;) {
+ unsigned long bit = (1UL << (BITS_PER_LONG - 1));
+ unsigned int irq;
+ eirr_val = mfctl(23) & cpu_eiem;
+ if (!eirr_val)
+ break;
+
+ if (eirr_val & EIEM_MASK(TIMER_IRQ))
+ set_eiem(0);
+
+ mtctl(eirr_val, 23); /* reset bits we are going to process */
+
+ /* Work our way from MSb to LSb...same order we alloc EIRs */
+ for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
+ if (!(bit & eirr_val))
+ continue;
+
+ /* clear bit in mask - can exit loop sooner */
+ eirr_val &= ~bit;
+
+ __do_IRQ(irq, regs);
+ }
+ }
+ set_eiem(cpu_eiem);
+ irq_exit();
+}
+
+
+static struct irqaction timer_action = {
+ .handler = timer_interrupt,
+ .name = "timer",
+};
+
+#ifdef CONFIG_SMP
+static struct irqaction ipi_action = {
+ .handler = ipi_interrupt,
+ .name = "IPI",
+};
+#endif
+
+static void claim_cpu_irqs(void)
+{
+ int i;
+ for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
+ irq_desc[i].handler = &cpu_interrupt_type;
+ }
+
+ irq_desc[TIMER_IRQ].action = &timer_action;
+ irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
+#ifdef CONFIG_SMP
+ irq_desc[IPI_IRQ].action = &ipi_action;
+ irq_desc[IPI_IRQ].status = IRQ_PER_CPU;
+#endif
+}
+
+void __init init_IRQ(void)
+{
+ local_irq_disable(); /* PARANOID - should already be disabled */
+ mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
+ claim_cpu_irqs();
+#ifdef CONFIG_SMP
+ if (!cpu_eiem)
+ cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
+#else
+ cpu_eiem = EIEM_MASK(TIMER_IRQ);
+#endif
+ set_eiem(cpu_eiem); /* EIEM : enable all external intr */
+
+}
+
+void hw_resend_irq(struct hw_interrupt_type *type, unsigned int irq)
+{
+ /* XXX: Needs to be written. We managed without it so far, but
+ * we really ought to write it.
+ */
+}
+
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ %d\n", irq);
+}
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
new file mode 100644
index 000000000000..f27cfe4771b8
--- /dev/null
+++ b/arch/parisc/kernel/module.c
@@ -0,0 +1,822 @@
+/* Kernel dynamically loadable module help for PARISC.
+ *
+ * The best reference for this stuff is probably the Processor-
+ * Specific ELF Supplement for PA-RISC:
+ * http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
+ *
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Notes:
+ * - SEGREL32 handling
+ * We are not doing SEGREL32 handling correctly. According to the ABI, we
+ * should do a value offset, like this:
+ * if (is_init(me, (void *)val))
+ * val -= (uint32_t)me->module_init;
+ * else
+ * val -= (uint32_t)me->module_core;
+ * However, SEGREL32 is used only for PARISC unwind entries, and we want
+ * those entries to have an absolute address, and not just an offset.
+ *
+ * The unwind table mechanism has the ability to specify an offset for
+ * the unwind table; however, because we split off the init functions into
+ * a different piece of memory, it is not possible to do this using a
+ * single offset. Instead, we use the above hack for now.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <asm/unwind.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+#define CHECK_RELOC(val, bits) \
+ if ( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 ) || \
+ ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) { \
+ printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
+ me->name, strtab + sym->st_name, (unsigned long)val, bits); \
+ return -ENOEXEC; \
+ }
+
+/* Maximum number of GOT entries. We use a long displacement ldd from
+ * the bottom of the table, which has a maximum displacement of 0x3fff;
+ * however, since the displacement is signed and we only go forward from
+ * the table base, only 0x1fff of that is usable, and since each GOT
+ * entry is 8 bytes long we can have at most 1023 entries
+ * (0x1fff / 8 = 1023). */
+#define MAX_GOTS 1023
+
+/* three functions to determine where in the module core
+ * or init pieces the location is */
+static inline int is_init(struct module *me, void *loc)
+{
+ return (loc >= me->module_init &&
+ loc <= (me->module_init + me->init_size));
+}
+
+static inline int is_core(struct module *me, void *loc)
+{
+ return (loc >= me->module_core &&
+ loc <= (me->module_core + me->core_size));
+}
+
+static inline int is_local(struct module *me, void *loc)
+{
+ return is_init(me, loc) || is_core(me, loc);
+}
+
+
+#ifndef __LP64__
+struct got_entry {
+ Elf32_Addr addr;
+};
+
+#define Elf_Fdesc Elf32_Fdesc
+
+struct stub_entry {
+ Elf32_Word insns[2]; /* each stub entry has two insns */
+};
+#else
+struct got_entry {
+ Elf64_Addr addr;
+};
+
+#define Elf_Fdesc Elf64_Fdesc
+
+struct stub_entry {
+ Elf64_Word insns[4]; /* each stub entry has four insns */
+};
+#endif
+
+/* Field selection types defined by hppa */
+#define rnd(x) (((x)+0x1000)&~0x1fff)
+/* fsel: full 32 bits */
+#define fsel(v,a) ((v)+(a))
+/* lsel: select left 21 bits */
+#define lsel(v,a) (((v)+(a))>>11)
+/* rsel: select right 11 bits */
+#define rsel(v,a) (((v)+(a))&0x7ff)
+/* lrsel with rounding of addend to nearest 8k */
+#define lrsel(v,a) (((v)+rnd(a))>>11)
+/* rrsel with rounding of addend to nearest 8k */
+#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))
+
+#define mask(x,sz) ((x) & ~((1<<(sz))-1))
+
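+/* Worked example (illustration only): if the effective address v+a is
+ * 0x12345678, then lsel(v,a) = 0x12345678 >> 11 = 0x2468a (the 21 bits an
+ * ldil puts in the upper part of a register) and rsel(v,a) =
+ * 0x12345678 & 0x7ff = 0x678 (the low 11 bits used as a displacement);
+ * (lsel << 11) + rsel rebuilds the original address.  lrsel/rrsel do the
+ * same split but round the addend to the nearest 8k first (see rnd()),
+ * matching the DIR21L/DIR14R style relocations handled below.
+ */
+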
+
+/* The reassemble_* functions prepare an immediate value for
+ insertion into an opcode. pa-risc uses all sorts of weird bitfields
+ in the instruction to hold the value. */
+static inline int reassemble_14(int as14)
+{
+ return (((as14 & 0x1fff) << 1) |
+ ((as14 & 0x2000) >> 13));
+}
+
+static inline int reassemble_17(int as17)
+{
+ return (((as17 & 0x10000) >> 16) |
+ ((as17 & 0x0f800) << 5) |
+ ((as17 & 0x00400) >> 8) |
+ ((as17 & 0x003ff) << 3));
+}
+
+static inline int reassemble_21(int as21)
+{
+ return (((as21 & 0x100000) >> 20) |
+ ((as21 & 0x0ffe00) >> 8) |
+ ((as21 & 0x000180) << 7) |
+ ((as21 & 0x00007c) << 14) |
+ ((as21 & 0x000003) << 12));
+}
+
+static inline int reassemble_22(int as22)
+{
+ return (((as22 & 0x200000) >> 21) |
+ ((as22 & 0x1f0000) << 5) |
+ ((as22 & 0x00f800) << 5) |
+ ((as22 & 0x000400) >> 8) |
+ ((as22 & 0x0003ff) << 3));
+}
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+#ifndef __LP64__
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+ return 0;
+}
+
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+ return 0;
+}
+
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF32_R_TYPE(rela->r_info)) {
+ case R_PARISC_PCREL17F:
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+#else
+static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_LTOFF21L:
+ case R_PARISC_LTOFF14R:
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_FPTR64:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
+{
+ unsigned long cnt = 0;
+
+ for (; n > 0; n--, rela++)
+ {
+ switch (ELF64_R_TYPE(rela->r_info)) {
+ case R_PARISC_PCREL22F:
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+#endif
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+#define CONST
+int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ CONST Elf_Shdr *sechdrs,
+ CONST char *secstrings,
+ struct module *me)
+{
+ unsigned long gots = 0, fdescs = 0, stubs = 0, init_stubs = 0;
+ unsigned int i;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const Elf_Rela *rels = (void *)hdr + sechdrs[i].sh_offset;
+ unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
+
+ if (strncmp(secstrings + sechdrs[i].sh_name,
+ ".PARISC.unwind", 14) == 0)
+ me->arch.unwind_section = i;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* some of these are not relevant for 32-bit/64-bit
+ * we leave them here to make the code common. the
+ * compiler will do its thing and optimize out the
+ * stuff we don't need
+ */
+ gots += count_gots(rels, nrels);
+ fdescs += count_fdescs(rels, nrels);
+ if(strncmp(secstrings + sechdrs[i].sh_name,
+ ".rela.init", 10) == 0)
+ init_stubs += count_stubs(rels, nrels);
+ else
+ stubs += count_stubs(rels, nrels);
+ }
+
+ /* align things a bit */
+ me->core_size = ALIGN(me->core_size, 16);
+ me->arch.got_offset = me->core_size;
+ me->core_size += gots * sizeof(struct got_entry);
+
+ me->core_size = ALIGN(me->core_size, 16);
+ me->arch.fdesc_offset = me->core_size;
+ me->core_size += fdescs * sizeof(Elf_Fdesc);
+
+ me->core_size = ALIGN(me->core_size, 16);
+ me->arch.stub_offset = me->core_size;
+ me->core_size += stubs * sizeof(struct stub_entry);
+
+ me->init_size = ALIGN(me->init_size, 16);
+ me->arch.init_stub_offset = me->init_size;
+ me->init_size += init_stubs * sizeof(struct stub_entry);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+ me->arch.stub_max = stubs;
+ me->arch.init_stub_max = init_stubs;
+
+ return 0;
+}
+
+#ifdef __LP64__
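+/* Return the byte offset, relative to the module's GOT base, of a GOT
+ * entry holding value+addend; a new entry is appended if none exists
+ * yet.  Callers fold this offset into LTOFF relocations and call stubs.
+ */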
+static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+{
+ unsigned int i;
+ struct got_entry *got;
+
+ value += addend;
+
+ BUG_ON(value == 0);
+
+ got = me->module_core + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+
+ BUG_ON(++me->arch.got_count > me->arch.got_max);
+
+ got[i].addr = value;
+ out:
+ DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
+ value);
+ return i * sizeof(struct got_entry);
+}
+#endif /* __LP64__ */
+
+#ifdef __LP64__
+static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+{
+ Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+ return 0;
+ }
+
+ /* Look for existing fdesc entry. */
+ while (fdesc->addr) {
+ if (fdesc->addr == value)
+ return (Elf_Addr)fdesc;
+ fdesc++;
+ }
+
+ BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);
+
+ /* Create new one */
+ fdesc->addr = value;
+ fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+}
+#endif /* __LP64__ */
+
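+/* Allocate the next free slot in the module's core (or init) stub area,
+ * fill it with a branch to value+addend and return the stub's address.
+ * The 32-bit stub is an ldil/be,n pair; the 64-bit stubs are the
+ * four-instruction sequences described in the comments below, one form
+ * for ordinary (GOT-based) calls and one for $$millicode calls.
+ */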
+static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
+ int millicode, int init_section)
+{
+ unsigned long i;
+ struct stub_entry *stub;
+
+ if(init_section) {
+ i = me->arch.init_stub_count++;
+ BUG_ON(me->arch.init_stub_count > me->arch.init_stub_max);
+ stub = me->module_init + me->arch.init_stub_offset +
+ i * sizeof(struct stub_entry);
+ } else {
+ i = me->arch.stub_count++;
+ BUG_ON(me->arch.stub_count > me->arch.stub_max);
+ stub = me->module_core + me->arch.stub_offset +
+ i * sizeof(struct stub_entry);
+ }
+
+#ifndef __LP64__
+/* for 32-bit the stub looks like this:
+ * ldil L'XXX,%r1
+ * be,n R'XXX(%sr4,%r1)
+ */
+ //value = *(unsigned long *)((value + addend) & ~3); /* why? */
+
+ stub->insns[0] = 0x20200000; /* ldil L'XXX,%r1 */
+ stub->insns[1] = 0xe0202002; /* be,n R'XXX(%sr4,%r1) */
+
+ stub->insns[0] |= reassemble_21(lrsel(value, addend));
+ stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);
+
+#else
+/* for 64-bit we have two kinds of stubs:
+ * for normal function calls:
+ * ldd 0(%dp),%dp
+ * ldd 10(%dp), %r1
+ * bve (%r1)
+ * ldd 18(%dp), %dp
+ *
+ * for millicode:
+ * ldil 0, %r1
+ * ldo 0(%r1), %r1
+ * ldd 10(%r1), %r1
+ * bve,n (%r1)
+ */
+ if (!millicode)
+ {
+ stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
+ stub->insns[1] = 0x53610020; /* ldd 10(%dp),%r1 */
+ stub->insns[2] = 0xe820d000; /* bve (%r1) */
+ stub->insns[3] = 0x537b0030; /* ldd 18(%dp),%dp */
+
+ stub->insns[0] |= reassemble_14(get_got(me, value, addend) & 0x3fff);
+ }
+ else
+ {
+ stub->insns[0] = 0x20200000; /* ldil 0,%r1 */
+ stub->insns[1] = 0x34210000; /* ldo 0(%r1), %r1 */
+ stub->insns[2] = 0x50210020; /* ldd 10(%r1),%r1 */
+ stub->insns[3] = 0xe820d002; /* bve,n (%r1) */
+
+ stub->insns[0] |= reassemble_21(lrsel(value, addend));
+ stub->insns[1] |= reassemble_14(rrsel(value, addend));
+ }
+#endif
+
+ return (Elf_Addr)stub;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ /* parisc should not need this ... */
+ printk(KERN_ERR "module %s: RELOCATION unsupported\n",
+ me->name);
+ return -ENOEXEC;
+}
+
+#ifndef __LP64__
+int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ Elf32_Word *loc;
+ Elf32_Addr val;
+ Elf32_Sword addend;
+ Elf32_Addr dot;
+ //unsigned long dp = (unsigned long)$global$;
+ register unsigned long dp asm ("r27");
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+ //dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+ dot = (Elf32_Addr)loc & ~0x03;
+
+ val = sym->st_value;
+ addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
+ DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
+ strtab + sym->st_name,
+ (uint32_t)loc, val, addend,
+ r(R_PARISC_PLABEL32)
+ r(R_PARISC_DIR32)
+ r(R_PARISC_DIR21L)
+ r(R_PARISC_DIR14R)
+ r(R_PARISC_SEGREL32)
+ r(R_PARISC_DPREL21L)
+ r(R_PARISC_DPREL14R)
+ r(R_PARISC_PCREL17F)
+ r(R_PARISC_PCREL22F)
+ "UNKNOWN");
+#undef r
+#endif
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_PARISC_PLABEL32:
+ /* 32-bit function address */
+ /* no function descriptors... */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DIR32:
+ /* direct 32-bit ref */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DIR21L:
+ /* left 21 bits of effective address */
+ val = lrsel(val, addend);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_DIR14R:
+ /* right 14 bits of effective address */
+ val = rrsel(val, addend);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_SEGREL32:
+ /* 32-bit segment relative address */
+ /* See note about special handling of SEGREL32 at
+ * the beginning of this file.
+ */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_DPREL21L:
+ /* left 21 bit of relative address */
+ val = lrsel(val - dp, addend);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_DPREL14R:
+ /* right 14 bit of relative address */
+ val = rrsel(val - dp, addend);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_PCREL17F:
+ /* 17-bit PC relative address */
+ val = get_stub(me, val, addend, 0, is_init(me, loc));
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 17)
+ *loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
+ break;
+ case R_PARISC_PCREL22F:
+ /* 22-bit PC relative address; only defined for pa20 */
+ val = get_stub(me, val, addend, 0, is_init(me, loc));
+ DEBUGP("STUB FOR %s loc %lx+%lx at %lx\n",
+ strtab + sym->st_name, (unsigned long)loc, addend,
+				val);
+ val = (val - dot - 8)/4;
+ CHECK_RELOC(val, 22);
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+#else
+int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ int i;
+ Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf64_Sym *sym;
+ Elf64_Word *loc;
+ Elf64_Xword *loc64;
+ Elf64_Addr val;
+ Elf64_Sxword addend;
+ Elf64_Addr dot;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ + ELF64_R_SYM(rel[i].r_info);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+ //dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
+ dot = (Elf64_Addr)loc & ~0x03;
+ loc64 = (Elf64_Xword *)loc;
+
+ val = sym->st_value;
+ addend = rel[i].r_addend;
+
+#if 0
+#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
+ printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
+ strtab + sym->st_name,
+ loc, val, addend,
+ r(R_PARISC_LTOFF14R)
+ r(R_PARISC_LTOFF21L)
+ r(R_PARISC_PCREL22F)
+ r(R_PARISC_DIR64)
+ r(R_PARISC_SEGREL32)
+ r(R_PARISC_FPTR64)
+ "UNKNOWN");
+#undef r
+#endif
+
+ switch (ELF64_R_TYPE(rel[i].r_info)) {
+ case R_PARISC_LTOFF21L:
+ /* LT-relative; left 21 bits */
+ val = get_got(me, val, addend);
+ DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ val = lrsel(val, 0);
+ *loc = mask(*loc, 21) | reassemble_21(val);
+ break;
+ case R_PARISC_LTOFF14R:
+ /* L(ltoff(val+addend)) */
+ /* LT-relative; right 14 bits */
+ val = get_got(me, val, addend);
+ val = rrsel(val, 0);
+ DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ *loc = mask(*loc, 14) | reassemble_14(val);
+ break;
+ case R_PARISC_PCREL22F:
+ /* PC-relative; 22 bits */
+ DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ /* can we reach it locally? */
+ if(!is_local(me, (void *)val)) {
+ if (strncmp(strtab + sym->st_name, "$$", 2)
+ == 0)
+ val = get_stub(me, val, addend, 1,
+ is_init(me, loc));
+ else
+ val = get_stub(me, val, addend, 0,
+ is_init(me, loc));
+ }
+ DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
+ strtab + sym->st_name, loc, sym->st_value,
+ addend, val);
+ /* FIXME: local symbols work as long as the
+ * core and init pieces aren't separated too
+ * far. If this is ever broken, you will trip
+ * the check below. The way to fix it would
+ * be to generate local stubs to go between init
+ * and core */
+ if((Elf64_Sxword)(val - dot - 8) > 0x800000 -1 ||
+ (Elf64_Sxword)(val - dot - 8) < -0x800000) {
+ printk(KERN_ERR "Module %s, symbol %s is out of range for PCREL22F relocation\n",
+ me->name, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ val = (val - dot - 8)/4;
+ *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
+ break;
+ case R_PARISC_DIR64:
+ /* 64-bit effective address */
+ *loc64 = val + addend;
+ break;
+ case R_PARISC_SEGREL32:
+ /* 32-bit segment relative address */
+ /* See note about special handling of SEGREL32 at
+ * the beginning of this file.
+ */
+ *loc = fsel(val, addend);
+ break;
+ case R_PARISC_FPTR64:
+ /* 64-bit function address */
+ if(is_local(me, (void *)(val + addend))) {
+ *loc64 = get_fdesc(me, val+addend);
+ DEBUGP("FDESC for %s at %p points to %lx\n",
+ strtab + sym->st_name, *loc64,
+ ((Elf_Fdesc *)*loc64)->addr);
+ } else {
+ /* if the symbol is not local to this
+ * module then val+addend is a pointer
+ * to the function descriptor */
+ DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
+ strtab + sym->st_name,
+ loc, val);
+ *loc64 = val + addend;
+ }
+ break;
+
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
+ me->name, ELF64_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+#endif
+
+static void
+register_unwind_table(struct module *me,
+ const Elf_Shdr *sechdrs)
+{
+ unsigned char *table, *end;
+ unsigned long gp;
+
+ if (!me->arch.unwind_section)
+ return;
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+ gp = (Elf_Addr)me->module_core + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+ me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
+}
+
+static void
+deregister_unwind_table(struct module *me)
+{
+ if (me->arch.unwind)
+ unwind_table_remove(me->arch.unwind);
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ int i;
+ unsigned long nsyms;
+ const char *strtab = NULL;
+ Elf_Sym *newptr, *oldptr;
+ Elf_Shdr *symhdr = NULL;
+#ifdef DEBUG
+ Elf_Fdesc *entry;
+ u32 *addr;
+
+ entry = (Elf_Fdesc *)me->init;
+ printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
+ entry->gp, entry->addr);
+ addr = (u32 *)entry->addr;
+ printk("INSNS: %x %x %x %x\n",
+ addr[0], addr[1], addr[2], addr[3]);
+ printk("stubs used %ld, stubs max %ld\n"
+ "init_stubs used %ld, init stubs max %ld\n"
+ "got entries used %ld, gots max %ld\n"
+ "fdescs used %ld, fdescs max %ld\n",
+ me->arch.stub_count, me->arch.stub_max,
+ me->arch.init_stub_count, me->arch.init_stub_max,
+ me->arch.got_count, me->arch.got_max,
+ me->arch.fdesc_count, me->arch.fdesc_max);
+#endif
+
+ register_unwind_table(me, sechdrs);
+
+ /* haven't filled in me->symtab yet, so have to find it
+ * ourselves */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if(sechdrs[i].sh_type == SHT_SYMTAB
+ && (sechdrs[i].sh_type & SHF_ALLOC)) {
+ int strindex = sechdrs[i].sh_link;
+ /* FIXME: AWFUL HACK
+ * The cast is to drop the const from
+ * the sechdrs pointer */
+ symhdr = (Elf_Shdr *)&sechdrs[i];
+ strtab = (char *)sechdrs[strindex].sh_addr;
+ break;
+ }
+ }
+
+ DEBUGP("module %s: strtab %p, symhdr %p\n",
+ me->name, strtab, symhdr);
+
+ if(me->arch.got_count > MAX_GOTS) {
+		printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
+			me->name, me->arch.got_count, MAX_GOTS);
+ return -EINVAL;
+ }
+
+ /* no symbol table */
+ if(symhdr == NULL)
+ return 0;
+
+ oldptr = (void *)symhdr->sh_addr;
+ newptr = oldptr + 1; /* we start counting at 1 */
+ nsyms = symhdr->sh_size / sizeof(Elf_Sym);
+ DEBUGP("OLD num_symtab %lu\n", nsyms);
+
+ for (i = 1; i < nsyms; i++) {
+ oldptr++; /* note, count starts at 1 so preincrement */
+ if(strncmp(strtab + oldptr->st_name,
+ ".L", 2) == 0)
+ continue;
+
+ if(newptr != oldptr)
+ *newptr++ = *oldptr;
+ else
+ newptr++;
+
+ }
+ nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
+ DEBUGP("NEW num_symtab %lu\n", nsyms);
+ symhdr->sh_size = nsyms * sizeof(Elf_Sym);
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ deregister_unwind_table(mod);
+}
diff --git a/arch/parisc/kernel/pa7300lc.c b/arch/parisc/kernel/pa7300lc.c
new file mode 100644
index 000000000000..8a89780223aa
--- /dev/null
+++ b/arch/parisc/kernel/pa7300lc.c
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/parisc/kernel/pa7300lc.c
+ * - PA7300LC-specific functions
+ *
+ * Copyright (C) 2000 Philipp Rumpf */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+
+/* CPU register offsets (added to the CPU HPA) */
+
+#define MIOC_STATUS 0xf040
+#define MIOC_CONTROL 0xf080
+#define MDERRADD 0xf0e0
+#define DMAERR 0xf0e8
+#define DIOERR 0xf0ec
+#define HIDMAMEM 0xf0f4
+
+/* this returns the HPA of the CPU it was called on */
+static u32 cpu_hpa(void)
+{
+ return 0xfffb0000;
+}
+
+static void pa7300lc_lpmc(int code, struct pt_regs *regs)
+{
+ u32 hpa;
+ printk(KERN_WARNING "LPMC on CPU %d\n", smp_processor_id());
+
+ show_regs(regs);
+
+ hpa = cpu_hpa();
+ printk(KERN_WARNING
+ "MIOC_CONTROL %08x\n" "MIOC_STATUS %08x\n"
+ "MDERRADD %08x\n" "DMAERR %08x\n"
+ "DIOERR %08x\n" "HIDMAMEM %08x\n",
+ gsc_readl(hpa+MIOC_CONTROL), gsc_readl(hpa+MIOC_STATUS),
+ gsc_readl(hpa+MDERRADD), gsc_readl(hpa+DMAERR),
+ gsc_readl(hpa+DIOERR), gsc_readl(hpa+HIDMAMEM));
+}
+
+void pa7300lc_init(void)
+{
+ cpu_lpmc = pa7300lc_lpmc;
+}
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
new file mode 100644
index 000000000000..77e03bc0f935
--- /dev/null
+++ b/arch/parisc/kernel/pacache.S
@@ -0,0 +1,1086 @@
+/*
+ * PARISC TLB and cache flushing support
+ * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
+ * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
+ * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * NOTE: fdc, fic, and pdc instructions that use base register modification
+ * should only use index and base registers that are not shadowed,
+ * so that the fast path emulation in the non access miss handler
+ * can be used.
+ */
+
+#ifdef __LP64__
+#define ADDIB addib,*
+#define CMPB cmpb,*
+#define ANDCM andcm,*
+
+ .level 2.0w
+#else
+#define ADDIB addib,
+#define CMPB cmpb,
+#define ANDCM andcm
+
+ .level 2.0
+#endif
+
+#include <asm/assembly.h>
+#include <asm/psw.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+
+ .text
+ .align 128
+
+ .export flush_tlb_all_local,code
+
+flush_tlb_all_local:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /*
+ * The pitlbe and pdtlbe instructions should only be used to
+ * flush the entire tlb. Also, there needs to be no intervening
+ * tlb operations, e.g. tlb misses, so the operation needs
+ * to happen in real mode with all interruptions disabled.
+ */
+
+ /*
+	 * Once again, we do the rfi dance ... some day we need to examine
+ * all of our uses of this type of code and see what can be
+ * consolidated.
+ */
+
+ rsm PSW_SM_I, %r19 /* relied upon translation! PA 2.0 Arch. F-5 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q, %r0 /* Turn off Q bit to load iia queue */
+ ldil L%REAL_MODE_PSW, %r1
+ ldo R%REAL_MODE_PSW(%r1), %r1
+ mtctl %r1, %cr22
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ ldil L%PA(1f), %r1
+ ldo R%PA(1f)(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ rfi
+ nop
+
+1: ldil L%PA(cache_info), %r1
+ ldo R%PA(cache_info)(%r1), %r1
+
+ /* Flush Instruction Tlb */
+
+ LDREG ITLB_SID_BASE(%r1), %r20
+ LDREG ITLB_SID_STRIDE(%r1), %r21
+ LDREG ITLB_SID_COUNT(%r1), %r22
+ LDREG ITLB_OFF_BASE(%r1), %arg0
+ LDREG ITLB_OFF_STRIDE(%r1), %arg1
+ LDREG ITLB_OFF_COUNT(%r1), %arg2
+ LDREG ITLB_LOOP(%r1), %arg3
+
+ ADDIB= -1, %arg3, fitoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fitdone /* If loop < 0, skip */
+ copy %arg0, %r28 /* Init base addr */
+
+fitmanyloop: /* Loop if LOOP >= 2 */
+ mtsp %r20, %sr1
+ add %r21, %r20, %r20 /* increment space */
+ copy %arg2, %r29 /* Init middle loop count */
+
+fitmanymiddle: /* Loop if LOOP >= 2 */
+ ADDIB> -1, %r31, fitmanymiddle /* Adjusted inner loop decr */
+ pitlbe 0(%sr1, %r28)
+ pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */
+ ADDIB> -1, %r29, fitmanymiddle /* Middle loop decr */
+ copy %arg3, %r31 /* Re-init inner loop count */
+
+ movb,tr %arg0, %r28, fitmanyloop /* Re-init base addr */
+ ADDIB<=,n -1, %r22, fitdone /* Outer loop count decr */
+
+fitoneloop: /* Loop if LOOP = 1 */
+ mtsp %r20, %sr1
+ copy %arg0, %r28 /* init base addr */
+ copy %arg2, %r29 /* init middle loop count */
+
+fitonemiddle: /* Loop if LOOP = 1 */
+ ADDIB> -1, %r29, fitonemiddle /* Middle loop count decr */
+ pitlbe,m %arg1(%sr1, %r28) /* pitlbe for one loop */
+
+ ADDIB> -1, %r22, fitoneloop /* Outer loop count decr */
+ add %r21, %r20, %r20 /* increment space */
+
+fitdone:
+
+ /* Flush Data Tlb */
+
+ LDREG DTLB_SID_BASE(%r1), %r20
+ LDREG DTLB_SID_STRIDE(%r1), %r21
+ LDREG DTLB_SID_COUNT(%r1), %r22
+ LDREG DTLB_OFF_BASE(%r1), %arg0
+ LDREG DTLB_OFF_STRIDE(%r1), %arg1
+ LDREG DTLB_OFF_COUNT(%r1), %arg2
+ LDREG DTLB_LOOP(%r1), %arg3
+
+ ADDIB= -1, %arg3, fdtoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fdtdone /* If loop < 0, skip */
+ copy %arg0, %r28 /* Init base addr */
+
+fdtmanyloop: /* Loop if LOOP >= 2 */
+ mtsp %r20, %sr1
+ add %r21, %r20, %r20 /* increment space */
+ copy %arg2, %r29 /* Init middle loop count */
+
+fdtmanymiddle: /* Loop if LOOP >= 2 */
+ ADDIB> -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */
+ pdtlbe 0(%sr1, %r28)
+ pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */
+ ADDIB> -1, %r29, fdtmanymiddle /* Middle loop decr */
+ copy %arg3, %r31 /* Re-init inner loop count */
+
+ movb,tr %arg0, %r28, fdtmanyloop /* Re-init base addr */
+ ADDIB<=,n -1, %r22,fdtdone /* Outer loop count decr */
+
+fdtoneloop: /* Loop if LOOP = 1 */
+ mtsp %r20, %sr1
+ copy %arg0, %r28 /* init base addr */
+ copy %arg2, %r29 /* init middle loop count */
+
+fdtonemiddle: /* Loop if LOOP = 1 */
+ ADDIB> -1, %r29, fdtonemiddle /* Middle loop count decr */
+ pdtlbe,m %arg1(%sr1, %r28) /* pdtlbe for one loop */
+
+ ADDIB> -1, %r22, fdtoneloop /* Outer loop count decr */
+ add %r21, %r20, %r20 /* increment space */
+
+fdtdone:
+
+ /* Switch back to virtual mode */
+
+ rsm PSW_SM_Q, %r0 /* clear Q bit to load iia queue */
+ ldil L%KERNEL_PSW, %r1
+ ldo R%KERNEL_PSW(%r1), %r1
+ or %r1, %r19, %r1 /* Set I bit if set on entry */
+ mtctl %r1, %cr22
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ ldil L%(2f), %r1
+ ldo R%(2f)(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ rfi
+ nop
+
+2: bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_instruction_cache_local,code
+ .import cache_info,data
+
+flush_instruction_cache_local:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ mtsp %r0, %sr1
+ ldil L%cache_info, %r1
+ ldo R%cache_info(%r1), %r1
+
+ /* Flush Instruction Cache */
+
+ LDREG ICACHE_BASE(%r1), %arg0
+ LDREG ICACHE_STRIDE(%r1), %arg1
+ LDREG ICACHE_COUNT(%r1), %arg2
+ LDREG ICACHE_LOOP(%r1), %arg3
+ rsm PSW_SM_I, %r22 /* No mmgt ops during loop*/
+ ADDIB= -1, %arg3, fioneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fisync /* If loop < 0, do sync */
+
+fimanyloop: /* Loop if LOOP >= 2 */
+ ADDIB> -1, %r31, fimanyloop /* Adjusted inner loop decr */
+ fice 0(%sr1, %arg0)
+ fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */
+ movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */
+ ADDIB<=,n -1, %arg2, fisync /* Outer loop decr */
+
+fioneloop: /* Loop if LOOP = 1 */
+ ADDIB> -1, %arg2, fioneloop /* Outer loop count decr */
+ fice,m %arg1(%sr1, %arg0) /* Fice for one loop */
+
+fisync:
+ sync
+ mtsm %r22
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_data_cache_local, code
+ .import cache_info, data
+
+flush_data_cache_local:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ mtsp %r0, %sr1
+ ldil L%cache_info, %r1
+ ldo R%cache_info(%r1), %r1
+
+ /* Flush Data Cache */
+
+ LDREG DCACHE_BASE(%r1), %arg0
+ LDREG DCACHE_STRIDE(%r1), %arg1
+ LDREG DCACHE_COUNT(%r1), %arg2
+ LDREG DCACHE_LOOP(%r1), %arg3
+ rsm PSW_SM_I, %r22
+ ADDIB= -1, %arg3, fdoneloop /* Preadjust and test */
+ movb,<,n %arg3, %r31, fdsync /* If loop < 0, do sync */
+
+fdmanyloop: /* Loop if LOOP >= 2 */
+ ADDIB> -1, %r31, fdmanyloop /* Adjusted inner loop decr */
+ fdce 0(%sr1, %arg0)
+ fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */
+ movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */
+ ADDIB<=,n -1, %arg2, fdsync /* Outer loop decr */
+
+fdoneloop: /* Loop if LOOP = 1 */
+ ADDIB> -1, %arg2, fdoneloop /* Outer loop count decr */
+ fdce,m %arg1(%sr1, %arg0) /* Fdce for one loop */
+
+fdsync:
+ syncdma
+ sync
+ mtsm %r22
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export copy_user_page_asm,code
+ .align 16
+
+copy_user_page_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+#ifdef __LP64__
+ /* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
+ * Unroll the loop by hand and arrange insn appropriately.
+ * GCC probably can do this just as well.
+ */
+
+ ldd 0(%r25), %r19
+ ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
+ ldw 64(%r25), %r0 /* prefetch 1 cacheline ahead */
+ ldw 128(%r25), %r0 /* prefetch 2 */
+
+1: ldd 8(%r25), %r20
+ ldw 192(%r25), %r0 /* prefetch 3 */
+ ldw 256(%r25), %r0 /* prefetch 4 */
+
+ ldd 16(%r25), %r21
+ ldd 24(%r25), %r22
+ std %r19, 0(%r26)
+ std %r20, 8(%r26)
+
+ ldd 32(%r25), %r19
+ ldd 40(%r25), %r20
+ std %r21, 16(%r26)
+ std %r22, 24(%r26)
+
+ ldd 48(%r25), %r21
+ ldd 56(%r25), %r22
+ std %r19, 32(%r26)
+ std %r20, 40(%r26)
+
+ ldd 64(%r25), %r19
+ ldd 72(%r25), %r20
+ std %r21, 48(%r26)
+ std %r22, 56(%r26)
+
+ ldd 80(%r25), %r21
+ ldd 88(%r25), %r22
+ std %r19, 64(%r26)
+ std %r20, 72(%r26)
+
+ ldd 96(%r25), %r19
+ ldd 104(%r25), %r20
+ std %r21, 80(%r26)
+ std %r22, 88(%r26)
+
+ ldd 112(%r25), %r21
+ ldd 120(%r25), %r22
+ std %r19, 96(%r26)
+ std %r20, 104(%r26)
+
+ ldo 128(%r25), %r25
+ std %r21, 112(%r26)
+ std %r22, 120(%r26)
+ ldo 128(%r26), %r26
+
+ ADDIB> -1, %r1, 1b /* bundle 10 */
+ ldd 0(%r25), %r19 /* start next loads */
+
+#else
+
+ /*
+ * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+ * bundles (very restricted rules for bundling).
+ * Note that until (if) we start saving
+ * the full 64 bit register values on interrupt, we can't
+ * use ldd/std on a 32 bit kernel.
+ */
+ ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
+
+1:
+ ldw 0(%r25), %r19
+ ldw 4(%r25), %r20
+ ldw 8(%r25), %r21
+ ldw 12(%r25), %r22
+ stw %r19, 0(%r26)
+ stw %r20, 4(%r26)
+ stw %r21, 8(%r26)
+ stw %r22, 12(%r26)
+ ldw 16(%r25), %r19
+ ldw 20(%r25), %r20
+ ldw 24(%r25), %r21
+ ldw 28(%r25), %r22
+ stw %r19, 16(%r26)
+ stw %r20, 20(%r26)
+ stw %r21, 24(%r26)
+ stw %r22, 28(%r26)
+ ldw 32(%r25), %r19
+ ldw 36(%r25), %r20
+ ldw 40(%r25), %r21
+ ldw 44(%r25), %r22
+ stw %r19, 32(%r26)
+ stw %r20, 36(%r26)
+ stw %r21, 40(%r26)
+ stw %r22, 44(%r26)
+ ldw 48(%r25), %r19
+ ldw 52(%r25), %r20
+ ldw 56(%r25), %r21
+ ldw 60(%r25), %r22
+ stw %r19, 48(%r26)
+ stw %r20, 52(%r26)
+ stw %r21, 56(%r26)
+ stw %r22, 60(%r26)
+ ldo 64(%r26), %r26
+ ADDIB> -1, %r1, 1b
+ ldo 64(%r25), %r25
+#endif
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+/*
+ * NOTE: Code in clear_user_page has a hard coded dependency on the
+ * maximum alias boundary being 4 Mb. We've been assured by the
+ * parisc chip designers that there will not ever be a parisc
+ * chip with a larger alias boundary (Never say never :-) ).
+ *
+ * Subtle: the dtlb miss handlers support the temp alias region by
+ * "knowing" that if a dtlb miss happens within the temp alias
+ * region it must have occurred while in clear_user_page. Since
+ * this routine makes use of processor local translations, we
+ * don't want to insert them into the kernel page table. Instead,
+ * we load up some general registers (they need to be registers
+ * which aren't shadowed) with the physical page numbers (preshifted
+ * for tlb insertion) needed to insert the translations. When we
+ * miss on the translation, the dtlb miss handler inserts the
+ * translation into the tlb using these values:
+ *
+ * %r26 physical page (shifted for tlb insert) of "to" translation
+ * %r23 physical page (shifted for tlb insert) of "from" translation
+ */
+
+#if 0
+
+ /*
+ * We can't do this since copy_user_page is used to bring in
+ * file data that might have instructions. Since the data would
+ * then need to be flushed out so the i-fetch can see it, it
+ * makes more sense to just copy through the kernel translation
+ * and flush it.
+ *
+ * I'm still keeping this around because it may be possible to
+ * use it if more information is passed into copy_user_page().
+ * Have to do some measurements to see if it is worthwhile to
+ * lobby for such a change.
+ */
+
+ .export copy_user_page_asm,code
+
+copy_user_page_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%(__PAGE_OFFSET), %r1
+ sub %r26, %r1, %r26
+ sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef __LP64__
+ extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */
+ extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */
+ depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+ copy %r28, %r29
+ depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
+ depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+ copy %r28, %r29
+ depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
+#endif
+
+ /* Purge any old translations */
+
+ pdtlb 0(%r28)
+ pdtlb 0(%r29)
+
+ ldi 64, %r1
+
+ /*
+ * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
+ * bundles (very restricted rules for bundling). It probably
+ * does OK on PCXU and better, but we could do better with
+ * ldd/std instructions. Note that until (if) we start saving
+ * the full 64 bit register values on interrupt, we can't
+ * use ldd/std on a 32 bit kernel.
+ */
+
+
+1:
+ ldw 0(%r29), %r19
+ ldw 4(%r29), %r20
+ ldw 8(%r29), %r21
+ ldw 12(%r29), %r22
+ stw %r19, 0(%r28)
+ stw %r20, 4(%r28)
+ stw %r21, 8(%r28)
+ stw %r22, 12(%r28)
+ ldw 16(%r29), %r19
+ ldw 20(%r29), %r20
+ ldw 24(%r29), %r21
+ ldw 28(%r29), %r22
+ stw %r19, 16(%r28)
+ stw %r20, 20(%r28)
+ stw %r21, 24(%r28)
+ stw %r22, 28(%r28)
+ ldw 32(%r29), %r19
+ ldw 36(%r29), %r20
+ ldw 40(%r29), %r21
+ ldw 44(%r29), %r22
+ stw %r19, 32(%r28)
+ stw %r20, 36(%r28)
+ stw %r21, 40(%r28)
+ stw %r22, 44(%r28)
+ ldw 48(%r29), %r19
+ ldw 52(%r29), %r20
+ ldw 56(%r29), %r21
+ ldw 60(%r29), %r22
+ stw %r19, 48(%r28)
+ stw %r20, 52(%r28)
+ stw %r21, 56(%r28)
+ stw %r22, 60(%r28)
+ ldo 64(%r28), %r28
+ ADDIB> -1, %r1,1b
+ ldo 64(%r29), %r29
+
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+#endif
+
+ .export __clear_user_page_asm,code
+
+__clear_user_page_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ tophys_r1 %r26
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef __LP64__
+#if (TMPALIAS_MAP_START >= 0x80000000)
+ depdi 0, 31,32, %r28 /* clear any sign extension */
+#endif
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pdtlb 0(%r28)
+
+#ifdef __LP64__
+ ldi 32, %r1 /* PAGE_SIZE/128 == 32 */
+
+ /* PREFETCH (Write) has not (yet) been proven to help here */
+/* #define PREFETCHW_OP ldd 256(%0), %r0 */
+
+1: std %r0, 0(%r28)
+ std %r0, 8(%r28)
+ std %r0, 16(%r28)
+ std %r0, 24(%r28)
+ std %r0, 32(%r28)
+ std %r0, 40(%r28)
+ std %r0, 48(%r28)
+ std %r0, 56(%r28)
+ std %r0, 64(%r28)
+ std %r0, 72(%r28)
+ std %r0, 80(%r28)
+ std %r0, 88(%r28)
+ std %r0, 96(%r28)
+ std %r0, 104(%r28)
+ std %r0, 112(%r28)
+ std %r0, 120(%r28)
+ ADDIB> -1, %r1, 1b
+ ldo 128(%r28), %r28
+
+#else /* ! __LP64__ */
+
+ ldi 64, %r1 /* PAGE_SIZE/64 == 64 */
+
+1:
+ stw %r0, 0(%r28)
+ stw %r0, 4(%r28)
+ stw %r0, 8(%r28)
+ stw %r0, 12(%r28)
+ stw %r0, 16(%r28)
+ stw %r0, 20(%r28)
+ stw %r0, 24(%r28)
+ stw %r0, 28(%r28)
+ stw %r0, 32(%r28)
+ stw %r0, 36(%r28)
+ stw %r0, 40(%r28)
+ stw %r0, 44(%r28)
+ stw %r0, 48(%r28)
+ stw %r0, 52(%r28)
+ stw %r0, 56(%r28)
+ stw %r0, 60(%r28)
+ ADDIB> -1, %r1, 1b
+ ldo 64(%r28), %r28
+#endif /* __LP64__ */
+
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_kernel_dcache_page
+
+flush_kernel_dcache_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
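+
+	/*
+	 * %r25 now points at the last dcache line of the page
+	 * (base + PAGE_SIZE - dcache_stride); the loop below issues
+	 * 16 fdc,m per iteration, stepping %r26 by the dcache stride,
+	 * until %r26 catches up with %r25.
+	 */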
+
+
+1: fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ fdc,m %r23(%r26)
+ CMPB<< %r26, %r25,1b
+ fdc,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_user_dcache_page
+
+flush_user_dcache_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1,63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1,31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+
+1: fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ fdc,m %r23(%sr3, %r26)
+ CMPB<< %r26, %r25,1b
+ fdc,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_user_icache_page
+
+flush_user_icache_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+
+1: fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ fic,m %r23(%sr3, %r26)
+ CMPB<< %r26, %r25,1b
+ fic,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+
+ .export purge_kernel_dcache_page
+
+purge_kernel_dcache_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+1: pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ pdc,m %r23(%r26)
+ CMPB<< %r26, %r25, 1b
+ pdc,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+#if 0
+ /* Currently not used, but it still is a possible alternate
+ * solution.
+ */
+
+ .export flush_alias_page
+
+flush_alias_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ tophys_r1 %r26
+
+ ldil L%(TMPALIAS_MAP_START), %r28
+#ifdef __LP64__
+ extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */
+ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
+ depdi 0, 63,12, %r28 /* Clear any offset bits */
+#else
+ extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
+ depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
+ depwi 0, 31,12, %r28 /* Clear any offset bits */
+#endif
+
+ /* Purge any old translation */
+
+ pdtlb 0(%r28)
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1, 63-PAGE_SHIFT,1, %r29
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r29
+#endif
+ add %r28, %r29, %r29
+ sub %r29, %r23, %r29
+
+1: fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ fdc,m %r23(%r28)
+ CMPB<< %r28, %r29, 1b
+ fdc,m %r23(%r28)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+#endif
+
+ .export flush_user_dcache_range_asm
+
+flush_user_dcache_range_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: CMPB<<,n %r26, %r25, 1b
+ fdc,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_kernel_dcache_range_asm
+
+flush_kernel_dcache_range_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%dcache_stride, %r1
+ ldw R%dcache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: CMPB<<,n %r26, %r25,1b
+ fdc,m %r23(%r26)
+
+ sync
+ syncdma
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_user_icache_range_asm
+
+flush_user_icache_range_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: CMPB<<,n %r26, %r25,1b
+ fic,m %r23(%sr3, %r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_kernel_icache_page
+
+flush_kernel_icache_page:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+
+#ifdef __LP64__
+ depdi,z 1, 63-PAGE_SHIFT,1, %r25
+#else
+ depwi,z 1, 31-PAGE_SHIFT,1, %r25
+#endif
+ add %r26, %r25, %r25
+ sub %r25, %r23, %r25
+
+
+1: fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ fic,m %r23(%r26)
+ CMPB<< %r26, %r25, 1b
+ fic,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .export flush_kernel_icache_range_asm
+
+flush_kernel_icache_range_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ ldil L%icache_stride, %r1
+ ldw R%icache_stride(%r1), %r23
+ ldo -1(%r23), %r21
+ ANDCM %r26, %r21, %r26
+
+1: CMPB<<,n %r26, %r25, 1b
+ fic,m %r23(%r26)
+
+ sync
+ bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .align 128
+
+ .export disable_sr_hashing_asm,code
+
+disable_sr_hashing_asm:
+ .proc
+ .callinfo NO_CALLS
+ .entry
+
+ /* Switch to real mode */
+
+ ssm 0, %r0 /* relied upon translation! */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm (PSW_SM_Q|PSW_SM_I), %r0 /* disable Q&I to load the iia queue */
+ ldil L%REAL_MODE_PSW, %r1
+ ldo R%REAL_MODE_PSW(%r1), %r1
+ mtctl %r1, %cr22
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ ldil L%PA(1f), %r1
+ ldo R%PA(1f)(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ rfi
+ nop
+
+1: cmpib,=,n SRHASH_PCXST, %r26,srdis_pcxs
+ cmpib,=,n SRHASH_PCXL, %r26,srdis_pcxl
+ cmpib,=,n SRHASH_PA20, %r26,srdis_pa20
+ b,n srdis_done
+
+srdis_pcxs:
+
+ /* Disable Space Register Hashing for PCXS,PCXT,PCXT' */
+
+ .word 0x141c1a00 /* mfdiag %dr0, %r28 */
+ .word 0x141c1a00 /* must issue twice */
+ depwi 0,18,1, %r28 /* Clear DHE (dcache hash enable) */
+ depwi 0,20,1, %r28 /* Clear IHE (icache hash enable) */
+ .word 0x141c1600 /* mtdiag %r28, %dr0 */
+ .word 0x141c1600 /* must issue twice */
+ b,n srdis_done
+
+srdis_pcxl:
+
+ /* Disable Space Register Hashing for PCXL */
+
+ .word 0x141c0600 /* mfdiag %dr0, %r28 */
+ depwi 0,28,2, %r28 /* Clear DHASH_EN & IHASH_EN */
+ .word 0x141c0240 /* mtdiag %r28, %dr0 */
+ b,n srdis_done
+
+srdis_pa20:
+
+ /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+ */
+
+ .word 0x144008bc /* mfdiag %dr2, %r28 */
+ depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */
+ .word 0x145c1840 /* mtdiag %r28, %dr2 */
+
+srdis_done:
+
+ /* Switch back to virtual mode */
+
+ rsm PSW_SM_Q, %r0 /* clear Q bit to load iia queue */
+ ldil L%KERNEL_PSW, %r1
+ ldo R%KERNEL_PSW(%r1), %r1
+ mtctl %r1, %cr22
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ ldil L%(2f), %r1
+ ldo R%(2f)(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ rfi
+ nop
+
+2: bv %r0(%r2)
+ nop
+ .exit
+
+ .procend
+
+ .end
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
new file mode 100644
index 000000000000..f40a777dd388
--- /dev/null
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -0,0 +1,187 @@
+/*
+ * Architecture-specific kernel symbols
+ *
+ * Copyright (C) 2000-2001 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2001 Dave Kennedy
+ * Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001-2003 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2002-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2002-2003 Helge Deller <deller with parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+
+#include <linux/string.h>
+EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+
+#include <linux/pm.h>
+EXPORT_SYMBOL(pm_power_off);
+
+#include <asm/atomic.h>
+EXPORT_SYMBOL(__xchg8);
+EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u32);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__atomic_hash);
+#endif
+#ifdef __LP64__
+EXPORT_SYMBOL(__xchg64);
+EXPORT_SYMBOL(__cmpxchg_u64);
+#endif
+
+#include <asm/uaccess.h>
+EXPORT_SYMBOL(lstrncpy_from_user);
+EXPORT_SYMBOL(lclear_user);
+EXPORT_SYMBOL(lstrnlen_user);
+
+/* Global fixups */
+extern void fixup_get_user_skip_1(void);
+extern void fixup_get_user_skip_2(void);
+extern void fixup_put_user_skip_1(void);
+extern void fixup_put_user_skip_2(void);
+EXPORT_SYMBOL(fixup_get_user_skip_1);
+EXPORT_SYMBOL(fixup_get_user_skip_2);
+EXPORT_SYMBOL(fixup_put_user_skip_1);
+EXPORT_SYMBOL(fixup_put_user_skip_2);
+
+#ifndef __LP64__
+/* Needed so insmod can set dp value */
+extern int $global$;
+EXPORT_SYMBOL($global$);
+#endif
+
+#include <asm/io.h>
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(memcpy_toio);
+EXPORT_SYMBOL(memcpy_fromio);
+EXPORT_SYMBOL(memset_io);
+
+#include <asm/unistd.h>
+EXPORT_SYMBOL(sys_open);
+EXPORT_SYMBOL(sys_lseek);
+EXPORT_SYMBOL(sys_read);
+EXPORT_SYMBOL(sys_write);
+
+#include <asm/semaphore.h>
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down);
+
+extern void $$divI(void);
+extern void $$divU(void);
+extern void $$remI(void);
+extern void $$remU(void);
+extern void $$mulI(void);
+extern void $$divU_3(void);
+extern void $$divU_5(void);
+extern void $$divU_6(void);
+extern void $$divU_9(void);
+extern void $$divU_10(void);
+extern void $$divU_12(void);
+extern void $$divU_7(void);
+extern void $$divU_14(void);
+extern void $$divU_15(void);
+extern void $$divI_3(void);
+extern void $$divI_5(void);
+extern void $$divI_6(void);
+extern void $$divI_7(void);
+extern void $$divI_9(void);
+extern void $$divI_10(void);
+extern void $$divI_12(void);
+extern void $$divI_14(void);
+extern void $$divI_15(void);
+
+EXPORT_SYMBOL($$divI);
+EXPORT_SYMBOL($$divU);
+EXPORT_SYMBOL($$remI);
+EXPORT_SYMBOL($$remU);
+EXPORT_SYMBOL($$mulI);
+EXPORT_SYMBOL($$divU_3);
+EXPORT_SYMBOL($$divU_5);
+EXPORT_SYMBOL($$divU_6);
+EXPORT_SYMBOL($$divU_9);
+EXPORT_SYMBOL($$divU_10);
+EXPORT_SYMBOL($$divU_12);
+EXPORT_SYMBOL($$divU_7);
+EXPORT_SYMBOL($$divU_14);
+EXPORT_SYMBOL($$divU_15);
+EXPORT_SYMBOL($$divI_3);
+EXPORT_SYMBOL($$divI_5);
+EXPORT_SYMBOL($$divI_6);
+EXPORT_SYMBOL($$divI_7);
+EXPORT_SYMBOL($$divI_9);
+EXPORT_SYMBOL($$divI_10);
+EXPORT_SYMBOL($$divI_12);
+EXPORT_SYMBOL($$divI_14);
+EXPORT_SYMBOL($$divI_15);
+
+extern void __ashrdi3(void);
+extern void __ashldi3(void);
+extern void __lshrdi3(void);
+extern void __muldi3(void);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+
+asmlinkage void * __canonicalize_funcptr_for_compare(void *);
+EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
+
+#ifdef __LP64__
+extern void __divdi3(void);
+extern void __udivdi3(void);
+extern void __umoddi3(void);
+extern void __moddi3(void);
+
+EXPORT_SYMBOL(__divdi3);
+EXPORT_SYMBOL(__udivdi3);
+EXPORT_SYMBOL(__umoddi3);
+EXPORT_SYMBOL(__moddi3);
+#endif
+
+#ifndef __LP64__
+extern void $$dyncall(void);
+EXPORT_SYMBOL($$dyncall);
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+#include <asm/mmzone.h>
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(pfnnid_map);
+#endif
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
new file mode 100644
index 000000000000..368cc095c99f
--- /dev/null
+++ b/arch/parisc/kernel/pci-dma.c
@@ -0,0 +1,578 @@
+/*
+** PARISC 1.1 Dynamic DMA mapping support.
+** This implementation is for PA-RISC platforms that do not support
+** I/O TLBs (aka DMA address translation hardware).
+** See Documentation/DMA-mapping.txt for interface definitions.
+**
+** (c) Copyright 1999,2000 Hewlett-Packard Company
+** (c) Copyright 2000 Grant Grundler
+** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
+** (c) Copyright 2000 John Marvin
+**
+** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
+** (I assume it's from David Mosberger-Tang but there was no Copyright)
+**
+** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
+**
+** - ggg
+*/
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
+#include <asm/io.h>
+#include <asm/page.h> /* get_order */
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+
+
+static struct proc_dir_entry * proc_gsc_root = NULL;
+static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
+static unsigned long pcxl_used_bytes = 0;
+static unsigned long pcxl_used_pages = 0;
+
+extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
+static spinlock_t pcxl_res_lock;
+static char *pcxl_res_map;
+static int pcxl_res_hint;
+static int pcxl_res_size;
+
+#ifdef DEBUG_PCXL_RESOURCE
+#define DBG_RES(x...) printk(x)
+#else
+#define DBG_RES(x...)
+#endif
+
+
+/*
+** Dump a hex representation of the resource map.
+*/
+
+#ifdef DUMP_RESMAP
+static
+void dump_resmap(void)
+{
+ u_long *res_ptr = (unsigned long *)pcxl_res_map;
+ u_long i = 0;
+
+ printk("res_map: ");
+ for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
+ printk("%08lx ", *res_ptr);
+
+ printk("\n");
+}
+#else
+static inline void dump_resmap(void) {;}
+#endif
+
+static int pa11_dma_supported( struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline int map_pte_uncached(pte_t * pte,
+ unsigned long vaddr,
+ unsigned long size, unsigned long *paddr_ptr)
+{
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ vaddr &= ~PMD_MASK;
+ end = vaddr + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
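+	/* install uncached PTEs for the range and purge any stale TLB entries */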
+ do {
+ if (!pte_none(*pte))
+ printk(KERN_ERR "map_pte_uncached: page already exists\n");
+ set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
+ purge_tlb_start();
+ pdtlb_kernel(orig_vaddr);
+ purge_tlb_end();
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ (*paddr_ptr) += PAGE_SIZE;
+ pte++;
+ } while (vaddr < end);
+ return 0;
+}
+
+static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
+ unsigned long size, unsigned long *paddr_ptr)
+{
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ vaddr &= ~PGDIR_MASK;
+ end = vaddr + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
+ if (!pte)
+ return -ENOMEM;
+ if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
+ return -ENOMEM;
+ vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+ orig_vaddr += PMD_SIZE;
+ pmd++;
+ } while (vaddr < end);
+ return 0;
+}
+
+static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
+ unsigned long paddr)
+{
+ pgd_t * dir;
+ unsigned long end = vaddr + size;
+
+ dir = pgd_offset_k(vaddr);
+ do {
+ pmd_t *pmd;
+
+ pmd = pmd_alloc(NULL, dir, vaddr);
+ if (!pmd)
+ return -ENOMEM;
+ if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
+ return -ENOMEM;
+ vaddr = vaddr + PGDIR_SIZE;
+ dir++;
+ } while (vaddr && (vaddr < end));
+ return 0;
+}
+
+static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
+ unsigned long size)
+{
+ pte_t * pte;
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ if (pmd_none(*pmd))
+ return;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ return;
+ }
+ pte = pte_offset_map(pmd, vaddr);
+ vaddr &= ~PMD_MASK;
+ end = vaddr + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t page = *pte;
+ pte_clear(&init_mm, vaddr, pte);
+ purge_tlb_start();
+ pdtlb_kernel(orig_vaddr);
+ purge_tlb_end();
+ vaddr += PAGE_SIZE;
+ orig_vaddr += PAGE_SIZE;
+ pte++;
+ if (pte_none(page) || pte_present(page))
+ continue;
+ printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
+ } while (vaddr < end);
+}
+
+static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
+ unsigned long size)
+{
+ pmd_t * pmd;
+ unsigned long end;
+ unsigned long orig_vaddr = vaddr;
+
+ if (pgd_none(*dir))
+ return;
+ if (pgd_bad(*dir)) {
+ pgd_ERROR(*dir);
+ pgd_clear(dir);
+ return;
+ }
+ pmd = pmd_offset(dir, vaddr);
+ vaddr &= ~PGDIR_MASK;
+ end = vaddr + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ do {
+ unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
+ vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
+ orig_vaddr += PMD_SIZE;
+ pmd++;
+ } while (vaddr < end);
+}
+
+static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
+{
+ pgd_t * dir;
+ unsigned long end = vaddr + size;
+
+ dir = pgd_offset_k(vaddr);
+ do {
+ unmap_uncached_pmd(dir, vaddr, end - vaddr);
+ vaddr = vaddr + PGDIR_SIZE;
+ dir++;
+ } while (vaddr && (vaddr < end));
+}
+
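+/*
+** The pcxl resource map is a bitmap with one bit per DMA page. The macros
+** below scan it at 8/16/32-bit granularity (picked by the caller from the
+** number of pages needed), starting at the last allocation hint and
+** wrapping back to the start of the map if nothing is found.
+*/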
+#define PCXL_SEARCH_LOOP(idx, mask, size) \
+ for(; res_ptr < res_end; ++res_ptr) \
+ { \
+ if(0 == ((*res_ptr) & mask)) { \
+ *res_ptr |= mask; \
+ idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
+ pcxl_res_hint = idx + (size >> 3); \
+ goto resource_found; \
+ } \
+ }
+
+#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
+ u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
+ u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
+ PCXL_SEARCH_LOOP(idx, mask, size); \
+ res_ptr = (u##size *)&pcxl_res_map[0]; \
+ PCXL_SEARCH_LOOP(idx, mask, size); \
+}
+
+unsigned long
+pcxl_alloc_range(size_t size)
+{
+ int res_idx;
+ u_long mask, flags;
+ unsigned int pages_needed = size >> PAGE_SHIFT;
+
+ mask = (u_long) -1L;
+ mask >>= BITS_PER_LONG - pages_needed;
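+	/* mask now has one bit set for each page requested */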
+
+ DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
+ size, pages_needed, mask);
+
+ spin_lock_irqsave(&pcxl_res_lock, flags);
+
+ if(pages_needed <= 8) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
+ } else if(pages_needed <= 16) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
+ } else if(pages_needed <= 32) {
+ PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
+ } else {
+ panic("%s: pcxl_alloc_range() Too many pages to map.\n",
+ __FILE__);
+ }
+
+ dump_resmap();
+ panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
+ __FILE__);
+
+resource_found:
+
+ DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
+ res_idx, mask, pcxl_res_hint);
+
+ pcxl_used_pages += pages_needed;
+ pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
+
+ spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+ dump_resmap();
+
+ /*
+ ** return the corresponding vaddr in the pcxl dma map
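+	** (res_idx is a byte index into the map; each byte covers 8 pages,
+	** hence the shift by PAGE_SHIFT + 3)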
+ */
+ return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
+}
+
+#define PCXL_FREE_MAPPINGS(idx, m, size) \
+ u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
+ /* BUG_ON((*res_ptr & m) != m); */ \
+ *res_ptr &= ~m;
+
+/*
+** clear bits in the pcxl resource map
+*/
+static void
+pcxl_free_range(unsigned long vaddr, size_t size)
+{
+ u_long mask, flags;
+ unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
+ unsigned int pages_mapped = size >> PAGE_SHIFT;
+
+ mask = (u_long) -1L;
+ mask >>= BITS_PER_LONG - pages_mapped;
+
+ DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
+ res_idx, size, pages_mapped, mask);
+
+ spin_lock_irqsave(&pcxl_res_lock, flags);
+
+ if(pages_mapped <= 8) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 8);
+ } else if(pages_mapped <= 16) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 16);
+ } else if(pages_mapped <= 32) {
+ PCXL_FREE_MAPPINGS(res_idx, mask, 32);
+ } else {
+ panic("%s: pcxl_free_range() Too many pages to unmap.\n",
+ __FILE__);
+ }
+
+ pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
+ pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
+
+ spin_unlock_irqrestore(&pcxl_res_lock, flags);
+
+ dump_resmap();
+}
+
+static int __init
+pcxl_dma_init(void)
+{
+ if (pcxl_dma_start == 0)
+ return 0;
+
+ spin_lock_init(&pcxl_res_lock);
+ pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
+ pcxl_res_hint = 0;
+ pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
+ get_order(pcxl_res_size));
+ memset(pcxl_res_map, 0, pcxl_res_size);
+ proc_gsc_root = proc_mkdir("gsc", 0);
+ create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
+ return 0;
+}
+
+__initcall(pcxl_dma_init);
+
+static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
+{
+ unsigned long vaddr;
+ unsigned long paddr;
+ int order;
+
+ order = get_order(size);
+ size = 1 << (order + PAGE_SHIFT);
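+
+	/*
+	** Reserve a virtual range in the pcxl dma map, back it with real
+	** pages, and map those pages uncached at the reserved address.
+	*/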
+ vaddr = pcxl_alloc_range(size);
+ paddr = __get_free_pages(flag, order);
+ flush_kernel_dcache_range(paddr, size);
+ paddr = __pa(paddr);
+ map_uncached_pages(vaddr, size, paddr);
+ *dma_handle = (dma_addr_t) paddr;
+
+#if 0
+/* This probably isn't needed to support EISA cards.
+** ISA cards will certainly only support 24-bit DMA addressing.
+** Not clear if we can, want, or need to support ISA.
+*/
+ if (!dev || *dev->coherent_dma_mask < 0xffffffff)
+ gfp |= GFP_DMA;
+#endif
+ return (void *)vaddr;
+}
+
+static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+{
+ int order;
+
+ order = get_order(size);
+ size = 1 << (order + PAGE_SHIFT);
+ unmap_uncached_pages((unsigned long)vaddr, size);
+ pcxl_free_range((unsigned long)vaddr, size);
+ free_pages((unsigned long)__va(dma_handle), order);
+}
+
+static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+{
+ if (direction == DMA_NONE) {
+ printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
+ BUG();
+ }
+
+ flush_kernel_dcache_range((unsigned long) addr, size);
+ return virt_to_phys(addr);
+}
+
+static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+ if (direction == DMA_NONE) {
+ printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
+ BUG();
+ }
+
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+ /*
+ * For PCI_DMA_FROMDEVICE this flush is not necessary for the
+	 * simple map/unmap case. However, it IS necessary if
+ * pci_dma_sync_single_* has been called and the buffer reused.
+ */
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
+ return;
+}
+
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ if (direction == DMA_NONE)
+ BUG();
+
+ for (i = 0; i < nents; i++, sglist++ ) {
+ unsigned long vaddr = sg_virt_addr(sglist);
+ sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
+ sg_dma_len(sglist) = sglist->length;
+ flush_kernel_dcache_range(vaddr, sglist->length);
+ }
+ return nents;
+}
+
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ if (direction == DMA_NONE)
+ BUG();
+
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+ return;
+}
+
+static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ if (direction == DMA_NONE)
+ BUG();
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+}
+
+static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+ if (direction == DMA_NONE)
+ BUG();
+
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+}
+
+static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+}
+
+static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+{
+ int i;
+
+ /* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
+
+ for (i = 0; i < nents; i++, sglist++ )
+ flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
+}
+
+struct hppa_dma_ops pcxl_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+ .alloc_consistent = pa11_dma_alloc_consistent,
+ .alloc_noncoherent = pa11_dma_alloc_consistent,
+ .free_consistent = pa11_dma_free_consistent,
+ .map_single = pa11_dma_map_single,
+ .unmap_single = pa11_dma_unmap_single,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+ .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+ .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+};
+
+static void *fail_alloc_consistent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag)
+{
+ return NULL;
+}
+
+static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag)
+{
+ void *addr = NULL;
+
+ /* rely on kmalloc to be cacheline aligned */
+ addr = kmalloc(size, flag);
+ if(addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
+
+ return addr;
+}
+
+static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t iova)
+{
+ kfree(vaddr);
+ return;
+}
+
+struct hppa_dma_ops pcx_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+ .alloc_consistent = fail_alloc_consistent,
+ .alloc_noncoherent = pa11_dma_alloc_noncoherent,
+ .free_consistent = pa11_dma_free_noncoherent,
+ .map_single = pa11_dma_map_single,
+ .unmap_single = pa11_dma_unmap_single,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+ .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+ .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+};
+
+
+static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
+{
+ u_long i = 0;
+ unsigned long *res_ptr = (u_long *)pcxl_res_map;
+ unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */
+
+ sprintf(buf, "\nDMA Mapping Area size : %d bytes (%d pages)\n",
+ PCXL_DMA_MAP_SIZE,
+ (pcxl_res_size << 3) ); /* 1 bit per page */
+
+ sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
+ buf, pcxl_res_size, pcxl_res_size << 3); /* 8 bits per byte */
+
+ strcat(buf, " total: free: used: % used:\n");
+ sprintf(buf, "%sblocks %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
+ pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
+ (pcxl_used_bytes * 100) / pcxl_res_size);
+
+ sprintf(buf, "%spages %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
+ total_pages - pcxl_used_pages, pcxl_used_pages,
+ (pcxl_used_pages * 100 / total_pages));
+
+ strcat(buf, "\nResource bitmap:");
+
+ for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
+ if ((i & 7) == 0)
+ strcat(buf,"\n ");
+ sprintf(buf, "%s %08lx", buf, *res_ptr);
+ }
+ strcat(buf, "\n");
+ return strlen(buf);
+}
+
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
new file mode 100644
index 000000000000..3cb08a4a513a
--- /dev/null
+++ b/arch/parisc/kernel/pci.c
@@ -0,0 +1,346 @@
+/* $Id: pci.c,v 1.6 2000/01/29 00:12:05 grundler Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 1998 Ralf Baechle
+ * Copyright (C) 1999 SuSE GmbH
+ * Copyright (C) 1999-2001 Hewlett-Packard Company
+ * Copyright (C) 1999-2001 Grant Grundler
+ */
+#include <linux/config.h>
+#include <linux/eisa.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/cache.h> /* for L1_CACHE_BYTES */
+#include <asm/superio.h>
+
+#define DEBUG_RESOURCES 0
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBGC(x...) printk(KERN_DEBUG x)
+#else
+# define DBGC(x...)
+#endif
+
+
+#if DEBUG_RESOURCES
+#define DBG_RES(x...) printk(KERN_DEBUG x)
+#else
+#define DBG_RES(x...)
+#endif
+
+/* To be used as: mdelay(pci_post_reset_delay);
+ *
+ * post_reset is the time the kernel should stall to prevent anyone from
+ * accessing the PCI bus once #RESET is de-asserted.
+ * PCI spec somewhere says 1 second but with multi-PCI bus systems,
+ * this makes the boot time much longer than necessary.
+ * 20ms seems to work for all the HP PCI implementations to date.
+ *
+ * XXX: turn into a #defined constant in <asm/pci.h> ?
+ */
+int pci_post_reset_delay = 50;
+
+struct pci_port_ops *pci_port;
+struct pci_bios_ops *pci_bios;
+
+int pci_hba_count = 0;
+
+/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data. */
+#define PCI_HBA_MAX 32
+struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX];
+
+
+/********************************************************************
+**
+** I/O port space support
+**
+*********************************************************************/
+
+/* EISA port numbers and PCI port numbers share the same interface. Some
+ * machines have both EISA and PCI adapters installed. Rather than turn
+ * pci_port into an array, we reserve bus 0 for EISA and call the EISA
+ * routines if the access is to a port on bus 0. We don't want to fix
+ * EISA and ISA drivers which assume port space is <= 0xffff.
+ */
+
+#ifdef CONFIG_EISA
+#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
+#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
+#else
+#define EISA_IN(size)
+#define EISA_OUT(size)
+#endif
+
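+/* Generate in{b,w,l}(): the upper bits of the port number select the HBA
+ * (PCI_PORT_HBA) and the low bits give the HBA-relative port address
+ * (PCI_PORT_ADDR).
+ */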
+#define PCI_PORT_IN(type, size) \
+u##size in##type (int addr) \
+{ \
+ int b = PCI_PORT_HBA(addr); \
+ EISA_IN(size); \
+ if (!parisc_pci_hba[b]) return (u##size) -1; \
+ return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
+} \
+EXPORT_SYMBOL(in##type);
+
+PCI_PORT_IN(b, 8)
+PCI_PORT_IN(w, 16)
+PCI_PORT_IN(l, 32)
+
+
+#define PCI_PORT_OUT(type, size) \
+void out##type (u##size d, int addr) \
+{ \
+ int b = PCI_PORT_HBA(addr); \
+ EISA_OUT(size); \
+ if (!parisc_pci_hba[b]) return; \
+ pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
+} \
+EXPORT_SYMBOL(out##type);
+
+PCI_PORT_OUT(b, 8)
+PCI_PORT_OUT(w, 16)
+PCI_PORT_OUT(l, 32)
+
+
+
+/*
+ * BIOS32 replacement.
+ */
+static int __init pcibios_init(void)
+{
+ if (!pci_bios)
+ return -1;
+
+ if (pci_bios->init) {
+ pci_bios->init();
+ } else {
+ printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
+ }
+ return 0;
+}
+
+
+/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ if (pci_bios->fixup_bus) {
+ pci_bios->fixup_bus(bus);
+ } else {
+ printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
+ }
+}
+
+
+char *pcibios_setup(char *str)
+{
+ return str;
+}
+
+/*
+ * Called by pci_set_master() - a driver interface.
+ *
+ * Legacy PDC guarantees to set:
+ * Map Memory BAR's into PA IO space.
+ * Map Expansion ROM BAR into one common PA IO space per bus.
+ * Map IO BAR's into PCI IO space.
+ * Command (see below)
+ * Cache Line Size
+ * Latency Timer
+ * Interrupt Line
+ * PPB: secondary latency timer, io/mmio base/limit,
+ * bus numbers, bridge control
+ *
+ */
+void pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* If someone already mucked with this, don't touch it. */
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat >= 16) return;
+
+ /*
+ ** HP generally has fewer devices on the bus than other architectures.
+ ** upper byte is PCI_LATENCY_TIMER.
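+	** A single 16-bit write to PCI_CACHE_LINE_SIZE sets the cache line
+	** size (in 32-bit words) and a latency timer of 0x80.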
+ */
+ pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
+ (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+}
+
+
+void __init pcibios_init_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev = bus->self;
+ unsigned short bridge_ctl;
+
+ /* We deal only with pci controllers and pci-pci bridges. */
+ if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return;
+
+ /* PCI-PCI bridge - set the cache line and default latency
+ (32) for primary and secondary buses. */
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
+ bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
+}
+
+
+/* KLUGE: Link the child and parent resources - generic PCI didn't */
+static void
+pcibios_link_hba_resources( struct resource *hba_res, struct resource *r)
+{
+ if (!r->parent) {
+ printk(KERN_EMERG "PCI: Tell willy he's wrong\n");
+ r->parent = hba_res;
+
+ /* reverse link is harder *sigh* */
+ if (r->parent->child) {
+ if (r->parent->sibling) {
+ struct resource *next = r->parent->sibling;
+ while (next->sibling)
+ next = next->sibling;
+ next->sibling = r;
+ } else {
+ r->parent->sibling = r;
+ }
+ } else
+ r->parent->child = r;
+ }
+}
+
+/* called by drivers/pci/setup-bus.c:pci_setup_bridge(). */
+void __devinit pcibios_resource_to_bus(struct pci_dev *dev,
+ struct pci_bus_region *region, struct resource *res)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_hba_data *hba = HBA_DATA(bus->bridge->platform_data);
+
+ if (res->flags & IORESOURCE_IO) {
+ /*
+		** I/O space may see bus numbers here, in the form
+		** 0xbbxxxx where bb is the bus number and xxxx is
+		** the I/O port space address.
+		** Remaining address translations are done in the
+		** PCI host adapter specific code - ie dino_out8.
+ */
+ region->start = PCI_PORT_ADDR(res->start);
+ region->end = PCI_PORT_ADDR(res->end);
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* Convert MMIO addr to PCI addr (undo global virtualization) */
+ region->start = PCI_BUS_ADDR(hba, res->start);
+ region->end = PCI_BUS_ADDR(hba, res->end);
+ }
+
+ DBG_RES("pcibios_resource_to_bus(%02x %s [%lx,%lx])\n",
+ bus->number, res->flags & IORESOURCE_IO ? "IO" : "MEM",
+ region->start, region->end);
+
+ /* KLUGE ALERT
+ ** if this resource isn't linked to a "parent", then it seems
+	** to be a child of the HBA - let's link it in.
+ */
+ pcibios_link_hba_resources(&hba->io_space, bus->resource[0]);
+ pcibios_link_hba_resources(&hba->lmmio_space, bus->resource[1]);
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+#endif
+
+/*
+ * pcibios_align_resource() is called every time the generic PCI code
+ * wants to generate a new address. While looking for an available
+ * address, each candidate is first "aligned" and then checked for
+ * availability until a match is found.
+ *
+ * Since we are just checking candidates, don't use any fields other
+ * than res->start.
+ */
+void pcibios_align_resource(void *data, struct resource *res,
+ unsigned long size, unsigned long alignment)
+{
+ unsigned long mask, align;
+
+ DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
+ pci_name(((struct pci_dev *) data)),
+ res->parent, res->start, res->end,
+ (int) res->flags, size, alignment);
+
+ /* If it's not IO, then it's gotta be MEM */
+ align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+
+ /* Align to largest of MIN or input size */
+ mask = max(alignment, align) - 1;
+ res->start += mask;
+ res->start &= ~mask;
+
+ /* The caller updates the end field, we don't. */
+}
+
+
+/*
+ * A driver is enabling the device. We make sure that all the appropriate
+ * bits are set to allow the device to operate as the driver is expecting.
+ * We enable the port IO and memory IO bits if the device has any BARs of
+ * that type, and we enable the PERR and SERR bits unconditionally.
+ * Drivers that do not need parity (eg graphics and possibly networking)
+ * can clear these bits if they want.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd;
+ int idx;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ for (idx = 0; idx < DEVICE_COUNT_RESOURCE; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ /* only setup requested resources */
+ if (!(mask & (1<<idx)))
+ continue;
+
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+#if 0
+ /* If bridge/bus controller has FBB enabled, child must too. */
+ if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
+ cmd |= PCI_COMMAND_FAST_BACK;
+#endif
+ DBGC("PCIBIOS: Enabling device %s cmd 0x%04x\n", pci_name(dev), cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ return 0;
+}
+
+
+/* PA-RISC specific */
+void pcibios_register_hba(struct pci_hba_data *hba)
+{
+ if (pci_hba_count >= PCI_HBA_MAX) {
+ printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
+ return;
+ }
+
+ parisc_pci_hba[pci_hba_count] = hba;
+ hba->hba_num = pci_hba_count++;
+}
+
+subsys_initcall(pcibios_init);
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
new file mode 100644
index 000000000000..52004ae28d20
--- /dev/null
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -0,0 +1,245 @@
+/*
+ * interfaces to log Chassis Codes via PDC (firmware)
+ *
+ * Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
+ * Copyright (C) 2002-2004 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef PDC_CHASSIS_DEBUG
+#ifdef PDC_CHASSIS_DEBUG
+#define DPRINTK(fmt, args...) printk(fmt, ## args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+
+#include <asm/pdc_chassis.h>
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+
+
+#ifdef CONFIG_PDC_CHASSIS
+static int pdc_chassis_old = 0;
+static unsigned int pdc_chassis_enabled = 1;
+
+
+/**
+ * pdc_chassis_setup() - Enable/disable pdc_chassis code at boot time.
+ * @str: configuration param: 0 to disable chassis log
+ * @return 1
+ */
+
+static int __init pdc_chassis_setup(char *str)
+{
+ /*panic_timeout = simple_strtoul(str, NULL, 0);*/
+ get_option(&str, &pdc_chassis_enabled);
+ return 1;
+}
+__setup("pdcchassis=", pdc_chassis_setup);
+
+
+/**
+ * pdc_chassis_checkold() - Checks for old PDC_CHASSIS compatibility
+ * @pdc_chassis_old: 1 if old pdc chassis style
+ *
+ * Currently, only E class and A180 are known to work with this.
+ * Inspired by Christoph Plattner
+ */
+
+static void __init pdc_chassis_checkold(void)
+{
+ switch(CPU_HVERSION) {
+ case 0x480: /* E25 */
+ case 0x481: /* E35 */
+ case 0x482: /* E45 */
+ case 0x483: /* E55 */
+ case 0x516: /* A180 */
+ pdc_chassis_old = 1;
+ break;
+
+ default:
+ break;
+ }
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_checkold(); pdc_chassis_old = %d\n", __FILE__, pdc_chassis_old);
+}
+
+
+/**
+ * pdc_chassis_panic_event() - Called by the panic handler.
+ *
+ * As soon as a panic occurs, we should inform the PDC.
+ */
+
+static int pdc_chassis_panic_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ return NOTIFY_DONE;
+}
+
+
+static struct notifier_block pdc_chassis_panic_block = {
+ .notifier_call = pdc_chassis_panic_event,
+ .priority = INT_MAX,
+};
+
+
+/**
+ * pdc_chassis_reboot_event() - Called by the reboot handler.
+ *
+ * As soon as a reboot occurs, we should inform the PDC.
+ */
+
+static int pdc_chassis_reboot_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+ return NOTIFY_DONE;
+}
+
+
+static struct notifier_block pdc_chassis_reboot_block = {
+ .notifier_call = pdc_chassis_reboot_event,
+ .priority = INT_MAX,
+};
+#endif /* CONFIG_PDC_CHASSIS */
+
+
+/**
+ * parisc_pdc_chassis_init() - Called at boot time.
+ */
+
+void __init parisc_pdc_chassis_init(void)
+{
+#ifdef CONFIG_PDC_CHASSIS
+ int handle = 0;
+ if (pdc_chassis_enabled) {
+ DPRINTK(KERN_DEBUG "%s: parisc_pdc_chassis_init()\n", __FILE__);
+
+		/* Let's see if we have something to handle... */
+ /* Check for PDC_PAT or old LED Panel */
+ pdc_chassis_checkold();
+ if (is_pdc_pat()) {
+ printk(KERN_INFO "Enabling PDC_PAT chassis codes support.\n");
+ handle = 1;
+ }
+ else if (pdc_chassis_old) {
+ printk(KERN_INFO "Enabling old style chassis LED panel support.\n");
+ handle = 1;
+ }
+
+ if (handle) {
+ /* initialize panic notifier chain */
+ notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+
+ /* initialize reboot notifier chain */
+ register_reboot_notifier(&pdc_chassis_reboot_block);
+ }
+ }
+#endif /* CONFIG_PDC_CHASSIS */
+}
+
+
+/**
+ * pdc_chassis_send_status() - Sends a predefined message to the chassis,
+ * and changes the front panel LEDs according to the new system state
+ * @message: the predefined message to send
+ *
+ * Only machines with 64-bit PDC PAT and those reported in
+ * pdc_chassis_checkold() are supported at the moment.
+ *
+ * Returns 0 if no error, -1 if no supported PDC is present or the message
+ * is invalid, else returns the appropriate PDC error code.
+ *
+ * For a list of predefined messages, see asm-parisc/pdc_chassis.h
+ */
+
+int pdc_chassis_send_status(int message)
+{
+	/* Maybe we should do this in another way? */
+ int retval = 0;
+#ifdef CONFIG_PDC_CHASSIS
+ if (pdc_chassis_enabled) {
+
+ DPRINTK(KERN_DEBUG "%s: pdc_chassis_send_status(%d)\n", __FILE__, message);
+
+#ifdef CONFIG_64BIT
+ if (is_pdc_pat()) {
+ switch(message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BSTART, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_BCOMPLETE, PDC_CHASSIS_LSTATE_RUN_NORMAL);
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_SHUTDOWN, PDC_CHASSIS_LSTATE_NONOS);
+ break;
+
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_PANIC, PDC_CHASSIS_LSTATE_RUN_CRASHREC);
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_LPMC, PDC_CHASSIS_LSTATE_RUN_SYSINT);
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ retval = pdc_pat_chassis_send_log(PDC_CHASSIS_PMSG_HPMC, PDC_CHASSIS_LSTATE_RUN_NCRIT);
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#else
+ if (pdc_chassis_old) {
+ switch (message) {
+ case PDC_CHASSIS_DIRECT_BSTART:
+ case PDC_CHASSIS_DIRECT_BCOMPLETE:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_RUN));
+ break;
+
+ case PDC_CHASSIS_DIRECT_SHUTDOWN:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_SHUT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_HPMC:
+ case PDC_CHASSIS_DIRECT_PANIC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_FLT));
+ break;
+
+ case PDC_CHASSIS_DIRECT_LPMC:
+ retval = pdc_chassis_disp(PDC_CHASSIS_DISP_DATA(OSTAT_WARN));
+ break;
+
+ default:
+ retval = -1;
+ }
+ } else retval = -1;
+#endif /* CONFIG_64BIT */
+ } /* if (pdc_chassis_enabled) */
+#endif /* CONFIG_PDC_CHASSIS */
+ return retval;
+}
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
new file mode 100644
index 000000000000..01f676d1673b
--- /dev/null
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -0,0 +1,189 @@
+/*
+ * PDC Console support - ie use firmware to dump text via boot console
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * The PDC console is a simple console, which can be used for debugging
+ * boot related problems on HP PA-RISC machines.
+ *
+ * This code uses the ROM (=PDC) based functions to read and write characters
+ * from and to PDC's boot path.
+ * Since all characters read from that path must be polled, this code can
+ * never be a fully functional Linux console.
+ */
+
+/* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems.
+ * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */
+#undef EARLY_BOOTUP_DEBUG
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/tty.h>
+#include <asm/page.h>
+#include <asm/types.h>
+#include <asm/system.h>
+#include <asm/pdc.h> /* for iodc_call() proto and friends */
+
+
+static void pdc_console_write(struct console *co, const char *s, unsigned count)
+{
+ while(count--)
+ pdc_iodc_putc(*s++);
+}
+
+void pdc_outc(unsigned char c)
+{
+ pdc_iodc_outc(c);
+}
+
+void pdc_printf(const char *fmt, ...)
+{
+ va_list args;
+ char buf[1024];
+ int i, len;
+
+ va_start(args, fmt);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ for (i = 0; i < len; i++)
+ pdc_iodc_outc(buf[i]);
+}
+
+int pdc_console_poll_key(struct console *co)
+{
+ return pdc_iodc_getc();
+}
+
+static int pdc_console_setup(struct console *co, char *options)
+{
+ return 0;
+}
+
+#if defined(CONFIG_PDC_CONSOLE)
+#define PDC_CONSOLE_DEVICE pdc_console_device
+static struct tty_driver * pdc_console_device (struct console *c, int *index)
+{
+ extern struct tty_driver console_driver;
+ *index = c->index ? c->index-1 : fg_console;
+ return &console_driver;
+}
+#else
+#define PDC_CONSOLE_DEVICE NULL
+#endif
+
+static struct console pdc_cons = {
+ .name = "ttyB",
+ .write = pdc_console_write,
+ .device = PDC_CONSOLE_DEVICE,
+ .setup = pdc_console_setup,
+ .flags = CON_BOOT|CON_PRINTBUFFER|CON_ENABLED,
+ .index = -1,
+};
+
+static int pdc_console_initialized;
+extern unsigned long con_start; /* kernel/printk.c */
+extern unsigned long log_end; /* kernel/printk.c */
+
+
+static void pdc_console_init_force(void)
+{
+ if (pdc_console_initialized)
+ return;
+ ++pdc_console_initialized;
+
+ /* If the console is duplex then copy the COUT parameters to CIN. */
+ if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
+ memcpy(&PAGE0->mem_kbd, &PAGE0->mem_cons, sizeof(PAGE0->mem_cons));
+
+ /* register the pdc console */
+ register_console(&pdc_cons);
+}
+
+void __init pdc_console_init(void)
+{
+#if defined(EARLY_BOOTUP_DEBUG) || defined(CONFIG_PDC_CONSOLE)
+ pdc_console_init_force();
+#endif
+#ifdef EARLY_BOOTUP_DEBUG
+ printk(KERN_INFO "Initialized PDC Console for debugging.\n");
+#endif
+}
+
+
+/* Unregister the pdc console with the printk console layer */
+void pdc_console_die(void)
+{
+ if (!pdc_console_initialized)
+ return;
+ --pdc_console_initialized;
+
+ printk(KERN_INFO "Switching from PDC console\n");
+
+ /* Don't repeat what we've already printed */
+ con_start = log_end;
+
+ unregister_console(&pdc_cons);
+}
+
+
+/*
+ * Used for emergencies; currently only called if an HPMC occurs. When an
+ * HPMC happens, the current console may not be properly initialized after
+ * the PDC I/O reset, so this routine unregisters all current consoles,
+ * reinitializes the pdc console and registers it again.
+ */
+
+void pdc_console_restart(void)
+{
+ struct console *console;
+
+ if (pdc_console_initialized)
+ return;
+
+ while ((console = console_drivers) != NULL)
+ unregister_console(console_drivers);
+
+ /* Don't repeat what we've already printed */
+ con_start = log_end;
+
+ /* force registering the pdc console */
+ pdc_console_init_force();
+}
+
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
new file mode 100644
index 000000000000..b3ad0a505b87
--- /dev/null
+++ b/arch/parisc/kernel/perf.c
@@ -0,0 +1,841 @@
+/*
+ * Parisc performance counters
+ * Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ *
+ * This code is derived, with permission, from HP/UX sources.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Edited comment from original sources:
+ *
+ * This driver programs the PCX-U/PCX-W performance counters
+ * on the PA-RISC 2.0 chips. The driver now keeps all images internal
+ * to the kernel to hopefully eliminate the possibility of a bad image
+ * halting the CPU. Also, there are different
+ * images for the PCX-W and later chips vs the PCX-U chips.
+ *
+ * Only 1 process is allowed to access the driver at any time,
+ * so the only protection that is needed is at open and close.
+ * A variable "perf_enabled" is used to hold the state of the
+ * driver. The spinlock "perf_lock" is used to protect the
+ * modification of the state during open/close operations so
+ * multiple processes don't get into the driver simultaneously.
+ *
+ * This driver accesses the processor directly vs going through
+ * the PDC INTRIGUE calls. This is done to eliminate bugs introduced
+ * in various PDC revisions. The code is much more maintainable
+ * and reliable this way vs having to debug on every version of PDC
+ * on every box.
+ */
+
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+
+#include <asm/uaccess.h>
+#include <asm/perf.h>
+#include <asm/parisc-device.h>
+#include <asm/processor.h>
+#include <asm/runway.h>
+#include <asm/io.h> /* for __raw_read() */
+
+#include "perf_images.h"
+
+#define MAX_RDR_WORDS 24
+#define PERF_VERSION 2 /* derived from hpux's PI v2 interface */
+
+/* definition of RDR regs */
+struct rdr_tbl_ent {
+ uint16_t width;
+ uint8_t num_words;
+ uint8_t write_control;
+};
+
+static int perf_processor_interface = UNKNOWN_INTF;
+static int perf_enabled = 0;
+static spinlock_t perf_lock;
+struct parisc_device *cpu_device = NULL;
+
+/* RDRs to write for PCX-W */
+static int perf_rdrs_W[] =
+ { 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDRs to write for PCX-U */
+static int perf_rdrs_U[] =
+ { 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
+
+/* RDR register descriptions for PCX-W */
+static struct rdr_tbl_ent perf_rdr_tbl_W[] = {
+ { 19, 1, 8 }, /* RDR 0 */
+ { 16, 1, 16 }, /* RDR 1 */
+ { 72, 2, 0 }, /* RDR 2 */
+ { 81, 2, 0 }, /* RDR 3 */
+ { 328, 6, 0 }, /* RDR 4 */
+ { 160, 3, 0 }, /* RDR 5 */
+ { 336, 6, 0 }, /* RDR 6 */
+ { 164, 3, 0 }, /* RDR 7 */
+ { 0, 0, 0 }, /* RDR 8 */
+ { 35, 1, 0 }, /* RDR 9 */
+ { 6, 1, 0 }, /* RDR 10 */
+ { 18, 1, 0 }, /* RDR 11 */
+ { 13, 1, 0 }, /* RDR 12 */
+ { 8, 1, 0 }, /* RDR 13 */
+ { 8, 1, 0 }, /* RDR 14 */
+ { 8, 1, 0 }, /* RDR 15 */
+ { 1530, 24, 0 }, /* RDR 16 */
+ { 16, 1, 0 }, /* RDR 17 */
+ { 4, 1, 0 }, /* RDR 18 */
+ { 0, 0, 0 }, /* RDR 19 */
+ { 152, 3, 24 }, /* RDR 20 */
+ { 152, 3, 24 }, /* RDR 21 */
+ { 233, 4, 48 }, /* RDR 22 */
+ { 233, 4, 48 }, /* RDR 23 */
+ { 71, 2, 0 }, /* RDR 24 */
+ { 71, 2, 0 }, /* RDR 25 */
+ { 11, 1, 0 }, /* RDR 26 */
+ { 18, 1, 0 }, /* RDR 27 */
+ { 128, 2, 0 }, /* RDR 28 */
+ { 0, 0, 0 }, /* RDR 29 */
+ { 16, 1, 0 }, /* RDR 30 */
+ { 16, 1, 0 }, /* RDR 31 */
+};
+
+/* RDR register descriptions for PCX-U */
+static struct rdr_tbl_ent perf_rdr_tbl_U[] = {
+ { 19, 1, 8 }, /* RDR 0 */
+ { 32, 1, 16 }, /* RDR 1 */
+ { 20, 1, 0 }, /* RDR 2 */
+ { 0, 0, 0 }, /* RDR 3 */
+ { 344, 6, 0 }, /* RDR 4 */
+ { 176, 3, 0 }, /* RDR 5 */
+ { 336, 6, 0 }, /* RDR 6 */
+ { 0, 0, 0 }, /* RDR 7 */
+ { 0, 0, 0 }, /* RDR 8 */
+ { 0, 0, 0 }, /* RDR 9 */
+ { 28, 1, 0 }, /* RDR 10 */
+ { 33, 1, 0 }, /* RDR 11 */
+ { 0, 0, 0 }, /* RDR 12 */
+ { 230, 4, 0 }, /* RDR 13 */
+ { 32, 1, 0 }, /* RDR 14 */
+ { 128, 2, 0 }, /* RDR 15 */
+ { 1494, 24, 0 }, /* RDR 16 */
+ { 18, 1, 0 }, /* RDR 17 */
+ { 4, 1, 0 }, /* RDR 18 */
+ { 0, 0, 0 }, /* RDR 19 */
+ { 158, 3, 24 }, /* RDR 20 */
+ { 158, 3, 24 }, /* RDR 21 */
+ { 194, 4, 48 }, /* RDR 22 */
+ { 194, 4, 48 }, /* RDR 23 */
+ { 71, 2, 0 }, /* RDR 24 */
+ { 71, 2, 0 }, /* RDR 25 */
+ { 28, 1, 0 }, /* RDR 26 */
+ { 33, 1, 0 }, /* RDR 27 */
+ { 88, 2, 0 }, /* RDR 28 */
+ { 32, 1, 0 }, /* RDR 29 */
+ { 24, 1, 0 }, /* RDR 30 */
+ { 16, 1, 0 }, /* RDR 31 */
+};
+
+/*
+ * A non-zero write_control in the above tables is a byte offset into
+ * this array.
+ */
+static uint64_t perf_bitmasks[] = {
+ 0x0000000000000000ul, /* first dbl word must be zero */
+ 0xfdffe00000000000ul, /* RDR0 bitmask */
+ 0x003f000000000000ul, /* RDR1 bitmask */
+ 0x00fffffffffffffful, /* RDR20-RDR21 bitmask (152 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffc00000000ul,
+ 0xfffffffffffffffful, /* RDR22-RDR23 bitmask (233 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffffffffffcul,
+ 0xff00000000000000ul
+};
+
+/*
+ * Write control bitmasks for the PA-8700 processor, since some things
+ * have changed slightly.
+ */
+static uint64_t perf_bitmasks_piranha[] = {
+ 0x0000000000000000ul, /* first dbl word must be zero */
+ 0xfdffe00000000000ul, /* RDR0 bitmask */
+ 0x003f000000000000ul, /* RDR1 bitmask */
+ 0x00fffffffffffffful, /* RDR20-RDR21 bitmask (158 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffc00000000ul,
+ 0xfffffffffffffffful, /* RDR22-RDR23 bitmask (210 bits) */
+ 0xfffffffffffffffful,
+ 0xfffffffffffffffful,
+ 0xfffc000000000000ul
+};
+
+static uint64_t *bitmask_array; /* array of bitmasks to use */
+
+/******************************************************************************
+ * Function Prototypes
+ *****************************************************************************/
+static int perf_config(uint32_t *image_ptr);
+static int perf_release(struct inode *inode, struct file *file);
+static int perf_open(struct inode *inode, struct file *file);
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
+static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos);
+static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg);
+static void perf_start_counters(void);
+static int perf_stop_counters(uint32_t *raddr);
+static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
+static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
+static int perf_rdr_clear(uint32_t rdr_num);
+static int perf_write_image(uint64_t *memaddr);
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);
+
+/* External Assembly Routines */
+extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
+extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
+extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
+extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
+extern void perf_intrigue_enable_perf_counters (void);
+extern void perf_intrigue_disable_perf_counters (void);
+
+/******************************************************************************
+ * Function Definitions
+ *****************************************************************************/
+
+
+/*
+ * configure:
+ *
+ * Configure the cpu with a given data image. First turn off the counters,
+ * then download the image, then turn the counters back on.
+ */
+static int perf_config(uint32_t *image_ptr)
+{
+ long error;
+ uint32_t raddr[4];
+
+ /* Stop the counters*/
+ error = perf_stop_counters(raddr);
+ if (error != 0) {
+ printk("perf_config: perf_stop_counters = %ld\n", error);
+ return -EINVAL;
+ }
+
+	printk("Preparing to write image\n");
+ /* Write the image to the chip */
+ error = perf_write_image((uint64_t *)image_ptr);
+ if (error != 0) {
+ printk("perf_config: DOWNLOAD = %ld\n", error);
+ return -EINVAL;
+ }
+
+	printk("Preparing to start counters\n");
+
+ /* Start the counters */
+ perf_start_counters();
+
+ return sizeof(uint32_t);
+}
+
+/*
+ * Open the device and initialize all of its memory. The device is only
+ * opened once, but can be "queried" by multiple processes that know its
+ * file descriptor.
+ */
+static int perf_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&perf_lock);
+ if (perf_enabled) {
+ spin_unlock(&perf_lock);
+ return -EBUSY;
+ }
+ perf_enabled = 1;
+ spin_unlock(&perf_lock);
+
+ return 0;
+}
+
+/*
+ * Close the device.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&perf_lock);
+ perf_enabled = 0;
+ spin_unlock(&perf_lock);
+
+ return 0;
+}
+
+/*
+ * Read does nothing for this driver
+ */
+static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
+{
+ return 0;
+}
+
+/*
+ * write:
+ *
+ * This routine downloads the image to the chip. It must be
+ * called on the processor that the download should happen
+ * on.
+ */
+static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ size_t image_size;
+ uint32_t image_type;
+ uint32_t interface_type;
+ uint32_t test;
+
+ if (perf_processor_interface == ONYX_INTF)
+ image_size = PCXU_IMAGE_SIZE;
+ else if (perf_processor_interface == CUDA_INTF)
+ image_size = PCXW_IMAGE_SIZE;
+ else
+ return -EFAULT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (count != sizeof(uint32_t))
+ return -EIO;
+
+	/* copy_from_user() returns the number of bytes not copied, not an
+	   errno, so map any failure to -EFAULT */
+	if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
+		return -EFAULT;
+
+ /* Get the interface type and test type */
+ interface_type = (image_type >> 16) & 0xffff;
+ test = (image_type & 0xffff);
+
+ /* Make sure everything makes sense */
+
+ /* First check the machine type is correct for
+ the requested image */
+ if (((perf_processor_interface == CUDA_INTF) &&
+ (interface_type != CUDA_INTF)) ||
+ ((perf_processor_interface == ONYX_INTF) &&
+ (interface_type != ONYX_INTF)))
+ return -EINVAL;
+
+ /* Next check to make sure the requested image
+ is valid */
+ if (((interface_type == CUDA_INTF) &&
+ (test >= MAX_CUDA_IMAGES)) ||
+ ((interface_type == ONYX_INTF) &&
+ (test >= MAX_ONYX_IMAGES)))
+ return -EINVAL;
+
+ /* Copy the image into the processor */
+ if (interface_type == CUDA_INTF)
+ return perf_config(cuda_images[test]);
+ else
+ return perf_config(onyx_images[test]);
+
+ return count;
+}
+
+/*
+ * Patch the images that need to know the IVA addresses.
+ */
+static void perf_patch_images(void)
+{
+#if 0 /* FIXME!! */
+/*
+ * NOTE: this routine is VERY specific to the current TLB image.
+ * If the image is changed, this routine might also need to be changed.
+ */
+ extern void $i_itlb_miss_2_0();
+ extern void $i_dtlb_miss_2_0();
+ extern void PA2_0_iva();
+
+ /*
+ * We can only use the lower 32-bits, the upper 32-bits should be 0
+ * anyway given this is in the kernel
+ */
+ uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
+ uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
+ uint32_t IVAaddress = (uint32_t)&PA2_0_iva;
+
+ if (perf_processor_interface == ONYX_INTF) {
+ /* clear last 2 bytes */
+ onyx_images[TLBMISS][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[TLBMISS][17] = itlb_addr;
+
+ /* clear last 2 bytes */
+ onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[TLBHANDMISS][17] = itlb_addr;
+
+ /* clear last 2 bytes */
+ onyx_images[BIG_CPI][15] &= 0xffffff00;
+ /* set 2 bytes */
+ onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
+ onyx_images[BIG_CPI][17] = itlb_addr;
+
+ onyx_images[PANIC][15] &= 0xffffff00; /* clear last 2 bytes */
+ onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24)); /* set 2 bytes */
+ onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;
+
+
+ } else if (perf_processor_interface == CUDA_INTF) {
+ /* Cuda interface */
+ cuda_images[TLBMISS][16] =
+ (cuda_images[TLBMISS][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[TLBMISS][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+ cuda_images[TLBHANDMISS][16] =
+ (cuda_images[TLBHANDMISS][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[TLBHANDMISS][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
+
+ cuda_images[BIG_CPI][16] =
+ (cuda_images[BIG_CPI][16]&0xffff0000) |
+ ((dtlb_addr >> 8)&0x0000ffff);
+ cuda_images[BIG_CPI][17] =
+ ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
+ } else {
+ /* Unknown type */
+ }
+#endif
+}
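+
+/*
+ * Worked example for the (currently disabled) patching code above:
+ * a 32-bit handler address A is spread across two 32-bit image words,
+ *
+ *	word[15] = (word[15] & 0xffffff00) | (A >> 24);	  A bits 31:24
+ *	word[16] = (A << 8) & 0xffffff00;		  A bits 23:0
+ *
+ * so the four address bytes occupy the last byte of word 15 and the
+ * first three bytes of word 16.
+ */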
+
+
+/*
+ * ioctl routine
+ * All routines affect only the processor they are executed on, so you
+ * must be running on the processor whose counters you wish to change.
+ */
+
+static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long error_start;
+ uint32_t raddr[4];
+
+ switch (cmd) {
+
+ case PA_PERF_ON:
+ /* Start the counters */
+ perf_start_counters();
+ return 0;
+
+ case PA_PERF_OFF:
+ error_start = perf_stop_counters(raddr);
+ if (error_start != 0) {
+ printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
+ return -EFAULT;
+ }
+
+ /* copy out the Counters */
+ if (copy_to_user((void __user *)arg, raddr,
+ sizeof (raddr)) != 0) {
+ return -EFAULT;
+ }
+ return 0;
+
+	case PA_PERF_VERSION:
+		/* Return the version # */
+		return put_user(PERF_VERSION, (int __user *)arg);
+
+ default:
+ break;
+ }
+ return -ENOTTY;
+}
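+
+/*
+ * Illustration only (not part of the driver): a minimal user-space
+ * session, assuming the misc device appears as /dev/perf and the
+ * PA_PERF_* ioctl numbers are visible to the caller:
+ *
+ *	int fd = open("/dev/perf", O_RDWR);
+ *	ioctl(fd, PA_PERF_ON);			start counting
+ *	... run the workload ...
+ *	uint32_t counts[4];
+ *	ioctl(fd, PA_PERF_OFF, counts);		stop, fetch the 4 counters
+ */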
+
+static struct file_operations perf_fops = {
+ .llseek = no_llseek,
+ .read = perf_read,
+ .write = perf_write,
+ .ioctl = perf_ioctl,
+ .open = perf_open,
+ .release = perf_release
+};
+
+static struct miscdevice perf_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= PA_PERF_DEV,
+	.fops	= &perf_fops,
+};
+
+/*
+ * Initialize the module
+ */
+static int __init perf_init(void)
+{
+ int ret;
+
+ /* Determine correct processor interface to use */
+ bitmask_array = perf_bitmasks;
+
+ if (boot_cpu_data.cpu_type == pcxu ||
+ boot_cpu_data.cpu_type == pcxu_) {
+ perf_processor_interface = ONYX_INTF;
+ } else if (boot_cpu_data.cpu_type == pcxw ||
+ boot_cpu_data.cpu_type == pcxw_ ||
+ boot_cpu_data.cpu_type == pcxw2 ||
+ boot_cpu_data.cpu_type == mako) {
+ perf_processor_interface = CUDA_INTF;
+ if (boot_cpu_data.cpu_type == pcxw2 ||
+ boot_cpu_data.cpu_type == mako)
+ bitmask_array = perf_bitmasks_piranha;
+ } else {
+ perf_processor_interface = UNKNOWN_INTF;
+		printk(KERN_INFO "Performance monitoring counters not supported on this processor\n");
+ return -ENODEV;
+ }
+
+ ret = misc_register(&perf_dev);
+ if (ret) {
+ printk(KERN_ERR "Performance monitoring counters: "
+ "cannot register misc device.\n");
+ return ret;
+ }
+
+ /* Patch the images to match the system */
+ perf_patch_images();
+
+ spin_lock_init(&perf_lock);
+
+ /* TODO: this only lets us access the first cpu.. what to do for SMP? */
+ cpu_device = cpu_data[0].dev;
+	printk(KERN_INFO "Performance monitoring counters enabled for %s\n",
+	       cpu_data[0].dev->name);
+
+ return 0;
+}
+
+/*
+ * perf_start_counters(void)
+ *
+ * Start the counters.
+ */
+static void perf_start_counters(void)
+{
+ /* Enable performance monitor counters */
+ perf_intrigue_enable_perf_counters();
+}
+
+/*
+ * perf_stop_counters
+ *
+ * Stop the performance counters and return the
+ * counts in the array supplied by the caller.
+ */
+static int perf_stop_counters(uint32_t *raddr)
+{
+ uint64_t userbuf[MAX_RDR_WORDS];
+
+ /* Disable performance counters */
+ perf_intrigue_disable_perf_counters();
+
+ if (perf_processor_interface == ONYX_INTF) {
+ uint64_t tmp64;
+ /*
+ * Read the counters
+ */
+ if (!perf_rdr_read_ubuf(16, userbuf))
+ return -13;
+
+ /* Counter0 is bits 1398 thru 1429 */
+ tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
+ tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
+ /* OR sticky0 (bit 1430) to counter0 bit 32 */
+ tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
+ raddr[0] = (uint32_t)tmp64;
+
+ /* Counter1 is bits 1431 thru 1462 */
+ tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
+ /* OR sticky1 (bit 1463) to counter1 bit 32 */
+ tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
+ raddr[1] = (uint32_t)tmp64;
+
+ /* Counter2 is bits 1464 thru 1495 */
+ tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
+ tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
+ /* OR sticky2 (bit 1496) to counter2 bit 32 */
+ tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
+ raddr[2] = (uint32_t)tmp64;
+
+ /* Counter3 is bits 1497 thru 1528 */
+ tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
+ /* OR sticky3 (bit 1529) to counter3 bit 32 */
+ tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
+ raddr[3] = (uint32_t)tmp64;
+
+ /*
+ * Zero out the counters
+ */
+
+ /*
+ * The counters and sticky-bits comprise the last 132 bits
+ * (1398 - 1529) of RDR16 on a U chip. We'll zero these
+ * out the easy way: zero out last 10 bits of dword 21,
+ * all of dword 22 and 58 bits (plus 6 don't care bits) of
+ * dword 23.
+ */
+ userbuf[21] &= 0xfffffffffffffc00ul; /* 0 to last 10 bits */
+ userbuf[22] = 0;
+ userbuf[23] = 0;
+
+		/*
+		 * Write the zeroed buffer (and the rest of the image)
+		 * back out, since the read above was destructive.
+		 */
+ perf_rdr_write(16, userbuf);
+ } else {
+
+ /*
+ * Read RDR-15 which contains the counters and sticky bits
+ */
+ if (!perf_rdr_read_ubuf(15, userbuf)) {
+ return -13;
+ }
+
+ /*
+ * Clear out the counters
+ */
+ perf_rdr_clear(15);
+
+ /*
+ * Copy the counters
+ */
+ raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
+ raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
+ raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
+ raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
+ }
+
+ return 0;
+}
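+
+/*
+ * Worked example for the Onyx extraction above: counter0 occupies
+ * RDR16 bits 1398-1429.  With the layout implied by the masks (bit N
+ * of the shifted-in stream sits in userbuf[N / 64], counted from the
+ * MSB), that is the low 10 bits of userbuf[21] (stream bits 1398-1407)
+ * plus the high 22 bits of userbuf[22] (stream bits 1408-1429), hence
+ *
+ *	(userbuf[21] << 22) & 0xffc00000	bits 31:22 of counter0
+ *	(userbuf[22] >> 42) & 0x003fffff	bits 21:0  of counter0
+ */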
+
+/*
+ * perf_rdr_get_entry
+ *
+ * Retrieve a pointer to the description of what this
+ * RDR contains.
+ */
+static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
+{
+ if (perf_processor_interface == ONYX_INTF) {
+ return &perf_rdr_tbl_U[rdr_num];
+ } else {
+ return &perf_rdr_tbl_W[rdr_num];
+ }
+}
+
+/*
+ * perf_rdr_read_ubuf
+ *
+ * Read the RDR value into the buffer specified.
+ */
+static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
+{
+ uint64_t data, data_mask = 0;
+ uint32_t width, xbits, i;
+ struct rdr_tbl_ent *tentry;
+
+ tentry = perf_rdr_get_entry(rdr_num);
+ if ((width = tentry->width) == 0)
+ return 0;
+
+ /* Clear out buffer */
+ i = tentry->num_words;
+ while (i--) {
+ buffer[i] = 0;
+ }
+
+	/* If the width is not an exact multiple of 64 bits, build a
+	   mask for the bits that carry over into the previous word */
+ if ((xbits = width & 0x03f) != 0) {
+ data_mask = 1;
+ data_mask <<= (64 - xbits);
+ data_mask--;
+ }
+
+ /* Grab all of the data */
+ i = tentry->num_words;
+ while (i--) {
+
+ if (perf_processor_interface == ONYX_INTF) {
+ data = perf_rdr_shift_in_U(rdr_num, width);
+ } else {
+ data = perf_rdr_shift_in_W(rdr_num, width);
+ }
+ if (xbits) {
+ buffer[i] |= (data << (64 - xbits));
+ if (i) {
+ buffer[i-1] |= ((data >> xbits) & data_mask);
+ }
+ } else {
+ buffer[i] = data;
+ }
+ }
+
+ return 1;
+}
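+
+/*
+ * Worked example for the realignment above, taking a 1530-bit,
+ * 24-word RDR (the shape the U-chip RDR16 appears to have, given the
+ * six don't-care bits noted in perf_stop_counters): xbits = 1530 % 64
+ * = 58, so data_mask covers the 64 - 58 = 6 carry-over bits.  Each
+ * 64-bit chunk shifted in from the chip is then split: its low 58
+ * bits fill the upper 58 bits of buffer[i] (data << 6), and its top
+ * 6 bits land in the low 6 bits of buffer[i-1], undoing the skew
+ * caused by reading a non-multiple-of-64-bit register in 64-bit
+ * pieces.
+ */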
+
+/*
+ * perf_rdr_clear
+ *
+ * Zero out the given RDR register
+ */
+static int perf_rdr_clear(uint32_t rdr_num)
+{
+ struct rdr_tbl_ent *tentry;
+ int32_t i;
+
+ tentry = perf_rdr_get_entry(rdr_num);
+
+ if (tentry->width == 0) {
+ return -1;
+ }
+
+ i = tentry->num_words;
+ while (i--) {
+ if (perf_processor_interface == ONYX_INTF) {
+ perf_rdr_shift_out_U(rdr_num, 0UL);
+ } else {
+ perf_rdr_shift_out_W(rdr_num, 0UL);
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * perf_write_image
+ *
+ * Write the given image out to the processor
+ */
+static int perf_write_image(uint64_t *memaddr)
+{
+ uint64_t buffer[MAX_RDR_WORDS];
+ uint64_t *bptr;
+ uint32_t dwords;
+ uint32_t *intrigue_rdr;
+ uint64_t *intrigue_bitmask, tmp64, proc_hpa;
+ struct rdr_tbl_ent *tentry;
+ int i;
+
+ /* Clear out counters */
+ if (perf_processor_interface == ONYX_INTF) {
+
+ perf_rdr_clear(16);
+
+ /* Toggle performance monitor */
+ perf_intrigue_enable_perf_counters();
+ perf_intrigue_disable_perf_counters();
+
+ intrigue_rdr = perf_rdrs_U;
+ } else {
+ perf_rdr_clear(15);
+ intrigue_rdr = perf_rdrs_W;
+ }
+
+ /* Write all RDRs */
+ while (*intrigue_rdr != -1) {
+ tentry = perf_rdr_get_entry(*intrigue_rdr);
+ perf_rdr_read_ubuf(*intrigue_rdr, buffer);
+ bptr = &buffer[0];
+ dwords = tentry->num_words;
+ if (tentry->write_control) {
+ intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
+ while (dwords--) {
+ tmp64 = *intrigue_bitmask & *memaddr++;
+ tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
+ *bptr++ = tmp64;
+ }
+ } else {
+ while (dwords--) {
+ *bptr++ = *memaddr++;
+ }
+ }
+
+ perf_rdr_write(*intrigue_rdr, buffer);
+ intrigue_rdr++;
+ }
+
+	/*
+	 * Now program the Runway registers, which are not part of the RDRs
+	 */
+
+ if (cpu_device == NULL)
+ {
+ printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
+ return -1;
+ }
+
+ proc_hpa = cpu_device->hpa;
+
+ /* Merge intrigue bits into Runway STATUS 0 */
+ tmp64 = __raw_readq(proc_hpa + RUNWAY_STATUS) & 0xffecfffffffffffful;
+ __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), proc_hpa + RUNWAY_STATUS);
+
+ /* Write RUNWAY DEBUG registers */
+ for (i = 0; i < 8; i++) {
+ __raw_writeq(*memaddr++, proc_hpa + RUNWAY_DEBUG + i);
+ }
+
+ return 0;
+}
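+
+/*
+ * Note on the Runway STATUS update above: 0xffecfffffffffffful and
+ * 0x0013000000000000ul are exact complements covering bits 52, 49
+ * and 48, so the sequence is a read-modify-write that preserves all
+ * other STATUS bits and replaces only those three intrigue-related
+ * bits with the values supplied by the image:
+ *
+ *	new = (old & ~MASK) | (image_word & MASK);	MASK = 0x0013000000000000
+ */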
+
+/*
+ * perf_rdr_write
+ *
+ * Write the given RDR register with the contents
+ * of the given buffer.
+ */
+static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
+{
+ struct rdr_tbl_ent *tentry;
+ int32_t i;
+
+	printk(KERN_DEBUG "perf_rdr_write\n");
+ tentry = perf_rdr_get_entry(rdr_num);
+ if (tentry->width == 0) { return; }
+
+ i = tentry->num_words;
+ while (i--) {
+ if (perf_processor_interface == ONYX_INTF) {
+ perf_rdr_shift_out_U(rdr_num, buffer[i]);
+ } else {
+ perf_rdr_shift_out_W(rdr_num, buffer[i]);
+ }
+ }
+	printk(KERN_DEBUG "perf_rdr_write done\n");
+}
+
+module_init(perf_init);
diff --git a/arch/parisc/kernel/perf_asm.S b/arch/parisc/kernel/perf_asm.S
new file mode 100644
index 000000000000..adb3c6444910
--- /dev/null
+++ b/arch/parisc/kernel/perf_asm.S
@@ -0,0 +1,1691 @@
+
+/* low-level asm for "intrigue" (PA8500-8700 CPU perf counters)
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <asm/assembly.h>
+
+#ifdef CONFIG_64BIT
+ .level 2.0w
+#endif /* CONFIG_64BIT */
+
+#define MTDIAG_1(gr) .word 0x14201840 + gr*0x10000
+#define MTDIAG_2(gr) .word 0x14401840 + gr*0x10000
+#define MFDIAG_1(gr) .word 0x142008A0 + gr
+#define MFDIAG_2(gr) .word 0x144008A0 + gr
+#define STDIAG(dr) .word 0x14000AA0 + dr*0x200000
+#define SFDIAG(dr) .word 0x14000BA0 + dr*0x200000
+#define DR2_SLOW_RET 53
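+
+; These macros hand-assemble the diagnose instructions, folding the
+; general-register or diagnose-register number straight into the
+; opcode word.  For example, MFDIAG_2(24) emits
+; .word 0x144008A0 + 24 = 0x144008B8 (move %dr2 into %r24), and
+; STDIAG(10) emits .word 0x14000AA0 + 10*0x200000 = 0x15400AA0.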
+
+
+;
+; Enable the performance counters
+;
+; The coprocessor only needs to be enabled while issuing the
+; pmenb/pmdis instructions that start/stop the counters.
+;
+ .text
+ .align 32
+
+ .export perf_intrigue_enable_perf_counters,code
+perf_intrigue_enable_perf_counters:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+ ldi 0x20,%r25 ; load up perfmon bit
+ mfctl ccr,%r26 ; get coprocessor register
+ or %r25,%r26,%r26 ; set bit
+ mtctl %r26,ccr ; turn on performance coprocessor
+ pmenb ; enable performance monitor
+ ssm 0,0 ; dummy op to ensure completion
+ sync ; follow ERS
+ andcm %r26,%r25,%r26 ; clear bit now
+ mtctl %r26,ccr ; turn off performance coprocessor
+ nop ; NOPs as specified in ERS
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bve (%r2)
+ nop
+ .exit
+ .procend
+
+ .export perf_intrigue_disable_perf_counters,code
+perf_intrigue_disable_perf_counters:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+ ldi 0x20,%r25 ; load up perfmon bit
+ mfctl ccr,%r26 ; get coprocessor register
+ or %r25,%r26,%r26 ; set bit
+ mtctl %r26,ccr ; turn on performance coprocessor
+ pmdis ; disable performance monitor
+ ssm 0,0 ; dummy op to ensure completion
+ andcm %r26,%r25,%r26 ; clear bit now
+ bve (%r2)
+ mtctl %r26,ccr ; turn off performance coprocessor
+ .exit
+ .procend
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_in_W
+;*
+;* Description:
+;*	This routine shifts data in from the RDR in arg0 and returns
+;*	the result in ret0. If the RDR is <= 64 bits in length, it
+;*	is shifted back in immediately. This is to compensate
+;*	for RDR10, which has bits that preclude PDC stack operations
+;*	when they are in the wrong state.
+;*
+;* Arguments:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;*
+;* Returns:
+;*	ret0 = next 64 bits of RDR data (right justified)
+;*
+;* Register usage:
+;*	arg0 : rdr to be read
+;*	arg1 : bit length of rdr
+;*	%r24 - original DR2 value
+;*	%r1  - scratch
+;*	%r29 - scratch
+;*
+;***********************************************************************
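+;
+; Implementation note: each RDR below gets an 8-instruction (32-byte)
+; slot.  %r1 is loaded with 4 * RDR number and "blr %r1,%r0" branches
+; forward by 8 bytes per unit of %r1, i.e. one 32-byte slot per RDR,
+; which is why every slot is padded with nops to exactly eight
+; instructions.
+;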
+
+ .export perf_rdr_shift_in_W,code
+perf_rdr_shift_in_W:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+;
+; read(shift in) the RDR.
+;
+
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r29
+ MFDIAG_2 (24)
+ or %r24,%r29,%r29
+ MTDIAG_2 (29) ; set DR2_SLOW_RET
+
+ nop
+ nop
+ nop
+ nop
+
+;
+; Cacheline start (32-byte cacheline)
+;
+ nop
+ nop
+ nop
+ extrd,u arg1,63,6,%r1 ; setup shift amount by bits to move
+
+ mtsar %r1
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+;
+; Cacheline start (32-byte cacheline)
+;
+
+ ;
+ ; RDR 0 sequence
+ ;
+ SFDIAG (0)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1) ; mtdiag %dr1, %r1
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 1 sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (1)
+ ssm 0,0
+ MFDIAG_1 (28)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+ nop
+
+ ;
+ ; RDR 2 read sequence
+ ;
+ SFDIAG (2)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 3 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 4 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 5 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (5)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 6 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (6)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 7 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 8 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 9 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 10 read sequence
+ ;
+ SFDIAG (10)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 11 read sequence
+ ;
+ SFDIAG (11)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 12 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 13 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (13)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 14 read sequence
+ ;
+ SFDIAG (14)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 15 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (15)
+ ssm 0,0
+ MFDIAG_1 (28)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+ nop
+
+ ;
+ ; RDR 16 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (16)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 17 read sequence
+ ;
+ SFDIAG (17)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 18 read sequence
+ ;
+ SFDIAG (18)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 19 read sequence
+ ;
+ b,n perf_rdr_shift_in_W_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ ;
+ ; RDR 20 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (20)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 21 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (21)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 22 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (22)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 23 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (23)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 24 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (24)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 25 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (25)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 26 read sequence
+ ;
+ SFDIAG (26)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 27 read sequence
+ ;
+ SFDIAG (27)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 28 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (28)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 29 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (29)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 30 read sequence
+ ;
+ SFDIAG (30)
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_in_W_leave
+
+ ;
+ ; RDR 31 read sequence
+ ;
+ sync
+ ssm 0,0
+ SFDIAG (31)
+ ssm 0,0
+ MFDIAG_1 (28)
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; Fallthrough
+ ;
+
+perf_rdr_shift_in_W_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (24) ; restore DR2
+ .procend
+
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_out_W
+;*
+;* Description:
+;*	This routine moves data to the RDRs. The 64-bit value passed
+;*	in arg1 is moved into the staging register, and then the STDIAG
+;*	instruction for the RDR # in arg0 is used to move the data
+;*	into the RDR.
+;*
+;* Arguments:
+;* arg0 = rdr number
+;* arg1 = 64-bit value to write
+;* %r24 - DR2 | DR2_SLOW_RET
+;* %r23 - original DR2 value
+;*
+;* Returns:
+;* None
+;*
+;* Register usage:
+;*
+;***********************************************************************
+
+ .export perf_rdr_shift_out_W,code
+perf_rdr_shift_out_W:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+;
+; NOTE: The PCX-W ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r24
+ MFDIAG_2 (23)
+ or %r24,%r23,%r24
+ MTDIAG_2 (24) ; set DR2_SLOW_RET
+ MTDIAG_1 (25) ; data to the staging register
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+ ;
+ ; RDR 0 write sequence
+ ;
+ sync ; RDR 0 write sequence
+ ssm 0,0
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 1 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 2 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 3 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (3)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 4 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (4)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 5 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (5)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 6 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (6)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 7 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (7)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 8 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (8)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 9 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 10 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (10)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 11 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (11)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 12 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 13 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 14 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 15 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 16 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (16)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 17 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 18 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 19 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (19)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 20 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (20)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 21 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (21)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 22 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (22)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 23 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (23)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 24 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (24)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 25 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (25)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 26 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (10)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 27 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (11)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 28 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (28)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 29 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (29)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 30 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+ ;
+ ; RDR 31 write sequence
+ ;
+ sync
+ ssm 0,0
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_out_W_leave
+ nop
+ ssm 0,0
+ nop
+
+perf_rdr_shift_out_W_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (23) ; restore DR2
+ .procend
+
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_in_U
+;*
+;* Description:
+;*	This routine shifts data in from the RDR in arg0 and returns
+;*	the result in ret0. If the RDR is <= 64 bits in length, it
+;*	is shifted back in immediately. This is to compensate
+;*	for RDR10, which has bits that preclude PDC stack operations
+;*	when they are in the wrong state.
+;*
+;* Arguments:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;*
+;* Returns:
+;* ret0 = next 64 bits of rdr data from staging register
+;*
+;* Register usage:
+;* arg0 : rdr to be read
+;* arg1 : bit length of rdr
+;* %r24 - original DR2 value
+;* %r23 - DR2 | DR2_SLOW_RET
+;* %r1 - scratch
+;*
+;***********************************************************************
+
+ .export perf_rdr_shift_in_U,code
+perf_rdr_shift_in_U:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+; read(shift in) the RDR.
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, remote diagnose registers.
+
+ depdi,z 1,DR2_SLOW_RET,1,%r29
+ MFDIAG_2 (24)
+ or %r24,%r29,%r29
+ MTDIAG_2 (29) ; set DR2_SLOW_RET
+
+ nop
+ nop
+ nop
+ nop
+
+;
+; Start of next 32-byte cacheline
+;
+ nop
+ nop
+ nop
+ extrd,u arg1,63,6,%r1
+
+ mtsar %r1
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+;
+; Start of next 32-byte cacheline
+;
+ SFDIAG (0) ; RDR 0 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (1) ; RDR 1 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 2 read sequence
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 3 read sequence
+ ssm 0,0
+ SFDIAG (3)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 4 read sequence
+ ssm 0,0
+ SFDIAG (4)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 5 read sequence
+ ssm 0,0
+ SFDIAG (5)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 6 read sequence
+ ssm 0,0
+ SFDIAG (6)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 7 read sequence
+ ssm 0,0
+ SFDIAG (7)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ SFDIAG (9) ; RDR 9 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (10) ; RDR 10 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (11) ; RDR 11 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (12) ; RDR 12 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (13) ; RDR 13 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (14) ; RDR 14 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (15) ; RDR 15 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 16 read sequence
+ ssm 0,0
+ SFDIAG (16)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ SFDIAG (17) ; RDR 17 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (18) ; RDR 18 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ sync ; RDR 20 read sequence
+ ssm 0,0
+ SFDIAG (20)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 21 read sequence
+ ssm 0,0
+ SFDIAG (21)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 22 read sequence
+ ssm 0,0
+ SFDIAG (22)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 23 read sequence
+ ssm 0,0
+ SFDIAG (23)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 24 read sequence
+ ssm 0,0
+ SFDIAG (24)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ sync ; RDR 25 read sequence
+ ssm 0,0
+ SFDIAG (25)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ SFDIAG (26) ; RDR 26 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (27) ; RDR 27 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ sync ; RDR 28 read sequence
+ ssm 0,0
+ SFDIAG (28)
+ ssm 0,0
+ MFDIAG_1 (28)
+ b,n perf_rdr_shift_in_U_leave
+ ssm 0,0
+ nop
+
+ b,n perf_rdr_shift_in_U_leave
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ SFDIAG (30) ; RDR 30 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+
+ SFDIAG (31) ; RDR 31 read sequence
+ ssm 0,0
+ MFDIAG_1 (28)
+ shrpd ret0,%r0,%sar,%r1
+ MTDIAG_1 (1)
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_in_U_leave
+ nop
+
+perf_rdr_shift_in_U_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (24) ; restore DR2
+ .procend
+
+;***********************************************************************
+;*
+;* Name: perf_rdr_shift_out_U
+;*
+;* Description:
+;*	This routine moves data to the RDRs. The 64-bit value passed
+;*	in arg1 is moved into the staging register, and then the STDIAG
+;*	instruction for the RDR # in arg0 is used to move the data
+;*	into the RDR.
+;*
+;* Arguments:
+;*	arg0 = rdr target
+;*	arg1 = 64-bit value to write
+;*
+;* Returns:
+;*	None
+;*
+;* Register usage:
+;*	arg0 = rdr target
+;*	arg1 = 64-bit value to write
+;*	%r24 - DR2 | DR2_SLOW_RET
+;*	%r23 - original DR2 value
+;*
+;***********************************************************************
+
+ .export perf_rdr_shift_out_U,code
+perf_rdr_shift_out_U:
+ .proc
+ .callinfo frame=0,NO_CALLS
+ .entry
+
+;
+; NOTE: The PCX-U ERS states that DR2_SLOW_RET must be set before any
+; shifting is done, from or to, the remote diagnose registers.
+;
+
+ depdi,z 1,DR2_SLOW_RET,1,%r24
+ MFDIAG_2 (23)
+ or %r24,%r23,%r24
+ MTDIAG_2 (24) ; set DR2_SLOW_RET
+
+ MTDIAG_1 (25) ; data to the staging register
+ shladd arg0,2,%r0,%r1 ; %r1 = 4 * RDR number
+ blr %r1,%r0 ; branch to 8-instruction sequence
+ nop
+
+;
+; 32-byte cacheline aligned
+;
+
+ sync ; RDR 0 write sequence
+ ssm 0,0
+ STDIAG (0)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 1 write sequence
+ ssm 0,0
+ STDIAG (1)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 2 write sequence
+ ssm 0,0
+ STDIAG (2)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 3 write sequence
+ ssm 0,0
+ STDIAG (3)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 4 write sequence
+ ssm 0,0
+ STDIAG (4)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 5 write sequence
+ ssm 0,0
+ STDIAG (5)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 6 write sequence
+ ssm 0,0
+ STDIAG (6)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 7 write sequence
+ ssm 0,0
+ STDIAG (7)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 8 write sequence
+ ssm 0,0
+ STDIAG (8)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 9 write sequence
+ ssm 0,0
+ STDIAG (9)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 10 write sequence
+ ssm 0,0
+ STDIAG (10)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 11 write sequence
+ ssm 0,0
+ STDIAG (11)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 12 write sequence
+ ssm 0,0
+ STDIAG (12)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 13 write sequence
+ ssm 0,0
+ STDIAG (13)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 14 write sequence
+ ssm 0,0
+ STDIAG (14)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 15 write sequence
+ ssm 0,0
+ STDIAG (15)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 16 write sequence
+ ssm 0,0
+ STDIAG (16)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 17 write sequence
+ ssm 0,0
+ STDIAG (17)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 18 write sequence
+ ssm 0,0
+ STDIAG (18)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 19 write sequence
+ ssm 0,0
+ STDIAG (19)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 20 write sequence
+ ssm 0,0
+ STDIAG (20)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 21 write sequence
+ ssm 0,0
+ STDIAG (21)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 22 write sequence
+ ssm 0,0
+ STDIAG (22)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 23 write sequence
+ ssm 0,0
+ STDIAG (23)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 24 write sequence
+ ssm 0,0
+ STDIAG (24)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 25 write sequence
+ ssm 0,0
+ STDIAG (25)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 26 write sequence
+ ssm 0,0
+ STDIAG (26)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 27 write sequence
+ ssm 0,0
+ STDIAG (27)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 28 write sequence
+ ssm 0,0
+ STDIAG (28)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 29 write sequence
+ ssm 0,0
+ STDIAG (29)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 30 write sequence
+ ssm 0,0
+ STDIAG (30)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+ sync ; RDR 31 write sequence
+ ssm 0,0
+ STDIAG (31)
+ ssm 0,0
+ b,n perf_rdr_shift_out_U_leave
+ nop
+ ssm 0,0
+ nop
+
+perf_rdr_shift_out_U_leave:
+ bve (%r2)
+ .exit
+ MTDIAG_2 (23) ; restore DR2
+ .procend
+
diff --git a/arch/parisc/kernel/perf_images.h b/arch/parisc/kernel/perf_images.h
new file mode 100644
index 000000000000..d9562fe3f75c
--- /dev/null
+++ b/arch/parisc/kernel/perf_images.h
@@ -0,0 +1,3138 @@
+/*
+ * Images for use with the Onyx (PCX-U) CPU interface
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq at parisc-linux.org>
+ * Copyright (C) 2001 Hewlett-Packard (Grant Grundler)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef PERF_IMAGES_H
+#define PERF_IMAGES_H
+
+/* Magic numbers taken without modification from HPUX stuff */
+
+#define PCXU_IMAGE_SIZE 584
+
+static uint32_t onyx_images[][PCXU_IMAGE_SIZE/sizeof(uint32_t)] = {
+/*
+ * CPI:
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
+ {
+ 0x4c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff104, 0xe000c07f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffff0000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xfffff000, 0x0000000f, 0xffffffff, 0xff000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000030, 0x00003c00, 0x067f080c, 0x02019fc0,
+ 0x02804067, 0xf0009030, 0x19fc002c, 0x40067f08,
+ 0x0c12019f, 0xc0028440, 0x67f00091, 0x3019fc00,
+ 0x2fc007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+ 0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+ 0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* Bus utilization image (bus_util)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0x0000000c, 0x00003c00, 0x07930000, 0x0041e4c0,
+ 0x01002079, 0x3000800c, 0x1e4c0030, 0x00279300,
+ 0x010049e4, 0xc0014022, 0x79300090, 0x0c9e4c00,
+ 0x34004793, 0x00020051, 0xe4c00180, 0x24793000,
+ 0xa00d1e4c, 0x00380067, 0x93000300, 0x59e4c001,
+ 0xc0267930, 0x00b00d9e, 0x4c003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00100000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/*
+ * TLB counts (same as tlbStats image):
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff104, 0xe000c06a, 0xafffc85c,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x01b9e000, 0x0001b8c0, 0x00000000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55ff5, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x01ff0001, 0x08007fc2,
+ 0x02c1001f, 0xf0807100, 0x1bfc200c, 0x4806ff00,
+ 0x03f001ff, 0xfe003c00, 0x7fff800f, 0x001fffe0,
+ 0x03c007ff, 0xf800f001, 0xfffe003c, 0x007fff80,
+ 0x0f001fff, 0xe003c007, 0xfff800f0, 0x01fffe00,
+ 0x3c007fff, 0x800f001f, 0xffe003c0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* tlbHandMiss
+ *
+ * ctr0: counts TLB misses
+ * ctr1: counts dmisses inside tlb miss handlers
+ * ctr2: counts cycles in the tlb miss handlers
+ * ctr3: counts overflows of ctr2
+ */
+{
+0x1c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000000,0x00003c00,0x01fd0000,0x08007f42,
+0x0281001f,0xd080a100,0x19f42008,0x44067d08,
+0x0612019f,0x400084c0,0x67d00060,0x0047f400,
+0x042011fd,0x080b0404,0x7f4202c4,0x0167d080,
+0x311059f4,0x201c4816,0x7d000313,0x059f4001,
+0xfc007fff,0x800f001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* branch_taken image (ptkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: all predicted taken branches (nullfied or not)
+ * ctr3: overflow for ctr2
+ */
+
+ {
+ 0xcc01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xa08080a0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x04f90000, 0x02013e40,
+ 0x0081004f, 0x90004060, 0x13e40018, 0x0024f900,
+ 0x0802093e, 0x40028102, 0x4f9000c0, 0x6093e400,
+ 0x380014f9, 0x00010205, 0x3e4000c1, 0x014f9000,
+ 0x506053e4, 0x001c0034, 0xf9000902, 0x0d3e4002,
+ 0xc1034f90, 0x00d060d3, 0xe4003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* branch_nottaken (pntkn image)
+ *
+ * ctr0: overflow for ctr1
+ * ctr1: counts branches predicted not-taken, but actually taken
+ * ctr2: counts all predictable branches predicted not-taken
+ * ctr3: overflow for ctr2
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0xc0c0c0e0,0xffb1fffb,0xfff7ffff,0xffffffff,
+0xffffffff,0xfffffffb,0x1fffbfff,0x7fffffff,
+0xfcc7ffff,0xffdffffa,0x5f000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+
+/* imiss image
+ *
+ * ctr0 : counts imiss aligned on 0
+ * ctr1 : counts imiss aligned on 4
+ * ctr2 : counts imiss aligned on 8
+ * ctr3 : counts imiss aligned on C
+ */
+ {
+ 0x0c00c000, 0x00000000, 0x00010000, 0x00000000,
+ 0xe7ebedee, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xfff55fff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c00, 0x007f0000, 0x01001fc0,
+ 0x00408007, 0xf0002030, 0x01fc000c, 0x10007f00,
+ 0x0405001f, 0xc0014180, 0x07f00060, 0x7001fc00,
+ 0x1c20007f, 0x00080900, 0x1fc00242, 0x8007f000,
+ 0xa0b001fc, 0x002c3000, 0x7f000c0d, 0x001fc003,
+ 0x438007f0, 0x00e0f001, 0xfc003fff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* dmiss image
+ *
+ * ctr0 : counts cycles
+ * ctr1 : counts cycles where something retired
+ * ctr2 : counts dmisses
+ * ctr3 : (same as ctr2)
+ */
+ {
+ 0x3c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x6fffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c04, 0x007f0009, 0x02001fc0,
+ 0x0280c007, 0xf000b040, 0x01fc0030, 0x14007f00,
+ 0x0d06001f, 0xc00381c0, 0x07f000f0, 0x8001fc00,
+ 0x2024007f, 0x00090a00, 0x1fc00282, 0xc007f000,
+ 0xb0c001fc, 0x00303400, 0x7f000d0e, 0x001fc003,
+ 0x83c007f0, 0x00f00001, 0xfc0023ff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x6fff0000, 0x00000000, 0x60000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff },
+
+/* dcmiss
+ *
+ * ctr0: counts store instructions retired
+ * ctr1: counts load instructions retired
+ * ctr2: counts dmisses
+ * ctr3: counts READ_SHARED_OR_PRIV and READ_PRIVATE transactions on Runway
+ */
+{
+0x2c90c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000e8,0x00003c02,0x00bf0001,0x02002fc0,
+0x0080a00b,0xf0003040,0x02fc0010,0x1200bf00,
+0x0506002f,0xc00181a0,0x0bf00070,0x8002fc00,
+0x202200bf,0x00090a00,0x2fc00282,0xa00bf000,
+0xb0c002fc,0x00303200,0xbf000d0e,0x002fc003,
+0x83a00bf0,0x00ffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0x55555555,0xd5555555,
+0x55555555,0x75555555,0x5e1ffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,0xf4000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_cpi
+ *
+ * ctr0: counts total cycles
+ * ctr1: counts overflows of ctr0 (for greater than 32-bit values)
+ * ctr2: counts overflows of ctr3 (for greater than 32-bit values)
+ * ctr3: counts unnullified instructions retired
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe7e7e0e0,0x004e0004,0x07ffffff,0xffc01380,
+0x0101ffff,0xfffff104,0xe000c06a,0xafffc85c,
+0x01380010,0x1fffffff,0xff000000,00000000,
+0x01b9e000,0x0001b8c0,00000000,0x0fffff00,
+0x000fffff,00000000,00000000,0x00400000,
+0x00001000,0x00000004,00000000,0x01000000,
+0x0000ffff,0xfffffff0,00000000,0x0fffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff55ff5,0xffffffff,0xffffffff,0xf0000000,
+0xf0000010,0x00003c00,0x01760008,0x00025d80,
+0x02800417,0x6000c001,0x25d80038,0x04017600,
+0x0901025d,0x8002c044,0x176000d0,0x1125d800,
+0x3c2001f6,0x08080400,0x7d820203,0x001f6080,
+0x804027d8,0x20282009,0xf6080a0c,0x027d8202,
+0x81041f60,0x80c08107,0xd8203030,0x41f6080c,
+0x04127d82,0x0382049f,0x6080e0c1,0x27d82038,
+0x4006f608,0x081011bd,0x82030400,0xef6080a1,
+0x013bd820,0x384806f6,0x00081211,0xbd800304,
+0x80ef6000,0xa1213bd8,0x003bc007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x6fff0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* big_ls
+ *
+ * ctr0:counts the total number of cycles for which local_stall_A1 is asserted.
+ * ctr1: is the overflow for counter 0.
+ * ctr2: counts IFLUSH_AV
+ * ctr3: is the overflow for counter 2.
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00029408,0x02f50002,0x0800bd40,
+0x0202802f,0x5000a000,0x4bd40004,0x0812f500,
+0x030804bd,0x40024281,0x2f5000b0,0x010bd400,
+0x100842f5,0x00060810,0xbd400302,0x842f5000,
+0xe0014bd4,0x00140852,0xf5000708,0x14bd4003,
+0x42852f50,0x00ff001f,0xffe003c0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x0df70000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* br_abort
+ *
+ * ctr0: counts BRAD_STALLH
+ * ctr1: counts ONE_QUAD
+ * ctr2: counts BR0_ABRT
+ * ctr3: counts BR1_ABRT
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1a250000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* isnt
+ *
+ * ctr0: counts the total number of cycles for which iside_notrans is asserted
+ * ctr1: counts the number of times iside_notrans is asserted for 1-4 cycles
+ * ctr2: counts the number of times iside_notrans is asserted for 5-7 cycles
+ * ctr3: counts the number of times iside_notrans is asserted for > 7 cycles
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcdff0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* quadrant
+ *
+ * ctr0: Total number of instructions in quadrant 0
+ * ctr1: Total number of instructions in quadrant 1
+ * ctr2: Total number of instructions in quadrant 2
+ * ctr3: Total number of instructions in quadrant 3
+ * Works only with 32-bit
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffff0000, 0x0fffff00,
+ 0x000fffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000040f, 0xfffffffc, 0xff000000,
+ 0x0080ffff, 0xffffcff0, 0x0000000c, 0x0fffffff,
+ 0xfcff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xfff55ff5, 0x5fffffff, 0xffffffff, 0xf0000000,
+ 0xf00000f0, 0x00003c00, 0x007f0000, 0x01001fc0,
+ 0x00408007, 0xf0002030, 0x01fc000c, 0x10007f00,
+ 0x0405001f, 0xc0014180, 0x07f00060, 0x7001fc00,
+ 0x1c20007f, 0x00080900, 0x1fc00242, 0x8007f000,
+ 0xa0b001fc, 0x002c3000, 0x7f000c0d, 0x001fc003,
+ 0x438007f0, 0x00e0f001, 0xfc003fff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffc00,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
+
+/* rw_pdfet (READ_PRIV transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf8000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* rw_wdfet (WRITEBACKS)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0x98000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff},
+
+/* shlib_cpi
+ *
+ * ctr0: Total number of instructions in quad 0
+ * ctr1: Total number of CPU clock cycles in quad 0
+ * ctr2: total number of instructions, excluding nullified ones
+ * ctr3: total number of CPU clock cycles
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x004e0004, 0x07ffffff, 0xffc01380,
+ 0x0101ffff, 0xfffff004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0x00000fff, 0xff00000f, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000000f, 0xfffffffc, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xfff77ff5, 0x7fffffff, 0xffffffff, 0xf0000000,
+ 0xf00000a0, 0x00003c00, 0x01ff0005, 0x08007fc0,
+ 0x03c1001f, 0xf08030c0, 0x07fc203c, 0x4001ff08,
+ 0x0118007f, 0xc003c500, 0x1ff08031, 0xc007fc00,
+ 0x3fffffff, 0xf800ffff, 0xfffe003f, 0xffffff80,
+ 0x0fffffff, 0xe003ffff, 0xfff800ff, 0xfffffe00,
+ 0x3fffffff, 0x800fffff, 0xffe003ff, 0xfffff800,
+ 0xfffffffe, 0x003fffff, 0xff800fff, 0xffffe003,
+ 0xfffffff8, 0x00ffffff, 0xfe003fff, 0xffff800f,
+ 0xffffffe0, 0x03ffffff, 0xf800ffff, 0xfffe003f,
+ 0xffffff80, 0x0fffffff, 0xe003ffff, 0xfff800ff,
+ 0xfffffe00, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff},
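+
+/*
+ * For shlib_cpi the cycles-per-instruction figures follow directly from
+ * the four counts listed above; assuming the counters are read back in
+ * that order, something along these lines would apply (illustrative only):
+ *
+ *     double cpi_total = (double) ctr3 / ctr2;
+ *     double cpi_quad0 = (double) ctr1 / ctr0;
+ */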
+
+
+/* addr_inv_abort_alu
+ *
+ * ctr0: counts ABORT_ALU0L
+ * ctr1: counts ABORT_ALU1L
+ * ctr2: counts ADDR0_INVALID
+ * ctr3: counts ADDR1_INVALID
+ */
+
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000d,0x01001fc0,
+0x03008007,0xf000f030,0x01fc0038,0x10007f00,
+0x0905001f,0xc0020180,0x07f000b0,0x7001fc00,
+0x2820007f,0x00050900,0x1fc00102,0x8007f000,
+0x70b001fc,0x00183000,0x7f00010d,0x001fc000,
+0x038007f0,0x0030f001,0xfc000bff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x65380000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+
+/* brad_stall
+ *
+ * ctr0: counts the total number of cycles for which brad_stall is asserted
+ * ctr1: counts the number of times brad_stall is asserted for 1-4 cycles
+ * ctr2: counts the number of times brad_stall is asserted for 5-7 cycles
+ * ctr3: counts the number of times brad_stall is asserted for > 7 cycles
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c20,0x03ff0808,0x1800ffc4,
+0x0204003f,0xf0004280,0x0ffc6020,0x8003ff00,
+0x043800ff,0xc8020c00,0x3ff00044,0x800ffca0,
+0x210003ff,0x00045800,0xffcc0214,0x003ff000,
+0x26800ffc,0xe0218003,0xff000278,0x00ffd002,
+0x1c003ff0,0x0028800f,0xfd002200,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x1bff0000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* cntl_in_pipel
+ *
+ * ctr0: counts the total number of cycles for which cntl_in_pipel is asserted
+ * ctr1: counts the number of times cntl_in_pipel is asserted for 1-4 cycles
+ * ctr2: counts the number of times cntl_in_pipel is asserted for 5-7 cycles
+ * ctr3: counts the number of times cntl_in_pipel is asserted for > 7 cycles
+ */
+{
+0x0c006000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x3fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00021c00,0x03ff0808,0x1000ffc4,
+0x0206003f,0xf0004200,0x0ffc6020,0xa003ff00,
+0x043000ff,0xc8020e00,0x3ff00044,0x000ffca0,
+0x212003ff,0x00045000,0xffcc0216,0x003ff000,
+0x26000ffc,0xe021a003,0xff000270,0x00ffd002,
+0x1e003ff0,0x0028000f,0xfd002220,0x03ff0001,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x3fff0000,00000000,0x30000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* dsnt_xfh
+ *
+ * ctr0: counts dside_notrans
+ * ctr1: counts xfhang
+ * ctr2: is the overflow for ctr0
+ * ctr3: is the overflow for ctr1
+ */
+{
+0x0c018000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xcfffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x00030000,0x01f30000,0x00087cc0,
+0x0040041f,0x30002001,0x87cc000c,0x1001f300,
+0x0404087c,0xc0014104,0x1f300060,0x4187cc00,
+0x1c2001f3,0x00080808,0x7cc00242,0x041f3000,
+0xa08187cc,0x002c3001,0xf3000c0c,0x087cc003,
+0x43041f30,0x00e0c187,0xcc003fc0,0x07fff800,
+0xf001fffe,0x003c007f,0xff800f00,0x1fffe003,
+0xc007fff8,0x00f001ff,0xfe003c00,0x7fff800f,
+0x001fffe0,0x03c007ff,0xf800f001,0xfffe003c,
+0x007fff80,0x0f001fff,0xe003c007,0xfff800f0,
+0x01fffe00,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xcb3f0000,00000000,0xc0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* fet_sig1
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts ITRANS_STALL
+ * ctr2: counts SEL_PCQH
+ * ctr3: counts OUT_OF_CONTEXT
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x07c10000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff},
+
+/* fet_sig2
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts IRTN_AV
+ * ctr2: counts ADDRESS_INC
+ * ctr3: counts ADDRESS_DEC
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x06930000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_1
+ *
+ * ctr0: counts HIT_RETRY0
+ * ctr1: counts HIT_RETRY1
+ * ctr2: counts GO_TAG_E
+ * ctr3: counts GO_TAG_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x71c10000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_2
+ *
+ * ctr0: counts HIT_DM0
+ * ctr1: counts HIT_DM1
+ * ctr2: counts GO_STORE_E
+ * ctr3: counts GO_STORE_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x72930000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_3
+ *
+ * ctr0: counts HIT_DV0
+ * ctr1: counts HIT_DV1
+ * ctr2: counts STBYPT_E (load bypasses from store queue)
+ * ctr3: counts STBYPT_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f0002,0x01001fc0,
+0x00c08007,0xf0000030,0x01fc0004,0x10007f00,
+0x0605001f,0xc001c180,0x07f00040,0x7001fc00,
+0x1420007f,0x000a0900,0x1fc002c2,0x8007f000,
+0x80b001fc,0x00243000,0x7f000e0d,0x001fc003,
+0xc38007f0,0x00c0f001,0xfc0037ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x77250000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* g7_4
+ *
+ * ctr0: counts HIT_DIRTY0
+ * ctr1: counts HIT_DIRTY1
+ * ctr2: counts CA_BYP_E (quick launch)
+ * ctr3: counts CA_BYP_O
+ */
+{
+0x0c00e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x7fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x7bb70000,00000000,0x70000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* mpb_labort
+ *
+ * ctr0: counts L_ABORT_ALU0L
+ * ctr1: counts L_ABORT_ALU1L
+ * ctr2: counts MPB0H
+ * ctr3: counts MPB1H
+ */
+{
+0x0c00c000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffa5ffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x6fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+00000000,0x0003f800,0x007f000e,0x01001fc0,
+0x03c08007,0xf000c030,0x01fc0034,0x10007f00,
+0x0a05001f,0xc002c180,0x07f00080,0x7001fc00,
+0x2420007f,0x00060900,0x1fc001c2,0x8007f000,
+0x40b001fc,0x00143000,0x7f00020d,0x001fc000,
+0xc38007f0,0x0000f001,0xfc0007ff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x605c0000,00000000,0x60000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffaaaa,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* panic
+ *
+ * ctr0: is the overflow for counter 1
+ * ctr1: counts traps and RFI's
+ * ctr2: counts panic traps
+ * ctr3: is the overflow for counter 2
+ */
+{
+0x0c002000,00000000,0x00060000,00000000,
+0xe7efe0e0,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffffc,
+0x41380030,0x1aabfff2,0x17000000,00000000,
+0x01b80000,0x3effffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,00000000,0x00400000,
+0x00001fff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x1fffffff,0xffffffff,
+0xfff7fff7,0xffffffff,0xffffffff,0xf0000000,
+0xb0000000,0x00012c04,0x05790804,0x14013e44,
+0x0008004f,0x90000040,0x15e46000,0xc0047920,
+0x004a003e,0x40011080,0x0f900024,0x4003e460,
+0x00c80479,0x00023301,0x1e400100,0x4157d080,
+0x514053f4,0x40048014,0xfd000104,0x055f4600,
+0x4c0147d2,0x0014a043,0xf4001508,0x10fd0003,
+0x44043f46,0x004c8147,0xd0003330,0x51f40014,
+0x04257908,0x0c14093e,0x44020802,0x4f900080,
+0x4095e460,0x20c02479,0x20084a08,0x3e400310,
+0x820f9000,0xa44083e4,0x6020c824,0x79000a33,
+0x091e4003,0x3c007fff,0x800f001f,0xffe00000,
+00000000,00000000,00000000,00000000,
+0x10400000,00000000,0x10000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rare_inst
+ *
+ * ctr0: counts sync and syncdma instructions
+ * ctr1: counts pxtlbx,x instructions
+ * ctr2: counts ixtlbt instructions
+ * ctr3: counts cycles
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xe0e0e0e0,0x004e000c,0x000843fc,0x85c09380,
+0x0121ebfd,0xff217124,0xe0004000,0x943fc85f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xe00000e0,0x00003c00,0x007f0001,0x01001fc0,
+0x00408007,0xf0003030,0x01fc000c,0x10007f00,
+0x0505001f,0xc0014180,0x07f00070,0x7001fc00,
+0x1c20007f,0x00090900,0x1fc00242,0x8007f000,
+0xb0b001fc,0x002c3000,0x7f000d0d,0x001fc003,
+0x438007f0,0x00f0f001,0xfc003fff,0xfffff800,
+0xfffffffe,0x003fffff,0xff800fff,0xffffe003,
+0xfffffff8,0x00ffffff,0xfe003fff,0xffff800f,
+0xffffffe0,0x03ffffff,0xf800ffff,0xfffe003f,
+0xffffff80,0x0fffffff,0xe003ffff,0xfff800ff,
+0xfffffe00,0x3fffffff,0x800fffff,0xffe00000,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_dfet (for D-cache misses and writebacks)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf0000000,00000000,
+00000000,00000000,0x98000000,00000000,
+0xffffffff,0xffffffff,0x0fffffff,0xffffffff,
+00000000,00000000,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* rw_ifet (I-cache misses -- actually dumb READ transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xd0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* rw_sdfet (READ_SHARED_OR_PRIVATE transactions)
+ *
+ * ctr0: counts address valid cycles
+ * ctr1: counts *all* data valid cycles
+ * ctr2: is the overflow from counter 0
+ * ctr3: is the overflow from counter 1
+ */
+{
+0x0c01e000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x0000000c,0x00003c00,0x07930000,0x0041e4c0,
+0x01002079,0x3000800c,0x1e4c0030,0x00279300,
+0x010049e4,0xc0014022,0x79300090,0x0c9e4c00,
+0x34004793,0x00020051,0xe4c00180,0x24793000,
+0xa00d1e4c,0x00380067,0x93000300,0x59e4c001,
+0xc0267930,0x00b00d9e,0x4c003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00100000,00000000,0xf4000000,00000000,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+00000000,00000000,00000000,00000000,
+0xffffffff,0xffffffff },
+
+
+/* spec_ifet
+ *
+ * ICORE_AV fires for every request which the Instruction Fetch Unit sends
+ * to the Runway Interface Block. Hence, this counts all I-misses, speculative
+ * or not, but does *not* include I-cache prefetches, which are generated by
+ * RIB.
+ * IRTN_AV fires twice for every I-cache miss returning from RIB to the IFU.
+ * It will not fire if a second I-cache miss is issued from the IFU to RIB
+ * before the first returns. Therefore, if the IRTN_AV count is much less
+ * than 2x the ICORE_AV count, many speculative I-cache misses are occurring
+ * which are "discovered" to be incorrect fairly quickly.
+ * The ratio of I-cache miss transactions on Runway to the ICORE_AV count is
+ * a measure of the effectiveness of instruction prefetching. This ratio
+ * should be between 1 and 2. If it is close to 1, most prefetches are
+ * eventually called for by the IFU; if it is close to 2, almost no prefetches
+ * are useful and they are wasted bus traffic.
+ *
+ * ctr0: counts ICORE_AV
+ * ctr1: counts IRTN_AV
+ * ctr2: counts all non-coherent READ transactions on Runway (TTYPE D0).
+ * This should be just I-cache miss and I-prefetch transactions.
+ * ctr3: counts total processor cycles
+ */
+{
+0x0c000000,00000000,0x00060000,00000000,
+0xefefefef,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0x0fffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0x00000008,0x00030c00,0x01bf0001,0x00806fc0,
+0x00c1001b,0xf0005048,0x06fc001c,0x2001bf00,
+0x0908806f,0xc002c300,0x1bf000d0,0xc806fc00,
+0x3fffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0x06bf0000,00000000,00000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00110000,00000000,0xd0ffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0x00ffffff,0xffffffff,
+0xffffffff,0xffffffff,00000000,00000000,
+0xffffffff,0xffffffff },
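+
+/*
+ * The prefetch-effectiveness ratio described above for spec_ifet could be
+ * derived from a readout roughly as follows, assuming ctr0 holds the
+ * ICORE_AV count and ctr2 the non-coherent READ count (names illustrative):
+ *
+ *     double prefetch_ratio = (double) ctr2 / ctr0;
+ *
+ * A value near 1 means most prefetches were eventually used; a value near
+ * 2 means almost none were and they were wasted bus traffic.
+ */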
+
+/* st_cond0
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 0C and 0E (fp ops, not fmac or fmpyadd)
+ * ctr2: counts B,L (including long and push) and GATE (including nullified),
+ * predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc13380,
+0x0101ffff,0xffa1f057,0xe000407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond1
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 1x (most of the load/stores)
+ * ctr2: counts CMPB (dw) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc01b80,
+0x0101ffff,0xffb7f03d,0xe000407f,0xffffc8ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond2
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major op 03
+ * ctr2: counts CMPIB (dw) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc09780,
+0x0101ffff,0xff21f077,0xe000407f,0xffffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond3
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major ops 06 & 26
+ * ctr2: counts BB, BVB, MOVB, MOVIB (incl. nullified) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc03780,
+0x0101ffff,0xff29f016,0xe000407f,0xffffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_cond4
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts major op 2E
+ * ctr2: counts CMPB, CMPIB, ADDB, ADDIB (incl. nullified) predicted not-taken
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0e0c0e0,0xffffffff,0xffffffff,0xffc17780,
+0x0101ffff,0xff21f014,0xe000407f,0xffffe9ff,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred0
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts BE and BE,L
+ * ctr2: counts BE and BE,L including nullified
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffdf5bbf,
+0xffffffff,0xff25f7d6,0xefffffff,0xffffc97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* st_unpred1
+ *
+ * ctr0: is the overflow for ctr1
+ * ctr1: counts BLR, BV, BVE, BVE,L
+ * ctr2: counts BLR, BV, BVE, BVE,L including nullified
+ * ctr3: is the overflow for ctr2
+ */
+{
+0x4c01e000,00000000,0x00060000,00000000,
+0xe0c0c0e0,0xffffffff,0xffffffff,0xffc15f80,
+0x0501ff7f,0xff21f057,0xe001407f,0xdfffc87f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf0000060,0x00003c00,0x04f90000,0x02013e40,
+0x0081004f,0x90004060,0x13e40018,0x0024f900,
+0x0802093e,0x40028102,0x4f9000c0,0x6093e400,
+0x380014f9,0x00010205,0x3e4000c1,0x014f9000,
+0x506053e4,0x001c0034,0xf9000902,0x0d3e4002,
+0xc1034f90,0x00d060d3,0xe4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+/* unpred
+ *
+ * ctr0: counts non-nullified unpredictable branches
+ * ctr1: is the overflow for ctr0
+ * ctr2: counts all unpredictable branches (nullified or not)
+ * ctr3: is the overflow for ctr2
+ */
+{
+0xcc01e000,00000000,0x00060000,00000000,
+0x20202020,0xff31ffff,0xfff7fffe,0x97ffcc7f,
+0xfffffdff,0xffa5fff3,0x1fffffff,0x7fffe97f,
+0xffffffff,0xffffffff,0xff000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffff0000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xf0000000,
+0xf00000a0,0x00003c00,0x02f50000,0x0004bd40,
+0x0040802f,0x50002020,0x4bd4000c,0x0042f500,
+0x040014bd,0x40014084,0x2f500060,0x214bd400,
+0x1c2002f5,0x00080804,0xbd400242,0x802f5000,
+0xa0a04bd4,0x002c2042,0xf5000c08,0x14bd4003,
+0x42842f50,0x00e0a14b,0xd4003fff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xfffffc00,
+00000000,00000000,00000000,00000000,
+0xffff0000,00000000,0xf0000000,00000000,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xfffffc00,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xfffffc00,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xffffffff,0xf3ffffff,0xffffffff,
+0xfdffffff,0xffffffff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0xffffffff,0xfffff9ff,0xfe000000,00000000,
+0x00030000,00000000,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff,0xffffffff,0xffffffff,
+0xffffffff,0xffffffff },
+
+
+/* go_store
+ *
+ * ctr0: Overflow for counter 2
+ * ctr1: Overflow for counter 3
+ * ctr2: count of GO_STORE_E signal
+ * ctr3: count of GO_STORE_O signal
+ */
+
+ {
+ 0x0c00e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffa5ffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x7fffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0x00000000, 0x0000c000, 0x067c0000, 0x01019f00,
+ 0x00408067, 0xc0002030, 0x19f0000c, 0x000e7c00,
+ 0x0401039f, 0x00014080, 0xe7c00060, 0x3039f000,
+ 0x1c00167c, 0x00080105, 0x9f000240, 0x8167c000,
+ 0xa03059f0, 0x002c001e, 0x7c000c01, 0x079f0003,
+ 0x4081e7c0, 0x00e03079, 0xf0003fc0, 0x07fff800,
+ 0xf001fffe, 0x003c007f, 0xff800f00, 0x1fffe003,
+ 0xc007fff8, 0x00f001ff, 0xfe003c00, 0x7fff800f,
+ 0x001fffe0, 0x03c007ff, 0xf800f001, 0xfffe003c,
+ 0x007fff80, 0x0f001fff, 0xe003c007, 0xfff800f0,
+ 0x01fffe00, 0x3c007fff, 0x800f001f, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x70130000, 0x00000000, 0x70000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffaaaa, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff
+ },
+
+
+/* shlib_call
+ *
+ * ctr0: SharedLib call Depth1
+ * ctr1: SharedLib call Depth2
+ * ctr2: SharedLib call Depth3
+ * ctr3: SharedLib call Depth>3
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xc76fa005, 0x07dd7e9c, 0x87115b80,
+ 0x01100200, 0x07200004, 0xe000407f, 0xfffffffc,
+ 0x01380010, 0x1fffffff, 0xff000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xf0000000,
+ 0xf0000000, 0x00003c20, 0x01ff0808, 0x04007fc0,
+ 0x0003001f, 0xf0000180, 0x07fc4010, 0x5001ff00,
+ 0x001c007f, 0xc2000a00, 0x1ff18022, 0x4007fc20,
+ 0x00b001ff, 0x10003800, 0x7fc8004d, 0x001ff100,
+ 0x03c007fc, 0x60012001, 0xff280144, 0x007fc600,
+ 0x13001ff2, 0x00058007, 0xfcc00550, 0x01ff2000,
+ 0x5c007fca, 0x001a001f, 0xf3801640, 0x07fca001,
+ 0xb001ff30, 0x0078007f, 0xd0005d00, 0x1ff30007,
+ 0xc007fce0, 0x022001ff, 0x48018400, 0x7fce0023,
+ 0x001ff400, 0x098007fd, 0x20065001, 0xff40009c,
+ 0x007fd200, 0x3fffffff, 0x800fffff, 0xffe00000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xffff0000, 0x00000000, 0xf0000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xfffffc00, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xfffffc00, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xf3ffffff, 0xffffffff,
+ 0xfdffffff, 0xffffffff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0xffffffff, 0xfffff9ff, 0xfe000000, 0x00000000,
+ 0x00030000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff
+ }
+};
+#define PCXW_IMAGE_SIZE 576
+
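+/*
+ * Each image below is one PCXW_IMAGE_SIZE-byte counter configuration;
+ * the comment preceding each block describes what the four counters
+ * (ctr0-ctr3) measure when that image is loaded into the monitor.
+ */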
+static uint32_t cuda_images[][PCXW_IMAGE_SIZE/sizeof(uint32_t)] = {
+/*
+ * CPI: FROM CPI.IDF (Image 0)
+ *
+ * Counts the following:
+ *
+ * ctr0 : total cycles
+ * ctr1 : total cycles where nothing retired
+ * ctr2 : total instructions retired, including nullified
+ * ctr3 : total instructions retired, less nullified instructions
+ */
+ {
+ 0x4c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x60f00000, 0x0fffff00, 0x000fffff, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xfffff000, 0x0000000f, 0xffffffff, 0xff000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00270000, 0x00000055,
+ 0x0200000e, 0x4d300000, 0x00000000, 0x0ff00002,
+ 0x70000000, 0x00000020, 0x0000e400, 0x00000ff0,
+ 0x00000000, 0x00000000, 0x00000055, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x00000030, 0x00000000,
+ 0x00157fff, 0xffc00000, 0x034c0000, 0x00000000,
+ 0x03fc0000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* Bus utilization image FROM BUS_UTIL.IDF (Image 1)
+ *
+ * ctr0 : counts address valid cycles
+ * ctr1 : counts data valid cycles
+ * ctr2 : counts overflow from counter 0
+ * ctr3 : counts overflow from counter 1
+ */
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/*
+ * TLB counts: FROM TLBSTATS.IDF (Image 2)
+ *
+ * Counts the following:
+ *
+ * ctr0: DTLB misses
+ * ctr1: ITLB misses
+ * ctr2: total cycles in the miss handlers
+ * ctr3: total cycles
+ */
+
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00800000, 0x00153f7f,
+ 0x55000000, 0xaf800000, 0xc0000000, 0x0403f240,
+ 0x00000000, 0x00001010, 0x00004700, 0x00000ff0,
+ 0x00000000, 0x00000000, 0x00000055, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x00000000, 0x00000000,
+ 0x00157fff, 0xffc00000, 0x00000000, 0x3fc00000,
+ 0x00040000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* tlbhandler FROM tlbHandMiss.idf (Image 3)
+ *
+ * ctr0: TLB misses
+ * ctr1: dmisses inside the TLB miss handler
+ * ctr2: cycles in the TLB miss handler
+ * ctr3: overflow of ctr2
+ */
+ {
+ 0x1c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x006c0000, 0x01000054,
+ 0x02000002, 0xc3200000, 0xc00aa000, 0x0c03f240,
+ 0x00000000, 0x00001010, 0x000044f4, 0x00000c00,
+ 0xaa0000f0, 0x0f0000b0, 0x00005005, 0x0f5f0000,
+ 0x0001f000, 0x0000ff00, 0x00000000, 0x0f000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x00000000, 0x00055fff, 0xfff00000,
+ 0x00000000, 0x0ff00a00, 0x000f0000, 0x24004000,
+ 0x15400001, 0x40c00003, 0x3da00000, 0x0002a800,
+ 0x00ff0000, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch_taken image FROM PTKN.IDF (Image 4)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: predicted taken branches, actually taken
+ * ctr2: predicted taken branches (includes nullified)
+ * ctr3: all branches
+ */
+
+ {
+ 0xcc01e000, 0x00000000, 0x00000000, 0x00000000,
+ 0xa08080a0, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfffffeff, 0xfffeffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch_nottaken FROM PNTKN.IDF (Image 5)
+ *
+ * ctr0: mispredicted branches
+ * ctr1: branches predicted not-taken, but actually taken
+ * ctr2: branches predicted not-taken (includes nullified)
+ * ctr3: all branches
+ */
+ {
+ 0xcc01e000, 0x00000000, 0x00000000, 0x00000000,
+ 0xe0c0c0e0, 0xffffffff, 0xffffffff, 0xffefffff,
+ 0xffffbfff, 0xfffffeff, 0xfffeffff, 0xfffffeff,
+ 0xfffffffe, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* IMISS image (Image 6)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* DMISS image (Image 7)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* dmiss_access image FROM DMISS_RATIO.IDF (Image 8)
+ *
+ * ctr0 : all loads and stores that retire (even lines)
+ * ctr1 : all loads and stores that retire (odd lines)
+ * ctr2 : dcache misses of retired loads/stores
+ * ctr3 : all READ_PRIV and READ_SHAR_OR_PRIV on Runway
+ * (Speculative and Non-Speculative)
+ */
+ {
+ 0x2d81e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x10101010, 0x00ffffff, 0xa003ffff, 0xfe800fff,
+ 0xfffa003f, 0xffffe8ff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd2280a00, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000005, 0x555ff900,
+ 0x80200000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x00005555, 0xff80bf8b, 0xab030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x15153fe0, 0x27628880,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000001,
+ 0x5557fc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00110000, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xf8ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+
+/* big_cpi image (Image 9)
+ *
+ * ctr0 : Total number of CPU clock cycles.
+ * ctr1 : Unused
+ * ctr2 : Unused
+ * ctr3 : Total number of Non-Nullified instructions retired.
+ */
+ {
+ 0x0c00c000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe7e7e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xfff00000, 0x07ffff00, 0x07ffffff, 0x6007ffff,
+ 0xa00007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0x603001c1, 0xe0000001, 0xc0c00000, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00400000,
+ 0x00001000, 0x00000004, 0x00000000, 0x01000000,
+ 0x0000ffff, 0xfffffff0, 0x00000000, 0x0fffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00550005, 0x00220000,
+ 0x0000000c, 0x71f00000, 0x00f00aa0, 0x0aaff000,
+ 0x00005002, 0x20000000, 0x0000c413, 0x00000c0f,
+ 0x00aa0000, 0xff00b600, 0x000500a0, 0x00000300,
+ 0x000cc3f0, 0x0000c0f0, 0x0aa0000f, 0xff000000,
+ 0x011000a0, 0x05503000, 0x00d03700, 0x00000f00,
+ 0xaa005500, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf000aa00,
+ 0x11000a00, 0x55000000, 0x0d037000, 0x00c0f00a,
+ 0xa0055000, 0x0db00005, 0x5002a000, 0x00300000,
+ 0xf40f0000, 0x0c0f00aa, 0x0000ff10, 0x27400000,
+ 0x00008000, 0x00c00003, 0x037c0000, 0x003c02a8,
+ 0x02abfc00, 0x00000000, 0x6fff0000, 0x00000000,
+ 0x60000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* big_ls image (Image 10)
+ *
+ * ctr0 : Total number of CPU clock cycles during which local_stall_A1 is asserted
+ * ctr1 : Overflow of Counter 0
+ * ctr2 : Total number of IFLUSH_AV
+ * ctr3 : Overflow of Counter 2
+ */
+ {
+ 0x0c000000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x28880001, 0x54000000,
+ 0x00000004, 0xb6200000, 0x000aaaa0, 0x05555288,
+ 0x80000010, 0x00000000, 0x0000486e, 0x00000000,
+ 0xaaaa0055, 0x55002888, 0x00545401, 0x03030000,
+ 0x0007b000, 0x0000ff00, 0x00000000, 0x05000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0xa0000000, 0x00055fff, 0xfff00000,
+ 0x00aa0000, 0x05502a2a, 0x00151500, 0x0a220015,
+ 0x40400000, 0x00000001, 0xe2980000, 0x0002aaa8,
+ 0x01555400, 0x00000000, 0x0df70000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* br_abort image (Image 12)
+ *
+ * ctr0 : Total number of BRAD_STALLH
+ * ctr1 : Total number of ONE_QUAD
+ * ctr2 : Total number of BR0_ABRT
+ * ctr3 : Total number of BR1_ABRT
+ */
+
+ {
+ 0x0c002000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0xffffffff, 0xffffffff, 0xff0fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x1077ffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x551b0000, 0x00000000,
+ 0x0000000c, 0xd4f00000, 0x00000000, 0x0ffff001,
+ 0xb0000000, 0x00000000, 0x0000fd4c, 0x00000000,
+ 0x000000ff, 0xff00ff1b, 0x00000000, 0x00000000,
+ 0x0000d000, 0x0000ff00, 0x00000000, 0x0e0fffff,
+ 0xffffffff, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0xffffffff, 0xffffffff, 0xfff00000,
+ 0x00400000, 0x00000000, 0x00ffff00, 0x2a86c000,
+ 0x00000000, 0x00000000, 0xf50c0000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0x1a250000, 0x00000000,
+ 0x10000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffafff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+
+/* isnt image (Image 13)
+ *
+ * ctr0 : Total number of cycles for which iside_notrans is asserted.
+ * ctr1 : Total number of times iside_notrans is asserted for 1-4 cycles.
+ * ctr2 : Total number of times iside_notrans is asserted for 5-7 cycles.
+ * ctr3 : Total number of times iside_notrans is asserted for > 7 cycles.
+ */
+
+ {
+ 0x0c018000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xc0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x22000000, 0x000001bc,
+ 0x10000006, 0x00900000, 0x50000000, 0x00055a20,
+ 0x00000000, 0x00016060, 0x0000c021, 0x00000540,
+ 0x00000000, 0x55002200, 0x00000000, 0x56bc4000,
+ 0x00048000, 0x0000ff00, 0x00000000, 0x17000000,
+ 0x0000055f, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00000000, 0x000055ff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0x00000000,
+ 0x000055ff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x80000000, 0x00015bf3, 0xf5500000,
+ 0x02210000, 0x00100000, 0x00005500, 0x08800000,
+ 0x00001545, 0x85000001, 0x80240000, 0x11000000,
+ 0x00015400, 0x00000000, 0xcdff0000, 0x00000000,
+ 0xc0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* quadrant image (image 14)
+ *
+ * ctr0 : Total number of instructions in quadrant 0.
+ * ctr1 : Total number of instructions in quadrant 1.
+ * ctr2 : Total number of instructions in quadrant 2.
+ * ctr3 : Total number of instructions in quadrant 3.
+ *
+ * Only works for 32-bit applications.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x0007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0xf0000000, 0x0fffff00, 0x000fffff, 0x00000fff,
+ 0xff00000f, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000040f, 0xfffffffc, 0xff000000,
+ 0x0080ffff, 0xffffcff0, 0x0000000c, 0x0fffffff,
+ 0xfcff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x551b0000, 0x00000000,
+ 0x00000003, 0x17000000, 0x00000000, 0x0ffff001,
+ 0xb0000000, 0x00000000, 0x00000173, 0x00000000,
+ 0x000000ff, 0xff00ff1b, 0x00000000, 0x00000000,
+ 0x000f1ff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xff7f0000, 0x00000000, 0x00fffff0, 0x2a86c000,
+ 0x00000000, 0x00000003, 0x05f00000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* rw_pdfet image (Image 15)
+ *
+ * ctr0 : Total of all READ_PRIV address valid cycles.
+ * ctr1 : Total of all READ_PRIV data valid cycles.
+ * ctr2 : Overflow of Counter 0.
+ * ctr3 : Overflow of Counter 1.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0xf8000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ },
+
+
+/* rw_wdfet image (Image 16)
+ *
+ * ctr0 : Counts total number of writeback transactions.
+ * ctr1 : Total number of data valid Runway cycles.
+ * ctr2 : Overflow of Counter 0.
+ * ctr3 : Overflow of Counter 1.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xefefefef, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00001b00, 0xaa000000,
+ 0x00000001, 0x30700000, 0x00055aaf, 0xf0000000,
+ 0x01b00000, 0x00000000, 0x00001037, 0x00000000,
+ 0x55aaff00, 0x00c00000, 0x1b55aa00, 0x00000000,
+ 0x0001fff0, 0xcfffff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xffffffff, 0x30ffff0c, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xfffffff3,
+ 0x0ffff0cf, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffffffff, 0xfffffff3, 0x0ffff0cf, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffff30,
+ 0xfff70000, 0x000055aa, 0xff000000, 0x000006d5,
+ 0x40000000, 0x00000000, 0x731c0000, 0x000156ab,
+ 0xfc000000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00100000, 0x00000000,
+ 0x98000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
+ },
+
+/* shlib_cpi image (Image 17)
+ *
+ * ctr0 : Total number of instructions in quadrant 0.
+ * ctr1 : Total number of CPU clock cycles in quadrant 0.
+ * ctr2 : Total number of Non-Nullified instructions retired.
+ * ctr3 : Total number of CPU clock cycles.
+ *
+ * Only works for 32-bit shared libraries.
+ */
+
+ {
+ 0x0c01e000, 0x00000000, 0x00060000, 0x00000000,
+ 0xe0e0e0e0, 0x00001fff, 0xfc00007f, 0xfff00001,
+ 0xffffc000, 0x07ffff00, 0x07ffffff, 0x0007ffff,
+ 0xff0007ff, 0xffff0007, 0xffffff00, 0x00000000,
+ 0xf0150000, 0x0fffff00, 0x000fffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ffffff,
+ 0xffcff000, 0x0000000f, 0xfffffffc, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x27000000, 0x00000055,
+ 0x02000005, 0x7f500000, 0xc0000000, 0x000ff270,
+ 0x00000000, 0x00000000, 0x00007700, 0x00000ff0,
+ 0x00000000, 0x0000ffff, 0xffffffff, 0xffffff00,
+ 0x00000000, 0x0000ff00, 0x00000000, 0x0f0fffff,
+ 0xffffffff, 0xfffff000, 0x00000000, 0x000ff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x00ff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x00ff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xfff00000,
+ 0x00000000, 0x0ff00000, 0x000000a0, 0x3fffffff,
+ 0xffffffff, 0xffc00000, 0x03d40000, 0x20000000,
+ 0x0003fc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff7fbfc, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff7fbfc, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00030000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* flop image (Image 18)
+ *
+ * ctr0 : Total number of floating point instructions (opcode = 0xc).
+ * ctr1 : Total number of floating point instructions (opcode = 0xe, 0x6, 0x2e, 0x26).
+ * ctr2 : Unused
+ * ctr3 : Unused
+ */
+
+ {
+ 0x0001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00001010, 0x33ffffff, 0x006fffff, 0xfc5fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* cachemiss image FROM I_D_MISSES.IDF (Image 19)
+ *
+ * ctr0 : icache misses for retired instructions
+ * ctr1 : total cycles
+ * ctr2 : dcache misses for retired instructions
+ * ctr3 : number of retired instructions
+ */
+ {
+ 0x2801e000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00001000, 0xffffffff, 0xffffffff, 0xfff00fff,
+ 0xfffa3fff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xf2fdf0f0, 0xf0f0f0f0,
+ 0xffffffff, 0xf6c00000, 0x00000000, 0x0ff55800,
+ 0x90000000, 0x00000000, 0x0000b0ff, 0xfffffff0,
+ 0x00000003, 0x0100bfff, 0x3f3f3f3f, 0x3f3f5555,
+ 0x555fffff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xfff00000, 0x000301b0, 0x2fefcfcf,
+ 0xcfcfcfcf, 0xd5555557, 0xf7b40000, 0x00000000,
+ 0x03c14000, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* branch FROM br_report3.idf
+ *
+ * ctr0 : Total number of mispredicted branches.
+ * ctr1 : Some Non-Nullified unpredictable branches.
+ * ctr2 : Total number of branches (Nullified + Non-Nullified)
+ *        (Unpredicted + Predicted Taken + Predicted Not Taken).
+ * Total of All Branches.
+ * ctr3 : Remaining Non-Nullified unpredictable branches.
+ */
+ {
+ 0x4001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xff9fffff, 0xfe0fffff,
+ 0xffffbaff, 0xfdffc0ff, 0xfffdffff, 0xfffffeff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf4ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* crstack FROM crs_report.idf
+ *
+ * ctr0: correctly predicted branches by the pop_latch
+ * ctr1: some procedure returns
+ * ctr2: all branches, (includes nullified)
+ * ctr3: remaining procedure returns
+ */
+ {
+ 0x4001e000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xffa10300, 0x000fffff,
+ 0xffffbaf8, 0x3000007f, 0xffffffff, 0xfffffeff,
+ 0xff7fffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0xf2ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd22d0000, 0x00000000,
+ 0x0000000b, 0x46000000, 0x00000000, 0x0ffff900,
+ 0x90000000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x000000ff, 0xff00bfdf, 0x03030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0xf0ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x0fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x003f3ff0, 0x2766c000,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000000,
+ 0x03fffc00, 0x00000000, 0xffff0000, 0x00000000,
+ 0xf0000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ },
+
+/* icache_report image
+ *
+ * ctr0 : Icache misses actually used by the core.
+ * ctr1 : ICORE_AV (Icache misses the core THINKS it needs, including fetching down speculative paths).
+ * ctr2 : READs on Runway (Icache misses that made it out to Runway, including
+ * prefetches).
+ * ctr3 : Prefetch returns (1x and 2x).
+ */
+ {
+ 0x00000000, 0x00000000, 0x00010000, 0x00000000,
+ 0x00000000, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffff00, 0x00000000,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffff0000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xd2002d00, 0x00000000,
+ 0x0000000b, 0x46000000, 0x0000000f, 0xf00ff900,
+ 0x00900000, 0x00000000, 0x0000907e, 0x00000000,
+ 0x0000ff00, 0xff83bf03, 0xdf030303, 0x03030000,
+ 0x000dbfff, 0xffffff00, 0x00000000, 0x000fffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xfffff000,
+ 0x00000000, 0x00ffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffff0000, 0x00000000, 0x80ffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffff0000,
+ 0x00000000, 0x4fffffff, 0xffffffff, 0xffffffff,
+ 0xffff5555, 0x55500000, 0x3f003f80, 0x274026c0,
+ 0x00000000, 0x00000002, 0x67840000, 0x00000003,
+ 0xfc03fc00, 0x00000000, 0x0eff0000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00ffffff, 0xff3fffff,
+ 0xffffffff, 0xffcfffff, 0xfff6fb7c, 0x00000000,
+ 0x00ffffff, 0xff3fffff, 0xffffffff, 0xffcfffff,
+ 0xfff6fb7c, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffff0fff, 0xffffff3f,
+ 0xffffffff, 0xffffff7f, 0xffffffff, 0xfffffefc,
+ 0x00000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0xffffffff, 0xfffff9ff,
+ 0xfe000000, 0x00000000, 0x00130000, 0x00000000,
+ 0xd0ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0x00ffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+
+ }
+
+};
+
+#endif
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
new file mode 100644
index 000000000000..46e4a6881f11
--- /dev/null
+++ b/arch/parisc/kernel/process.c
@@ -0,0 +1,396 @@
+/*
+ * PARISC Architecture-dependent parts of process handling
+ * based on the work for i386
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
+ * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
+ * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001-2002 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdarg.h>
+
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/kallsyms.h>
+
+#include <asm/io.h>
+#include <asm/offsets.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/pgalloc.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+
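+/* Nesting count maintained by disable_hlt()/enable_hlt(). */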
+static int hlt_counter;
+
+/*
+ * Power off function, if any
+ */
+void (*pm_power_off)(void);
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+EXPORT_SYMBOL(enable_hlt);
+
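+/*
+ * Default idle routine: nothing architecture-specific to do here, so
+ * simply act as a compiler barrier for the caller's polling loop.
+ */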
+void default_idle(void)
+{
+ barrier();
+}
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched())
+ barrier();
+ schedule();
+ check_pgt_cache();
+ }
+}
+
+
+#ifdef __LP64__
+#define COMMAND_GLOBAL 0xfffffffffffe0030UL
+#else
+#define COMMAND_GLOBAL 0xfffe0030
+#endif
+
+#define CMD_RESET 5 /* reset any module */
+
+/*
+** The Wright Brothers and Gecko systems have a H/W problem
+** (Lasi...'nuf said) that may cause a broadcast reset to lock up
+** the system. An HVERSION dependent PDC call was developed
+** to perform a "safe", platform specific broadcast reset instead
+** of kludging up all the code.
+**
+** Older machines which do not implement PDC_BROADCAST_RESET will
+** return (with an error) and the regular broadcast reset can be
+** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
+** the PDC call will not return (the system will be reset).
+*/
+void machine_restart(char *cmd)
+{
+#ifdef FASTBOOT_SELFTEST_SUPPORT
+ /*
+ ** If the user has modified the Firmware Selftest Bitmap,
+ ** run the tests specified in the bitmap after the
+ ** system is rebooted w/PDC_DO_RESET.
+ **
+ ** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
+ **
+ ** Using "directed resets" at each processor with the MEM_TOC
+ ** vector cleared will also avoid running destructive
+ ** memory self tests. (Not implemented yet)
+ */
+ if (ftc_bitmap) {
+ pdc_do_firm_test_reset(ftc_bitmap);
+ }
+#endif
+ /* set up a new LED state on systems shipped with a LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+ /* "Normal" system reset */
+ pdc_do_reset();
+
+ /* Nope...box should reset with just CMD_RESET now */
+ gsc_writel(CMD_RESET, COMMAND_GLOBAL);
+
+ /* Wait for RESET to lay us to rest. */
+ while (1) ;
+
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+ /*
+ ** The LED/ChassisCodes are updated by the led_halt()
+ ** function, called by the reboot notifier chain.
+ */
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+
+/*
+ * This routine is called from sys_reboot to actually turn off the
+ * machine
+ */
+void machine_power_off(void)
+{
+ /* If there is a registered power off handler, call it. */
+ if(pm_power_off)
+ pm_power_off();
+
+ /* Put the soft power button back under hardware control.
+ * If the user had already pressed the power button, the
+ * following call will immediately power off. */
+ pdc_soft_power_button(0);
+
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
+
+ /* It seems we have no way to power the system off via
+ * software. The user has to press the button himself. */
+
+ printk(KERN_EMERG "System shut down completed.\n"
+	       KERN_EMERG "Please power this system off now.\n");
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+
+/*
+ * Create a kernel thread
+ */
+
+extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+
+ /*
+ * FIXME: Once we are sure we don't need any debug here,
+ * kernel_thread can become a #define.
+ */
+
+ return __kernel_thread(fn, arg, flags);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+ /* Only needs to handle fpu stuff or perf monitors.
+ ** REVISIT: several arches implement a "lazy fpu state".
+ */
+ set_fs(USER_DS);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * Fill in the FPU structure for a core dump.
+ */
+
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
+{
+ if (regs == NULL)
+ return 0;
+
+ memcpy(r, regs->fr, sizeof *r);
+ return 1;
+}
+
+int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
+{
+ memcpy(r, tsk->thread.regs.fr, sizeof(*r));
+ return 1;
+}
+
+/* Note that "fork()" is implemented in terms of clone, with
+ parameters (SIGCHLD, regs->gr[30], regs). */
+int
+sys_clone(unsigned long clone_flags, unsigned long usp,
+ struct pt_regs *regs)
+{
+ int __user *user_tid = (int __user *)regs->gr[26];
+
+ /* usp must be word aligned. This also prevents users from
+ * passing in the value 1 (which is the signal for a special
+ * return for a kernel thread) */
+ usp = ALIGN(usp, 4);
+
+ /* A zero value for usp means use the current stack */
+ if(usp == 0)
+ usp = regs->gr[30];
+
+ return do_fork(clone_flags, usp, regs, 0, user_tid, NULL);
+}
+
+int
+sys_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
+}
+
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused, /* in ia64 this is "user_stack_size" */
+ struct task_struct * p, struct pt_regs * pregs)
+{
+ struct pt_regs * cregs = &(p->thread.regs);
+ struct thread_info *ti = p->thread_info;
+
+ /* We have to use void * instead of a function pointer, because
+	 * function pointers aren't pointers to the function on 64-bit.
+ * Make them const so the compiler knows they live in .text */
+ extern void * const ret_from_kernel_thread;
+ extern void * const child_return;
+#ifdef CONFIG_HPUX
+ extern void * const hpux_child_return;
+#endif
+
+ *cregs = *pregs;
+
+ /* Set the return value for the child. Note that this is not
+ actually restored by the syscall exit path, but we put it
+ here for consistency in case of signals. */
+ cregs->gr[28] = 0; /* child */
+
+ /*
+ * We need to differentiate between a user fork and a
+	 * kernel fork. We can't use user_mode, because
+	 * the syscall path doesn't save iaoq. Right now
+	 * we rely on the fact that kernel_thread passes
+ * in zero for usp.
+ */
+ if (usp == 1) {
+ /* kernel thread */
+ cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN);
+ /* Must exit via ret_from_kernel_thread in order
+ * to call schedule_tail()
+ */
+ cregs->kpc = (unsigned long) &ret_from_kernel_thread;
+ /*
+ * Copy function and argument to be called from
+ * ret_from_kernel_thread.
+ */
+#ifdef __LP64__
+ cregs->gr[27] = pregs->gr[27];
+#endif
+ cregs->gr[26] = pregs->gr[26];
+ cregs->gr[25] = pregs->gr[25];
+ } else {
+ /* user thread */
+ /*
+ * Note that the fork wrappers are responsible
+ * for setting gr[21].
+ */
+
+ /* Use same stack depth as parent */
+ cregs->ksp = ((unsigned long)(ti))
+ + (pregs->gr[21] & (THREAD_SIZE - 1));
+ cregs->gr[30] = usp;
+ if (p->personality == PER_HPUX) {
+#ifdef CONFIG_HPUX
+ cregs->kpc = (unsigned long) &hpux_child_return;
+#else
+ BUG();
+#endif
+ } else {
+ cregs->kpc = (unsigned long) &child_return;
+ }
+ }
+
+ return 0;
+}
+
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ return t->thread.regs.kpc;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+
+asmlinkage int sys_execve(struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname((const char __user *) regs->gr[26]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, (char __user **) regs->gr[25],
+ (char __user **) regs->gr[24], regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+out:
+
+ return error;
+}
+
+unsigned long
+get_wchan(struct task_struct *p)
+{
+ struct unwind_frame_info info;
+ unsigned long ip;
+ int count = 0;
+ /*
+ * These bracket the sleeping functions..
+ */
+
+ unwind_frame_init_from_blocked_task(&info, p);
+ do {
+ if (unwind_once(&info) < 0)
+ return 0;
+ ip = info.ip;
+ if (!in_sched_functions(ip))
+ return ip;
+ } while (count++ < 16);
+ return 0;
+}
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
new file mode 100644
index 000000000000..13b721cb9f55
--- /dev/null
+++ b/arch/parisc/kernel/processor.c
@@ -0,0 +1,400 @@
+/* $Id: processor.c,v 1.1 2002/07/20 16:27:06 rhirst Exp $
+ *
+ * Initial setup-routines for HP 9000 based hardware.
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ * Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+
+#include <asm/cache.h>
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/processor.h>
+#include <asm/page.h>
+#include <asm/pdc.h>
+#include <asm/pdcpat.h>
+#include <asm/irq.h> /* for struct irq_region */
+#include <asm/parisc-device.h>
+
+struct system_cpuinfo_parisc boot_cpu_data;
+EXPORT_SYMBOL(boot_cpu_data);
+
+struct cpuinfo_parisc cpu_data[NR_CPUS];
+
+/*
+** PARISC CPU driver - claim "device" and initialize CPU data structures.
+**
+** Consolidate per CPU initialization into (mostly) one module.
+** Monarch CPU will initialize boot_cpu_data which shouldn't
+** change once the system has booted.
+**
+** The callback *should* do per-instance initialization of
+** everything including the monarch. "Per CPU" init code in
+** setup.c:start_parisc() has migrated here and start_parisc()
+** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
+**
+** The goal of consolidating CPU initialization into one place is
+** to make sure all CPUs get initialized the same way.
+** The code path not shared is how PDC hands control of the CPU to the OS.
+** The initialization of OS data structures is the same (done below).
+*/
+
+/**
+ * processor_probe - Determine if processor driver should claim this device.
+ * @dev: The device which has been found.
+ *
+ * Determine if processor driver should claim this chip (return 0) or not
+ * (return 1). If so, initialize the chip and tell other partners in crime
+ * they have work to do.
+ */
+static int __init processor_probe(struct parisc_device *dev)
+{
+ unsigned long txn_addr;
+ unsigned long cpuid;
+ struct cpuinfo_parisc *p;
+
+#ifndef CONFIG_SMP
+ if (boot_cpu_data.cpu_count > 0) {
+ printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n");
+ return 1;
+ }
+#endif
+
+	/* Pick the logical CPU ID and update the global counter.
+	 * May get overwritten by the PAT code below.
+	 */
+ cpuid = boot_cpu_data.cpu_count;
+ txn_addr = dev->hpa; /* for legacy PDC */
+
+#ifdef __LP64__
+ if (is_pdc_pat()) {
+ ulong status;
+ unsigned long bytecnt;
+ pdc_pat_cell_mod_maddr_block_t pa_pdc_cell;
+#undef USE_PAT_CPUID
+#ifdef USE_PAT_CPUID
+ struct pdc_pat_cpu_num cpu_info;
+#endif
+
+ status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
+ dev->mod_index, PA_VIEW, &pa_pdc_cell);
+
+ BUG_ON(PDC_OK != status);
+
+ /* verify it's the same as what do_pat_inventory() found */
+ BUG_ON(dev->mod_info != pa_pdc_cell.mod_info);
+ BUG_ON(dev->pmod_loc != pa_pdc_cell.mod_location);
+
+ txn_addr = pa_pdc_cell.mod[0]; /* id_eid for IO sapic */
+
+#ifdef USE_PAT_CPUID
+/* We need contiguous numbers for cpuid. Firmware's notion
+ * of cpuid is for physical CPUs and we just don't care yet.
+ * We'll care when we need to query PAT PDC about a CPU *after*
+ * boot time (ie shutdown a CPU from an OS perspective).
+ */
+ /* get the cpu number */
+ status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa);
+
+ BUG_ON(PDC_OK != status);
+
+ if (cpu_info.cpu_num >= NR_CPUS) {
+ printk(KERN_WARNING "IGNORING CPU at 0x%x,"
+ " cpu_slot_id > NR_CPUS"
+ " (%ld > %d)\n",
+ dev->hpa, cpu_info.cpu_num, NR_CPUS);
+ /* Ignore CPU since it will only crash */
+ boot_cpu_data.cpu_count--;
+ return 1;
+ } else {
+ cpuid = cpu_info.cpu_num;
+ }
+#endif
+ }
+#endif
+
+ p = &cpu_data[cpuid];
+ boot_cpu_data.cpu_count++;
+
+ /* initialize counters */
+ memset(p, 0, sizeof(struct cpuinfo_parisc));
+
+ p->loops_per_jiffy = loops_per_jiffy;
+ p->dev = dev; /* Save IODC data in case we need it */
+ p->hpa = dev->hpa; /* save CPU hpa */
+ p->cpuid = cpuid; /* save CPU id */
+ p->txn_addr = txn_addr; /* save CPU IRQ address */
+#ifdef CONFIG_SMP
+ spin_lock_init(&p->lock);
+
+ /*
+ ** FIXME: review if any other initialization is clobbered
+ ** for boot_cpu by the above memset().
+ */
+
+ /* stolen from init_percpu_prof() */
+ cpu_data[cpuid].prof_counter = 1;
+ cpu_data[cpuid].prof_multiplier = 1;
+#endif
+
+ /*
+	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
+	** OS control. RENDEZVOUS is the default state - see the memset() above.
+ ** p->state = STATE_RENDEZVOUS;
+ */
+
+#if 0
+ /* CPU 0 IRQ table is statically allocated/initialized */
+ if (cpuid) {
+ struct irqaction actions[];
+
+ /*
+ ** itimer and ipi IRQ handlers are statically initialized in
+ ** arch/parisc/kernel/irq.c. ie Don't need to register them.
+ */
+ actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
+ if (!actions) {
+			/* not getting its own table, share with monarch */
+ actions = cpu_irq_actions[0];
+ }
+
+ cpu_irq_actions[cpuid] = actions;
+ }
+#endif
+
+ /*
+ * Bring this CPU up now! (ignore bootstrap cpuid == 0)
+ */
+#ifdef CONFIG_SMP
+ if (cpuid) {
+ cpu_set(cpuid, cpu_present_map);
+ cpu_up(cpuid);
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * collect_boot_cpu_data - Fill the boot_cpu_data structure.
+ *
+ * This function collects and stores the generic processor information
+ * in the boot_cpu_data structure.
+ */
+void __init collect_boot_cpu_data(void)
+{
+ memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));
+
+ boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */
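+	/* PAGE0->mem_10msec counts processor clock ticks over a 10 ms
+	 * interval, so 100 such intervals per second gives Hz;
+	 * e.g. 4,400,000 ticks corresponds to a 440 MHz part. */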
+
+ /* get CPU-Model Information... */
+#define p ((unsigned long *)&boot_cpu_data.pdc.model)
+ if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK)
+ printk(KERN_INFO
+ "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
+#undef p
+
+ if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK)
+ printk(KERN_INFO "vers %08lx\n",
+ boot_cpu_data.pdc.versions);
+
+ if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK)
+ printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
+ (boot_cpu_data.pdc.cpuid >> 5) & 127,
+ boot_cpu_data.pdc.cpuid & 31,
+ boot_cpu_data.pdc.cpuid);
+
+ if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
+ printk(KERN_INFO "capabilities 0x%lx\n",
+ boot_cpu_data.pdc.capabilities);
+
+ if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
+ printk(KERN_INFO "model %s\n",
+ boot_cpu_data.pdc.sys_model_name);
+
+ boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
+ boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;
+
+ boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
+ boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
+ boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];
+}
+
+
+/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static inline void __init
+init_percpu_prof(int cpunum)
+{
+ cpu_data[cpunum].prof_counter = 1;
+ cpu_data[cpunum].prof_multiplier = 1;
+}
+
+
+/**
+ * init_per_cpu - Handle individual processor initializations.
+ * @cpunum: logical processor number.
+ *
+ * This function handles initialization for *every* CPU
+ * in the system:
+ *
+ * o Set "default" CPU width for trap handlers
+ *
+ * o Enable FP coprocessor
+ * REVISIT: this could be done in the "code 22" trap handler.
+ *	(frowand's idea - that way we know which processes need FP
+ * registers saved on the interrupt stack.)
+ * NEWS FLASH: wide kernels need FP coprocessor enabled to handle
+ * formatted printing of %lx for example (double divides I think)
+ *
+ * o Enable CPU profiling hooks.
+ */
+int __init init_per_cpu(int cpunum)
+{
+ int ret;
+ struct pdc_coproc_cfg coproc_cfg;
+
+ set_firmware_width();
+ ret = pdc_coproc_cfg(&coproc_cfg);
+
+ if(ret >= 0 && coproc_cfg.ccr_functional) {
+ mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */
+
+ /* FWIW, FP rev/model is a more accurate way to determine
+ ** CPU type. CPU rev/model has some ambiguous cases.
+ */
+ cpu_data[cpunum].fp_rev = coproc_cfg.revision;
+ cpu_data[cpunum].fp_model = coproc_cfg.model;
+
+ printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
+ cpunum, coproc_cfg.revision, coproc_cfg.model);
+
+ /*
+ ** store status register to stack (hopefully aligned)
+ ** and clear the T-bit.
+ */
+ asm volatile ("fstd %fr0,8(%sp)");
+
+ } else {
+ printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
+ " (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
+#ifdef __LP64__
+ "Halting Machine - FP required\n"
+#endif
+ , coproc_cfg.ccr_functional);
+#ifdef __LP64__
+ mdelay(100); /* previous chars get pushed to console */
+ panic("FP CoProc not reported");
+#endif
+ }
+
+ /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
+ init_percpu_prof(cpunum);
+
+ return ret;
+}
+
+/*
+ * Display cpu info for all cpu's.
+ */
+int
+show_cpuinfo (struct seq_file *m, void *v)
+{
+ int n;
+
+ for(n=0; n<boot_cpu_data.cpu_count; n++) {
+#ifdef CONFIG_SMP
+ if (0 == cpu_data[n].hpa)
+ continue;
+#ifdef ENTRY_SYS_CPUS
+#error iCOD support wants to show CPU state here
+#endif
+#endif
+ seq_printf(m, "processor\t: %d\n"
+ "cpu family\t: PA-RISC %s\n",
+ n, boot_cpu_data.family_name);
+
+ seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name );
+
+ /* cpu MHz */
+ seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
+ boot_cpu_data.cpu_hz / 1000000,
+ boot_cpu_data.cpu_hz % 1000000 );
+
+ seq_printf(m, "model\t\t: %s\n"
+ "model name\t: %s\n",
+ boot_cpu_data.pdc.sys_model_name,
+ cpu_data[n].dev ?
+ cpu_data[n].dev->name : "Unknown" );
+
+ seq_printf(m, "hversion\t: 0x%08x\n"
+ "sversion\t: 0x%08x\n",
+ boot_cpu_data.hversion,
+ boot_cpu_data.sversion );
+
+ /* print cachesize info */
+ show_cache_info(m);
+
+ seq_printf(m, "bogomips\t: %lu.%02lu\n",
+ cpu_data[n].loops_per_jiffy / (500000 / HZ),
+ (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, "software id\t: %ld\n\n",
+ boot_cpu_data.pdc.model.sw_id);
+ }
+ return 0;
+}
+
+static struct parisc_device_id processor_tbl[] = {
+ { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
+ { 0, }
+};
+
+static struct parisc_driver cpu_driver = {
+ .name = "CPU",
+ .id_table = processor_tbl,
+ .probe = processor_probe
+};
+
+/**
+ * processor_init - Processor initialization procedure.
+ *
+ * Register this driver.
+ */
+void __init processor_init(void)
+{
+ register_parisc_driver(&cpu_driver);
+}
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
new file mode 100644
index 000000000000..2937a9236384
--- /dev/null
+++ b/arch/parisc/kernel/ptrace.c
@@ -0,0 +1,423 @@
+/*
+ * Kernel support for the ptrace() and syscall tracing interfaces.
+ *
+ * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
+ * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/personality.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/offsets.h>
+
+/* PSW bits we allow the debugger to modify */
+#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB)
+
+#undef DEBUG_PTRACE
+
+#ifdef DEBUG_PTRACE
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#ifdef __LP64__
+
+/* This function is needed to translate 32 bit pt_regs offsets into
+ * 64 bit pt_regs offsets. For example, a 32 bit gdb under a 64 bit kernel
+ * will request offset 12 if it wants gr3, but the lower 32 bits of
+ * the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
+ * This code relies on a 32 bit pt_regs being comprised of 32 bit values
+ * except for the fp registers which (a) are 64 bits, and (b) follow
+ * the gr registers at the start of pt_regs. The 32 bit pt_regs should
+ * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
+ * being 64 bit in both cases.
+ */
+
+static long translate_usr_offset(long offset)
+{
+ if (offset < 0)
+ return -1;
+ else if (offset <= 32*4) /* gr[0..31] */
+ return offset * 2 + 4;
+ else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
+ return offset + 32*4;
+ else if (offset < sizeof(struct pt_regs)/2 + 32*4)
+ return offset * 2 + 4 - 32*8;
+ else
+ return -1;
+}
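+
+/* More worked examples of the mapping above: a request for offset 4 (gr1)
+ * becomes 4*2 + 4 = 12, the low word of the 64 bit gr[1]; a request for
+ * offset 32*4 + 8 (the second fp register) becomes 32*4 + 8 + 32*4 = 264,
+ * which is fr[1] in the 64 bit layout where gr[] alone occupies the first
+ * 32*8 bytes.
+ */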
+#endif
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* make sure the trap bits are not set */
+ pa_psw(child)->r = 0;
+ pa_psw(child)->t = 0;
+ pa_psw(child)->h = 0;
+ pa_psw(child)->l = 0;
+}
+
+long sys_ptrace(long request, pid_t pid, long addr, long data)
+{
+ struct task_struct *child;
+ long ret;
+#ifdef DEBUG_PTRACE
+ long oaddr=addr, odata=data;
+#endif
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ goto out;
+
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+ ret = -EPERM;
+ if (pid == 1) /* no messing around with init! */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_tsk;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ int copied;
+
+#ifdef __LP64__
+ if (is_compat_task(child)) {
+ unsigned int tmp;
+
+ addr &= 0xffffffffL;
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ goto out_tsk;
+ ret = put_user(tmp,(unsigned int *) data);
+ DBG("sys_ptrace(PEEK%s, %d, %lx, %lx) returning %ld, data %x\n",
+ request == PTRACE_PEEKTEXT ? "TEXT" : "DATA",
+ pid, oaddr, odata, ret, tmp);
+ }
+ else
+#endif
+ {
+ unsigned long tmp;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ goto out_tsk;
+ ret = put_user(tmp,(unsigned long *) data);
+ }
+ goto out_tsk;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+#ifdef __LP64__
+ if (is_compat_task(child)) {
+ unsigned int tmp = (unsigned int)data;
+ DBG("sys_ptrace(POKE%s, %d, %lx, %lx)\n",
+ request == PTRACE_POKETEXT ? "TEXT" : "DATA",
+ pid, oaddr, odata);
+ addr &= 0xffffffffL;
+ if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1) == sizeof(tmp))
+ goto out_tsk;
+ }
+ else
+#endif
+ {
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ goto out_tsk;
+ }
+ ret = -EIO;
+ goto out_tsk;
+
+ /* Read the word at location addr in the USER area. For ptraced
+ processes, the kernel saves all regs on a syscall. */
+ case PTRACE_PEEKUSR: {
+ ret = -EIO;
+#ifdef __LP64__
+ if (is_compat_task(child)) {
+ unsigned int tmp;
+
+ if (addr & (sizeof(int)-1))
+ goto out_tsk;
+ if ((addr = translate_usr_offset(addr)) < 0)
+ goto out_tsk;
+
+ tmp = *(unsigned int *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (unsigned int *) data);
+ DBG("sys_ptrace(PEEKUSR, %d, %lx, %lx) returning %ld, addr %lx, data %x\n",
+ pid, oaddr, odata, ret, addr, tmp);
+ }
+ else
+#endif
+ {
+ unsigned long tmp;
+
+ if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
+ goto out_tsk;
+ tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
+ ret = put_user(tmp, (unsigned long *) data);
+ }
+ goto out_tsk;
+ }
+
+ /* Write the word at location addr in the USER area. This will need
+ to change when the kernel no longer saves all regs on a syscall.
+ FIXME. There is a problem at the moment in that r3-r18 are only
+ saved if the process is ptraced on syscall entry, and even then
+ those values are overwritten by actual register values on syscall
+ exit. */
+ case PTRACE_POKEUSR:
+ ret = -EIO;
+ /* Some register values written here may be ignored in
+ * entry.S:syscall_restore_rfi; e.g. iaoq is written with
+ * r31/r31+4, and not with the values in pt_regs.
+ */
+ /* PT_PSW=0, so this is valid for 32 bit processes under 64
+ * bit kernels.
+ */
+ if (addr == PT_PSW) {
+ /* PT_PSW=0, so this is valid for 32 bit processes
+ * under 64 bit kernels.
+ *
+ * Allow writing to Nullify, Divide-step-correction,
+ * and carry/borrow bits.
+ * BEWARE, if you set N, and then single step, it won't
+ * stop on the nullified instruction.
+ */
+ DBG("sys_ptrace(POKEUSR, %d, %lx, %lx)\n",
+ pid, oaddr, odata);
+ data &= USER_PSW_BITS;
+ task_regs(child)->gr[0] &= ~USER_PSW_BITS;
+ task_regs(child)->gr[0] |= data;
+ ret = 0;
+ goto out_tsk;
+ }
+#ifdef __LP64__
+ if (is_compat_task(child)) {
+ if (addr & (sizeof(int)-1))
+ goto out_tsk;
+ if ((addr = translate_usr_offset(addr)) < 0)
+ goto out_tsk;
+ DBG("sys_ptrace(POKEUSR, %d, %lx, %lx) addr %lx\n",
+ pid, oaddr, odata, addr);
+ if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
+ /* Special case, fp regs are 64 bits anyway */
+ *(unsigned int *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
+ addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
+ addr == PT_SAR+4) {
+ /* Zero the top 32 bits */
+ *(unsigned int *) ((char *) task_regs(child) + addr - 4) = 0;
+ *(unsigned int *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ goto out_tsk;
+ }
+ else
+#endif
+ {
+ if ((addr & (sizeof(long)-1)) || (unsigned long) addr >= sizeof(struct pt_regs))
+ goto out_tsk;
+ if ((addr >= PT_GR1 && addr <= PT_GR31) ||
+ addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
+ (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
+ addr == PT_SAR) {
+ *(unsigned long *) ((char *) task_regs(child) + addr) = data;
+ ret = 0;
+ }
+ goto out_tsk;
+ }
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT:
+ ret = -EIO;
+ DBG("sys_ptrace(%s)\n",
+ request == PTRACE_SYSCALL ? "SYSCALL" : "CONT");
+ if ((unsigned long) data > _NSIG)
+ goto out_tsk;
+ child->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP);
+ if (request == PTRACE_SYSCALL) {
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ } else {
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ }
+ child->exit_code = data;
+ goto out_wake_notrap;
+
+ case PTRACE_KILL:
+ /*
+		 * Make the child exit. Best I can do is send it a
+		 * SIGKILL. Perhaps it should be put in the status
+ * that it wants to exit.
+ */
+ DBG("sys_ptrace(KILL)\n");
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ goto out_tsk;
+ child->exit_code = SIGKILL;
+ goto out_wake_notrap;
+
+ case PTRACE_SINGLEBLOCK:
+ DBG("sys_ptrace(SINGLEBLOCK)\n");
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out_tsk;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->ptrace &= ~PT_SINGLESTEP;
+ child->ptrace |= PT_BLOCKSTEP;
+ child->exit_code = data;
+
+ /* Enable taken branch trap. */
+ pa_psw(child)->r = 0;
+ pa_psw(child)->t = 1;
+ pa_psw(child)->h = 0;
+ pa_psw(child)->l = 0;
+ goto out_wake;
+
+ case PTRACE_SINGLESTEP:
+ DBG("sys_ptrace(SINGLESTEP)\n");
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out_tsk;
+
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->ptrace &= ~PT_BLOCKSTEP;
+ child->ptrace |= PT_SINGLESTEP;
+ child->exit_code = data;
+
+ if (pa_psw(child)->n) {
+ struct siginfo si;
+
+ /* Nullified, just crank over the queue. */
+ task_regs(child)->iaoq[0] = task_regs(child)->iaoq[1];
+ task_regs(child)->iasq[0] = task_regs(child)->iasq[1];
+ task_regs(child)->iaoq[1] = task_regs(child)->iaoq[0] + 4;
+ pa_psw(child)->n = 0;
+ pa_psw(child)->x = 0;
+ pa_psw(child)->y = 0;
+ pa_psw(child)->z = 0;
+ pa_psw(child)->b = 0;
+ ptrace_disable(child);
+ /* Don't wake up the child, but let the
+ parent know something happened. */
+ si.si_code = TRAP_TRACE;
+ si.si_addr = (void __user *) (task_regs(child)->iaoq[0] & ~3);
+ si.si_signo = SIGTRAP;
+ si.si_errno = 0;
+ force_sig_info(SIGTRAP, &si, child);
+ //notify_parent(child, SIGCHLD);
+ //ret = 0;
+ goto out_wake;
+ }
+
+ /* Enable recovery counter traps. The recovery counter
+ * itself will be set to zero on a task switch. If the
+ * task is suspended on a syscall then the syscall return
+ * path will overwrite the recovery counter with a suitable
+ * value such that it traps once back in user space. We
+	 * disable interrupts in the child's PSW here also, to avoid
+ * interrupts while the recovery counter is decrementing.
+ */
+ pa_psw(child)->r = 1;
+ pa_psw(child)->t = 0;
+ pa_psw(child)->h = 0;
+ pa_psw(child)->l = 0;
+ /* give it a chance to run. */
+ goto out_wake;
+
+ case PTRACE_DETACH:
+ ret = ptrace_detach(child, data);
+ goto out_tsk;
+
+ case PTRACE_GETEVENTMSG:
+ ret = put_user(child->ptrace_message, (unsigned int __user *) data);
+ goto out_tsk;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ goto out_tsk;
+ }
+
+out_wake_notrap:
+ ptrace_disable(child);
+out_wake:
+ wake_up_process(child);
+ ret = 0;
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ DBG("sys_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
+ request, pid, oaddr, odata, ret);
+ return ret;
+}
+
+void syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S
new file mode 100644
index 000000000000..8dd5defb7316
--- /dev/null
+++ b/arch/parisc/kernel/real2.S
@@ -0,0 +1,304 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
+ *
+ */
+#include <asm/assembly.h>
+#include <asm/psw.h>
+
+ .section .bss
+ .export real_stack
+ .export real32_stack
+ .export real64_stack
+ .align 64
+real_stack:
+real32_stack:
+real64_stack:
+ .block 8192
+
+#ifdef __LP64__
+# define REG_SZ 8
+#else
+# define REG_SZ 4
+#endif
+
+#define N_SAVED_REGS 9
+
+save_cr_space:
+ .block REG_SZ * N_SAVED_REGS
+save_cr_end:
+
+
+/************************ 32-bit real-mode calls ***********************/
+/* This can be called in both narrow and wide kernels */
+
+ .text
+
+ .export real32_call_asm
+
+ /* unsigned long real32_call_asm(unsigned int *sp,
+ * unsigned int *arg0p,
+ * unsigned int iodc_fn)
+ * sp is value of stack pointer to adopt before calling PDC (virt)
+ * arg0p points to where saved arg values may be found
+ * iodc_fn is the IODC function to call
+ */
+
+real32_call_asm:
+ STREG %rp, -RP_OFFSET(%sp) /* save RP */
+#ifdef __LP64__
+ callee_save
+ ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */
+ STREG %r27, -1*REG_SZ(%sp)
+ STREG %r29, -2*REG_SZ(%sp)
+#endif
+ STREG %sp, -REG_SZ(%arg0) /* save SP on real-mode stack */
+ copy %arg0, %sp /* adopt the real-mode SP */
+
+ /* save iodc_fn */
+ copy %arg2, %r31
+
+ /* load up the arg registers from the saved arg area */
+ /* 32-bit calling convention passes first 4 args in registers */
+ ldw 0(%arg1), %arg0 /* note overwriting arg0 */
+ ldw -8(%arg1), %arg2
+ ldw -12(%arg1), %arg3
+ ldw -4(%arg1), %arg1 /* obviously must do this one last! */
+
+ tophys_r1 %sp
+
+ b,l rfi_virt2real,%r2
+ nop
+
+ b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
+ nop
+
+#ifdef __LP64__
+ rsm PSW_SM_W, %r0 /* go narrow */
+#endif
+
+ load32 PA(ric_ret), %r2
+ bv 0(%r31)
+ nop
+ric_ret:
+#ifdef __LP64__
+ ssm PSW_SM_W, %r0 /* go wide */
+#endif
+ /* restore CRs before going virtual in case we page fault */
+ b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
+ nop
+
+ b,l rfi_real2virt,%r2
+ nop
+
+ tovirt_r1 %sp
+ LDREG -REG_SZ(%sp), %sp /* restore SP */
+#ifdef __LP64__
+ LDREG -1*REG_SZ(%sp), %r27
+ LDREG -2*REG_SZ(%sp), %r29
+ ldo -2*REG_SZ(%sp), %sp
+ callee_rest
+#endif
+ LDREG -RP_OFFSET(%sp), %rp /* restore RP */
+ bv 0(%rp)
+ nop
+
+
+# define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where)
+# define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r
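+
+/* Note: '!' is the assembler's statement separator, so each macro expands
+ * to two instructions.  The ,ma (modify-after) store advances the pointer
+ * past each saved register, and the ,mb (modify-before) load walks it back,
+ * so save_cr_space is treated as a small stack of control registers.
+ */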
+
+ .text
+save_control_regs:
+ load32 PA(save_cr_space), %r28
+ PUSH_CR(%cr24, %r28)
+ PUSH_CR(%cr25, %r28)
+ PUSH_CR(%cr26, %r28)
+ PUSH_CR(%cr27, %r28)
+ PUSH_CR(%cr28, %r28)
+ PUSH_CR(%cr29, %r28)
+ PUSH_CR(%cr30, %r28)
+ PUSH_CR(%cr31, %r28)
+ PUSH_CR(%cr15, %r28)
+ bv 0(%r2)
+ nop
+
+restore_control_regs:
+ load32 PA(save_cr_end), %r26
+ POP_CR(%cr15, %r26)
+ POP_CR(%cr31, %r26)
+ POP_CR(%cr30, %r26)
+ POP_CR(%cr29, %r26)
+ POP_CR(%cr28, %r26)
+ POP_CR(%cr27, %r26)
+ POP_CR(%cr26, %r26)
+ POP_CR(%cr25, %r26)
+ POP_CR(%cr24, %r26)
+ bv 0(%r2)
+ nop
+
+/* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for
+ * more general-purpose use by the several places which need RFIs
+ */
+ .align 128
+ .text
+rfi_virt2real:
+ /* switch to real mode... */
+ ssm 0,0 /* See "relied upon translation" */
+ nop /* PA 2.0 Arch. F-5 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q & I bits to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ load32 PA(rfi_v2r_1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 REAL_MODE_PSW, %r1
+ mtctl %r1, %cr22
+ rfi
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+rfi_v2r_1:
+ tophys_r1 %r2
+ bv 0(%r2)
+ nop
+
+ .text
+ .align 128
+rfi_real2virt:
+ ssm 0,0 /* See "relied upon translation" */
+ nop /* PA 2.0 Arch. F-5 */
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ load32 (rfi_r2v_1), %r1
+ mtctl %r1, %cr18 /* IIAOQ head */
+ ldo 4(%r1), %r1
+ mtctl %r1, %cr18 /* IIAOQ tail */
+ load32 KERNEL_PSW, %r1
+ mtctl %r1, %cr22
+ rfi
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+rfi_r2v_1:
+ tovirt_r1 %r2
+ bv 0(%r2)
+ nop
+
+#ifdef __LP64__
+
+/************************ 64-bit real-mode calls ***********************/
+/* This is only usable in wide kernels right now and will probably stay so */
+ .text
+ .export real64_call_asm
+ /* unsigned long real64_call_asm(unsigned long *sp,
+ * unsigned long *arg0p,
+ * unsigned long fn)
+ * sp is value of stack pointer to adopt before calling PDC (virt)
+ * arg0p points to where saved arg values may be found
+	 * fn is the function to call
+ */
+real64_call_asm:
+ std %rp, -0x10(%sp) /* save RP */
+ std %sp, -8(%arg0) /* save SP on real-mode stack */
+ copy %arg0, %sp /* adopt the real-mode SP */
+
+ /* save fn */
+ copy %arg2, %r31
+
+ /* set up the new ap */
+ ldo 64(%arg1), %r29
+
+ /* load up the arg registers from the saved arg area */
+	/* 64-bit calling convention passes first 8 args in registers */
+ ldd 0*REG_SZ(%arg1), %arg0 /* note overwriting arg0 */
+ ldd 2*REG_SZ(%arg1), %arg2
+ ldd 3*REG_SZ(%arg1), %arg3
+ ldd 4*REG_SZ(%arg1), %r22
+ ldd 5*REG_SZ(%arg1), %r21
+ ldd 6*REG_SZ(%arg1), %r20
+ ldd 7*REG_SZ(%arg1), %r19
+ ldd 1*REG_SZ(%arg1), %arg1 /* do this one last! */
+
+ tophys_r1 %sp
+
+ b,l rfi_virt2real,%r2
+ nop
+
+ b,l save_control_regs,%r2 /* modifies r1, r2, r28 */
+ nop
+
+ load32 PA(r64_ret), %r2
+ bv 0(%r31)
+ nop
+r64_ret:
+ /* restore CRs before going virtual in case we page fault */
+ b,l restore_control_regs, %r2 /* modifies r1, r2, r26 */
+ nop
+
+ b,l rfi_real2virt,%r2
+ nop
+
+ tovirt_r1 %sp
+ ldd -8(%sp), %sp /* restore SP */
+ ldd -0x10(%sp), %rp /* restore RP */
+ bv 0(%rp)
+ nop
+
+#endif
+
+ .export pc_in_user_space
+ .text
+ /* Doesn't belong here but I couldn't find a nicer spot. */
+ /* Should never get called, only used by profile stuff in time.c */
+pc_in_user_space:
+ bv,n 0(%rp)
+ nop
+
+
+ .export __canonicalize_funcptr_for_compare
+ .text
+ /* http://lists.parisc-linux.org/hypermail/parisc-linux/10916.html
+ ** GCC 3.3 and later has a new function in libgcc.a for
+ ** comparing function pointers.
+ */
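+	/* The kernel's version simply returns its argument unchanged: the
+	 * copy of %r26 into the return register %r28 executes in the branch
+	 * delay slot.
+	 */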
+__canonicalize_funcptr_for_compare:
+#ifdef __LP64__
+ bve (%r2)
+#else
+ bv %r0(%r2)
+#endif
+ copy %r26,%r28
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c
new file mode 100644
index 000000000000..ee806bcc3726
--- /dev/null
+++ b/arch/parisc/kernel/semaphore.c
@@ -0,0 +1,102 @@
+/*
+ * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
+ */
+
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+/*
+ * Semaphores are complex as we wish to avoid using two variables.
+ * `count' has multiple roles, depending on its value. If it is positive
+ * or zero, there are no waiters. The functions here will never be
+ * called; see <asm/semaphore.h>
+ *
+ * When count is -1 it indicates there is at least one task waiting
+ * for the semaphore.
+ *
+ * When count is less than that, there are '- count - 1' wakeups
+ * pending. ie if it has value -3, there are 2 wakeups pending.
+ *
+ * Note that these functions are only called when there is contention
+ * on the lock, and as such all this is the "non-critical" part of the
+ * whole semaphore business. The critical part is the inline stuff in
+ * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ sem->count--;
+ wake_up(&sem->wait);
+}
+
+#define wakers(count) (-1 - count)
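+
+/* Example: while tasks are blocked and no wakeups are pending, count sits
+ * at -1 and wakers(-1) == 0.  Each __up() decrements count, so after one
+ * __up() count is -2 and wakers(-2) == 1 wakeup is pending for a waiter
+ * to claim in DOWN_TAIL/UPDATE_COUNT below.
+ */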
+
+#define DOWN_HEAD \
+ int ret = 0; \
+ DECLARE_WAITQUEUE(wait, current); \
+ \
+ /* Note that someone is waiting */ \
+ if (sem->count == 0) \
+ sem->count = -1; \
+ \
+ /* protected by the sentry still -- use unlocked version */ \
+ wait.flags = WQ_FLAG_EXCLUSIVE; \
+ __add_wait_queue_tail(&sem->wait, &wait); \
+ lost_race: \
+ spin_unlock_irq(&sem->sentry); \
+
+#define DOWN_TAIL \
+ spin_lock_irq(&sem->sentry); \
+ if (wakers(sem->count) == 0 && ret == 0) \
+ goto lost_race; /* Someone stole our wakeup */ \
+ __remove_wait_queue(&sem->wait, &wait); \
+ current->state = TASK_RUNNING; \
+ if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
+ sem->count = wakers(sem->count);
+
+#define UPDATE_COUNT \
+	sem->count += (sem->count < 0) ? 1 : -1;
+
+
+void __sched __down(struct semaphore * sem)
+{
+ DOWN_HEAD
+
+ for(;;) {
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ /* we can _read_ this without the sentry */
+ if (sem->count != -1)
+ break;
+ schedule();
+ }
+
+ DOWN_TAIL
+ UPDATE_COUNT
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ DOWN_HEAD
+
+ for(;;) {
+ set_task_state(current, TASK_INTERRUPTIBLE);
+ /* we can _read_ this without the sentry */
+ if (sem->count != -1)
+ break;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ }
+
+ DOWN_TAIL
+
+ if (!ret) {
+ UPDATE_COUNT
+ }
+
+ return ret;
+}
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
new file mode 100644
index 000000000000..73e9c34b0948
--- /dev/null
+++ b/arch/parisc/kernel/setup.c
@@ -0,0 +1,368 @@
+/* $Id: setup.c,v 1.8 2000/02/02 04:42:38 prumpf Exp $
+ *
+ * Initial setup-routines for HP 9000 based hardware.
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
+ * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
+ * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
+ * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
+ *
+ * Initial PA-RISC Version: 04-23-1999 by Helge Deller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/initrd.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/seq_file.h>
+#define PCI_DEBUG
+#include <linux/pci.h>
+#undef PCI_DEBUG
+#include <linux/proc_fs.h>
+
+#include <asm/processor.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+#include <asm/machdep.h> /* for pa7300lc_init() proto */
+#include <asm/pdc_chassis.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+char command_line[COMMAND_LINE_SIZE];
+
+/* Intended for ccio/sba/cpu statistics under /proc/bus/{runway|gsc} */
+struct proc_dir_entry * proc_runway_root = NULL;
+struct proc_dir_entry * proc_gsc_root = NULL;
+struct proc_dir_entry * proc_mckinley_root = NULL;
+
+#if !defined(CONFIG_PA20) && (defined(CONFIG_IOMMU_CCIO) || defined(CONFIG_IOMMU_SBA))
+int parisc_bus_is_phys = 1; /* Assume no IOMMU is present */
+EXPORT_SYMBOL(parisc_bus_is_phys);
+#endif
+
+/* This sets the vmerge boundary and size; it's here because it has to
+ * be available on all platforms (zero means no virtual merging). */
+unsigned long parisc_vmerge_boundary = 0;
+unsigned long parisc_vmerge_max_size = 0;
+
+void __init setup_cmdline(char **cmdline_p)
+{
+ extern unsigned int boot_args[];
+
+ /* Collect stuff passed in from the boot loader */
+
+ /* boot_args[0] is free-mem start, boot_args[1] is ptr to command line */
+ if (boot_args[0] < 64) {
+ /* called from hpux boot loader */
+ saved_command_line[0] = '\0';
+ } else {
+ strcpy(saved_command_line, (char *)__va(boot_args[1]));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
+ {
+ initrd_start = (unsigned long)__va(boot_args[2]);
+ initrd_end = (unsigned long)__va(boot_args[3]);
+ }
+#endif
+ }
+
+ strcpy(command_line, saved_command_line);
+ *cmdline_p = command_line;
+}
+
+#ifdef CONFIG_PA11
+void __init dma_ops_init(void)
+{
+ switch (boot_cpu_data.cpu_type) {
+ case pcx:
+ /*
+ * We've got way too many dependencies on 1.1 semantics
+ * to support 1.0 boxes at this point.
+ */
+ panic( "PA-RISC Linux currently only supports machines that conform to\n"
+ "the PA-RISC 1.1 or 2.0 architecture specification.\n");
+
+ case pcxs:
+ case pcxt:
+ hppa_dma_ops = &pcx_dma_ops;
+ break;
+ case pcxl2:
+ pa7300lc_init();
+ case pcxl: /* falls through */
+ hppa_dma_ops = &pcxl_dma_ops;
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+extern int init_per_cpu(int cpuid);
+extern void collect_boot_cpu_data(void);
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef __LP64__
+ extern int parisc_narrow_firmware;
+#endif
+
+ init_per_cpu(smp_processor_id()); /* Set Modes & Enable FP */
+
+#ifdef __LP64__
+ printk(KERN_INFO "The 64-bit Kernel has started...\n");
+#else
+ printk(KERN_INFO "The 32-bit Kernel has started...\n");
+#endif
+
+ pdc_console_init();
+
+#ifdef __LP64__
+ if(parisc_narrow_firmware) {
+ printk(KERN_INFO "Kernel is using PDC in 32-bit mode.\n");
+ }
+#endif
+ setup_pdc();
+ setup_cmdline(cmdline_p);
+ collect_boot_cpu_data();
+ do_memory_inventory(); /* probe for physical memory */
+ parisc_cache_init();
+ paging_init();
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+ /* initialize the LCD/LED after boot_cpu_data is available ! */
+ led_init(); /* LCD/LED initialization */
+#endif
+
+#ifdef CONFIG_PA11
+ dma_ops_init();
+#endif
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con; /* we use take_over_console() later ! */
+#endif
+
+}
+
+/*
+ * Display cpu info for all cpu's.
+ * for parisc this is in processor.c
+ */
+extern int show_cpuinfo (struct seq_file *m, void *v);
+
+static void *
+c_start (struct seq_file *m, loff_t *pos)
+{
+	/* The caller iterates until we return NULL, which signals EOF.
+	 * This could be used to sequence through CPUs, but since our
+	 * show_cpuinfo() prints every CPU in one pass and ignores 'pos',
+	 * we only allow for one "position". */
+ return ((long)*pos < 1) ? (void *)1 : NULL;
+}
+
+static void *
+c_next (struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void
+c_stop (struct seq_file *m, void *v)
+{
+}
+
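+/* For a read of /proc/cpuinfo the seq_file sequence is: c_start(pos=0)
+ * returns (void *)1, show_cpuinfo() prints every CPU in a single pass,
+ * c_next() bumps pos to 1 and returns NULL via c_start(), and c_stop()
+ * finishes the traversal.
+ */
+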
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static void __init parisc_proc_mkdir(void)
+{
+ /*
+ ** Can't call proc_mkdir() until after proc_root_init() has been
+	** called by start_kernel().  In other words, this can't be done
+	** from setup_arch(), which runs long before proc_root_init().
+ */
+ switch (boot_cpu_data.cpu_type) {
+ case pcxl:
+ case pcxl2:
+ if (NULL == proc_gsc_root)
+ {
+ proc_gsc_root = proc_mkdir("bus/gsc", NULL);
+ }
+ break;
+ case pcxt_:
+ case pcxu:
+ case pcxu_:
+ case pcxw:
+ case pcxw_:
+ case pcxw2:
+ if (NULL == proc_runway_root)
+ {
+ proc_runway_root = proc_mkdir("bus/runway", NULL);
+ }
+ break;
+ case mako:
+ if (NULL == proc_mckinley_root)
+ {
+ proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
+ }
+ break;
+ default:
+		/* FIXME: this was added to prevent the compiler
+		 * complaining about the missing pcx, pcxs and pcxt cases.
+		 * I'm assuming they have neither gsc nor runway. */
+ break;
+ }
+}
+
+static struct resource central_bus = {
+ .name = "Central Bus",
+ .start = F_EXTEND(0xfff80000),
+ .end = F_EXTEND(0xfffaffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource local_broadcast = {
+ .name = "Local Broadcast",
+ .start = F_EXTEND(0xfffb0000),
+ .end = F_EXTEND(0xfffdffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource global_broadcast = {
+ .name = "Global Broadcast",
+ .start = F_EXTEND(0xfffe0000),
+ .end = F_EXTEND(0xffffffff),
+ .flags = IORESOURCE_MEM,
+};
+
+static int __init parisc_init_resources(void)
+{
+ int result;
+
+ result = request_resource(&iomem_resource, &central_bus);
+ if (result < 0) {
+ printk(KERN_ERR
+ "%s: failed to claim %s address space!\n",
+ __FILE__, central_bus.name);
+ return result;
+ }
+
+ result = request_resource(&iomem_resource, &local_broadcast);
+ if (result < 0) {
+ printk(KERN_ERR
+		       "%s: failed to claim %s address space!\n",
+ __FILE__, local_broadcast.name);
+ return result;
+ }
+
+ result = request_resource(&iomem_resource, &global_broadcast);
+ if (result < 0) {
+ printk(KERN_ERR
+ "%s: failed to claim %s address space!\n",
+ __FILE__, global_broadcast.name);
+ return result;
+ }
+
+ return 0;
+}
+
+extern void gsc_init(void);
+extern void processor_init(void);
+extern void ccio_init(void);
+extern void hppb_init(void);
+extern void dino_init(void);
+extern void iosapic_init(void);
+extern void lba_init(void);
+extern void sba_init(void);
+extern void eisa_init(void);
+
+static int __init parisc_init(void)
+{
+ parisc_proc_mkdir();
+ parisc_init_resources();
+ do_device_inventory(); /* probe for hardware */
+
+ parisc_pdc_chassis_init();
+
+	/* set up a new led state on systems shipped with a LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
+
+ processor_init();
+ printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
+ boot_cpu_data.cpu_count,
+ boot_cpu_data.cpu_name,
+ boot_cpu_data.cpu_hz / 1000000,
+ boot_cpu_data.cpu_hz % 1000000 );
+
+ parisc_setup_cache_timing();
+
+ /* These are in a non-obvious order, will fix when we have an iotree */
+#if defined(CONFIG_IOSAPIC)
+ iosapic_init();
+#endif
+#if defined(CONFIG_IOMMU_SBA)
+ sba_init();
+#endif
+#if defined(CONFIG_PCI_LBA)
+ lba_init();
+#endif
+
+ /* CCIO before any potential subdevices */
+#if defined(CONFIG_IOMMU_CCIO)
+ ccio_init();
+#endif
+
+ /*
+ * Need to register Asp & Wax before the EISA adapters for the IRQ
+ * regions. EISA must come before PCI to be sure it gets IRQ region
+ * 0.
+ */
+#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
+ gsc_init();
+#endif
+#ifdef CONFIG_EISA
+ eisa_init();
+#endif
+
+#if defined(CONFIG_HPPB)
+ hppb_init();
+#endif
+
+#if defined(CONFIG_GSC_DINO)
+ dino_init();
+#endif
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+ register_led_regions(); /* register LED port info in procfs */
+#endif
+
+ return 0;
+}
+
+arch_initcall(parisc_init);
+
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
new file mode 100644
index 000000000000..9421bb98ea63
--- /dev/null
+++ b/arch/parisc/kernel/signal.c
@@ -0,0 +1,664 @@
+/*
+ * linux/arch/parisc/kernel/signal.c: Architecture-specific signal
+ * handling support.
+ *
+ * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
+ * Copyright (C) 2000 Linuxcare, Inc.
+ *
+ * Based on the ia64, i386, and alpha versions.
+ *
+ * Like the IA-64, we are a recent enough port (we are *starting*
+ * with glibc2.2) that we do not need to support the old non-realtime
+ * Linux signals. Therefore we don't. HP/UX signals will go in
+ * arch/parisc/hpux/signal.c when we figure out how to do them.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
+#include <linux/personality.h>
+#include <asm/ucontext.h>
+#include <asm/rt_sigframe.h>
+#include <asm/uaccess.h>
+#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/offsets.h>
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#include "signal32.h"
+#endif
+
+#define DEBUG_SIG 0
+#define DEBUG_SIG_LEVEL 2
+
+#if DEBUG_SIG
+#define DBG(LEVEL, ...) \
+ ((DEBUG_SIG_LEVEL >= LEVEL) \
+ ? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/* gcc will complain if a pointer is cast to an integer of different
+ * size. If you really need to do this (and we do for an ELF32 user
+ * application in an ELF64 kernel) then you have to do a cast to an
+ * integer of the same size first. The A() macro accomplishes
+ * this. */
+#define A(__x) ((unsigned long)(__x))
+
+int do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+#ifdef __LP64__
+#include "sys32.h"
+#endif
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+#ifdef __LP64__
+ compat_sigset_t newset32;
+
+ if(personality(current->personality) == PER_LINUX32){
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+ if (copy_from_user(&newset32, (compat_sigset_t __user *)unewset, sizeof(newset32)))
+ return -EFAULT;
+ sigset_32to64(&newset,&newset32);
+
+ } else
+#endif
+ {
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ }
+
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sighand->siglock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ regs->gr[28] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs, 1))
+ return -EINTR;
+ }
+}
+
+/*
+ * Do a signal return - restore sigcontext.
+ */
+
+/* Trampoline for calling rt_sigreturn() */
+#define INSN_LDI_R25_0 0x34190000 /* ldi 0,%r25 (in_syscall=0) */
+#define INSN_LDI_R25_1 0x34190002 /* ldi 1,%r25 (in_syscall=1) */
+#define INSN_LDI_R20 0x3414015a /* ldi __NR_rt_sigreturn,%r20 */
+#define INSN_BLE_SR2_R0 0xe4008200 /* be,l 0x100(%sr2,%r0),%sr0,%r31 */
+#define INSN_NOP 0x08000240 /* nop */
+/* For debugging */
+#define INSN_DIE_HORRIBLY 0x68000ccc /* stw %r0,0x666(%sr0,%r0) */
+
+static long
+restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
+{
+ long err = 0;
+
+ err |= __copy_from_user(regs->gr, sc->sc_gr, sizeof(regs->gr));
+ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+ err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
+ err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
+ err |= __get_user(regs->sar, &sc->sc_sar);
+ DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n",
+ regs->iaoq[0],regs->iaoq[1]);
+ DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
+ return err;
+}
+
+void
+sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
+{
+ struct rt_sigframe __user *frame;
+ struct siginfo si;
+ sigset_t set;
+ unsigned long usp = (regs->gr[30] & ~(0x01UL));
+ unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef __LP64__
+ compat_sigset_t compat_set;
+ struct compat_rt_sigframe __user * compat_frame;
+
+ if(personality(current->personality) == PER_LINUX32)
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+
+
+ /* Unwind the user stack to get the rt_sigframe structure. */
+ frame = (struct rt_sigframe __user *)
+ (usp - sigframe_size);
+ DBG(2,"sys_rt_sigreturn: frame is %p\n", frame);
+
+#ifdef __LP64__
+ compat_frame = (struct compat_rt_sigframe __user *)frame;
+
+ if(personality(current->personality) == PER_LINUX32){
+ DBG(2,"sys_rt_sigreturn: ELF32 process.\n");
+ if (__copy_from_user(&compat_set, &compat_frame->uc.uc_sigmask, sizeof(compat_set)))
+ goto give_sigsegv;
+ sigset_32to64(&set,&compat_set);
+ } else
+#endif
+ {
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto give_sigsegv;
+ }
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ /* Good thing we saved the old gr[30], eh? */
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX32){
+ DBG(1,"sys_rt_sigreturn: compat_frame->uc.uc_mcontext 0x%p\n",
+ &compat_frame->uc.uc_mcontext);
+// FIXME: Load upper half from register file
+ if (restore_sigcontext32(&compat_frame->uc.uc_mcontext,
+ &compat_frame->regs, regs))
+ goto give_sigsegv;
+ DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
+ usp, &compat_frame->uc.uc_stack);
+ if (do_sigaltstack32(&compat_frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ goto give_sigsegv;
+ } else
+#endif
+ {
+ DBG(1,"sys_rt_sigreturn: frame->uc.uc_mcontext 0x%p\n",
+ &frame->uc.uc_mcontext);
+ if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
+ goto give_sigsegv;
+ DBG(1,"sys_rt_sigreturn: usp %#08lx stack 0x%p\n",
+ usp, &frame->uc.uc_stack);
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL, usp) == -EFAULT)
+ goto give_sigsegv;
+ }
+
+
+
+ /* If we are on the syscall path IAOQ will not be restored, and
+ * if we are on the interrupt path we must not corrupt gr31.
+ */
+ if (in_syscall)
+ regs->gr[31] = regs->iaoq[0];
+#if DEBUG_SIG
+ DBG(1,"sys_rt_sigreturn: returning to %#lx, DUMPING REGS:\n", regs->iaoq[0]);
+ show_regs(regs);
+#endif
+ return;
+
+give_sigsegv:
+ DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SI_KERNEL;
+ si.si_pid = current->pid;
+ si.si_uid = current->uid;
+ si.si_addr = &frame->uc;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void __user *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+	/*FIXME: ELF32 and ELF64 have different frame_size values, but since
+	  we don't use the parameter it doesn't matter */
+
+ DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n",
+ (unsigned long)ka, sp, frame_size);
+
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp; /* Stacks grow up! */
+
+ DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp);
+ return (void __user *) sp; /* Stacks grow up. Fun. */
+}
+
+static long
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, int in_syscall)
+
+{
+ unsigned long flags = 0;
+ long err = 0;
+
+ if (on_sig_stack((unsigned long) sc))
+ flags |= PARISC_SC_FLAG_ONSTACK;
+ if (in_syscall) {
+ flags |= PARISC_SC_FLAG_IN_SYSCALL;
+ /* regs->iaoq is undefined in the syscall return path */
+ err |= __put_user(regs->gr[31], &sc->sc_iaoq[0]);
+ err |= __put_user(regs->gr[31]+4, &sc->sc_iaoq[1]);
+ err |= __put_user(regs->sr[3], &sc->sc_iasq[0]);
+ err |= __put_user(regs->sr[3], &sc->sc_iasq[1]);
+ DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (in syscall)\n",
+ regs->gr[31], regs->gr[31]+4);
+ } else {
+ err |= __copy_to_user(sc->sc_iaoq, regs->iaoq, sizeof(regs->iaoq));
+ err |= __copy_to_user(sc->sc_iasq, regs->iasq, sizeof(regs->iasq));
+ DBG(1,"setup_sigcontext: iaoq %#lx / %#lx (not in syscall)\n",
+ regs->iaoq[0], regs->iaoq[1]);
+ }
+
+ err |= __put_user(flags, &sc->sc_flags);
+ err |= __copy_to_user(sc->sc_gr, regs->gr, sizeof(regs->gr));
+ err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+ err |= __put_user(regs->sar, &sc->sc_sar);
+ DBG(1,"setup_sigcontext: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
+
+static long
+setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs, int in_syscall)
+{
+ struct rt_sigframe __user *frame;
+ unsigned long rp, usp;
+ unsigned long haddr, sigframe_size;
+ struct siginfo si;
+ int err = 0;
+#ifdef __LP64__
+ compat_int_t compat_val;
+ struct compat_rt_sigframe __user * compat_frame;
+ compat_sigset_t compat_set;
+#endif
+
+ usp = (regs->gr[30] & ~(0x01UL));
+ /*FIXME: frame_size parameter is unused, remove it. */
+ frame = get_sigframe(ka, usp, sizeof(*frame));
+
+ DBG(1,"SETUP_RT_FRAME: START\n");
+ DBG(1,"setup_rt_frame: frame %p info %p\n", frame, info);
+
+
+#ifdef __LP64__
+
+ compat_frame = (struct compat_rt_sigframe __user *)frame;
+
+ if(personality(current->personality) == PER_LINUX32) {
+ DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &compat_frame->info);
+ err |= compat_copy_siginfo_to_user(&compat_frame->info, info);
+ DBG(1,"SETUP_RT_FRAME: 1\n");
+ compat_val = (compat_int_t)current->sas_ss_sp;
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_sp);
+ DBG(1,"SETUP_RT_FRAME: 2\n");
+ compat_val = (compat_int_t)current->sas_ss_size;
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_size);
+ DBG(1,"SETUP_RT_FRAME: 3\n");
+ compat_val = sas_ss_flags(regs->gr[30]);
+ err |= __put_user(compat_val, &compat_frame->uc.uc_stack.ss_flags);
+ DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &compat_frame->uc);
+ DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &compat_frame->uc.uc_mcontext);
+ err |= setup_sigcontext32(&compat_frame->uc.uc_mcontext,
+ &compat_frame->regs, regs, in_syscall);
+ sigset_64to32(&compat_set,set);
+ err |= __copy_to_user(&compat_frame->uc.uc_sigmask, &compat_set, sizeof(compat_set));
+ } else
+#endif
+ {
+ DBG(1,"setup_rt_frame: frame->info = 0x%p\n", &frame->info);
+ err |= copy_siginfo_to_user(&frame->info, info);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= __put_user(sas_ss_flags(regs->gr[30]),
+ &frame->uc.uc_stack.ss_flags);
+ DBG(1,"setup_rt_frame: frame->uc = 0x%p\n", &frame->uc);
+ DBG(1,"setup_rt_frame: frame->uc.uc_mcontext = 0x%p\n", &frame->uc.uc_mcontext);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, in_syscall);
+		/* FIXME: Should probably be converted as well for the compat case */
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. The first words of tramp are used to
+ save the previous sigrestartblock trampoline that might be
+ on the stack. We start the sigreturn trampoline at
+ SIGRESTARTBLOCK_TRAMP+X. */
+ err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
+ err |= __put_user(INSN_LDI_R20,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
+ err |= __put_user(INSN_BLE_SR2_R0,
+ &frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
+ err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);
+
+#if DEBUG_SIG
+ /* Assert that we're flushing in the correct space... */
+ {
+ int sid;
+ asm ("mfsp %%sr3,%0" : "=r" (sid));
+ DBG(1,"setup_rt_frame: Flushing 64 bytes at space %#x offset %p\n",
+ sid, frame->tramp);
+ }
+#endif
+
+ flush_user_dcache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[TRAMP_SIZE]);
+ flush_user_icache_range((unsigned long) &frame->tramp[0],
+ (unsigned long) &frame->tramp[TRAMP_SIZE]);
+
+	/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
+ * TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
+ * So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
+ */
+ rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = A(ka->sa.sa_handler);
+ /* The sa_handler may be a pointer to a function descriptor */
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX32) {
+#endif
+ if (haddr & PA_PLABEL_FDESC) {
+ Elf32_Fdesc fdesc;
+ Elf32_Fdesc __user *ufdesc = (Elf32_Fdesc __user *)A(haddr & ~3);
+
+ err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = fdesc.addr;
+ regs->gr[19] = fdesc.gp;
+ }
+#ifdef __LP64__
+ } else {
+ Elf64_Fdesc fdesc;
+ Elf64_Fdesc __user *ufdesc = (Elf64_Fdesc __user *)A(haddr & ~3);
+
+ err = __copy_from_user(&fdesc, ufdesc, sizeof(fdesc));
+
+ if (err)
+ goto give_sigsegv;
+
+ haddr = fdesc.addr;
+ regs->gr[19] = fdesc.gp;
+ DBG(1,"setup_rt_frame: 64 bit signal, exe=%#lx, r19=%#lx, in_syscall=%d\n",
+ haddr, regs->gr[19], in_syscall);
+ }
+#endif
+
+ /* The syscall return path will create IAOQ values from r31.
+ */
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE;
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX32)
+ sigframe_size = PARISC_RT_SIGFRAME_SIZE32;
+#endif
+ if (in_syscall) {
+ regs->gr[31] = haddr;
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX)
+ sigframe_size |= 1;
+#endif
+ } else {
+ unsigned long psw = USER_PSW;
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX)
+ psw |= PSW_W;
+#endif
+
+ /* If we are singlestepping, arrange a trap to be delivered
+ when we return to userspace. Note the semantics -- we
+ should trap before the first insn in the handler is
+ executed. Ref:
+ http://sources.redhat.com/ml/gdb/2004-11/msg00245.html
+ */
+ if (pa_psw(current)->r) {
+ pa_psw(current)->r = 0;
+ psw |= PSW_R;
+ mtctl(-1, 0);
+ }
+
+ regs->gr[0] = psw;
+ regs->iaoq[0] = haddr | 3;
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ }
+
+ regs->gr[2] = rp; /* userland return pointer */
+ regs->gr[26] = sig; /* signal number */
+
+#ifdef __LP64__
+ if(personality(current->personality) == PER_LINUX32){
+ regs->gr[25] = A(&compat_frame->info); /* siginfo pointer */
+ regs->gr[24] = A(&compat_frame->uc); /* ucontext pointer */
+ } else
+#endif
+ {
+ regs->gr[25] = A(&frame->info); /* siginfo pointer */
+ regs->gr[24] = A(&frame->uc); /* ucontext pointer */
+ }
+
+ DBG(1,"setup_rt_frame: making sigreturn frame: %#lx + %#lx = %#lx\n",
+ regs->gr[30], sigframe_size,
+ regs->gr[30] + sigframe_size);
+ /* Raise the user stack pointer to make a proper call frame. */
+ regs->gr[30] = (A(frame) + sigframe_size);
+
+
+ DBG(1,"setup_rt_frame: sig deliver (%s,%d) frame=0x%p sp=%#lx iaoq=%#lx/%#lx rp=%#lx\n",
+ current->comm, current->pid, frame, regs->gr[30],
+ regs->iaoq[0], regs->iaoq[1], rp);
+
+ return 1;
+
+give_sigsegv:
+ DBG(1,"setup_rt_frame: sending SIGSEGV\n");
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SI_KERNEL;
+ si.si_pid = current->pid;
+ si.si_uid = current->uid;
+ si.si_addr = frame;
+ force_sig_info(SIGSEGV, &si, current);
+ return 0;
+}
+
+/*
+ * OK, we're invoking a handler.
+ */
+
+static long
+handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
+ sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+{
+ DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
+ sig, ka, info, oldset, regs);
+
+ /* Set up the stack frame */
+ if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
+ return 0;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sighand->siglock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+ return 1;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * We need to be able to restore the syscall arguments (r21-r26) to
+ * restart syscalls. Thus, the syscall path should save them in the
+ * pt_regs structure (it's okay to do so since they are caller-save
+ * registers). As noted below, the syscall number gets restored for
+ * us due to the magic of delayed branching.
+ */
+
+asmlinkage int
+do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall)
+{
+ siginfo_t info;
+ struct k_sigaction ka;
+ int signr;
+
+ DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n",
+ oldset, regs, regs->sr[7], in_syscall);
+
+ /* Everyone else checks to see if they are in kernel mode at
+ this point and exits if that's the case. I'm not sure why
+ we would be called in that case, but for some reason we
+ are. */
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ DBG(1,"do_signal: oldset %08lx / %08lx\n",
+ oldset->sig[0], oldset->sig[1]);
+
+
+ /* May need to force signal if handle_signal failed to deliver */
+ while (1) {
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
+
+ if (signr <= 0)
+ break;
+
+ /* Restart a system call if necessary. */
+ if (in_syscall) {
+ /* Check the return code */
+ switch (regs->gr[28]) {
+ case -ERESTART_RESTARTBLOCK:
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ case -ERESTARTNOHAND:
+ DBG(1,"ERESTARTNOHAND: returning -EINTR\n");
+ regs->gr[28] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka.sa.sa_flags & SA_RESTART)) {
+ DBG(1,"ERESTARTSYS: putting -EINTR\n");
+ regs->gr[28] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* A syscall is just a branch, so all
+ we have to do is fiddle the return pointer. */
+ regs->gr[31] -= 8; /* delayed branching */
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+ break;
+ }
+ }
+ /* Whee! Actually deliver the signal. If the
+ delivery failed, we need to continue to iterate in
+ this loop so we can deliver the SIGSEGV... */
+ if (handle_signal(signr, &info, &ka, oldset, regs, in_syscall)) {
+ DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
+ regs->gr[28]);
+ return 1;
+ }
+ }
+ /* end of while(1) looping forever if we can't force a signal */
+
+ /* Did we come from a system call? */
+ if (in_syscall) {
+ /* Restart the system call - no handlers present */
+ if (regs->gr[28] == -ERESTART_RESTARTBLOCK) {
+ unsigned int *usp = (unsigned int *)regs->gr[30];
+
+ /* Setup a trampoline to restart the syscall
+ * with __NR_restart_syscall
+ *
+ * 0: <return address (orig r31)>
+ * 4: <2nd half for 64-bit>
+ * 8: ldw 0(%sp), %r31
+ * 12: be 0x100(%sr2, %r0)
+ * 16: ldi __NR_restart_syscall, %r20
+ */
+#ifndef __LP64__
+ put_user(regs->gr[31], &usp[0]);
+ put_user(0x0fc0109f, &usp[2]);
+#else
+ put_user(regs->gr[31] >> 32, &usp[0]);
+ put_user(regs->gr[31] & 0xffffffff, &usp[1]);
+ put_user(0x0fc010df, &usp[2]);
+#endif
+ put_user(0xe0008200, &usp[3]);
+ put_user(0x34140000, &usp[4]);
+
+ /* Stack is 64-byte aligned, and we only
+ * need to flush 1 cache line */
+ asm("fdc 0(%%sr3, %0)\n"
+ "fic 0(%%sr3, %0)\n"
+ "sync\n"
+ : : "r"(regs->gr[30]));
+
+ regs->gr[31] = regs->gr[30] + 8;
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+ } else if (regs->gr[28] == -ERESTARTNOHAND ||
+ regs->gr[28] == -ERESTARTSYS ||
+ regs->gr[28] == -ERESTARTNOINTR) {
+ /* Hooray for delayed branching. We don't
+ have to restore %r20 (the system call
+ number) because it gets loaded in the delay
+ slot of the branch external instruction. */
+ regs->gr[31] -= 8;
+ /* Preserve original r28. */
+ regs->gr[28] = regs->orig_r28;
+ }
+ }
+
+ DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
+ regs->gr[28]);
+
+ return 0;
+}
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
new file mode 100644
index 000000000000..0792e20efef3
--- /dev/null
+++ b/arch/parisc/kernel/signal32.c
@@ -0,0 +1,400 @@
+/* Signal support for 32-bit userspace (compat) on 64-bit kernels
+ *
+ * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ * Code was mostly borrowed from kernel/signal.c.
+ * See kernel/signal.c for additional Copyrights.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/unistd.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <asm/compat_signal.h>
+#include <asm/uaccess.h>
+
+#include "signal32.h"
+#include "sys32.h"
+
+#define DEBUG_COMPAT_SIG 0
+#define DEBUG_COMPAT_SIG_LEVEL 2
+
+#if DEBUG_COMPAT_SIG
+#define DBG(LEVEL, ...) \
+ ((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \
+ ? printk(__VA_ARGS__) : (void) 0)
+#else
+#define DBG(LEVEL, ...)
+#endif
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+inline void
+sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
+{
+ s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32);
+}
+
+inline void
+sigset_64to32(compat_sigset_t *s32, sigset_t *s64)
+{
+ s32->sig[0] = s64->sig[0] & 0xffffffffUL;
+ s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL;
+}
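+
+/*
+ * A minimal sketch of the round trip through the two helpers above
+ * (the mask value is purely illustrative):
+ *
+ *	sigset_t s64 = { .sig = { 0x0000000400000002UL } };
+ *	compat_sigset_t s32;
+ *
+ *	sigset_64to32(&s32, &s64);	s32.sig[0] == 0x00000002
+ *					s32.sig[1] == 0x00000004
+ *	sigset_32to64(&s64, &s32);	reassembles 0x0000000400000002
+ */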
+
+static int
+put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+{
+ compat_sigset_t s;
+
+ if (sz != sizeof *set) panic("put_sigset32()");
+ sigset_64to32(&s, set);
+
+ return copy_to_user(up, &s, sizeof s);
+}
+
+static int
+get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
+{
+ compat_sigset_t s;
+ int r;
+
+	if (sz != sizeof *set) panic("get_sigset32()");
+
+ if ((r = copy_from_user(&s, up, sz)) == 0) {
+ sigset_32to64(set, &s);
+ }
+
+ return r;
+}
+
+int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
+ unsigned int sigsetsize)
+{
+ sigset_t old_set, new_set;
+ int ret;
+
+ if (set && get_sigset32(set, &new_set, sigsetsize))
+ return -EFAULT;
+
+ KERNEL_SYSCALL(ret, sys_rt_sigprocmask, how, set ? (sigset_t __user *)&new_set : NULL,
+ oset ? (sigset_t __user *)&old_set : NULL, sigsetsize);
+
+ if (!ret && oset && put_sigset32(oset, &old_set, sigsetsize))
+ return -EFAULT;
+
+ return ret;
+}
+
+
+int sys32_rt_sigpending(compat_sigset_t __user *uset, unsigned int sigsetsize)
+{
+ int ret;
+ sigset_t set;
+
+ KERNEL_SYSCALL(ret, sys_rt_sigpending, (sigset_t __user *)&set, sigsetsize);
+
+ if (!ret && put_sigset32(uset, &set, sigsetsize))
+ return -EFAULT;
+
+ return ret;
+}
+
+long
+sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, struct sigaction32 __user *oact,
+ size_t sigsetsize)
+{
+ struct k_sigaction32 new_sa32, old_sa32;
+ struct k_sigaction new_sa, old_sa;
+ int ret = -EINVAL;
+
+ if (act) {
+ if (copy_from_user(&new_sa32.sa, act, sizeof new_sa32.sa))
+ return -EFAULT;
+ new_sa.sa.sa_handler = (__sighandler_t)(unsigned long)new_sa32.sa.sa_handler;
+ new_sa.sa.sa_flags = new_sa32.sa.sa_flags;
+ sigset_32to64(&new_sa.sa.sa_mask, &new_sa32.sa.sa_mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+
+ if (!ret && oact) {
+ sigset_64to32(&old_sa32.sa.sa_mask, &old_sa.sa.sa_mask);
+ old_sa32.sa.sa_flags = old_sa.sa.sa_flags;
+ old_sa32.sa.sa_handler = (__sighandler_t32)(unsigned long)old_sa.sa.sa_handler;
+ if (copy_to_user(oact, &old_sa32.sa, sizeof old_sa32.sa))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+int
+do_sigaltstack32 (const compat_stack_t __user *uss32, compat_stack_t __user *uoss32, unsigned long sp)
+{
+ compat_stack_t ss32, oss32;
+ stack_t ss, oss;
+ stack_t *ssp = NULL, *ossp = NULL;
+ int ret;
+
+ if (uss32) {
+ if (copy_from_user(&ss32, uss32, sizeof ss32))
+ return -EFAULT;
+
+ ss.ss_sp = (void __user *)(unsigned long)ss32.ss_sp;
+ ss.ss_flags = ss32.ss_flags;
+ ss.ss_size = ss32.ss_size;
+
+ ssp = &ss;
+ }
+
+ if (uoss32)
+ ossp = &oss;
+
+ KERNEL_SYSCALL(ret, do_sigaltstack, (const stack_t __user *)ssp, (stack_t __user *)ossp, sp);
+
+ if (!ret && uoss32) {
+ oss32.ss_sp = (unsigned int)(unsigned long)oss.ss_sp;
+ oss32.ss_flags = oss.ss_flags;
+ oss32.ss_size = oss.ss_size;
+ if (copy_to_user(uoss32, &oss32, sizeof *uoss32))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+long
+restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
+ struct pt_regs *regs)
+{
+ long err = 0;
+ compat_uint_t compat_reg;
+ compat_uint_t compat_regt;
+ int regn;
+
+ /* When loading 32-bit values into 64-bit registers make
+ sure to clear the upper 32-bits */
+ DBG(2,"restore_sigcontext32: PER_LINUX32 process\n");
+ DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs);
+ DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc));
+ for(regn=0; regn < 32; regn++){
+ err |= __get_user(compat_reg,&sc->sc_gr[regn]);
+ regs->gr[regn] = compat_reg;
+ /* Load upper half */
+ err |= __get_user(compat_regt,&rf->rf_gr[regn]);
+ regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n",
+ regn, regs->gr[regn], compat_regt, compat_reg);
+ }
+ DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr));
+ /* XXX: BE WARNED FR's are 64-BIT! */
+ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr));
+
+ /* Better safe than sorry, pass __get_user two things of
+ the same size and let gcc do the upward conversion to
+ 64-bits */
+ err |= __get_user(compat_reg, &sc->sc_iaoq[0]);
+ /* Load upper half */
+ err |= __get_user(compat_regt, &rf->rf_iaoq[0]);
+ regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+
+ err |= __get_user(compat_reg, &sc->sc_iaoq[1]);
+ /* Load upper half */
+ err |= __get_user(compat_regt, &rf->rf_iaoq[1]);
+ regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n",
+ &sc->sc_iaoq[1],compat_reg);
+ DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n",
+ regs->iaoq[0],regs->iaoq[1]);
+
+ err |= __get_user(compat_reg, &sc->sc_iasq[0]);
+ /* Load the upper half for iasq */
+ err |= __get_user(compat_regt, &rf->rf_iasq[0]);
+ regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt);
+
+ err |= __get_user(compat_reg, &sc->sc_iasq[1]);
+ /* Load the upper half for iasq */
+ err |= __get_user(compat_regt, &rf->rf_iasq[1]);
+ regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n",
+ regs->iasq[0],regs->iasq[1]);
+
+ err |= __get_user(compat_reg, &sc->sc_sar);
+ /* Load the upper half for sar */
+ err |= __get_user(compat_regt, &rf->rf_sar);
+ regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg;
+ DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt);
+ DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar);
+ DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
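+
+/*
+ * A small worked example of the 32/64 reassembly used above, with
+ * purely illustrative values: if the sigcontext holds the lower word
+ * compat_reg == 0xdeadbeef and the regfile holds the upper word
+ * compat_regt == 0x00000001, the restored 64-bit register is
+ * ((u64)0x00000001 << 32) | 0xdeadbeef == 0x00000001deadbeef.
+ */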
+
+/*
+ * Set up the sigcontext structure for this process.
+ * This is not an easy task if the kernel is 64-bit: it will require
+ * that we examine the process personality to determine if we need to
+ * truncate for a 32-bit userspace.
+ */
+long
+setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf,
+ struct pt_regs *regs, int in_syscall)
+{
+ compat_int_t flags = 0;
+ long err = 0;
+ compat_uint_t compat_reg;
+ compat_uint_t compat_regb;
+ int regn;
+
+ if (on_sig_stack((unsigned long) sc))
+ flags |= PARISC_SC_FLAG_ONSTACK;
+
+ if (in_syscall) {
+
+ DBG(1,"setup_sigcontext32: in_syscall\n");
+
+ flags |= PARISC_SC_FLAG_IN_SYSCALL;
+ /* Truncate gr31 */
+ compat_reg = (compat_uint_t)(regs->gr[31]);
+ /* regs->iaoq is undefined in the syscall return path */
+ err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+
+ /* Store upper half */
+		compat_reg = (compat_uint_t)(regs->gr[31] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->gr[31]+4);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+ &sc->sc_iaoq[1], compat_reg);
+ /* Store upper half */
+		compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+
+ /* Truncate sr3 */
+ compat_reg = (compat_uint_t)(regs->sr[3]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[1]);
+
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->sr[3] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+ err |= __put_user(compat_reg, &rf->rf_iasq[1]);
+
+ DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+ DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
+ DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
+ regs->gr[31], regs->gr[31]+4);
+
+ } else {
+
+ compat_reg = (compat_uint_t)(regs->iaoq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n",
+ &sc->sc_iaoq[0], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[0]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg);
+
+ compat_reg = (compat_uint_t)(regs->iaoq[1]);
+ err |= __put_user(compat_reg, &sc->sc_iaoq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n",
+ &sc->sc_iaoq[1], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iaoq[1]);
+ DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->iasq[0]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[0]);
+ DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n",
+ &sc->sc_iasq[0], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iasq[0] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[0]);
+ DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg);
+
+
+ compat_reg = (compat_uint_t)(regs->iasq[1]);
+ err |= __put_user(compat_reg, &sc->sc_iasq[1]);
+ DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n",
+ &sc->sc_iasq[1], compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->iasq[1] >> 32);
+ err |= __put_user(compat_reg, &rf->rf_iasq[1]);
+ DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg);
+
+ /* Print out the IAOQ for debugging */
+		DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n",
+ regs->iaoq[0], regs->iaoq[1]);
+ }
+
+ err |= __put_user(flags, &sc->sc_flags);
+
+ DBG(1,"setup_sigcontext32: Truncating general registers.\n");
+
+ for(regn=0; regn < 32; regn++){
+ /* Truncate a general register */
+ compat_reg = (compat_uint_t)(regs->gr[regn]);
+ err |= __put_user(compat_reg, &sc->sc_gr[regn]);
+ /* Store upper half */
+ compat_regb = (compat_uint_t)(regs->gr[regn] >> 32);
+ err |= __put_user(compat_regb, &rf->rf_gr[regn]);
+
+ /* DEBUG: Write out the "upper / lower" register data */
+ DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn,
+ compat_regb, compat_reg);
+ }
+
+ /* Copy the floating point registers (same size)
+ XXX: BE WARNED FR's are 64-BIT! */
+ DBG(1,"setup_sigcontext32: Copying from regs to sc, "
+ "sc->sc_fr size = %#lx, regs->fr size = %#lx\n",
+ sizeof(regs->fr), sizeof(sc->sc_fr));
+ err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr));
+
+ compat_reg = (compat_uint_t)(regs->sar);
+ err |= __put_user(compat_reg, &sc->sc_sar);
+ DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg);
+ /* Store upper half */
+ compat_reg = (compat_uint_t)(regs->sar >> 32);
+ err |= __put_user(compat_reg, &rf->rf_sar);
+ DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg);
+ DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]);
+
+ return err;
+}
diff --git a/arch/parisc/kernel/signal32.h b/arch/parisc/kernel/signal32.h
new file mode 100644
index 000000000000..4d1569e717cc
--- /dev/null
+++ b/arch/parisc/kernel/signal32.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2003 Carlos O'Donell <carlos at parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _PARISC64_KERNEL_SIGNAL32_H
+#define _PARISC64_KERNEL_SIGNAL32_H
+
+#include <linux/compat.h>
+#include <asm/compat_signal.h>
+#include <asm/compat_rt_sigframe.h>
+
+/* ELF32 signal handling */
+
+struct k_sigaction32 {
+ struct compat_sigaction sa;
+};
+
+void sigset_32to64(sigset_t *s64, compat_sigset_t *s32);
+void sigset_64to32(compat_sigset_t *s32, sigset_t *s64);
+int do_sigaltstack32 (const compat_stack_t __user *uss32,
+ compat_stack_t __user *uoss32, unsigned long sp);
+long restore_sigcontext32(struct compat_sigcontext __user *sc,
+ struct compat_regfile __user *rf,
+ struct pt_regs *regs);
+long setup_sigcontext32(struct compat_sigcontext __user *sc,
+ struct compat_regfile __user *rf,
+ struct pt_regs *regs, int in_syscall);
+
+#endif
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
new file mode 100644
index 000000000000..bcc7e83f5142
--- /dev/null
+++ b/arch/parisc/kernel/smp.c
@@ -0,0 +1,723 @@
+/*
+** SMP Support
+**
+** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
+**
+** Lots of stuff stolen from arch/alpha/kernel/smp.c
+** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
+**
+** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
+** -grant (1/12/2001)
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+*/
+#undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */
+
+#include <linux/autoconf.h>
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/current.h>
+#include <asm/delay.h>
+#include <asm/pgalloc.h> /* for flush_tlb_all() proto/macro */
+
+#include <asm/io.h>
+#include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
+#include <asm/mmu_context.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
+#define kDEBUG 0
+
+DEFINE_SPINLOCK(smp_lock);
+
+volatile struct task_struct *smp_init_current_idle_task;
+
+static volatile int cpu_now_booting = 0; /* track which CPU is booting */
+
+static int parisc_max_cpus = 1;
+
+/* online cpus are ones that we've managed to bring up completely
+ * possible cpus are all valid cpus
+ * present cpus are all detected cpus
+ *
+ * On startup we bring up the "possible" cpus. Since we discover
+ * CPUs later, we add them as hotplug, so the possible cpu mask is
+ * empty in the beginning.
+ */
+
+cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
+cpumask_t cpu_possible_map = CPU_MASK_ALL; /* Bitmap of possible CPUs */
+
+EXPORT_SYMBOL(cpu_online_map);
+EXPORT_SYMBOL(cpu_possible_map);
+
+
+struct smp_call_struct {
+ void (*func) (void *info);
+ void *info;
+ long wait;
+ atomic_t unstarted_count;
+ atomic_t unfinished_count;
+};
+static volatile struct smp_call_struct *smp_call_function_data;
+
+enum ipi_message_type {
+ IPI_NOP=0,
+ IPI_RESCHEDULE=1,
+ IPI_CALL_FUNC,
+ IPI_CPU_START,
+ IPI_CPU_STOP,
+ IPI_CPU_TEST
+};
+
+
+/********** SMP inter processor interrupt and communication routines */
+
+#undef PER_CPU_IRQ_REGION
+#ifdef PER_CPU_IRQ_REGION
+/* XXX REVISIT Ignore for now.
+** *May* need this "hook" to register IPI handler
+** once we have perCPU ExtIntr switch tables.
+*/
+static void
+ipi_init(int cpuid)
+{
+
+ /* If CPU is present ... */
+#ifdef ENTRY_SYS_CPUS
+ /* *and* running (not stopped) ... */
+#error iCOD support wants state checked here.
+#endif
+
+#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
+
+ if(cpu_online(cpuid) )
+ {
+ switch_to_idle_task(current);
+ }
+
+ return;
+}
+#endif
+
+
+/*
+** Yoink this CPU from the runnable list...
+**
+*/
+static void
+halt_processor(void)
+{
+#ifdef ENTRY_SYS_CPUS
+#error halt_processor() needs rework
+/*
+** o migrate I/O interrupts off this CPU.
+** o leave IPI enabled - __cli() will disable IPI.
+** o leave CPU in online map - just change the state
+*/
+ cpu_data[this_cpu].state = STATE_STOPPED;
+ mark_bh(IPI_BH);
+#else
+ /* REVISIT : redirect I/O Interrupts to another CPU? */
+ /* REVISIT : does PM *know* this CPU isn't available? */
+ cpu_clear(smp_processor_id(), cpu_online_map);
+ local_irq_disable();
+ for (;;)
+ ;
+#endif
+}
+
+
+irqreturn_t
+ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ int this_cpu = smp_processor_id();
+ struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+ unsigned long ops;
+ unsigned long flags;
+
+ /* Count this now; we may make a call that never returns. */
+ p->ipi_count++;
+
+ mb(); /* Order interrupt and bit testing. */
+
+ for (;;) {
+ spin_lock_irqsave(&(p->lock),flags);
+ ops = p->pending_ipi;
+ p->pending_ipi = 0;
+ spin_unlock_irqrestore(&(p->lock),flags);
+
+ mb(); /* Order bit clearing and data access. */
+
+ if (!ops)
+ break;
+
+ while (ops) {
+ unsigned long which = ffz(~ops);
+
+ switch (which) {
+ case IPI_RESCHEDULE:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
+#endif /* kDEBUG */
+ ops &= ~(1 << IPI_RESCHEDULE);
+ /*
+ * Reschedule callback. Everything to be
+ * done is done by the interrupt return path.
+ */
+ break;
+
+ case IPI_CALL_FUNC:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
+#endif /* kDEBUG */
+ ops &= ~(1 << IPI_CALL_FUNC);
+ {
+ volatile struct smp_call_struct *data;
+ void (*func)(void *info);
+ void *info;
+ int wait;
+
+ data = smp_call_function_data;
+ func = data->func;
+ info = data->info;
+ wait = data->wait;
+
+ mb();
+ atomic_dec ((atomic_t *)&data->unstarted_count);
+
+ /* At this point, *data can't
+ * be relied upon.
+ */
+
+ (*func)(info);
+
+ /* Notify the sending CPU that the
+ * task is done.
+ */
+ mb();
+ if (wait)
+ atomic_dec ((atomic_t *)&data->unfinished_count);
+ }
+ break;
+
+ case IPI_CPU_START:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
+#endif /* kDEBUG */
+ ops &= ~(1 << IPI_CPU_START);
+#ifdef ENTRY_SYS_CPUS
+ p->state = STATE_RUNNING;
+#endif
+ break;
+
+ case IPI_CPU_STOP:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
+#endif /* kDEBUG */
+ ops &= ~(1 << IPI_CPU_STOP);
+#ifdef ENTRY_SYS_CPUS
+#else
+ halt_processor();
+#endif
+ break;
+
+ case IPI_CPU_TEST:
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
+#endif /* kDEBUG */
+ ops &= ~(1 << IPI_CPU_TEST);
+ break;
+
+ default:
+ printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
+ this_cpu, which);
+ ops &= ~(1 << which);
+ return IRQ_NONE;
+ } /* Switch */
+ } /* while (ops) */
+ }
+ return IRQ_HANDLED;
+}
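+
+/*
+ * Note on the bit scan above: ffz(~ops) yields the index of the lowest
+ * set bit in ops.  For example, if both IPI_RESCHEDULE and IPI_CALL_FUNC
+ * are pending, ops == 0x06; the loop first sees which == 1, clears that
+ * bit, and then sees which == 2.
+ */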
+
+
+static inline void
+ipi_send(int cpu, enum ipi_message_type op)
+{
+ struct cpuinfo_parisc *p = &cpu_data[cpu];
+ unsigned long flags;
+
+ spin_lock_irqsave(&(p->lock),flags);
+ p->pending_ipi |= 1 << op;
+ gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+ spin_unlock_irqrestore(&(p->lock),flags);
+}
+
+
+static inline void
+send_IPI_single(int dest_cpu, enum ipi_message_type op)
+{
+ if (dest_cpu == NO_PROC_ID) {
+ BUG();
+ return;
+ }
+
+ ipi_send(dest_cpu, op);
+}
+
+static inline void
+send_IPI_allbutself(enum ipi_message_type op)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i) && i != smp_processor_id())
+ send_IPI_single(i, op);
+ }
+}
+
+
+inline void
+smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
+
+static inline void
+smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
+
+void
+smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+
+
+/**
+ * Run a function on all other CPUs.
+ * <func> The function to run. This must be fast and non-blocking.
+ * <info> An arbitrary pointer to pass to the function.
+ * <retry> If true, keep retrying until ready.
+ * <wait> If true, wait until function has completed on other CPUs.
+ * [RETURNS] 0 on success, else a negative status code.
+ *
+ * Does not return until remote CPUs are nearly ready to execute <func>
+ * or have executed.
+ */
+
+int
+smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+{
+ struct smp_call_struct data;
+ unsigned long timeout;
+ static DEFINE_SPINLOCK(lock);
+ int retries = 0;
+
+ if (num_online_cpus() < 2)
+ return 0;
+
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
+ data.func = func;
+ data.info = info;
+ data.wait = wait;
+ atomic_set(&data.unstarted_count, num_online_cpus() - 1);
+ atomic_set(&data.unfinished_count, num_online_cpus() - 1);
+
+ if (retry) {
+ spin_lock (&lock);
+ while (smp_call_function_data != 0)
+ barrier();
+ }
+ else {
+ spin_lock (&lock);
+ if (smp_call_function_data) {
+ spin_unlock (&lock);
+ return -EBUSY;
+ }
+ }
+
+ smp_call_function_data = &data;
+ spin_unlock (&lock);
+
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself(IPI_CALL_FUNC);
+
+ retry:
+ /* Wait for response */
+ timeout = jiffies + HZ;
+ while ( (atomic_read (&data.unstarted_count) > 0) &&
+ time_before (jiffies, timeout) )
+ barrier ();
+
+ if (atomic_read (&data.unstarted_count) > 0) {
+ printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n",
+ smp_processor_id(), ++retries);
+ goto retry;
+ }
+ /* We either got one or timed out. Release the lock */
+
+ mb();
+ smp_call_function_data = NULL;
+
+ while (wait && atomic_read (&data.unfinished_count) > 0)
+ barrier ();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(smp_call_function);
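+
+/*
+ * A minimal, hypothetical caller (the callback name and counter are
+ * made up for illustration; the callback must be fast and must not
+ * block, as noted above):
+ *
+ *	static atomic_t hits = ATOMIC_INIT(0);
+ *
+ *	static void remote_poke(void *info)
+ *	{
+ *		atomic_inc((atomic_t *)info);
+ *	}
+ *
+ *	smp_call_function(remote_poke, &hits, 0, 1);	retry=0, wait=1
+ */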
+
+/*
+ * Flush all other CPU's tlb and then mine. Do this with on_each_cpu()
+ * as we want to ensure all TLB's flushed before proceeding.
+ */
+
+extern void flush_tlb_all_local(void);
+
+void
+smp_flush_tlb_all(void)
+{
+ on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
+}
+
+
+void
+smp_do_timer(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+ struct cpuinfo_parisc *data = &cpu_data[cpu];
+
+ if (!--data->prof_counter) {
+ data->prof_counter = data->prof_multiplier;
+ update_process_times(user_mode(regs));
+ }
+}
+
+/*
+ * Called by secondaries to update state and initialize CPU registers.
+ */
+static void __init
+smp_cpu_init(int cpunum)
+{
+ extern int init_per_cpu(int); /* arch/parisc/kernel/setup.c */
+ extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
+
+ /* Set modes and Enable floating point coprocessor */
+ (void) init_per_cpu(cpunum);
+
+ disable_sr_hashing();
+
+ mb();
+
+ /* Well, support 2.4 linux scheme as well. */
+ if (cpu_test_and_set(cpunum, cpu_online_map))
+ {
+ extern void machine_halt(void); /* arch/parisc.../process.c */
+
+ printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
+ machine_halt();
+ }
+
+ /* Initialise the idle task for this CPU */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if(current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current);
+
+ init_IRQ(); /* make sure no IRQ's are enabled or pending */
+}
+
+
+/*
+ * Slaves start using C here. Indirectly called from smp_slave_stext.
+ * Do what start_kernel() and main() do for boot strap processor (aka monarch)
+ */
+void __init smp_callin(void)
+{
+ int slave_id = cpu_now_booting;
+#if 0
+ void *istack;
+#endif
+
+ smp_cpu_init(slave_id);
+
+#if 0 /* NOT WORKING YET - see entry.S */
+ istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
+ if (istack == NULL) {
+ printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
+ BUG();
+ }
+ mtctl(istack,31);
+#endif
+
+ flush_cache_all_local(); /* start with known state */
+ flush_tlb_all_local();
+
+ local_irq_enable(); /* Interrupts have been off until now */
+
+ cpu_idle(); /* Wait for timer to schedule some work */
+
+ /* NOTREACHED */
+ panic("smp_callin() AAAAaaaaahhhh....\n");
+}
+
+/*
+ * Bring one cpu online.
+ */
+int __init smp_boot_one_cpu(int cpuid)
+{
+ struct task_struct *idle;
+ long timeout;
+
+ /*
+	 * Create an idle task for this CPU. Note the address we'd give
+ * to kernel_thread is irrelevant -- it's going to start
+ * where OS_BOOT_RENDEVZ vector in SAL says to start. But
+ * this gets all the other task-y sort of data structures set
+ * up like we wish. We need to pull the just created idle task
+ * off the run queue and stuff it into the init_tasks[] array.
+ * Sheesh . . .
+ */
+
+ idle = fork_idle(cpuid);
+ if (IS_ERR(idle))
+ panic("SMP: fork failed for CPU:%d", cpuid);
+
+ idle->thread_info->cpu = cpuid;
+
+ /* Let _start know what logical CPU we're booting
+ ** (offset into init_tasks[],cpu_data[])
+ */
+ cpu_now_booting = cpuid;
+
+ /*
+ ** boot strap code needs to know the task address since
+ ** it also contains the process stack.
+ */
+ smp_init_current_idle_task = idle ;
+ mb();
+
+ printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+
+ /*
+ ** This gets PDC to release the CPU from a very tight loop.
+ **
+ ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
+ ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
+ ** is executed after receiving the rendezvous signal (an interrupt to
+ ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
+ ** contents of memory are valid."
+ */
+ gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+ mb();
+
+ /*
+ * OK, wait a bit for that CPU to finish staggering about.
+ * Slave will set a bit when it reaches smp_cpu_init().
+ * Once the "monarch CPU" sees the bit change, it can move on.
+ */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if(cpu_online(cpuid)) {
+ /* Which implies Slave has started up */
+ cpu_now_booting = 0;
+ smp_init_current_idle_task = NULL;
+ goto alive ;
+ }
+ udelay(100);
+ barrier();
+ }
+
+ put_task_struct(idle);
+ idle = NULL;
+
+ printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
+ return -1;
+
+alive:
+ /* Remember the Slave data */
+#if (kDEBUG>=100)
+ printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
+ cpuid, timeout * 100);
+#endif /* kDEBUG */
+#ifdef ENTRY_SYS_CPUS
+ cpu_data[cpuid].state = STATE_RUNNING;
+#endif
+ return 0;
+}
+
+void __devinit smp_prepare_boot_cpu(void)
+{
+ int bootstrap_processor=cpu_data[0].cpuid; /* CPU ID of BSP */
+
+#ifdef ENTRY_SYS_CPUS
+ cpu_data[0].state = STATE_RUNNING;
+#endif
+
+ /* Setup BSP mappings */
+ printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+
+ cpu_set(bootstrap_processor, cpu_online_map);
+ cpu_set(bootstrap_processor, cpu_present_map);
+}
+
+
+
+/*
+** inventory.c:do_inventory() hasn't yet been run and thus we
+** don't 'discover' the additional CPUs until later.
+*/
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ cpus_clear(cpu_present_map);
+ cpu_set(0, cpu_present_map);
+
+ parisc_max_cpus = max_cpus;
+ if (!max_cpus)
+ printk(KERN_INFO "SMP mode deactivated.\n");
+}
+
+
+void smp_cpus_done(unsigned int cpu_max)
+{
+ return;
+}
+
+
+int __devinit __cpu_up(unsigned int cpu)
+{
+ if (cpu != 0 && cpu < parisc_max_cpus)
+ smp_boot_one_cpu(cpu);
+
+ return cpu_online(cpu) ? 0 : -ENOSYS;
+}
+
+
+
+#ifdef ENTRY_SYS_CPUS
+/* Code goes along with:
+** entry.s: ENTRY_NAME(sys_cpus) / * 215, for cpu stat * /
+*/
+int sys_cpus(int argc, char **argv)
+{
+ int i,j=0;
+ extern int current_pid(int cpu);
+
+ if( argc > 2 ) {
+ printk("sys_cpus:Only one argument supported\n");
+ return (-1);
+ }
+ if ( argc == 1 ){
+
+#ifdef DUMP_MORE_STATE
+ for(i=0; i<NR_CPUS; i++) {
+ int cpus_per_line = 4;
+ if(cpu_online(i)) {
+ if (j++ % cpus_per_line)
+ printk(" %3d",i);
+ else
+ printk("\n %3d",i);
+ }
+ }
+ printk("\n");
+#else
+ printk("\n 0\n");
+#endif
+ } else if((argc==2) && !(strcmp(argv[1],"-l"))) {
+ printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
+#ifdef DUMP_MORE_STATE
+ for(i=0;i<NR_CPUS;i++) {
+ if (!cpu_online(i))
+ continue;
+ if (cpu_data[i].cpuid != NO_PROC_ID) {
+ switch(cpu_data[i].state) {
+ case STATE_RENDEZVOUS:
+ printk("RENDEZVS ");
+ break;
+ case STATE_RUNNING:
+ printk((current_pid(i)!=0) ? "RUNNING " : "IDLING ");
+ break;
+ case STATE_STOPPED:
+ printk("STOPPED ");
+ break;
+ case STATE_HALTED:
+ printk("HALTED ");
+ break;
+ default:
+ printk("%08x?", cpu_data[i].state);
+ break;
+ }
+ if(cpu_online(i)) {
+ printk(" %4d",current_pid(i));
+ }
+ printk(" %6d",cpu_number_map(i));
+ printk(" %5d",i);
+ printk(" 0x%lx\n",cpu_data[i].hpa);
+ }
+ }
+#else
+ printk("\n%s %4d 0 0 --------",
+ (current->pid)?"RUNNING ": "IDLING ",current->pid);
+#endif
+ } else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
+#ifdef DUMP_MORE_STATE
+ printk("\nCPUSTATE CPUID\n");
+ for (i=0;i<NR_CPUS;i++) {
+ if (!cpu_online(i))
+ continue;
+ if (cpu_data[i].cpuid != NO_PROC_ID) {
+ switch(cpu_data[i].state) {
+ case STATE_RENDEZVOUS:
+ printk("RENDEZVS");break;
+ case STATE_RUNNING:
+ printk((current_pid(i)!=0) ? "RUNNING " : "IDLING");
+ break;
+ case STATE_STOPPED:
+ printk("STOPPED ");break;
+ case STATE_HALTED:
+ printk("HALTED ");break;
+				default:
+					break;
+ }
+ printk(" %5d\n",i);
+ }
+ }
+#else
+ printk("\n%s CPU0",(current->pid==0)?"RUNNING ":"IDLING ");
+#endif
+ } else {
+ printk("sys_cpus:Unknown request\n");
+ return (-1);
+ }
+ return 0;
+}
+#endif /* ENTRY_SYS_CPUS */
+
+#ifdef CONFIG_PROC_FS
+int __init
+setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+#endif
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
new file mode 100644
index 000000000000..06c2090cfaba
--- /dev/null
+++ b/arch/parisc/kernel/sys32.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
+ * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _PARISC64_KERNEL_SYS32_H
+#define _PARISC64_KERNEL_SYS32_H
+
+#include <linux/compat.h>
+
+/* Call a kernel syscall which will use kernel space instead of user
+ * space for its copy_to/from_user.
+ */
+#define KERNEL_SYSCALL(ret, syscall, args...) \
+{ \
+ mm_segment_t old_fs = get_fs(); \
+ set_fs(KERNEL_DS); \
+ ret = syscall(args); \
+ set_fs (old_fs); \
+}
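+
+/*
+ * Typical usage (compare sys32_rt_sigpending() in signal32.c): a 32-bit
+ * wrapper builds a native sigset_t on the kernel stack and must let the
+ * 64-bit syscall's copy_to_user() write to that kernel address
+ * (sigsetsize here being the wrapper's own argument):
+ *
+ *	sigset_t set;
+ *	int ret;
+ *
+ *	KERNEL_SYSCALL(ret, sys_rt_sigpending,
+ *		       (sigset_t __user *)&set, sigsetsize);
+ */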
+
+#ifdef CONFIG_COMPAT
+
+typedef __u32 __sighandler_t32;
+
+struct sigaction32 {
+ __sighandler_t32 sa_handler;
+ unsigned int sa_flags;
+ compat_sigset_t sa_mask; /* mask last for extensibility */
+};
+
+#endif
+
+#endif
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
new file mode 100644
index 000000000000..7958cd8c8bf8
--- /dev/null
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -0,0 +1,253 @@
+
+/*
+ * PARISC specific syscalls
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <asm/uaccess.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+#include <linux/smp_lock.h>
+#include <linux/syscalls.h>
+
+int sys_pipe(int __user *fildes)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+{
+ struct vm_area_struct *vma;
+
+ addr = PAGE_ALIGN(addr);
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ }
+}
+
+#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
+
+/*
+ * We need to know the offset to use. Old scheme was to look for
+ * existing mapping and use the same offset. New scheme is to use the
+ * address of the kernel data structure as the seed for the offset.
+ * We'll see how that works...
+ *
+ * The mapping is cacheline aligned, so there's no information in the bottom
+ * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
+ * drop the bottom 8 bits and use bits 8-17.
+ */
+static int get_offset(struct address_space *mapping)
+{
+ int offset = (unsigned long) mapping << (PAGE_SHIFT - 8);
+ return offset & 0x3FF000;
+}
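+
+/*
+ * A worked example of the arithmetic above, assuming PAGE_SHIFT == 12
+ * (the pointer is shifted left by 4 and masked to bits 12-21, i.e.
+ * bits 8-17 of the original address):
+ *
+ *	mapping           = 0x12345600
+ *	mapping << 4      = 0x123456000
+ *	       & 0x3FF000 = 0x00056000	(page aligned, below 4MB)
+ */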
+
+static unsigned long get_shared_area(struct address_space *mapping,
+ unsigned long addr, unsigned long len, unsigned long pgoff)
+{
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
+
+ addr = DCACHE_ALIGN(addr - offset) + offset;
+
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+ return -ENOMEM;
+ }
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+ if (filp) {
+ addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
+ } else if(flags & MAP_SHARED) {
+ addr = get_shared_area(NULL, addr, len, pgoff);
+ } else {
+ addr = get_unshared_area(addr, len);
+ }
+ return addr;
+}
+
+static unsigned long do_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long pgoff)
+{
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file != NULL)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long pgoff)
+{
+ /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
+ we have. */
+ return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
+}
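+
+/*
+ * For example, on a hypothetical configuration with PAGE_SHIFT == 14
+ * (16kB pages), a pgoff supplied by userspace in 4kB units is shifted
+ * right by 2 above, so do_mmap_pgoff() always receives an offset in
+ * native page units.
+ */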
+
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long offset)
+{
+ if (!(offset & ~PAGE_MASK)) {
+ return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+ } else {
+ return -EINVAL;
+ }
+}
+
+long sys_shmat_wrapper(int shmid, char __user *shmaddr, int shmflag)
+{
+ unsigned long raddr;
+ int r;
+
+ r = do_shmat(shmid, shmaddr, shmflag, &raddr);
+ if (r < 0)
+ return r;
+ return raddr;
+}
+
+/* Fucking broken ABI */
+
+#ifdef CONFIG_64BIT
+asmlinkage long parisc_truncate64(const char __user * path,
+ unsigned int high, unsigned int low)
+{
+ return sys_truncate(path, (long)high << 32 | low);
+}
+
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+ unsigned int high, unsigned int low)
+{
+ return sys_ftruncate(fd, (long)high << 32 | low);
+}
+
+/* stubs for the benefit of the syscall_table since truncate64 and truncate
+ * are identical on LP64 */
+asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
+{
+ return sys_truncate(path, length);
+}
+asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
+{
+ return sys_ftruncate(fd, length);
+}
+asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ return sys_fcntl(fd, cmd, arg);
+}
+#else
+
+asmlinkage long parisc_truncate64(const char __user * path,
+ unsigned int high, unsigned int low)
+{
+ return sys_truncate64(path, (loff_t)high << 32 | low);
+}
+
+asmlinkage long parisc_ftruncate64(unsigned int fd,
+ unsigned int high, unsigned int low)
+{
+ return sys_ftruncate64(fd, (loff_t)high << 32 | low);
+}
+#endif
+
+asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
+ unsigned int high, unsigned int low)
+{
+ return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
+ size_t count, unsigned int high, unsigned int low)
+{
+ return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
+}
+
+asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
+ size_t count)
+{
+ return sys_readahead(fd, (loff_t)high << 32 | low, count);
+}
+
+asmlinkage long parisc_fadvise64_64(int fd,
+ unsigned int high_off, unsigned int low_off,
+ unsigned int high_len, unsigned int low_len, int advice)
+{
+ return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
+ (loff_t)high_len << 32 | low_len, advice);
+}
+
+asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr, unsigned long len, int prot, int flag)
+{
+ return -ENOMEM;
+}
+
+asmlinkage int sys_free_hugepages(unsigned long addr)
+{
+ return -EINVAL;
+}
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
new file mode 100644
index 000000000000..613569018410
--- /dev/null
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -0,0 +1,720 @@
+/*
+ * sys_parisc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 2000-2001 Hewlett Packard Company
+ * Copyright (C) 2000 John Marvin
+ * Copyright (C) 2001 Matthew Wilcox
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
+ */
+
+#include <linux/config.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/signal.h>
+#include <linux/resource.h>
+#include <linux/times.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/ncp_fs.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfsd/nfsd.h>
+#include <linux/nfsd/cache.h>
+#include <linux/nfsd/xdr.h>
+#include <linux/nfsd/syscall.h>
+#include <linux/poll.h>
+#include <linux/personality.h>
+#include <linux/stat.h>
+#include <linux/highmem.h>
+#include <linux/highuid.h>
+#include <linux/mman.h>
+#include <linux/binfmts.h>
+#include <linux/namei.h>
+#include <linux/vfs.h>
+#include <linux/ptrace.h>
+#include <linux/swap.h>
+#include <linux/syscalls.h>
+
+#include <asm/types.h>
+#include <asm/uaccess.h>
+#include <asm/semaphore.h>
+#include <asm/mmu_context.h>
+
+#include "sys32.h"
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x) printk x
+#else
+#define DBG(x)
+#endif
+
+/*
+ * sys32_execve() executes a new program.
+ */
+
+asmlinkage int sys32_execve(struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ DBG(("sys32_execve(%p) r26 = 0x%lx\n", regs, regs->gr[26]));
+ filename = getname((const char __user *) regs->gr[26]);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = compat_do_execve(filename, compat_ptr(regs->gr[25]),
+ compat_ptr(regs->gr[24]), regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ }
+ putname(filename);
+out:
+
+ return error;
+}
+
+asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
+ int r22, int r21, int r20)
+{
+ printk(KERN_ERR "%s(%d): Unimplemented 32 on 64 syscall #%d!\n",
+ current->comm, current->pid, r20);
+ return -ENOSYS;
+}
+
+#ifdef CONFIG_SYSCTL
+
+struct __sysctl_args32 {
+ u32 name;
+ int nlen;
+ u32 oldval;
+ u32 oldlenp;
+ u32 newval;
+ u32 newlen;
+ u32 __unused[4];
+};
+
+asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
+{
+ struct __sysctl_args32 tmp;
+ int error;
+ unsigned int oldlen32;
+ size_t oldlen, *oldlenp = NULL;
+ unsigned long addr = (((long __force)&args->__unused[0]) + 7) & ~7;
+ extern int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
+ void *newval, size_t newlen);
+
+ DBG(("sysctl32(%p)\n", args));
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ if (tmp.oldval && tmp.oldlenp) {
+ /* Duh, this is ugly and might not work if sysctl_args
+ is in read-only memory, but do_sysctl does indirectly
+ a lot of uaccess in both directions and we'd have to
+ basically copy the whole sysctl.c here, and
+ glibc's __sysctl uses rw memory for the structure
+ anyway. */
+ /* a possibly better hack than this, which will avoid the
+ * problem if the struct is read only, is to push the
+ * 'oldlen' value out to the user's stack instead. -PB
+ */
+ if (get_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
+ return -EFAULT;
+ oldlen = oldlen32;
+ if (put_user(oldlen, (size_t *)addr))
+ return -EFAULT;
+ oldlenp = (size_t *)addr;
+ }
+
+ lock_kernel();
+ error = do_sysctl((int *)(u64)tmp.name, tmp.nlen, (void *)(u64)tmp.oldval,
+ oldlenp, (void *)(u64)tmp.newval, tmp.newlen);
+ unlock_kernel();
+ if (oldlenp) {
+ if (!error) {
+ if (get_user(oldlen, (size_t *)addr)) {
+ error = -EFAULT;
+ } else {
+ oldlen32 = oldlen;
+ if (put_user(oldlen32, (u32 *)(u64)tmp.oldlenp))
+ error = -EFAULT;
+ }
+ }
+ if (copy_to_user(&args->__unused[0], tmp.__unused, sizeof(tmp.__unused)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+#endif /* CONFIG_SYSCTL */
+
+asmlinkage long sys32_sched_rr_get_interval(pid_t pid,
+ struct compat_timespec __user *interval)
+{
+ struct timespec t;
+ int ret;
+
+ KERNEL_SYSCALL(ret, sys_sched_rr_get_interval, pid, (struct timespec __user *)&t);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+ return ret;
+}
+
+static int
+put_compat_timeval(struct compat_timeval __user *u, struct timeval *t)
+{
+ struct compat_timeval t32;
+ t32.tv_sec = t->tv_sec;
+ t32.tv_usec = t->tv_usec;
+ return copy_to_user(u, &t32, sizeof t32);
+}
+
+static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
+{
+ long usec;
+
+ if (__get_user(o->tv_sec, &i->tv_sec))
+ return -EFAULT;
+ if (__get_user(usec, &i->tv_usec))
+ return -EFAULT;
+ o->tv_nsec = usec * 1000;
+ return 0;
+}
+
+asmlinkage int
+sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
+{
+ extern void do_gettimeofday(struct timeval *tv);
+
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (put_compat_timeval(tv, &ktv))
+ return -EFAULT;
+ }
+ if (tz) {
+ extern struct timezone sys_tz;
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+asmlinkage
+int sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
+{
+ struct timespec kts;
+ struct timezone ktz;
+
+ if (tv) {
+ if (get_ts32(&kts, tv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_from_user(&ktz, tz, sizeof(ktz)))
+ return -EFAULT;
+ }
+
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
+int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
+{
+ int err;
+
+ if (stat->size > MAX_NON_LFS || !new_valid_dev(stat->dev) ||
+ !new_valid_dev(stat->rdev))
+ return -EOVERFLOW;
+
+ err = put_user(new_encode_dev(stat->dev), &statbuf->st_dev);
+ err |= put_user(stat->ino, &statbuf->st_ino);
+ err |= put_user(stat->mode, &statbuf->st_mode);
+ err |= put_user(stat->nlink, &statbuf->st_nlink);
+ err |= put_user(0, &statbuf->st_reserved1);
+ err |= put_user(0, &statbuf->st_reserved2);
+ err |= put_user(new_encode_dev(stat->rdev), &statbuf->st_rdev);
+ err |= put_user(stat->size, &statbuf->st_size);
+ err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
+ err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
+ err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
+ err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
+ err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
+ err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
+ err |= put_user(stat->blksize, &statbuf->st_blksize);
+ err |= put_user(stat->blocks, &statbuf->st_blocks);
+ err |= put_user(0, &statbuf->__unused1);
+ err |= put_user(0, &statbuf->__unused2);
+ err |= put_user(0, &statbuf->__unused3);
+ err |= put_user(0, &statbuf->__unused4);
+ err |= put_user(0, &statbuf->__unused5);
+ err |= put_user(0, &statbuf->st_fstype); /* not avail */
+ err |= put_user(0, &statbuf->st_realdev); /* not avail */
+ err |= put_user(0, &statbuf->st_basemode); /* not avail */
+ err |= put_user(0, &statbuf->st_spareshort);
+ err |= put_user(stat->uid, &statbuf->st_uid);
+ err |= put_user(stat->gid, &statbuf->st_gid);
+ err |= put_user(0, &statbuf->st_spare4[0]);
+ err |= put_user(0, &statbuf->st_spare4[1]);
+ err |= put_user(0, &statbuf->st_spare4[2]);
+
+ return err;
+}
+
+struct linux32_dirent {
+ u32 d_ino;
+ compat_off_t d_off;
+ u16 d_reclen;
+ char d_name[1];
+};
+
+struct old_linux32_dirent {
+ u32 d_ino;
+ u32 d_offset;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct getdents32_callback {
+ struct linux32_dirent __user * current_dir;
+ struct linux32_dirent __user * previous;
+ int count;
+ int error;
+};
+
+struct readdir32_callback {
+ struct old_linux32_dirent __user * dirent;
+ int count;
+};
+
+#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
+static int
+filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
+ unsigned int d_type)
+{
+ struct linux32_dirent __user * dirent;
+ struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->current_dir;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ dirent = ((void __user *)dirent) + reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
+}
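As a worked example of the record-length arithmetic above: d_name starts at byte 10 of struct linux32_dirent (4 bytes of d_ino, 4 of d_off, 2 of d_reclen), so an 8-character name needs ROUND_UP(10 + 8 + 1, 4) = ROUND_UP(19, 4) = 20 bytes, the "+ 1" being the terminating NUL that put_user() writes after the name; filldir32() rejects the entry if fewer than that many bytes remain in the caller's buffer.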
+
+asmlinkage long
+sys32_getdents (unsigned int fd, void __user * dirent, unsigned int count)
+{
+ struct file * file;
+ struct linux32_dirent __user * lastdirent;
+ struct getdents32_callback buf;
+ int error;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = (struct linux32_dirent __user *) dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, filldir32, &buf);
+ if (error < 0)
+ goto out_putf;
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = count - buf.count;
+ }
+
+out_putf:
+ fput(file);
+out:
+ return error;
+}
+
+static int
+fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, ino_t ino,
+ unsigned int d_type)
+{
+ struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
+ struct old_linux32_dirent __user * dirent;
+
+ if (buf->count)
+ return -EINVAL;
+ buf->count++;
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(offset, &dirent->d_offset);
+ put_user(namlen, &dirent->d_namlen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ return 0;
+}
+
+asmlinkage long
+sys32_readdir (unsigned int fd, void __user * dirent, unsigned int count)
+{
+ int error;
+ struct file * file;
+ struct readdir32_callback buf;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.count = 0;
+ buf.dirent = dirent;
+
+ error = vfs_readdir(file, fillonedir32, &buf);
+ if (error >= 0)
+ error = buf.count;
+ fput(file);
+out:
+ return error;
+}
+
+/*** copied from mips64 ***/
+/*
+ * Ooo, nasty. Here we need to frob 32-bit unsigned longs into
+ * 64-bit unsigned longs.
+ */
+
+static inline int
+get_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
+{
+ n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
+ if (ufdset) {
+ unsigned long odd;
+
+ if (!access_ok(VERIFY_WRITE, ufdset, n*sizeof(u32)))
+ return -EFAULT;
+
+ odd = n & 1UL;
+ n &= ~1UL;
+ while (n) {
+ unsigned long h, l;
+ __get_user(l, ufdset);
+ __get_user(h, ufdset+1);
+ ufdset += 2;
+ *fdset++ = h << 32 | l;
+ n -= 2;
+ }
+ if (odd)
+ __get_user(*fdset, ufdset);
+ } else {
+ /* Tricky: we must clear the full unsigned long at the
+ * end of the kernel fdset; this makes sure that
+ * actually happens.
+ */
+ memset(fdset, 0, ((n + 1) & ~1)*sizeof(u32));
+ }
+ return 0;
+}
+
+static inline void
+set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
+{
+ unsigned long odd;
+ n = (n + 8*sizeof(u32) - 1) / (8*sizeof(u32));
+
+ if (!ufdset)
+ return;
+
+ odd = n & 1UL;
+ n &= ~1UL;
+ while (n) {
+ unsigned long h, l;
+ l = *fdset++;
+ h = l >> 32;
+ __put_user(l, ufdset);
+ __put_user(h, ufdset+1);
+ ufdset += 2;
+ n -= 2;
+ }
+ if (odd)
+ __put_user(*fdset, ufdset);
+}
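For reference, a minimal user-space sketch (illustrative only, no uaccess) of the word packing these two helpers perform; the real code above additionally goes through access_ok()/__get_user()/__put_user() and handles the odd trailing word:

    #include <stdint.h>

    /* combine two consecutive 32-bit fd_set words into one 64-bit
     * kernel word, mirroring "*fdset++ = h << 32 | l" above */
    static uint64_t pack_fd_words(uint32_t l, uint32_t h)
    {
            return (uint64_t)h << 32 | l;
    }

    /* split a 64-bit kernel word back into its two 32-bit halves,
     * mirroring set_fd_set32() */
    static void unpack_fd_words(uint64_t w, uint32_t *l, uint32_t *h)
    {
            *l = (uint32_t)w;
            *h = (uint32_t)(w >> 32);
    }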
+
+struct msgbuf32 {
+ int mtype;
+ char mtext[1];
+};
+
+asmlinkage long sys32_msgsnd(int msqid,
+ struct msgbuf32 __user *umsgp32,
+ size_t msgsz, int msgflg)
+{
+ struct msgbuf *mb;
+ struct msgbuf32 mb32;
+ int err;
+
+ if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ err = get_user(mb32.mtype, &umsgp32->mtype);
+ mb->mtype = mb32.mtype;
+ err |= copy_from_user(mb->mtext, &umsgp32->mtext, msgsz);
+
+ if (err)
+ err = -EFAULT;
+ else
+ KERNEL_SYSCALL(err, sys_msgsnd, msqid, (struct msgbuf __user *)mb, msgsz, msgflg);
+
+ kfree(mb);
+ return err;
+}
+
+asmlinkage long sys32_msgrcv(int msqid,
+ struct msgbuf32 __user *umsgp32,
+ size_t msgsz, long msgtyp, int msgflg)
+{
+ struct msgbuf *mb;
+ struct msgbuf32 mb32;
+ int err, len;
+
+ if ((mb = kmalloc(msgsz + sizeof *mb + 4, GFP_KERNEL)) == NULL)
+ return -ENOMEM;
+
+ KERNEL_SYSCALL(err, sys_msgrcv, msqid, (struct msgbuf __user *)mb, msgsz, msgtyp, msgflg);
+
+ if (err >= 0) {
+ len = err;
+ mb32.mtype = mb->mtype;
+ err = put_user(mb32.mtype, &umsgp32->mtype);
+ err |= copy_to_user(&umsgp32->mtext, mb->mtext, len);
+ if (err)
+ err = -EFAULT;
+ else
+ err = len;
+ }
+
+ kfree(mb);
+ return err;
+}
+
+asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ off_t of;
+
+ if (offset && get_user(of, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, count);
+ set_fs(old_fs);
+
+ if (offset && put_user(of, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, s32 count)
+{
+ mm_segment_t old_fs = get_fs();
+ int ret;
+ loff_t lof;
+
+ if (offset && get_user(lof, offset))
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+ ret = sys_sendfile64(out_fd, in_fd, offset ? (loff_t __user *)&lof : NULL, count);
+ set_fs(old_fs);
+
+ if (offset && put_user(lof, offset))
+ return -EFAULT;
+
+ return ret;
+}
+
+
+struct timex32 {
+ unsigned int modes; /* mode selector */
+ int offset; /* time offset (usec) */
+ int freq; /* frequency offset (scaled ppm) */
+ int maxerror; /* maximum error (usec) */
+ int esterror; /* estimated error (usec) */
+ int status; /* clock command/status */
+ int constant; /* pll time constant */
+ int precision; /* clock precision (usec) (read only) */
+ int tolerance; /* clock frequency tolerance (ppm)
+ * (read only)
+ */
+ struct compat_timeval time; /* (read only) */
+ int tick; /* (modified) usecs between clock ticks */
+
+ int ppsfreq; /* pps frequency (scaled ppm) (ro) */
+ int jitter; /* pps jitter (us) (ro) */
+ int shift; /* interval duration (s) (shift) (ro) */
+ int stabil; /* pps stability (scaled ppm) (ro) */
+ int jitcnt; /* jitter limit exceeded (ro) */
+ int calcnt; /* calibration intervals (ro) */
+ int errcnt; /* calibration errors (ro) */
+ int stbcnt; /* stability limit exceeded (ro) */
+
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+};
+
+asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32)
+{
+ struct timex txc;
+ struct timex32 t32;
+ int ret;
+ extern int do_adjtimex(struct timex *txc);
+
+ if(copy_from_user(&t32, txc_p32, sizeof(struct timex32)))
+ return -EFAULT;
+#undef CP
+#define CP(x) txc.x = t32.x
+ CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
+ CP(status); CP(constant); CP(precision); CP(tolerance);
+ CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
+ CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
+ CP(stbcnt);
+ ret = do_adjtimex(&txc);
+#undef CP
+#define CP(x) t32.x = txc.x
+ CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror);
+ CP(status); CP(constant); CP(precision); CP(tolerance);
+ CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter);
+ CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt);
+ CP(stbcnt);
+ return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret;
+}
+
+
+struct sysinfo32 {
+ s32 uptime;
+ u32 loads[3];
+ u32 totalram;
+ u32 freeram;
+ u32 sharedram;
+ u32 bufferram;
+ u32 totalswap;
+ u32 freeswap;
+ unsigned short procs;
+ u32 totalhigh;
+ u32 freehigh;
+ u32 mem_unit;
+ char _f[12];
+};
+
+/* We used to call sys_sysinfo and translate the result. But sys_sysinfo
+ * undoes the good work done elsewhere, and rather than undoing the
+ * damage, I decided to just duplicate the code from sys_sysinfo here.
+ */
+
+asmlinkage int sys32_sysinfo(struct sysinfo32 __user *info)
+{
+ struct sysinfo val;
+ int err;
+ unsigned long seq;
+
+ /* We don't need a memset here because we copy the
+ * struct to userspace one element at a time.
+ */
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ val.uptime = jiffies / HZ;
+
+ val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
+ val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
+ val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+
+ val.procs = nr_threads;
+ } while (read_seqretry(&xtime_lock, seq));
+
+
+ si_meminfo(&val);
+ si_swapinfo(&val);
+
+ err = put_user (val.uptime, &info->uptime);
+ err |= __put_user (val.loads[0], &info->loads[0]);
+ err |= __put_user (val.loads[1], &info->loads[1]);
+ err |= __put_user (val.loads[2], &info->loads[2]);
+ err |= __put_user (val.totalram, &info->totalram);
+ err |= __put_user (val.freeram, &info->freeram);
+ err |= __put_user (val.sharedram, &info->sharedram);
+ err |= __put_user (val.bufferram, &info->bufferram);
+ err |= __put_user (val.totalswap, &info->totalswap);
+ err |= __put_user (val.freeswap, &info->freeswap);
+ err |= __put_user (val.procs, &info->procs);
+ err |= __put_user (val.totalhigh, &info->totalhigh);
+ err |= __put_user (val.freehigh, &info->freehigh);
+ err |= __put_user (val.mem_unit, &info->mem_unit);
+ return err ? -EFAULT : 0;
+}
+
+
+/* lseek() needs a wrapper because 'offset' can be negative, but the top
+ * half of the argument has been zeroed by syscall.S.
+ */
+
+asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
+{
+ return sys_lseek(fd, offset, origin);
+}
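A small self-contained illustration of the comment above (the register value is a made-up example):

    #include <assert.h>

    int main(void)
    {
            /* hypothetical register image: top half zeroed by syscall.S */
            unsigned long reg = 0x00000000fffffffcUL;
            int offset = (int)reg;      /* reinterpret the low 32 bits: -4 */
            long long off = offset;     /* widen as sys_lseek() would: -4  */
            assert(off == -4);
            return 0;
    }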
+
+asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
+{
+ union semun u;
+
+ if (cmd == SETVAL) {
+ /* Ugh. arg is a union of int,ptr,ptr,ptr, so it is 8 bytes wide.
+ * The int should be in the first 4, but our argument
+ * frobbing has left it in the last 4.
+ */
+ u.val = *((int *)&arg + 1);
+ return sys_semctl (semid, semnum, cmd, u);
+ }
+ return sys_semctl (semid, semnum, cmd, arg);
+}
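The "+ 1" above is easiest to read together with the zero-extension done on syscall entry: the narrow caller's int arrives in the low 32 bits of the 8-byte slot that holds 'arg', and because parisc is big-endian those low 32 bits are the second of the slot's two words, which is exactly where the expression fetches the value from.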
+
+long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char __user *buf,
+ size_t len)
+{
+ return sys_lookup_dcookie((u64)cookie_high << 32 | cookie_low,
+ buf, len);
+}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
new file mode 100644
index 000000000000..32ea701f4d20
--- /dev/null
+++ b/arch/parisc/kernel/syscall.S
@@ -0,0 +1,703 @@
+/*
+ * Linux/PA-RISC Project (http://www.parisc-linux.org/)
+ *
+ * System call entry code Copyright (c) Matthew Wilcox 1999 <willy@bofh.ai>
+ * Licensed under the GNU GPL.
+ * thanks to Philipp Rumpf, Mike Shaver and various others
+ * sorry about the wall, puffin..
+ */
+
+#include <asm/offsets.h>
+#include <asm/unistd.h>
+#include <asm/errno.h>
+#include <asm/psw.h>
+#include <asm/thread_info.h>
+
+#include <asm/assembly.h>
+#include <asm/processor.h>
+
+ /* We fill the empty parts of the gateway page with
+ * something that will kill the kernel or a
+ * userspace application.
+ */
+#define KILL_INSN break 0,0
+
+#include <linux/config.h> /* for CONFIG_SMP */
+
+#ifdef __LP64__
+ .level 2.0w
+#else
+ .level 1.1
+#endif
+
+#ifndef __LP64__
+ .macro fixup_branch,lbl
+ b \lbl
+ .endm
+#else
+ .macro fixup_branch,lbl
+ ldil L%\lbl, %r1
+ ldo R%\lbl(%r1), %r1
+ bv,n %r0(%r1)
+ .endm
+#endif
+
+ .text
+
+ .import syscall_exit,code
+ .import syscall_exit_rfi,code
+ .export linux_gateway_page
+
+ /* Linux gateway page is aliased to virtual page 0 in the kernel
+ * address space. Since it is a gateway page it cannot be
+ * dereferenced, so null pointers will still fault. We start
+ * the actual entry point at 0x100. We put break instructions
+ * at the beginning of the page to trap null indirect function
+ * pointers.
+ */
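Putting the offsets below together: bytes 0x00-0xaf are filled with KILL_INSN, the light-weight-syscall entry sits at 0xb0, KILL_INSN padding runs from 0xb4 up to set_thread_pointer at 0xe0, a final KILL_INSN run covers 0xf0-0xff, and the real syscall entry point begins at 0x100.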
+
+ .align 4096
+linux_gateway_page:
+
+ /* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
+ .rept 44
+ KILL_INSN
+ .endr
+
+ /* ADDRESS 0xb0 to 0xb4, lws uses 1 insn for entry */
+ /* Light-weight-syscall entry must always be located at 0xb0 */
+ /* WARNING: Keep this number updated with table size changes */
+#define __NR_lws_entries (2)
+
+lws_entry:
+ /* Unconditional branch to lws_start, located on the
+ same gateway page */
+ b,n lws_start
+
+ /* Fill from 0xb4 to 0xe0 */
+ .rept 11
+ KILL_INSN
+ .endr
+
+ /* This function MUST be located at 0xe0 for glibc's threading
+ mechanism to work. DO NOT MOVE THIS CODE EVER! */
+set_thread_pointer:
+ gate .+8, %r0 /* increase privilege */
+ depi 3, 31, 2, %r31 /* Ensure we return into user mode. */
+ be 0(%sr7,%r31) /* return to user space */
+ mtctl %r26, %cr27 /* move arg0 to the control register */
+
+ /* Increase the chance of trapping if random jumps occur to this
+ address, fill from 0xf0 to 0x100 */
+ .rept 4
+ KILL_INSN
+ .endr
+
+/* This address must remain fixed at 0x100 for glibc's syscalls to work */
+ .align 256
+linux_gateway_entry:
+ gate .+8, %r0 /* become privileged */
+ mtsp %r0,%sr4 /* get kernel space into sr4 */
+ mtsp %r0,%sr5 /* get kernel space into sr5 */
+ mtsp %r0,%sr6 /* get kernel space into sr6 */
+ mfsp %sr7,%r1 /* save user sr7 */
+ mtsp %r1,%sr3 /* and store it in sr3 */
+
+#ifdef __LP64__
+ /* for now we can *always* set the W bit on entry to the syscall
+ * since we don't support wide userland processes. We could
+ * also save the current SM other than in r0 and restore it on
+ * exit from the syscall, and also use that value to know
+ * whether to do narrow or wide syscalls. -PB
+ */
+ ssm PSW_SM_W, %r1
+ extrd,u %r1,PSW_W_BIT,1,%r1
+ /* sp must be aligned on 4, so deposit the W bit setting into
+ * the bottom of sp temporarily */
+ or,ev %r1,%r30,%r30
+ b,n 1f
+ /* The top halves of argument registers must be cleared on syscall
+ * entry from a narrow executable.
+ */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+ depdi 0, 31, 32, %r23
+ depdi 0, 31, 32, %r22
+ depdi 0, 31, 32, %r21
+1:
+#endif
+ mfctl %cr30,%r1
+ xor %r1,%r30,%r30 /* ye olde xor trick */
+ xor %r1,%r30,%r1
+ xor %r1,%r30,%r30
+
+ ldo THREAD_SZ_ALGN+FRAME_SIZE(%r30),%r30 /* set up kernel stack */
+
+ /* N.B.: It is critical that we don't set sr7 to 0 until r30
+ * contains a valid kernel stack pointer. It is also
+ * critical that we don't start using the kernel stack
+ * until after sr7 has been set to 0.
+ */
+
+ mtsp %r0,%sr7 /* get kernel space into sr7 */
+ STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
+ mfctl %cr30,%r1 /* get task ptr in %r1 */
+ LDREG TI_TASK(%r1),%r1
+
+ /* Save some registers for sigcontext and potential task
+ switch (see entry.S for the details of which ones are
+ saved/restored). TASK_PT_PSW is zeroed so we can see whether
+ a process is on a syscall or not. For an interrupt the real
+ PSW value is stored. This is needed for gdb and sys_ptrace. */
+ STREG %r0, TASK_PT_PSW(%r1)
+ STREG %r2, TASK_PT_GR2(%r1) /* preserve rp */
+ STREG %r19, TASK_PT_GR19(%r1)
+
+ LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */
+#ifdef __LP64__
+ extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */
+#if 0
+ xor %r19,%r2,%r2 /* clear bottom bit */
+ depd,z %r19,1,1,%r19
+ std %r19,TASK_PT_PSW(%r1)
+#endif
+#endif
+ STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */
+
+ STREG %r20, TASK_PT_GR20(%r1)
+ STREG %r21, TASK_PT_GR21(%r1)
+ STREG %r22, TASK_PT_GR22(%r1)
+ STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
+ STREG %r24, TASK_PT_GR24(%r1) /* 3rd argument */
+ STREG %r25, TASK_PT_GR25(%r1) /* 2nd argument */
+ STREG %r26, TASK_PT_GR26(%r1) /* 1st argument */
+ STREG %r27, TASK_PT_GR27(%r1) /* user dp */
+ STREG %r28, TASK_PT_GR28(%r1) /* return value 0 */
+ STREG %r28, TASK_PT_ORIG_R28(%r1) /* return value 0 (saved for signals) */
+ STREG %r29, TASK_PT_GR29(%r1) /* return value 1 */
+ STREG %r31, TASK_PT_GR31(%r1) /* preserve syscall return ptr */
+
+ ldo TASK_PT_FR0(%r1), %r27 /* save fpregs from the kernel */
+ save_fp %r27 /* or potential task switch */
+
+ mfctl %cr11, %r27 /* i.e. SAR */
+ STREG %r27, TASK_PT_SAR(%r1)
+
+ loadgp
+
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+ copy %r19,%r2 /* W bit back to r2 */
+#else
+ /* no need to save these on stack in wide mode because the first 8
+ * args are passed in registers */
+ stw %r22, -52(%r30) /* 5th argument */
+ stw %r21, -56(%r30) /* 6th argument */
+#endif
+
+ /* Are we being ptraced? */
+ mfctl %cr30, %r1
+ LDREG TI_TASK(%r1),%r1
+ LDREG TASK_PTRACE(%r1), %r1
+ bb,<,n %r1,31,.Ltracesys
+
+ /* Note! We cannot use the syscall table that is mapped
+ nearby since the gateway page is mapped execute-only. */
+
+#ifdef __LP64__
+ ldil L%sys_call_table, %r1
+ or,= %r2,%r2,%r2
+ addil L%(sys_call_table64-sys_call_table), %r1
+ ldo R%sys_call_table(%r1), %r19
+ or,= %r2,%r2,%r2
+ ldo R%sys_call_table64(%r1), %r19
+#else
+ ldil L%sys_call_table, %r1
+ ldo R%sys_call_table(%r1), %r19
+#endif
+ comiclr,>>= __NR_Linux_syscalls, %r20, %r0
+ b,n .Lsyscall_nosys
+
+ LDREGX %r20(%r19), %r19
+
+ /* If this is a sys_rt_sigreturn call, and the signal was received
+ * when not in_syscall, then we want to return via syscall_exit_rfi,
+ * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
+ * trampoline code in signal.c).
+ */
+ ldi __NR_rt_sigreturn,%r2
+ comb,= %r2,%r20,.Lrt_sigreturn
+.Lin_syscall:
+ ldil L%syscall_exit,%r2
+ be 0(%sr7,%r19)
+ ldo R%syscall_exit(%r2),%r2
+.Lrt_sigreturn:
+ comib,<> 0,%r25,.Lin_syscall
+ ldil L%syscall_exit_rfi,%r2
+ be 0(%sr7,%r19)
+ ldo R%syscall_exit_rfi(%r2),%r2
+
+ /* Note! Because we are not running where we were linked, any
+ calls to functions external to this file must be indirect. To
+ be safe, we apply the opposite rule to functions within this
+ file, with local labels given to them to ensure correctness. */
+
+.Lsyscall_nosys:
+syscall_nosys:
+ ldil L%syscall_exit,%r1
+ be R%syscall_exit(%sr7,%r1)
+ ldo -ENOSYS(%r0),%r28 /* set errno */
+
+
+/* Warning! This trace code is a virtual duplicate of the code above so be
+ * sure to maintain both! */
+.Ltracesys:
+tracesys:
+ /* Need to save more registers so the debugger can see where we
+ * are. This saves only the lower 8 bits of PSW, so that the C
+ * bit is still clear on syscalls, and the D bit is set if this
+ * full register save path has been executed. We check the D
+ * bit on syscall_return_rfi to determine which registers to
+ * restore. An interrupt results in a full PSW saved with the
+ * C bit set; a non-traced syscall entry results in C and D clear
+ * in the saved PSW.
+ */
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ ssm 0,%r2
+ STREG %r2,TASK_PT_PSW(%r1) /* Lower 8 bits only!! */
+ mfsp %sr0,%r2
+ STREG %r2,TASK_PT_SR0(%r1)
+ mfsp %sr1,%r2
+ STREG %r2,TASK_PT_SR1(%r1)
+ mfsp %sr2,%r2
+ STREG %r2,TASK_PT_SR2(%r1)
+ mfsp %sr3,%r2
+ STREG %r2,TASK_PT_SR3(%r1)
+ STREG %r2,TASK_PT_SR4(%r1)
+ STREG %r2,TASK_PT_SR5(%r1)
+ STREG %r2,TASK_PT_SR6(%r1)
+ STREG %r2,TASK_PT_SR7(%r1)
+ STREG %r2,TASK_PT_IASQ0(%r1)
+ STREG %r2,TASK_PT_IASQ1(%r1)
+ LDREG TASK_PT_GR31(%r1),%r2
+ STREG %r2,TASK_PT_IAOQ0(%r1)
+ ldo 4(%r2),%r2
+ STREG %r2,TASK_PT_IAOQ1(%r1)
+ ldo TASK_REGS(%r1),%r2
+ /* reg_save %r2 */
+ STREG %r3,PT_GR3(%r2)
+ STREG %r4,PT_GR4(%r2)
+ STREG %r5,PT_GR5(%r2)
+ STREG %r6,PT_GR6(%r2)
+ STREG %r7,PT_GR7(%r2)
+ STREG %r8,PT_GR8(%r2)
+ STREG %r9,PT_GR9(%r2)
+ STREG %r10,PT_GR10(%r2)
+ STREG %r11,PT_GR11(%r2)
+ STREG %r12,PT_GR12(%r2)
+ STREG %r13,PT_GR13(%r2)
+ STREG %r14,PT_GR14(%r2)
+ STREG %r15,PT_GR15(%r2)
+ STREG %r16,PT_GR16(%r2)
+ STREG %r17,PT_GR17(%r2)
+ STREG %r18,PT_GR18(%r2)
+ /* Finished saving things for the debugger */
+
+ ldil L%syscall_trace,%r1
+ ldil L%tracesys_next,%r2
+ be R%syscall_trace(%sr7,%r1)
+ ldo R%tracesys_next(%r2),%r2
+
+tracesys_next:
+ ldil L%sys_call_table,%r1
+ ldo R%sys_call_table(%r1), %r19
+
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ LDREG TASK_PT_GR20(%r1), %r20
+ LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */
+ LDREG TASK_PT_GR25(%r1), %r25
+ LDREG TASK_PT_GR24(%r1), %r24
+ LDREG TASK_PT_GR23(%r1), %r23
+#ifdef __LP64__
+ LDREG TASK_PT_GR22(%r1), %r22
+ LDREG TASK_PT_GR21(%r1), %r21
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ comiclr,>>= __NR_Linux_syscalls, %r20, %r0
+ b,n .Lsyscall_nosys
+
+ LDREGX %r20(%r19), %r19
+
+ /* If this is a sys_rt_sigreturn call, and the signal was received
+ * when not in_syscall, then we want to return via syscall_exit_rfi,
+ * not syscall_exit. Signal no. in r20, in_syscall in r25 (see
+ * trampoline code in signal.c).
+ */
+ ldi __NR_rt_sigreturn,%r2
+ comb,= %r2,%r20,.Ltrace_rt_sigreturn
+.Ltrace_in_syscall:
+ ldil L%tracesys_exit,%r2
+ be 0(%sr7,%r19)
+ ldo R%tracesys_exit(%r2),%r2
+
+ /* Do *not* call this function on the gateway page, because it
+ makes a direct call to syscall_trace. */
+
+tracesys_exit:
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ bl syscall_trace, %r2
+ STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG TI_TASK(%r1), %r1
+ LDREG TASK_PT_GR28(%r1), %r28 /* Restore return val. */
+
+ ldil L%syscall_exit,%r1
+ be,n R%syscall_exit(%sr7,%r1)
+
+.Ltrace_rt_sigreturn:
+ comib,<> 0,%r25,.Ltrace_in_syscall
+ ldil L%tracesys_sigexit,%r2
+ be 0(%sr7,%r19)
+ ldo R%tracesys_sigexit(%r2),%r2
+
+tracesys_sigexit:
+ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
+ LDREG 0(%r1), %r1
+#ifdef __LP64__
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+ bl syscall_trace, %r2
+ nop
+
+ ldil L%syscall_exit_rfi,%r1
+ be,n R%syscall_exit_rfi(%sr7,%r1)
+
+
+ /*********************************************************
+ Light-weight-syscall code
+
+ r20 - lws number
+ r26,r25,r24,r23,r22 - Input registers
+ r28 - Function return register
+ r21 - Error code.
+
+ Scratch: any of the above that aren't currently
+ being used, including r1.
+
+ Return pointer: r31 (Not usable)
+
+ Error codes returned by entry path:
+
+ ENOSYS - r20 was an invalid LWS number.
+
+ *********************************************************/
+lws_start:
+ /* Gate and ensure we return to userspace */
+ gate .+8, %r0
+ depi 3, 31, 2, %r31 /* Ensure we return to userspace */
+
+#ifdef __LP64__
+ /* FIXME: If we are a 64-bit kernel just
+ * turn this on unconditionally.
+ */
+ ssm PSW_SM_W, %r1
+ extrd,u %r1,PSW_W_BIT,1,%r1
+ /* sp must be aligned on 4, so deposit the W bit setting into
+ * the bottom of sp temporarily */
+ or,ev %r1,%r30,%r30
+
+ /* Clip LWS number to a 32-bit value always */
+ depdi 0, 31, 32, %r20
+#endif
+
+ /* Is the lws entry number valid? */
+ comiclr,>>= __NR_lws_entries, %r20, %r0
+ b,n lws_exit_nosys
+
+ /* WARNING: Trashing sr2 and sr3 */
+ mfsp %sr7,%r1 /* get userspace into sr3 */
+ mtsp %r1,%sr3
+ mtsp %r0,%sr2 /* get kernel space into sr2 */
+
+ /* Load table start */
+ ldil L%lws_table, %r1
+ ldo R%lws_table(%r1), %r28 /* Scratch use of r28 */
+ LDREGX %r20(%sr2,r28), %r21 /* Scratch use of r21 */
+
+ /* Jump to lws, lws table pointers already relocated */
+ be,n 0(%sr2,%r21)
+
+lws_exit_nosys:
+ ldo -ENOSYS(%r0),%r21 /* set errno */
+ /* Fall through: Return to userspace */
+
+lws_exit:
+#ifdef __LP64__
+ /* decide whether to reset the wide mode bit
+ *
+ * For a syscall, the W bit is stored in the lowest bit
+ * of sp. Extract it and reset W if it is zero */
+ extrd,u,*<> %r30,63,1,%r1
+ rsm PSW_SM_W, %r0
+ /* now reset the lowest bit of sp if it was set */
+ xor %r30,%r1,%r30
+#endif
+ be,n 0(%sr3, %r31)
+
+
+
+ /***************************************************
+ Implementing CAS as an atomic operation:
+
+ %r26 - Address to examine
+ %r25 - Old value to check (old)
+ %r24 - New value to set (new)
+ %r28 - Return prev through this register.
+ %r21 - Kernel error code
+
+ If debugging is DISabled:
+
+ %r21 has the following meanings:
+
+ EAGAIN - CAS is busy, ldcw failed, try again.
+ EFAULT - Read or write failed.
+
+ If debugging is enabled:
+
+ EDEADLOCK - CAS called recursively.
+ EAGAIN && r28 == 1 - CAS is busy. Lock contended.
+ EAGAIN && r28 == 2 - CAS is busy. ldcw failed.
+ EFAULT - Read or write failed.
+
+ Scratch: r20, r28, r1
+
+ ****************************************************/
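Roughly, the operation implemented below behaves like this C model (a sketch of the semantics only; it ignores the SMP hash lock and the fault handling, and the names are illustrative):

    /* r26 = addr, r25 = old_val, r24 = new_val; the previous value is
     * returned in r28 and the status in r21 (0, -EAGAIN or -EFAULT). */
    static unsigned int lws_cas32_model(unsigned int *addr,
                                        unsigned int old_val,
                                        unsigned int new_val)
    {
            unsigned int prev = *addr;      /* the load may fault -> -EFAULT   */
            if (prev == old_val)
                    *addr = new_val;        /* the store may fault -> -EFAULT  */
            return prev;                    /* caller spins/retries on -EAGAIN */
    }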
+
+ /* Do not enable LWS debugging */
+#define ENABLE_LWS_DEBUG 0
+
+ /* ELF64 Process entry path */
+lws_compare_and_swap64:
+#ifdef __LP64__
+ b,n lws_compare_and_swap
+#else
+ /* If we are not a 64-bit kernel, then we don't
+ * support 64-bit input registers
+ */
+ b,n lws_exit_nosys
+#endif
+
+ /* ELF32 Process entry path */
+lws_compare_and_swap32:
+#ifdef __LP64__
+ /* Clip all the input registers */
+ depdi 0, 31, 32, %r26
+ depdi 0, 31, 32, %r25
+ depdi 0, 31, 32, %r24
+#endif
+
+lws_compare_and_swap:
+#ifdef CONFIG_SMP
+ /* Load start of lock table */
+ ldil L%lws_lock_start, %r20
+ ldo R%lws_lock_start(%r20), %r28
+
+ /* Extract four bits from r26 to hash the lock (bits 4-7) */
+ extru %r26, 27, 4, %r20
+
+ /* Find the lock to use: the hash is one of 0 to
+ 15, multiplied by 16 (to keep it 16-byte aligned)
+ and added to the lock table base. */
+ shlw %r20, 4, %r20
+ add %r20, %r28, %r20
+
+# ifdef ENABLE_LWS_DEBUG
+ /*
+ DEBUG, check for deadlock!
+ If the thread register values are the same
+ then we were the one that locked it last and
+ this is a recursive call that will deadlock.
+ We *must* give up this call and fail.
+ */
+ ldw 4(%sr2,%r20), %r28 /* Load thread register */
+ mfctl %cr27, %r21 /* Get current thread register */
+ cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
+ b lws_exit /* Return error! */
+ ldo -EDEADLOCK(%r0), %r21
+cas_lock:
+ cmpb,=,n %r0, %r28, cas_nocontend /* Is nobody using it? */
+ ldo 1(%r0), %r28 /* 1st case */
+ b lws_exit /* Contended... */
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+cas_nocontend:
+# endif
+/* ENABLE_LWS_DEBUG */
+
+ ldcw 0(%sr2,%r20), %r28 /* Try to acquire the lock */
+ cmpb,<>,n %r0, %r28, cas_action /* Did we get it? */
+cas_wouldblock:
+ ldo 2(%r0), %r28 /* 2nd case */
+ b lws_exit /* Contended... */
+ ldo -EAGAIN(%r0), %r21 /* Spin in userspace */
+#endif
+/* CONFIG_SMP */
+
+ /*
+ prev = *addr;
+ if ( prev == old )
+ *addr = new;
+ return prev;
+ */
+
+ /* NOTES:
+ This all works because intr_do_signal
+ and schedule both check the return iasq
+ and see that we are on the kernel page,
+ so this process is never scheduled off
+ nor is it ever sent any signal of any sort;
+ thus it is wholly atomic from userspace's
+ perspective.
+ */
+cas_action:
+#if defined CONFIG_SMP && defined ENABLE_LWS_DEBUG
+ /* DEBUG */
+ mfctl %cr27, %r1
+ stw %r1, 4(%sr2,%r20)
+#endif
+ /* The load and store could fail */
+1: ldw 0(%sr3,%r26), %r28
+ sub,<> %r28, %r25, %r0
+2: stw %r24, 0(%sr3,%r26)
+#ifdef CONFIG_SMP
+ /* Free lock */
+ stw %r20, 0(%sr2,%r20)
+# ifdef ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+ stw %r0, 4(%sr2,%r20)
+# endif
+#endif
+ /* Return to userspace, set no error */
+ b lws_exit
+ copy %r0, %r21
+
+3:
+ /* Error occurred on load or store */
+#ifdef CONFIG_SMP
+ /* Free lock */
+ stw %r20, 0(%sr2,%r20)
+# ifdef ENABLE_LWS_DEBUG
+ stw %r0, 4(%sr2,%r20)
+# endif
+#endif
+ b lws_exit
+ ldo -EFAULT(%r0),%r21 /* set errno */
+ nop
+ nop
+ nop
+ nop
+
+ /* Two exception table entries, one for the load,
+ the other for the store. Either return -EFAULT.
+ Each of the entries must be relocated. */
+ .section __ex_table,"aw"
+#ifdef __LP64__
+ /* Pad the address calculation */
+ .word 0,(2b - linux_gateway_page)
+ .word 0,(3b - linux_gateway_page)
+#else
+ .word (2b - linux_gateway_page)
+ .word (3b - linux_gateway_page)
+#endif
+ .previous
+
+ .section __ex_table,"aw"
+#ifdef __LP64__
+ /* Pad the address calculation */
+ .word 0,(1b - linux_gateway_page)
+ .word 0,(3b - linux_gateway_page)
+#else
+ .word (1b - linux_gateway_page)
+ .word (3b - linux_gateway_page)
+#endif
+ .previous
+
+end_compare_and_swap:
+
+ /* Make sure nothing else is placed on this page */
+ .align 4096
+ .export end_linux_gateway_page
+end_linux_gateway_page:
+
+ /* Relocate symbols assuming linux_gateway_page is mapped
+ to virtual address 0x0 */
+#ifdef __LP64__
+ /* FIXME: The code will always be on the gateway page
+ and thus it will be in the first 4k, but the
+ assembler seems to think that the final
+ subtraction result is only a word in
+ length, so we pad the value.
+ */
+#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
+#else
+#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
+#endif
+
+ .align 4096
+ /* Light-weight-syscall table */
+ /* Start of lws table. */
+ .export lws_table
+.Llws_table:
+lws_table:
+ LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
+ LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+ /* End of lws table */
+
+ .align 4096
+ .export sys_call_table
+.Lsys_call_table:
+sys_call_table:
+#include "syscall_table.S"
+
+#ifdef __LP64__
+ .align 4096
+ .export sys_call_table64
+.Lsys_call_table64:
+sys_call_table64:
+#define SYSCALL_TABLE_64BIT
+#include "syscall_table.S"
+#endif
+
+#ifdef CONFIG_SMP
+ /*
+ All light-weight-syscall atomic operations
+ will use this set of locks
+ */
+ .section .data
+ .align 4096
+ .export lws_lock_start
+.Llws_lock_start:
+lws_lock_start:
+ /* lws locks */
+ .align 16
+ .rept 16
+ /* Keep locks aligned at 16-bytes */
+ .word 1
+ .word 0
+ .word 0
+ .word 0
+ .endr
+ .previous
+#endif
+/* CONFIG_SMP for lws_lock_start */
+
+.end
+
+
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
new file mode 100644
index 000000000000..779b537100ec
--- /dev/null
+++ b/arch/parisc/kernel/syscall_table.S
@@ -0,0 +1,372 @@
+/* System Call Table
+ *
+ * Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
+ * Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
+ * Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
+ * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
+ * Copyright (C) 2001 Helge Deller <deller at parisc-linux.org>
+ * Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef ENTRY_SAME
+#undef ENTRY_DIFF
+#undef ENTRY_UHOH
+#undef ENTRY_COMP
+#undef ENTRY_OURS
+#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
+/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
+ * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
+ * implementation is required on wide palinux. Use ENTRY_COMP where
+ * the compatability layer has a useful 32-bit implementation.
+ */
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys32_##_name_
+#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
+#define ENTRY_OURS(_name_) .dword parisc_##_name_
+#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
+#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
+#define ENTRY_SAME(_name_) .dword sys_##_name_
+#define ENTRY_DIFF(_name_) .dword sys_##_name_
+#define ENTRY_UHOH(_name_) .dword sys_##_name_
+#define ENTRY_OURS(_name_) .dword sys_##_name_
+#define ENTRY_COMP(_name_) .dword sys_##_name_
+#else
+#define ENTRY_SAME(_name_) .word sys_##_name_
+#define ENTRY_DIFF(_name_) .word sys_##_name_
+#define ENTRY_UHOH(_name_) .word sys_##_name_
+#define ENTRY_OURS(_name_) .word parisc_##_name_
+#define ENTRY_COMP(_name_) .word sys_##_name_
+#endif
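As a concrete example of the three expansions: ENTRY_DIFF(lseek) below emits .dword sys32_lseek in the wide kernel's narrow-compat table, .dword sys_lseek when this file is re-included with SYSCALL_TABLE_64BIT defined, and .word sys_lseek on a narrow kernel; ENTRY_OURS(pread64) picks parisc_pread64 in the first and last of those cases and falls back to sys_pread64 in the 64-bit pass.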
+
+ ENTRY_SAME(restart_syscall) /* 0 */
+ ENTRY_SAME(exit)
+ ENTRY_SAME(fork_wrapper)
+ ENTRY_SAME(read)
+ ENTRY_SAME(write)
+ ENTRY_SAME(open) /* 5 */
+ ENTRY_SAME(close)
+ ENTRY_SAME(waitpid)
+ ENTRY_SAME(creat)
+ ENTRY_SAME(link)
+ ENTRY_SAME(unlink) /* 10 */
+ ENTRY_DIFF(execve_wrapper)
+ ENTRY_SAME(chdir)
+ /* See comments in kernel/time.c!!! Maybe we don't need this? */
+ ENTRY_COMP(time)
+ ENTRY_SAME(mknod)
+ ENTRY_SAME(chmod) /* 15 */
+ ENTRY_SAME(lchown)
+ ENTRY_SAME(socket)
+ /* struct stat is MAYBE identical wide and narrow ?? */
+ ENTRY_COMP(newstat)
+ ENTRY_DIFF(lseek)
+ ENTRY_SAME(getpid) /* 20 */
+ /* the 'void * data' parameter may need re-packing in wide */
+ ENTRY_COMP(mount)
+ /* concerned about struct sockaddr in wide/narrow */
+ /* ---> I think sockaddr is OK unless the compiler packs the struct */
+ /* differently to align the char array */
+ ENTRY_SAME(bind)
+ ENTRY_SAME(setuid)
+ ENTRY_SAME(getuid)
+ ENTRY_COMP(stime) /* 25 */
+ ENTRY_SAME(ptrace)
+ ENTRY_SAME(alarm)
+ /* see stat comment */
+ ENTRY_COMP(newfstat)
+ ENTRY_SAME(pause)
+ /* struct utimbuf uses time_t which might vary */
+ ENTRY_COMP(utime) /* 30 */
+ /* struct sockaddr... */
+ ENTRY_SAME(connect)
+ ENTRY_SAME(listen)
+ ENTRY_SAME(access)
+ ENTRY_SAME(nice)
+ /* struct sockaddr... */
+ ENTRY_SAME(accept) /* 35 */
+ ENTRY_SAME(sync)
+ ENTRY_SAME(kill)
+ ENTRY_SAME(rename)
+ ENTRY_SAME(mkdir)
+ ENTRY_SAME(rmdir) /* 40 */
+ ENTRY_SAME(dup)
+ ENTRY_SAME(pipe)
+ ENTRY_COMP(times)
+ /* struct sockaddr... */
+ ENTRY_SAME(getsockname)
+ /* it seems possible brk() could return a >4G pointer... */
+ ENTRY_SAME(brk) /* 45 */
+ ENTRY_SAME(setgid)
+ ENTRY_SAME(getgid)
+ ENTRY_SAME(signal)
+ ENTRY_SAME(geteuid)
+ ENTRY_SAME(getegid) /* 50 */
+ ENTRY_SAME(acct)
+ ENTRY_SAME(umount)
+ /* struct sockaddr... */
+ ENTRY_SAME(getpeername)
+ ENTRY_COMP(ioctl)
+ ENTRY_COMP(fcntl) /* 55 */
+ ENTRY_SAME(socketpair)
+ ENTRY_SAME(setpgid)
+ ENTRY_SAME(send)
+ ENTRY_SAME(newuname)
+ ENTRY_SAME(umask) /* 60 */
+ ENTRY_SAME(chroot)
+ ENTRY_SAME(ustat)
+ ENTRY_SAME(dup2)
+ ENTRY_SAME(getppid)
+ ENTRY_SAME(getpgrp) /* 65 */
+ ENTRY_SAME(setsid)
+ ENTRY_SAME(pivot_root)
+ /* I don't like this */
+ ENTRY_UHOH(sgetmask)
+ ENTRY_UHOH(ssetmask)
+ ENTRY_SAME(setreuid) /* 70 */
+ ENTRY_SAME(setregid)
+ ENTRY_SAME(mincore)
+ ENTRY_COMP(sigpending)
+ ENTRY_SAME(sethostname)
+ /* Following 3 have linux-common-code structs containing longs :-( */
+ ENTRY_COMP(setrlimit) /* 75 */
+ ENTRY_COMP(getrlimit)
+ ENTRY_COMP(getrusage)
+ /* struct timeval and timezone are maybe?? consistent wide and narrow */
+ ENTRY_DIFF(gettimeofday)
+ ENTRY_DIFF(settimeofday)
+ ENTRY_SAME(getgroups) /* 80 */
+ ENTRY_SAME(setgroups)
+ /* struct socketaddr... */
+ ENTRY_SAME(sendto)
+ ENTRY_SAME(symlink)
+ /* see stat comment */
+ ENTRY_COMP(newlstat)
+ ENTRY_SAME(readlink) /* 85 */
+ ENTRY_SAME(ni_syscall) /* was uselib */
+ ENTRY_SAME(swapon)
+ ENTRY_SAME(reboot)
+ ENTRY_SAME(mmap2)
+ ENTRY_SAME(mmap) /* 90 */
+ ENTRY_SAME(munmap)
+ ENTRY_SAME(truncate)
+ ENTRY_SAME(ftruncate)
+ ENTRY_SAME(fchmod)
+ ENTRY_SAME(fchown) /* 95 */
+ ENTRY_SAME(getpriority)
+ ENTRY_SAME(setpriority)
+ ENTRY_SAME(recv)
+ ENTRY_COMP(statfs)
+ ENTRY_COMP(fstatfs) /* 100 */
+ ENTRY_SAME(stat64)
+ ENTRY_SAME(ni_syscall) /* was socketcall */
+ ENTRY_SAME(syslog)
+ /* even though manpage says struct timeval contains longs, ours has
+ * time_t and suseconds_t -- both of which are safe wide/narrow */
+ ENTRY_COMP(setitimer)
+ ENTRY_COMP(getitimer) /* 105 */
+ ENTRY_SAME(capget)
+ ENTRY_SAME(capset)
+ ENTRY_OURS(pread64)
+ ENTRY_OURS(pwrite64)
+ ENTRY_SAME(getcwd) /* 110 */
+ ENTRY_SAME(vhangup)
+ ENTRY_SAME(fstat64)
+ ENTRY_SAME(vfork_wrapper)
+ /* struct rusage contains longs... */
+ ENTRY_COMP(wait4)
+ ENTRY_SAME(swapoff) /* 115 */
+ ENTRY_DIFF(sysinfo)
+ ENTRY_SAME(shutdown)
+ ENTRY_SAME(fsync)
+ ENTRY_SAME(madvise)
+ ENTRY_SAME(clone_wrapper) /* 120 */
+ ENTRY_SAME(setdomainname)
+ ENTRY_DIFF(sendfile)
+ /* struct sockaddr... */
+ ENTRY_SAME(recvfrom)
+ /* struct timex contains longs */
+ ENTRY_DIFF(adjtimex)
+ ENTRY_SAME(mprotect) /* 125 */
+ /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
+ ENTRY_COMP(sigprocmask)
+ ENTRY_SAME(ni_syscall) /* create_module */
+ ENTRY_SAME(init_module)
+ ENTRY_SAME(delete_module)
+ ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
+ /* time_t inside struct dqblk */
+ ENTRY_SAME(quotactl)
+ ENTRY_SAME(getpgid)
+ ENTRY_SAME(fchdir)
+ ENTRY_SAME(bdflush)
+ ENTRY_SAME(sysfs) /* 135 */
+ ENTRY_SAME(personality)
+ ENTRY_SAME(ni_syscall) /* for afs_syscall */
+ ENTRY_SAME(setfsuid)
+ ENTRY_SAME(setfsgid)
+ /* I think this might work */
+ ENTRY_SAME(llseek) /* 140 */
+ /* struct linux_dirent has longs, like 'unsigned long d_ino' which
+ * almost definitely should be 'ino_t d_ino' but it's too late now */
+ ENTRY_DIFF(getdents)
+ /* it is POSSIBLE that select will be OK because even though fd_set
+ * contains longs, the macros and sizes are clever. */
+ ENTRY_COMP(select)
+ ENTRY_SAME(flock)
+ ENTRY_SAME(msync)
+ /* struct iovec contains pointers */
+ ENTRY_COMP(readv) /* 145 */
+ ENTRY_COMP(writev)
+ ENTRY_SAME(getsid)
+ ENTRY_SAME(fdatasync)
+ /* struct __sysctl_args is a mess */
+ ENTRY_DIFF(sysctl)
+ ENTRY_SAME(mlock) /* 150 */
+ ENTRY_SAME(munlock)
+ ENTRY_SAME(mlockall)
+ ENTRY_SAME(munlockall)
+ /* struct sched_param is ok for now */
+ ENTRY_SAME(sched_setparam)
+ ENTRY_SAME(sched_getparam) /* 155 */
+ ENTRY_SAME(sched_setscheduler)
+ ENTRY_SAME(sched_getscheduler)
+ ENTRY_SAME(sched_yield)
+ ENTRY_SAME(sched_get_priority_max)
+ ENTRY_SAME(sched_get_priority_min) /* 160 */
+ /* These 2 would've worked if someone had defined struct timespec
+ * carefully, like timeval for example (which is about the same).
+ * Unfortunately it contains a long :-( */
+ ENTRY_DIFF(sched_rr_get_interval)
+ ENTRY_COMP(nanosleep)
+ ENTRY_SAME(mremap)
+ ENTRY_SAME(setresuid)
+ ENTRY_SAME(getresuid) /* 165 */
+ ENTRY_DIFF(sigaltstack_wrapper)
+ ENTRY_SAME(ni_syscall) /* query_module */
+ ENTRY_SAME(poll)
+ /* structs contain pointers and an in_addr... */
+ ENTRY_COMP(nfsservctl)
+ ENTRY_SAME(setresgid) /* 170 */
+ ENTRY_SAME(getresgid)
+ ENTRY_SAME(prctl)
+ /* signals need a careful review */
+ ENTRY_SAME(rt_sigreturn_wrapper)
+ ENTRY_DIFF(rt_sigaction)
+ ENTRY_DIFF(rt_sigprocmask) /* 175 */
+ ENTRY_DIFF(rt_sigpending)
+ ENTRY_COMP(rt_sigtimedwait)
+ /* even though the struct siginfo_t is different, it appears like
+ * all the paths use values which should be the same wide and narrow.
+ * Also the struct is padded to 128 bytes which means we don't have
+ * to worry about faulting trying to copy in a larger 64-bit
+ * struct from a 32-bit user-space app.
+ */
+ ENTRY_SAME(rt_sigqueueinfo)
+ ENTRY_SAME(rt_sigsuspend_wrapper) /* not really SAME -- see the code */
+ ENTRY_SAME(chown) /* 180 */
+ /* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
+ ENTRY_COMP(setsockopt)
+ ENTRY_SAME(getsockopt)
+ ENTRY_COMP(sendmsg)
+ ENTRY_COMP(recvmsg)
+ ENTRY_SAME(semop) /* 185 */
+ ENTRY_SAME(semget)
+ ENTRY_DIFF(semctl)
+ ENTRY_DIFF(msgsnd)
+ ENTRY_DIFF(msgrcv)
+ ENTRY_SAME(msgget) /* 190 */
+ ENTRY_SAME(msgctl)
+ ENTRY_SAME(shmat_wrapper)
+ ENTRY_SAME(shmdt)
+ ENTRY_SAME(shmget)
+ ENTRY_SAME(shmctl) /* 195 */
+ ENTRY_SAME(ni_syscall) /* streams1 */
+ ENTRY_SAME(ni_syscall) /* streams2 */
+ ENTRY_SAME(lstat64)
+ ENTRY_OURS(truncate64)
+ ENTRY_OURS(ftruncate64) /* 200 */
+ ENTRY_SAME(getdents64)
+ ENTRY_COMP(fcntl64)
+ ENTRY_SAME(ni_syscall) /* attrctl -- dead */
+ ENTRY_SAME(ni_syscall) /* acl_get -- dead */
+ ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
+ ENTRY_SAME(gettid)
+ ENTRY_OURS(readahead)
+ ENTRY_SAME(tkill)
+ ENTRY_SAME(sendfile64)
+ ENTRY_COMP(futex) /* 210 */
+ ENTRY_COMP(sched_setaffinity)
+ ENTRY_COMP(sched_getaffinity)
+ ENTRY_SAME(ni_syscall) /* set_thread_area */
+ ENTRY_SAME(ni_syscall) /* get_thread_area */
+ ENTRY_SAME(io_setup) /* 215 */
+ ENTRY_SAME(io_destroy)
+ ENTRY_SAME(io_getevents)
+ ENTRY_SAME(io_submit)
+ ENTRY_SAME(io_cancel)
+ ENTRY_SAME(alloc_hugepages) /* 220 */
+ ENTRY_SAME(free_hugepages)
+ ENTRY_SAME(exit_group)
+ ENTRY_DIFF(lookup_dcookie)
+ ENTRY_SAME(epoll_create)
+ ENTRY_SAME(epoll_ctl) /* 225 */
+ ENTRY_SAME(epoll_wait)
+ ENTRY_SAME(remap_file_pages)
+ ENTRY_SAME(semtimedop)
+ ENTRY_SAME(mq_open)
+ ENTRY_SAME(mq_unlink) /* 230 */
+ ENTRY_SAME(mq_timedsend)
+ ENTRY_SAME(mq_timedreceive)
+ ENTRY_SAME(mq_notify)
+ ENTRY_SAME(mq_getsetattr)
+ ENTRY_COMP(waitid) /* 235 */
+ ENTRY_OURS(fadvise64_64)
+ ENTRY_SAME(set_tid_address)
+ ENTRY_SAME(setxattr)
+ ENTRY_SAME(lsetxattr)
+ ENTRY_SAME(fsetxattr) /* 240 */
+ ENTRY_SAME(getxattr)
+ ENTRY_SAME(lgetxattr)
+ ENTRY_SAME(fgetxattr)
+ ENTRY_SAME(listxattr)
+ ENTRY_SAME(llistxattr) /* 245 */
+ ENTRY_SAME(flistxattr)
+ ENTRY_SAME(removexattr)
+ ENTRY_SAME(lremovexattr)
+ ENTRY_SAME(fremovexattr)
+ ENTRY_COMP(timer_create) /* 250 */
+ ENTRY_COMP(timer_settime)
+ ENTRY_COMP(timer_gettime)
+ ENTRY_SAME(timer_getoverrun)
+ ENTRY_SAME(timer_delete)
+ ENTRY_COMP(clock_settime) /* 255 */
+ ENTRY_COMP(clock_gettime)
+ ENTRY_COMP(clock_getres)
+ ENTRY_COMP(clock_nanosleep)
+ ENTRY_SAME(tgkill)
+ ENTRY_COMP(mbind) /* 260 */
+ ENTRY_COMP(get_mempolicy)
+ ENTRY_COMP(set_mempolicy)
+ /* Nothing yet */
+
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
new file mode 100644
index 000000000000..6cf7407344ba
--- /dev/null
+++ b/arch/parisc/kernel/time.c
@@ -0,0 +1,243 @@
+/*
+ * linux/arch/parisc/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
+ * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
+ *
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/profile.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/param.h>
+#include <asm/pdc.h>
+#include <asm/led.h>
+
+#include <linux/timex.h>
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+/* xtime and wall_jiffies keep wall-clock time */
+extern unsigned long wall_jiffies;
+
+static long clocktick; /* timer cycles per tick */
+static long halftick;
+
+#ifdef CONFIG_SMP
+extern void smp_do_timer(struct pt_regs *regs);
+#endif
+
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ long now;
+ long next_tick;
+ int nticks;
+ int cpu = smp_processor_id();
+
+ profile_tick(CPU_PROFILING, regs);
+
+ now = mfctl(16);
+ /* initialize next_tick to time at last clocktick */
+ next_tick = cpu_data[cpu].it_value;
+
+ /* since time passes between the interrupt and the mfctl()
+ * above, it is never true that last_tick + clocktick == now. If we
+ * never miss a clocktick, we could set next_tick = last_tick + clocktick
+ * but maybe we'll miss ticks, hence the loop.
+ *
+ * Variables are *signed*.
+ */
+
+ nticks = 0;
+ while((next_tick - now) < halftick) {
+ next_tick += clocktick;
+ nticks++;
+ }
+ mtctl(next_tick, 16);
+ cpu_data[cpu].it_value = next_tick;
+
+ while (nticks--) {
+#ifdef CONFIG_SMP
+ smp_do_timer(regs);
+#else
+ update_process_times(user_mode(regs));
+#endif
+ if (cpu == 0) {
+ write_seqlock(&xtime_lock);
+ do_timer(regs);
+ write_sequnlock(&xtime_lock);
+ }
+ }
+
+#ifdef CONFIG_CHASSIS_LCD_LED
+ /* Only schedule the led tasklet on cpu 0, and only if it
+ * is enabled.
+ */
+ if (cpu == 0 && !atomic_read(&led_tasklet.count))
+ tasklet_schedule(&led_tasklet);
+#endif
+
+ /* check soft power switch status */
+ if (cpu == 0 && !atomic_read(&power_tasklet.count))
+ tasklet_schedule(&power_tasklet);
+
+ return IRQ_HANDLED;
+}
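To make the catch-up loop above concrete: if interrupts were held off so that 'now' is 2.3 clockticks past the previous it_value, the loop adds clocktick three times (the difference goes from -2.3 to -1.3 to -0.3 and finally +0.7 of a tick, which is at least halftick), nticks ends up as 3, three timer ticks are accounted for, and the next interrupt is programmed 0.7 of a tick into the future.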
+
+/*** converted from ia64 ***/
+/*
+ * Return the number of microseconds that elapsed since the last
+ * update to wall time (aka xtime aka wall_jiffies). The xtime_lock
+ * must be at least read-locked when calling this routine.
+ */
+static inline unsigned long
+gettimeoffset (void)
+{
+#ifndef CONFIG_SMP
+ /*
+ * FIXME: This won't work on smp because jiffies are updated by cpu 0.
+ * Once parisc-linux learns the cr16 difference between processors,
+ * this could be made to work.
+ */
+ long last_tick;
+ long elapsed_cycles;
+
+ /* it_value is the intended time of the next tick */
+ last_tick = cpu_data[smp_processor_id()].it_value;
+
+ /* Subtract one tick and account for possible difference between
+ * when we expected the tick and when it actually arrived.
+ * (aka wall vs real)
+ */
+ last_tick -= clocktick * (jiffies - wall_jiffies + 1);
+ elapsed_cycles = mfctl(16) - last_tick;
+
+ /* the precision of this math could be improved */
+ return elapsed_cycles / (PAGE0->mem_10msec / 10000);
+#else
+ return 0;
+#endif
+}
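Since PAGE0->mem_10msec is the number of CR16 cycles in 10 ms (see its use in time_init() below), PAGE0->mem_10msec / 10000 is cycles per microsecond; for a hypothetical 250 MHz CR16 that divisor is 250, so 1000 elapsed cycles would be reported as 4 microseconds.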
+
+void
+do_gettimeofday (struct timeval *tv)
+{
+ unsigned long flags, seq, usec, sec;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ usec = gettimeoffset();
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / 1000);
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ ++sec;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+
+int
+do_settimeofday (struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ {
+ /*
+ * This is revolting. We need to set "xtime"
+ * correctly. However, the value in this location is
+ * the value at the most recent update of wall time.
+ * Discover what correction gettimeofday would have
+ * done, and then undo it!
+ */
+ nsec -= gettimeoffset() * 1000;
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ }
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+EXPORT_SYMBOL(do_settimeofday);
+
+/*
+ * XXX: We can do better than this.
+ * Returns nanoseconds
+ */
+
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies * (1000000000 / HZ);
+}
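For instance, with HZ at 100 the expression above credits 1000000000 / 100 = 10,000,000 ns per jiffy, so this clock only advances in 10 ms steps, which is what the XXX note above is alluding to.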
+
+
+void __init time_init(void)
+{
+ unsigned long next_tick;
+ static struct pdc_tod tod_data;
+
+ clocktick = (100 * PAGE0->mem_10msec) / HZ;
+ halftick = clocktick / 2;
+
+ /* Setup clock interrupt timing */
+
+ next_tick = mfctl(16);
+ next_tick += clocktick;
+ cpu_data[smp_processor_id()].it_value = next_tick;
+
+ /* kick off Itimer (CR16) */
+ mtctl(next_tick, 16);
+
+ if(pdc_tod_read(&tod_data) == 0) {
+ write_seqlock_irq(&xtime_lock);
+ xtime.tv_sec = tod_data.tod_sec;
+ xtime.tv_nsec = tod_data.tod_usec * 1000;
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+ write_sequnlock_irq(&xtime_lock);
+ } else {
+ printk(KERN_ERR "Error reading tod clock\n");
+ xtime.tv_sec = 0;
+ xtime.tv_nsec = 0;
+ }
+}
+
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
new file mode 100644
index 000000000000..ac2a40681414
--- /dev/null
+++ b/arch/parisc/kernel/topology.c
@@ -0,0 +1,37 @@
+/*
+ * arch/parisc/kernel/topology.c - Populate driverfs with topology information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+
+static struct cpu cpu_devices[NR_CPUS];
+
+static int __init topology_init(void)
+{
+ struct node *parent = NULL;
+ int num;
+
+ for_each_present_cpu(num) {
+ register_cpu(&cpu_devices[num], num, parent);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
new file mode 100644
index 000000000000..d2e5b229a2f4
--- /dev/null
+++ b/arch/parisc/kernel/traps.c
@@ -0,0 +1,834 @@
+/*
+ * linux/arch/parisc/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/console.h>
+#include <linux/kallsyms.h>
+
+#include <asm/assembly.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/unaligned.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pdc.h>
+#include <asm/pdc_chassis.h>
+#include <asm/unwind.h>
+
+#include "../math-emu/math-emu.h" /* for handle_fpe() */
+
+#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
+ /* dumped to the console via printk) */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+DEFINE_SPINLOCK(pa_dbit_lock);
+#endif
+
+int printbinary(char *buf, unsigned long x, int nbits)
+{
+ unsigned long mask = 1UL << (nbits - 1);
+ while (mask != 0) {
+ *buf++ = (mask & x ? '1' : '0');
+ mask >>= 1;
+ }
+ *buf = '\0';
+
+ return nbits;
+}
+
+#ifdef __LP64__
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+
+void show_regs(struct pt_regs *regs)
+{
+ int i;
+ char buf[128], *p;
+ char *level;
+ unsigned long cr30;
+ unsigned long cr31;
+
+ level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
+
+ printk("%s\n", level); /* don't want to have that pretty register dump messed up */
+
+ printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
+ printbinary(buf, regs->gr[0], 32);
+ printk("%sPSW: %s %s\n", level, buf, print_tainted());
+
+ for (i = 0; i < 32; i += 4) {
+ int j;
+ p = buf;
+ p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
+ for (j = 0; j < 4; j++) {
+ p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
+ }
+ printk("%s\n", buf);
+ }
+
+ for (i = 0; i < 8; i += 4) {
+ int j;
+ p = buf;
+ p += sprintf(p, "%ssr%d-%d ", level, i, i + 3);
+ for (j = 0; j < 4; j++) {
+ p += sprintf(p, " " RFMT, regs->sr[i + j]);
+ }
+ printk("%s\n", buf);
+ }
+
+#if RIDICULOUSLY_VERBOSE
+ for (i = 0; i < 32; i += 2)
+ printk("%sFR%02d : %016lx FR%2d : %016lx", level, i,
+ regs->fr[i], i+1, regs->fr[i+1]);
+#endif
+
+ cr30 = mfctl(30);
+ cr31 = mfctl(31);
+ printk("%s\n", level);
+ printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
+ level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
+ printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
+ level, regs->iir, regs->isr, regs->ior);
+ printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
+ level, current_thread_info()->cpu, cr30, cr31);
+ printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
+ printk(level);
+ print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
+ printk(level);
+ print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
+ printk(level);
+ print_symbol(" RP(r2): %s\n", regs->gr[2]);
+}
+
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+static void do_show_stack(struct unwind_frame_info *info)
+{
+ int i = 1;
+
+ printk("Backtrace:\n");
+ while (i <= 16) {
+ if (unwind_once(info) < 0 || info->ip == 0)
+ break;
+
+ if (__kernel_text_address(info->ip)) {
+ printk(" [<" RFMT ">] ", info->ip);
+#ifdef CONFIG_KALLSYMS
+ print_symbol("%s\n", info->ip);
+#else
+ if ((i & 0x03) == 0)
+ printk("\n");
+#endif
+ i++;
+ }
+ }
+ printk("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *s)
+{
+ struct unwind_frame_info info;
+
+ if (!task) {
+ unsigned long sp;
+ struct pt_regs *r;
+
+HERE:
+ asm volatile ("copy %%r30, %0" : "=r"(sp));
+ r = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+ if (!r)
+ return;
+ memset(r, 0, sizeof(struct pt_regs));
+ r->iaoq[0] = (unsigned long)&&HERE;
+ r->gr[2] = (unsigned long)__builtin_return_address(0);
+ r->gr[30] = sp;
+ unwind_frame_init(&info, current, r);
+ kfree(r);
+ } else {
+ unwind_frame_init_from_blocked_task(&info, task);
+ }
+
+ do_show_stack(&info);
+}
+
+void die_if_kernel(char *str, struct pt_regs *regs, long err)
+{
+ if (user_mode(regs)) {
+ if (err == 0)
+ return; /* STFU */
+
+ printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
+ current->comm, current->pid, str, err, regs->iaoq[0]);
+#ifdef PRINT_USER_FAULTS
+ /* XXX for debugging only */
+ show_regs(regs);
+#endif
+ return;
+ }
+
+ oops_in_progress = 1;
+
+ /* Amuse the user in a SPARC fashion */
+ printk(
+" _______________________________ \n"
+" < Your System ate a SPARC! Gah! >\n"
+" ------------------------------- \n"
+" \\ ^__^\n"
+" \\ (xx)\\_______\n"
+" (__)\\ )\\/\\\n"
+" U ||----w |\n"
+" || ||\n");
+
+ /* unlock the pdc lock if necessary */
+ pdc_emergency_unlock();
+
+ /* maybe the kernel hasn't booted very far yet and hasn't been able
+ * to initialize the serial or STI console. In that case we should
+ * re-enable the pdc console, so that the user will be able to
+ * identify the problem. */
+ if (!console_drivers)
+ pdc_console_restart();
+
+ printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
+ current->comm, current->pid, str, err);
+ show_regs(regs);
+
+ /* Wot's wrong wif bein' racy? */
+ if (current->thread.flags & PARISC_KERNEL_DEATH) {
+ printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
+ local_irq_enable();
+ while (1);
+ }
+
+ current->thread.flags |= PARISC_KERNEL_DEATH;
+ do_exit(SIGSEGV);
+}
+
+int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
+{
+ return syscall(regs);
+}
+
+/* gdb uses break 4,8 */
+#define GDB_BREAK_INSN 0x10004
+void handle_gdb_break(struct pt_regs *regs, int wot)
+{
+ struct siginfo si;
+
+ si.si_code = wot;
+ si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
+ si.si_signo = SIGTRAP;
+ si.si_errno = 0;
+ force_sig_info(SIGTRAP, &si, current);
+}
+
+void handle_break(unsigned iir, struct pt_regs *regs)
+{
+ struct siginfo si;
+
+ switch(iir) {
+ case 0x00:
+#ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "break 0,0: pid=%d command='%s'\n",
+ current->pid, current->comm);
+#endif
+ die_if_kernel("Breakpoint", regs, 0);
+#ifdef PRINT_USER_FAULTS
+ show_regs(regs);
+#endif
+ si.si_code = TRAP_BRKPT;
+ si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
+ si.si_signo = SIGTRAP;
+ force_sig_info(SIGTRAP, &si, current);
+ break;
+
+ case GDB_BREAK_INSN:
+ die_if_kernel("Breakpoint", regs, 0);
+ handle_gdb_break(regs, TRAP_BRKPT);
+ break;
+
+ default:
+#ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "break %#08x: pid=%d command='%s'\n",
+ iir, current->pid, current->comm);
+ show_regs(regs);
+#endif
+ si.si_signo = SIGTRAP;
+ si.si_code = TRAP_BRKPT;
+ si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
+ force_sig_info(SIGTRAP, &si, current);
+ return;
+ }
+}
+
+
+int handle_toc(void)
+{
+ printk(KERN_CRIT "TOC call.\n");
+ return 0;
+}
+
+static void default_trap(int code, struct pt_regs *regs)
+{
+ printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
+ show_regs(regs);
+}
+
+void (*cpu_lpmc) (int code, struct pt_regs *regs) = default_trap;
+
+
+void transfer_pim_to_trap_frame(struct pt_regs *regs)
+{
+ register int i;
+ extern unsigned int hpmc_pim_data[];
+ struct pdc_hpmc_pim_11 *pim_narrow;
+ struct pdc_hpmc_pim_20 *pim_wide;
+
+ if (boot_cpu_data.cpu_type >= pcxu) {
+
+ pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
+
+ /*
+ * Note: The following code will probably generate a
+ * bunch of truncation error warnings from the compiler.
+ * Could be handled with an ifdef, but perhaps there
+ * is a better way.
+ */
+
+ regs->gr[0] = pim_wide->cr[22];
+
+ for (i = 1; i < 32; i++)
+ regs->gr[i] = pim_wide->gr[i];
+
+ for (i = 0; i < 32; i++)
+ regs->fr[i] = pim_wide->fr[i];
+
+ for (i = 0; i < 8; i++)
+ regs->sr[i] = pim_wide->sr[i];
+
+ regs->iasq[0] = pim_wide->cr[17];
+ regs->iasq[1] = pim_wide->iasq_back;
+ regs->iaoq[0] = pim_wide->cr[18];
+ regs->iaoq[1] = pim_wide->iaoq_back;
+
+ regs->sar = pim_wide->cr[11];
+ regs->iir = pim_wide->cr[19];
+ regs->isr = pim_wide->cr[20];
+ regs->ior = pim_wide->cr[21];
+ }
+ else {
+ pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
+
+ regs->gr[0] = pim_narrow->cr[22];
+
+ for (i = 1; i < 32; i++)
+ regs->gr[i] = pim_narrow->gr[i];
+
+ for (i = 0; i < 32; i++)
+ regs->fr[i] = pim_narrow->fr[i];
+
+ for (i = 0; i < 8; i++)
+ regs->sr[i] = pim_narrow->sr[i];
+
+ regs->iasq[0] = pim_narrow->cr[17];
+ regs->iasq[1] = pim_narrow->iasq_back;
+ regs->iaoq[0] = pim_narrow->cr[18];
+ regs->iaoq[1] = pim_narrow->iaoq_back;
+
+ regs->sar = pim_narrow->cr[11];
+ regs->iir = pim_narrow->cr[19];
+ regs->isr = pim_narrow->cr[20];
+ regs->ior = pim_narrow->cr[21];
+ }
+
+ /*
+ * The following fields only have meaning if we came through
+ * another path. So just zero them here.
+ */
+
+ regs->ksp = 0;
+ regs->kpc = 0;
+ regs->orig_r28 = 0;
+}
+
+
+/*
+ * This routine is called as a last resort when everything else
+ * has gone clearly wrong. We get called for faults in kernel space,
+ * and HPMC's.
+ */
+void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
+{
+ static DEFINE_SPINLOCK(terminate_lock);
+
+ oops_in_progress = 1;
+
+ set_eiem(0);
+ local_irq_disable();
+ spin_lock(&terminate_lock);
+
+ /* unlock the pdc lock if necessary */
+ pdc_emergency_unlock();
+
+ /* restart pdc console if necessary */
+ if (!console_drivers)
+ pdc_console_restart();
+
+ /* Not all paths will gutter the processor... */
+ switch(code){
+
+ case 1:
+ transfer_pim_to_trap_frame(regs);
+ break;
+
+ default:
+ /* Fall through */
+ break;
+
+ }
+
+ {
+ /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
+ struct unwind_frame_info info;
+ unwind_frame_init(&info, current, regs);
+ do_show_stack(&info);
+ }
+
+ printk("\n");
+ printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
+ msg, code, regs, offset);
+ show_regs(regs);
+
+ spin_unlock(&terminate_lock);
+
+ /* put soft power button back under hardware control;
+ * if the user had pressed it once at any time, the
+ * system will shut down immediately right here. */
+ pdc_soft_power_button(0);
+
+ /* Call kernel panic() so reboot timeouts work properly
+ * FIXME: This function should be on the list of
+ * panic notifiers, and we should call panic
+ * directly from the location that we wish.
+ * e.g. We should not call panic from
+	 * parisc_terminate, but rather the other way around.
+ * This hack works, prints the panic message twice,
+ * and it enables reboot timers!
+ */
+ panic(msg);
+}
+
+void handle_interruption(int code, struct pt_regs *regs)
+{
+ unsigned long fault_address = 0;
+ unsigned long fault_space = 0;
+ struct siginfo si;
+
+ if (code == 1)
+ pdc_console_restart(); /* switch back to pdc if HPMC */
+ else
+ local_irq_enable();
+
+ /* Security check:
+ * If the priority level is still user, and the
+ * faulting space is not equal to the active space
+ * then the user is attempting something in a space
+ * that does not belong to them. Kill the process.
+ *
+ * This is normally the situation when the user
+ * attempts to jump into the kernel space at the
+ * wrong offset, be it at the gateway page or a
+ * random location.
+ *
+ * We cannot normally signal the process because it
+ * could *be* on the gateway page, and processes
+ * executing on the gateway page can't have signals
+ * delivered.
+ *
+	 * We merely readjust the address into the user's
+ * space, at a destination address of zero, and
+ * allow processing to continue.
+ */
+ if (((unsigned long)regs->iaoq[0] & 3) &&
+ ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
+ /* Kill the user process later */
+ regs->iaoq[0] = 0 | 3;
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
+ regs->gr[0] &= ~PSW_B;
+ return;
+ }
+
+#if 0
+ printk(KERN_CRIT "Interruption # %d\n", code);
+#endif
+
+ switch(code) {
+
+ case 1:
+ /* High-priority machine check (HPMC) */
+
+ /* set up a new led state on systems shipped with a LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
+
+ parisc_terminate("High Priority Machine Check (HPMC)",
+ regs, code, 0);
+ /* NOT REACHED */
+
+ case 2:
+ /* Power failure interrupt */
+ printk(KERN_CRIT "Power failure interrupt !\n");
+ return;
+
+ case 3:
+ /* Recovery counter trap */
+ regs->gr[0] &= ~PSW_R;
+ if (user_space(regs))
+ handle_gdb_break(regs, TRAP_TRACE);
+ /* else this must be the start of a syscall - just let it run */
+ return;
+
+ case 5:
+ /* Low-priority machine check */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
+
+ flush_all_caches();
+ cpu_lpmc(5, regs);
+ return;
+
+ case 6:
+ /* Instruction TLB miss fault/Instruction page fault */
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+ break;
+
+ case 8:
+ /* Illegal instruction trap */
+ die_if_kernel("Illegal instruction", regs, code);
+ si.si_code = ILL_ILLOPC;
+ goto give_sigill;
+
+ case 9:
+ /* Break instruction trap */
+ handle_break(regs->iir,regs);
+ return;
+
+ case 10:
+ /* Privileged operation trap */
+ die_if_kernel("Privileged operation", regs, code);
+ si.si_code = ILL_PRVOPC;
+ goto give_sigill;
+
+ case 11:
+ /* Privileged register trap */
+ if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
+
+ /* This is a MFCTL cr26/cr27 to gr instruction.
+ * PCXS traps on this, so we need to emulate it.
+ */
+
+ if (regs->iir & 0x00200000)
+ regs->gr[regs->iir & 0x1f] = mfctl(27);
+ else
+ regs->gr[regs->iir & 0x1f] = mfctl(26);
+
+ regs->iaoq[0] = regs->iaoq[1];
+ regs->iaoq[1] += 4;
+ regs->iasq[0] = regs->iasq[1];
+ return;
+ }
+
+ die_if_kernel("Privileged register usage", regs, code);
+ si.si_code = ILL_PRVREG;
+ give_sigill:
+ si.si_signo = SIGILL;
+ si.si_errno = 0;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGILL, &si, current);
+ return;
+
+ case 12:
+ /* Overflow Trap, let the userland signal handler do the cleanup */
+ si.si_signo = SIGFPE;
+ si.si_code = FPE_INTOVF;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGFPE, &si, current);
+ return;
+
+ case 13:
+ /* Conditional Trap
+	   The condition succeeds in an instruction which traps
+ on condition */
+ if(user_mode(regs)){
+ si.si_signo = SIGFPE;
+ /* Set to zero, and let the userspace app figure it out from
+ the insn pointed to by si_addr */
+ si.si_code = 0;
+ si.si_addr = (void __user *) regs->iaoq[0];
+ force_sig_info(SIGFPE, &si, current);
+ return;
+ }
+ /* The kernel doesn't want to handle condition codes */
+ break;
+
+ case 14:
+ /* Assist Exception Trap, i.e. floating point exception. */
+ die_if_kernel("Floating point exception", regs, 0); /* quiet */
+ handle_fpe(regs);
+ return;
+
+ case 15:
+ /* Data TLB miss fault/Data page fault */
+ /* Fall through */
+ case 16:
+ /* Non-access instruction TLB miss fault */
+ /* The instruction TLB entry needed for the target address of the FIC
+ is absent, and hardware can't find it, so we get to cleanup */
+ /* Fall through */
+ case 17:
+ /* Non-access data TLB miss fault/Non-access data page fault */
+ /* FIXME:
+ Still need to add slow path emulation code here!
+ If the insn used a non-shadow register, then the tlb
+ handlers could not have their side-effect (e.g. probe
+ writing to a target register) emulated since rfir would
+ erase the changes to said register. Instead we have to
+ setup everything, call this function we are in, and emulate
+ by hand. Technically we need to emulate:
+ fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
+ */
+ fault_address = regs->ior;
+ fault_space = regs->isr;
+ break;
+
+ case 18:
+	/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
+ /* Check for unaligned access */
+ if (check_unaligned(regs)) {
+ handle_unaligned(regs);
+ return;
+ }
+ /* Fall Through */
+ case 26:
+ /* PCXL: Data memory access rights trap */
+ fault_address = regs->ior;
+ fault_space = regs->isr;
+ break;
+
+ case 19:
+ /* Data memory break trap */
+ regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
+ /* fall thru */
+ case 21:
+ /* Page reference trap */
+ handle_gdb_break(regs, TRAP_HWBKPT);
+ return;
+
+ case 25:
+ /* Taken branch trap */
+ regs->gr[0] &= ~PSW_T;
+ if (user_space(regs))
+ handle_gdb_break(regs, TRAP_BRANCH);
+ /* else this must be the start of a syscall - just let it
+ * run.
+ */
+ return;
+
+ case 7:
+ /* Instruction access rights */
+ /* PCXL: Instruction memory protection trap */
+
+ /*
+ * This could be caused by either: 1) a process attempting
+ * to execute within a vma that does not have execute
+ * permission, or 2) an access rights violation caused by a
+ * flush only translation set up by ptep_get_and_clear().
+ * So we check the vma permissions to differentiate the two.
+ * If the vma indicates we have execute permission, then
+ * the cause is the latter one. In this case, we need to
+ * call do_page_fault() to fix the problem.
+ */
+
+ if (user_mode(regs)) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+ if (vma && (regs->iaoq[0] >= vma->vm_start)
+ && (vma->vm_flags & VM_EXEC)) {
+
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+ up_read(&current->mm->mmap_sem);
+ break; /* call do_page_fault() */
+ }
+ up_read(&current->mm->mmap_sem);
+ }
+ /* Fall Through */
+ case 27:
+ /* Data memory protection ID trap */
+ die_if_kernel("Protection id trap", regs, code);
+ si.si_code = SEGV_MAPERR;
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ if (code == 7)
+ si.si_addr = (void __user *) regs->iaoq[0];
+ else
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+
+ case 28:
+ /* Unaligned data reference trap */
+ handle_unaligned(regs);
+ return;
+
+ default:
+ if (user_mode(regs)) {
+#ifdef PRINT_USER_FAULTS
+ printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
+ current->pid, current->comm);
+ show_regs(regs);
+#endif
+ /* SIGBUS, for lack of a better one. */
+ si.si_signo = SIGBUS;
+ si.si_code = BUS_OBJERR;
+ si.si_errno = 0;
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGBUS, &si, current);
+ return;
+ }
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+
+ parisc_terminate("Unexpected interruption", regs, code, 0);
+ /* NOT REACHED */
+ }
+
+ if (user_mode(regs)) {
+ if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
+#ifdef PRINT_USER_FAULTS
+ if (fault_space == 0)
+ printk(KERN_DEBUG "User Fault on Kernel Space ");
+ else
+ printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
+ code);
+ printk("pid=%d command='%s'\n", current->pid, current->comm);
+ show_regs(regs);
+#endif
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SEGV_MAPERR;
+ si.si_addr = (void __user *) regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ return;
+ }
+ }
+ else {
+
+ /*
+ * The kernel should never fault on its own address space.
+ */
+
+ if (fault_space == 0)
+ {
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ parisc_terminate("Kernel Fault", regs, code, fault_address);
+
+ }
+ }
+
+ do_page_fault(regs, code, fault_address);
+}
+
+
+int __init check_ivt(void *iva)
+{
+ int i;
+ u32 check = 0;
+ u32 *ivap;
+ u32 *hpmcp;
+ u32 length;
+ extern void os_hpmc(void);
+ extern void os_hpmc_end(void);
+
+ if (strcmp((char *)iva, "cows can fly"))
+ return -1;
+
+ ivap = (u32 *)iva;
+
+ for (i = 0; i < 8; i++)
+ *ivap++ = 0;
+
+ /* Compute Checksum for HPMC handler */
+
+ length = (u32)((unsigned long)os_hpmc_end - (unsigned long)os_hpmc);
+ ivap[7] = length;
+
+ hpmcp = (u32 *)os_hpmc;
+
+ for (i=0; i<length/4; i++)
+ check += *hpmcp++;
+
+ for (i=0; i<8; i++)
+ check += ivap[i];
+
+ ivap[5] = -check;
+
+ return 0;
+}
+
+#ifndef __LP64__
+extern const void fault_vector_11;
+#endif
+extern const void fault_vector_20;
+
+void __init trap_init(void)
+{
+ void *iva;
+
+ if (boot_cpu_data.cpu_type >= pcxu)
+ iva = (void *) &fault_vector_20;
+ else
+#ifdef __LP64__
+ panic("Can't boot 64-bit OS on PA1.1 processor!");
+#else
+ iva = (void *) &fault_vector_11;
+#endif
+
+ if (check_ivt(iva))
+ panic("IVT invalid");
+}
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
new file mode 100644
index 000000000000..62eea35bcd69
--- /dev/null
+++ b/arch/parisc/kernel/unaligned.c
@@ -0,0 +1,816 @@
+/*
+ * Unaligned memory access handler
+ *
+ * Copyright (C) 2001 Randolph Chung <tausq@debian.org>
+ * Significantly tweaked by LaMont Jones <lamont@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+/* #define DEBUG_UNALIGNED 1 */
+
+#ifdef DEBUG_UNALIGNED
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#else
+#define DPRINTF(fmt, args...)
+#endif
+
+#ifdef __LP64__
+#define RFMT "%016lx"
+#else
+#define RFMT "%08lx"
+#endif
+
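+/* Used by the .fixup stubs below: load the address of local label lbl
+ * into %r1 and branch there (bv,n nullifies the delay slot). */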
+#define FIXUP_BRANCH(lbl) \
+ "\tldil L%%" #lbl ", %%r1\n" \
+ "\tldo R%%" #lbl "(%%r1), %%r1\n" \
+ "\tbv,n %%r0(%%r1)\n"
+
+/* 1111 1100 0000 0000 0001 0011 1100 0000 */
+#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)
+#define OPCODE2(a,b) ((a)<<26|(b)<<1)
+#define OPCODE3(a,b) ((a)<<26|(b)<<2)
+#define OPCODE4(a) ((a)<<26)
+#define OPCODE1_MASK OPCODE1(0x3f,1,0xf)
+#define OPCODE2_MASK OPCODE2(0x3f,1)
+#define OPCODE3_MASK OPCODE3(0x3f,1)
+#define OPCODE4_MASK OPCODE4(0x3f)
+
+/* skip LDB - never unaligned (index) */
+#define OPCODE_LDH_I OPCODE1(0x03,0,0x1)
+#define OPCODE_LDW_I OPCODE1(0x03,0,0x2)
+#define OPCODE_LDD_I OPCODE1(0x03,0,0x3)
+#define OPCODE_LDDA_I OPCODE1(0x03,0,0x4)
+#define OPCODE_LDCD_I OPCODE1(0x03,0,0x5)
+#define OPCODE_LDWA_I OPCODE1(0x03,0,0x6)
+#define OPCODE_LDCW_I OPCODE1(0x03,0,0x7)
+/* skip LDB - never unaligned (short) */
+#define OPCODE_LDH_S OPCODE1(0x03,1,0x1)
+#define OPCODE_LDW_S OPCODE1(0x03,1,0x2)
+#define OPCODE_LDD_S OPCODE1(0x03,1,0x3)
+#define OPCODE_LDDA_S OPCODE1(0x03,1,0x4)
+#define OPCODE_LDCD_S OPCODE1(0x03,1,0x5)
+#define OPCODE_LDWA_S OPCODE1(0x03,1,0x6)
+#define OPCODE_LDCW_S OPCODE1(0x03,1,0x7)
+/* skip STB - never unaligned */
+#define OPCODE_STH OPCODE1(0x03,1,0x9)
+#define OPCODE_STW OPCODE1(0x03,1,0xa)
+#define OPCODE_STD OPCODE1(0x03,1,0xb)
+/* skip STBY - never unaligned */
+/* skip STDBY - never unaligned */
+#define OPCODE_STWA OPCODE1(0x03,1,0xe)
+#define OPCODE_STDA OPCODE1(0x03,1,0xf)
+
+#define OPCODE_FLDWX OPCODE1(0x09,0,0x0)
+#define OPCODE_FLDWXR OPCODE1(0x09,0,0x1)
+#define OPCODE_FSTWX OPCODE1(0x09,0,0x8)
+#define OPCODE_FSTWXR OPCODE1(0x09,0,0x9)
+#define OPCODE_FLDWS OPCODE1(0x09,1,0x0)
+#define OPCODE_FLDWSR OPCODE1(0x09,1,0x1)
+#define OPCODE_FSTWS OPCODE1(0x09,1,0x8)
+#define OPCODE_FSTWSR OPCODE1(0x09,1,0x9)
+#define OPCODE_FLDDX OPCODE1(0x0b,0,0x0)
+#define OPCODE_FSTDX OPCODE1(0x0b,0,0x8)
+#define OPCODE_FLDDS OPCODE1(0x0b,1,0x0)
+#define OPCODE_FSTDS OPCODE1(0x0b,1,0x8)
+
+#define OPCODE_LDD_L OPCODE2(0x14,0)
+#define OPCODE_FLDD_L OPCODE2(0x14,1)
+#define OPCODE_STD_L OPCODE2(0x1c,0)
+#define OPCODE_FSTD_L OPCODE2(0x1c,1)
+
+#define OPCODE_LDW_M OPCODE3(0x17,1)
+#define OPCODE_FLDW_L OPCODE3(0x17,0)
+#define OPCODE_FSTW_L OPCODE3(0x1f,0)
+#define OPCODE_STW_M OPCODE3(0x1f,1)
+
+#define OPCODE_LDH_L OPCODE4(0x11)
+#define OPCODE_LDW_L OPCODE4(0x12)
+#define OPCODE_LDWM OPCODE4(0x13)
+#define OPCODE_STH_L OPCODE4(0x19)
+#define OPCODE_STW_L OPCODE4(0x1A)
+#define OPCODE_STWM OPCODE4(0x1B)
+
+#define MAJOR_OP(i) (((i)>>26)&0x3f)
+#define R1(i) (((i)>>21)&0x1f)
+#define R2(i) (((i)>>16)&0x1f)
+#define R3(i) ((i)&0x1f)
+#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
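+/* IM(i,n): decode an n-bit low-sign-extended immediate -- bit 0 carries
+ * the sign, bits n-1..1 carry the value. */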
+#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
+#define IM5_2(i) IM((i)>>16,5)
+#define IM5_3(i) IM((i),5)
+#define IM14(i) IM((i),14)
+
+#define ERR_NOTHANDLED -1
+#define ERR_PAGEFAULT -2
+
+int unaligned_enabled = 1;
+
+void die_if_kernel (char *str, struct pt_regs *regs, long err);
+
+static int emulate_ldh(struct pt_regs *regs, int toreg)
+{
+ unsigned long saddr = regs->ior;
+ unsigned long val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 2 bytes\n",
+ regs->isr, regs->ior, toreg);
+
+ __asm__ __volatile__ (
+" mtsp %4, %%sr1\n"
+"1: ldbs 0(%%sr1,%3), %%r20\n"
+"2: ldbs 1(%%sr1,%3), %0\n"
+" depw %%r20, 23, 24, %0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,4b\n"
+" .dword 2b,4b\n"
+#else
+" .word 1b,4b\n"
+" .word 2b,4b\n"
+#endif
+" .previous\n"
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r20" );
+
+ DPRINTF("val = 0x" RFMT "\n", val);
+
+ if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+
+static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
+{
+ unsigned long saddr = regs->ior;
+ unsigned long val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 4 bytes\n",
+ regs->isr, regs->ior, toreg);
+
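+	/* Load the two aligned words that straddle the address and merge
+	 * them with a variable shift (vshd); %sr1 holds the space id taken
+	 * from regs->isr, and the fixup path returns -2 on a fault. */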
+ __asm__ __volatile__ (
+" zdep %3,28,2,%%r19\n" /* r19=(ofs&3)*8 */
+" mtsp %4, %%sr1\n"
+" depw %%r0,31,2,%3\n"
+"1: ldw 0(%%sr1,%3),%0\n"
+"2: ldw 4(%%sr1,%3),%%r20\n"
+" subi 32,%%r19,%%r19\n"
+" mtctl %%r19,11\n"
+" vshd %0,%%r20,%0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,4b\n"
+" .dword 2b,4b\n"
+#else
+" .word 1b,4b\n"
+" .word 2b,4b\n"
+#endif
+" .previous\n"
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20" );
+
+ DPRINTF("val = 0x" RFMT "\n", val);
+
+ if (flop)
+ ((__u32*)(regs->fr))[toreg] = val;
+ else if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
+{
+ unsigned long saddr = regs->ior;
+ __u64 val = 0;
+ int ret;
+
+ DPRINTF("load " RFMT ":" RFMT " to r%d for 8 bytes\n",
+ regs->isr, regs->ior, toreg);
+#ifdef CONFIG_PA20
+
+#ifndef __LP64__
+ if (!flop)
+ return -1;
+#endif
+ __asm__ __volatile__ (
+" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
+" mtsp %4, %%sr1\n"
+" depd %%r0,63,3,%3\n"
+"1: ldd 0(%%sr1,%3),%0\n"
+"2: ldd 8(%%sr1,%3),%%r20\n"
+" subi 64,%%r19,%%r19\n"
+" mtsar %%r19\n"
+" shrpd %0,%%r20,%%sar,%0\n"
+" copy %%r0, %1\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %1\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,4b\n"
+" .dword 2b,4b\n"
+#else
+" .word 1b,4b\n"
+" .word 2b,4b\n"
+#endif
+" .previous\n"
+ : "=r" (val), "=r" (ret)
+ : "0" (val), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20" );
+#else
+ {
+ unsigned long valh=0,vall=0;
+ __asm__ __volatile__ (
+" zdep %5,29,2,%%r19\n" /* r19=(ofs&3)*8 */
+" mtsp %6, %%sr1\n"
+" dep %%r0,31,2,%5\n"
+"1: ldw 0(%%sr1,%5),%0\n"
+"2: ldw 4(%%sr1,%5),%1\n"
+"3: ldw 8(%%sr1,%5),%%r20\n"
+" subi 32,%%r19,%%r19\n"
+" mtsar %%r19\n"
+" vshd %0,%1,%0\n"
+" vshd %1,%%r20,%1\n"
+" copy %%r0, %2\n"
+"4: \n"
+" .section .fixup,\"ax\"\n"
+"5: ldi -2, %2\n"
+ FIXUP_BRANCH(4b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,5b\n"
+" .dword 2b,5b\n"
+" .dword 3b,5b\n"
+#else
+" .word 1b,5b\n"
+" .word 2b,5b\n"
+" .word 3b,5b\n"
+#endif
+" .previous\n"
+ : "=r" (valh), "=r" (vall), "=r" (ret)
+ : "0" (valh), "1" (vall), "r" (saddr), "r" (regs->isr)
+ : "r19", "r20" );
+ val=((__u64)valh<<32)|(__u64)vall;
+ }
+#endif
+
+ DPRINTF("val = 0x%llx\n", val);
+
+ if (flop)
+ regs->fr[toreg] = val;
+ else if (toreg)
+ regs->gr[toreg] = val;
+
+ return ret;
+}
+
+static int emulate_sth(struct pt_regs *regs, int frreg)
+{
+ unsigned long val = regs->gr[frreg];
+ int ret;
+
+ if (!frreg)
+ val = 0;
+
+ DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 2 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" extrw,u %1, 23, 8, %%r19\n"
+"1: stb %1, 1(%%sr1, %2)\n"
+"2: stb %%r19, 0(%%sr1, %2)\n"
+" copy %%r0, %0\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %0\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,4b\n"
+" .dword 2b,4b\n"
+#else
+" .word 1b,4b\n"
+" .word 2b,4b\n"
+#endif
+" .previous\n"
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19" );
+
+ return ret;
+}
+
+static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
+{
+ unsigned long val;
+ int ret;
+
+ if (flop)
+ val = ((__u32*)(regs->fr))[frreg];
+ else if (frreg)
+ val = regs->gr[frreg];
+ else
+ val = 0;
+
+ DPRINTF("store r%d (0x" RFMT ") to " RFMT ":" RFMT " for 4 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" zdep %2, 28, 2, %%r19\n"
+" dep %%r0, 31, 2, %2\n"
+" mtsar %%r19\n"
+" depwi,z -2, %%sar, 32, %%r19\n"
+"1: ldw 0(%%sr1,%2),%%r20\n"
+"2: ldw 4(%%sr1,%2),%%r21\n"
+" vshd %%r0, %1, %%r22\n"
+" vshd %1, %%r0, %%r1\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %%r22, %%r20, %%r20\n"
+" or %%r1, %%r21, %%r21\n"
+" stw %%r20,0(%%sr1,%2)\n"
+" stw %%r21,4(%%sr1,%2)\n"
+" copy %%r0, %0\n"
+"3: \n"
+" .section .fixup,\"ax\"\n"
+"4: ldi -2, %0\n"
+ FIXUP_BRANCH(3b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,4b\n"
+" .dword 2b,4b\n"
+#else
+" .word 1b,4b\n"
+" .word 2b,4b\n"
+#endif
+" .previous\n"
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1" );
+
+	return ret;
+}
+static int emulate_std(struct pt_regs *regs, int frreg, int flop)
+{
+ __u64 val;
+ int ret;
+
+ if (flop)
+ val = regs->fr[frreg];
+ else if (frreg)
+ val = regs->gr[frreg];
+ else
+ val = 0;
+
+ DPRINTF("store r%d (0x%016llx) to " RFMT ":" RFMT " for 8 bytes\n", frreg,
+ val, regs->isr, regs->ior);
+
+#ifdef CONFIG_PA20
+#ifndef __LP64__
+ if (!flop)
+ return -1;
+#endif
+ __asm__ __volatile__ (
+" mtsp %3, %%sr1\n"
+" depd,z %2, 60, 3, %%r19\n"
+" depd %%r0, 63, 3, %2\n"
+" mtsar %%r19\n"
+" depdi,z -2, %%sar, 64, %%r19\n"
+"1: ldd 0(%%sr1,%2),%%r20\n"
+"2: ldd 8(%%sr1,%2),%%r21\n"
+" shrpd %%r0, %1, %%sar, %%r22\n"
+" shrpd %1, %%r0, %%sar, %%r1\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %%r22, %%r20, %%r20\n"
+" or %%r1, %%r21, %%r21\n"
+"3: std %%r20,0(%%sr1,%2)\n"
+"4: std %%r21,8(%%sr1,%2)\n"
+" copy %%r0, %0\n"
+"5: \n"
+" .section .fixup,\"ax\"\n"
+"6: ldi -2, %0\n"
+ FIXUP_BRANCH(5b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,6b\n"
+" .dword 2b,6b\n"
+" .dword 3b,6b\n"
+" .dword 4b,6b\n"
+#else
+" .word 1b,6b\n"
+" .word 2b,6b\n"
+" .word 3b,6b\n"
+" .word 4b,6b\n"
+#endif
+" .previous\n"
+ : "=r" (ret)
+ : "r" (val), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r22", "r1" );
+#else
+ {
+ unsigned long valh=(val>>32),vall=(val&0xffffffffl);
+ __asm__ __volatile__ (
+" mtsp %4, %%sr1\n"
+" zdep %2, 29, 2, %%r19\n"
+" dep %%r0, 31, 2, %2\n"
+" mtsar %%r19\n"
+" zvdepi -2, 32, %%r19\n"
+"1: ldw 0(%%sr1,%3),%%r20\n"
+"2: ldw 8(%%sr1,%3),%%r21\n"
+" vshd %1, %2, %%r1\n"
+" vshd %%r0, %1, %1\n"
+" vshd %2, %%r0, %2\n"
+" and %%r20, %%r19, %%r20\n"
+" andcm %%r21, %%r19, %%r21\n"
+" or %1, %%r20, %1\n"
+" or %2, %%r21, %2\n"
+"3: stw %1,0(%%sr1,%1)\n"
+"4: stw %%r1,4(%%sr1,%3)\n"
+"5: stw %2,8(%%sr1,%3)\n"
+" copy %%r0, %0\n"
+"6: \n"
+" .section .fixup,\"ax\"\n"
+"7: ldi -2, %0\n"
+ FIXUP_BRANCH(6b)
+" .previous\n"
+" .section __ex_table,\"aw\"\n"
+#ifdef __LP64__
+" .dword 1b,7b\n"
+" .dword 2b,7b\n"
+" .dword 3b,7b\n"
+" .dword 4b,7b\n"
+" .dword 5b,7b\n"
+#else
+" .word 1b,7b\n"
+" .word 2b,7b\n"
+" .word 3b,7b\n"
+" .word 4b,7b\n"
+" .word 5b,7b\n"
+#endif
+" .previous\n"
+ : "=r" (ret)
+ : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr)
+ : "r19", "r20", "r21", "r1" );
+ }
+#endif
+
+ return ret;
+}
+
+void handle_unaligned(struct pt_regs *regs)
+{
+ static unsigned long unaligned_count = 0;
+ static unsigned long last_time = 0;
+ unsigned long newbase = R1(regs->iir)?regs->gr[R1(regs->iir)]:0;
+ int modify = 0;
+ int ret = ERR_NOTHANDLED;
+ struct siginfo si;
+ register int flop=0; /* true if this is a flop */
+
+ /* log a message with pacing */
+ if (user_mode(regs))
+ {
+ if (unaligned_count > 5 && jiffies - last_time > 5*HZ)
+ {
+ unaligned_count = 0;
+ last_time = jiffies;
+ }
+ if (++unaligned_count < 5)
+ {
+ char buf[256];
+ sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
+ current->comm, current->pid, regs->ior, regs->iaoq[0]);
+ printk(KERN_WARNING "%s", buf);
+#ifdef DEBUG_UNALIGNED
+ show_regs(regs);
+#endif
+ }
+ if (!unaligned_enabled)
+ goto force_sigbus;
+ }
+
+ /* handle modification - OK, it's ugly, see the instruction manual */
+ switch (MAJOR_OP(regs->iir))
+ {
+ case 0x03:
+ case 0x09:
+ case 0x0b:
+ if (regs->iir&0x20)
+ {
+ modify = 1;
+ if (regs->iir&0x1000) /* short loads */
+ if (regs->iir&0x200)
+ newbase += IM5_3(regs->iir);
+ else
+ newbase += IM5_2(regs->iir);
+ else if (regs->iir&0x2000) /* scaled indexed */
+ {
+ int shift=0;
+ switch (regs->iir & OPCODE1_MASK)
+ {
+ case OPCODE_LDH_I:
+ shift= 1; break;
+ case OPCODE_LDW_I:
+ shift= 2; break;
+ case OPCODE_LDD_I:
+ case OPCODE_LDDA_I:
+ shift= 3; break;
+ }
+ newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0)<<shift;
+ } else /* simple indexed */
+ newbase += (R2(regs->iir)?regs->gr[R2(regs->iir)]:0);
+ }
+ break;
+ case 0x13:
+ case 0x1b:
+ modify = 1;
+ newbase += IM14(regs->iir);
+ break;
+ case 0x14:
+ case 0x1c:
+ if (regs->iir&8)
+ {
+ modify = 1;
+ newbase += IM14(regs->iir&~0xe);
+ }
+ break;
+ case 0x16:
+ case 0x1e:
+ modify = 1;
+ newbase += IM14(regs->iir&6);
+ break;
+ case 0x17:
+ case 0x1f:
+ if (regs->iir&4)
+ {
+ modify = 1;
+ newbase += IM14(regs->iir&~4);
+ }
+ break;
+ }
+
+ /* TODO: make this cleaner... */
+ switch (regs->iir & OPCODE1_MASK)
+ {
+ case OPCODE_LDH_I:
+ case OPCODE_LDH_S:
+ ret = emulate_ldh(regs, R3(regs->iir));
+ break;
+
+ case OPCODE_LDW_I:
+ case OPCODE_LDWA_I:
+ case OPCODE_LDW_S:
+ case OPCODE_LDWA_S:
+ ret = emulate_ldw(regs, R3(regs->iir),0);
+ break;
+
+ case OPCODE_STH:
+ ret = emulate_sth(regs, R2(regs->iir));
+ break;
+
+ case OPCODE_STW:
+ case OPCODE_STWA:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+
+#ifdef CONFIG_PA20
+ case OPCODE_LDD_I:
+ case OPCODE_LDDA_I:
+ case OPCODE_LDD_S:
+ case OPCODE_LDDA_S:
+ ret = emulate_ldd(regs, R3(regs->iir),0);
+ break;
+
+ case OPCODE_STD:
+ case OPCODE_STDA:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+#endif
+
+ case OPCODE_FLDWX:
+ case OPCODE_FLDWS:
+ case OPCODE_FLDWXR:
+ case OPCODE_FLDWSR:
+ flop=1;
+ ret = emulate_ldw(regs,FR3(regs->iir),1);
+ break;
+
+ case OPCODE_FLDDX:
+ case OPCODE_FLDDS:
+ flop=1;
+ ret = emulate_ldd(regs,R3(regs->iir),1);
+ break;
+
+ case OPCODE_FSTWX:
+ case OPCODE_FSTWS:
+ case OPCODE_FSTWXR:
+ case OPCODE_FSTWSR:
+ flop=1;
+ ret = emulate_stw(regs,FR3(regs->iir),1);
+ break;
+
+ case OPCODE_FSTDX:
+ case OPCODE_FSTDS:
+ flop=1;
+ ret = emulate_std(regs,R3(regs->iir),1);
+ break;
+
+ case OPCODE_LDCD_I:
+ case OPCODE_LDCW_I:
+ case OPCODE_LDCD_S:
+ case OPCODE_LDCW_S:
+ ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
+ break;
+ }
+#ifdef CONFIG_PA20
+ switch (regs->iir & OPCODE2_MASK)
+ {
+ case OPCODE_FLDD_L:
+ flop=1;
+ ret = emulate_ldd(regs,R2(regs->iir),1);
+ break;
+ case OPCODE_FSTD_L:
+ flop=1;
+ ret = emulate_std(regs, R2(regs->iir),1);
+ break;
+
+#ifdef CONFIG_PA20
+ case OPCODE_LDD_L:
+ ret = emulate_ldd(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STD_L:
+ ret = emulate_std(regs, R2(regs->iir),0);
+ break;
+#endif
+ }
+#endif
+ switch (regs->iir & OPCODE3_MASK)
+ {
+ case OPCODE_FLDW_L:
+ flop=1;
+		ret = emulate_ldw(regs, R2(regs->iir),1);
+ break;
+ case OPCODE_LDW_M:
+		ret = emulate_ldw(regs, R2(regs->iir),0);
+ break;
+
+ case OPCODE_FSTW_L:
+ flop=1;
+ ret = emulate_stw(regs, R2(regs->iir),1);
+ break;
+ case OPCODE_STW_M:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+ }
+ switch (regs->iir & OPCODE4_MASK)
+ {
+ case OPCODE_LDH_L:
+ ret = emulate_ldh(regs, R2(regs->iir));
+ break;
+ case OPCODE_LDW_L:
+ case OPCODE_LDWM:
+ ret = emulate_ldw(regs, R2(regs->iir),0);
+ break;
+ case OPCODE_STH_L:
+ ret = emulate_sth(regs, R2(regs->iir));
+ break;
+ case OPCODE_STW_L:
+ case OPCODE_STWM:
+ ret = emulate_stw(regs, R2(regs->iir),0);
+ break;
+ }
+
+ if (modify && R1(regs->iir))
+ regs->gr[R1(regs->iir)] = newbase;
+
+
+ if (ret == ERR_NOTHANDLED)
+ printk(KERN_CRIT "Not-handled unaligned insn 0x%08lx\n", regs->iir);
+
+ DPRINTF("ret = %d\n", ret);
+
+ if (ret)
+ {
+ printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ die_if_kernel("Unaligned data reference", regs, 28);
+
+ if (ret == ERR_PAGEFAULT)
+ {
+ si.si_signo = SIGSEGV;
+ si.si_errno = 0;
+ si.si_code = SEGV_MAPERR;
+ si.si_addr = (void __user *)regs->ior;
+ force_sig_info(SIGSEGV, &si, current);
+ }
+ else
+ {
+force_sigbus:
+ /* couldn't handle it ... */
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void __user *)regs->ior;
+ force_sig_info(SIGBUS, &si, current);
+ }
+
+ return;
+ }
+
+ /* else we handled it, let life go on. */
+ regs->gr[0]|=PSW_N;
+}
+
+/*
+ * NB: check_unaligned() is only used for PCXS processors right
+ * now, so we only check for PA1.1 encodings at this point.
+ */
+
+int
+check_unaligned(struct pt_regs *regs)
+{
+ unsigned long align_mask;
+
+ /* Get alignment mask */
+
+ align_mask = 0UL;
+ switch (regs->iir & OPCODE1_MASK) {
+
+ case OPCODE_LDH_I:
+ case OPCODE_LDH_S:
+ case OPCODE_STH:
+ align_mask = 1UL;
+ break;
+
+ case OPCODE_LDW_I:
+ case OPCODE_LDWA_I:
+ case OPCODE_LDW_S:
+ case OPCODE_LDWA_S:
+ case OPCODE_STW:
+ case OPCODE_STWA:
+ align_mask = 3UL;
+ break;
+
+ default:
+ switch (regs->iir & OPCODE4_MASK) {
+ case OPCODE_LDH_L:
+ case OPCODE_STH_L:
+ align_mask = 1UL;
+ break;
+ case OPCODE_LDW_L:
+ case OPCODE_LDWM:
+ case OPCODE_STW_L:
+ case OPCODE_STWM:
+ align_mask = 3UL;
+ break;
+ }
+ break;
+ }
+
+ return (int)(regs->ior & align_mask);
+}
+
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
new file mode 100644
index 000000000000..db141108412e
--- /dev/null
+++ b/arch/parisc/kernel/unwind.c
@@ -0,0 +1,393 @@
+/*
+ * Kernel unwinding support
+ *
+ * (c) 2002-2004 Randolph Chung <tausq@debian.org>
+ *
+ * Derived partially from the IA64 implementation. The PA-RISC
+ * Runtime Architecture Document is also a useful reference to
+ * understand what is happening here
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kallsyms.h>
+
+#include <asm/uaccess.h>
+#include <asm/assembly.h>
+
+#include <asm/unwind.h>
+
+/* #define DEBUG 1 */
+#ifdef DEBUG
+#define dbg(x...) printk(x)
+#else
+#define dbg(x...)
+#endif
+
+extern struct unwind_table_entry __start___unwind[];
+extern struct unwind_table_entry __stop___unwind[];
+
+static spinlock_t unwind_lock;
+/*
+ * the kernel unwind block is not dynamically allocated so that
+ * we can call unwind_init as early in the bootup process as
+ * possible (before the slab allocator is initialized)
+ */
+static struct unwind_table kernel_unwind_table;
+static LIST_HEAD(unwind_tables);
+
+static inline const struct unwind_table_entry *
+find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
+{
+ const struct unwind_table_entry *e = NULL;
+ unsigned long lo, hi, mid;
+
+ lo = 0;
+ hi = table->length - 1;
+
+ while (lo <= hi) {
+ mid = (hi - lo) / 2 + lo;
+ e = &table->table[mid];
+ if (addr < e->region_start)
+ hi = mid - 1;
+ else if (addr > e->region_end)
+ lo = mid + 1;
+ else
+ return e;
+ }
+
+ return NULL;
+}
+
+static const struct unwind_table_entry *
+find_unwind_entry(unsigned long addr)
+{
+ struct unwind_table *table;
+ const struct unwind_table_entry *e = NULL;
+
+ if (addr >= kernel_unwind_table.start &&
+ addr <= kernel_unwind_table.end)
+ e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
+ else
+ list_for_each_entry(table, &unwind_tables, list) {
+ if (addr >= table->start &&
+ addr <= table->end)
+ e = find_unwind_entry_in_table(table, addr);
+ if (e)
+ break;
+ }
+
+ return e;
+}
+
+static void
+unwind_table_init(struct unwind_table *table, const char *name,
+ unsigned long base_addr, unsigned long gp,
+ void *table_start, void *table_end)
+{
+ struct unwind_table_entry *start = table_start;
+ struct unwind_table_entry *end =
+ (struct unwind_table_entry *)table_end - 1;
+
+ table->name = name;
+ table->base_addr = base_addr;
+ table->gp = gp;
+ table->start = base_addr + start->region_start;
+ table->end = base_addr + end->region_end;
+ table->table = (struct unwind_table_entry *)table_start;
+ table->length = end - start + 1;
+ INIT_LIST_HEAD(&table->list);
+
+ for (; start <= end; start++) {
+ if (start < end &&
+ start->region_end > (start+1)->region_start) {
+ printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
+ }
+
+ start->region_start += base_addr;
+ start->region_end += base_addr;
+ }
+}
+
+static void
+unwind_table_sort(struct unwind_table_entry *start,
+ struct unwind_table_entry *finish)
+{
+ struct unwind_table_entry el, *p, *q;
+
+ for (p = start + 1; p < finish; ++p) {
+ if (p[0].region_start < p[-1].region_start) {
+ el = *p;
+ q = p;
+ do {
+ q[0] = q[-1];
+ --q;
+ } while (q > start &&
+ el.region_start < q[-1].region_start);
+ *q = el;
+ }
+ }
+}
+
+struct unwind_table *
+unwind_table_add(const char *name, unsigned long base_addr,
+ unsigned long gp,
+ void *start, void *end)
+{
+ struct unwind_table *table;
+ unsigned long flags;
+ struct unwind_table_entry *s = (struct unwind_table_entry *)start;
+ struct unwind_table_entry *e = (struct unwind_table_entry *)end;
+
+ unwind_table_sort(s, e);
+
+ table = kmalloc(sizeof(struct unwind_table), GFP_USER);
+ if (table == NULL)
+ return NULL;
+ unwind_table_init(table, name, base_addr, gp, start, end);
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_add_tail(&table->list, &unwind_tables);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ return table;
+}
+
+void unwind_table_remove(struct unwind_table *table)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&unwind_lock, flags);
+ list_del(&table->list);
+ spin_unlock_irqrestore(&unwind_lock, flags);
+
+ kfree(table);
+}
+
+/* Called from setup_arch to import the kernel unwind info */
+static int unwind_init(void)
+{
+ long start, stop;
+ register unsigned long gp __asm__ ("r27");
+
+ start = (long)&__start___unwind[0];
+ stop = (long)&__stop___unwind[0];
+
+ spin_lock_init(&unwind_lock);
+
+ printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
+ start, stop,
+ (stop - start) / sizeof(struct unwind_table_entry));
+
+ unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
+ gp,
+ &__start___unwind[0], &__stop___unwind[0]);
+#if 0
+ {
+ int i;
+ for (i = 0; i < 10; i++)
+ {
+ printk("region 0x%x-0x%x\n",
+ __start___unwind[i].region_start,
+ __start___unwind[i].region_end);
+ }
+ }
+#endif
+ return 0;
+}
+
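+/* Compute prev_sp/prev_ip for the frame described by info: if an unwind
+ * table entry exists, scan the function prologue to find the frame size
+ * and saved rp; otherwise fall back to a blind walk of the stack. */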
+static void unwind_frame_regs(struct unwind_frame_info *info)
+{
+ const struct unwind_table_entry *e;
+ unsigned long npc;
+ unsigned int insn;
+ long frame_size = 0;
+ int looking_for_rp, rpoffset = 0;
+
+ e = find_unwind_entry(info->ip);
+ if (e == NULL) {
+ unsigned long sp;
+ extern char _stext[], _etext[];
+
+ dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
+
+#ifdef CONFIG_KALLSYMS
+ /* Handle some frequent special cases.... */
+ {
+ char symname[KSYM_NAME_LEN+1];
+ char *modname;
+ unsigned long symsize, offset;
+
+ kallsyms_lookup(info->ip, &symsize, &offset,
+ &modname, symname);
+
+ dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);
+
+ if (strcmp(symname, "_switch_to_ret") == 0) {
+ info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
+ info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
+ dbg("_switch_to_ret @ %lx - setting "
+ "prev_sp=%lx prev_ip=%lx\n",
+ info->ip, info->prev_sp,
+ info->prev_ip);
+ return;
+ } else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
+ strcmp(symname, "syscall_exit") == 0) {
+ info->prev_ip = info->prev_sp = 0;
+ return;
+ }
+ }
+#endif
+
+ /* Since we are doing the unwinding blind, we don't know if
+ we are adjusting the stack correctly or extracting the rp
+ correctly. The rp is checked to see if it belongs to the
+ kernel text section, if not we assume we don't have a
+ correct stack frame and we continue to unwind the stack.
+ This is not quite correct, and will fail for loadable
+ modules. */
+ sp = info->sp & ~63;
+ do {
+ unsigned long tmp;
+
+ info->prev_sp = sp - 64;
+ info->prev_ip = 0;
+ if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
+ break;
+ info->prev_ip = tmp;
+ sp = info->prev_sp;
+ } while (info->prev_ip < (unsigned long)_stext ||
+ info->prev_ip > (unsigned long)_etext);
+
+ info->rp = 0;
+
+ dbg("analyzing func @ %lx with no unwind info, setting "
+ "prev_sp=%lx prev_ip=%lx\n", info->ip,
+ info->prev_sp, info->prev_ip);
+ } else {
+ dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
+ "Save_RP = %d, Millicode = %d size = %u\n",
+ e->region_start, e->region_end, e->Save_SP, e->Save_RP,
+ e->Millicode, e->Total_frame_size);
+
+ looking_for_rp = e->Save_RP;
+
+ for (npc = e->region_start;
+ (frame_size < (e->Total_frame_size << 3) ||
+ looking_for_rp) &&
+ npc < info->ip;
+ npc += 4) {
+
+ insn = *(unsigned int *)npc;
+
+ if ((insn & 0xffffc000) == 0x37de0000 ||
+ (insn & 0xffe00000) == 0x6fc00000) {
+ /* ldo X(sp), sp, or stwm X,D(sp) */
+ frame_size += (insn & 0x1 ? -1 << 13 : 0) |
+ ((insn & 0x3fff) >> 1);
+ dbg("analyzing func @ %lx, insn=%08x @ "
+ "%lx, frame_size = %ld\n", info->ip,
+ insn, npc, frame_size);
+ } else if ((insn & 0xffe00008) == 0x73c00008) {
+ /* std,ma X,D(sp) */
+ frame_size += (insn & 0x1 ? -1 << 13 : 0) |
+ (((insn >> 4) & 0x3ff) << 3);
+ dbg("analyzing func @ %lx, insn=%08x @ "
+ "%lx, frame_size = %ld\n", info->ip,
+ insn, npc, frame_size);
+ } else if (insn == 0x6bc23fd9) {
+ /* stw rp,-20(sp) */
+ rpoffset = 20;
+ looking_for_rp = 0;
+ dbg("analyzing func @ %lx, insn=stw rp,"
+ "-20(sp) @ %lx\n", info->ip, npc);
+ } else if (insn == 0x0fc212c1) {
+ /* std rp,-16(sr0,sp) */
+ rpoffset = 16;
+ looking_for_rp = 0;
+ dbg("analyzing func @ %lx, insn=std rp,"
+ "-16(sp) @ %lx\n", info->ip, npc);
+ }
+ }
+
+ info->prev_sp = info->sp - frame_size;
+ if (e->Millicode)
+ info->rp = info->r31;
+ else if (rpoffset)
+ info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
+ info->prev_ip = info->rp;
+ info->rp = 0;
+
+ dbg("analyzing func @ %lx, setting prev_sp=%lx "
+ "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
+ info->prev_ip, npc);
+ }
+}
+
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
+ struct pt_regs *regs)
+{
+ memset(info, 0, sizeof(struct unwind_frame_info));
+ info->t = t;
+ info->sp = regs->gr[30];
+ info->ip = regs->iaoq[0];
+ info->rp = regs->gr[2];
+ info->r31 = regs->gr[31];
+
+ dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
+ t ? (int)t->pid : -1, info->sp, info->ip);
+}
+
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
+{
+ struct pt_regs *r = &t->thread.regs;
+ struct pt_regs *r2;
+
+ r2 = (struct pt_regs *)kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+ if (!r2)
+ return;
+ *r2 = *r;
+ r2->gr[30] = r->ksp;
+ r2->iaoq[0] = r->kpc;
+ unwind_frame_init(info, t, r2);
+ kfree(r2);
+}
+
+void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
+{
+ unwind_frame_init(info, current, regs);
+}
+
+int unwind_once(struct unwind_frame_info *next_frame)
+{
+ unwind_frame_regs(next_frame);
+
+ if (next_frame->prev_sp == 0 ||
+ next_frame->prev_ip == 0)
+ return -1;
+
+ next_frame->sp = next_frame->prev_sp;
+ next_frame->ip = next_frame->prev_ip;
+ next_frame->prev_sp = 0;
+ next_frame->prev_ip = 0;
+
+ dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
+ next_frame->t ? (int)next_frame->t->pid : -1,
+ next_frame->sp, next_frame->ip);
+
+ return 0;
+}
+
+int unwind_to_user(struct unwind_frame_info *info)
+{
+ int ret;
+
+ do {
+ ret = unwind_once(info);
+ } while (!ret && !(info->ip & 3));
+
+ return ret;
+}
+
+module_init(unwind_init);
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..e5fac3e08c7a
--- /dev/null
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -0,0 +1,207 @@
+/* Kernel link layout for various "sections"
+ *
+ * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
+ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
+ * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
+ * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
+ * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
+ * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/config.h>
+#include <asm-generic/vmlinux.lds.h>
+/* needed for the processor specific cache alignment size */
+#include <asm/cache.h>
+#include <asm/page.h>
+
+/* ld script to make hppa Linux kernel */
+#ifndef CONFIG_64BIT
+OUTPUT_FORMAT("elf32-hppa-linux")
+OUTPUT_ARCH(hppa)
+#else
+OUTPUT_FORMAT("elf64-hppa-linux")
+OUTPUT_ARCH(hppa:hppa2.0w)
+#endif
+
+ENTRY(_stext)
+#ifndef CONFIG_64BIT
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+SECTIONS
+{
+
+ . = KERNEL_BINARY_TEXT_START;
+
+ _text = .; /* Text and read-only data */
+ .text ALIGN(16) : {
+ *(.text)
+ SCHED_TEXT
+ LOCK_TEXT
+ *(.text.do_softirq)
+ *(.text.sys_exit)
+ *(.text.do_sigaltstack)
+ *(.text.do_fork)
+ *(.text.*)
+ *(.fixup)
+ *(.lock.text) /* out-of-line lock text */
+ *(.gnu.warning)
+ } = 0
+
+ _etext = .; /* End of text section */
+
+ RODATA
+
+ /* writeable */
+	. = ALIGN(4096);		/* Make sure this is page aligned so
+ that we can properly leave these
+ as writable */
+ data_start = .;
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___unwind = .; /* unwind info */
+ .PARISC.unwind : { *(.PARISC.unwind) }
+ __stop___unwind = .;
+
+ .data : { /* Data */
+ *(.data)
+ *(.data.vm0.pmd)
+ *(.data.vm0.pgd)
+ *(.data.vm0.pte)
+ CONSTRUCTORS
+ }
+
+ . = ALIGN(4096);
+ /* nosave data is really only used for software suspend...it's here
+ * just in case we ever implement it */
+ __nosave_begin = .;
+ .data_nosave : { *(.data.nosave) }
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+ . = ALIGN(L1_CACHE_BYTES);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+	/* PA-RISC locks require 16-byte alignment */
+ . = ALIGN(16);
+ .data.lock_aligned : { *(.data.lock_aligned) }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(16384); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
+ /* The interrupt stack is currently partially coded, but not yet
+ * implemented */
+ . = ALIGN(16384);
+ init_istack : { *(init_istack) }
+
+#ifdef CONFIG_64BIT
+ . = ALIGN(16); /* Linkage tables */
+ .opd : { *(.opd) } PROVIDE (__gp = .);
+ .plt : { *(.plt) }
+ .dlt : { *(.dlt) }
+#endif
+
+ . = ALIGN(16384);
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+ SECURITY_INIT
+ /* alternate instruction replacement. This is a mechanism x86 uses
+ * to detect the CPU type and replace generic instruction sequences
+ * with CPU specific ones. We don't currently do this in PA, but
+ * it seems like a good idea... */
+ . = ALIGN(4);
+ __alt_instructions = .;
+ .altinstructions : { *(.altinstructions) }
+ __alt_instructions_end = .;
+ .altinstr_replacement : { *(.altinstr_replacement) }
+  /* .exit.text is discarded at runtime, not link time, to deal with references
+ from .altinstructions and .eh_frame */
+ .exit.text : { *(.exit.text) }
+ .exit.data : { *(.exit.data) }
+ . = ALIGN(4096);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+ . = ALIGN(32);
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
+ . = ALIGN(4096);
+ __init_end = .;
+ /* freed after init ends here */
+
+ __bss_start = .; /* BSS */
+ .bss : { *(.bss) *(COMMON) }
+ __bss_stop = .;
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exitcall.exit)
+#ifdef CONFIG_64BIT
+ /* temporary hack until binutils is fixed to not emit these
+ for static binaries */
+ *(.interp)
+ *(.dynsym)
+ *(.dynstr)
+ *(.dynamic)
+ *(.hash)
+#endif
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ .note 0 : { *(.note) }
+
+}