author    Thomas Gleixner <tglx@tglx.tec.linutronix.de>  2005-06-26 23:20:36 +0200
committer Thomas Gleixner <tglx@mtd.linutronix.de>       2005-06-26 23:20:36 +0200
commit    7ca6448dbfb398bba36eda3c01bc14b86c3675be (patch)
tree      82d934ebf07f22a2c64c3b6d82ec24082878b43a /include/asm-xtensa
parent    f1f67a9874f1a4bba1adff6d694aa52e5f52ff1a (diff)
parent    7d681b23d6cc14a8c026ea6756242cb522cbbcae (diff)
Merge with rsync://fileserver/linux
Update to Linus latest
Diffstat (limited to 'include/asm-xtensa')
 include/asm-xtensa/a.out.h | 33
 include/asm-xtensa/atomic.h | 272
 include/asm-xtensa/bitops.h | 446
 include/asm-xtensa/bootparam.h | 61
 include/asm-xtensa/bug.h | 41
 include/asm-xtensa/bugs.h | 22
 include/asm-xtensa/byteorder.h | 82
 include/asm-xtensa/cache.h | 32
 include/asm-xtensa/cacheflush.h | 122
 include/asm-xtensa/checksum.h | 264
 include/asm-xtensa/coprocessor.h | 70
 include/asm-xtensa/cpumask.h | 16
 include/asm-xtensa/cputime.h | 6
 include/asm-xtensa/current.h | 38
 include/asm-xtensa/delay.h | 50
 include/asm-xtensa/div64.h | 19
 include/asm-xtensa/dma-mapping.h | 182
 include/asm-xtensa/dma.h | 61
 include/asm-xtensa/elf.h | 222
 include/asm-xtensa/errno.h | 142
 include/asm-xtensa/fcntl.h | 101
 include/asm-xtensa/fixmap.h | 252
 include/asm-xtensa/hardirq.h | 28
 include/asm-xtensa/hdreg.h | 17
 include/asm-xtensa/highmem.h | 17
 include/asm-xtensa/hw_irq.h | 18
 include/asm-xtensa/ide.h | 36
 include/asm-xtensa/io.h | 197
 include/asm-xtensa/ioctl.h | 83
 include/asm-xtensa/ioctls.h | 112
 include/asm-xtensa/ipc.h | 34
 include/asm-xtensa/ipcbuf.h | 37
 include/asm-xtensa/irq.h | 37
 include/asm-xtensa/kmap_types.h | 31
 include/asm-xtensa/linkage.h | 16
 include/asm-xtensa/local.h | 16
 include/asm-xtensa/mman.h | 80
 include/asm-xtensa/mmu.h | 17
 include/asm-xtensa/mmu_context.h | 330
 include/asm-xtensa/module.h | 25
 include/asm-xtensa/msgbuf.h | 48
 include/asm-xtensa/namei.h | 26
 include/asm-xtensa/page.h | 133
 include/asm-xtensa/page.h.n | 135
 include/asm-xtensa/param.h | 34
 include/asm-xtensa/pci-bridge.h | 88
 include/asm-xtensa/pci.h | 89
 include/asm-xtensa/percpu.h | 16
 include/asm-xtensa/pgalloc.h | 116
 include/asm-xtensa/pgtable.h | 468
 include/asm-xtensa/platform-iss/hardware.h | 29
 include/asm-xtensa/platform.h | 92
 include/asm-xtensa/poll.h | 37
 include/asm-xtensa/posix_types.h | 123
 include/asm-xtensa/processor.h | 205
 include/asm-xtensa/ptrace.h | 135
 include/asm-xtensa/resource.h | 16
 include/asm-xtensa/rmap.h | 16
 include/asm-xtensa/rwsem.h | 175
 include/asm-xtensa/scatterlist.h | 34
 include/asm-xtensa/sections.h | 16
 include/asm-xtensa/segment.h | 16
 include/asm-xtensa/semaphore.h | 129
 include/asm-xtensa/sembuf.h | 44
 include/asm-xtensa/serial.h | 18
 include/asm-xtensa/setup.h | 16
 include/asm-xtensa/shmbuf.h | 50
 include/asm-xtensa/shmparam.h | 23
 include/asm-xtensa/sigcontext.h | 44
 include/asm-xtensa/siginfo.h | 16
 include/asm-xtensa/signal.h | 187
 include/asm-xtensa/smp.h | 27
 include/asm-xtensa/socket.h | 61
 include/asm-xtensa/sockios.h | 30
 include/asm-xtensa/spinlock.h | 16
 include/asm-xtensa/stat.h | 105
 include/asm-xtensa/statfs.h | 17
 include/asm-xtensa/string.h | 124
 include/asm-xtensa/system.h | 252
 include/asm-xtensa/termbits.h | 194
 include/asm-xtensa/termios.h | 122
 include/asm-xtensa/thread_info.h | 146
 include/asm-xtensa/timex.h | 94
 include/asm-xtensa/tlb.h | 25
 include/asm-xtensa/tlbflush.h | 200
 include/asm-xtensa/topology.h | 16
 include/asm-xtensa/types.h | 66
 include/asm-xtensa/uaccess.h | 532
 include/asm-xtensa/ucontext.h | 22
 include/asm-xtensa/unaligned.h | 28
 include/asm-xtensa/unistd.h | 537
 include/asm-xtensa/user.h | 20
 include/asm-xtensa/vga.h | 19
 include/asm-xtensa/xor.h | 16
 include/asm-xtensa/xtensa/cacheasm.h | 708
 include/asm-xtensa/xtensa/cacheattrasm.h | 432
 include/asm-xtensa/xtensa/config-linux_be/core.h | 1270
 include/asm-xtensa/xtensa/config-linux_be/defs.h | 270
 include/asm-xtensa/xtensa/config-linux_be/specreg.h | 99
 include/asm-xtensa/xtensa/config-linux_be/system.h | 198
 include/asm-xtensa/xtensa/config-linux_be/tie.h | 275
 include/asm-xtensa/xtensa/coreasm.h | 526
 include/asm-xtensa/xtensa/corebits.h | 77
 include/asm-xtensa/xtensa/hal.h | 822
 include/asm-xtensa/xtensa/simcall.h | 130
 include/asm-xtensa/xtensa/xt2000-uart.h | 155
 include/asm-xtensa/xtensa/xt2000.h | 408
 include/asm-xtensa/xtensa/xtboard.h | 120
 108 files changed, 14398 insertions(+), 0 deletions(-)
diff --git a/include/asm-xtensa/a.out.h b/include/asm-xtensa/a.out.h
new file mode 100644
index 000000000000..3be701dfe098
--- /dev/null
+++ b/include/asm-xtensa/a.out.h
@@ -0,0 +1,33 @@
+/*
+ * include/asm-xtensa/a.out.h
+ *
+ * Dummy a.out file. Xtensa does not support the a.out format, but the kernel
+ * seems to depend on it.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_A_OUT_H
+#define _XTENSA_A_OUT_H
+
+/* Note: the kernel needs the a.out definitions, even if only ELF is used. */
+
+#define STACK_TOP TASK_SIZE
+
+struct exec
+{
+ unsigned long a_info;
+ unsigned a_text;
+ unsigned a_data;
+ unsigned a_bss;
+ unsigned a_syms;
+ unsigned a_entry;
+ unsigned a_trsize;
+ unsigned a_drsize;
+};
+
+#endif /* _XTENSA_A_OUT_H */
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
new file mode 100644
index 000000000000..d72bcb32ba4f
--- /dev/null
+++ b/include/asm-xtensa/atomic.h
@@ -0,0 +1,272 @@
+/*
+ * include/asm-xtensa/atomic.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ATOMIC_H
+#define _XTENSA_ATOMIC_H
+
+#include <linux/config.h>
+#include <linux/stringify.h>
+
+typedef struct { volatile int counter; } atomic_t;
+
+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/system.h>
+
+#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
+
+/*
+ * This Xtensa implementation assumes that the right mechanism
+ * for exclusion is for locking interrupts to level 1.
+ *
+ * Locking interrupts looks like this:
+ *
+ * rsil a15, 1
+ * <code>
+ * wsr a15, PS
+ * rsync
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+#define atomic_set(v,i) ((v)->counter = (i))
+
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v.
+ */
+extern __inline__ void atomic_add(int i, atomic_t * v)
+{
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "add %0, %0, %1 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+}
+
+/**
+ * atomic_sub - subtract the atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v.
+ */
+extern __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15, "__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "sub %0, %0, %1 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+}
+
+/*
+ * We use atomic_{add|sub}_return to define other functions.
+ */
+
+extern __inline__ int atomic_add_return(int i, atomic_t * v)
+{
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "add %0, %0, %1 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+}
+
+extern __inline__ int atomic_sub_return(int i, atomic_t * v)
+{
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "sub %0, %0, %1 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval)
+ : "a" (i), "a" (v)
+ : "a15", "memory"
+ );
+
+ return vval;
+}
+
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+#define atomic_inc(v) atomic_add(1,(v))
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+#define atomic_inc_return(v) atomic_add_return(1,(v))
+
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+#define atomic_dec(v) atomic_sub(1,(v))
+
+/**
+ * atomic_dec_return - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1.
+ */
+#define atomic_dec_return(v) atomic_sub_return(1,(v))
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
+
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
+
+/**
+ * atomic_add_negative - add and test if negative
+ * @v: pointer of type atomic_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
+
+
+extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned int all_f = -1;
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "xor %1, %4, %3 \n\t"
+ "and %0, %0, %4 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval), "=a" (mask)
+ : "a" (v), "a" (all_f), "1" (mask)
+ : "a15", "memory"
+ );
+}
+
+extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+ unsigned int vval;
+
+ __asm__ __volatile__(
+ "rsil a15,"__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %2, 0 \n\t"
+ "or %0, %0, %1 \n\t"
+ "s32i %0, %2, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n"
+ : "=&a" (vval)
+ : "a" (mask), "a" (v)
+ : "a15", "memory"
+ );
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_ATOMIC_H */
+
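[Note] A typical consumer of these primitives is reference counting. A minimal
usage sketch (the object and its cleanup routine are hypothetical, not part of
this patch):

    static atomic_t refcount = ATOMIC_INIT(1);

    static void obj_get(void)
    {
            atomic_inc(&refcount);              /* take a reference */
    }

    static void obj_put(void)
    {
            /* drop a reference; free the object on the last put */
            if (atomic_dec_and_test(&refcount))
                    free_object();              /* hypothetical cleanup */
    }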
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
new file mode 100644
index 000000000000..d395ef226c32
--- /dev/null
+++ b/include/asm-xtensa/bitops.h
@@ -0,0 +1,446 @@
+/*
+ * include/asm-xtensa/bitops.h
+ *
+ * Atomic operations that C can't guarantee us. Useful for resource counting, etc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BITOPS_H
+#define _XTENSA_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <asm/byteorder.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+# error SMP not supported on this architecture
+#endif
+
+static __inline__ void set_bit(int nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *a |= mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+
+ *a |= mask;
+}
+
+static __inline__ void clear_bit(int nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *a &= ~mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+
+ *a &= ~mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+static __inline__ void change_bit(int nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ *a ^= mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __change_bit(int nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+
+ *a ^= mask;
+}
+
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
+{
+ unsigned long retval;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ unsigned long retval;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+{
+ unsigned long retval;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long old = *a;
+
+ *a = old & ~mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
+{
+ unsigned long retval;
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+/*
+ * non-atomic version; can be reordered
+ */
+
+static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
+{
+ unsigned long mask = 1 << (nr & 0x1f);
+ unsigned long *a = ((unsigned long *)addr) + (nr >> 5);
+ unsigned long old = *a;
+
+ *a = old ^ mask;
+ return (old & mask) != 0;
+}
+
+static __inline__ int test_bit(int nr, const volatile void *addr)
+{
+ return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
+}
+
+#if XCHAL_HAVE_NSAU
+
+static __inline__ int __cntlz (unsigned long x)
+{
+ int lz;
+ asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
+ return 31 - lz;
+}
+
+#else
+
+static __inline__ int __cntlz (unsigned long x)
+{
+ unsigned long sum, x1, x2, x4, x8, x16;
+ x1 = x & 0xAAAAAAAA;
+ x2 = x & 0xCCCCCCCC;
+ x4 = x & 0xF0F0F0F0;
+ x8 = x & 0xFF00FF00;
+ x16 = x & 0xFFFF0000;
+ sum = x2 ? 2 : 0;
+ sum += (x16 != 0) * 16;
+ sum += (x8 != 0) * 8;
+ sum += (x4 != 0) * 4;
+ sum += (x1 != 0);
+
+ return sum;
+}
+
+#endif
+
+/*
+ * ffz: Find first zero in word. Undefined if no zero exists.
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+
+static __inline__ int ffz(unsigned long x)
+{
+ if ((x = ~x) == 0)
+ return 32;
+ return __cntlz(x & -x);
+}
+
+/*
+ * __ffs: Find first bit set in word. Return 0 for bit 0
+ */
+
+static __inline__ int __ffs(unsigned long x)
+{
+ return __cntlz(x & -x);
+}
+
+/*
+ * ffs: Find first bit set in word. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+
+static __inline__ int ffs(unsigned long x)
+{
+ return __cntlz(x & -x) + 1;
+}
+
+/*
+ * fls: Find last (most-significant) bit set in word.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __inline__ int fls (unsigned int x)
+{
+ return __cntlz(x);
+}
+
+static __inline__ int
+find_next_bit(const unsigned long *addr, int size, int offset)
+{
+ const unsigned long *p = addr + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *p++;
+ tmp &= ~0UL << offset;
+ if (size < 32)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size >= 32) {
+ if ((tmp = *p++) != 0)
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp &= ~0UL >> (32 - size);
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+
+#define find_first_bit(addr, size) \
+ find_next_bit((addr), (size), 0)
+
+static __inline__ int
+find_next_zero_bit(const unsigned long *addr, int size, int offset)
+{
+ const unsigned long *p = addr + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if (offset) {
+ tmp = *p++;
+ tmp |= ~0UL >> (32-offset);
+ if (size < 32)
+ goto found_first;
+ if (~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while (size & ~31UL) {
+ if (~(tmp = *p++))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ tmp |= ~0UL << size;
+found_middle:
+ return result + ffz(tmp);
+}
+
+#define find_first_zero_bit(addr, size) \
+ find_next_zero_bit((addr), (size), 0)
+
+#ifdef __XTENSA_EL__
+# define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr))
+# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
+# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr))
+# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
+# define ext2_test_bit(nr,addr) test_bit((nr), (addr))
+# define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size))
+# define ext2_find_next_zero_bit(addr, size, offset) \
+ find_next_zero_bit((addr), (size), (offset))
+#elif defined(__XTENSA_EB__)
+# define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr))
+# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
+# define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 0x18, (addr))
+# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
+# define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr))
+# define ext2_find_first_zero_bit(addr, size) \
+ ext2_find_next_zero_bit((addr), (size), 0)
+
+static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
+{
+ unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+ unsigned long result = offset & ~31UL;
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= 31UL;
+ if(offset) {
+ /* We hold the little endian value in tmp, but then the
+ * shift is illegal. So we could keep a big endian value
+ * in tmp, like this:
+ *
+ * tmp = __swab32(*(p++));
+ * tmp |= ~0UL >> (32-offset);
+ *
+ * but this would decrease performance, so we change the
+ * shift:
+ */
+ tmp = *(p++);
+ tmp |= __swab32(~0UL >> (32-offset));
+ if(size < 32)
+ goto found_first;
+ if(~tmp)
+ goto found_middle;
+ size -= 32;
+ result += 32;
+ }
+ while(size & ~31UL) {
+ if(~(tmp = *(p++)))
+ goto found_middle;
+ result += 32;
+ size -= 32;
+ }
+ if(!size)
+ return result;
+ tmp = *p;
+
+found_first:
+ /* tmp is little endian, so we would have to swab the shift,
+ * see above. But then we have to swab tmp below for ffz, so
+ * we might as well do this here.
+ */
+ return result + ffz(__swab32(tmp) | (~0UL << size));
+found_middle:
+ return result + ffz(__swab32(tmp));
+}
+
+#else
+# error processor byte order undefined!
+#endif
+
+
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+/*
+ * Find the first bit set in a 140-bit bitmap.
+ * The first 100 bits are unlikely to be set.
+ */
+
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+ if (unlikely(b[0]))
+ return __ffs(b[0]);
+ if (unlikely(b[1]))
+ return __ffs(b[1]) + 32;
+ if (unlikely(b[2]))
+ return __ffs(b[2]) + 64;
+ if (b[3])
+ return __ffs(b[3]) + 96;
+ return __ffs(b[4]) + 128;
+}
+
+
+/* Bitmap functions for the minix filesystem. */
+
+#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr) set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr) test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_BITOPS_H */
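[Note] All of the bit-search helpers above reduce to __cntlz() of an isolated
bit: x & -x clears every bit except the lowest one set, and __cntlz() then
yields its index. The same identities, checked with GCC builtins on a host
(an illustrative sketch, not part of the patch):

    #include <assert.h>

    int main(void)
    {
            unsigned long x = 0xb0;                 /* bits 4, 5 and 7 set */

            assert((x & -x) == 0x10);               /* lowest set bit isolated */
            assert(__builtin_ctzl(x) == 4);         /* matches __ffs(x) */
            assert(__builtin_ffsl(x) == 5);         /* matches ffs(x) = __ffs(x) + 1 */
            assert(__builtin_ctzl(~0xffUL) == 8);   /* matches ffz(0xff) */
            return 0;
    }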
diff --git a/include/asm-xtensa/bootparam.h b/include/asm-xtensa/bootparam.h
new file mode 100644
index 000000000000..9983f2c1b7ee
--- /dev/null
+++ b/include/asm-xtensa/bootparam.h
@@ -0,0 +1,61 @@
+/*
+ * include/asm-xtensa/bootparam.h
+ *
+ * Definition of the Linux/Xtensa boot parameter structure
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * (Concept borrowed from the 68K port)
+ */
+
+#ifndef _XTENSA_BOOTPARAM_H
+#define _XTENSA_BOOTPARAM_H
+
+#define BP_VERSION 0x0001
+
+#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
+#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
+#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
+#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
+#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
+
+#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
+#define BP_TAG_LAST 0x7E0B /* last tag */
+
+#ifndef __ASSEMBLY__
+
+/* All records are aligned to 4 bytes */
+
+typedef struct bp_tag {
+ unsigned short id; /* tag id */
+ unsigned short size; /* size of this record excluding the structure */
+ unsigned long data[0]; /* data */
+} bp_tag_t;
+
+typedef struct meminfo {
+ unsigned long type;
+ unsigned long start;
+ unsigned long end;
+} meminfo_t;
+
+#define SYSMEM_BANKS_MAX 5
+
+#define MEMORY_TYPE_CONVENTIONAL 0x1000
+#define MEMORY_TYPE_NONE 0x2000
+
+typedef struct sysmem_info {
+ int nr_banks;
+ meminfo_t bank[SYSMEM_BANKS_MAX];
+} sysmem_info_t;
+
+extern sysmem_info_t sysmem;
+
+#endif
+#endif
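[Note] The boot loader hands the kernel a contiguous sequence of these records,
opened by BP_TAG_FIRST and terminated by BP_TAG_LAST, with each payload padded
to the 4-byte alignment noted above. A hedged sketch of walking the list (the
entry point and the command-line handler are assumptions):

    static void parse_bootparams(bp_tag_t *tag)
    {
            while (tag->id != BP_TAG_LAST) {
                    if (tag->id == BP_TAG_COMMAND_LINE)
                            handle_cmdline((char *)tag->data);  /* hypothetical */

                    /* skip header plus payload, keeping 4-byte alignment */
                    tag = (bp_tag_t *)((unsigned long)(tag + 1)
                                       + ((tag->size + 3) & ~3UL));
            }
    }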
+
+
+
diff --git a/include/asm-xtensa/bug.h b/include/asm-xtensa/bug.h
new file mode 100644
index 000000000000..56703659b204
--- /dev/null
+++ b/include/asm-xtensa/bug.h
@@ -0,0 +1,41 @@
+/*
+ * include/asm-xtensa/bug.h
+ *
+ * Macros to cause a 'bug' message.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BUG_H
+#define _XTENSA_BUG_H
+
+#include <linux/stringify.h>
+
+#define ILL __asm__ __volatile__ (".byte 0,0,0\n")
+
+#ifdef CONFIG_KALLSYMS
+# define BUG() do { \
+ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+ ILL; \
+} while (0)
+#else
+# define BUG() do { \
+ printk("kernel BUG!\n"); \
+ ILL; \
+} while (0)
+#endif
+
+#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define PAGE_BUG(page) do { BUG(); } while (0)
+#define WARN_ON(condition) do { \
+ if (unlikely((condition)!=0)) { \
+ printk ("Warning in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
+ dump_stack(); \
+ } \
+} while (0)
+
+#endif /* _XTENSA_BUG_H */
diff --git a/include/asm-xtensa/bugs.h b/include/asm-xtensa/bugs.h
new file mode 100644
index 000000000000..c42285320133
--- /dev/null
+++ b/include/asm-xtensa/bugs.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/bugs.h
+ *
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Xtensa processors don't have any bugs. :)
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_BUGS_H
+#define _XTENSA_BUGS_H
+
+#include <asm/processor.h>
+
+static void __init check_bugs(void)
+{
+}
+
+#endif /* _XTENSA_BUGS_H */
diff --git a/include/asm-xtensa/byteorder.h b/include/asm-xtensa/byteorder.h
new file mode 100644
index 000000000000..0b1552569aae
--- /dev/null
+++ b/include/asm-xtensa/byteorder.h
@@ -0,0 +1,82 @@
+/*
+ * include/asm-xtensa/byteorder.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_BYTEORDER_H
+#define _XTENSA_BYTEORDER_H
+
+#include <asm/processor.h>
+#include <asm/types.h>
+
+static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
+{
+ __u32 res;
+ /* instruction sequence from Xtensa ISA release 2/2000 */
+ __asm__("ssai 8 \n\t"
+ "srli %0, %1, 16 \n\t"
+ "src %0, %0, %1 \n\t"
+ "src %0, %0, %0 \n\t"
+ "src %0, %1, %0 \n"
+ : "=&a" (res)
+ : "a" (x)
+ );
+ return res;
+}
+
+static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
+{
+ /* Given that 'short' values are signed (i.e., can be negative),
+ * we cannot assume that the upper 16-bits of the register are
+ * zero. We are careful to mask values after shifting.
+ */
+
+ /* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
+ * inserts an extui instruction after putting this function inline
+ * to ensure that it uses only the least-significant 16 bits of
+ * the result. xt-xcc doesn't use an extui, but assumes the
+ * __asm__ macro follows convention that the upper 16 bits of an
+ * 'unsigned short' result are still zero. This macro doesn't
+ * follow convention; indeed, it leaves garbage in the upper 16
+ * bits of the register.
+ *
+ * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
+ * types while the return type of the function is a 16-bit type
+ * forces both compilers to insert exactly one extui instruction
+ * (or equivalent) to mask off the upper 16 bits. */
+
+ __u32 res;
+ __u32 tmp;
+
+ __asm__("extui %1, %2, 8, 8\n\t"
+ "slli %0, %2, 8 \n\t"
+ "or %0, %0, %1 \n"
+ : "=&a" (res), "=&a" (tmp)
+ : "a" (x)
+ );
+
+ return res;
+}
+
+#define __arch__swab32(x) ___arch__swab32(x)
+#define __arch__swab16(x) ___arch__swab16(x)
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+# define __BYTEORDER_HAS_U64__
+# define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __XTENSA_EL__
+# include <linux/byteorder/little_endian.h>
+#elif defined(__XTENSA_EB__)
+# include <linux/byteorder/big_endian.h>
+#else
+# error processor byte order undefined!
+#endif
+
+#endif /* _XTENSA_BYTEORDER_H */
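[Note] The ssai/src sequence above swaps bytes by funneling the word through
the shift-amount register; in portable C, the same 32-bit swap reads as
follows (a reference sketch only, not the code used on Xtensa):

    static inline __u32 swab32_ref(__u32 x)
    {
            return (x << 24) | ((x & 0xff00) << 8) |
                   ((x >> 8) & 0xff00) | (x >> 24);
    }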
diff --git a/include/asm-xtensa/cache.h b/include/asm-xtensa/cache.h
new file mode 100644
index 000000000000..5aae3f12407c
--- /dev/null
+++ b/include/asm-xtensa/cache.h
@@ -0,0 +1,32 @@
+/*
+ * include/asm-xtensa/cache.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHE_H
+#define _XTENSA_CACHE_H
+
+#include <xtensa/config/core.h>
+
+#if XCHAL_ICACHE_SIZE > 0
+# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0
+# error cache configuration outside expected/supported range!
+# endif
+#endif
+
+#if XCHAL_DCACHE_SIZE > 0
+# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0
+# error cache configuration outside expected/supported range!
+# endif
+#endif
+
+#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX
+#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX
+
+#endif /* _XTENSA_CACHE_H */
diff --git a/include/asm-xtensa/cacheflush.h b/include/asm-xtensa/cacheflush.h
new file mode 100644
index 000000000000..44a36e087844
--- /dev/null
+++ b/include/asm-xtensa/cacheflush.h
@@ -0,0 +1,122 @@
+/*
+ * include/asm-xtensa/cacheflush.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CACHEFLUSH_H
+#define _XTENSA_CACHEFLUSH_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/page.h>
+
+/*
+ * flush and invalidate data cache, invalidate instruction cache:
+ *
+ * __flush_invalidate_cache_all()
+ * __flush_invalidate_cache_range(from,size)
+ *
+ * invalidate data or instruction cache:
+ *
+ * __invalidate_icache_all()
+ * __invalidate_icache_page(adr)
+ * __invalidate_dcache_page(adr)
+ * __invalidate_icache_range(from,size)
+ * __invalidate_dcache_range(from,size)
+ *
+ * flush data cache:
+ *
+ * __flush_dcache_page(adr)
+ *
+ * flush and invalidate data cache:
+ *
+ * __flush_invalidate_dcache_all()
+ * __flush_invalidate_dcache_page(adr)
+ * __flush_invalidate_dcache_range(from,size)
+ */
+
+extern void __flush_invalidate_cache_all(void);
+extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
+extern void __flush_invalidate_dcache_all(void);
+extern void __invalidate_icache_all(void);
+
+extern void __invalidate_dcache_page(unsigned long);
+extern void __invalidate_icache_page(unsigned long);
+extern void __invalidate_icache_range(unsigned long, unsigned long);
+extern void __invalidate_dcache_range(unsigned long, unsigned long);
+
+#if XCHAL_DCACHE_IS_WRITEBACK
+extern void __flush_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_page(unsigned long);
+extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
+#else
+# define __flush_dcache_page(p) do { } while(0)
+# define __flush_invalidate_dcache_page(p) do { } while(0)
+# define __flush_invalidate_dcache_range(p,s) do { } while(0)
+#endif
+
+/*
+ * We have physically tagged caches - nothing to do here -
+ * unless we have cache aliasing.
+ *
+ * Pages can get remapped. Because this might change the 'color' of that page,
+ * we have to flush the cache before the PTE is changed.
+ * (see also Documentation/cachetlb.txt)
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+#define flush_cache_all() __flush_invalidate_cache_all();
+#define flush_cache_mm(mm) __flush_invalidate_cache_all();
+
+#define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
+#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
+
+extern void flush_dcache_page(struct page*);
+
+extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+
+#else
+
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+
+#define flush_cache_vmap(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) do { } while (0)
+
+#define flush_dcache_page(page) do { } while (0)
+
+#define flush_cache_page(vma,addr,pfn) do { } while (0)
+#define flush_cache_range(vma,start,end) do { } while (0)
+
+#endif
+
+#define flush_icache_range(start,end) \
+ __invalidate_icache_range(start,(end)-(start))
+
+/* This is not required, see Documentation/cachetlb.txt */
+
+#define flush_icache_page(vma,page) do { } while(0)
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_CACHEFLUSH_H */
+
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
new file mode 100644
index 000000000000..1a00fad19929
--- /dev/null
+++ b/include/asm-xtensa/checksum.h
@@ -0,0 +1,264 @@
+/*
+ * include/asm-xtensa/checksum.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CHECKSUM_H
+#define _XTENSA_CHECKSUM_H
+
+#include <linux/config.h>
+#include <linux/in6.h>
+#include <xtensa/config/core.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
+/*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+ *
+ * If you use these functions directly please don't forget the
+ * verify_area().
+ */
+extern __inline__
+unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
+ int len, int sum)
+{
+ return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+}
+
+extern __inline__
+unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
+ int len, int sum, int *err_ptr)
+{
+ return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+}
+
+/*
+ * These are the old (and unsafe) way of doing checksums; a warning message
+ * will be printed if they are used and an exception occurs.
+ *
+ * these functions should go away after some time.
+ */
+
+#define csum_partial_copy_fromuser csum_partial_copy
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
+
+/*
+ * Fold a partial checksum
+ */
+
+static __inline__ unsigned int csum_fold(unsigned int sum)
+{
+ unsigned int __dummy;
+ __asm__("extui %1, %0, 16, 16\n\t"
+ "extui %0 ,%0, 0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "slli %1, %0, 16\n\t"
+ "add %0, %0, %1\n\t"
+ "extui %0, %0, 16, 16\n\t"
+ "neg %0, %0\n\t"
+ "addi %0, %0, -1\n\t"
+ "extui %0, %0, 0, 16\n\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "0" (sum));
+ return sum;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+{
+ unsigned int sum, tmp, endaddr;
+
+ __asm__ __volatile__(
+ "sub %0, %0, %0\n\t"
+#if XCHAL_HAVE_LOOPS
+ "loopgtz %2, 2f\n\t"
+#else
+ "beqz %2, 2f\n\t"
+ "slli %4, %2, 2\n\t"
+ "add %4, %4, %1\n\t"
+ "0:\t"
+#endif
+ "l32i %3, %1, 0\n\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "addi %1, %1, 4\n\t"
+#if !XCHAL_HAVE_LOOPS
+ "blt %1, %4, 0b\n\t"
+#endif
+ "2:\t"
+ /* Since the input registers which are loaded with iph and ihl
+ are modified, we must also specify them as outputs, or gcc
+ will assume they contain their original values. */
+ : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+ : "1" (iph), "2" (ihl));
+
+ return csum_fold(sum);
+}
+
+static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+
+#ifdef __XTENSA_EL__
+ unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+#elif defined(__XTENSA_EB__)
+ unsigned long len_proto = (proto<<16)+len;
+#else
+# error processor byte order undefined!
+#endif
+ __asm__("add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %2\n\t"
+ "bgeu %0, %2, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %3\n\t"
+ "bgeu %0, %3, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=r" (len_proto)
+ : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
+ return sum;
+}
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+ struct in6_addr *daddr,
+ __u32 len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ unsigned int __dummy;
+ __asm__("l32i %1, %2, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %2, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 0\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 4\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 8\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "l32i %1, %3, 12\n\t"
+ "add %0, %0, %1\n\t"
+ "bgeu %0, %1, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %4\n\t"
+ "bgeu %0, %4, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ "add %0, %0, %5\n\t"
+ "bgeu %0, %5, 1f\n\t"
+ "addi %0, %0, 1\n\t"
+ "1:\t"
+ : "=r" (sum), "=&r" (__dummy)
+ : "r" (saddr), "r" (daddr),
+ "r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
+
+ return csum_fold(sum);
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst,
+ int len, int sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_WRITE, dst, len))
+ return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return -1; /* invalid checksum */
+}
+#endif
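[Note] csum_fold() above compresses a 32-bit partial sum into 16 bits and
complements the result. A portable C rendering of the same operation
(illustrative, not the assembly version used here):

    static inline unsigned short csum_fold_ref(unsigned int sum)
    {
            sum = (sum & 0xffff) + (sum >> 16);     /* fold upper half in */
            sum = (sum & 0xffff) + (sum >> 16);     /* absorb the carry */
            return (unsigned short)~sum;            /* one's complement */
    }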
diff --git a/include/asm-xtensa/coprocessor.h b/include/asm-xtensa/coprocessor.h
new file mode 100644
index 000000000000..a91b96dc0efe
--- /dev/null
+++ b/include/asm-xtensa/coprocessor.h
@@ -0,0 +1,70 @@
+/*
+ * include/asm-xtensa/coprocessor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_COPROCESSOR_H
+#define _XTENSA_COPROCESSOR_H
+
+#include <xtensa/config/core.h>
+
+#define XTOFS(last_start,last_size,align) \
+ ((last_start+last_size+align-1) & -align)
+
+#define XTENSA_CP_EXTRA_OFFSET 0
+#define XTENSA_CP_EXTRA_ALIGN XCHAL_EXTRA_SA_ALIGN
+
+#define XTENSA_CPE_CP0_OFFSET \
+ XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
+#define XTENSA_CPE_CP1_OFFSET \
+ XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
+#define XTENSA_CPE_CP2_OFFSET \
+ XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
+#define XTENSA_CPE_CP3_OFFSET \
+ XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
+#define XTENSA_CPE_CP4_OFFSET \
+ XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
+#define XTENSA_CPE_CP5_OFFSET \
+ XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
+#define XTENSA_CPE_CP6_OFFSET \
+ XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
+#define XTENSA_CPE_CP7_OFFSET \
+ XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
+#define XTENSA_CP_EXTRA_SIZE \
+ XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
+
+#if XCHAL_CP_NUM > 0
+# ifndef __ASSEMBLY__
+/*
+ * Tasks that own the contents of (i.e., were the last user of) each
+ * coprocessor.
+ * Entries are 0 for not-owned or non-existent coprocessors.
+ * Note: The size of this structure is fixed to 8 bytes in entry.S
+ */
+typedef struct {
+ struct task_struct *owner; /* owner */
+ int offset; /* offset in cpextra space. */
+} coprocessor_info_t;
+# else
+# define COPROCESSOR_INFO_OWNER 0
+# define COPROCESSOR_INFO_OFFSET 4
+# define COPROCESSOR_INFO_SIZE 8
+# endif
+#endif
+
+
+#ifndef __ASSEMBLY__
+# if XCHAL_CP_NUM > 0
+struct task_struct;
+extern void release_coprocessors (struct task_struct*);
+extern void save_coprocessor_registers(void*, int);
+# else
+# define release_coprocessors(task)
+# endif
+#endif
+
+#endif /* _XTENSA_COPROCESSOR_H */
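[Note] XTOFS() places each save area at the end of the previous one, rounded
up to the next alignment boundary (align must be a power of two). Two worked
examples with made-up numbers:

    XTOFS(0x24, 0x1c, 16)   /* end is 0x40, already aligned: result 0x40 */
    XTOFS(0x24, 0x1d, 16)   /* end is 0x41, rounded up:      result 0x50 */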
diff --git a/include/asm-xtensa/cpumask.h b/include/asm-xtensa/cpumask.h
new file mode 100644
index 000000000000..ebeede397db3
--- /dev/null
+++ b/include/asm-xtensa/cpumask.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/cpumask.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CPUMASK_H
+#define _XTENSA_CPUMASK_H
+
+#include <asm-generic/cpumask.h>
+
+#endif /* _XTENSA_CPUMASK_H */
diff --git a/include/asm-xtensa/cputime.h b/include/asm-xtensa/cputime.h
new file mode 100644
index 000000000000..a7fb864a50ae
--- /dev/null
+++ b/include/asm-xtensa/cputime.h
@@ -0,0 +1,6 @@
+#ifndef _XTENSA_CPUTIME_H
+#define _XTENSA_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* _XTENSA_CPUTIME_H */
diff --git a/include/asm-xtensa/current.h b/include/asm-xtensa/current.h
new file mode 100644
index 000000000000..8d1eb5d78649
--- /dev/null
+++ b/include/asm-xtensa/current.h
@@ -0,0 +1,38 @@
+/*
+ * include/asm-xtensa/current.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_CURRENT_H
+#define _XTENSA_CURRENT_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+
+struct task_struct;
+
+static inline struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+#define current get_current()
+
+#else
+
+#define CURRENT_SHIFT 13
+
+#define GET_CURRENT(reg,sp) \
+ GET_THREAD_INFO(reg,sp); \
+ l32i reg, reg, TI_TASK \
+
+#endif
+
+
+#endif /* _XTENSA_CURRENT_H */
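[Note] CURRENT_SHIFT of 13 corresponds to 8 KB kernel stacks with the
thread_info at the stack base, so assembly code can recover it by masking the
stack pointer. A sketch of the C-level equivalent, assuming that conventional
layout (not part of this patch):

    static inline struct thread_info *current_ti_ref(void)
    {
            unsigned long sp;

            __asm__ ("mov %0, a1" : "=a" (sp));     /* a1 is the stack pointer */
            return (struct thread_info *)(sp & ~((1UL << CURRENT_SHIFT) - 1));
    }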
diff --git a/include/asm-xtensa/delay.h b/include/asm-xtensa/delay.h
new file mode 100644
index 000000000000..6359c55e77a8
--- /dev/null
+++ b/include/asm-xtensa/delay.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/delay.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ */
+
+#ifndef _XTENSA_DELAY_H
+#define _XTENSA_DELAY_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+#include <asm/param.h>
+
+extern unsigned long loops_per_jiffy;
+
+extern __inline__ void __delay(unsigned long loops)
+{
+ /* 2 cycles per loop. */
+ __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 1, 1b"
+ : "=r" (loops) : "0" (loops));
+}
+
+static __inline__ u32 xtensa_get_ccount(void)
+{
+ u32 ccount;
+ asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
+ return ccount;
+}
+
+/* For SMP/NUMA systems, change boot_cpu_data to something like
+ * local_cpu_data->... where local_cpu_data points to the current
+ * cpu. */
+
+static __inline__ void udelay (unsigned long usecs)
+{
+ unsigned long start = xtensa_get_ccount();
+ unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ));
+
+ /* Note: all variables are unsigned (can wrap around)! */
+ while (((unsigned long)xtensa_get_ccount()) - start < cycles)
+ ;
+}
+
+#endif
+
diff --git a/include/asm-xtensa/div64.h b/include/asm-xtensa/div64.h
new file mode 100644
index 000000000000..c4a105776383
--- /dev/null
+++ b/include/asm-xtensa/div64.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-xtensa/div64.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DIV64_H
+#define _XTENSA_DIV64_H
+
+#define do_div(n,base) ({ \
+ int __res = n % ((unsigned int) base); \
+ n /= (unsigned int) base; \
+ __res; })
+
+#endif
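[Note] do_div() divides n in place and evaluates to the remainder, matching
the generic kernel interface. Typical use (illustrative):

    unsigned long long ns = 1000000123ULL;
    unsigned int rem;

    rem = do_div(ns, 1000000);      /* ns becomes 1000, rem becomes 123 */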
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
new file mode 100644
index 000000000000..e86a206f1209
--- /dev/null
+++ b/include/asm-xtensa/dma-mapping.h
@@ -0,0 +1,182 @@
+/*
+ * include/asm-xtensa/dma-mapping.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_MAPPING_H
+#define _XTENSA_DMA_MAPPING_H
+
+#include <asm/scatterlist.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <linux/mm.h>
+
+/*
+ * DMA-consistent mapping functions.
+ */
+
+extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
+extern void consistent_free(void*, size_t, dma_addr_t);
+extern void consistent_sync(void*, size_t, int);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int flag);
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ consistent_sync(ptr, size, direction);
+ return virt_to_phys(ptr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ BUG_ON(direction == DMA_NONE);
+
+ for (i = 0; i < nents; i++, sg++ ) {
+ BUG_ON(!sg->page);
+
+ sg->dma_address = page_to_phys(sg->page) + sg->offset;
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, direction);
+ }
+
+ return nents;
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+ return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(direction == DMA_NONE);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+
+ consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+
+ consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
+}
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ for (i = 0; i < nelems; i++, sg++)
+ consistent_sync(page_address(sg->page) + sg->offset,
+ sg->length, dir);
+}
+static inline int
+dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if(!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ return L1_CACHE_BYTES;
+}
+
+#define dma_is_consistent(d) (1)
+
+static inline void
+dma_cache_sync(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ consistent_sync(vaddr, size, direction);
+}
+
+#endif /* _XTENSA_DMA_MAPPING_H */
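[Note] On this architecture the streaming API above reduces to cache
maintenance plus a virtual-to-physical translation. A hedged driver-side
sketch (dev, buf and len are assumptions):

    dma_addr_t handle;

    handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    /* ... program the device with 'handle' and run the transfer ... */
    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);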
diff --git a/include/asm-xtensa/dma.h b/include/asm-xtensa/dma.h
new file mode 100644
index 000000000000..1c22b0234586
--- /dev/null
+++ b/include/asm-xtensa/dma.h
@@ -0,0 +1,61 @@
+/*
+ * include/asm-xtensa/dma.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_DMA_H
+#define _XTENSA_DMA_H
+
+#include <linux/config.h>
+#include <asm/io.h> /* need byte IO */
+#include <xtensa/config/core.h>
+
+/*
+ * This is only to be defined if we have PC-like DMA.
+ * By default this is not true on an Xtensa processor,
+ * however on boards with a PCI bus, such functionality
+ * might be emulated externally.
+ *
+ * NOTE: there still exists driver code that assumes
+ * this is defined, eg. drivers/sound/soundcard.c (as of 2.4).
+ */
+#define MAX_DMA_CHANNELS 8
+
+/*
+ * The maximum virtual address to which DMA transfers
+ * can be performed on this platform.
+ *
+ * NOTE: This is board (platform) specific, not processor-specific!
+ *
+ * NOTE: This assumes DMA transfers can only be performed on
+ * the section of physical memory contiguously mapped in virtual
+ * space for the kernel. For the Xtensa architecture, this
+ * means the maximum possible size of this DMA area is
+ * the size of the statically mapped kernel segment
+ * (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB.
+ *
+ * NOTE: When the entire KSEG area is DMA capable, we subtract
+ * one from the max address so that the virt_to_phys() macro
+ * works correctly on the address (otherwise the address
+ * enters another area, and virt_to_phys() may not return
+ * the value desired).
+ */
+#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1)
+
+/* Reserve and release a DMA channel */
+extern int request_dma(unsigned int dmanr, const char * device_id);
+extern void free_dma(unsigned int dmanr);
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
+#define isa_dma_bridge_buggy (0)
+#endif
+
+
+#endif
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
new file mode 100644
index 000000000000..64f1f53874fe
--- /dev/null
+++ b/include/asm-xtensa/elf.h
@@ -0,0 +1,222 @@
+/*
+ * include/asm-xtensa/elf.h
+ *
+ * ELF register definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ELF_H
+#define _XTENSA_ELF_H
+
+#include <asm/ptrace.h>
+#include <asm/coprocessor.h>
+#include <xtensa/config/core.h>
+
+/* Xtensa processor ELF architecture-magic number */
+
+#define EM_XTENSA 94
+#define EM_XTENSA_OLD 0xABC7
+
+/* ELF register definitions. This is needed for core dump support. */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ * Processor info: config_version, cpuxy
+ * Processor state: pc, ps, exccause, excvaddr, wb, ws,
+ * lbeg, lend, lcount, sar
+ * GP regs: ar0 - arXX
+ */
+
+typedef unsigned long elf_greg_t;
+
+typedef struct {
+ elf_greg_t xchal_config_id0;
+ elf_greg_t xchal_config_id1;
+ elf_greg_t cpux;
+ elf_greg_t cpuy;
+ elf_greg_t pc;
+ elf_greg_t ps;
+ elf_greg_t exccause;
+ elf_greg_t excvaddr;
+ elf_greg_t windowbase;
+ elf_greg_t windowstart;
+ elf_greg_t lbeg;
+ elf_greg_t lend;
+ elf_greg_t lcount;
+ elf_greg_t sar;
+ elf_greg_t syscall;
+ elf_greg_t ar[XCHAL_NUM_AREGS];
+} xtensa_gregset_t;
+
+#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+/*
+ * Compute the size of the coprocessor and extra state layout (register info)
+ * table (in bytes).
+ * This is actually the maximum size of the table, as opposed to the
+ * actual size, which is available from the _xtensa_reginfo_table_size
+ * global variable.
+ *
+ * (See also arch/xtensa/kernel/coprocessor.S)
+ *
+ */
+
+#ifndef XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM
+# define XTENSA_CPE_LTABLE_SIZE 0
+#else
+# define XTENSA_CPE_SEGMENT(num) (num ? (1+num) : 0)
+# define XTENSA_CPE_LTABLE_ENTRIES \
+ ( XTENSA_CPE_SEGMENT(XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP0_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP1_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP2_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP3_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP4_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP5_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP6_SA_CONTENTS_LIBDB_NUM) \
+ + XTENSA_CPE_SEGMENT(XCHAL_CP7_SA_CONTENTS_LIBDB_NUM) \
+ + 1 /* final entry */ \
+ )
+# define XTENSA_CPE_LTABLE_SIZE (XTENSA_CPE_LTABLE_ENTRIES * 8)
+#endif
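+
+/*
+ * Worked example, with hypothetical configuration values: if the
+ * "extra" state has 2 LIBDB entries, coprocessor 0 has 3, and all
+ * other coprocessors have none, then
+ *
+ *   XTENSA_CPE_LTABLE_ENTRIES == (1+2) + (1+3) + 1 == 8
+ *   XTENSA_CPE_LTABLE_SIZE    == 8 * 8 == 64 bytes
+ *
+ * each non-empty segment costs one header entry plus its register
+ * entries, and one final entry terminates the table.
+ */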
+
+
+/*
+ * Instantiations of the elf_fpregset_t type contain, in most
+ * architectures, the floating point (FPU) register set.
+ * For Xtensa, this type is extended to contain all custom state,
+ * ie. coprocessor and "extra" (non-coprocessor) state (including,
+ * for example, TIE-defined states and register files; as well
+ * as other optional processor state).
+ * This includes FPU state if a floating-point coprocessor happens
+ * to have been configured within the Xtensa processor.
+ *
+ * TOTAL_FPREGS_SIZE is the required size (without rounding)
+ * of elf_fpregset_t. It provides space for the following:
+ *
+ * a) 32-bit mask of active coprocessors for this task (similar
+ * to CPENABLE in single-threaded Xtensa processor systems)
+ *
+ * b) table describing the layout of custom states (ie. of
+ * individual registers, etc) within the save areas
+ *
+ * c) save areas for each coprocessor and for non-coprocessor
+ * ("extra") state
+ *
+ * Note that save areas may require up to 16-byte alignment when
+ * accessed by save/restore sequences. We do not need to ensure
+ * such alignment in an elf_fpregset_t structure because custom
+ * state is not directly loaded/stored into it; rather, save area
+ * contents are copied to elf_fpregset_t from the active save areas
+ * (see 'struct task_struct' definition in processor.h for that)
+ * using memcpy(). But we do allow space for such alignment,
+ * to allow optimizations of layout and copying.
+ */
+
+#define TOTAL_FPREGS_SIZE \
+ (4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
+#define ELF_NFPREG \
+ ((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
+
+typedef unsigned int elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
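+
+/*
+ * For illustration, continuing the hypothetical configuration above:
+ * with XTENSA_CPE_LTABLE_SIZE == 64 and, say, XTENSA_CP_EXTRA_SIZE == 256,
+ * TOTAL_FPREGS_SIZE == 4 + 64 + 256 == 324, and ELF_NFPREG rounds up
+ * to (324 + 3) / 4 == 81 32-bit words.
+ */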
+
+#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
+ xtensa_elf_core_copy_regs (&_eregs, _pregs);
+
+extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
+ ( (x)->e_machine == EM_XTENSA_OLD ) )
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+
+#ifdef __XTENSA_EL__
+# define ELF_DATA ELFDATA2LSB
+#elif defined(__XTENSA_EB__)
+# define ELF_DATA ELFDATA2MSB
+#else
+# error processor byte order undefined!
+#endif
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_ARCH EM_XTENSA
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
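+
+/*
+ * For illustration: assuming the 1 GB TASK_SIZE (0x40000000) used by
+ * this port, the expression evaluates to 2 * 0x40000000 / 3 ==
+ * 0x2aaaaaaa, which the loader then rounds to a page boundary.
+ */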
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ * For the moment, this port defines no platform string, but that
+ * could change...
+ */
+
+#define ELF_PLATFORM (NULL)
+
+/*
+ * The Xtensa processor ABI says that when the program starts, a2
+ * contains a pointer to a function which might be registered using
+ * `atexit'. This provides a means for the dynamic linker to call
+ * DT_FINI functions for shared libraries that have been loaded before
+ * the code runs.
+ *
+ * A value of 0 means there is no such handler.
+ *
+ * We might as well make sure everything else is cleared too (except
+ * for the stack pointer in a1), just to make things more
+ * deterministic. Also, clearing a0 terminates debugger backtraces.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr) \
+ do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
+ _r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
+ _r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
+ _r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
+ } while (0)
+
+#ifdef __KERNEL__
+
+#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
+
+extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
+ struct task_struct*);
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_ELF_H */
diff --git a/include/asm-xtensa/errno.h b/include/asm-xtensa/errno.h
new file mode 100644
index 000000000000..ced5194d2750
--- /dev/null
+++ b/include/asm-xtensa/errno.h
@@ -0,0 +1,142 @@
+/*
+ * include/asm-xtensa/errno.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_ERRNO_H
+#define _XTENSA_ERRNO_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif /* _XTENSA_ERRNO_H */
diff --git a/include/asm-xtensa/fcntl.h b/include/asm-xtensa/fcntl.h
new file mode 100644
index 000000000000..48876bb727d2
--- /dev/null
+++ b/include/asm-xtensa/fcntl.h
@@ -0,0 +1,101 @@
+/*
+ * include/asm-xtensa/fcntl.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_FCNTL_H
+#define _XTENSA_FCNTL_H
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+   located on an ext2 file system */
+#define O_ACCMODE 0x0003
+#define O_RDONLY 0x0000
+#define O_WRONLY 0x0001
+#define O_RDWR 0x0002
+#define O_APPEND 0x0008
+#define O_SYNC 0x0010
+#define O_NONBLOCK 0x0080
+#define O_CREAT 0x0100 /* not fcntl */
+#define O_TRUNC 0x0200 /* not fcntl */
+#define O_EXCL 0x0400 /* not fcntl */
+#define O_NOCTTY 0x0800 /* not fcntl */
+#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
+#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */
+#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/
+#define O_DIRECTORY 0x10000 /* must be a directory */
+#define O_NOFOLLOW 0x20000 /* don't follow links */
+#define O_NOATIME 0x100000
+
+#define O_NDELAY O_NONBLOCK
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get close_on_exec */
+#define F_SETFD 2 /* set/clear close_on_exec */
+#define F_GETFL 3 /* get file->f_flags */
+#define F_SETFL 4 /* set file->f_flags */
+#define F_GETLK 14
+#define F_GETLK64 15
+#define F_SETLK 6
+#define F_SETLKW 7
+#define F_SETLK64 16
+#define F_SETLKW64 17
+
+#define F_SETOWN 24 /* for sockets. */
+#define F_GETOWN 23 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+
+/* for leases */
+#define F_INPROGRESS 16
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+#define LOCK_MAND 32 /* This is a mandatory flock ... */
+#define LOCK_READ 64 /* which allows concurrent read operations */
+#define LOCK_WRITE 128 /* which allows concurrent write operations */
+#define LOCK_RW 192 /* which allows concurrent read & write ops */
+
+typedef struct flock {
+ short l_type;
+ short l_whence;
+ __kernel_off_t l_start;
+ __kernel_off_t l_len;
+ long l_sysid;
+ __kernel_pid_t l_pid;
+ long pad[4];
+} flock_t;
+
+struct flock64 {
+ short l_type;
+ short l_whence;
+ __kernel_off_t l_start;
+ __kernel_off_t l_len;
+ pid_t l_pid;
+};
+
+#define F_LINUX_SPECIFIC_BASE 1024
+
+#endif /* _XTENSA_FCNTL_H */
diff --git a/include/asm-xtensa/fixmap.h b/include/asm-xtensa/fixmap.h
new file mode 100644
index 000000000000..4423b8ad4954
--- /dev/null
+++ b/include/asm-xtensa/fixmap.h
@@ -0,0 +1,252 @@
+/*
+ * include/asm-xtensa/fixmap.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_FIXMAP_H
+#define _XTENSA_FIXMAP_H
+
+#include <asm/processor.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Here we define all the compile-time virtual addresses.
+ */
+
+#if XCHAL_SEG_MAPPABLE_VADDR != 0
+# error "Current port requires virtual user space starting at 0"
+#endif
+#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
+# error "Current port requires at least 0x8000000 bytes for user space"
+#endif
+
+/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
+
+#define __IN_VMALLOC(addr) \
+ (((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
+#define __SPAN_VMALLOC(start,end) \
+ (((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
+#define INSIDE_VMALLOC(start,end) \
+ (__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
+
+#if XCHAL_NUM_INSTROM
+# if XCHAL_NUM_INSTROM == 1
+# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
+# error vmalloc range conflicts with instrom0
+# endif
+# endif
+# if XCHAL_NUM_INSTROM == 2
+# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
+# error vmalloc range conflicts with instrom1
+# endif
+# endif
+#endif
+
+#if XCHAL_NUM_INSTRAM
+# if XCHAL_NUM_INSTRAM == 1
+# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
+# error vmalloc range conflicts with instram0
+# endif
+# endif
+# if XCHAL_NUM_INSTRAM == 2
+# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
+# error vmalloc range conflicts with instram1
+# endif
+# endif
+#endif
+
+#if XCHAL_NUM_DATAROM
+# if XCHAL_NUM_DATAROM == 1
+# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
+# error vmalloc range conflicts with datarom0
+# endif
+# endif
+# if XCHAL_NUM_DATAROM == 2
+# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
+# error vmalloc range conflicts with datarom1
+# endif
+# endif
+#endif
+
+#if XCHAL_NUM_DATARAM
+# if XCHAL_NUM_DATARAM == 1
+# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
+# error vmalloc range conflicts with dataram0
+# endif
+# endif
+# if XCHAL_NUM_DATARAM == 2
+# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
+# error vmalloc range conflicts with dataram1
+# endif
+# endif
+#endif
+
+#if XCHAL_NUM_XLMI
+# if XCHAL_NUM_XLMI == 1
+# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
+# error vmalloc range conflicts with xlmi0
+# endif
+# endif
+# if XCHAL_NUM_XLMI == 2
+# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
+# error vmalloc range conflicts with xlmi1
+# endif
+# endif
+#endif
+
+#if (XCHAL_NUM_INSTROM > 2) || \
+ (XCHAL_NUM_INSTRAM > 2) || \
+ (XCHAL_NUM_DATARAM > 2) || \
+ (XCHAL_NUM_DATAROM > 2) || \
+ (XCHAL_NUM_XLMI > 2)
+# error Insufficient checks on vmalloc above for more than 2 devices
+#endif
+
+/*
+ * USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
+ * TASK_SIZE down to 0x40000000 to simplify the handling of windowed
+ * call instructions (currently limited to a range of 1 GByte). User
+ * tasks may very well reclaim the VM space from 0x40000000 to
+ * 0x7fffffff in the future, so we do not want the kernel becoming
+ * accustomed to having any of its stuff (e.g., page tables) in this
+ * region. This VM region is no-man's land for now.
+ */
+
+#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
+#define USER_VM_SIZE 0x80000000
+
+/* Size of page table: */
+
+#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
+#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
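+
+/*
+ * For illustration: assuming the usual 4 KB PTE page size
+ * (XCHAL_MMU_MIN_PTE_PAGE_SIZE == 12), PGTABLE_SIZE_BITS is
+ * 32 - 12 + 2 == 22, so the page table occupies 4 MB: one 4-byte PTE
+ * for each of the 2^20 virtual pages.
+ */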
+
+/* All kernel-mappable space: */
+
+#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
+#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
+
+/* Carve out page table at start of kernel-mappable area: */
+
+#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
+#error "Gimme some space for page table!"
+#endif
+#define PGTABLE_START KERNEL_ALLMAP_START
+
+/* Remaining kernel-mappable space: */
+
+#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
+#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
+
+#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
+# error "Shouldn't the kernel have at least *some* mappable space?"
+#endif
+
+#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Some constants used elsewhere, but perhaps only in Xtensa header
+ * files, so maybe we can get rid of some and access compile-time HAL
+ * directly...
+ *
+ * Note: We assume that system RAM is located at the very start of the
+ * kernel segments !!
+ */
+#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
+#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
+#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
+
+/*
+ * Returns the physical/virtual addresses of the kernel space
+ * (works with the cached kernel segment only, which is the
+ * one normally used for kernel operation).
+ */
+
+/* PHYSICAL BYPASS CACHED
+ *
+ * bypass vaddr bypass paddr * cached vaddr
+ * cached vaddr cached paddr bypass vaddr *
+ * bypass paddr * bypass vaddr cached vaddr
+ * cached paddr * bypass vaddr cached vaddr
+ * other * * *
+ */
+
+#define PHYSADDR(a) \
+(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
+ && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
+ ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
+ && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
+ (unsigned)(a))
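+
+/*
+ * Usage sketch, with hypothetical addresses: if the cached kernel
+ * segment maps virtual 0xd0000000 to physical 0x00000000, then
+ * PHYSADDR(0xd0001234) == 0x00001234; an address outside both kernel
+ * segments falls through unchanged, per the "other" row above.
+ */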
+
+#define BYPASS_ADDR(a) \
+(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
+ && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
+ ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
+ && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
+ ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
+ && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
+ (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
+ (unsigned)(a))
+
+#define CACHED_ADDR(a) \
+(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
+ && (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
+ ((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
+ && (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
+ ((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
+ && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
+ (unsigned)(a))
+
+#define PHYSADDR_IO(a) \
+(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
+ && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
+ ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
+ && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
+ (unsigned)(a))
+
+#define BYPASS_ADDR_IO(a) \
+(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
+ && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
+ ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
+ && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
+ ((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
+ && (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
+ (unsigned)(a))
+
+#define CACHED_ADDR_IO(a) \
+(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
+ && (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
+ ((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
+ && (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
+ ((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
+ && (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
+ (unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
+ (unsigned)(a))
+
+#endif /* _XTENSA_FIXMAP_H */
diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h
new file mode 100644
index 000000000000..e07c76c36b95
--- /dev/null
+++ b/include/asm-xtensa/hardirq.h
@@ -0,0 +1,28 @@
+/*
+ * include/asm-xtensa/hardirq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_HARDIRQ_H
+#define _XTENSA_HARDIRQ_H
+
+#include <linux/config.h>
+#include <linux/cache.h>
+#include <asm/irq.h>
+
+/* headers.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __syscall_count;
+ struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
+ unsigned int __nmi_count; /* arch dependent */
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#endif /* _XTENSA_HARDIRQ_H */
diff --git a/include/asm-xtensa/hdreg.h b/include/asm-xtensa/hdreg.h
new file mode 100644
index 000000000000..64b80607b80d
--- /dev/null
+++ b/include/asm-xtensa/hdreg.h
@@ -0,0 +1,17 @@
+/*
+ * include/asm-xtensa/hdreg.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+#ifndef _XTENSA_HDREG_H
+#define _XTENSA_HDREG_H
+
+typedef unsigned int ide_ioreg_t;
+
+#endif
diff --git a/include/asm-xtensa/highmem.h b/include/asm-xtensa/highmem.h
new file mode 100644
index 000000000000..0a046ca5a687
--- /dev/null
+++ b/include/asm-xtensa/highmem.h
@@ -0,0 +1,17 @@
+/*
+ * include/asm-xtensa/highmem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_HIGHMEM_H
+#define _XTENSA_HIGHMEM_H
+
+extern void flush_cache_kmaps(void);
+
+#endif
diff --git a/include/asm-xtensa/hw_irq.h b/include/asm-xtensa/hw_irq.h
new file mode 100644
index 000000000000..ccf436249eaa
--- /dev/null
+++ b/include/asm-xtensa/hw_irq.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/hw_irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_HW_IRQ_H
+#define _XTENSA_HW_IRQ_H
+
+static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
+{
+}
+
+#endif
diff --git a/include/asm-xtensa/ide.h b/include/asm-xtensa/ide.h
new file mode 100644
index 000000000000..b523cd4a486e
--- /dev/null
+++ b/include/asm-xtensa/ide.h
@@ -0,0 +1,36 @@
+/*
+ * include/asm-xtensa/ide.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1996 Linus Torvalds & authors
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IDE_H
+#define _XTENSA_IDE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+#ifndef MAX_HWIFS
+# define MAX_HWIFS 1
+#endif
+
+static __inline__ int ide_default_irq(unsigned long base)
+{
+ /* Unsupported! */
+ return 0;
+}
+
+static __inline__ unsigned long ide_default_io_base(int index)
+{
+ /* Unsupported! */
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_IDE_H */
diff --git a/include/asm-xtensa/io.h b/include/asm-xtensa/io.h
new file mode 100644
index 000000000000..2c471c42ecfc
--- /dev/null
+++ b/include/asm-xtensa/io.h
@@ -0,0 +1,197 @@
+/*
+ * include/asm-xtensa/io.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IO_H
+#define _XTENSA_IO_H
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <asm/byteorder.h>
+
+#include <linux/types.h>
+#include <asm/fixmap.h>
+
+#define _IO_BASE 0
+
+
+/*
+ * swap functions to change byte order from little-endian to big-endian and
+ * vice versa.
+ */
+
+static inline unsigned short _swapw (unsigned short v)
+{
+ return (v << 8) | (v >> 8);
+}
+
+static inline unsigned int _swapl (unsigned int v)
+{
+ return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
+}
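+
+/* For example, _swapw(0x1234) == 0x3412 and _swapl(0x12345678) == 0x78563412. */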
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are trivial on the 1:1 Linux/Xtensa mapping
+ */
+
+extern inline unsigned long virt_to_phys(volatile void * address)
+{
+ return PHYSADDR((unsigned long)address);
+}
+
+extern inline void * phys_to_virt(unsigned long address)
+{
+ return (void*) CACHED_ADDR(address);
+}
+
+/*
+ * IO bus memory addresses are also 1:1 with the physical address
+ */
+
+extern inline unsigned long virt_to_bus(volatile void * address)
+{
+ return PHYSADDR((unsigned long)address);
+}
+
+extern inline void * bus_to_virt (unsigned long address)
+{
+ return (void *) CACHED_ADDR(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+
+extern inline void *ioremap(unsigned long offset, unsigned long size)
+{
+ return (void *) CACHED_ADDR_IO(offset);
+}
+
+extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+{
+ return (void *) BYPASS_ADDR_IO(offset);
+}
+
+extern inline void iounmap(void *addr)
+{
+}
+
+/*
+ * Generic I/O
+ */
+
+#define readb(addr) \
+ ({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
+#define readw(addr) \
+ ({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
+#define readl(addr) \
+ ({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
+#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
+#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
+#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
+
+static inline __u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u8 *)(addr);
+}
+static inline __u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u16 *)(addr);
+}
+static inline __u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u32 *)(addr);
+}
+static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u8 *)(addr) = b;
+}
+static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u16 *)(addr) = b;
+}
+static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u32 *)(addr) = b;
+}
+
+
+/* These are the definitions for the x86 IO instructions
+ * inb/inw/inl/outb/outw/outl, the "string" versions
+ * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
+ * inb_p/inw_p/...
+ * The macros don't do byte-swapping.
+ */
+
+#define inb(port) readb((u8 *)((port)+_IO_BASE))
+#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE))
+#define inw(port) readw((u16 *)((port)+_IO_BASE))
+#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE))
+#define inl(port) readl((u32 *)((port)+_IO_BASE))
+#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)+_IO_BASE))
+
+#define inb_p(port) inb((port))
+#define outb_p(val, port) outb((val), (port))
+#define inw_p(port) inw((port))
+#define outw_p(val, port) outw((val), (port))
+#define inl_p(port) inl((port))
+#define outl_p(val, port) outl((val), (port))
+
+extern void insb (unsigned long port, void *dst, unsigned long count);
+extern void insw (unsigned long port, void *dst, unsigned long count);
+extern void insl (unsigned long port, void *dst, unsigned long count);
+extern void outsb (unsigned long port, const void *src, unsigned long count);
+extern void outsw (unsigned long port, const void *src, unsigned long count);
+extern void outsl (unsigned long port, const void *src, unsigned long count);
+
+#define IO_SPACE_LIMIT ~0
+
+#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
+#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+
+/* At this point, Xtensa doesn't provide byte-swap instructions. */
+
+#ifdef __XTENSA_EB__
+# define in_8(addr) (*(u8*)(addr))
+# define in_le16(addr) _swapw(*(u16*)(addr))
+# define in_le32(addr) _swapl(*(u32*)(addr))
+# define out_8(b, addr) *(u8*)(addr) = (b)
+# define out_le16(b, addr) *(u16*)(addr) = _swapw(b)
+# define out_le32(b, addr) *(u32*)(addr) = _swapl(b)
+#elif defined(__XTENSA_EL__)
+# define in_8(addr) (*(u8*)(addr))
+# define in_le16(addr) (*(u16*)(addr))
+# define in_le32(addr) (*(u32*)(addr))
+# define out_8(b, addr) *(u8*)(addr) = (b)
+# define out_le16(b, addr) *(u16*)(addr) = (b)
+# define out_le32(b, addr) *(u32*)(addr) = (b)
+#else
+# error processor byte order undefined!
+#endif
+
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access.
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer.
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_IO_H */
diff --git a/include/asm-xtensa/ioctl.h b/include/asm-xtensa/ioctl.h
new file mode 100644
index 000000000000..856c605d62b1
--- /dev/null
+++ b/include/asm-xtensa/ioctl.h
@@ -0,0 +1,83 @@
+/*
+ * include/asm-xtensa/ioctl.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ *
+ * Derived from "include/asm-i386/ioctl.h"
+ */
+
+#ifndef _XTENSA_IOCTL_H
+#define _XTENSA_IOCTL_H
+
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the maximum parameter size to 16 KB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The i386 ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+#define _IOC_SIZEBITS 14
+#define _IOC_DIRBITS 2
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits.
+ */
+#define _IOC_NONE 0U
+#define _IOC_WRITE 1U
+#define _IOC_READ 2U
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
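+
+/*
+ * For illustration: ioctls.h defines FIONREAD as _IOR('f', 127, int),
+ * which encodes as
+ *
+ *   (_IOC_READ << 30) | (sizeof(int) << 16) | ('f' << 8) | 127
+ *   == 0x80000000 | 0x00040000 | 0x6600 | 0x7f == 0x8004667f
+ */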
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif
diff --git a/include/asm-xtensa/ioctls.h b/include/asm-xtensa/ioctls.h
new file mode 100644
index 000000000000..10c443435c11
--- /dev/null
+++ b/include/asm-xtensa/ioctls.h
@@ -0,0 +1,112 @@
+/*
+ * include/asm-xtensa/ioctls.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 - 2005 Tensilica Inc.
+ *
+ * Derived from "include/asm-i386/ioctls.h"
+ */
+
+#ifndef _XTENSA_IOCTLS_H
+#define _XTENSA_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define TIOCEXCL _IO('T', 12)
+#define TIOCNXCL _IO('T', 13)
+#define TIOCSCTTY _IO('T', 14)
+
+#define TIOCSTI _IOW('T', 18, char)
+#define TIOCMGET _IOR('T', 21, unsigned int)
+#define TIOCMBIS _IOW('T', 22, unsigned int)
+#define TIOCMBIC _IOW('T', 23, unsigned int)
+#define TIOCMSET _IOW('T', 24, unsigned int)
+# define TIOCM_LE 0x001
+# define TIOCM_DTR 0x002
+# define TIOCM_RTS 0x004
+# define TIOCM_ST 0x008
+# define TIOCM_SR 0x010
+# define TIOCM_CTS 0x020
+# define TIOCM_CAR 0x040
+# define TIOCM_RNG 0x080
+# define TIOCM_DSR 0x100
+# define TIOCM_CD TIOCM_CAR
+# define TIOCM_RI TIOCM_RNG
+
+#define TIOCGSOFTCAR _IOR('T', 25, unsigned int)
+#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
+#define TIOCLINUX _IOW('T', 28, char)
+#define TIOCCONS _IO('T', 29)
+#define TIOCGSERIAL _IOR('T', 30, struct serial_struct)
+#define TIOCSSERIAL _IOW('T', 31, struct serial_struct)
+#define TIOCPKT _IOW('T', 32, int)
+# define TIOCPKT_DATA 0
+# define TIOCPKT_FLUSHREAD 1
+# define TIOCPKT_FLUSHWRITE 2
+# define TIOCPKT_STOP 4
+# define TIOCPKT_START 8
+# define TIOCPKT_NOSTOP 16
+# define TIOCPKT_DOSTOP 32
+
+
+#define TIOCNOTTY _IO('T', 34)
+#define TIOCSETD _IOW('T', 35, int)
+#define TIOCGETD _IOR('T', 36, int)
+#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
+#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
+#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
+#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
+#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
+#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define TIOCSERCONFIG _IO('T', 83)
+#define TIOCSERGWILD _IOR('T', 84, int)
+#define TIOCSERSWILD _IOW('T', 85, int)
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
+ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
+#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
+
+#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* read serial port inline interrupt counts */
+
+#endif /* _XTENSA_IOCTLS_H */
diff --git a/include/asm-xtensa/ipc.h b/include/asm-xtensa/ipc.h
new file mode 100644
index 000000000000..d37bdb4d4c9c
--- /dev/null
+++ b/include/asm-xtensa/ipc.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-xtensa/ipc.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IPC_H
+#define _XTENSA_IPC_H
+
+struct ipc_kludge {
+ struct msgbuf __user *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define SEMTIMEDOP 4
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+#define IPCCALL(version,op) ((version)<<16 | (op))
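+
+/* For example, IPCCALL(1, SEMOP) == (1 << 16) | 1 == 0x00010001. */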
+
+#endif /* _XTENSA_IPC_H */
diff --git a/include/asm-xtensa/ipcbuf.h b/include/asm-xtensa/ipcbuf.h
new file mode 100644
index 000000000000..c33aa6a42145
--- /dev/null
+++ b/include/asm-xtensa/ipcbuf.h
@@ -0,0 +1,37 @@
+/*
+ * include/asm-xtensa/ipcbuf.h
+ *
+ * The ipc64_perm structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IPCBUF_H
+#define _XTENSA_IPCBUF_H
+
+/*
+ * Pad space is left for:
+ * - 32-bit mode_t and seq
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+struct ipc64_perm
+{
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ unsigned long seq;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* _XTENSA_IPCBUF_H */
diff --git a/include/asm-xtensa/irq.h b/include/asm-xtensa/irq.h
new file mode 100644
index 000000000000..d984e955938f
--- /dev/null
+++ b/include/asm-xtensa/irq.h
@@ -0,0 +1,37 @@
+/*
+ * include/asm-xtensa/irq.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_IRQ_H
+#define _XTENSA_IRQ_H
+
+#include <linux/config.h>
+#include <asm/platform/hardware.h>
+
+#include <xtensa/config/core.h>
+
+#ifndef PLATFORM_NR_IRQS
+# define PLATFORM_NR_IRQS 0
+#endif
+#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
+#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
+
+static __inline__ int irq_canonicalize(int irq)
+{
+ return (irq);
+}
+
+struct irqaction;
+#if 0 // FIXME
+extern void disable_irq_nosync(unsigned int);
+extern void disable_irq(unsigned int);
+extern void enable_irq(unsigned int);
+#endif
+
+#endif /* _XTENSA_IRQ_H */
diff --git a/include/asm-xtensa/kmap_types.h b/include/asm-xtensa/kmap_types.h
new file mode 100644
index 000000000000..9e822d2e3bce
--- /dev/null
+++ b/include/asm-xtensa/kmap_types.h
@@ -0,0 +1,31 @@
+/*
+ * include/asm-xtensa/kmap_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_KMAP_TYPES_H
+#define _XTENSA_KMAP_TYPES_H
+
+enum km_type {
+ KM_BOUNCE_READ,
+ KM_SKB_SUNRPC_DATA,
+ KM_SKB_DATA_SOFTIRQ,
+ KM_USER0,
+ KM_USER1,
+ KM_BIO_SRC_IRQ,
+ KM_BIO_DST_IRQ,
+ KM_PTE0,
+ KM_PTE1,
+ KM_IRQ0,
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_TYPE_NR
+};
+
+#endif /* _XTENSA_KMAP_TYPES_H */
diff --git a/include/asm-xtensa/linkage.h b/include/asm-xtensa/linkage.h
new file mode 100644
index 000000000000..bf2128a99d79
--- /dev/null
+++ b/include/asm-xtensa/linkage.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/linkage.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_LINKAGE_H
+#define _XTENSA_LINKAGE_H
+
+/* Nothing to do here ... */
+
+#endif /* _XTENSA_LINKAGE_H */
diff --git a/include/asm-xtensa/local.h b/include/asm-xtensa/local.h
new file mode 100644
index 000000000000..48723e550d14
--- /dev/null
+++ b/include/asm-xtensa/local.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/local.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_LOCAL_H
+#define _XTENSA_LOCAL_H
+
+#include <asm-generic/local.h>
+
+#endif /* _XTENSA_LOCAL_H */
diff --git a/include/asm-xtensa/mman.h b/include/asm-xtensa/mman.h
new file mode 100644
index 000000000000..9a95a45df996
--- /dev/null
+++ b/include/asm-xtensa/mman.h
@@ -0,0 +1,80 @@
+/*
+ * include/asm-xtensa/mman.h
+ *
+ * Xtensa Processor memory-manager definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMAN_H
+#define _XTENSA_MMAN_H
+
+/*
+ * Protections are chosen from these bits, OR'd together. The
+ * implementation does not necessarily support PROT_EXEC or PROT_WRITE
+ * without PROT_READ. The only guarantees are that no writing will be
+ * allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
+ */
+
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+
+#define PROT_SEM 0x10 /* page may be used for atomic ops */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+/*
+ * Flags for mmap
+ */
+#define MAP_SHARED 0x001 /* Share changes */
+#define MAP_PRIVATE 0x002 /* Changes are private */
+#define MAP_TYPE 0x00f /* Mask for type of mapping */
+#define MAP_FIXED 0x010 /* Interpret addr exactly */
+
+/* not used by linux, but here to make sure we don't clash with ABI defines */
+#define MAP_RENAME 0x020 /* Assign page to file */
+#define MAP_AUTOGROW 0x040 /* File may grow by writing */
+#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
+#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
+
+/* These are linux-specific */
+#define MAP_NORESERVE 0x0400 /* don't check for reservations */
+#define MAP_ANONYMOUS 0x0800 /* don't use a file */
+#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
+#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
+#define MAP_LOCKED 0x8000 /* pages are locked */
+#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x20000 /* do not block on IO */
+
+/*
+ * Flags for msync
+ */
+#define MS_ASYNC 0x0001 /* sync memory asynchronously */
+#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
+#define MS_SYNC 0x0004 /* synchronous memory sync */
+
+/*
+ * Flags for mlockall
+ */
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
+
+#define MADV_NORMAL 0x0 /* default page-in behavior */
+#define MADV_RANDOM 0x1 /* page-in minimum required */
+#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
+#define MADV_WILLNEED 0x3 /* pre-fault pages */
+#define MADV_DONTNEED 0x4 /* discard these pages */
+
+/* compatibility flags */
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FILE 0
+
+#endif /* _XTENSA_MMAN_H */
diff --git a/include/asm-xtensa/mmu.h b/include/asm-xtensa/mmu.h
new file mode 100644
index 000000000000..44c5bb04c55c
--- /dev/null
+++ b/include/asm-xtensa/mmu.h
@@ -0,0 +1,17 @@
+/*
+ * include/asm-xtensa/mmu.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_H
+#define _XTENSA_MMU_H
+
+/* Default "unsigned long" context */
+typedef unsigned long mm_context_t;
+
+#endif /* _XTENSA_MMU_H */
diff --git a/include/asm-xtensa/mmu_context.h b/include/asm-xtensa/mmu_context.h
new file mode 100644
index 000000000000..1b0801548cd9
--- /dev/null
+++ b/include/asm-xtensa/mmu_context.h
@@ -0,0 +1,330 @@
+/*
+ * include/asm-xtensa/mmu_context.h
+ *
+ * Switch an MMU context.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MMU_CONTEXT_H
+#define _XTENSA_MMU_CONTEXT_H
+
+#include <linux/config.h>
+#include <linux/stringify.h>
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Linux was ported to Xtensa assuming all auto-refill ways in set 0
+ * had the same properties (a very likely assumption). Multiple sets
+ * of auto-refill ways will still work properly, but not as optimally
+ * as the Xtensa designer may have assumed.
+ *
+ * We make this case a hard #error, killing the kernel build, to alert
+ * the developer to this condition (which is more likely an error).
+ * You super-duper clever developers can change it to a warning or
+ * remove it altogether if you think you know what you're doing. :)
+ */
+
+#if (XCHAL_HAVE_TLBS != 1)
+# error "Linux must have an MMU!"
+#endif
+
+#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
+# error "MMU must have auto-refill ways"
+#endif
+
+#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
+# error Linux may not use all auto-refill ways as efficiently as you think
+#endif
+
+#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
+# error Only one page size allowed!
+#endif
+
+extern unsigned long asid_cache;
+extern pgd_t *current_pgd;
+
+/*
+ * Define the number of entries per auto-refill way in set 0 of both I and D
+ * TLBs. We deal only with set 0 here (an assumption further explained in
+ * assertions.h). Also, define the total number of ARF entries in both TLBs.
+ */
+
+#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
+#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))
+
+#define ITLB_ENTRIES \
+ (ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
+#define DTLB_ENTRIES \
+ (DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
+
+
+/*
+ * SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
+ * In practice, they are probably equal. This macro simplifies function
+ * flush_tlb_range().
+ */
+
+#if (DTLB_ENTRIES < ITLB_ENTRIES)
+# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
+#else
+# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
+#endif
+
+
+/*
+ * asid_cache tracks only the ASID[USER_RING] field of the RASID special
+ * register, which is the current user-task asid allocation value.
+ * mm->context has the same meaning. When it comes time to write the
+ * asid_cache or mm->context values to the RASID special register, we first
+ * shift the value left by 8, then insert the value.
+ * ASID[0] always contains the kernel's asid value, and we reserve three
+ * other asid values that we never assign to user tasks.
+ */
+
+#define ASID_INC 0x1
+#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
+
+/*
+ * XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
+ * indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
+ * Xtensa processor constant indicating the kernel address space. They can
+ * be arbitrary values.
+ *
+ * We identify three more unique, reserved ASID values to use in the unused
+ * ring positions. No other user process will be assigned these reserved
+ * ASID values.
+ *
+ * For example, given that
+ *
+ * XCHAL_MMU_ASID_INVALID == 0
+ * XCHAL_MMU_ASID_KERNEL == 1
+ *
+ * the following maze of #if statements would generate
+ *
+ * ASID_RESERVED_1 == 2
+ * ASID_RESERVED_2 == 3
+ * ASID_RESERVED_3 == 4
+ * ASID_FIRST_NONRESERVED == 5
+ */
+
+#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
+# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
+#else
+# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
+#endif
+
+#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
+# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 1) & ASID_MASK)
+#else
+# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 2) & ASID_MASK)
+#endif
+
+#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
+# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 1) & ASID_MASK)
+#else
+# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 2) & ASID_MASK)
+#endif
+
+#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
+# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
+#else
+# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
+#endif
+
+#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
+ ((ASID_RESERVED_2) << 16) + \
+ ((ASID_RESERVED_3) << 8) + \
+ ((XCHAL_MMU_ASID_KERNEL)) )
+
+
+/*
+ * NO_CONTEXT is the invalid ASID value that we don't ever assign to
+ * any user or kernel context. NO_CONTEXT is a better mnemonic than
+ * XCHAL_MMU_ASID_INVALID, so we use it in code instead.
+ */
+
+#define NO_CONTEXT XCHAL_MMU_ASID_INVALID
+
+#if (KERNEL_RING != 0)
+# error The KERNEL_RING really should be zero.
+#endif
+
+#if (USER_RING >= XCHAL_MMU_RINGS)
+# error USER_RING cannot be greater than the highest numbered ring.
+#endif
+
+#if (USER_RING == KERNEL_RING)
+# error The user and kernel rings really should not be equal.
+#endif
+
+#if (USER_RING == 1)
+#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
+ ((ASID_RESERVED_2) << 16) + \
+ (((x) & (ASID_MASK)) << 8) + \
+ ((XCHAL_MMU_ASID_KERNEL)) )
+
+#elif (USER_RING == 2)
+#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
+ (((x) & (ASID_MASK)) << 16) + \
+ ((ASID_RESERVED_2) << 8) + \
+ ((XCHAL_MMU_ASID_KERNEL)) )
+
+#elif (USER_RING == 3)
+#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
+ ((ASID_RESERVED_1) << 16) + \
+ ((ASID_RESERVED_2) << 8) + \
+ ((XCHAL_MMU_ASID_KERNEL)) )
+
+#else
+#error Goofy value for USER_RING
+
+#endif /* USER_RING == 1 */
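+
+/*
+ * For illustration, with the example values above (ASID_RESERVED_1 == 2,
+ * ASID_RESERVED_2 == 3, XCHAL_MMU_ASID_KERNEL == 1) and USER_RING == 1:
+ *
+ *   ASID_INSERT(5) == (2 << 24) + (3 << 16) + (5 << 8) + 1 == 0x02030501
+ *
+ * i.e. rings 3 and 2 hold reserved ASIDs, ring 1 the user ASID, and
+ * ring 0 the kernel ASID.
+ */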
+
+
+/*
+ * All upper bits unused by the hardware are treated
+ * as a software ASID extension.
+ */
+
+#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION \
+ ((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
+
+extern inline void set_rasid_register (unsigned long val)
+{
+ __asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
+ " isync\n" : : "a" (val));
+}
+
+extern inline unsigned long get_rasid_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+
+#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
+
+extern inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
+{
+ extern void flush_tlb_all(void);
+ if (! ((asid += ASID_INC) & ASID_MASK) ) {
+ flush_tlb_all(); /* start new asid cycle */
+ if (!asid) /* fix version if needed */
+ asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
+ asid += ASID_FIRST_NONRESERVED;
+ }
+ mm->context = asid_cache = asid;
+}
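+
+/*
+ * A sketch of how the allocator above cycles, assuming 8-bit ASIDs
+ * (ASID_MASK == 0xff) and ASID_FIRST_NONRESERVED == 5: starting from
+ * asid_cache == 0xff, the increment yields 0x100, whose low byte is
+ * zero, so the TLB is flushed and ASID_FIRST_NONRESERVED is added,
+ * giving 0x105: generation 0x100, first user ASID 5 of the new cycle.
+ */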
+
+#else
+#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation
+
+/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
+   really the best, but if you insist... */
+
+extern inline int validate_asid (unsigned long asid)
+{
+ switch (asid) {
+ case XCHAL_MMU_ASID_INVALID:
+ case XCHAL_MMU_ASID_KERNEL:
+ case ASID_RESERVED_1:
+ case ASID_RESERVED_2:
+ case ASID_RESERVED_3:
+ return 0; /* can't use these values as ASIDs */
+ }
+ return 1; /* valid */
+}
+
+extern inline void
+get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
+{
+ extern void flush_tlb_all(void);
+ while (1) {
+ asid += ASID_INC;
+ if ( ! (asid & ASID_MASK) ) {
+ flush_tlb_all(); /* start new asid cycle */
+ if (!asid) /* fix version if needed */
+ asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
+ asid += ASID_FIRST_NONRESERVED;
+ break; /* no need to validate here */
+ }
+ if (validate_asid (asid & ASID_MASK))
+ break;
+ }
+ mm->context = asid_cache = asid;
+}
+
+#endif
+
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+extern inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
+extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long asid = asid_cache;
+
+ /* Check if our ASID is of an older version and thus invalid */
+
+ if ((next->context ^ asid) & ASID_VERSION_MASK)
+ get_new_mmu_context(next, asid);
+
+ set_rasid_register (ASID_INSERT(next->context));
+ invalidate_page_directory();
+}
+
+#define deactivate_mm(tsk, mm) do { } while(0)
+
+/*
+ * Destroy context related info for an mm_struct that is about
+ * to be put to rest.
+ */
+extern inline void destroy_context(struct mm_struct *mm)
+{
+ /* Nothing to do. */
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+extern inline void
+activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+ /* Unconditionally get a new ASID. */
+
+ get_new_mmu_context(next, asid_cache);
+ set_rasid_register (ASID_INSERT(next->context));
+ invalidate_page_directory();
+}
+
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+ /* Nothing to do. */
+
+}
+
+#endif /* _XTENSA_MMU_CONTEXT_H */
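
For a concrete feel of the version-counter scheme in the header above, here is
a minimal user-space sketch of the optimal-case allocator. The 8-bit ASID
field, the reserved values 0..4 and the stubbed-out flush_tlb_all() are
assumptions for illustration only, not part of this patch.

#include <stdio.h>

#define ASID_MASK		0xffUL	/* assumed 8-bit hardware ASID field */
#define ASID_INC		1
#define ASID_FIRST_NONRESERVED	5	/* example value from the header comment */
#define ASID_VERSION_MASK	(~(ASID_MASK | (ASID_MASK - 1)))
#define ASID_FIRST_VERSION	((~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)

static void flush_tlb_all(void)
{
	puts("  [flush_tlb_all: starting a new ASID cycle]");
}

/* Mirrors get_new_mmu_context() in the optimal case. */
static unsigned long new_asid(unsigned long asid)
{
	if (!((asid += ASID_INC) & ASID_MASK)) {	/* hardware bits wrapped */
		flush_tlb_all();
		if (!asid)			/* full wrap: restart the version */
			asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
		asid += ASID_FIRST_NONRESERVED;	/* skip reserved ASIDs 0..4 */
	}
	return asid;
}

int main(void)
{
	unsigned long asid = 0x1fd;	/* near the end of a cycle */
	int i;

	for (i = 0; i < 5; i++) {
		asid = new_asid(asid);
		printf("asid %#06lx = version %#lx | hw asid %#04lx\n",
		       asid, asid & ASID_VERSION_MASK, asid & ASID_MASK);
	}
	return 0;
}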
diff --git a/include/asm-xtensa/module.h b/include/asm-xtensa/module.h
new file mode 100644
index 000000000000..ffb25bfdf6a1
--- /dev/null
+++ b/include/asm-xtensa/module.h
@@ -0,0 +1,25 @@
+/*
+ * include/asm-xtensa/module.h
+ *
+ * This file contains the module code specific to the Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_MODULE_H
+#define _XTENSA_MODULE_H
+
+struct mod_arch_specific
+{
+ /* Module support is not completely implemented. */
+};
+
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+
+#endif /* _XTENSA_MODULE_H */
diff --git a/include/asm-xtensa/msgbuf.h b/include/asm-xtensa/msgbuf.h
new file mode 100644
index 000000000000..693c96755280
--- /dev/null
+++ b/include/asm-xtensa/msgbuf.h
@@ -0,0 +1,48 @@
+/*
+ * include/asm-xtensa/msgbuf.h
+ *
+ * The msqid64_ds structure for the Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_MSGBUF_H
+#define _XTENSA_MSGBUF_H
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+#ifdef __XTENSA_EB__
+ unsigned int __unused1;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned int __unused2;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned int __unused3;
+ __kernel_time_t msg_ctime; /* last change time */
+#elif defined(__XTENSA_EL__)
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned int __unused1;
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned int __unused2;
+ __kernel_time_t msg_ctime; /* last change time */
+ unsigned int __unused3;
+#else
+# error processor byte order undefined!
+#endif
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* _XTENSA_MSGBUF_H */
diff --git a/include/asm-xtensa/namei.h b/include/asm-xtensa/namei.h
new file mode 100644
index 000000000000..3fdff039d27d
--- /dev/null
+++ b/include/asm-xtensa/namei.h
@@ -0,0 +1,26 @@
+/*
+ * include/asm-xtensa/namei.h
+ *
+ * Included from linux/fs/namei.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_NAMEI_H
+#define _XTENSA_NAMEI_H
+
+#ifdef __KERNEL__
+
+/* This dummy routine may be changed to something useful
+ * for /usr/gnemul/ emulation stuff.
+ * Look at asm-sparc/namei.h for details.
+ */
+
+#define __emul_prefix() NULL
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_NAMEI_H */
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
new file mode 100644
index 000000000000..b495e5b5a942
--- /dev/null
+++ b/include/asm-xtensa/page.h
@@ -0,0 +1,133 @@
+/*
+ * linux/include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <linux/config.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
+ */
+
+#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ */
+
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+#ifndef XCHAL_HAVE_NSU
+	/* Generic fallback: return the smallest order such that
+	 * (PAGE_SIZE << order) >= size.
+	 */
+	size = (size - 1) >> (PAGE_SHIFT - 1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+
+	return order;
+#else
+ size = (size - 1) >> PAGE_SHIFT;
+ asm ("nsau %0, %1" : "=r" (order) : "r" (size));
+ return 32 - order;
+#endif
+}
+
+
+struct page;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
+void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
+#else
+# define clear_user_page(page,vaddr,pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel address, not user
+ * addresses.
+ */
+
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
+#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr)
+#ifndef CONFIG_DISCONTIGMEM
+# define pfn_to_page(pfn) (mem_map + (pfn))
+# define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#else
+# error CONFIG_DISCONTIGMEM not supported
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define WANT_PAGE_VIRTUAL
+
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PAGE_H */
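
As a quick sanity check on the get_order() semantics above: the result is the
smallest order such that (PAGE_SIZE << order) covers the requested size. A
self-contained user-space sketch, assuming a 4 kB page size:

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4 kB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same formulation as the generic fallback in get_order() above. */
static int get_order_ref(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1,
				  3 * PAGE_SIZE, 4 * PAGE_SIZE };
	int i;

	for (i = 0; i < 5; i++)	/* prints orders 0, 0, 1, 2, 2 */
		printf("get_order(%lu) = %d\n", sizes[i], get_order_ref(sizes[i]));
	return 0;
}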
diff --git a/include/asm-xtensa/page.h.n b/include/asm-xtensa/page.h.n
new file mode 100644
index 000000000000..546cc6624f24
--- /dev/null
+++ b/include/asm-xtensa/page.h.n
@@ -0,0 +1,135 @@
+/*
+ * linux/include/asm-xtensa/page.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PAGE_H
+#define _XTENSA_PAGE_H
+
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <linux/config.h>
+
+/*
+ * PAGE_SHIFT determines the page size
+ * PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
+ */
+#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
+
+#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
+#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
+
+#ifdef __ASSEMBLY__
+
+#define __pgprot(x) (x)
+
+#else
+
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t; /* page table entry */
+typedef struct { unsigned long pmd; } pmd_t; /* PMD table entry */
+typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+/*
+ * Pure 2^n version of get_order
+ */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+#ifndef XCHAL_HAVE_NSU
+	/* Generic fallback: return the smallest order such that
+	 * (PAGE_SIZE << order) >= size.
+	 */
+	size = (size - 1) >> (PAGE_SHIFT - 1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+
+	return order;
+#else
+ size = (size - 1) >> PAGE_SHIFT;
+ asm ("nsau %0, %1" : "=r" (order) : "r" (size));
+ return 32 - order;
+#endif
+}
+
+
+struct page;
+extern void clear_page(void *page);
+extern void copy_page(void *to, void *from);
+
+/*
+ * If we have cache aliasing and writeback caches, we might have to do
+ * some extra work
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
+void copy_user_page(void *to, void* from, unsigned long vaddr, struct page* page);
+#else
+# define clear_user_page(page,vaddr,pg) clear_page(page)
+# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+#endif
+
+
+/*
+ * This handles the memory map. We handle pages at
+ * XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
+ * These macros are for conversion of kernel address, not user
+ * addresses.
+ */
+
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
+#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr)
+#ifndef CONFIG_DISCONTIGMEM
+# define pfn_to_page(pfn) (mem_map + (pfn))
+# define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+#else
+# error CONFIG_DISCONTIGMEM not supported
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define WANT_PAGE_VIRTUAL
+
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PAGE_H */
diff --git a/include/asm-xtensa/param.h b/include/asm-xtensa/param.h
new file mode 100644
index 000000000000..c0eec8260b0e
--- /dev/null
+++ b/include/asm-xtensa/param.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-xtensa/param.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PARAM_H
+#define _XTENSA_PARAM_H
+
+#include <xtensa/config/core.h>
+
+#ifdef __KERNEL__
+# define HZ 100 /* internal timer frequency */
+# define USER_HZ 100 /* for user interfaces in "ticks" */
+# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
+#endif
+
+#define EXEC_PAGESIZE (1 << XCHAL_MMU_MIN_PTE_PAGE_SIZE)
+
+#ifndef NGROUPS
+#define NGROUPS 32
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* _XTENSA_PARAM_H */
diff --git a/include/asm-xtensa/pci-bridge.h b/include/asm-xtensa/pci-bridge.h
new file mode 100644
index 000000000000..00fcbd7c534a
--- /dev/null
+++ b/include/asm-xtensa/pci-bridge.h
@@ -0,0 +1,88 @@
+/*
+ * include/asm-xtensa/pci-bridge.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_BRIDGE_H
+#define _XTENSA_PCI_BRIDGE_H
+
+#ifdef __KERNEL__
+
+struct device_node;
+struct pci_controller;
+
+/*
+ * pciauto_bus_scan() enumerates the pci space.
+ */
+
+extern int pciauto_bus_scan(struct pci_controller *, int);
+
+struct pci_space {
+ unsigned long start;
+ unsigned long end;
+ unsigned long base;
+};
+
+/*
+ * Structure of a PCI controller (host bridge)
+ */
+
+struct pci_controller {
+ int index; /* used for pci_controller_num */
+ struct pci_controller *next;
+ struct pci_bus *bus;
+ void *arch_data;
+
+ int first_busno;
+ int last_busno;
+
+ struct pci_ops *ops;
+ volatile unsigned int *cfg_addr;
+ volatile unsigned char *cfg_data;
+
+ /* Currently, we limit ourselves to 1 IO range and 3 mem
+ * ranges since the common pci_bus structure can't handle more
+ */
+ struct resource io_resource;
+ struct resource mem_resources[3];
+ int mem_resource_count;
+
+ /* Host bridge I/O and Memory space
+ * Used for BAR placement algorithms
+ */
+ struct pci_space io_space;
+ struct pci_space mem_space;
+
+ /* Return the interrupt number for a device. */
+ int (*map_irq)(struct pci_dev*, u8, u8);
+
+};
+
+static inline void pcibios_init_resource(struct resource *res,
+ unsigned long start, unsigned long end, int flags, char *name)
+{
+ res->start = start;
+ res->end = end;
+ res->flags = flags;
+ res->name = name;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+}
+
+
+/* These are used for config access before all the PCI probing has been done. */
+int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
+int early_read_config_word(struct pci_controller*, int, int, int, u16*);
+int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
+int early_write_config_byte(struct pci_controller*, int, int, int, u8);
+int early_write_config_word(struct pci_controller*, int, int, int, u16);
+int early_write_config_dword(struct pci_controller*, int, int, int, u32);
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PCI_BRIDGE_H */
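
A sketch of how a platform might use the helpers above to seed its host
bridge; the I/O window, the flags value and all "my_" names are illustrative
assumptions, not part of this patch.

#include <linux/ioport.h>
#include <asm/pci-bridge.h>

static struct pci_controller my_ctrl;

static void __init my_platform_pci_init(void)
{
	/* Describe the bridge's I/O window before scanning. */
	pcibios_init_resource(&my_ctrl.io_resource,
			      0x2000, 0xffff,	/* start, end */
			      IORESOURCE_IO, "my-pci-io");
	my_ctrl.mem_resource_count = 0;

	/* Enumerate and assign bus numbers starting at bus 0. */
	my_ctrl.first_busno = 0;
	my_ctrl.last_busno = pciauto_bus_scan(&my_ctrl, 0);
}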
diff --git a/include/asm-xtensa/pci.h b/include/asm-xtensa/pci.h
new file mode 100644
index 000000000000..6817742301c2
--- /dev/null
+++ b/include/asm-xtensa/pci.h
@@ -0,0 +1,89 @@
+/*
+ * linux/include/asm-xtensa/pci.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PCI_H
+#define _XTENSA_PCI_H
+
+#ifdef __KERNEL__
+
+/* Can be used to override the logic in pci_scan_bus for skipping
+ * already-configured bus numbers - to be used for buggy BIOSes
+ * or architectures with incomplete PCI setup by the loader
+ */
+
+#define pcibios_assign_all_busses() 0
+
+extern struct pci_controller* pcibios_alloc_controller(void);
+
+extern inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+extern inline void pcibios_penalize_isa_irq(int irq)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+/* Assume some values. (We should revise them, if necessary) */
+
+#define PCIBIOS_MIN_IO 0x2000
+#define PCIBIOS_MIN_MEM 0x10000000
+
+/* Dynamic DMA mapping stuff.
+ * Xtensa has everything mapped statically like x86.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <asm/scatterlist.h>
+#include <linux/string.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* The PCI address space does equal the physical memory address space.
+ * The networking and block device layers use this boolean for bounce buffer
+ * decisions.
+ */
+
+#define PCI_DMA_BUS_IS_PHYS (1)
+
+/* pci_unmap_{page,single} is a no-op, so */
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define pci_unmap_len(PTR, LEN_NAME) (0)
+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+
+/* We cannot access memory above 4GB */
+#define pci_dac_dma_supported(pci_dev, mask) (0)
+
+/* Map a range of PCI memory or I/O space for a device into user space */
+int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
+#define HAVE_PCI_MMAP 1
+
+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
+{
+}
+
+#endif /* __KERNEL__ */
+
+/* Implement the pci_ DMA API in terms of the generic device dma_ one */
+#include <asm-generic/pci-dma-compat.h>
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _XTENSA_PCI_H */
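
A hypothetical driver fragment showing the intended use of the no-op unmap
macros above: because unmapping costs nothing on this platform, both the
descriptor fields and the _set calls compile away entirely, while the same
source stays functional on platforms with an IOMMU. All "my_" names and the
buffer size are illustrative.

struct my_rx_desc {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)	/* no storage on xtensa */
	DECLARE_PCI_UNMAP_LEN(len)
};

static void my_rx_refill(struct pci_dev *pdev, struct my_rx_desc *d)
{
	dma_addr_t dma = pci_map_single(pdev, d->skb->data, 1536,
					PCI_DMA_FROMDEVICE);

	pci_unmap_addr_set(d, mapping, dma);	/* expands to nothing here */
	pci_unmap_len_set(d, len, 1536);	/* likewise */
}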
diff --git a/include/asm-xtensa/percpu.h b/include/asm-xtensa/percpu.h
new file mode 100644
index 000000000000..6d2bc2ada9d1
--- /dev/null
+++ b/include/asm-xtensa/percpu.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-xtensa/percpu.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PERCPU__
+#define _XTENSA_PERCPU__
+
+#include <asm-generic/percpu.h>
+
+#endif /* _XTENSA_PERCPU__ */
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
new file mode 100644
index 000000000000..734a8d060395
--- /dev/null
+++ b/include/asm-xtensa/pgalloc.h
@@ -0,0 +1,116 @@
+/*
+ * linux/include/asm-xtensa/pgalloc.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001-2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGALLOC_H
+#define _XTENSA_PGALLOC_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/highmem.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+
+
+/* Cache aliasing:
+ *
+ * If the cache size for one way is greater than the page size, we have to
+ * deal with cache aliasing. The cache index is wider than the page size:
+ *
+ * |cache |
+ * |pgnum |page| virtual address
+ * |xxxxxX|zzzz|
+ * | | |
+ * \ / | |
+ * trans.| |
+ * / \ | |
+ * |yyyyyY|zzzz| physical address
+ *
+ * When the page number is translated to the physical page address, the lowest
+ * bit(s) (X) that are also part of the cache index are also translated (Y).
+ * If this translation changes this bit (X), the cache index is also affected,
+ * thus resulting in a different cache line than before.
+ * The kernel does not provide a mechanism to ensure that the page color
+ * (represented by this bit) remains the same when allocated or when pages
+ * are remapped. When user pages are mapped into kernel space, the color of
+ * the page might also change.
+ *
+ * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
+ * to temporarily map a page so we can match the color.
+ */
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
+# define PAGE_COLOR(a) \
+ (((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
+# define PAGE_COLOR_EQ(a,b) \
+ ((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
+# define PAGE_COLOR_MAP0(v) \
+ (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
+# define PAGE_COLOR_MAP1(v) \
+ (VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
+#endif
+
+/*
+ * Allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+
+#define pgd_free(pgd) free_page((unsigned long)(pgd))
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
+{
+ pmd_val(*(pmdp)) = (unsigned long)(pte);
+ __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
+{
+ pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
+ __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
+}
+
+
+
+#else
+
+# define pmd_populate_kernel(mm, pmdp, pte) \
+ (pmd_val(*(pmdp)) = (unsigned long)(pte))
+# define pmd_populate(mm, pmdp, page) \
+ (pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
+
+#endif
+
+static inline pgd_t*
+pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
+
+ if (likely(pgd != NULL))
+ __flush_dcache_page((unsigned long)pgd);
+
+ return pgd;
+}
+
+extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr);
+extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr);
+
+#define pte_free_kernel(pte) free_page((unsigned long)pte)
+#define pte_free(pte) __free_page(pte)
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_PGALLOC_H */
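
The coloring macros above reduce to simple bit tests; a user-space sketch,
assuming 4 kB pages and an 8 kB cache way (so exactly one color bit):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define DCACHE_WAY_SIZE	0x2000UL	/* assumed 8 kB way */

#define PAGE_COLOR_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
#define PAGE_COLOR(a)	(((unsigned long)(a) & PAGE_COLOR_MASK) >> PAGE_SHIFT)

int main(void)
{
	/* Adjacent pages alternate colors... */
	printf("color(0x1000) = %lu\n", PAGE_COLOR(0x1000));	/* 1 */
	printf("color(0x2000) = %lu\n", PAGE_COLOR(0x2000));	/* 0 */
	/* ...and addresses one way apart share a color (alias danger). */
	printf("color(0x3000) = %lu\n", PAGE_COLOR(0x3000));	/* 1 */
	return 0;
}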
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
new file mode 100644
index 000000000000..0bb6416ae266
--- /dev/null
+++ b/include/asm-xtensa/pgtable.h
@@ -0,0 +1,468 @@
+/*
+ * linux/include/asm-xtensa/pgtable.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PGTABLE_H
+#define _XTENSA_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/page.h>
+
+/* Assertions. */
+
+#ifdef CONFIG_MMU
+
+
+#if (XCHAL_MMU_RINGS < 2)
+# error Linux build assumes at least 2 ring levels.
+#endif
+
+#if (XCHAL_MMU_CA_BITS != 4)
+# error We assume exactly four bits for CA.
+#endif
+
+#if (XCHAL_MMU_SR_BITS != 0)
+# error We have no room for SR bits.
+#endif
+
+/*
+ * Use the first min-wired way for mapping page-table pages.
+ * Page coloring requires a second min-wired way.
+ */
+
+#if (XCHAL_DTLB_MINWIRED_SETS == 0)
+# error Need a min-wired way for mapping page-table pages
+#endif
+
+#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
+# define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
+# define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
+# else
+# error Page coloring requires its own wired dtlb way!
+# endif
+#endif
+
+#endif /* CONFIG_MMU */
+
+/*
+ * We only use two ring levels, user and kernel space.
+ */
+
+#define USER_RING 1 /* user ring level */
+#define KERNEL_RING 0 /* kernel ring level */
+
+/*
+ * The Xtensa architecture port of Linux has a two-level page table system,
+ * i.e. the middle level of the logical three-level Linux page table is folded.
+ * Each task has the following memory page tables:
+ *
+ * PGD table (page directory), i.e. 3rd-level page table:
+ * One page (4 kB) of 1024 (PTRS_PER_PGD) pointers to PTE tables
+ * (Architectures that don't have the PMD folded point to the PMD tables)
+ *
+ * The pointer to the PGD table for a given task can be retrieved from
+ * the task structure (struct task_struct*) t, e.g. current():
+ * (t->mm ? t->mm : t->active_mm)->pgd
+ *
+ * PMD tables (page middle-directory), i.e. 2nd-level page tables:
+ * Absent for the Xtensa architecture (folded, PTRS_PER_PMD == 1).
+ *
+ * PTE tables (page table entry), i.e. 1st-level page tables:
+ * One page (4 kB) of 1024 (PTRS_PER_PTE) PTEs with a special PTE
+ * invalid_pte_table for absent mappings.
+ *
+ * The individual pages are 4 kB in size; a special empty_zero_page is
+ * used for zero-filled mappings.
+ */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * Entries per page directory level: we use two-level, so
+ * we don't really have any PMD directory physically.
+ */
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PTE_SHIFT 10
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD 1024
+#define PGD_ORDER 0
+#define PMD_ORDER 0
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS XCHAL_SEG_MAPPABLE_VADDR
+#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
+
+/* virtual memory area. We keep a distance to other memory regions to be
+ * on the safe side. We also use this area for cache aliasing.
+ */
+
+// FIXME: virtual memory area must be configuration-dependent
+
+#define VMALLOC_START 0xC0000000
+#define VMALLOC_END 0xC7FF0000
+
+/* Xtensa Linux config PTE layout (when present):
+ * 31-12: PPN
+ * 11-6: Software
+ * 5-4: RING
+ * 3-0: CA
+ *
+ * Similar to the Alpha and MIPS ports, we need to keep track of the ref
+ * and mod bits in software. We have a software "you can read
+ * from this page" bit, and a hardware one which actually lets the
+ * process read from the page. By the same token, we have a software
+ * writable bit and the real hardware one which actually lets the
+ * process write to the page.
+ *
+ * See further below for PTE layout for swapped-out pages.
+ */
+
+#define _PAGE_VALID (1<<0) /* hardware: page is accessible */
+#define _PAGE_WRENABLE (1<<1) /* hardware: page is writable */
+
+/* None of these cache modes include MP coherency: */
+#define _PAGE_NO_CACHE (0<<2) /* bypass, non-speculative */
+#if XCHAL_DCACHE_IS_WRITEBACK
+# define _PAGE_WRITEBACK (1<<2) /* write back */
+# define _PAGE_WRITETHRU (2<<2) /* write through */
+#else
+# define _PAGE_WRITEBACK (1<<2) /* assume write through */
+# define _PAGE_WRITETHRU (1<<2)
+#endif
+#define _PAGE_NOALLOC (3<<2) /* don't allocate cache, if not cached */
+#define _CACHE_MASK (3<<2)
+
+#define _PAGE_USER (1<<4) /* user access (ring=1) */
+#define _PAGE_KERNEL (0<<4) /* kernel access (ring=0) */
+
+/* Software */
+#define _PAGE_RW (1<<6) /* software: page writable */
+#define _PAGE_DIRTY (1<<7) /* software: page dirty */
+#define _PAGE_ACCESSED (1<<8) /* software: page accessed (read) */
+#define _PAGE_FILE (1<<9) /* nonlinear file mapping*/
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY)
+#define _PAGE_PRESENT ( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)
+
+#ifdef CONFIG_MMU
+
+# define PAGE_NONE __pgprot(_PAGE_PRESENT)
+# define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
+# define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+# define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER)
+# define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
+# define PAGE_INVALID __pgprot(_PAGE_USER)
+
+# if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define PAGE_DIRECTORY __pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL)
+# else
+# define PAGE_DIRECTORY __pgprot(_PAGE_PRESENT | _PAGE_KERNEL)
+# endif
+
+#else /* no mmu */
+
+# define PAGE_NONE __pgprot(0)
+# define PAGE_SHARED __pgprot(0)
+# define PAGE_COPY __pgprot(0)
+# define PAGE_READONLY __pgprot(0)
+# define PAGE_KERNEL __pgprot(0)
+
+#endif
+
+/*
+ * On certain configurations of Xtensa MMUs (e.g. the initial Linux config),
+ * the MMU can't do page protection for execute, and considers that the same as
+ * read. Also, write permissions may imply read permissions.
+ * What follows is the closest we can get by reasonable means.
+ * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
+ */
+#define __P000 PAGE_NONE /* private --- */
+#define __P001 PAGE_READONLY /* private --r */
+#define __P010 PAGE_COPY /* private -w- */
+#define __P011 PAGE_COPY /* private -wr */
+#define __P100 PAGE_READONLY /* private x-- */
+#define __P101 PAGE_READONLY /* private x-r */
+#define __P110 PAGE_COPY /* private xw- */
+#define __P111 PAGE_COPY /* private xwr */
+
+#define __S000 PAGE_NONE /* shared --- */
+#define __S001 PAGE_READONLY /* shared --r */
+#define __S010 PAGE_SHARED /* shared -w- */
+#define __S011 PAGE_SHARED /* shared -wr */
+#define __S100 PAGE_READONLY /* shared x-- */
+#define __S101 PAGE_READONLY /* shared x-r */
+#define __S110 PAGE_SHARED /* shared xw- */
+#define __S111 PAGE_SHARED /* shared xwr */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd entry %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern unsigned long empty_zero_page[1024];
+
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
+
+/*
+ * The pmd contains the kernel virtual address of the pte page.
+ */
+#define pmd_page_kernel(pmd) ((unsigned long)(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd))
+
+/*
+ * The following only work if pte_present() is true.
+ */
+#define pte_none(pte) (!(pte_val(pte) ^ _PAGE_USER))
+#define pte_present(pte) (pte_val(pte) & _PAGE_VALID)
+#define pte_clear(mm,addr,ptep) \
+ do { update_pte(ptep, __pte(_PAGE_USER)); } while(0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_clear(pmdp) do { set_pmd(pmdp, __pmd(0)); } while (0)
+#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+
+/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */
+
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; }
+static inline pte_t pte_rdprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkread(pte_t pte) { pte_val(pte) |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pte_same(a,b) (pte_val(a) == pte_val(b))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/*
+ * Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+static inline void update_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep));
+#endif
+}
+
+extern inline void
+set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ update_pte(ptep, pteval);
+}
+
+
+extern inline void
+set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ *pmdp = pmdval;
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+ __asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
+#endif
+}
+
+
+static inline int
+ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_young(pte))
+ return 0;
+ update_pte(ptep, pte_mkold(pte));
+ return 1;
+}
+
+static inline int
+ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ return 0;
+ update_pte(ptep, pte_mkclean(pte));
+ return 1;
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ pte_clear(mm, addr, ptep);
+ return pte;
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = *ptep;
+ update_pte(ptep, pte_wrprotect(pte));
+}
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_offset(mm,address) ((mm)->pgd + pgd_index(address))
+
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir,address) ((pmd_t*)(dir))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr) \
+ ((pte_t*) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir),(addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir),(addr))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+
+/*
+ * Encode and decode a swap entry.
+ * Each PTE in a process VM's page table is either:
+ * "present" -- valid and not swapped out, protection bits are meaningful;
+ * "not present" -- which further subdivides in these two cases:
+ * "none" -- no mapping at all; identified by pte_none(), set by pte_clear(
+ * "swapped out" -- the page is swapped out, and the SWP macros below
+ * are used to store swap file info in the PTE itself.
+ *
+ * In the Xtensa processor MMU, any PTE entries in user space (or anywhere
+ * in virtual memory that can map differently across address spaces)
+ * must have a correct ring value that represents the RASID field that
+ * is changed when switching address spaces. Eg. such PTE entries cannot
+ * be set to ring zero, because that can cause a (global) kernel ASID
+ * entry to be created in the TLBs (even with invalid cache attribute),
+ * potentially causing a multihit exception when going back to another
+ * address space that mapped the same virtual address at another ring.
+ *
+ * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs.
+ * We also avoid using the _PAGE_VALID bit which must be zero for non-present
+ * pages.
+ *
+ * We end up with the following available bits: 1..3 and 7..31.
+ * We don't bother with 1..3 for now (we can use them later if needed),
+ * and choose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
+ * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
+ * is currently implemented as an index into swap_info[MAX_SWAPFILES]
+ * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
+ * However, for some reason all other architectures in the 2.4 kernel
+ * reserve either 6, 7, or 8 bits, so I'll not deviate from that for now. :)
+ * SWP_OFFSET is an offset into the swap file in page-size units, so
+ * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
+ *
+ * FIXME: 2 GB isn't very big. Other bits can be used to allow
+ * larger swap sizes. In the meantime, it appears relatively easy to get
+ * around the 2 GB limitation by simply using multiple swap files.
+ */
+
+#define __swp_type(entry) (((entry).val >> 7) & 0x3f)
+#define __swp_offset(entry) ((entry).val >> 13)
+#define __swp_entry(type,offs) ((swp_entry_t) {((type) << 7) | ((offs) << 13)})
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS 29
+#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
+
+
+#endif /* !defined (__ASSEMBLY__) */
+
+
+#ifdef __ASSEMBLY__
+
+/* Assembly macro _PGD_INDEX is the same as C pgd_index(unsigned long),
+ * _PGD_OFFSET as C pgd_offset(struct mm_struct*, unsigned long),
+ * _PMD_OFFSET as C pmd_offset(pgd_t*, unsigned long)
+ * _PTE_OFFSET as C pte_offset(pmd_t*, unsigned long)
+ *
+ * Note: We require an additional temporary register which can be the same as
+ * the register that holds the address.
+ *
+ * ((pte_t*) ((unsigned long)(pmd_val(*pmd) & PAGE_MASK)) + pte_index(addr))
+ *
+ */
+#define _PGD_INDEX(rt,rs) extui rt, rs, PGDIR_SHIFT, 32-PGDIR_SHIFT
+#define _PTE_INDEX(rt,rs) extui rt, rs, PAGE_SHIFT, PTRS_PER_PTE_SHIFT
+
+#define _PGD_OFFSET(mm,adr,tmp) l32i mm, mm, MM_PGD; \
+ _PGD_INDEX(tmp, adr); \
+ addx4 mm, tmp, mm
+
+#define _PTE_OFFSET(pmd,adr,tmp) _PTE_INDEX(tmp, adr); \
+ srli pmd, pmd, PAGE_SHIFT; \
+ slli pmd, pmd, PAGE_SHIFT; \
+ addx4 pmd, tmp, pmd
+
+#else
+
+extern void paging_init(void);
+
+#define kern_addr_valid(addr) (1)
+
+extern void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte);
+
+/*
+ * remap a physical address `phys' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_page_range(vma,from,phys,size,prot) \
+ remap_pfn_range(vma, from, (phys) >> PAGE_SHIFT, size, prot)
+
+
+/* No page table caches to init */
+
+#define pgtable_cache_init() do { } while (0)
+
+typedef pte_t *pte_addr_t;
+
+#endif /* !defined (__ASSEMBLY__) */
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#define __HAVE_ARCH_PTEP_MKDIRTY
+#define __HAVE_ARCH_PTE_SAME
+
+#include <asm-generic/pgtable.h>
+
+#endif /* _XTENSA_PGTABLE_H */
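
The swap-entry packing described above (6 bits of type at bit 7, 19 bits of
offset at bit 13, low bits left clear to avoid the ring and valid bits) can
be exercised in user space; swp_entry_t is modeled here as a bare struct:

#include <stdio.h>

typedef struct { unsigned long val; } swp_entry_t;

#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 13)
#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})

int main(void)
{
	swp_entry_t e = __swp_entry(3UL, 0x12345UL);

	/* prints: raw 0x2468a180, type 3, offset 0x12345 */
	printf("raw %#lx, type %lu, offset %#lx\n",
	       e.val, __swp_type(e), __swp_offset(e));
	return 0;
}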
diff --git a/include/asm-xtensa/platform-iss/hardware.h b/include/asm-xtensa/platform-iss/hardware.h
new file mode 100644
index 000000000000..22240f001803
--- /dev/null
+++ b/include/asm-xtensa/platform-iss/hardware.h
@@ -0,0 +1,29 @@
+/*
+ * include/asm-xtensa/platform-iss/hardware.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+/*
+ * This file contains the default configuration of ISS.
+ */
+
+#ifndef __ASM_XTENSA_ISS_HARDWARE
+#define __ASM_XTENSA_ISS_HARDWARE
+
+/*
+ * Memory configuration.
+ */
+
+#define PLATFORM_DEFAULT_MEM_START XSHAL_RAM_PADDR
+#define PLATFORM_DEFAULT_MEM_SIZE XSHAL_RAM_VSIZE
+
+/*
+ * Interrupt configuration.
+ */
+
+#endif /* __ASM_XTENSA_ISS_HARDWARE */
diff --git a/include/asm-xtensa/platform.h b/include/asm-xtensa/platform.h
new file mode 100644
index 000000000000..36163894bc20
--- /dev/null
+++ b/include/asm-xtensa/platform.h
@@ -0,0 +1,92 @@
+/*
+ * include/asm-xtensa/platform.h
+ *
+ * Platform specific functions
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PLATFORM_H
+#define _XTENSA_PLATFORM_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include <asm/bootparam.h>
+
+/*
+ * platform_init is called before the mmu is initialized to give the
+ * platform an early hook-up. bp_tag_t is a list of configuration tags
+ * passed from the boot-loader.
+ */
+extern void platform_init(bp_tag_t*);
+
+/*
+ * platform_setup is called from setup_arch with a pointer to the command-line
+ * string.
+ */
+extern void platform_setup (char **);
+
+/*
+ * platform_init_irq is called from init_IRQ.
+ */
+extern void platform_init_irq (void);
+
+/*
+ * platform_restart is called to restart the system.
+ */
+extern void platform_restart (void);
+
+/*
+ * platform_halt is called to stop the system and halt.
+ */
+extern void platform_halt (void);
+
+/*
+ * platform_power_off is called to stop the system and power it off.
+ */
+extern void platform_power_off (void);
+
+/*
+ * platform_idle is called from the idle function.
+ */
+extern void platform_idle (void);
+
+/*
+ * platform_heartbeat is called every HZ
+ */
+extern void platform_heartbeat (void);
+
+/*
+ * platform_pcibios_init is called to allow the platform to setup the pci bus.
+ */
+extern void platform_pcibios_init (void);
+
+/*
+ * platform_pcibios_fixup allows to modify the PCI configuration.
+ */
+extern int platform_pcibios_fixup (void);
+
+/*
+ * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
+ */
+extern void platform_calibrate_ccount (void);
+
+/*
+ * platform_get_rtc_time returns RTC seconds (returns 0 for no error)
+ */
+extern int platform_get_rtc_time(time_t*);
+
+/*
+ * platform_set_rtc_time set RTC seconds (returns 0 for no error)
+ */
+extern int platform_set_rtc_time(time_t);
+
+
+#endif /* _XTENSA_PLATFORM_H */
+
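A hypothetical minimal board file wiring up the hooks declared above; every
body here is a placeholder assumption (a real platform would at least reset
the board in platform_restart):

#include <asm/platform.h>

void platform_init(bp_tag_t *first)	{ /* parse boot-loader tags */ }
void platform_setup(char **cmdline)	{ }
void platform_init_irq(void)		{ }
void platform_idle(void)		{ }
void platform_heartbeat(void)		{ }

void platform_restart(void)	{ for (;;) ; /* no reset line in this sketch */ }
void platform_halt(void)	{ for (;;) ; }
void platform_power_off(void)	{ for (;;) ; }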
diff --git a/include/asm-xtensa/poll.h b/include/asm-xtensa/poll.h
new file mode 100644
index 000000000000..dffe447534e0
--- /dev/null
+++ b/include/asm-xtensa/poll.h
@@ -0,0 +1,37 @@
+/*
+ * include/asm-xtensa/poll.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POLL_H
+#define _XTENSA_POLL_H
+
+
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#define POLLWRNORM POLLOUT
+#define POLLWRBAND 0x0100
+
+#define POLLMSG 0x0400
+#define POLLREMOVE 0x0800
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif /* _XTENSA_POLL_H */
diff --git a/include/asm-xtensa/posix_types.h b/include/asm-xtensa/posix_types.h
new file mode 100644
index 000000000000..2c816b0e7762
--- /dev/null
+++ b/include/asm-xtensa/posix_types.h
@@ -0,0 +1,123 @@
+/*
+ * include/asm-xtensa/posix_types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Largely copied from include/asm-ppc/posix_types.h
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_POSIX_TYPES_H
+#define _XTENSA_POSIX_TYPES_H
+
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc. Also, we cannot
+ * assume GCC is being used.
+ */
+
+typedef unsigned long __kernel_ino_t;
+typedef unsigned int __kernel_mode_t;
+typedef unsigned short __kernel_nlink_t;
+typedef long __kernel_off_t;
+typedef int __kernel_pid_t;
+typedef unsigned short __kernel_ipc_pid_t;
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_suseconds_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef int __kernel_daddr_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+typedef unsigned short __kernel_old_dev_t;
+
+#ifdef __GNUC__
+typedef long long __kernel_loff_t;
+#endif
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifndef __GNUC__
+
+#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
+#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
+#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ZERO(set) \
+ ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
+
+#else /* __GNUC__ */
+
+#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
+ || (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
+/* With GNU C, use inline functions instead so args are evaluated only once: */
+
+#undef __FD_SET
+static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
+}
+
+#undef __FD_CLR
+static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
+}
+
+#undef __FD_ISSET
+static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
+{
+ unsigned long _tmp = fd / __NFDBITS;
+ unsigned long _rem = fd % __NFDBITS;
+ return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static __inline__ void __FD_ZERO(__kernel_fd_set *p)
+{
+ unsigned int *tmp = (unsigned int *)p->fds_bits;
+ int i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 8:
+ tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
+ tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
+ return;
+ }
+ }
+ i = __FDSET_LONGS;
+ while (i) {
+ i--;
+ *tmp = 0;
+ tmp++;
+ }
+}
+
+#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
+#endif /* __GNUC__ */
+#endif /* _XTENSA_POSIX_TYPES_H */
diff --git a/include/asm-xtensa/processor.h b/include/asm-xtensa/processor.h
new file mode 100644
index 000000000000..9cab5e4298b9
--- /dev/null
+++ b/include/asm-xtensa/processor.h
@@ -0,0 +1,205 @@
+/*
+ * include/asm-xtensa/processor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PROCESSOR_H
+#define _XTENSA_PROCESSOR_H
+
+#ifdef __ASSEMBLY__
+#define _ASMLANGUAGE
+#endif
+
+#include <xtensa/config/core.h>
+#include <xtensa/config/specreg.h>
+#include <xtensa/config/tie.h>
+#include <xtensa/config/system.h>
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+#include <asm/coprocessor.h>
+
+/* Assertions. */
+
+#if (XCHAL_HAVE_WINDOWED != 1)
+#error Linux requires the Xtensa Windowed Registers Option.
+#endif
+
+/*
+ * User space process size: 1 GB.
+ * Windowed call ABI requires caller and callee to be located within the same
+ * 1 GB region. The C compiler places trampoline code on the stack for sources
+ * that take the address of a nested C function (a feature used by glibc), so
+ * the 1 GB requirement applies to the stack as well.
+ */
+
+#define TASK_SIZE 0x40000000
+
+/*
+ * General exception cause assigned to debug exceptions. Debug exceptions go
+ * to their own vector, rather than the general exception vectors (user,
+ * kernel, double); and their specific causes are reported via DEBUGCAUSE
+ * rather than EXCCAUSE. However it is sometimes convenient to redirect debug
+ * exceptions to the general exception mechanism. To support this, an
+ * otherwise unused EXCCAUSE value is assigned to debug exceptions.
+ */
+
+#define EXCCAUSE_MAPPED_DEBUG 63
+
+/*
+ * We use DEPC also as a flag to distinguish between double and regular
+ * exceptions. For performance reasons, DEPC might contain the value of
+ * EXCCAUSE for regular exceptions, so we use this definition to mark a
+ * valid double exception address.
+ * (Note: We use it in bgeui, so it should be 64, 128, or 256)
+ */
+
+#define VALID_DOUBLE_EXCEPTION_ADDRESS 64
+
+/* LOCKLEVEL defines the interrupt level that masks all
+ * general-purpose interrupts.
+ */
+#define LOCKLEVEL 1
+
+/* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
+ * registers
+ */
+#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART in bits */
+#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
+
+#ifndef __ASSEMBLY__
+
+/* Build a valid return address for the specified call winsize.
+ * winsize must be 1 (call4), 2 (call8), or 3 (call12)
+ */
+#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
+
+/* Convert return address to a valid pc
+ * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ */
+#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct thread_struct {
+
+ /* kernel's return address and stack pointer for context switching */
+ unsigned long ra; /* kernel's a0: return address and window call size */
+ unsigned long sp; /* kernel's a1: stack pointer */
+
+ mm_segment_t current_ds; /* see uaccess.h for example uses */
+
+ /* struct xtensa_cpuinfo info; */
+
+ unsigned long bad_vaddr; /* last user fault */
+ unsigned long bad_uaddr; /* last kernel fault accessing user space */
+ unsigned long error_code;
+
+ unsigned long ibreak[XCHAL_NUM_IBREAK];
+ unsigned long dbreaka[XCHAL_NUM_DBREAK];
+ unsigned long dbreakc[XCHAL_NUM_DBREAK];
+
+ /* Allocate storage for extra state and coprocessor state. */
+ unsigned char cp_save[XTENSA_CP_EXTRA_SIZE]
+ __attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN)));
+
+ /* Make structure 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+};
+
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l;})
+
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
+
+#define INIT_THREAD \
+{ \
+ ra: 0, \
+ sp: sizeof(init_stack) + (long) &init_stack, \
+ current_ds: {0}, \
+ /*info: {0}, */ \
+ bad_vaddr: 0, \
+ bad_uaddr: 0, \
+ error_code: 0, \
+}
+
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Note: We set up ps as if we did a call4 to the new pc.
+ * set_thread_state in signal.c depends on it.
+ */
+#define USER_PS_VALUE ( (1 << XCHAL_PS_WOE_SHIFT) + \
+ (1 << XCHAL_PS_CALLINC_SHIFT) + \
+ (USER_RING << XCHAL_PS_RING_SHIFT) + \
+ (1 << XCHAL_PS_PROGSTACK_SHIFT) + \
+ (1 << XCHAL_PS_EXCM_SHIFT) )
+
+/* Clearing a0 terminates the backtrace. */
+#define start_thread(regs, new_pc, new_sp) \
+ regs->pc = new_pc; \
+ regs->ps = USER_PS_VALUE; \
+ regs->areg[1] = new_sp; \
+ regs->areg[0] = 0; \
+ regs->wmask = 1; \
+ regs->depc = 0; \
+ regs->windowbase = 0; \
+ regs->windowstart = 1;
+
+/* Forward declaration */
+struct task_struct;
+struct mm_struct;
+
+// FIXME: do we need release_thread for CP??
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while(0)
+
+// FIXME: do we need prepare_to_copy (lazy status) for CP??
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+/* Copy and release all segment info associated with a VM */
+
+#define copy_segments(p, mm) do { } while(0)
+#define release_segments(mm) do { } while(0)
+#define forget_segments() do { } while (0)
+
+#define thread_saved_pc(tsk) (xtensa_pt_regs(tsk)->pc)
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+#define KSTK_EIP(tsk) (xtensa_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk) (xtensa_pt_regs(tsk)->areg[1])
+
+#define cpu_relax() do { } while (0)
+
+/* Special register access. */
+
+#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
+#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
+
+#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
+#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_PROCESSOR_H */
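
The return-address macros above round-trip as follows; a user-space sketch
with illustrative addresses (both chosen inside the same 1 GB region, as the
comment requires):

#include <stdio.h>

#define MAKE_RA_FOR_CALL(ra,ws)	(((ra) & 0x3fffffff) | (ws) << 30)
#define MAKE_PC_FROM_RA(ra,sp)	(((ra) & 0x3fffffff) | ((sp) & 0xc0000000))

int main(void)
{
	unsigned long pc = 0x4000a3c0UL;	/* illustrative text address */
	unsigned long sp = 0x4ffff000UL;	/* stack in the same 1 GB region */

	/* Encode a call8 return address (window size 2), then recover the pc. */
	unsigned long ra = MAKE_RA_FOR_CALL(pc, 2UL);

	/* prints: ra = 0x8000a3c0, pc = 0x4000a3c0 */
	printf("ra = %#lx, pc = %#lx\n", ra, MAKE_PC_FROM_RA(ra, sp));
	return 0;
}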
diff --git a/include/asm-xtensa/ptrace.h b/include/asm-xtensa/ptrace.h
new file mode 100644
index 000000000000..2848a5ff8349
--- /dev/null
+++ b/include/asm-xtensa/ptrace.h
@@ -0,0 +1,135 @@
+/*
+ * include/asm-xtensa/ptrace.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_PTRACE_H
+#define _XTENSA_PTRACE_H
+
+#include <xtensa/config/core.h>
+
+/*
+ * Kernel stack
+ *
+ * +-----------------------+ -------- STACK_SIZE
+ * | register file | |
+ * +-----------------------+ |
+ * | struct pt_regs | |
+ * +-----------------------+ | ------ PT_REGS_OFFSET
+ * double : 16 bytes spill area : | ^
+ * excetion :- - - - - - - - - - - -: | |
+ * frame : struct pt_regs : | |
+ * :- - - - - - - - - - - -: | |
+ * | | | |
+ * | memory stack | | |
+ * | | | |
+ * ~ ~ ~ ~
+ * ~ ~ ~ ~
+ * | | | |
+ * | | | |
+ * +-----------------------+ | | --- STACK_BIAS
+ * | struct task_struct | | | ^
+ * current --> +-----------------------+ | | |
+ * | struct thread_info | | | |
+ * +-----------------------+ --------
+ */
+
+#define KERNEL_STACK_SIZE (2 * PAGE_SIZE)
+
+/* Offsets for exception_handlers[] (3 x 64-entries x 4-byte tables). */
+
+#define EXC_TABLE_KSTK 0x004 /* Kernel Stack */
+#define EXC_TABLE_DOUBLE_SAVE 0x008 /* Double exception save area for a0 */
+#define EXC_TABLE_FIXUP 0x00c /* Fixup handler */
+#define EXC_TABLE_PARAM 0x010 /* For passing a parameter to fixup */
+#define EXC_TABLE_SYSCALL_SAVE 0x014 /* For fast syscall handler */
+#define EXC_TABLE_FAST_USER 0x100 /* Fast user exception handler */
+#define EXC_TABLE_FAST_KERNEL 0x200 /* Fast kernel exception handler */
+#define EXC_TABLE_DEFAULT 0x300 /* Default C-Handler */
+#define EXC_TABLE_SIZE 0x400
+
+/* Registers used by strace */
+
+#define REG_A_BASE 0xfc000000
+#define REG_AR_BASE 0x04000000
+#define REG_PC 0x14000000
+#define REG_PS 0x080000e6
+#define REG_WB 0x08000048
+#define REG_WS 0x08000049
+#define REG_LBEG 0x08000000
+#define REG_LEND 0x08000001
+#define REG_LCOUNT 0x08000002
+#define REG_SAR 0x08000003
+#define REG_DEPC 0x080000c0
+#define REG_EXCCAUSE 0x080000e8
+#define REG_EXCVADDR 0x080000ee
+#define SYSCALL_NR 0x1
+
+#define AR_REGNO_TO_A_REGNO(ar, wb) (((ar) - (wb)*4) & ~(XCHAL_NUM_AREGS - 1))
+
+/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */
+
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_GETFPREGSIZE 18
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * kernel stack during a system call or other kernel entry.
+ */
+struct pt_regs {
+ unsigned long pc; /* 4 */
+ unsigned long ps; /* 8 */
+ unsigned long depc; /* 12 */
+ unsigned long exccause; /* 16 */
+ unsigned long excvaddr; /* 20 */
+ unsigned long debugcause; /* 24 */
+ unsigned long wmask; /* 28 */
+ unsigned long lbeg; /* 32 */
+ unsigned long lend; /* 36 */
+ unsigned long lcount; /* 40 */
+ unsigned long sar; /* 44 */
+ unsigned long windowbase; /* 48 */
+ unsigned long windowstart; /* 52 */
+ unsigned long syscall; /* 56 */
+ int reserved[2]; /* 64 */
+
+ /* Make sure the areg field is 16 bytes aligned. */
+ int align[0] __attribute__ ((aligned(16)));
+
+ /* current register frame.
+ * Note: The ESF for kernel exceptions ends after 16 registers!
+ */
+ unsigned long areg[16]; /* 128 (64) */
+};
+
+#ifdef __KERNEL__
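+/* Note: pt_regs sits just below the top of the kernel stack; the
+ * (XCHAL_NUM_AREGS-16)*4 bytes above it form the spill area for the
+ * remainder of the physical register file (see the diagram above). */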
+# define xtensa_pt_regs(tsk) ((struct pt_regs*) \
+ (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1)
+# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
+# define instruction_pointer(regs) ((regs)->pc)
+extern void show_regs(struct pt_regs *);
+
+# ifndef CONFIG_SMP
+# define profile_pc(regs) instruction_pointer(regs)
+# endif
+#endif /* __KERNEL__ */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+# include <asm/offsets.h>
+#define PT_REGS_OFFSET (KERNEL_STACK_SIZE - PT_USER_SIZE)
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _XTENSA_PTRACE_H */
diff --git a/include/asm-xtensa/resource.h b/include/asm-xtensa/resource.h
new file mode 100644
index 000000000000..17b5ab311771
--- /dev/null
+++ b/include/asm-xtensa/resource.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/resource.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_RESOURCE_H
+#define _XTENSA_RESOURCE_H
+
+#include <asm-generic/resource.h>
+
+#endif /* _XTENSA_RESOURCE_H */
diff --git a/include/asm-xtensa/rmap.h b/include/asm-xtensa/rmap.h
new file mode 100644
index 000000000000..649588b7e9ad
--- /dev/null
+++ b/include/asm-xtensa/rmap.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/rmap.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_RMAP_H
+#define _XTENSA_RMAP_H
+
+#include <asm-generic/rmap.h>
+
+#endif
diff --git a/include/asm-xtensa/rwsem.h b/include/asm-xtensa/rwsem.h
new file mode 100644
index 000000000000..3c02b0e033f0
--- /dev/null
+++ b/include/asm-xtensa/rwsem.h
@@ -0,0 +1,175 @@
+/*
+ * include/asm-xtensa/rwsem.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Largely copied from include/asm-ppc/rwsem.h
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_RWSEM_H
+#define _XTENSA_RWSEM_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+ signed long count;
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#if RWSEM_DEBUG
+ int debug;
+#endif
+};
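+
+/* Count encoding, by example: one active reader leaves count at
+ * 0x00000001; an active writer leaves it at RWSEM_WAITING_BIAS +
+ * RWSEM_ACTIVE_BIAS (0xffff0001, i.e. negative); a reader arriving
+ * while that writer holds the lock bumps the active half to 2 but
+ * keeps the count negative, which routes it to the slow path. */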
+
+/*
+ * initialisation
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT , 0
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void init_rwsem(struct rw_semaphore *sem)
+{
+ sem->count = RWSEM_UNLOCKED_VALUE;
+ spin_lock_init(&sem->wait_lock);
+ INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+ sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+ if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
+ smp_wmb();
+ else
+ rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ while ((tmp = sem->count) >= 0) {
+ if (tmp == cmpxchg(&sem->count, tmp,
+ tmp + RWSEM_ACTIVE_READ_BIAS)) {
+ smp_wmb();
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count));
+ if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
+ smp_wmb();
+ else
+ rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+ RWSEM_ACTIVE_WRITE_BIAS);
+ smp_wmb();
+ return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ smp_wmb();
+ tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
+ if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+ smp_wmb();
+ if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
+ (atomic_t *)(&sem->count)) < 0)
+ rwsem_wake(sem);
+}
+
+/*
+ * implement atomic add functionality
+ */
+static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+{
+ atomic_add(delta, (atomic_t *)(&sem->count));
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+ int tmp;
+
+ smp_wmb();
+ tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
+ if (tmp < 0)
+ rwsem_downgrade_wake(sem);
+}
+
+/*
+ * implement exchange and add functionality
+ */
+static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+{
+ smp_mb();
+ return atomic_add_return(delta, (atomic_t *)(&sem->count));
+}
+
+#endif /* _XTENSA_RWSEM_H */
diff --git a/include/asm-xtensa/scatterlist.h b/include/asm-xtensa/scatterlist.h
new file mode 100644
index 000000000000..38a2b9acd658
--- /dev/null
+++ b/include/asm-xtensa/scatterlist.h
@@ -0,0 +1,34 @@
+/*
+ * include/asm-xtensa/scatterlist.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SCATTERLIST_H
+#define _XTENSA_SCATTERLIST_H
+
+struct scatterlist {
+ struct page *page;
+ unsigned int offset;
+ dma_addr_t dma_address;
+ unsigned int length;
+};
+
+/*
+ * These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->length)
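+
+/* Usage sketch (names are illustrative): walking the entries that
+ * pci_map_sg() mapped, where 'nents' is its return value:
+ *
+ *	int i;
+ *	for (i = 0; i < nents; i++) {
+ *		dma_addr_t addr = sg_dma_address(&sg[i]);
+ *		unsigned int len = sg_dma_len(&sg[i]);
+ *		if (len == 0)
+ *			break;
+ *		... program the device with (addr, len) ...
+ *	}
+ */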
+
+
+#define ISA_DMA_THRESHOLD (~0UL)
+
+#endif /* _XTENSA_SCATTERLIST_H */
diff --git a/include/asm-xtensa/sections.h b/include/asm-xtensa/sections.h
new file mode 100644
index 000000000000..40b5191b55a2
--- /dev/null
+++ b/include/asm-xtensa/sections.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/sections.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SECTIONS_H
+#define _XTENSA_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+#endif /* _XTENSA_SECTIONS_H */
diff --git a/include/asm-xtensa/segment.h b/include/asm-xtensa/segment.h
new file mode 100644
index 000000000000..a2eb547a1a75
--- /dev/null
+++ b/include/asm-xtensa/segment.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/segment.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SEGMENT_H
+#define _XTENSA_SEGMENT_H
+
+#include <asm/uaccess.h>
+
+#endif /* _XTENSA_SEGMENT_H */
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
new file mode 100644
index 000000000000..c8a7574a9a57
--- /dev/null
+++ b/include/asm-xtensa/semaphore.h
@@ -0,0 +1,129 @@
+/*
+ * linux/include/asm-xtensa/semaphore.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SEMAPHORE_H
+#define _XTENSA_SEMAPHORE_H
+
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+
+struct semaphore {
+ atomic_t count;
+ int sleepers;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (int)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
+#define __SEMAPHORE_INITIALIZER(name,count) \
+ { ATOMIC_INIT(count), \
+ 0, \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
+
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name, 1)
+
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
+extern inline void sema_init (struct semaphore *sem, int val)
+{
+/*
+ * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
+ *
+ * I'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
+ */
+ atomic_set(&sem->count, val);
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (int)&sem->__magic;
+#endif
+}
+
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
+}
+
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
+{
+ sema_init(sem, 0);
+}
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
+extern __inline__ void down(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ __down(sem);
+}
+
+extern __inline__ int down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ ret = __down_interruptible(sem);
+ return ret;
+}
+
+extern __inline__ int down_trylock(struct semaphore * sem)
+{
+ int ret = 0;
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ if (atomic_sub_return(1, &sem->count) < 0)
+ ret = __down_trylock(sem);
+ return ret;
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ */
+extern __inline__ void up(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ if (atomic_add_return(1, &sem->count) <= 0)
+ __up(sem);
+}
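+
+/* Usage sketch: a mutex-style semaphore guarding a critical section;
+ * 'my_sem' is a hypothetical name:
+ *
+ *	static DECLARE_MUTEX(my_sem);
+ *
+ *	down(&my_sem);
+ *	... critical section ...
+ *	up(&my_sem);
+ */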
+
+#endif /* _XTENSA_SEMAPHORE_H */
diff --git a/include/asm-xtensa/sembuf.h b/include/asm-xtensa/sembuf.h
new file mode 100644
index 000000000000..2d26c47666fe
--- /dev/null
+++ b/include/asm-xtensa/sembuf.h
@@ -0,0 +1,44 @@
+/*
+ * include/asm-xtensa/sembuf.h
+ *
+ * The semid64_ds structure for Xtensa architecture.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ *
+ */
+
+#ifndef _XTENSA_SEMBUF_H
+#define _XTENSA_SEMBUF_H
+
+#include <asm/byteorder.h>
+
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+#if XCHAL_HAVE_LE
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+#else
+ unsigned long __unused1;
+ __kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused2;
+ __kernel_time_t sem_ctime; /* last change time */
+#endif
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _XTENSA_SEMBUF_H */
diff --git a/include/asm-xtensa/serial.h b/include/asm-xtensa/serial.h
new file mode 100644
index 000000000000..ec04114fcf0b
--- /dev/null
+++ b/include/asm-xtensa/serial.h
@@ -0,0 +1,18 @@
+/*
+ * include/asm-xtensa/serial.h
+ *
+ * Configuration details for 8250, 16450, 16550, etc. serial ports
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SERIAL_H
+#define _XTENSA_SERIAL_H
+
+#include <asm/platform/serial.h>
+
+#endif /* _XTENSA_SERIAL_H */
diff --git a/include/asm-xtensa/setup.h b/include/asm-xtensa/setup.h
new file mode 100644
index 000000000000..e3636520d8cc
--- /dev/null
+++ b/include/asm-xtensa/setup.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/setup.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SETUP_H
+#define _XTENSA_SETUP_H
+
+#define COMMAND_LINE_SIZE 256
+
+#endif
diff --git a/include/asm-xtensa/shmbuf.h b/include/asm-xtensa/shmbuf.h
new file mode 100644
index 000000000000..a30b81a4b933
--- /dev/null
+++ b/include/asm-xtensa/shmbuf.h
@@ -0,0 +1,50 @@
+/*
+ * include/asm-xtensa/shmbuf.h
+ *
+ * The shmid64_ds structure for Xtensa architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SHMBUF_H
+#define _XTENSA_SHMBUF_H
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* _XTENSA_SHMBUF_H */
diff --git a/include/asm-xtensa/shmparam.h b/include/asm-xtensa/shmparam.h
new file mode 100644
index 000000000000..d3b65bfa71c3
--- /dev/null
+++ b/include/asm-xtensa/shmparam.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-xtensa/shmparam.h
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+
+#ifndef _XTENSA_SHMPARAM_H
+#define _XTENSA_SHMPARAM_H
+
+#include <asm/processor.h>
+
+/*
+ * Xtensa can have variable size caches, and if
+ * the size of single way is larger than the page size,
+ * then we have to start worrying about cache aliasing
+ * problems.
+ */
+
+#define SHMLBA ((PAGE_SIZE > DCACHE_WAY_SIZE)? PAGE_SIZE : DCACHE_WAY_SIZE)
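+
+/* Example: with a 4 kB PAGE_SIZE and a 16 kB direct-mapped way,
+ * SHMLBA is 16 kB, so every mapping of a shared segment uses the
+ * same cache index bits and no aliases can form. */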
+
+#endif /* _XTENSA_SHMPARAM_H */
diff --git a/include/asm-xtensa/sigcontext.h b/include/asm-xtensa/sigcontext.h
new file mode 100644
index 000000000000..a75177291418
--- /dev/null
+++ b/include/asm-xtensa/sigcontext.h
@@ -0,0 +1,44 @@
+/*
+ * include/asm-xtensa/sigcontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2003 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SIGCONTEXT_H
+#define _XTENSA_SIGCONTEXT_H
+
+#define _ASMLANGUAGE
+#include <asm/processor.h>
+#include <asm/coprocessor.h>
+
+
+struct _cpstate {
+ unsigned char _cpstate[XTENSA_CP_EXTRA_SIZE];
+} __attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
+
+
+struct sigcontext {
+ unsigned long oldmask;
+
+ /* CPU registers */
+ unsigned long sc_pc;
+ unsigned long sc_ps;
+ unsigned long sc_wmask;
+ unsigned long sc_windowbase;
+ unsigned long sc_windowstart;
+ unsigned long sc_lbeg;
+ unsigned long sc_lend;
+ unsigned long sc_lcount;
+ unsigned long sc_sar;
+ unsigned long sc_depc;
+ unsigned long sc_dareg0;
+ unsigned long sc_treg[4];
+ unsigned long sc_areg[XCHAL_NUM_AREGS];
+ struct _cpstate *sc_cpstate;
+};
+
+#endif /* _XTENSA_SIGCONTEXT_H */
diff --git a/include/asm-xtensa/siginfo.h b/include/asm-xtensa/siginfo.h
new file mode 100644
index 000000000000..44f0ae77b539
--- /dev/null
+++ b/include/asm-xtensa/siginfo.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/siginfo.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SIGINFO_H
+#define _XTENSA_SIGINFO_H
+
+#include <asm-generic/siginfo.h>
+
+#endif /* _XTENSA_SIGINFO_H */
diff --git a/include/asm-xtensa/signal.h b/include/asm-xtensa/signal.h
new file mode 100644
index 000000000000..5d6fc9cdf58d
--- /dev/null
+++ b/include/asm-xtensa/signal.h
@@ -0,0 +1,187 @@
+/*
+ * include/asm-xtensa/signal.h
+ *
+ * Swiped from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SIGNAL_H
+#define _XTENSA_SIGNAL_H
+
+
+#define _NSIG 64
+#define _NSIG_BPW 32
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+#endif
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/* #define SIGLOST 29 */
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX (_NSIG-1)
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
+ * SA_RESTART flag to get restarting signals (which were the default long ago).
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define SA_RESTORER 0x04000000
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
+#ifndef __ASSEMBLY__
+#ifdef __KERNEL__
+
+/*
+ * These values of sa_flags are used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * SA_INTERRUPT is also used by the irq handling routines.
+ * SA_SHIRQ is for shared interrupt support on PCI and EISA.
+ */
+#define SA_PROBE SA_ONESHOT
+#define SA_SAMPLE_RANDOM SA_RESTART
+#define SA_SHIRQ 0x04000000
+#endif
+
+#define SIG_BLOCK 0 /* for blocking signals */
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#define SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void (*__sighandler_t)(int);
+
+#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */
+
+#ifdef __KERNEL__
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+#else
+
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+ union {
+ __sighandler_t _sa_handler;
+ void (*_sa_sigaction)(int, struct siginfo *, void *);
+ } _u;
+ sigset_t sa_mask;
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+ void *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+#include <asm/sigcontext.h>
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_SIGNAL_H */
diff --git a/include/asm-xtensa/smp.h b/include/asm-xtensa/smp.h
new file mode 100644
index 000000000000..83c569e3bdbd
--- /dev/null
+++ b/include/asm-xtensa/smp.h
@@ -0,0 +1,27 @@
+/*
+ * include/asm-xtensa/smp.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SMP_H
+#define _XTENSA_SMP_H
+
+extern struct xtensa_cpuinfo boot_cpu_data;
+
+#define cpu_data (&boot_cpu_data)
+#define current_cpu_data boot_cpu_data
+
+struct xtensa_cpuinfo {
+ unsigned long *pgd_cache;
+ unsigned long *pte_cache;
+ unsigned long pgtable_cache_sz;
+};
+
+#define cpu_logical_map(cpu) (cpu)
+
+#endif /* _XTENSA_SMP_H */
diff --git a/include/asm-xtensa/socket.h b/include/asm-xtensa/socket.h
new file mode 100644
index 000000000000..daccd05a14cd
--- /dev/null
+++ b/include/asm-xtensa/socket.h
@@ -0,0 +1,61 @@
+/*
+ * include/asm-xtensa/socket.h
+ *
+ * Copied from i386.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _XTENSA_SOCKET_H
+#define _XTENSA_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockoptions(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add: #define SO_REUSEPORT 15 */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+#define SO_PEERSEC 31
+
+#endif /* _XTENSA_SOCKET_H */
diff --git a/include/asm-xtensa/sockios.h b/include/asm-xtensa/sockios.h
new file mode 100644
index 000000000000..20d2ba10ecd1
--- /dev/null
+++ b/include/asm-xtensa/sockios.h
@@ -0,0 +1,30 @@
+/*
+ * include/asm-xtensa/sockios.h
+ *
+ * Socket-level I/O control calls. Copied from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 by Ralf Baechle
+ * Copyright (C) 2001 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SOCKIOS_H
+#define _XTENSA_SOCKIOS_H
+
+#include <asm/ioctl.h>
+
+/* Socket-level I/O control calls. */
+
+#define FIOGETOWN _IOR('f', 123, int)
+#define FIOSETOWN _IOW('f', 124, int)
+
+#define SIOCATMARK _IOR('s', 7, int)
+#define SIOCSPGRP _IOW('s', 8, pid_t)
+#define SIOCGPGRP _IOR('s', 9, pid_t)
+
+#define SIOCGSTAMP 0x8906 /* Get stamp - linux-specific */
+
+#endif /* _XTENSA_SOCKIOS_H */
diff --git a/include/asm-xtensa/spinlock.h b/include/asm-xtensa/spinlock.h
new file mode 100644
index 000000000000..8ff23649581b
--- /dev/null
+++ b/include/asm-xtensa/spinlock.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/spinlock.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SPINLOCK_H
+#define _XTENSA_SPINLOCK_H
+
+#include <linux/spinlock.h>
+
+#endif /* _XTENSA_SPINLOCK_H */
diff --git a/include/asm-xtensa/stat.h b/include/asm-xtensa/stat.h
new file mode 100644
index 000000000000..2f4662ff6c3a
--- /dev/null
+++ b/include/asm-xtensa/stat.h
@@ -0,0 +1,105 @@
+/*
+ * include/asm-xtensa/stat.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_STAT_H
+#define _XTENSA_STAT_H
+
+#include <linux/types.h>
+
+struct __old_kernel_stat {
+ unsigned short st_dev;
+ unsigned short st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_mtime;
+ unsigned long st_ctime;
+};
+
+#define STAT_HAVE_NSEC 1
+
+struct stat {
+ unsigned short st_dev;
+ unsigned short __pad1;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned short st_rdev;
+ unsigned short __pad2;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+/* This matches struct stat64 in glibc-2.2.3. */
+
+struct stat64 {
+#ifdef __XTENSA_EL__
+ unsigned short st_dev; /* Device */
+ unsigned char __pad0[10];
+#else
+ unsigned char __pad0[6];
+ unsigned short st_dev;
+ unsigned char __pad1[2];
+#endif
+
+#define STAT64_HAS_BROKEN_ST_INO 1
+ unsigned long __st_ino; /* 32bit file serial number. */
+
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+
+#ifdef __XTENSA_EL__
+ unsigned short st_rdev; /* Device number, if device. */
+ unsigned char __pad3[10];
+#else
+ unsigned char __pad2[6];
+ unsigned short st_rdev;
+ unsigned char __pad3[2];
+#endif
+
+ long long int st_size; /* Size of file, in bytes. */
+ long int st_blksize; /* Optimal block size for I/O. */
+
+#ifdef __XTENSA_EL__
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long __pad4;
+#else
+ unsigned long __pad4;
+ unsigned long st_blocks;
+#endif
+
+ unsigned long __pad5;
+ long int st_atime; /* Time of last access. */
+ unsigned long st_atime_nsec;
+ long int st_mtime; /* Time of last modification. */
+ unsigned long st_mtime_nsec;
+ long int st_ctime; /* Time of last status change. */
+ unsigned long st_ctime_nsec;
+ unsigned long long int st_ino; /* File serial number. */
+};
+
+#endif /* _XTENSA_STAT_H */
diff --git a/include/asm-xtensa/statfs.h b/include/asm-xtensa/statfs.h
new file mode 100644
index 000000000000..9c3d1a213136
--- /dev/null
+++ b/include/asm-xtensa/statfs.h
@@ -0,0 +1,17 @@
+/*
+ * include/asm-xtensa/statfs.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_STATFS_H
+#define _XTENSA_STATFS_H
+
+#include <asm-generic/statfs.h>
+
+#endif /* _XTENSA_STATFS_H */
+
diff --git a/include/asm-xtensa/string.h b/include/asm-xtensa/string.h
new file mode 100644
index 000000000000..3f81b27d9809
--- /dev/null
+++ b/include/asm-xtensa/string.h
@@ -0,0 +1,124 @@
+/*
+ * include/asm-xtensa/string.h
+ *
+ * These trivial string functions are considered part of the public domain.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+/* We should optimize these. See arch/xtensa/lib/strncpy_user.S */
+
+#ifndef _XTENSA_STRING_H
+#define _XTENSA_STRING_H
+
+#define __HAVE_ARCH_STRCPY
+extern __inline__ char *strcpy(char *__dest, const char *__src)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ __asm__ __volatile__("1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "bnez %2, 1b\n\t"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+extern __inline__ char *strncpy(char *__dest, const char *__src, size_t __n)
+{
+ register char *__xdest = __dest;
+ unsigned long __dummy;
+
+ if (__n == 0)
+ return __xdest;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %2, %1, 0\n\t"
+ "s8i %2, %0, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "bne %1, %5, 1b\n"
+ "2:"
+ : "=r" (__dest), "=r" (__src), "=&r" (__dummy)
+ : "0" (__dest), "1" (__src), "r" (__src+__n)
+ : "memory");
+
+ return __xdest;
+}
+
+#define __HAVE_ARCH_STRCMP
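+/* Byte-at-a-time compare: load one character from each string, stop at
+ * the first mismatch or at the terminating NUL of __cs, and return the
+ * difference of the last pair loaded. */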
+extern __inline__ int strcmp(const char *__cs, const char *__ct)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "1:\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %3, %2"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+extern __inline__ int strncmp(const char *__cs, const char *__ct, size_t __n)
+{
+ register int __res;
+ unsigned long __dummy;
+
+ __asm__ __volatile__(
+ "mov %2, %3\n"
+ "1:\n\t"
+ "beq %0, %6, 2f\n\t"
+ "l8ui %3, %1, 0\n\t"
+ "addi %1, %1, 1\n\t"
+ "l8ui %2, %0, 0\n\t"
+ "addi %0, %0, 1\n\t"
+ "beqz %2, 2f\n\t"
+ "beqz %3, 2f\n\t"
+ "beq %2, %3, 1b\n"
+ "2:\n\t"
+ "sub %2, %3, %2"
+ : "=r" (__cs), "=r" (__ct), "=&r" (__res), "=&r" (__dummy)
+ : "0" (__cs), "1" (__ct), "r" (__cs+__n));
+
+ return __res;
+}
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
+/* Don't build bcopy at all ... */
+#define __HAVE_ARCH_BCOPY
+
+#define __HAVE_ARCH_MEMSCAN
+#define memscan memchr
+
+#endif /* _XTENSA_STRING_H */
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
new file mode 100644
index 000000000000..690fe325e671
--- /dev/null
+++ b/include/asm-xtensa/system.h
@@ -0,0 +1,252 @@
+/*
+ * include/asm-xtensa/system.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#include <linux/config.h>
+#include <linux/stringify.h>
+
+#include <asm/processor.h>
+
+/* interrupt control */
+
+#define local_save_flags(x) \
+	__asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x))
+#define local_irq_restore(x) do { \
+	__asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync" \
+			      :: "a" (x) : "memory"); } while(0)
+#define local_irq_save(x) do { \
+	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL) \
+			      : "=a" (x) :: "memory");} while(0)
+
+static inline void local_irq_disable(void)
+{
+ unsigned long flags;
+ __asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
+ : "=a" (flags) :: "memory");
+}
+static inline void local_irq_enable(void)
+{
+ unsigned long flags;
+ __asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
+
+}
+
+static inline int irqs_disabled(void)
+{
+ unsigned long flags;
+ local_save_flags(flags);
+ return flags & 0xf;
+}
+
+#define RSR_CPENABLE(x) do { \
+	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
+	} while(0)
+#define WSR_CPENABLE(x) do { \
+	__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync" \
+			     :: "a" (x));} while(0)
+
+#define clear_cpenable() __clear_cpenable()
+
+extern __inline__ void __clear_cpenable(void)
+{
+#if XCHAL_HAVE_CP
+ unsigned long i = 0;
+ WSR_CPENABLE(i);
+#endif
+}
+
+extern __inline__ void enable_coprocessor(int i)
+{
+#if XCHAL_HAVE_CP
+ int cp;
+ RSR_CPENABLE(cp);
+ cp |= 1 << i;
+ WSR_CPENABLE(cp);
+#endif
+}
+
+extern __inline__ void disable_coprocessor(int i)
+{
+#if XCHAL_HAVE_CP
+ int cp;
+ RSR_CPENABLE(cp);
+ cp &= ~(1 << i);
+ WSR_CPENABLE(cp);
+#endif
+}
+
+#define smp_read_barrier_depends() do { } while(0)
+#define read_barrier_depends() do { } while(0)
+
+#define mb() barrier()
+#define rmb() mb()
+#define wmb() mb()
+
+#ifdef CONFIG_SMP
+#error smp_* not defined
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#if !defined (__ASSEMBLY__)
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ */
+extern void *_switch_to(void *last, void *next);
+
+#endif /* __ASSEMBLY__ */
+
+#define prepare_to_switch() do { } while(0)
+
+#define switch_to(prev,next,last) \
+do { \
+ clear_cpenable(); \
+ (last) = _switch_to(prev, next); \
+} while(0)
+
+/*
+ * cmpxchg
+ */
+
+extern __inline__ unsigned long
+__cmpxchg_u32(volatile int *p, int old, int new)
+{
+ __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %1, 0 \n\t"
+ "bne %0, %2, 1f \n\t"
+ "s32i %3, %1, 0 \n\t"
+ "1: \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n\t"
+ : "=&a" (old)
+ : "a" (p), "a" (old), "r" (new)
+ : "a15", "memory");
+ return old;
+}
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg(). */
+
+extern void __cmpxchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4: return __cmpxchg_u32(ptr, old, new);
+ default: __cmpxchg_called_with_bad_pointer();
+ return old;
+ }
+}
+
+#define cmpxchg(ptr,o,n) \
+ ({ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof (*(ptr))); \
+ })
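+
+/* Usage sketch: a lock-free increment built on cmpxchg(), retrying
+ * until no other CPU changed the value in between; 'v' is a
+ * hypothetical int:
+ *
+ *	int old, new;
+ *	do {
+ *		old = v;
+ *		new = old + 1;
+ *	} while (cmpxchg(&v, old, new) != old);
+ */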
+
+/*
+ * xchg_u32
+ *
+ * Note that a15 is used here because the register allocation
+ * done by the compiler is not guaranteed and a window overflow
+ * may not occur between the rsil and wsr instructions. By using
+ * a15 in the rsil, the machine is guaranteed to be in a state
+ * where no register reference will cause an overflow.
+ */
+
+extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t"
+ "l32i %0, %1, 0 \n\t"
+ "s32i %2, %1, 0 \n\t"
+ "wsr a15, "__stringify(PS)" \n\t"
+ "rsync \n\t"
+ : "=&a" (tmp)
+ : "a" (m), "a" (val)
+ : "a15", "memory");
+ return tmp;
+}
+
+#define tas(ptr) (xchg((ptr),1))
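+
+/* Usage sketch: a minimal test-and-set lock built on tas(); 'lock' is
+ * a hypothetical volatile int initialized to 0:
+ *
+ *	while (tas(&lock))
+ *		;		(spin until we swap out a 0)
+ *	... critical section ...
+ *	lock = 0;
+ */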
+
+#if ( __XCC__ == 1 )
+
+/* xt-xcc processes __inline__ differently than xt-gcc and decides to
+ * insert an out-of-line copy of function __xchg. This presents the
+ * unresolved symbol at link time of __xchg_called_with_bad_pointer,
+ * even though such a function would never be called at run-time.
+ * xt-gcc always inlines __xchg, and optimizes away the undefined
+ * bad_pointer function.
+ */
+
+#define xchg(ptr,x) xchg_u32(ptr,x)
+
+#else /* assume xt-gcc */
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+/*
+ * This only works if the compiler isn't horribly bad at optimizing.
+ * gcc-2.5.8 reportedly can't handle this, but I define that one to
+ * be dead anyway.
+ */
+
+extern void __xchg_called_with_bad_pointer(void);
+
+static __inline__ unsigned long
+__xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 4:
+ return xchg_u32(ptr, x);
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#endif
+
+extern void set_except_vector(int n, void *addr);
+
+static inline void spill_registers(void)
+{
+ unsigned int a0, ps;
+
+ __asm__ __volatile__ (
+ "movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t"
+ "mov a12, a0\n\t"
+ "rsr a13," __stringify(SAR) "\n\t"
+ "xsr a14," __stringify(PS) "\n\t"
+ "movi a0, _spill_registers\n\t"
+ "rsync\n\t"
+ "callx0 a0\n\t"
+ "mov a0, a12\n\t"
+ "wsr a13," __stringify(SAR) "\n\t"
+ "wsr a14," __stringify(PS) "\n\t"
+ :: "a" (&a0), "a" (&ps)
+ : "a2", "a3", "a12", "a13", "a14", "a15", "memory");
+}
+
+#define arch_align_stack(x) (x)
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/include/asm-xtensa/termbits.h b/include/asm-xtensa/termbits.h
new file mode 100644
index 000000000000..c780593ff5f9
--- /dev/null
+++ b/include/asm-xtensa/termbits.h
@@ -0,0 +1,194 @@
+/*
+ * include/asm-xtensa/termbits.h
+ *
+ * Copied from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TERMBITS_H
+#define _XTENSA_TERMBITS_H
+
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+/* c_cc characters */
+
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate (not used) */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* _XTENSA_TERMBITS_H */
diff --git a/include/asm-xtensa/termios.h b/include/asm-xtensa/termios.h
new file mode 100644
index 000000000000..83c6aed1d115
--- /dev/null
+++ b/include/asm-xtensa/termios.h
@@ -0,0 +1,122 @@
+/*
+ * include/asm-xtensa/termios.h
+ *
+ * Copied from SH.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TERMIOS_H
+#define _XTENSA_TERMIOS_H
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* Modem lines */
+
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+/* Line disciplines */
+
+#define N_TTY 0
+#define N_SLIP 1
+#define N_MOUSE 2
+#define N_PPP 3
+#define N_STRIP 4
+#define N_AX25 5
+#define N_X25 6 /* X.25 async */
+#define N_6PACK 7
+#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
+#define N_R3964 9 /* Reserved for Simatic R3964 module */
+#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
+#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
+#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
+#define N_SYNC_PPP 14
+#define N_HCI 15 /* Bluetooth HCI UART */
+
+#ifdef __KERNEL__
+
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+
+#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
+ unsigned short __tmp; \
+ get_user(__tmp,&(termio)->x); \
+ *(unsigned short *) &(termios)->x = __tmp; \
+}
+
+#define user_termio_to_kernel_termios(termios, termio) \
+({ \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
+ copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
+})
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+
+#define kernel_termios_to_user_termio(termio, termios) \
+({ \
+ put_user((termios)->c_iflag, &(termio)->c_iflag); \
+ put_user((termios)->c_oflag, &(termio)->c_oflag); \
+ put_user((termios)->c_cflag, &(termio)->c_cflag); \
+ put_user((termios)->c_lflag, &(termio)->c_lflag); \
+ put_user((termios)->c_line, &(termio)->c_line); \
+ copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
+})
+
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __KERNEL__ */
+
+#endif /* _XTENSA_TERMIOS_H */
diff --git a/include/asm-xtensa/thread_info.h b/include/asm-xtensa/thread_info.h
new file mode 100644
index 000000000000..af208d41fd82
--- /dev/null
+++ b/include/asm-xtensa/thread_info.h
@@ -0,0 +1,146 @@
+/*
+ * include/asm-xtensa/thread_info.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_THREAD_INFO_H
+#define _XTENSA_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+# include <asm/processor.h>
+#endif
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+
+#ifndef __ASSEMBLY__
+
+struct thread_info {
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ unsigned long flags; /* low level flags */
+ unsigned long status; /* thread-synchronous flags */
+ __u32 cpu; /* current CPU */
+ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/
+
+ mm_segment_t addr_limit; /* thread address space */
+ struct restart_block restart_block;
+
+
+};
+
+#else /* !__ASSEMBLY__ */
+
+/* offsets into the thread_info struct for assembly code access */
+#define TI_TASK 0x00000000
+#define TI_EXEC_DOMAIN 0x00000004
+#define TI_FLAGS 0x00000008
+#define TI_STATUS 0x0000000C
+#define TI_CPU 0x00000010
+#define TI_PRE_COUNT 0x00000014
+#define TI_ADDR_LIMIT 0x00000018
+#define TI_RESTART_BLOCK 0x0000001C
+
+#endif
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+
+#ifndef __ASSEMBLY__
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = 1, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/* how to get the thread information struct from C */
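+/* The kernel stack is THREAD_SIZE (8192 = 2^13 bytes) aligned and the
+ * thread_info lives at its base, so clearing the low 13 bits of the
+ * stack pointer yields the thread_info address: extui extracts bits
+ * [12:0] of a1 and the xor removes them. */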
+static inline struct thread_info *current_thread_info(void)
+{
+ struct thread_info *ti;
+ __asm__("extui %0,a1,0,13\n\t"
+ "xor %0, a1, %0" : "=&r" (ti) : );
+ return ti;
+}
+
+/* thread information allocation */
+#define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
+#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#else /* !__ASSEMBLY__ */
+
+/* how to get the thread information struct from ASM */
+#define GET_THREAD_INFO(reg,sp) \
+ extui reg, sp, 0, 13; \
+ xor reg, sp, reg
+#endif
+
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
+#define TIF_IRET 5 /* return with iret */
+#define TIF_MEMDIE 6
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
+#define _TIF_IRET (1<<TIF_IRET)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+
+#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
+#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
+
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
+
+#define THREAD_SIZE 8192 /* 2 * PAGE_SIZE */
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_THREAD_INFO_H */
diff --git a/include/asm-xtensa/timex.h b/include/asm-xtensa/timex.h
new file mode 100644
index 000000000000..d14a3755a12b
--- /dev/null
+++ b/include/asm-xtensa/timex.h
@@ -0,0 +1,94 @@
+/*
+ * include/asm-xtensa/timex.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TIMEX_H
+#define _XTENSA_TIMEX_H
+
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <linux/stringify.h>
+
+#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1
+# define LINUX_TIMER 0
+#elif XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1
+# define LINUX_TIMER 1
+#elif XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1
+# define LINUX_TIMER 2
+#else
+# error "Bad timer number for Linux configurations!"
+#endif
+
+#define LINUX_TIMER_INT XCHAL_TIMER_INTERRUPT(LINUX_TIMER)
+#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
+
+#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
+#define CLOCK_TICK_FACTOR 20 /* Factor of both 10^6 and CLOCK_TICK_RATE */
+#define FINETUNE ((((((long)LATCH * HZ - CLOCK_TICK_RATE) << SHIFT_HZ) * \
+ (1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
+ << (SHIFT_SCALE-SHIFT_HZ)) / HZ)
+
+#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
+extern unsigned long ccount_per_jiffy;
+extern unsigned long ccount_nsec;
+#define CCOUNT_PER_JIFFY ccount_per_jiffy
+#define CCOUNT_NSEC ccount_nsec
+#else
+#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
+#define CCOUNT_NSEC (1000000000UL / CONFIG_XTENSA_CPU_CLOCK)
+#endif
+
+
+typedef unsigned long long cycles_t;
+
+/*
+ * Only used for SMP.
+ */
+
+extern cycles_t cacheflush_time;
+
+#define get_cycles() (0)
+
+
+/*
+ * Register access.
+ */
+
+#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r))
+#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r))
+#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) :: "a"(r))
+#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) : "=a"(r))
+
+static inline unsigned long get_ccount (void)
+{
+ unsigned long ccount;
+ RSR_CCOUNT(ccount);
+ return ccount;
+}
+
+static inline void set_ccount (unsigned long ccount)
+{
+ WSR_CCOUNT(ccount);
+}
+
+static inline unsigned long get_linux_timer (void)
+{
+ unsigned ccompare;
+ RSR_CCOMPARE(LINUX_TIMER, ccompare);
+ return ccompare;
+}
+
+static inline void set_linux_timer (unsigned long ccompare)
+{
+ WSR_CCOMPARE(LINUX_TIMER, ccompare);
+}
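+
+/* Usage sketch: arming the next tick one jiffy ahead, as a periodic
+ * timer interrupt handler might (purely illustrative):
+ *
+ *	unsigned long next = get_linux_timer() + CCOUNT_PER_JIFFY;
+ *	set_linux_timer(next);
+ */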
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_TIMEX_H */
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h
new file mode 100644
index 000000000000..4562b2dcfbc0
--- /dev/null
+++ b/include/asm-xtensa/tlb.h
@@ -0,0 +1,25 @@
+/*
+ * include/asm-xtensa/tlb.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLB_H
+#define _XTENSA_TLB_H
+
+#define tlb_start_vma(tlb,vma) do { } while (0)
+#define tlb_end_vma(tlb,vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb,pte,addr) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include <asm-generic/tlb.h>
+#include <asm/page.h>
+
+#define __pte_free_tlb(tlb,pte) pte_free(pte)
+
+#endif /* _XTENSA_TLB_H */
diff --git a/include/asm-xtensa/tlbflush.h b/include/asm-xtensa/tlbflush.h
new file mode 100644
index 000000000000..23bfe9db45f5
--- /dev/null
+++ b/include/asm-xtensa/tlbflush.h
@@ -0,0 +1,200 @@
+/*
+ * include/asm-xtensa/tlbflush.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TLBFLUSH_H
+#define _XTENSA_TLBFLUSH_H
+
+#define DEBUG_TLB
+
+#ifdef __KERNEL__
+
+#include <asm/processor.h>
+#include <linux/stringify.h>
+
+/* TLB flushing:
+ *
+ * - flush_tlb_all() flushes all processes TLB entries
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB entries
+ * - flush_tlb_page(mm, vmaddr) flushes a single page
+ * - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct*);
+extern void flush_tlb_page(struct vm_area_struct*,unsigned long);
+extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
+
+#define flush_tlb_kernel_range(start,end) flush_tlb_all()
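+
+/* A minimal usage sketch (hypothetical caller, not part of this
+ * header): after modifying the page tables for a range of user
+ * addresses, flush the stale entries for exactly that range:
+ *
+ *	void example_update_range(struct vm_area_struct *vma,
+ *				  unsigned long start, unsigned long end)
+ *	{
+ *		... update the page tables for [start, end) ...
+ *		flush_tlb_range(vma, start, end);
+ *	}
+ */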
+
+
+/* This is called in munmap when we have freed up some page-table pages.
+ * We don't need to do anything here, there's nothing special about our
+ * page-table pages.
+ */
+
+extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+/* TLB operations. */
+
+#define ITLB_WAYS_LOG2 XCHAL_ITLB_WAY_BITS
+#define DTLB_WAYS_LOG2 XCHAL_DTLB_WAY_BITS
+#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
+#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
+
+extern inline unsigned long itlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+extern inline unsigned long dtlb_probe(unsigned long addr)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (tmp) : "a" (addr));
+ return tmp;
+}
+
+extern inline void invalidate_itlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("iitlb %0; isync\n\t" : : "a" (probe));
+}
+
+extern inline void invalidate_dtlb_entry (unsigned long probe)
+{
+ __asm__ __volatile__("idtlb %0; dsync\n\t" : : "a" (probe));
+}
+
+/* Use the .._no_isync functions with caution. Generally, these are
+ * handy for bulk invalidates followed by a single 'isync'. The
+ * caller must follow up with an 'isync', which can be relatively
+ * expensive on some Xtensa implementations.
+ */
+extern inline void invalidate_itlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("iitlb %0\n" : : "a" (entry) );
+}
+
+extern inline void invalidate_dtlb_entry_no_isync (unsigned entry)
+{
+ /* Caller must follow up with 'isync'. */
+ __asm__ __volatile__ ("idtlb %0\n" : : "a" (entry) );
+}
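+
+/* A minimal sketch of the bulk-invalidate pattern described above
+ * (example_probes and example_count are hypothetical):
+ *
+ *	while (example_count--)
+ *		invalidate_itlb_entry_no_isync(*example_probes++);
+ *	__asm__ __volatile__ ("isync");
+ */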
+
+extern inline void set_itlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, "__stringify(ITLBCFG)"\n\t" "isync\n\t"
+ : : "a" (val));
+}
+
+extern inline void set_dtlbcfg_register (unsigned long val)
+{
+ __asm__ __volatile__("wsr %0, "__stringify(DTLBCFG)"; dsync\n\t"
+ : : "a" (val));
+}
+
+extern inline void set_ptevaddr_register (unsigned long val)
+{
+ __asm__ __volatile__(" wsr %0, "__stringify(PTEVADDR)"; isync\n"
+ : : "a" (val));
+}
+
+extern inline unsigned long read_ptevaddr_register (void)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rsr %0, "__stringify(PTEVADDR)"\n\t" : "=a" (tmp));
+ return tmp;
+}
+
+extern inline void write_dtlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("wdtlb %1, %0; dsync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+extern inline void write_itlb_entry (pte_t entry, int way)
+{
+ __asm__ __volatile__("witlb %1, %0; isync\n\t"
+ : : "r" (way), "r" (entry) );
+}
+
+extern inline void invalidate_page_directory (void)
+{
+ invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
+}
+
+extern inline void invalidate_itlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
+ invalidate_itlb_entry (tlb_entry);
+}
+
+extern inline void invalidate_dtlb_mapping (unsigned address)
+{
+ unsigned long tlb_entry;
+ while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
+ invalidate_dtlb_entry (tlb_entry);
+}
+
+#define check_pgt_cache() do { } while (0)
+
+
+#ifdef DEBUG_TLB
+
+/* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
+ * ISA and exist only for test purposes.
+ * You may find them helpful for MMU debugging, however.
+ *
+ * 'at' is the unmodified input register
+ * 'as' is the output register, as follows (specific to the Linux config):
+ *
+ * as[31..12] contain the virtual address
+ * as[11..08] are meaningless
+ * as[07..00] contain the asid
+ */
+
+extern inline unsigned long read_dtlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+extern inline unsigned long read_dtlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("rdtlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+extern inline unsigned long read_itlb_virtual (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb0 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
+
+extern inline unsigned long read_itlb_translation (int way)
+{
+ unsigned long tmp;
+ __asm__ __volatile__("ritlb1 %0, %1\n\t" : "=a" (tmp), "+a" (way));
+ return tmp;
+}
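+
+/* A minimal decode sketch for the register layout documented above
+ * (hypothetical snippet, Linux config as described):
+ *
+ *	unsigned long as = read_dtlb_virtual(way);
+ *	unsigned long vaddr = as & 0xfffff000;	(as[31..12])
+ *	unsigned long asid  = as & 0x000000ff;	(as[07..00])
+ */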
+
+#endif /* DEBUG_TLB */
+
+
+#endif /* __KERNEL__ */
+#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/include/asm-xtensa/topology.h b/include/asm-xtensa/topology.h
new file mode 100644
index 000000000000..7309e38a0ccb
--- /dev/null
+++ b/include/asm-xtensa/topology.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/topology.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TOPOLOGY_H
+#define _XTENSA_TOPOLOGY_H
+
+#include <asm-generic/topology.h>
+
+#endif /* _XTENSA_TOPOLOGY_H */
diff --git a/include/asm-xtensa/types.h b/include/asm-xtensa/types.h
new file mode 100644
index 000000000000..ebac00469852
--- /dev/null
+++ b/include/asm-xtensa/types.h
@@ -0,0 +1,66 @@
+/*
+ * include/asm-xtensa/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_TYPES_H
+#define _XTENSA_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+
+typedef __signed__ char s8;
+typedef unsigned char u8;
+
+typedef __signed__ short s16;
+typedef unsigned short u16;
+
+typedef __signed__ int s32;
+typedef unsigned int u32;
+
+typedef __signed__ long long s64;
+typedef unsigned long long u64;
+
+
+#define BITS_PER_LONG 32
+
+/* Dma addresses are 32-bits wide. */
+
+typedef u32 dma_addr_t;
+
+typedef unsigned int kmem_bufctl_t;
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _XTENSA_TYPES_H */
diff --git a/include/asm-xtensa/uaccess.h b/include/asm-xtensa/uaccess.h
new file mode 100644
index 000000000000..35576b25c7b2
--- /dev/null
+++ b/include/asm-xtensa/uaccess.h
@@ -0,0 +1,532 @@
+/*
+ * include/asm-xtensa/uaccess.h
+ *
+ * User space memory access functions
+ *
+ * These routines provide basic accessing functions to the user memory
+ * space for the kernel. This header file provides functions such as
+ * access_ok(), get_user(), put_user(), and copy_to_user()/copy_from_user().
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UACCESS_H
+#define _XTENSA_UACCESS_H
+
+#include <linux/errno.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#ifdef __ASSEMBLY__
+
+#define _ASMLANGUAGE
+#include <asm/current.h>
+#include <asm/offsets.h>
+#include <asm/processor.h>
+
+/*
+ * These assembly macros mirror the C macros that follow below. They
+ * should always have identical functionality. See
+ * arch/xtensa/kernel/sys.S for usage.
+ */
+
+#define KERNEL_DS 0
+#define USER_DS 1
+
+#define get_ds (KERNEL_DS)
+
+/*
+ * get_fs reads current->thread.current_ds into a register.
+ * On Entry:
+ * <ad> anything
+ * <sp> stack
+ * On Exit:
+ * <ad> contains current->thread.current_ds
+ */
+ .macro get_fs ad, sp
+ GET_CURRENT(\ad,\sp)
+ l32i \ad, \ad, THREAD_CURRENT_DS
+ .endm
+
+/*
+ * set_fs sets current->thread.current_ds to some value.
+ * On Entry:
+ * <at> anything (temp register)
+ * <av> value to write
+ * <sp> stack
+ * On Exit:
+ * <at> destroyed (actually, current)
+ * <av> preserved, value to write
+ */
+ .macro set_fs at, av, sp
+ GET_CURRENT(\at,\sp)
+ s32i \av, \at, THREAD_CURRENT_DS
+ .endm
+
+/*
+ * kernel_ok determines whether we should bypass addr/size checking.
+ * See the equivalent C-macro version below for clarity.
+ * On success, kernel_ok branches to a label indicated by parameter
+ * <success>. This implies that the macro falls through to the next
+ * instruction on an error.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on error).
+ *
+ * On Entry:
+ * <at> anything (temp register)
+ * <success> label to branch to on success; implies
+ * fall-through macro on error
+ * <sp> stack pointer
+ * On Exit:
+ * <at> destroyed (actually, current->thread.current_ds)
+ */
+
+#if ((KERNEL_DS != 0) || (USER_DS == 0))
+# error Assembly macro kernel_ok fails
+#endif
+ .macro kernel_ok at, sp, success
+ get_fs \at, \sp
+ beqz \at, \success
+ .endm
+
+/*
+ * user_ok determines whether the access to user-space memory is allowed.
+ * See the equivalent C-macro version below for clarity.
+ *
+ * On error, user_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that while this macro can be used independently, we designed
+ * it for optimal use in the access_ok macro below (i.e., we fall
+ * through on success).
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed (actually, (TASK_SIZE + 1 - size))
+ */
+ .macro user_ok aa, as, at, error
+ movi \at, (TASK_SIZE+1)
+ bgeu \as, \at, \error
+ sub \at, \at, \as
+ bgeu \aa, \at, \error
+ .endm
+
+/*
+ * access_ok determines whether a memory access is allowed. See the
+ * equivalent C-macro version below for clarity.
+ *
+ * On error, access_ok branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ *	<sp>		stack pointer
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro access_ok aa, as, at, sp, error
+ kernel_ok \at, \sp, .Laccess_ok_\@
+ user_ok \aa, \as, \at, \error
+.Laccess_ok_\@:
+ .endm
+
+/*
+ * verify_area determines whether a memory access is allowed. It's
+ * mostly an unnecessary wrapper for access_ok, but we provide it as a
+ * duplicate of the verify_area() C inline function below. See the
+ * equivalent C version below for clarity.
+ *
+ * On error, verify_area branches to a label indicated by parameter
+ * <error>. This implies that the macro falls through to the next
+ * instruction on success.
+ *
+ * Note that we assume success is the common case, and we optimize the
+ * branch fall-through case on success.
+ *
+ * On Entry:
+ * <aa> register containing memory address
+ * <as> register containing memory size
+ * <at> temp register
+ * <error> label to branch to on error; implies fall-through
+ * macro on success
+ * On Exit:
+ * <aa> preserved
+ * <as> preserved
+ * <at> destroyed
+ */
+ .macro verify_area aa, as, at, sp, error
+	access_ok	\aa, \as, \at, \sp, \error
+ .endm
+
+
+#else /* __ASSEMBLY__ not defined */
+
+#include <linux/sched.h>
+#include <asm/types.h>
+
+/*
+ * The fs value determines whether argument validity checking should
+ * be performed or not. If get_fs() == USER_DS, checking is
+ * performed; with get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons (Data Segment Register?), these macros are
+ * grossly misnamed.
+ */
+
+#define KERNEL_DS ((mm_segment_t) { 0 })
+#define USER_DS ((mm_segment_t) { 1 })
+
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current->thread.current_ds)
+#define set_fs(val) (current->thread.current_ds = (val))
+
+#define segment_eq(a,b) ((a).seg == (b).seg)
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
+#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+
+extern inline int verify_area(int type, const void * addr, unsigned long size)
+{
+ return access_ok(type,addr,size) ? 0 : -EFAULT;
+}
+
+/*
+ * These are the main single-value transfer routines. They
+ * automatically use the right size if we just have the right pointer
+ * type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in
+ * "get_user()" and yet we don't want to do any pointers, because that
+ * is too much of a performance impact. Thus we have a few rather ugly
+ * macros here, and hide all the ugliness from the user.
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
+#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
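+
+/* A minimal usage sketch (hypothetical function, not part of this
+ * header): get_user() yields both an error code and a value.
+ *
+ *	int example_read_int(int *uaddr)
+ *	{
+ *		int val;
+ *		if (get_user(val, uaddr))
+ *			return -EFAULT;
+ *		return val;
+ *	}
+ */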
+
+/*
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
+#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
+
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x,ptr,size) \
+({ \
+ long __pu_err; \
+ __put_user_size((x),(ptr),(size),__pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_check(x,ptr,size) \
+({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) *__pu_addr = (ptr); \
+ if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
+ __put_user_size((x),__pu_addr,(size),__pu_err); \
+ __pu_err; \
+})
+
+#define __put_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __put_user_asm(x,ptr,retval,1,"s8i"); break; \
+ case 2: __put_user_asm(x,ptr,retval,2,"s16i"); break; \
+ case 4: __put_user_asm(x,ptr,retval,4,"s32i"); break; \
+ case 8: { \
+		__typeof__(*(ptr)) __v64 = (x);			\
+		retval = __copy_to_user((ptr),&__v64,8);	\
+ break; \
+ } \
+ default: __put_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * Consider a case of a user single load/store would cause both an
+ * unaligned exception and an MMU-related exception (unaligned
+ * exceptions happen first):
+ *
+ * User code passes a bad variable ptr to a system call.
+ * Kernel tries to access the variable.
+ * Unaligned exception occurs.
+ * Unaligned exception handler tries to make aligned accesses.
+ * Double exception occurs for MMU-related cause (e.g., page not mapped).
+ * do_page_fault() thinks the fault address belongs to the kernel, not the
+ * user, and panics.
+ *
+ * The kernel currently prohibits user unaligned accesses. We use the
+ * __check_align_* macros to check for unaligned addresses before
+ * accessing user space so we don't crash the kernel. Both
+ * __put_user_asm and __get_user_asm use these alignment macros, so
+ * macro-specific labels such as 0f, 1f, %0, %2, and %3 must stay in
+ * sync.
+ */
+
+#define __check_align_1 ""
+
+#define __check_align_2 \
+ " _bbci.l %2, 0, 1f \n" \
+ " movi %0, %3 \n" \
+ " _j 2f \n"
+
+#define __check_align_4 \
+ " _bbsi.l %2, 0, 0f \n" \
+ " _bbci.l %2, 1, 1f \n" \
+ "0: movi %0, %3 \n" \
+ " _j 2f \n"
+
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ *
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __put_user_asm(x, addr, err, align, insn) \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %1, %2, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %2, 4b \n" \
+ " movi %0, %3 \n" \
+ " jx %2 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err) \
+ :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __get_user_nocheck(x,ptr,size) \
+({ \
+ long __gu_err, __gu_val; \
+ __get_user_size(__gu_val,(ptr),(size),__gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+#define __get_user_check(x,ptr,size) \
+({ \
+ long __gu_err = -EFAULT, __gu_val = 0; \
+ const __typeof__(*(ptr)) *__gu_addr = (ptr); \
+ if (access_ok(VERIFY_READ,__gu_addr,size)) \
+ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x,ptr,size,retval) \
+do { \
+ retval = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(x,ptr,retval,1,"l8ui"); break; \
+ case 2: __get_user_asm(x,ptr,retval,2,"l16ui"); break; \
+ case 4: __get_user_asm(x,ptr,retval,4,"l32i"); break; \
+ case 8: retval = __copy_from_user(&x,ptr,8); break; \
+ default: (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+
+/*
+ * WARNING: If you modify this macro at all, verify that the
+ * __check_align_* macros still work.
+ */
+#define __get_user_asm(x, addr, err, align, insn) \
+ __asm__ __volatile__( \
+ __check_align_##align \
+ "1: "insn" %1, %2, 0 \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: \n" \
+ " .long 2b \n" \
+ "5: \n" \
+ " l32r %2, 4b \n" \
+ " movi %1, 0 \n" \
+ " movi %0, %3 \n" \
+ " jx %2 \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .long 1b, 5b \n" \
+ " .previous" \
+ :"=r" (err), "=r" (x) \
+ :"r" (addr), "i" (-EFAULT), "0" (err))
+
+
+/*
+ * Copy to/from user space
+ */
+
+/*
+ * We use a generic, arbitrary-sized copy subroutine. The Xtensa
+ * architecture would cause heavy code bloat if we tried to inline
+ * these functions and provide __constant_copy_* equivalents like the
+ * i386 versions. __xtensa_copy_user is quite efficient. See the
+ * .fixup section of __xtensa_copy_user for a discussion on the
+ * X_zeroing equivalents for Xtensa.
+ */
+
+extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
+#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
+
+
+static inline unsigned long
+__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ return __copy_user(to,from,n);
+}
+
+static inline unsigned long
+__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
+{
+ return __copy_user(to,from,n);
+}
+
+static inline unsigned long
+__generic_copy_to_user(void *to, const void *from, unsigned long n)
+{
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_user(to,from,n);
+ return n;
+}
+
+static inline unsigned long
+__generic_copy_from_user(void *to, const void *from, unsigned long n)
+{
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_user(to,from,n);
+ else
+ memset(to, 0, n);
+ return n;
+}
+
+#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
+#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
+#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
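+
+/* A minimal usage sketch (hypothetical function and struct, not part
+ * of this header): copy_from_user() returns the number of bytes NOT
+ * copied, so any nonzero result indicates a fault.
+ *
+ *	long example_fetch(struct example *dst, const struct example *usrc)
+ *	{
+ *		if (copy_from_user(dst, usrc, sizeof(*dst)))
+ *			return -EFAULT;
+ *		return 0;
+ *	}
+ */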
+
+
+/*
+ * We need to return the number of bytes not cleared. Our memset()
+ * returns zero if a problem occurs while accessing user-space memory.
+ * In that event, return no memory cleared. Otherwise, zero for
+ * success.
+ */
+
+extern inline unsigned long
+__xtensa_clear_user(void *addr, unsigned long size)
+{
+	if (!memset(addr, 0, size))
+ return size;
+ return 0;
+}
+
+extern inline unsigned long
+clear_user(void *addr, unsigned long size)
+{
+ if (access_ok(VERIFY_WRITE, addr, size))
+ return __xtensa_clear_user(addr, size);
+ return size ? -EFAULT : 0;
+}
+
+#define __clear_user __xtensa_clear_user
+
+
+extern long __strncpy_user(char *, const char *, long);
+#define __strncpy_from_user __strncpy_user
+
+extern inline long
+strncpy_from_user(char *dst, const char *src, long count)
+{
+ if (access_ok(VERIFY_READ, src, 1))
+ return __strncpy_from_user(dst, src, count);
+ return -EFAULT;
+}
+
+
+#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)
+
+/*
+ * Return the size of a string (including the ending 0!)
+ */
+extern long __strnlen_user(const char *, long);
+
+extern inline long strnlen_user(const char *str, long len)
+{
+ unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
+
+ if ((unsigned long)str > top)
+ return 0;
+ return __strnlen_user(str, len);
+}
+
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if the exception is not found, and the fixup unit otherwise. */
+
+extern unsigned long search_exception_table(unsigned long addr);
+extern void sort_exception_table(void);
+
+/* Returns the new pc */
+#define fixup_exception(map_reg, fixup_unit, pc) \
+({ \
+ fixup_unit; \
+})
+
+#endif /* __ASSEMBLY__ */
+#endif /* _XTENSA_UACCESS_H */
diff --git a/include/asm-xtensa/ucontext.h b/include/asm-xtensa/ucontext.h
new file mode 100644
index 000000000000..94c94ed3e00a
--- /dev/null
+++ b/include/asm-xtensa/ucontext.h
@@ -0,0 +1,22 @@
+/*
+ * include/asm-xtensa/ucontext.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UCONTEXT_H
+#define _XTENSA_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _XTENSA_UCONTEXT_H */
diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h
new file mode 100644
index 000000000000..28220890d0a6
--- /dev/null
+++ b/include/asm-xtensa/unaligned.h
@@ -0,0 +1,28 @@
+/*
+ * include/asm-xtensa/unaligned.h
+ *
+ * Xtensa doesn't handle unaligned accesses efficiently.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UNALIGNED_H
+#define _XTENSA_UNALIGNED_H
+
+#include <linux/string.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) \
+ ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
+
+#define put_unaligned(val, ptr) \
+ ({ __typeof__(*(ptr)) __tmp = (val); \
+ memmove((ptr), &__tmp, sizeof(*(ptr))); \
+ (void)0; })
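+
+/* A minimal usage sketch (hypothetical buffer): access a u32 at an
+ * arbitrarily aligned position in a byte stream.
+ *
+ *	unsigned char buf[16];
+ *	u32 v = get_unaligned((u32 *)(buf + 1));
+ *	put_unaligned(v, (u32 *)(buf + 3));
+ */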
+
+#endif /* _XTENSA_UNALIGNED_H */
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
new file mode 100644
index 000000000000..64c64dd83ba4
--- /dev/null
+++ b/include/asm-xtensa/unistd.h
@@ -0,0 +1,537 @@
+/*
+ * include/asm-xtensa/unistd.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+
+#include <linux/linkage.h>
+
+//#define __NR_setup 0 /* used only by init, to get system going */
+#define __NR_spill 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+#define __NR_waitpid 7
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+#define __NR_break 17
+#define __NR_oldstat 18
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_oldumount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+#define __NR_oldfstat 28
+#define __NR_pause 29
+#define __NR_utime 30
+#define __NR_stty 31
+#define __NR_gtty 32
+#define __NR_access 33
+#define __NR_nice 34
+#define __NR_ftime 35
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+#define __NR_prof 44
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+#define __NR_signal 48
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount 52
+#define __NR_lock 53
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+#define __NR_mpx 56
+#define __NR_setpgid 57
+#define __NR_ulimit 58
+#define __NR_oldolduname 59
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+#define __NR_sgetmask 68
+#define __NR_ssetmask 69
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+#define __NR_oldlstat 84
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+#define __NR_profil 98
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+#define __NR_ioperm 101
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+#define __NR_olduname 109
+#define __NR_iopl 110
+#define __NR_vhangup 111
+#define __NR_idle 112
+#define __NR_vm86 113
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+#define __NR_modify_ldt 123
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+#define __NR_create_module 127
+#define __NR_init_module 128
+#define __NR_delete_module 129
+#define __NR_get_kernel_syms 130
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_cacheflush 147
+#define __NR_cachectl 148
+#define __NR_sysxtensa 149
+#define __NR_sysdummy 150
+#define __NR_getsid 151
+#define __NR_fdatasync 152
+#define __NR__sysctl 153
+#define __NR_mlock 154
+#define __NR_munlock 155
+#define __NR_mlockall 156
+#define __NR_munlockall 157
+#define __NR_sched_setparam 158
+#define __NR_sched_getparam 159
+#define __NR_sched_setscheduler 160
+#define __NR_sched_getscheduler 161
+#define __NR_sched_yield 162
+#define __NR_sched_get_priority_max 163
+#define __NR_sched_get_priority_min 164
+#define __NR_sched_rr_get_interval 165
+#define __NR_nanosleep 166
+#define __NR_mremap 167
+#define __NR_accept 168
+#define __NR_bind 169
+#define __NR_connect 170
+#define __NR_getpeername 171
+#define __NR_getsockname 172
+#define __NR_getsockopt 173
+#define __NR_listen 174
+#define __NR_recv 175
+#define __NR_recvfrom 176
+#define __NR_recvmsg 177
+#define __NR_send 178
+#define __NR_sendmsg 179
+#define __NR_sendto 180
+#define __NR_setsockopt 181
+#define __NR_shutdown 182
+#define __NR_socket 183
+#define __NR_socketpair 184
+#define __NR_setresuid 185
+#define __NR_getresuid 186
+#define __NR_query_module 187
+#define __NR_poll 188
+#define __NR_nfsservctl 189
+#define __NR_setresgid 190
+#define __NR_getresgid 191
+#define __NR_prctl 192
+#define __NR_rt_sigreturn 193
+#define __NR_rt_sigaction 194
+#define __NR_rt_sigprocmask 195
+#define __NR_rt_sigpending 196
+#define __NR_rt_sigtimedwait 197
+#define __NR_rt_sigqueueinfo 198
+#define __NR_rt_sigsuspend 199
+#define __NR_pread 200
+#define __NR_pwrite 201
+#define __NR_chown 202
+#define __NR_getcwd 203
+#define __NR_capget 204
+#define __NR_capset 205
+#define __NR_sigaltstack 206
+#define __NR_sendfile 207
+#define __NR_streams1 208 /* some people actually want it */
+#define __NR_streams2 209 /* some people actually want it */
+#define __NR_mmap2 210
+#define __NR_truncate64 211
+#define __NR_ftruncate64 212
+#define __NR_stat64 213
+#define __NR_lstat64 214
+#define __NR_fstat64 215
+#define __NR_pivot_root 216
+#define __NR_mincore 217
+#define __NR_madvise 218
+#define __NR_getdents64 219
+#define __NR_vfork 220
+
+/* Keep this last; should always equal the last valid call number. */
+#define __NR_Linux_syscalls 220
+
+/* User-visible error numbers are in the range -1 to -125: see
+ * <asm-xtensa/errno.h> */
+
+#define SYSXTENSA_RESERVED 0 /* don't use this */
+#define SYSXTENSA_ATOMIC_SET 1 /* set variable */
+#define SYSXTENSA_ATOMIC_EXG_ADD 2 /* exchange memory and add */
+#define SYSXTENSA_ATOMIC_ADD 3 /* add to memory */
+#define SYSXTENSA_ATOMIC_CMP_SWP 4 /* compare and swap */
+
+#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/
+
+#ifdef __KERNEL__
+#define __syscall_return(type, res) return ((type)(res))
+#else
+#define __syscall_return(type, res) \
+do { \
+ if ((unsigned long)(res) >= (unsigned long)(-125)) { \
+ /* Avoid using "res" which is declared to be in register r2; \
+ * errno might expand to a function call and clobber it. */ \
+ int __err = -(res); \
+ errno = __err; \
+ res = -1; \
+ } \
+ return (type) (res); \
+} while (0)
+#endif
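+
+/* Worked example of the user-space error mapping above: a syscall
+ * returning -EFAULT (-14) satisfies (unsigned long)(-14) >=
+ * (unsigned long)(-125), so the wrapper sets errno to 14 and
+ * returns -1. */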
+
+
+/* Tensilica's xt-xcc compiler is much more aggressive at code
+ * optimization than gcc. Multiple __asm__ statements are
+ * insufficient for xt-xcc because subsequent optimization passes
+ * (beyond the front-end that knows of __asm__ statements and other
+ * such GNU Extensions to C) can modify the register selection for
+ * containment of C variables.
+ *
+ * xt-xcc cannot modify the contents of a single __asm__ statement, so
+ * we create single-asm versions of the syscall macros that are
+ * suitable and optimal for both xt-xcc and gcc.
+ *
+ * Linux takes system-call arguments in registers. The following
+ * design is optimized for user-land apps (e.g., glibc) which
+ * typically have a function wrapper around the "syscall" assembly
+ * instruction. It satisfies the Xtensa ABI while minimizing argument
+ * shifting.
+ *
+ * The Xtensa ABI and software conventions require the system-call
+ * number in a2. If an argument exists in a2, we move it to the next
+ * available register. Note that for improved efficiency, we do NOT
+ * shift all parameters down one register to maintain the original
+ * order.
+ *
+ * At best case (zero arguments), we just write the syscall number to
+ * a2. At worst case (1 to 6 arguments), we move the argument in a2
+ * to the next available register, then write the syscall number to
+ * a2.
+ *
+ * For clarity, the following truth table enumerates all possibilities.
+ *
+ * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
+ * --------- -------------- ----------------------------------
+ * 0 a2
+ * 1 a2 a3
+ * 2 a2 a4, a3
+ * 3 a2 a5, a3, a4
+ * 4 a2 a6, a3, a4, a5
+ * 5 a2 a7, a3, a4, a5, a6
+ * 6 a2 a8, a3, a4, a5, a6, a7
+ */
+
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name) \
+ : "a2" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall1(type,name,type0,arg0) \
+type name(type0 arg0) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a3, %2 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0) \
+ : "a2", "a3" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall2(type,name,type0,arg0,type1,arg1) \
+type name(type0 arg0,type1 arg1) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a4, %2 \n" \
+ " mov a3, %3 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0), "a" (arg1) \
+ : "a2", "a3", "a4" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
+type name(type0 arg0,type1 arg1,type2 arg2) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a5, %2 \n" \
+ " mov a4, %4 \n" \
+ " mov a3, %3 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
+ : "a2", "a3", "a4", "a5" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
+type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a6, %2 \n" \
+ " mov a5, %5 \n" \
+ " mov a4, %4 \n" \
+ " mov a3, %3 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
+ : "a2", "a3", "a4", "a5", "a6" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+/* Note that we save and restore the a7 frame pointer.
+ * Including a7 in the clobber list doesn't do what you'd expect.
+ */
+#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
+type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a9, a7 \n" \
+ " mov a7, %2 \n" \
+ " mov a6, %6 \n" \
+ " mov a5, %5 \n" \
+ " mov a4, %4 \n" \
+ " mov a3, %3 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov a7, a9 \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
+ "a" (arg3), "a" (arg4) \
+ : "a2", "a3", "a4", "a5", "a6", "a9" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+/* Note that we save and restore the a7 frame pointer.
+ * Including a7 in the clobber list doesn't do what you'd expect.
+ */
+#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
+type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
+{ \
+long __res; \
+__asm__ __volatile__ ( \
+ " mov a9, a7 \n" \
+ " mov a8, %2 \n" \
+ " mov a7, %7 \n" \
+ " mov a6, %6 \n" \
+ " mov a5, %5 \n" \
+ " mov a4, %4 \n" \
+ " mov a3, %3 \n" \
+ " movi a2, %1 \n" \
+ " syscall \n" \
+ " mov a7, a9 \n" \
+ " mov %0, a2 \n" \
+ : "=a" (__res) \
+ : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
+ "a" (arg3), "a" (arg4), "a" (arg5) \
+ : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
+ ); \
+__syscall_return(type,__res); \
+}
+
+
+#ifdef __KERNEL_SYSCALLS__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/syscalls.h>
+
+/*
+ * we need this inline - forking from kernel space will result
+ * in NO COPY ON WRITE (!!!), until an execve is executed. This
+ * is no problem except for the stack. This is handled by not letting
+ * main() use the stack at all after fork(). Thus, no function
+ * calls - which means inline code for fork too, as otherwise we
+ * would use the stack upon exit from 'fork()'.
+ *
+ * Actually only pause and fork are needed inline, so that there
+ * won't be any messing with the stack from main(), but we define
+ * some others too.
+ */
+
+#define __NR__exit __NR_exit
+
+static __inline__ _syscall0(int,pause)
+//static __inline__ _syscall1(int,setup,int,magic) FIXME
+static __inline__ _syscall0(int,sync)
+static __inline__ _syscall0(pid_t,setsid)
+static __inline__ _syscall3(int,write,int,fd,const char *,buf,off_t,count)
+static __inline__ _syscall3(int,read,int,fd,char *,buf,off_t,count)
+static __inline__ _syscall3(off_t,lseek,int,fd,off_t,offset,int,count)
+static __inline__ _syscall1(int,dup,int,fd)
+static __inline__ _syscall3(int,execve,const char*,file,char**,argv,char**,envp)
+static __inline__ _syscall3(int,open,const char *,file,int,flag,int,mode)
+static __inline__ _syscall1(int,close,int,fd)
+static __inline__ _syscall1(int,_exit,int,exitcode)
+static __inline__ _syscall3(pid_t,waitpid,pid_t,pid,int *,wait_stat,int,options)
+static __inline__ _syscall1(int,delete_module,const char *,name)
+
+struct stat;
+static __inline__ _syscall2(int,fstat,int,fd,struct stat *,buf)
+static __inline__ _syscall0(pid_t,getpid)
+static __inline__ _syscall2(int,kill,int,pid,int,sig)
+static __inline__ _syscall2(int,stat,const char *, path,struct stat *,buf)
+static __inline__ _syscall1(int,unlink,char *,pathname)
+
+
+
+extern pid_t waitpid(int, int*, int );
+static __inline__ pid_t wait(int * wait_stat)
+{
+ return waitpid(-1,wait_stat,0);
+}
+#endif
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall");
+
+#ifdef __KERNEL__
+#define __ARCH_WANT_IPC_PARSE_VERSION
+#define __ARCH_WANT_OLD_READDIR
+#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_ALARM
+#define __ARCH_WANT_SYS_GETHOSTNAME
+#define __ARCH_WANT_SYS_PAUSE
+#define __ARCH_WANT_SYS_SGETMASK
+#define __ARCH_WANT_SYS_SIGNAL
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_WAITPID
+#define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_FADVISE64
+#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_NICE
+#define __ARCH_WANT_SYS_OLD_GETRLIMIT
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_SIGPROCMASK
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#endif
+
+
+
+#endif /* _XTENSA_UNISTD_H */
diff --git a/include/asm-xtensa/user.h b/include/asm-xtensa/user.h
new file mode 100644
index 000000000000..2c3ed23354a8
--- /dev/null
+++ b/include/asm-xtensa/user.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-xtensa/user.h
+ *
+ * Xtensa Processor version.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_USER_H
+#define _XTENSA_USER_H
+
+/* This file usually defines a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on Xtensa.
+ */
+
+#endif /* _XTENSA_USER_H */
diff --git a/include/asm-xtensa/vga.h b/include/asm-xtensa/vga.h
new file mode 100644
index 000000000000..23d82f6acb57
--- /dev/null
+++ b/include/asm-xtensa/vga.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-xtensa/vga.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_VGA_H
+#define _XTENSA_VGA_H
+
+#define VGA_MAP_MEM(x) (unsigned long)phys_to_virt(x)
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x,y) (*(y) = (x))
+
+#endif
diff --git a/include/asm-xtensa/xor.h b/include/asm-xtensa/xor.h
new file mode 100644
index 000000000000..e7b1f083991d
--- /dev/null
+++ b/include/asm-xtensa/xor.h
@@ -0,0 +1,16 @@
+/*
+ * include/asm-xtensa/xor.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_XOR_H
+#define _XTENSA_XOR_H
+
+#include <asm-generic/xor.h>
+
+#endif
diff --git a/include/asm-xtensa/xtensa/cacheasm.h b/include/asm-xtensa/xtensa/cacheasm.h
new file mode 100644
index 000000000000..0cdbb0bf180e
--- /dev/null
+++ b/include/asm-xtensa/xtensa/cacheasm.h
@@ -0,0 +1,708 @@
+#ifndef XTENSA_CACHEASM_H
+#define XTENSA_CACHEASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/cacheasm.h -- assembler-specific cache
+ * related definitions that depend on CORE configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/coreasm.h>
+
+
+/*
+ * This header file defines assembler macros of the form:
+ * <x>cache_<func>
+ * where <x> is 'i' or 'd' for instruction and data caches,
+ * and <func> indicates the function of the macro.
+ *
+ * The following functions <func> are defined,
+ * and apply only to the specified cache (I or D):
+ *
+ * reset
+ * Resets the cache.
+ *
+ * sync
+ * Makes sure any previous cache instructions have been completed;
+ * ie. makes sure any previous cache control operations
+ * have had full effect and been synchronized to memory.
+ * Eg. any invalidate completed [so as not to generate a hit],
+ * any writebacks or other pipelined writes written to memory, etc.
+ *
+ * invalidate_line (single cache line)
+ * invalidate_region (specified memory range)
+ * invalidate_all (entire cache)
+ * Invalidates all cache entries that cache
+ * data from the specified memory range.
+ * NOTE: locked entries are not invalidated.
+ *
+ * writeback_line (single cache line)
+ * writeback_region (specified memory range)
+ * writeback_all (entire cache)
+ * Writes back to memory all dirty cache entries
+ * that cache data from the specified memory range,
+ * and marks these entries as clean.
+ * NOTE: on some future implementations, this might
+ * also invalidate.
+ * NOTE: locked entries are written back, but never invalidated.
+ * NOTE: instruction caches never implement writeback.
+ *
+ * writeback_inv_line (single cache line)
+ * writeback_inv_region (specified memory range)
+ * writeback_inv_all (entire cache)
+ * Writes back to memory all dirty cache entries
+ * that cache data from the specified memory range,
+ * and invalidates these entries (including all clean
+ * cache entries that cache data from that range).
+ * NOTE: locked entries are written back but not invalidated.
+ * NOTE: instruction caches never implement writeback.
+ *
+ * lock_line (single cache line)
+ * lock_region (specified memory range)
+ * Prefetch and lock the specified memory range into cache.
+ * NOTE: if any part of the specified memory range cannot
+ * be locked, a ??? exception occurs. These macros don't
+ * do anything special (yet anyway) to handle this situation.
+ *
+ * unlock_line (single cache line)
+ * unlock_region (specified memory range)
+ * unlock_all (entire cache)
+ * Unlock cache entries that cache the specified memory range.
+ * Entries not already locked are unaffected.
+ */
+
+
+
+/*************************** GENERIC -- ALL CACHES ***************************/
+
+
+/*
+ * The following macros assume the following cache size/parameter limits
+ * in the current Xtensa core implementation:
+ * cache size: 1024 bytes minimum
+ * line size: 16 - 64 bytes
+ * way count: 1 - 4
+ *
+ * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4
+ * Hence the assumption that each loop can execute four cache instructions.
+ *
+ * Correspondingly, the offset range of instructions is assumed able to cover
+ * four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for
+ * both hit and indexed cache instructions. Ie. these offsets are all
+ * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64).
+ * This is true of all original cache instructions
+ * (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets
+ * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2).
+ * This is also true of subsequent cache instructions
+ * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets
+ * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4).
+ *
+ * (Maximum cache size, currently 32k, doesn't affect the following macros.
+ * Cache ways > MMU min page size cause aliasing but that's another matter.)
+ */
+
+
+
+/*
+ * Macro to apply an 'indexed' cache instruction to the entire cache.
+ *
+ * Parameters:
+ *	cainst		instruction/macro that takes an address register parameter
+ * and an offset parameter (in range 0 .. 3*linesize).
+ * size size of cache in bytes
+ * linesize size of cache line in bytes
+ * assoc_or1 number of associativities (ways/sets) in cache
+ * if all sets affected by cainst,
+ * or 1 if only one set (or not all sets) of the cache
+ * is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]).
+ * aa, ab unique address registers (temporaries)
+ */
+
+ .macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab
+
+ // Sanity-check on cache parameters:
+ .ifne (\size % (\linesize * \assoc_or1 * 4))
+ .err // cache configuration outside expected/supported range!
+ .endif
+
+ // \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
+ movi \aa, (\size / (\linesize * \assoc_or1 * 4))
+ // Possible improvement: need only loop if \aa > 1 ;
+ // however that particular condition is highly unlikely.
+ movi \ab, 0 // to iterate over cache
+ floop \aa, cachex\@
+ \cainst \ab, 0*\linesize
+ \cainst \ab, 1*\linesize
+ \cainst \ab, 2*\linesize
+ \cainst \ab, 3*\linesize
+ addi \ab, \ab, 4*\linesize // move to next line
+ floopend \aa, cachex\@
+
+ .endm
+
+
+/*
+ * Macro to apply a 'hit' cache instruction to a memory region,
+ * ie. to any cache entries that cache a specified portion (region) of memory.
+ * Takes care of the unaligned cases, ie. may apply to one
+ * more cache line than \asize / linesize if \addr is not aligned.
+ *
+ *
+ * Parameters are:
+ * cainst instruction/macro that takes an address register parameter
+ * and an offset parameter (currently always zero)
+ * and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.)
+ * linesize_log2 log2(size of cache line in bytes)
+ * addr register containing start address of region (clobbered)
+ * asize register containing size of the region in bytes (clobbered)
+ * askew unique register used as temporary
+ *
+ * !?!?! 2DO: optimization: iterate max(cache_size and \asize) / linesize
+ */
+
+ .macro cache_hit_region cainst, linesize_log2, addr, asize, askew
+
+ // Make \asize the number of iterations:
+ extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr
+ add \asize, \asize, \askew // ... and add it to \asize
+ addi \asize, \asize, (1 << \linesize_log2) - 1 // round up!
+ srli \asize, \asize, \linesize_log2
+
+ // Iterate over region:
+ floopnez \asize, cacheh\@
+ \cainst \addr, 0
+ addi \addr, \addr, (1 << \linesize_log2) // move to next line
+ floopend \asize, cacheh\@
+
+ .endm
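+
+	// Worked example of the iteration count above (assumed values):
+	// with 16-byte lines (linesize_log2 = 4), \addr = 0x1008 and
+	// \asize = 0x20, the unalignment is 8, so the loop executes
+	// (0x20 + 8 + 15) >> 4 = 3 times, covering lines 0x1000..0x102f.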
+
+
+
+
+
+/*************************** INSTRUCTION CACHE ***************************/
+
+
+/*
+ * Reset/initialize the instruction cache by simply invalidating it:
+ * (need to unlock first also, if cache locking implemented):
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_reset aa, ab
+ icache_unlock_all \aa, \ab
+ icache_invalidate_all \aa, \ab
+ .endm
+
+
+/*
+ * Synchronize after an instruction cache operation,
+ * to be sure everything is in sync with memory as to be
+ * expected following any previous instruction cache control operations.
+ *
+ * Parameters are:
+ * ar an address register (temporary) (currently unused, but may be used in future)
+ */
+ .macro icache_sync ar
+#if XCHAL_ICACHE_SIZE > 0
+ isync
+#endif
+ .endm
+
+
+
+/*
+ * Invalidate a single line of the instruction cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset (optional) offset to add to \ar to compute effective address to invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_invalidate_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0
+ ihi \ar, \offset // invalidate icache line
+ /*
+ * NOTE: in some version of the silicon [!!!SHOULD HAVE BEEN DOCUMENTED!!!]
+ * 'ihi' doesn't work, so it had been replaced with 'iii'
+ * (which would just invalidate more than it should,
+ * which should be okay other than the performance hit
+ * because cache locking did not exist in that version,
+ * unless user somehow relies on something being cached).
+ * [WHAT VERSION IS IT!!?!?
+ * IS THERE ANY WAY TO TEST FOR THAT HERE, TO OUTPUT 'III' ONLY IF NEEDED!?!?].
+ *
+ * iii \ar, \offset
+ */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+
+/*
+ * Invalidate instruction cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_invalidate_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0
+ // Instruction cache region invalidation:
+ cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Invalidate entire instruction cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_invalidate_all aa, ab
+#if XCHAL_ICACHE_SIZE > 0
+ // Instruction cache invalidation:
+ cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab
+ icache_sync \aa
+ // End of instruction cache invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a single line of the instruction cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to lock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to lock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_lock_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ ipfl \ar, \offset /* prefetch and lock icache line */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a specified portion of memory into the instruction cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_lock_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache region lock:
+ cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region lock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a single line of the instruction cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to unlock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to unlock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro icache_unlock_line ar, offset
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ ihu \ar, \offset /* unlock icache line */
+ icache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a specified portion of memory from the instruction cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro icache_unlock_region astart, asize, ac
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache region unlock:
+ cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
+ icache_sync \ac
+ // End of instruction cache region unlock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock entire instruction cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro icache_unlock_all aa, ab
+#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
+ // Instruction cache unlock:
+ cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab
+ icache_sync \aa
+ // End of instruction cache unlock
+#endif
+ .endm
+
+
+
+
+
+/*************************** DATA CACHE ***************************/
+
+
+
+/*
+ * Reset/initialize the data cache by simply invalidating it
+ * (need to unlock first also, if cache locking implemented):
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_reset aa, ab
+ dcache_unlock_all \aa, \ab
+ dcache_invalidate_all \aa, \ab
+ .endm
+
+
+
+
+/*
+ * Synchronize after a data cache operation,
+ * to be sure everything is in sync with memory as to be
+ * expected following any previous data cache control operations.
+ *
+ * Parameters are:
+ * ar an address register (temporary) (currently unused, but may be used in future)
+ */
+ .macro dcache_sync ar
+#if XCHAL_DCACHE_SIZE > 0
+	// The following commented-out sequence errs on the conservative side (too much so); a DSYNC should be sufficient:
+ //memw // synchronize data cache changes relative to subsequent memory accesses
+ //isync // be conservative and ISYNC as well (just to be sure)
+
+ dsync
+#endif
+ .endm
+
+
+
+/*
+ * Synchronize after a data store operation,
+ * to be sure the stored data is completely off the processor
+ * (and assuming there is no buffering outside the processor,
+ * that the data is in memory). This may be required to
+ * ensure that the processor's write buffers are emptied.
+ * A MEMW followed by a read guarantees this, by definition.
+ * We also try to make sure the read itself completes.
+ *
+ * Parameters are:
+ * ar an address register (temporary)
+ */
+ .macro write_sync ar
+ memw // ensure previous memory accesses are complete prior to subsequent memory accesses
+ l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW
+ //slot
+ add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures)
+ .endm
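+
+/*
+ * Example usage (illustrative sketch): ensure a store to a device
+ * register has left the processor before proceeding. Note that the
+ * macro performs a dummy load from (sp+0), so sp must point to
+ * readable memory; a4 is a scratch register:
+ *
+ *	s32i	a2, a3, 0	// store value in a2 to register at a3
+ *	write_sync a4
+ */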
+
+
+/*
+ * Invalidate a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset (optional) offset to add to \ar to compute effective address to invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_invalidate_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0
+ dhi \ar, \offset
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+
+
+/*
+ * Invalidate data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_invalidate_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache region invalidation:
+ cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region invalidation
+#endif
+ .endm
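+
+/*
+ * Example usage (illustrative sketch): discard stale cached copies of
+ * an incoming DMA buffer before reading it; buffer address and size
+ * are assumed to be in a2 and a3 (both get clobbered), with a4 as the
+ * temporary:
+ *
+ *	dcache_invalidate_region a2, a3, a4
+ */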
+
+
+
+#if 0
+/*
+ * This is a work-around for a bug in SiChip1 (???).
+ * There should be a proper mechanism for not outputting
+ * these instructions when not needed.
+ * To enable the work-around, uncomment this and replace 'dii'
+ * with 'dii_s1' everywhere, eg. in dcache_invalidate_all
+ * macro below.
+ */
+ .macro dii_s1 ar, offset
+ dii \ar, \offset
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ or \ar, \ar, \ar
+ .endm
+#endif
+
+
+/*
+ * Invalidate entire data cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_invalidate_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache invalidation:
+ cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab
+ dcache_sync \aa
+ // End of data cache invalidation
+#endif
+ .endm
+
+
+
+/*
+ * Writeback a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to writeback
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to writeback
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_writeback_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ dhwb \ar, \offset
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Writeback dirty data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_writeback_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ // Data cache region writeback:
+ cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region writeback
+#endif
+ .endm
+
+
+
+/*
+ * Writeback entire data cache.
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_writeback_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ // Data cache writeback:
+ cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+ // End of data cache writeback
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate a single line of the data cache.
+ * Parameters are:
+ * ar address register that contains (virtual) address to writeback and invalidate
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to writeback and invalidate
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_writeback_inv_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0
+ dhwbi \ar, \offset /* writeback and invalidate dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate data cache entries that cache a specified portion of memory.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_writeback_inv_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache region writeback and invalidate:
+ cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region writeback and invalidate
+#endif
+ .endm
+
+
+
+/*
+ * Writeback and invalidate entire data cache.
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_writeback_inv_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0
+ // Data cache writeback and invalidate:
+#if XCHAL_DCACHE_IS_WRITEBACK
+ cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+#else /*writeback*/
+	// Data cache does not support writeback, so just invalidate:
+ dcache_invalidate_all \aa, \ab
+#endif /*writeback*/
+ // End of data cache writeback and invalidate
+#endif
+ .endm
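+
+/*
+ * Example usage (illustrative sketch): flush and discard the entire
+ * D-cache, eg. before handing memory over to another bus master:
+ *
+ *	dcache_writeback_inv_all a2, a3
+ */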
+
+
+
+
+/*
+ * Lock (prefetch & lock) a single line of the data cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to lock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to lock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_lock_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ dpfl \ar, \offset /* prefetch and lock dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Lock (prefetch & lock) a specified portion of memory into the data cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_lock_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache region lock:
+ cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region lock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a single line of the data cache.
+ *
+ * Parameters are:
+ * ar address register that contains (virtual) address to unlock
+ * (may get clobbered in a future implementation, but not currently)
+ * offset offset to add to \ar to compute effective address to unlock
+ * (note: some number of lsbits are ignored)
+ */
+ .macro dcache_unlock_line ar, offset
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ dhu \ar, \offset /* unlock dcache line */
+ dcache_sync \ar
+#endif
+ .endm
+
+
+
+/*
+ * Unlock a specified portion of memory from the data cache.
+ * Parameters are:
+ * astart start address (register gets clobbered)
+ * asize size of the region in bytes (register gets clobbered)
+ * ac unique register used as temporary
+ */
+ .macro dcache_unlock_region astart, asize, ac
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache region unlock:
+ cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
+ dcache_sync \ac
+ // End of data cache region unlock
+#endif
+ .endm
+
+
+
+/*
+ * Unlock entire data cache.
+ *
+ * Parameters:
+ * aa, ab unique address registers (temporaries)
+ */
+ .macro dcache_unlock_all aa, ab
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
+ // Data cache unlock:
+ cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
+ dcache_sync \aa
+ // End of data cache unlock
+#endif
+ .endm
+
+
+#endif /*XTENSA_CACHEASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/cacheattrasm.h b/include/asm-xtensa/xtensa/cacheattrasm.h
new file mode 100644
index 000000000000..1c3e117b3592
--- /dev/null
+++ b/include/asm-xtensa/xtensa/cacheattrasm.h
@@ -0,0 +1,432 @@
+#ifndef XTENSA_CACHEATTRASM_H
+#define XTENSA_CACHEATTRASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/cacheattrasm.h -- assembler-specific
+ * CACHEATTR register related definitions that depend on CORE
+ * configuration.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/coreasm.h>
+
+
+/*
+ * This header file defines assembler macros of the form:
+ * <x>cacheattr_<func>
+ * where:
+ * <x> is 'i', 'd' or absent for instruction, data
+ * or both caches; and
+ * <func> indicates the function of the macro.
+ *
+ * The following functions are defined:
+ *
+ * icacheattr_get
+ * Reads I-cache CACHEATTR into a2 (clobbers a3-a5).
+ *
+ * dcacheattr_get
+ * Reads D-cache CACHEATTR into a2 (clobbers a3-a5).
+ * (Note: for configs with a real CACHEATTR register, the
+ * above two macros are identical.)
+ *
+ * cacheattr_set
+ * Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered).
+ * Works even when changing one's own code's attributes.
+ *
+ * icacheattr_is_enabled label
+ * Branches to \label if I-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * dcacheattr_is_enabled label
+ * Branches to \label if D-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * cacheattr_is_enabled label
+ * Branches to \label if either I-cache or D-cache appears to have been enabled
+ * (eg. if CACHEATTR contains a cache-enabled attribute).
+ * (clobbers a2-a5,SAR)
+ *
+ * The following macros are only defined under certain conditions:
+ *
+ * icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
+ * Writes I-cache CACHEATTR from a2 (a3-a8 clobbered).
+ *
+ * dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR)
+ * Writes D-cache CACHEATTR from a2 (a3-a8 clobbered).
+ */
+
+
+
+/*************************** GENERIC -- ALL CACHES ***************************/
+
+/*
+ * _cacheattr_get
+ *
+ * (Internal macro.)
+ * Returns value of CACHEATTR register (or closest equivalent) in a2.
+ *
+ * Entry:
+ * (none)
+ * Exit:
+ * a2 value read from CACHEATTR
+ * a3-a5 clobbered (temporaries)
+ */
+ .macro _cacheattr_get tlb
+#if XCHAL_HAVE_CACHEATTR
+ rsr a2, CACHEATTR
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // We have a config that "mimics" CACHEATTR using a simplified
+ // "MMU" composed of a single statically-mapped way.
+ // DTLB and ITLB are independent, so there's no single
+ // cache attribute that can describe both. So for now
+ // just return the DTLB state.
+ movi a5, 0xE0000000
+ movi a2, 0
+ movi a3, 0
+1: add a3, a3, a5 // next segment
+ r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0
+ dsync // interlock???
+ slli a2, a2, 4
+ extui a4, a4, 0, 4 // extract CA
+ or a2, a2, a4
+ bnez a3, 1b
+#else
+ // This macro isn't applicable to arbitrary MMU configurations.
+ // Just return zero.
+ movi a2, 0
+#endif
+ .endm
+
+ .macro icacheattr_get
+ _cacheattr_get itlb
+ .endm
+
+ .macro dcacheattr_get
+ _cacheattr_get dtlb
+ .endm
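+
+/*
+ * Example usage (illustrative sketch): capture the current D-cache
+ * attributes, eg. to save them before temporarily switching to bypass
+ * mode; note the fixed register convention (result in a2, a3-a5
+ * clobbered):
+ *
+ *	dcacheattr_get
+ *	mov	a6, a2		// preserve the CACHEATTR value in a6
+ */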
+
+
+#define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 /* default (powerup/reset) value of CACHEATTR, all BYPASS
+ mode (ie. disabled/bypassed caches) */
+
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+
+#define XCHAL_FCA_ENAMASK 0x001A /* bitmap of fetch attributes that require enabled icache */
+#define XCHAL_LCA_ENAMASK 0x0003 /* bitmap of load attributes that require enabled dcache */
+#define XCHAL_SCA_ENAMASK 0x0003 /* bitmap of store attributes that require enabled dcache */
+#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */
+#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */
+
+/*
+ * _cacheattr_is_enabled
+ *
+ * (Internal macro.)
+ * Branches to \label if CACHEATTR in a2 indicates an enabled
+ * cache, using mask in a3.
+ *
+ * Parameters:
+ * label where to branch to if cache is enabled
+ * Entry:
+ * a2 contains CACHEATTR value used to determine whether
+ * caches are enabled
+ *	a3		16-bit constant where each bit corresponds to
+ * one of the 16 possible CA values (in a CACHEATTR mask);
+ * CA values that indicate the cache is enabled
+ * have their corresponding bit set in this mask
+ *		(eg. use XCHAL_xCA_ENAMASK, above)
+ * Exit:
+ * a2,a4,a5 clobbered
+ * SAR clobbered
+ */
+ .macro _cacheattr_is_enabled label
+ movi a4, 8 // loop 8 times
+.Lcaife\@:
+ extui a5, a2, 0, 4 // get CA nibble
+ ssr a5 // index into mask according to CA...
+ srl a5, a3 // ...and get CA's mask bit in a5 bit 0
+ bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label
+ srli a2, a2, 4 // next nibble
+ addi a4, a4, -1
+ bnez a4, .Lcaife\@ // loop for each nibble
+ .endm
+
+#else /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+ .macro _cacheattr_is_enabled label
+ j \label // macro not applicable, assume caches always enabled
+ .endm
+#endif /* XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+
+
+
+/*
+ * icacheattr_is_enabled
+ *
+ * Branches to \label if I-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if icache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro icacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ icacheattr_get
+ movi a3, XCHAL_FCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
+
+/*
+ * dcacheattr_is_enabled
+ *
+ * Branches to \label if D-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if dcache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro dcacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ dcacheattr_get
+ movi a3, XCHAL_LSCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
+
+/*
+ * cacheattr_is_enabled
+ *
+ * Branches to \label if either I-cache or D-cache is enabled.
+ *
+ * Parameters:
+ * label where to branch to if a cache is enabled
+ * Entry:
+ * (none)
+ * Exit:
+ * a2-a5, SAR clobbered (temporaries)
+ */
+ .macro cacheattr_is_enabled label
+#if XCHAL_HAVE_CACHEATTR
+ rsr a2, CACHEATTR
+ movi a3, XCHAL_ALLCA_ENAMASK
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ icacheattr_get
+ movi a3, XCHAL_FCA_ENAMASK
+ _cacheattr_is_enabled \label
+ dcacheattr_get
+ movi a3, XCHAL_LSCA_ENAMASK
+#endif
+ _cacheattr_is_enabled \label
+ .endm
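+
+/*
+ * Example usage (illustrative sketch): skip cache initialization when
+ * earlier boot code has already enabled a cache:
+ *
+ *	cacheattr_is_enabled .Lcaches_on
+ *	// ... cache initialization would go here ...
+ *.Lcaches_on:
+ */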
+
+
+
+/*
+ * The ISA does not have a defined way to change the
+ * instruction cache attributes of the running code,
+ * ie. of the memory area that encloses the current PC.
+ * However, each micro-architecture (or class of
+ * configurations within a micro-architecture)
+ * provides a way to deal with this issue.
+ *
+ * The following macros implement whichever approach
+ * is relevant to the configuration at hand.
+ */
+
+#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // We have a config that "mimics" CACHEATTR using a simplified
+ // "MMU" composed of a single statically-mapped way.
+
+/*
+ * icacheattr_set
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+ .macro icacheattr_set
+
+ movi a5, 0xE0000000 // mask of upper 3 bits
+ movi a6, 3f // PC where ITLB is set
+ movi a3, 0 // start at region 0 (0 .. 7)
+ and a6, a6, a5 // upper 3 bits of local PC area
+ mov a7, a2 // copy a2 so it doesn't get clobbered
+ j 3f
+
+# if XCHAL_HAVE_XLT_CACHEATTR
+ // Can do translations, use generic method:
+1: sub a6, a3, a5 // address of some other segment
+ ritlb1 a8, a6 // save its PPN+CA
+ dsync // interlock??
+ witlb a4, a6 // make it translate to this code area
+ movi a6, 5f // where to jump into it
+ isync
+ sub a6, a6, a5 // adjust jump address within that other segment
+ jx a6
+
+ // Note that in the following code snippet, which runs at a different virtual
+ // address than it is assembled for, we avoid using literals (eg. via movi/l32r)
+ // just in case literals end up in a different 512 MB segment, and we avoid
+ // instructions that rely on the current PC being what is expected.
+ //
+ .align 4
+ _j 6f // this is at label '5' minus 4 bytes
+ .align 4
+5: witlb a4, a3 // we're in other segment, now can write previous segment's CA
+ isync
+ add a6, a6, a5 // back to previous segment
+ addi a6, a6, -4 // next jump label
+ jx a6
+
+6: sub a6, a3, a5 // address of some other segment
+ witlb a8, a6 // restore PPN+CA of other segment
+ mov a6, a3 // restore a6
+ isync
+# else /* XCHAL_HAVE_XLT_CACHEATTR */
+ // Use micro-architecture specific method.
+ // The following 4-instruction sequence is aligned such that
+ // it all fits within a single I-cache line. Sixteen byte
+ // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE
+ // actually causes problems because that can be greater than
+ // the alignment of the reset vector, where this macro is often
+ // invoked, which would cause the linker to align the reset
+ // vector code away from the reset vector!!).
+ .align 16 /*XCHAL_ICACHE_LINESIZE*/
+1: _witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB
+ _isync
+ nop
+ nop
+# endif /* XCHAL_HAVE_XLT_CACHEATTR */
+ beq a3, a5, 4f // done?
+
+ // Note that in the WITLB loop, we don't do any load/stores
+ // (may not be an issue here, but it is important in the DTLB case).
+2: srli a7, a7, 4 // next CA
+ sub a3, a3, a5 // next segment (add 0x20000000)
+3:
+# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
+ ritlb1 a8, a3 // get current PPN+CA of segment
+ dsync // interlock???
+ extui a4, a7, 0, 4 // extract CA to set
+ srli a8, a8, 4 // clear CA but keep PPN ...
+ slli a8, a8, 4 // ...
+ add a4, a4, a8 // combine new CA with PPN to preserve
+# else
+ extui a4, a7, 0, 4 // extract CA
+# endif
+ beq a3, a6, 1b // current PC's region? if so, do it in a safe way
+ witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB
+ bne a3, a5, 2b
+ isync // make sure all ifetch changes take effect
+4:
+ .endm // icacheattr_set
+
+
+/*
+ * dcacheattr_set
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+
+ .macro dcacheattr_set
+
+ movi a5, 0xE0000000 // mask of upper 3 bits
+ movi a3, 0 // start at region 0 (0 .. 7)
+ mov a7, a2 // copy a2 so it doesn't get clobbered
+ j 3f
+ // Note that in the WDTLB loop, we don't do any load/stores
+ // (including implicit l32r via movi) because it isn't safe.
+2: srli a7, a7, 4 // next CA
+ sub a3, a3, a5 // next segment (add 0x20000000)
+3:
+# if XCHAL_HAVE_XLT_CACHEATTR /* if have translation, preserve it */
+ rdtlb1 a8, a3 // get current PPN+CA of segment
+ dsync // interlock???
+ extui a4, a7, 0, 4 // extract CA to set
+ srli a8, a8, 4 // clear CA but keep PPN ...
+ slli a8, a8, 4 // ...
+ add a4, a4, a8 // combine new CA with PPN to preserve
+# else
+ extui a4, a7, 0, 4 // extract CA to set
+# endif
+ wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB
+ bne a3, a5, 2b
+ dsync // make sure all data path changes take effect
+ .endm // dcacheattr_set
+
+#endif /* XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR */
+
+
+
+/*
+ * cacheattr_set
+ *
+ * Macro that sets the current CACHEATTR safely
+ * (both i and d) according to the current contents of a2.
+ * It works even when changing the cache attributes of
+ * the currently running code.
+ *
+ * Entry:
+ * a2 cacheattr value to set
+ * Exit:
+ * a2 unchanged
+ * a3-a8 clobbered (temporaries)
+ */
+ .macro cacheattr_set
+
+#if XCHAL_HAVE_CACHEATTR
+# if XCHAL_ICACHE_LINESIZE < 4
+ // No i-cache, so can always safely write to CACHEATTR:
+ wsr a2, CACHEATTR
+# else
+ // The Athens micro-architecture, when using the old
+ // exception architecture option (ie. with the CACHEATTR register)
+ // allows changing the cache attributes of the running code
+ // using the following exact sequence aligned to be within
+ // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE
+ // alignment actually causes problems because that can be greater
+ // than the alignment of the reset vector, where this macro is often
+ // invoked, which would cause the linker to align the reset
+ // vector code away from the reset vector!!).
+ j 1f
+ .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line
+1: _wsr a2, CACHEATTR
+ _isync
+ nop
+ nop
+# endif
+#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR
+ // DTLB and ITLB are independent, but to keep semantics
+ // of this macro we simply write to both.
+ icacheattr_set
+ dcacheattr_set
+#else
+ // This macro isn't applicable to arbitrary MMU configurations.
+ // Do nothing in this case.
+#endif
+ .endm
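+
+/*
+ * Example usage (illustrative sketch): restore the power-up default
+ * (all regions bypassed) using the constant defined earlier in this
+ * file:
+ *
+ *	movi	a2, XCHAL_CACHEATTR_ALL_BYPASS
+ *	cacheattr_set		// safe even for currently executing code
+ */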
+
+
+#endif /*XTENSA_CACHEATTRASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/core.h b/include/asm-xtensa/xtensa/config-linux_be/core.h
new file mode 100644
index 000000000000..d54fe5eb1064
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/core.h
@@ -0,0 +1,1270 @@
+/*
+ * xtensa/config/core.h -- HAL definitions that are dependent on CORE configuration
+ *
+ * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
+ * It was generated for a specific Xtensa processor configuration.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL source itself to include this file.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_CORE_H
+#define XTENSA_CONFIG_CORE_H
+
+#include <xtensa/hal.h>
+
+
+/*----------------------------------------------------------------------
+ GENERAL
+ ----------------------------------------------------------------------*/
+
+/*
+ * Separators for macros that expand into arrays.
+ * These can be predefined by files that #include this one,
+ * when different separators are required.
+ */
+/* Element separator for macros that expand into 1-dimensional arrays: */
+#ifndef XCHAL_SEP
+#define XCHAL_SEP ,
+#endif
+/* Array separator for macros that expand into 2-dimensional arrays: */
+#ifndef XCHAL_SEP2
+#define XCHAL_SEP2 },{
+#endif
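+
+/*
+ * Example (illustrative, not part of the generated definitions): a file
+ * needing a different element separator can predefine it before
+ * including this header:
+ *
+ *	#define XCHAL_SEP	;
+ *	#include <xtensa/config/core.h>
+ */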
+
+
+/*----------------------------------------------------------------------
+ ENDIANNESS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1
+#define XCHAL_HAVE_LE 0
+#define XCHAL_MEMORY_ORDER XTHAL_BIGENDIAN
+
+
+/*----------------------------------------------------------------------
+ REGISTER WINDOWS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_WINDOWED 1 /* 1 if windowed registers option configured, 0 otherwise */
+#define XCHAL_NUM_AREGS 64 /* number of physical address regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+
+
+/*----------------------------------------------------------------------
+ ADDRESS ALIGNMENT
+ ----------------------------------------------------------------------*/
+
+/* These apply to a selected set of core load and store instructions only (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* 1 if unaligned loads cause an exception, 0 otherwise */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* 1 if unaligned stores cause an exception, 0 otherwise */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* 1 if interrupt option configured, 0 otherwise */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* 1 if high-priority interrupt option configured, 0 otherwise */
+#define XCHAL_HAVE_HIGHLEVEL_INTERRUPTS XCHAL_HAVE_HIGHPRI_INTERRUPTS
+#define XCHAL_HAVE_NMI 0 /* 1 if NMI option configured, 0 otherwise */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* number of bits to hold an interrupt number: roundup(log2(number of interrupts)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* number of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels (not including level zero!) */
+#define XCHAL_NUM_LOWPRI_LEVELS 1 /* number of low-priority interrupt levels (always 1) */
+#define XCHAL_FIRST_HIGHPRI_LEVEL (XCHAL_NUM_LOWPRI_LEVELS+1) /* level of first high-priority interrupt (always 2) */
+#define XCHAL_EXCM_LEVEL 1 /* level of interrupts masked by PS.EXCM (XEA2 only; always 1 in T10xx);
+ for XEA1, where there is no PS.EXCM, this is always 1;
+ interrupts at levels FIRST_HIGHPRI <= n <= EXCM_LEVEL, if any,
+ are termed "medium priority" interrupts (post T10xx only) */
+/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL0_MASK 0x00000000
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+#define XCHAL_INTLEVEL8_MASK 0x00000000
+#define XCHAL_INTLEVEL9_MASK 0x00000000
+#define XCHAL_INTLEVEL10_MASK 0x00000000
+#define XCHAL_INTLEVEL11_MASK 0x00000000
+#define XCHAL_INTLEVEL12_MASK 0x00000000
+#define XCHAL_INTLEVEL13_MASK 0x00000000
+#define XCHAL_INTLEVEL14_MASK 0x00000000
+#define XCHAL_INTLEVEL15_MASK 0x00000000
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTLEVEL_MASKS 0x00000000 XCHAL_SEP \
+ 0x000064F9 XCHAL_SEP \
+ 0x00008902 XCHAL_SEP \
+ 0x00011204 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000 XCHAL_SEP \
+ 0x00000000
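+
+/*
+ * Example (illustrative): with the default XCHAL_SEP, the array form
+ * above expands directly into a C initializer list:
+ *
+ *	static const unsigned int intlevel_masks[16] = { XCHAL_INTLEVEL_MASKS };
+ */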
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL0_ANDBELOW_MASK 0x00000000
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL8_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL9_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL10_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL11_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL12_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL13_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL14_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL15_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_LOWPRI_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all low-priority interrupts */
+#define XCHAL_EXCM_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK /* mask of all interrupts masked by PS.EXCM (or CEXCM) */
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTLEVEL_ANDBELOW_MASKS 0x00000000 XCHAL_SEP \
+ 0x000064F9 XCHAL_SEP \
+ 0x0000EDFB XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF XCHAL_SEP \
+ 0x0001FFFF
+
+/* Interrupt numbers for each interrupt level at which only one interrupt was configured: */
+/*#define XCHAL_INTLEVEL1_NUM ...more than one interrupt at this level...*/
+/*#define XCHAL_INTLEVEL2_NUM ...more than one interrupt at this level...*/
+/*#define XCHAL_INTLEVEL3_NUM ...more than one interrupt at this level...*/
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_INT17_LEVEL 0
+#define XCHAL_INT18_LEVEL 0
+#define XCHAL_INT19_LEVEL 0
+#define XCHAL_INT20_LEVEL 0
+#define XCHAL_INT21_LEVEL 0
+#define XCHAL_INT22_LEVEL 0
+#define XCHAL_INT23_LEVEL 0
+#define XCHAL_INT24_LEVEL 0
+#define XCHAL_INT25_LEVEL 0
+#define XCHAL_INT26_LEVEL 0
+#define XCHAL_INT27_LEVEL 0
+#define XCHAL_INT28_LEVEL 0
+#define XCHAL_INT29_LEVEL 0
+#define XCHAL_INT30_LEVEL 0
+#define XCHAL_INT31_LEVEL 0
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INT_LEVELS 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 1 XCHAL_SEP \
+ 2 XCHAL_SEP \
+ 3 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0 XCHAL_SEP \
+ 0
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT22_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT23_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT24_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT25_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT26_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT27_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT28_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT29_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT30_TYPE XTHAL_INTTYPE_UNCONFIGURED
+#define XCHAL_INT31_TYPE XTHAL_INTTYPE_UNCONFIGURED
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INT_TYPES XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_LEVEL XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_EXTERN_EDGE XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_TIMER XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_SOFTWARE XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED XCHAL_SEP \
+ XTHAL_INTTYPE_UNCONFIGURED
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_INTTYPE_MASKS 0xFFFE0000 XCHAL_SEP \
+ 0x0001E000 XCHAL_SEP \
+ 0x00000380 XCHAL_SEP \
+ 0x0000007F XCHAL_SEP \
+ 0x00001C00 XCHAL_SEP \
+ 0x00000000
+
+/* Interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3), -1 if unassigned */
+#define XCHAL_TIMER0_INTERRUPT 10
+#define XCHAL_TIMER1_INTERRUPT 11
+#define XCHAL_TIMER2_INTERRUPT 12
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+/* As an array of entries (eg. for C constant arrays): */
+#define XCHAL_TIMER_INTERRUPTS 10 XCHAL_SEP \
+ 11 XCHAL_SEP \
+ 12 XCHAL_SEP \
+ XTHAL_TIMER_UNCONFIGURED
+
+/* Indexing macros: */
+#define _XCHAL_INTLEVEL_MASK(n) XCHAL_INTLEVEL ## n ## _MASK
+#define XCHAL_INTLEVEL_MASK(n) _XCHAL_INTLEVEL_MASK(n) /* n = 0 .. 15 */
+#define _XCHAL_INTLEVEL_ANDBELOWMASK(n) XCHAL_INTLEVEL ## n ## _ANDBELOW_MASK
+#define XCHAL_INTLEVEL_ANDBELOW_MASK(n) _XCHAL_INTLEVEL_ANDBELOWMASK(n) /* n = 0 .. 15 */
+#define _XCHAL_INT_LEVEL(n) XCHAL_INT ## n ## _LEVEL
+#define XCHAL_INT_LEVEL(n) _XCHAL_INT_LEVEL(n) /* n = 0 .. 31 */
+#define _XCHAL_INT_TYPE(n) XCHAL_INT ## n ## _TYPE
+#define XCHAL_INT_TYPE(n) _XCHAL_INT_TYPE(n) /* n = 0 .. 31 */
+#define _XCHAL_TIMER_INTERRUPT(n) XCHAL_TIMER ## n ## _INTERRUPT
+#define XCHAL_TIMER_INTERRUPT(n) _XCHAL_TIMER_INTERRUPT(n) /* n = 0 .. 3 */
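+
+/*
+ * Example (illustrative): the indexing macros expand their argument
+ * first, so a computed or user-defined level works, eg.:
+ *
+ *	#define MY_LEVEL	2
+ *	#define MY_LEVEL_MASK	XCHAL_INTLEVEL_MASK(MY_LEVEL)	// 0x00008902 on this config
+ */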
+
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
+
+/* Corresponding interrupt masks: */
+#define XCHAL_EXTINT0_MASK 0x00000001
+#define XCHAL_EXTINT1_MASK 0x00000002
+#define XCHAL_EXTINT2_MASK 0x00000004
+#define XCHAL_EXTINT3_MASK 0x00000008
+#define XCHAL_EXTINT4_MASK 0x00000010
+#define XCHAL_EXTINT5_MASK 0x00000020
+#define XCHAL_EXTINT6_MASK 0x00000040
+#define XCHAL_EXTINT7_MASK 0x00000080
+#define XCHAL_EXTINT8_MASK 0x00000100
+#define XCHAL_EXTINT9_MASK 0x00000200
+
+/* Core config interrupt levels mapped to each external interrupt: */
+#define XCHAL_EXTINT0_LEVEL 1 /* (int number 0) */
+#define XCHAL_EXTINT1_LEVEL 2 /* (int number 1) */
+#define XCHAL_EXTINT2_LEVEL 3 /* (int number 2) */
+#define XCHAL_EXTINT3_LEVEL 1 /* (int number 3) */
+#define XCHAL_EXTINT4_LEVEL 1 /* (int number 4) */
+#define XCHAL_EXTINT5_LEVEL 1 /* (int number 5) */
+#define XCHAL_EXTINT6_LEVEL 1 /* (int number 6) */
+#define XCHAL_EXTINT7_LEVEL 1 /* (int number 7) */
+#define XCHAL_EXTINT8_LEVEL 2 /* (int number 8) */
+#define XCHAL_EXTINT9_LEVEL 3 /* (int number 9) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_EXCEPTIONS 1 /* 1 if exception option configured, 0 otherwise */
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture number: 1 for XEA1 (old), 2 for XEA2 (new) */
+#define XCHAL_HAVE_XEA1 0 /* 1 if XEA1, 0 otherwise */
+#define XCHAL_HAVE_XEA2 1 /* 1 if XEA2, 0 otherwise */
+/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */
+#define XCHAL_HAVE_OLD_EXC_ARCH XCHAL_HAVE_XEA1 /* (DEPRECATED) 1 if old exception architecture (XEA1), 0 otherwise (eg. XEA2) */
+#define XCHAL_HAVE_EXCM XCHAL_HAVE_XEA2 /* (DEPRECATED) 1 if PS.EXCM bit exists (currently equals XCHAL_HAVE_TLBS) */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_PROGRAMEXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_USEREXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_PROGRAMEXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_USEREXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_STACKEDEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_KERNELEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR /* for backward compatibility */
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_STACKEDEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_KERNELEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR /* for backward compatibility */
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+/* Indexing macros: */
+#define _XCHAL_INTLEVEL_VECTOR_VADDR(n) XCHAL_INTLEVEL ## n ## _VECTOR_VADDR
+#define XCHAL_INTLEVEL_VECTOR_VADDR(n) _XCHAL_INTLEVEL_VECTOR_VADDR(n) /* n = 0 .. 15 */
+
+/*
+ * General Exception Causes
+ * (values of EXCCAUSE special register set by general exceptions,
+ * which vector to the user, kernel, or double-exception vectors):
+ */
+#define XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION 0 /* Illegal Instruction (IllegalInstruction) */
+#define XCHAL_EXCCAUSE_SYSTEM_CALL 1 /* System Call (SystemCall) */
+#define XCHAL_EXCCAUSE_INSTRUCTION_FETCH_ERROR 2 /* Instruction Fetch Error (InstructionFetchError) */
+#define XCHAL_EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error (LoadStoreError) */
+#define XCHAL_EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt (Level1Interrupt) */
+#define XCHAL_EXCCAUSE_ALLOCA 5 /* Stack Extension Assist (Alloca) */
+#define XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero (IntegerDivideByZero) */
+#define XCHAL_EXCCAUSE_SPECULATION 7 /* Speculation (Speculation) */
+#define XCHAL_EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction (Privileged) */
+#define XCHAL_EXCCAUSE_UNALIGNED 9 /* Unaligned Load Store (Unaligned) */
+#define XCHAL_EXCCAUSE_ITLB_MISS 16 /* ITlb Miss Exception (ITlbMiss) */
+#define XCHAL_EXCCAUSE_ITLB_MULTIHIT		17	/* ITlb Multihit Exception (ITlbMultihit) */
+#define XCHAL_EXCCAUSE_ITLB_PRIVILEGE 18 /* ITlb Privilege Exception (ITlbPrivilege) */
+#define XCHAL_EXCCAUSE_ITLB_SIZE_RESTRICTION 19 /* ITlb Size Restriction Exception (ITlbSizeRestriction) */
+#define XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20 /* Fetch Cache Attribute Exception (FetchCacheAttribute) */
+#define XCHAL_EXCCAUSE_DTLB_MISS 24 /* DTlb Miss Exception (DTlbMiss) */
+#define XCHAL_EXCCAUSE_DTLB_MULTIHIT 25 /* DTlb Multihit Exception (DTlbMultihit) */
+#define XCHAL_EXCCAUSE_DTLB_PRIVILEGE 26 /* DTlb Privilege Exception (DTlbPrivilege) */
+#define XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION 27 /* DTlb Size Restriction Exception (DTlbSizeRestriction) */
+#define XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 /* Load Cache Attribute Exception (LoadCacheAttribute) */
+#define XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 /* Store Cache Attribute Exception (StoreCacheAttribute) */
+#define XCHAL_EXCCAUSE_FLOATING_POINT 40 /* Floating Point Exception (FloatingPoint) */
+
+
+
+/*----------------------------------------------------------------------
+ TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_CCOUNT 1 /* 1 if have CCOUNT, 0 otherwise */
+/*#define XCHAL_HAVE_TIMERS XCHAL_HAVE_CCOUNT*/
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_DEBUG 1 /* 1 if debug option configured, 0 otherwise */
+#define XCHAL_HAVE_OCD 1 /* 1 if OnChipDebug option configured, 0 otherwise */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+/*DebugExternalInterrupt 0 0|1*/
+/*DebugUseDIRArray 0 0|1*/
+
+
+
+
+/*----------------------------------------------------------------------
+ COPROCESSORS and EXTRA STATE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_CP 0 /* 1 if coprocessor option configured (CPENABLE present) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one (per cfg) */
+
+#include <xtensa/config/tie.h>
+
+
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instruction ROMs configured */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instruction RAMs configured */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs configured */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs configured */
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports configured */
+#define XCHAL_NUM_IROM XCHAL_NUM_INSTROM /* (DEPRECATED) */
+#define XCHAL_NUM_IRAM XCHAL_NUM_INSTRAM /* (DEPRECATED) */
+#define XCHAL_NUM_DROM XCHAL_NUM_DATAROM /* (DEPRECATED) */
+#define XCHAL_NUM_DRAM XCHAL_NUM_DATARAM /* (DEPRECATED) */
+
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+/* Size of the cache lines in log2(bytes): */
+#define XCHAL_ICACHE_LINEWIDTH 4
+#define XCHAL_DCACHE_LINEWIDTH 4
+/* Size of the cache lines in bytes: */
+#define XCHAL_ICACHE_LINESIZE 16
+#define XCHAL_DCACHE_LINESIZE 16
+/* Max for both I-cache and D-cache (used for general alignment): */
+#define XCHAL_CACHE_LINEWIDTH_MAX 4
+#define XCHAL_CACHE_LINESIZE_MAX 16
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+/* Max for both I-cache and D-cache (used for general cache-coherency page alignment): */
+#define XCHAL_CACHE_SETWIDTH_MAX 8
+#define XCHAL_CACHE_SETSIZE_MAX 256
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
+
+/* Size of the caches in bytes (ways * 2^(linewidth + setwidth)): */
+#define XCHAL_ICACHE_SIZE 8192
+#define XCHAL_DCACHE_SIZE 8192
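+
+/*
+ * Consistency check (illustrative): the cache size equals
+ * ways * 2^(linewidth + setwidth), which can be verified at compile
+ * time:
+ *
+ *	#if XCHAL_ICACHE_SIZE != (XCHAL_ICACHE_WAYS << (XCHAL_ICACHE_LINEWIDTH + XCHAL_ICACHE_SETWIDTH))
+ *	# error inconsistent I-cache parameters
+ *	#endif
+ */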
+
+/* Cache features: */
+#define XCHAL_DCACHE_IS_WRITEBACK 0
+/* Whether cache locking feature is available: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+
+/* Number of (encoded) cache attribute bits: */
+#define XCHAL_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+/* (The number of access mode bits (decoded cache attribute bits) is defined by the architecture; see xtensa/hal.h?) */
+
+
+/* Cache Attribute encodings -- lists of access modes for each cache attribute: */
+#define XCHAL_FCA_LIST XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_BYPASS XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_CACHED XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION XCHAL_SEP \
+ XTHAL_FAM_EXCEPTION
+#define XCHAL_LCA_LIST XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_BYPASSG XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_NACACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_NACACHED XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_ISOLATE XCHAL_SEP \
+ XTHAL_LAM_EXCEPTION XCHAL_SEP \
+ XTHAL_LAM_CACHED
+#define XCHAL_SCA_LIST XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_BYPASS XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_ISOLATE XCHAL_SEP \
+ XTHAL_SAM_EXCEPTION XCHAL_SEP \
+ XTHAL_SAM_WRITETHRU
+
+/* Test:
+ read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14
+ read/only: 0 + 1 + 2 + 4 + 5 + 6 + 8 + 9 + 10 + 12 + 14
+ all: 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
+ fault: 0 + 2 + 4 + 6 + 8 + 10 + 12 + 14
+ r/w/x cached:
+ r/w/x dcached:
+ I-bypass: 1 + 3
+
+ load guard bit set: 1 + 3
+ load guard bit clr: 0 + 2 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + 15
+ hit-cache r/w/x: 7 + 11
+
+ fams: 5
+ fams: 0 / 6 / 18 / 1 / 2
+ fams: Bypass / Isolate / Cached / Exception / NACached
+
+ MMU okay: yes
+*/
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/*
+ * General notes on MMU parameters.
+ *
+ * Terminology:
+ * ASID = address-space ID (acts as an "extension" of virtual addresses)
+ * VPN = virtual page number
+ * PPN = physical page number
+ * CA = encoded cache attribute (access modes)
+ * TLB = translation look-aside buffer (term is stretched somewhat here)
+ * I = instruction (fetch accesses)
+ * D = data (load and store accesses)
+ * way = each TLB (ITLB and DTLB) consists of a number of "ways"
+ * that simultaneously match the virtual address of an access;
+ * a TLB successfully translates a virtual address if exactly
+ * one way matches the vaddr; if none match, it is a miss;
+ * if multiple match, one gets a "multihit" exception;
+ * each way can be independently configured in terms of number of
+ * entries, page sizes, which fields are writable or constant, etc.
+ * set = group of contiguous ways with exactly identical parameters
+ * ARF = auto-refill; hardware services a 1st-level miss by loading a PTE
+ * from the page table and storing it in one of the auto-refill ways;
+ * if this PTE load also misses, a miss exception is posted for s/w.
+ * min-wired = a "min-wired" way can be used to map a single (minimum-sized)
+ * page arbitrarily under program control; it has a single entry,
+ * is non-auto-refill (some other way(s) must be auto-refill),
+ * all its fields (VPN, PPN, ASID, CA) are all writable, and it
+ * supports the XCHAL_MMU_MIN_PTE_PAGE_SIZE page size (a current
+ * restriction is that this be the only page size it supports).
+ *
+ * TLB way entries are virtually indexed.
+ * TLB ways that support multiple page sizes:
+ * - must have all writable VPN and PPN fields;
+ * - can only use one page size at any given time (eg. setup at startup),
+ * selected by the respective ITLBCFG or DTLBCFG special register,
+ * whose bits n*4+3 .. n*4 index the list of page sizes for way n
+ * (XCHAL_xTLB_SETm_PAGESZ_LOG2_LIST for set m corresponding to way n);
+ * this list may be sparse for auto-refill ways because auto-refill
+ * ways have independent lists of supported page sizes sharing a
+ * common encoding with PTE entries; the encoding is the index into
+ * this list; unsupported sizes for a given way are zero in the list;
+ * selecting unsupported sizes results in undefined hardware behaviour;
+ * - is only possible for ways 0 thru 7 (due to ITLBCFG/DTLBCFG definition).
+ */
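+
+/*
+ * Example (illustrative sketch): for a multi-page-size way n, bits
+ * n*4+3 .. n*4 of ITLBCFG (or DTLBCFG) select an index into that way's
+ * page-size list. On this configuration, ITLB way 4 (set 1 below)
+ * lists page sizes 2^20, 2^22, 2^24, 2^26; selecting its 4 MB (2^22)
+ * size, list index 1, would look like:
+ *
+ *	movi	a2, (1 << 16)	// index 1 in bits 19..16 (way 4)
+ *	wsr	a2, ITLBCFG
+ *	isync
+ */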
+
+#define XCHAL_HAVE_CACHEATTR 0 /* 1 if CACHEATTR register present, 0 if TLBs present instead */
+#define XCHAL_HAVE_TLBS 1 /* 1 if TLBs present, 0 if CACHEATTR present instead */
+#define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS /* (DEPRECATED; use XCHAL_HAVE_TLBS instead; will be removed in future release) */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* 1 if single way maps entire virtual address space in I+D */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* 1 if virtual addr == physical addr always, 0 otherwise */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config (CaMMU) */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* 1 if have MMU that mimics a CACHEATTR config, but with translation (CaXltMMU) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs (address space IDs) */
+#define XCHAL_MMU_ASID_INVALID 0 /* ASID value indicating invalid address space */
+#define XCHAL_MMU_ASID_KERNEL 1 /* ASID value indicating kernel (ring 0) address space */
+#define XCHAL_MMU_RINGS 4 /* number of rings supported (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* number of bits needed to hold ring number */
+#define XCHAL_MMU_SR_BITS 0 /* number of size-restriction bits supported */
+#define XCHAL_MMU_CA_BITS 4 /* number of bits needed to hold cache attribute encoding */
+#define XCHAL_MMU_MAX_PTE_PAGE_SIZE 12 /* max page size in a PTE structure (log2) */
+#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 12 /* min page size in a PTE structure (log2) */
+
+
+/*** Instruction TLB: ***/
+
+#define XCHAL_ITLB_WAY_BITS 3 /* number of bits holding the ways */
+#define XCHAL_ITLB_WAYS 7 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_ITLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_ITLB_SETS 4 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_ITLB_WAY0_SET 0
+#define XCHAL_ITLB_WAY1_SET 0
+#define XCHAL_ITLB_WAY2_SET 0
+#define XCHAL_ITLB_WAY3_SET 0
+#define XCHAL_ITLB_WAY4_SET 1
+#define XCHAL_ITLB_WAY5_SET 2
+#define XCHAL_ITLB_WAY6_SET 3
+
+/* Ways sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_ITLB_ARF_SETS 1 /* number of auto-refill sets */
+#define XCHAL_ITLB_ARF_SET0 0 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_ITLB_MINWIRED_SETS 0 /* number of "min-wired" sets */
+
+
+/* ITLB way set 0 (group of ways 0 thru 3): */
+#define XCHAL_ITLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_ITLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 1 (group of ways 4 thru 4): */
+#define XCHAL_ITLB_SET1_WAY 4 /* index of first way in this way set */
+#define XCHAL_ITLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* ITLB way set 2 (group of ways 5 thru 5): */
+#define XCHAL_ITLB_SET2_WAY 5 /* index of first way in this way set */
+#define XCHAL_ITLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of ITLB way set 2 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_ASID_CONST 0x01
+#define XCHAL_ITLB_SET2_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of ITLB way set 2 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_VPN_CONST 0xD0000000
+#define XCHAL_ITLB_SET2_E1_VPN_CONST 0xD8000000
+/* Constant PPN values for each entry of ITLB way set 2 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_PPN_CONST 0x00000000
+#define XCHAL_ITLB_SET2_E1_PPN_CONST 0x00000000
+/* Constant CA values for each entry of ITLB way set 2 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET2_E0_CA_CONST 0x07
+#define XCHAL_ITLB_SET2_E1_CA_CONST 0x03
+
+/* ITLB way set 3 (group of ways 6 thru 6): */
+#define XCHAL_ITLB_SET3_WAY 6 /* index of first way in this way set */
+#define XCHAL_ITLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_ITLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_ITLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_ITLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_ITLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_ITLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_ITLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_ITLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_ITLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_ITLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of ITLB way set 3 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_ASID_CONST 0x01
+#define XCHAL_ITLB_SET3_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of ITLB way set 3 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_VPN_CONST 0xE0000000
+#define XCHAL_ITLB_SET3_E1_VPN_CONST 0xF0000000
+/* Constant PPN values for each entry of ITLB way set 3 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_PPN_CONST 0xF0000000
+#define XCHAL_ITLB_SET3_E1_PPN_CONST 0xF0000000
+/* Constant CA values for each entry of ITLB way set 3 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_ITLB_SET3_E0_CA_CONST 0x07
+#define XCHAL_ITLB_SET3_E1_CA_CONST 0x03
+
+/* Indexing macros: */
+#define _XCHAL_ITLB_SET(n,_what) XCHAL_ITLB_SET ## n ## _what
+#define XCHAL_ITLB_SET(n,what) _XCHAL_ITLB_SET(n, _ ## what )
+#define _XCHAL_ITLB_SET_E(n,i,_what) XCHAL_ITLB_SET ## n ## _E ## i ## _what
+#define XCHAL_ITLB_SET_E(n,i,what) _XCHAL_ITLB_SET_E(n,i, _ ## what )
+/*
+ * Example use: XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES)
+ * to get the value of XCHAL_ITLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
+ */
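+/*
+ * Illustrative sketch (not part of the generated configuration): the
+ * two-level macros above expand the set-index argument before token
+ * pasting, so a symbolic index like XCHAL_ITLB_ARF_SET0 resolves to
+ * its numeric value first.  Both of the following are therefore
+ * compile-time constants:
+ *
+ *	int arf_entries = XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0, ENTRIES);
+ *	unsigned vpn0   = XCHAL_ITLB_SET_E(2, 0, VPN_CONST);
+ *
+ * which yield 4 and 0xD0000000 respectively for this configuration.
+ */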
+
+
+/*** Data TLB: ***/
+
+#define XCHAL_DTLB_WAY_BITS 4 /* number of bits holding the ways */
+#define XCHAL_DTLB_WAYS 10 /* number of ways (n-way set-associative TLB) */
+#define XCHAL_DTLB_ARF_WAYS 4 /* number of auto-refill ways */
+#define XCHAL_DTLB_SETS 5 /* number of sets (groups of ways with identical settings) */
+
+/* Way set to which each way belongs: */
+#define XCHAL_DTLB_WAY0_SET 0
+#define XCHAL_DTLB_WAY1_SET 0
+#define XCHAL_DTLB_WAY2_SET 0
+#define XCHAL_DTLB_WAY3_SET 0
+#define XCHAL_DTLB_WAY4_SET 1
+#define XCHAL_DTLB_WAY5_SET 2
+#define XCHAL_DTLB_WAY6_SET 3
+#define XCHAL_DTLB_WAY7_SET 4
+#define XCHAL_DTLB_WAY8_SET 4
+#define XCHAL_DTLB_WAY9_SET 4
+
+/* Way sets that are used by hardware auto-refill (ARF): */
+#define XCHAL_DTLB_ARF_SETS 1 /* number of auto-refill sets */
+#define XCHAL_DTLB_ARF_SET0 0 /* index of n'th auto-refill set */
+
+/* Way sets that are "min-wired" (see terminology comment above): */
+#define XCHAL_DTLB_MINWIRED_SETS 1 /* number of "min-wired" sets */
+#define XCHAL_DTLB_MINWIRED_SET0 4 /* index of n'th "min-wired" set */
+
+
+/* DTLB way set 0 (group of ways 0 thru 3): */
+#define XCHAL_DTLB_SET0_WAY 0 /* index of first way in this way set */
+#define XCHAL_DTLB_SET0_WAYS 4 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET0_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET0_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET0_ARF 1 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET0_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET0_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET0_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET0_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET0_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET0_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 1 (group of ways 4 thru 4): */
+#define XCHAL_DTLB_SET1_WAY 4 /* index of first way in this way set */
+#define XCHAL_DTLB_SET1_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET1_ENTRIES_LOG2 2 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET1_ENTRIES 4 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET1_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET1_PAGESIZES 4 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET1_PAGESZ_BITS 2 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MIN 20 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_MAX 26 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST 20 XCHAL_SEP 22 XCHAL_SEP 24 XCHAL_SEP 26 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET1_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET1_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET1_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* DTLB way set 2 (group of ways 5 thru 5): */
+#define XCHAL_DTLB_SET2_WAY 5 /* index of first way in this way set */
+#define XCHAL_DTLB_SET2_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET2_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET2_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET2_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET2_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET2_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MIN 27 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_MAX 27 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET2_PAGESZ_LOG2_LIST 27 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET2_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_VPN_CONSTMASK 0xF0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_PPN_CONSTMASK 0xF8000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET2_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET2_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of DTLB way set 2 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_ASID_CONST 0x01
+#define XCHAL_DTLB_SET2_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of DTLB way set 2 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_VPN_CONST 0xD0000000
+#define XCHAL_DTLB_SET2_E1_VPN_CONST 0xD8000000
+/* Constant PPN values for each entry of DTLB way set 2 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_PPN_CONST 0x00000000
+#define XCHAL_DTLB_SET2_E1_PPN_CONST 0x00000000
+/* Constant CA values for each entry of DTLB way set 2 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET2_E0_CA_CONST 0x07
+#define XCHAL_DTLB_SET2_E1_CA_CONST 0x03
+
+/* DTLB way set 3 (group of ways 6 thru 6): */
+#define XCHAL_DTLB_SET3_WAY 6 /* index of first way in this way set */
+#define XCHAL_DTLB_SET3_WAYS 1 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET3_ENTRIES_LOG2 1 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET3_ENTRIES 2 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET3_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET3_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET3_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MIN 28 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_MAX 28 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET3_PAGESZ_LOG2_LIST 28 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET3_ASID_CONSTMASK 0xFF /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_VPN_CONSTMASK 0xE0000000 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_PPN_CONSTMASK 0xF0000000 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_CA_CONSTMASK 0x0000000F /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET3_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET3_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+/* Constant ASID values for each entry of DTLB way set 3 (because ASID_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_ASID_CONST 0x01
+#define XCHAL_DTLB_SET3_E1_ASID_CONST 0x01
+/* Constant VPN values for each entry of DTLB way set 3 (because VPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_VPN_CONST 0xE0000000
+#define XCHAL_DTLB_SET3_E1_VPN_CONST 0xF0000000
+/* Constant PPN values for each entry of DTLB way set 3 (because PPN_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_PPN_CONST 0xF0000000
+#define XCHAL_DTLB_SET3_E1_PPN_CONST 0xF0000000
+/* Constant CA values for each entry of DTLB way set 3 (because CA_CONSTMASK is non-zero): */
+#define XCHAL_DTLB_SET3_E0_CA_CONST 0x07
+#define XCHAL_DTLB_SET3_E1_CA_CONST 0x03
+
+/* DTLB way set 4 (group of ways 7 thru 9): */
+#define XCHAL_DTLB_SET4_WAY 7 /* index of first way in this way set */
+#define XCHAL_DTLB_SET4_WAYS 3 /* number of (contiguous) ways in this way set */
+#define XCHAL_DTLB_SET4_ENTRIES_LOG2 0 /* log2(number of entries in this way) */
+#define XCHAL_DTLB_SET4_ENTRIES 1 /* number of entries in this way (always a power of 2) */
+#define XCHAL_DTLB_SET4_ARF 0 /* 1=autorefill by h/w, 0=non-autorefill (wired/constant/static) */
+#define XCHAL_DTLB_SET4_PAGESIZES 1 /* number of supported page sizes in this way */
+#define XCHAL_DTLB_SET4_PAGESZ_BITS 0 /* number of bits to encode the page size */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MIN 12 /* log2(minimum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_MAX 12 /* log2(maximum supported page size) */
+#define XCHAL_DTLB_SET4_PAGESZ_LOG2_LIST 12 /* list of log2(page size)s, separated by XCHAL_SEP;
+ 2^PAGESZ_BITS entries in list, unsupported entries are zero */
+#define XCHAL_DTLB_SET4_ASID_CONSTMASK 0 /* constant ASID bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_VPN_CONSTMASK 0 /* constant VPN bits, not including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_PPN_CONSTMASK 0 /* constant PPN bits, including entry index bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_CA_CONSTMASK 0 /* constant CA bits; 0 if all writable */
+#define XCHAL_DTLB_SET4_ASID_RESET 0 /* 1 if ASID reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_VPN_RESET 0 /* 1 if VPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_PPN_RESET 0 /* 1 if PPN reset values defined (and all writable); 0 otherwise */
+#define XCHAL_DTLB_SET4_CA_RESET 0 /* 1 if CA reset values defined (and all writable); 0 otherwise */
+
+/* Indexing macros: */
+#define _XCHAL_DTLB_SET(n,_what) XCHAL_DTLB_SET ## n ## _what
+#define XCHAL_DTLB_SET(n,what) _XCHAL_DTLB_SET(n, _ ## what )
+#define _XCHAL_DTLB_SET_E(n,i,_what) XCHAL_DTLB_SET ## n ## _E ## i ## _what
+#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what )
+/*
+ * Example use: XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES)
+ * to get the value of XCHAL_DTLB_SET<n>_ENTRIES where <n> is the first auto-refill set.
+ */
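+/*
+ * Illustrative sketch (not part of the generated configuration):
+ * combined with the classification macros above, this locates the
+ * first "min-wired" DTLB way at compile time:
+ *
+ *	int wired_way = XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY);
+ *
+ * which expands to XCHAL_DTLB_SET4_WAY, i.e. 7, for this configuration.
+ */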
+
+
+/*
+ * Determine whether we have a full MMU (with Page Table and Protection)
+ * usable for an MMU-based OS:
+ */
+#if XCHAL_HAVE_TLBS && !XCHAL_HAVE_SPANNING_WAY && XCHAL_ITLB_ARF_WAYS > 0 && XCHAL_DTLB_ARF_WAYS > 0 && XCHAL_MMU_RINGS >= 2
+# define XCHAL_HAVE_PTP_MMU 1 /* have full MMU (with page table [autorefill] and protection) */
+#else
+# define XCHAL_HAVE_PTP_MMU 0 /* don't have full MMU */
+#endif
+
+/*
+ * For full MMUs, report kernel RAM segment and kernel I/O segment static page mappings:
+ */
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KSEG_CACHED_VADDR 0xD0000000 /* virt.addr of kernel RAM cached static map */
+#define XCHAL_KSEG_CACHED_PADDR 0x00000000 /* phys.addr of kseg_cached */
+#define XCHAL_KSEG_CACHED_SIZE 0x08000000 /* size in bytes of kseg_cached (assumed power of 2!!!) */
+#define XCHAL_KSEG_BYPASS_VADDR 0xD8000000 /* virt.addr of kernel RAM bypass (uncached) static map */
+#define XCHAL_KSEG_BYPASS_PADDR 0x00000000 /* phys.addr of kseg_bypass */
+#define XCHAL_KSEG_BYPASS_SIZE 0x08000000 /* size in bytes of kseg_bypass (assumed power of 2!!!) */
+
+#define XCHAL_KIO_CACHED_VADDR 0xE0000000 /* virt.addr of kernel I/O cached static map */
+#define XCHAL_KIO_CACHED_PADDR 0xF0000000 /* phys.addr of kio_cached */
+#define XCHAL_KIO_CACHED_SIZE 0x10000000 /* size in bytes of kio_cached (assumed power of 2!!!) */
+#define XCHAL_KIO_BYPASS_VADDR 0xF0000000 /* virt.addr of kernel I/O bypass (uncached) static map */
+#define XCHAL_KIO_BYPASS_PADDR 0xF0000000 /* phys.addr of kio_bypass */
+#define XCHAL_KIO_BYPASS_SIZE 0x10000000 /* size in bytes of kio_bypass (assumed power of 2!!!) */
+
+#define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest non-static-mapped virtual addr area */
+#define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */
+/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of descending size. */
+#endif
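+/*
+ * Illustrative sketch (hypothetical helpers, not generated output):
+ * with the static maps above, a physical RAM address converts to a
+ * kernel virtual address by adding the appropriate map base:
+ *
+ *	#define KSEG_PA_TO_CACHED_VA(pa) \
+ *		((unsigned long)(pa) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR)
+ *	#define KSEG_PA_TO_BYPASS_VA(pa) \
+ *		((unsigned long)(pa) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR)
+ *
+ * e.g. physical 0x00001000 becomes 0xD0001000 cached and 0xD8001000 uncached.
+ */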
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* number of write buffer entries */
+
+#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
+ (CoreID) set in the Xtensa Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
+
+/* These definitions describe the hardware targeted by this software: */
+#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
+#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
+#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
+#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
+#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
+#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
+#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
+#define XTHAL_HW_REL_T1050 1
+#define XTHAL_HW_REL_T1050_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+
+
+/*
+ * Miscellaneous special register fields:
+ */
+
+
+/* DBREAKC (special register number 160): */
+#define XCHAL_DBREAKC_VALIDMASK 0xC000003F /* bits of DBREAKC that are defined */
+/* MASK field: */
+#define XCHAL_DBREAKC_MASK_BITS 6 /* number of bits in MASK field */
+#define XCHAL_DBREAKC_MASK_NUM 64 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_MASK_SHIFT 0 /* position of MASK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_MASK_MASK 0x0000003F /* mask of bits in MASK field of DBREAKC */
+/* LOADBREAK field: */
+#define XCHAL_DBREAKC_LOADBREAK_BITS 1 /* number of bits in LOADBREAK field */
+#define XCHAL_DBREAKC_LOADBREAK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_LOADBREAK_SHIFT 30 /* position of LOADBREAK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_LOADBREAK_MASK 0x40000000 /* mask of bits in LOADBREAK field of DBREAKC */
+/* STOREBREAK field: */
+#define XCHAL_DBREAKC_STOREBREAK_BITS 1 /* number of bits in STOREBREAK field */
+#define XCHAL_DBREAKC_STOREBREAK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_DBREAKC_STOREBREAK_SHIFT 31 /* position of STOREBREAK bits in DBREAKC, starting from lsbit */
+#define XCHAL_DBREAKC_STOREBREAK_MASK 0x80000000 /* mask of bits in STOREBREAK field of DBREAKC */
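+/*
+ * Illustrative sketch (hypothetical helper, not generated output):
+ * VALIDMASK can sanity-check a value before it is written to the
+ * register, e.g.
+ *
+ *	int dbreakc_valid(unsigned v) { return (v & ~XCHAL_DBREAKC_VALIDMASK) == 0; }
+ */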
+
+/* PS (special register number 230): */
+#define XCHAL_PS_VALIDMASK 0x00070FFF /* bits of PS that are defined */
+/* INTLEVEL field: */
+#define XCHAL_PS_INTLEVEL_BITS 4 /* number of bits in INTLEVEL field */
+#define XCHAL_PS_INTLEVEL_NUM 16 /* max number of possible values (2^bits) */
+#define XCHAL_PS_INTLEVEL_SHIFT 0 /* position of INTLEVEL bits in PS, starting from lsbit */
+#define XCHAL_PS_INTLEVEL_MASK 0x0000000F /* mask of bits in INTLEVEL field of PS */
+/* EXCM field: */
+#define XCHAL_PS_EXCM_BITS 1 /* number of bits in EXCM field */
+#define XCHAL_PS_EXCM_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_EXCM_SHIFT 4 /* position of EXCM bits in PS, starting from lsbit */
+#define XCHAL_PS_EXCM_MASK 0x00000010 /* mask of bits in EXCM field of PS */
+/* PROGSTACK field: */
+#define XCHAL_PS_PROGSTACK_BITS 1 /* number of bits in PROGSTACK field */
+#define XCHAL_PS_PROGSTACK_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_PROGSTACK_SHIFT 5 /* position of PROGSTACK bits in PS, starting from lsbit */
+#define XCHAL_PS_PROGSTACK_MASK 0x00000020 /* mask of bits in PROGSTACK field of PS */
+/* RING field: */
+#define XCHAL_PS_RING_BITS 2 /* number of bits in RING field */
+#define XCHAL_PS_RING_NUM 4 /* max number of possible values (2^bits) */
+#define XCHAL_PS_RING_SHIFT 6 /* position of RING bits in PS, starting from lsbit */
+#define XCHAL_PS_RING_MASK 0x000000C0 /* mask of bits in RING field of PS */
+/* OWB field: */
+#define XCHAL_PS_OWB_BITS 4 /* number of bits in OWB field */
+#define XCHAL_PS_OWB_NUM 16 /* max number of possible values (2^bits) */
+#define XCHAL_PS_OWB_SHIFT 8 /* position of OWB bits in PS, starting from lsbit */
+#define XCHAL_PS_OWB_MASK 0x00000F00 /* mask of bits in OWB field of PS */
+/* CALLINC field: */
+#define XCHAL_PS_CALLINC_BITS 2 /* number of bits in CALLINC field */
+#define XCHAL_PS_CALLINC_NUM 4 /* max number of possible values (2^bits) */
+#define XCHAL_PS_CALLINC_SHIFT 16 /* position of CALLINC bits in PS, starting from lsbit */
+#define XCHAL_PS_CALLINC_MASK 0x00030000 /* mask of bits in CALLINC field of PS */
+/* WOE field: */
+#define XCHAL_PS_WOE_BITS 1 /* number of bits in WOE field */
+#define XCHAL_PS_WOE_NUM 2 /* max number of possible values (2^bits) */
+#define XCHAL_PS_WOE_SHIFT 18 /* position of WOE bits in PS, starting from lsbit */
+#define XCHAL_PS_WOE_MASK 0x00040000 /* mask of bits in WOE field of PS */
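+/*
+ * Illustrative sketch (hypothetical helper, not generated output):
+ * each PS field is read by masking and shifting, e.g.
+ *
+ *	#define XPS_FIELD(ps, f)  (((ps) & XCHAL_PS_##f##_MASK) >> XCHAL_PS_##f##_SHIFT)
+ *
+ * so XPS_FIELD(ps, INTLEVEL) yields the current interrupt level (0..15)
+ * and XPS_FIELD(ps, RING) the current protection ring (0..3).
+ */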
+
+/* EXCCAUSE (special register number 232): */
+#define XCHAL_EXCCAUSE_VALIDMASK 0x0000003F /* bits of EXCCAUSE that are defined */
+/* EXCCAUSE field: */
+#define XCHAL_EXCCAUSE_BITS 6 /* number of bits in EXCCAUSE register */
+#define XCHAL_EXCCAUSE_NUM 64 /* max number of possible causes (2^bits) */
+#define XCHAL_EXCCAUSE_SHIFT 0 /* position of EXCCAUSE bits in register, starting from lsbit */
+#define XCHAL_EXCCAUSE_MASK 0x0000003F /* mask of bits in EXCCAUSE register */
+
+/* DEBUGCAUSE (special register number 233): */
+#define XCHAL_DEBUGCAUSE_VALIDMASK 0x0000003F /* bits of DEBUGCAUSE that are defined */
+/* ICOUNT field: */
+#define XCHAL_DEBUGCAUSE_ICOUNT_BITS 1 /* number of bits in ICOUNT field */
+#define XCHAL_DEBUGCAUSE_ICOUNT_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_ICOUNT_SHIFT 0 /* position of ICOUNT bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_ICOUNT_MASK 0x00000001 /* mask of bits in ICOUNT field of DEBUGCAUSE */
+/* IBREAK field: */
+#define XCHAL_DEBUGCAUSE_IBREAK_BITS 1 /* number of bits in IBREAK field */
+#define XCHAL_DEBUGCAUSE_IBREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_IBREAK_SHIFT 1 /* position of IBREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_IBREAK_MASK 0x00000002 /* mask of bits in IBREAK field of DEBUGCAUSE */
+/* DBREAK field: */
+#define XCHAL_DEBUGCAUSE_DBREAK_BITS 1 /* number of bits in DBREAK field */
+#define XCHAL_DEBUGCAUSE_DBREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_DBREAK_SHIFT 2 /* position of DBREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_DBREAK_MASK 0x00000004 /* mask of bits in DBREAK field of DEBUGCAUSE */
+/* BREAK field: */
+#define XCHAL_DEBUGCAUSE_BREAK_BITS 1 /* number of bits in BREAK field */
+#define XCHAL_DEBUGCAUSE_BREAK_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_BREAK_SHIFT 3 /* position of BREAK bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_BREAK_MASK 0x00000008 /* mask of bits in BREAK field of DEBUGCAUSE */
+/* BREAKN field: */
+#define XCHAL_DEBUGCAUSE_BREAKN_BITS 1 /* number of bits in BREAKN field */
+#define XCHAL_DEBUGCAUSE_BREAKN_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_BREAKN_SHIFT 4 /* position of BREAKN bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_BREAKN_MASK 0x00000010 /* mask of bits in BREAKN field of DEBUGCAUSE */
+/* DEBUGINT field: */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_BITS 1 /* number of bits in DEBUGINT field */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_NUM 2 /* max number of possible causes (2^bits) */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_SHIFT 5 /* position of DEBUGINT bits in DEBUGCAUSE, starting from lsbit */
+#define XCHAL_DEBUGCAUSE_DEBUGINT_MASK 0x00000020 /* mask of bits in DEBUGINT field of DEBUGCAUSE */
+
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_DENSITY 1 /* 1 if density option configured, 0 otherwise */
+#define XCHAL_HAVE_LOOPS 1 /* 1 if zero-overhead loops option configured, 0 otherwise */
+/* Misc instructions: */
+#define XCHAL_HAVE_NSA 0 /* 1 if NSA/NSAU instructions option configured, 0 otherwise */
+#define XCHAL_HAVE_MINMAX 0 /* 1 if MIN/MAX instructions option configured, 0 otherwise */
+#define XCHAL_HAVE_SEXT 0 /* 1 if sign-extend instruction option configured, 0 otherwise */
+#define XCHAL_HAVE_CLAMPS 0 /* 1 if CLAMPS instruction option configured, 0 otherwise */
+#define XCHAL_HAVE_MAC16 0 /* 1 if MAC16 option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL16 0 /* 1 if 16-bit integer multiply option configured, 0 otherwise */
+/*#define XCHAL_HAVE_POPC 0*/ /* 1 if POPC instruction option configured, 0 otherwise */
+/*#define XCHAL_HAVE_CRC 0*/ /* 1 if CRC instruction option configured, 0 otherwise */
+
+#define XCHAL_HAVE_SPECULATION 0 /* 1 if speculation option configured, 0 otherwise */
+/*#define XCHAL_HAVE_MP_SYNC 0*/ /* 1 if multiprocessor sync. option configured, 0 otherwise */
+#define XCHAL_HAVE_PRID 0 /* 1 if processor ID register configured, 0 otherwise */
+
+#define XCHAL_NUM_MISC_REGS 2 /* number of miscellaneous registers (0..4) */
+
+/* These relate a bit more to TIE: */
+#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
+#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
+
+
+/*----------------------------------------------------------------------
+ DERIVED
+ ----------------------------------------------------------------------*/
+
+#if XCHAL_HAVE_BE
+#define XCHAL_INST_ILLN 0xD60F /* 2-byte illegal instruction, msb-first */
+#define XCHAL_INST_ILLN_BYTE0 0xD6 /* 2-byte illegal instruction, 1st byte */
+#define XCHAL_INST_ILLN_BYTE1 0x0F /* 2-byte illegal instruction, 2nd byte */
+#else
+#define XCHAL_INST_ILLN 0xF06D /* 2-byte illegal instruction, lsb-first */
+#define XCHAL_INST_ILLN_BYTE0 0x6D /* 2-byte illegal instruction, 1st byte */
+#define XCHAL_INST_ILLN_BYTE1 0xF0 /* 2-byte illegal instruction, 2nd byte */
+#endif
+/* Belongs in xtensa/hal.h: */
+#define XTHAL_INST_ILL 0x000000 /* 3-byte illegal instruction */
+
+
+/*
+ * Because information as to exactly which hardware release is targeted
+ * by a given software build is not always available, compile-time HAL
+ * Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XCHAL_MAYBE):
+ */
+#ifndef XCHAL_HW_RELEASE_MAJOR
+# define XCHAL_HW_CONFIGID_RELIABLE 0
+#endif
+#if XCHAL_HW_CONFIGID_RELIABLE
+# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) (XTHAL_REL_LE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) (XTHAL_REL_GE( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_AT(major,minor) (XTHAL_REL_EQ( XCHAL_HW_RELEASE_MAJOR,XCHAL_HW_RELEASE_MINOR, major,minor ) ? 1 : 0)
+# define XCHAL_HW_RELEASE_MAJOR_AT(major) ((XCHAL_HW_RELEASE_MAJOR == (major)) ? 1 : 0)
+#else
+# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) ( ((major) < 1040 && XCHAL_HAVE_XEA2) ? 0 \
+ : ((major) > 1050 && XCHAL_HAVE_XEA1) ? 1 \
+ : XTHAL_MAYBE )
+# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) ( ((major) >= 2000 && XCHAL_HAVE_XEA1) ? 0 \
+ : (XTHAL_REL_LE(major,minor, 1040,0) && XCHAL_HAVE_XEA2) ? 1 \
+ : XTHAL_MAYBE )
+# define XCHAL_HW_RELEASE_AT(major,minor) ( (((major) < 1040 && XCHAL_HAVE_XEA2) || \
+ ((major) >= 2000 && XCHAL_HAVE_XEA1)) ? 0 : XTHAL_MAYBE)
+# define XCHAL_HW_RELEASE_MAJOR_AT(major) XCHAL_HW_RELEASE_AT(major,0)
+#endif
+
+/*
+ * Specific errata:
+ */
+
+/*
+ * Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1;
+ * relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled):
+ */
+#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN (XCHAL_HAVE_XEA1 && \
+ (XCHAL_HW_RELEASE_AT_OR_BELOW(1040,2) != 0 \
+ || XCHAL_HW_RELEASE_AT(1050,0)))
+
+
+
+#endif /*XTENSA_CONFIG_CORE_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/defs.h b/include/asm-xtensa/xtensa/config-linux_be/defs.h
new file mode 100644
index 000000000000..f7c58b273371
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/defs.h
@@ -0,0 +1,270 @@
+/* Definitions for Xtensa instructions, types, and protos. */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+/* Do not modify. This is automatically generated. */
+
+#ifndef _XTENSA_BASE_HEADER
+#define _XTENSA_BASE_HEADER
+
+#ifdef __XTENSA__
+#if defined(__GNUC__) && !defined(__XCC__)
+
+#define L8UI_ASM(arr, ars, imm) { \
+ __asm__ volatile("l8ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L8UI(ars, imm) \
+({ \
+ unsigned char _arr; \
+ const unsigned char *_ars = ars; \
+ L8UI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L16UI_ASM(arr, ars, imm) { \
+ __asm__ volatile("l16ui %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L16UI(ars, imm) \
+({ \
+ unsigned short _arr; \
+ const unsigned short *_ars = ars; \
+ L16UI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L16SI_ASM(arr, ars, imm) {\
+ __asm__ volatile("l16si %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L16SI(ars, imm) \
+({ \
+ signed short _arr; \
+ const signed short *_ars = ars; \
+ L16SI_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define L32I_ASM(arr, ars, imm) { \
+ __asm__ volatile("l32i %0, %1, %2" : "=a" (arr) : "a" (ars) , "i" (imm)); \
+}
+
+#define XT_L32I(ars, imm) \
+({ \
+ unsigned _arr; \
+ const unsigned *_ars = ars; \
+ L32I_ASM(_arr, _ars, imm); \
+ _arr; \
+})
+
+#define S8I_ASM(arr, ars, imm) {\
+ __asm__ volatile("s8i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S8I(arr, ars, imm) \
+({ \
+ signed char _arr = arr; \
+ const signed char *_ars = ars; \
+ S8I_ASM(_arr, _ars, imm); \
+})
+
+#define S16I_ASM(arr, ars, imm) {\
+ __asm__ volatile("s16i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S16I(arr, ars, imm) \
+({ \
+ signed short _arr = arr; \
+ const signed short *_ars = ars; \
+ S16I_ASM(_arr, _ars, imm); \
+})
+
+#define S32I_ASM(arr, ars, imm) { \
+ __asm__ volatile("s32i %0, %1, %2" : : "a" (arr), "a" (ars) , "i" (imm) : "memory" ); \
+}
+
+#define XT_S32I(arr, ars, imm) \
+({ \
+ signed int _arr = arr; \
+ const signed int *_ars = ars; \
+ S32I_ASM(_arr, _ars, imm); \
+})
+
+#define ADDI_ASM(art, ars, imm) {\
+ __asm__ ("addi %0, %1, %2" : "=a" (art) : "a" (ars), "i" (imm)); \
+}
+
+#define XT_ADDI(ars, imm) \
+({ \
+ unsigned _art; \
+ unsigned _ars = ars; \
+ ADDI_ASM(_art, _ars, imm); \
+ _art; \
+})
+
+#define ABS_ASM(arr, art) {\
+ __asm__ ("abs %0, %1" : "=a" (arr) : "a" (art)); \
+}
+
+#define XT_ABS(art) \
+({ \
+ unsigned _arr; \
+ signed _art = art; \
+ ABS_ASM(_arr, _art); \
+ _arr; \
+})
+
+/* Note: In the following macros that reference SAR, the magic "state"
+ register is used to capture the dependency on SAR. This is because
+ SAR is a 5-bit register and thus there are no C types that can be
+ used to represent it. It doesn't appear that the SAR register is
+ even relevant to GCC, but it is marked as "clobbered" just in
+ case. */
+
+#define SRC_ASM(arr, ars, art) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("src %0, %1, %2" \
+ : "=a" (arr) : "a" (ars), "a" (art), "t" (_xt_sar)); \
+}
+
+#define XT_SRC(ars, art) \
+({ \
+ unsigned _arr; \
+ unsigned _ars = ars; \
+ unsigned _art = art; \
+ SRC_ASM(_arr, _ars, _art); \
+ _arr; \
+})
+
+#define SSR_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssr %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSR(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSR_ASM(_ars); \
+})
+
+#define SSL_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssl %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSL(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSL_ASM(_ars); \
+})
+
+#define SSA8B_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssa8b %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSA8B(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSA8B_ASM(_ars); \
+})
+
+#define SSA8L_ASM(ars) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssa8l %1" : "=t" (_xt_sar) : "a" (ars) : "sar"); \
+}
+
+#define XT_SSA8L(ars) \
+({ \
+ unsigned _ars = ars; \
+ SSA8L_ASM(_ars); \
+})
+
+#define SSAI_ASM(imm) {\
+ register int _xt_sar __asm__ ("state"); \
+ __asm__ ("ssai %1" : "=t" (_xt_sar) : "i" (imm) : "sar"); \
+}
+
+#define XT_SSAI(imm) \
+({ \
+ SSAI_ASM(imm); \
+})
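+/*
+ * Illustrative sketch (an assumption, not part of the generated file):
+ * the SSAI/SSR macros set the shift amount in SAR, and SRC then
+ * funnel-shifts the 64-bit concatenation of its two operands.  Passing
+ * the same value twice gives a rotate, e.g. a 32-bit rotate right by 8:
+ *
+ *	unsigned rotr8(unsigned x) { XT_SSAI(8); return XT_SRC(x, x); }
+ */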
+
+
+
+
+
+
+
+
+#endif /* __GNUC__ && !__XCC__ */
+
+#ifdef __XCC__
+
+/* Core load/store instructions */
+extern unsigned char _TIE_L8UI(const unsigned char * ars, immediate imm);
+extern unsigned short _TIE_L16UI(const unsigned short * ars, immediate imm);
+extern signed short _TIE_L16SI(const signed short * ars, immediate imm);
+extern unsigned _TIE_L32I(const unsigned * ars, immediate imm);
+extern void _TIE_S8I(unsigned char arr, unsigned char * ars, immediate imm);
+extern void _TIE_S16I(unsigned short arr, unsigned short * ars, immediate imm);
+extern void _TIE_S32I(unsigned arr, unsigned * ars, immediate imm);
+
+#define XT_L8UI _TIE_L8UI
+#define XT_L16UI _TIE_L16UI
+#define XT_L16SI _TIE_L16SI
+#define XT_L32I _TIE_L32I
+#define XT_S8I _TIE_S8I
+#define XT_S16I _TIE_S16I
+#define XT_S32I _TIE_S32I
+
+/* Add-immediate instruction */
+extern unsigned _TIE_ADDI(unsigned ars, immediate imm);
+#define XT_ADDI _TIE_ADDI
+
+/* Absolute value instruction */
+extern unsigned _TIE_ABS(int art);
+#define XT_ABS _TIE_ABS
+
+/* funnel shift instructions */
+extern unsigned _TIE_SRC(unsigned ars, unsigned art);
+#define XT_SRC _TIE_SRC
+extern void _TIE_SSR(unsigned ars);
+#define XT_SSR _TIE_SSR
+extern void _TIE_SSL(unsigned ars);
+#define XT_SSL _TIE_SSL
+extern void _TIE_SSA8B(unsigned ars);
+#define XT_SSA8B _TIE_SSA8B
+extern void _TIE_SSA8L(unsigned ars);
+#define XT_SSA8L _TIE_SSA8L
+extern void _TIE_SSAI(immediate imm);
+#define XT_SSAI _TIE_SSAI
+
+
+#endif /* __XCC__ */
+
+#endif /* __XTENSA__ */
+#endif /* !_XTENSA_BASE_HEADER */
diff --git a/include/asm-xtensa/xtensa/config-linux_be/specreg.h b/include/asm-xtensa/xtensa/config-linux_be/specreg.h
new file mode 100644
index 000000000000..fa4106aa9a02
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/specreg.h
@@ -0,0 +1,99 @@
+/*
+ * Xtensa Special Register symbolic names
+ */
+
+/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+#ifndef XTENSA_SPECREG_H
+#define XTENSA_SPECREG_H
+
+/* Include these special register bitfield definitions, for historical reasons: */
+#include <xtensa/corebits.h>
+
+
+/* Special registers: */
+#define LBEG 0
+#define LEND 1
+#define LCOUNT 2
+#define SAR 3
+#define WINDOWBASE 72
+#define WINDOWSTART 73
+#define PTEVADDR 83
+#define RASID 90
+#define ITLBCFG 91
+#define DTLBCFG 92
+#define IBREAKENABLE 96
+#define DDR 104
+#define IBREAKA_0 128
+#define IBREAKA_1 129
+#define DBREAKA_0 144
+#define DBREAKA_1 145
+#define DBREAKC_0 160
+#define DBREAKC_1 161
+#define EPC_1 177
+#define EPC_2 178
+#define EPC_3 179
+#define EPC_4 180
+#define DEPC 192
+#define EPS_2 194
+#define EPS_3 195
+#define EPS_4 196
+#define EXCSAVE_1 209
+#define EXCSAVE_2 210
+#define EXCSAVE_3 211
+#define EXCSAVE_4 212
+#define INTERRUPT 226
+#define INTENABLE 228
+#define PS 230
+#define EXCCAUSE 232
+#define DEBUGCAUSE 233
+#define CCOUNT 234
+#define ICOUNT 236
+#define ICOUNTLEVEL 237
+#define EXCVADDR 238
+#define CCOMPARE_0 240
+#define CCOMPARE_1 241
+#define CCOMPARE_2 242
+#define MISC_REG_0 244
+#define MISC_REG_1 245
+
+/* Special cases (bases of special register series): */
+#define IBREAKA 128
+#define DBREAKA 144
+#define DBREAKC 160
+#define EPC 176
+#define EPS 192
+#define EXCSAVE 208
+#define CCOMPARE 240
+
+/* Special names for read-only and write-only interrupt registers: */
+#define INTREAD 226
+#define INTSET 226
+#define INTCLEAR 227
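+/*
+ * Illustrative sketch (hypothetical helper, not part of the generated
+ * list): from C, a name can be stringized into an RSR operand, e.g. to
+ * read EPC_1 (the two-level macro expands EPC_1 to 177 before the "#"):
+ *
+ *	#define _XTSTR(x) #x
+ *	#define XTSTR(x)  _XTSTR(x)
+ *	static inline unsigned read_epc1(void)
+ *	{
+ *		unsigned v;
+ *		__asm__ __volatile__("rsr %0, " XTSTR(EPC_1) : "=a" (v));
+ *		return v;
+ *	}
+ */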
+
+#endif /* XTENSA_SPECREG_H */
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/system.h b/include/asm-xtensa/xtensa/config-linux_be/system.h
new file mode 100644
index 000000000000..cf9d4d308e3a
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/system.h
@@ -0,0 +1,198 @@
+/*
+ * xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
+ *
+ * NOTE: The location and contents of this file are highly subject to change.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * The HAL itself has historically included this file in some instances,
+ * but this is not appropriate either, because the HAL is meant to be
+ * core-specific but system independent.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_SYSTEM_H
+#define XTENSA_CONFIG_SYSTEM_H
+
+/*#include <xtensa/hal.h>*/
+
+
+
+/*----------------------------------------------------------------------
+ DEVICE ADDRESSES
+ ----------------------------------------------------------------------*/
+
+/*
+ * Strange place to find these, but the configuration GUI
+ * allows moving these around to account for various core
+ * configurations. Specific boards (and their BSP software)
+ * will have specific meanings for these components.
+ */
+
+/* I/O Block areas: */
+#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000
+#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000
+#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
+
+#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000
+#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000
+#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
+
+/* System ROM: */
+#define XSHAL_ROM_VADDR 0xEE000000
+#define XSHAL_ROM_PADDR 0xFE000000
+#define XSHAL_ROM_SIZE 0x00400000
+/* Largest available area (free of vectors): */
+#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C
+#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4
+
+/* System RAM: */
+#define XSHAL_RAM_VADDR 0xD0000000
+#define XSHAL_RAM_PADDR 0x00000000
+#define XSHAL_RAM_VSIZE 0x08000000
+#define XSHAL_RAM_PSIZE 0x10000000
+#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
+/* Largest available area (free of vectors): */
+#define XSHAL_RAM_AVAIL_VADDR 0xD0000370
+#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90
+
+/*
+ * Shadow system RAM (same device as system RAM, at different address).
+ * (Emulation boards need this for the SONIC Ethernet driver
+ * when data caches are configured for writeback mode.)
+ * NOTE: on full MMU configs, this points to the BYPASS virtual address
+ * of system RAM, i.e. it is the same as XSHAL_RAM_* except that virtual
+ * addresses are viewed through the BYPASS static map rather than
+ * the CACHED static map.
+ */
+#define XSHAL_RAM_BYPASS_VADDR 0xD8000000
+#define XSHAL_RAM_BYPASS_PADDR 0x00000000
+#define XSHAL_RAM_BYPASS_PSIZE 0x08000000
+
+/* Alternate system RAM (different device than system RAM): */
+#define XSHAL_ALTRAM_VADDR 0xCEE00000
+#define XSHAL_ALTRAM_PADDR 0xC0000000
+#define XSHAL_ALTRAM_SIZE 0x00200000
+
+
+/*----------------------------------------------------------------------
+ * DEVICE-ADDRESS DEPENDENT...
+ *
+ * Values written to CACHEATTR special register (or its equivalent)
+ * to enable and disable caches in various modes.
+ *----------------------------------------------------------------------*/
+
+/*----------------------------------------------------------------------
+ BACKWARD COMPATIBILITY ...
+ ----------------------------------------------------------------------*/
+
+/*
+ * NOTE: the following two macros are DEPRECATED. Use the later
+ * board-specific macros instead, which are specially tuned for the
+ * particular target environments' memory maps.
+ */
+#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
+#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
+
+/*----------------------------------------------------------------------
+ ISS (Instruction Set Simulator) SPECIFIC ...
+ ----------------------------------------------------------------------*/
+
+#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */
+#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */
+#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */
+#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
+#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+/* For Coware only: */
+#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */
+#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */
+#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */
+#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
+#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+/* For BFM and other purposes: */
+#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */
+#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */
+
+#define XSHAL_ISS_PIPE_REGIONS 0
+#define XSHAL_ISS_SDRAM_REGIONS 0
+
+
+/*----------------------------------------------------------------------
+ XT2000 BOARD SPECIFIC ...
+ ----------------------------------------------------------------------*/
+
+#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */
+#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */
+#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */
+#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */
+#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
+
+#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */
+#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */
+
+
+/*----------------------------------------------------------------------
+ VECTOR SIZES
+ ----------------------------------------------------------------------*/
+
+/*
+ * Sizes allocated to vectors by the system (memory map) configuration.
+ * These sizes are constrained by core configuration (eg. one vector's
+ * code cannot overflow into another vector) but are dependent on the
+ * system or board (or LSP) memory map configuration.
+ *
+ * Whether or not each vector happens to be in a system ROM is also
+ * a system configuration matter; it is sometimes useful, so it is included here as well:
+ */
+#define XSHAL_RESET_VECTOR_SIZE 0x000004E0
+#define XSHAL_RESET_VECTOR_ISROM 1
+#define XSHAL_USER_VECTOR_SIZE 0x0000001C
+#define XSHAL_USER_VECTOR_ISROM 0
+#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
+#define XSHAL_KERNEL_VECTOR_ISROM 0
+#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
+#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0
+#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
+#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180
+#define XSHAL_WINDOW_VECTORS_ISROM 0
+#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
+#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL3_VECTOR_ISROM 0
+#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C
+#define XSHAL_INTLEVEL4_VECTOR_ISROM 1
+#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE
+#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM
+
+
+#endif /*XTENSA_CONFIG_SYSTEM_H*/
+
diff --git a/include/asm-xtensa/xtensa/config-linux_be/tie.h b/include/asm-xtensa/xtensa/config-linux_be/tie.h
new file mode 100644
index 000000000000..3c2e514602f4
--- /dev/null
+++ b/include/asm-xtensa/xtensa/config-linux_be/tie.h
@@ -0,0 +1,275 @@
+/*
+ * xtensa/config/tie.h -- HAL definitions that are dependent on CORE and TIE configuration
+ *
+ * This header file is sometimes referred to as the "compile-time HAL" or CHAL.
+ * It was generated for a specific Xtensa processor configuration,
+ * and furthermore for a specific set of TIE source files that extend
+ * basic core functionality.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL source itself to include this file.
+ */
+
+/*
+ * Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
+ * USA.
+ */
+
+
+#ifndef XTENSA_CONFIG_TIE_H
+#define XTENSA_CONFIG_TIE_H
+
+#include <xtensa/hal.h>
+
+
+/*----------------------------------------------------------------------
+ GENERAL
+ ----------------------------------------------------------------------*/
+
+/*
+ * Separators for macros that expand into arrays.
+ * These can be predefined by files that #include this one,
+ * when different separators are required.
+ */
+/* Element separator for macros that expand into 1-dimensional arrays: */
+#ifndef XCHAL_SEP
+#define XCHAL_SEP ,
+#endif
+/* Array separator for macros that expand into 2-dimensional arrays: */
+#ifndef XCHAL_SEP2
+#define XCHAL_SEP2 },{
+#endif
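+/*
+ * Illustrative sketch (an assumption, not generated output): with the
+ * default comma separator, a *_LIST macro expands directly into a C
+ * initializer.  Assuming the core.h page-size lists are also visible:
+ *
+ *	static const unsigned char dtlb_set1_pagesz_log2[] =
+ *		{ XCHAL_DTLB_SET1_PAGESZ_LOG2_LIST };
+ *
+ * which yields { 20, 22, 24, 26 } for this configuration.
+ */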
+
+
+
+
+
+
+/*----------------------------------------------------------------------
+ COPROCESSORS and EXTRA STATE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_CP_NUM 0 /* number of coprocessors */
+#define XCHAL_CP_MAX 0 /* max coprocessor id plus one (0 if none) */
+#define XCHAL_CP_MASK 0x00 /* bitmask of coprocessors by id */
+
+/* Space for coprocessors' state save areas: */
+#define XCHAL_CP0_SA_SIZE 0
+#define XCHAL_CP1_SA_SIZE 0
+#define XCHAL_CP2_SA_SIZE 0
+#define XCHAL_CP3_SA_SIZE 0
+#define XCHAL_CP4_SA_SIZE 0
+#define XCHAL_CP5_SA_SIZE 0
+#define XCHAL_CP6_SA_SIZE 0
+#define XCHAL_CP7_SA_SIZE 0
+/* Minimum required alignments of CP state save areas: */
+#define XCHAL_CP0_SA_ALIGN 1
+#define XCHAL_CP1_SA_ALIGN 1
+#define XCHAL_CP2_SA_ALIGN 1
+#define XCHAL_CP3_SA_ALIGN 1
+#define XCHAL_CP4_SA_ALIGN 1
+#define XCHAL_CP5_SA_ALIGN 1
+#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_ALIGN 1
+
+/* Indexing macros: */
+#define _XCHAL_CP_SA_SIZE(n) XCHAL_CP ## n ## _SA_SIZE
+#define XCHAL_CP_SA_SIZE(n) _XCHAL_CP_SA_SIZE(n) /* n = 0 .. 7 */
+#define _XCHAL_CP_SA_ALIGN(n) XCHAL_CP ## n ## _SA_ALIGN
+#define XCHAL_CP_SA_ALIGN(n) _XCHAL_CP_SA_ALIGN(n) /* n = 0 .. 7 */
+
+
+/* Space for "extra" state (user special registers and non-cp TIE) save area: */
+#define XCHAL_EXTRA_SA_SIZE 0
+#define XCHAL_EXTRA_SA_ALIGN 1
+
+/* Total save area size (extra + all coprocessors) */
+/* (not useful until xthal_{save,restore}_all_extra() is implemented, */
+/* but included for Tor2 beta; doesn't account for alignment!): */
+#define XCHAL_CPEXTRA_SA_SIZE_TOR2 0 /* Tor2Beta temporary definition -- do not use */
+
+/* Combined required alignment for all CP and EXTRA state save areas */
+/* (does not include required alignment for any base config registers): */
+#define XCHAL_CPEXTRA_SA_ALIGN 1
+
+/* ... */
+
+
+#ifdef _ASMLANGUAGE
+/*
+ * Assembly-language specific definitions (assembly macros, etc.).
+ */
+#include <xtensa/config/specreg.h>
+
+/********************
+ * Macros to save and restore the non-coprocessor TIE portion of EXTRA state.
+ */
+
+/* (none) */
+
+
+/********************
+ * Macros to create functions that save and restore all EXTRA (non-coprocessor) state
+ * (does not include zero-overhead loop registers and non-optional registers).
+ */
+
+ /*
+ * Macro that expands to the body of a function that
+ * stores the extra (non-coprocessor) optional/custom state.
+ * Entry: a2 = ptr to save area in which to save extra state
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_extra_store_funcbody
+ .endm
+
+
+ /*
+ * Macro that expands to the body of a function that
+ * loads the extra (non-coprocessor) optional/custom state.
+ * Entry: a2 = ptr to save area from which to restore extra state
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_extra_load_funcbody
+ .endm
+
+
+/********************
+ * Macros to save and restore the state of each TIE coprocessor.
+ */
+
+
+
+/********************
+ * Macros to create functions that save and restore the state of *any* TIE coprocessor.
+ */
+
+ /*
+ * Macro that expands to the body of a function
+ * that stores the selected coprocessor's state (registers etc).
+ * Entry: a2 = ptr to save area in which to save cp state
+ * a3 = coprocessor number
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_cpi_store_funcbody
+ .endm
+
+
+ /*
+ * Macro that expands to the body of a function
+ * that loads the selected coprocessor's state (registers etc).
+ * Entry: a2 = ptr to save area from which to restore cp state
+ * a3 = coprocessor number
+ * Exit: any register a2-a15 (?) may have been clobbered.
+ */
+ .macro xchal_cpi_load_funcbody
+ .endm
+
+#endif /*_ASMLANGUAGE*/
+
+
+/*
+ * Contents of save areas in terms of libdb register numbers.
+ * NOTE: CONTENTS_LIBDB_{SREG,UREG,REGF} macros are not defined in this file;
+ * it is up to the user of this header file to define these macros
+ * usefully before each expansion of the CONTENTS_LIBDB macros.
+ * (Fields rsv[123] are reserved for future additions; they are currently
+ * set to zero but may be set to some useful values in the future.)
+ *
+ * CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum, bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum, bitmask, rsv2, rsv3)
+ * CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, numentries, contentsize, regname_base, regfile_name, rsv2, rsv3)
+ */
+
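+/*
+ * For illustration only: a user of this header might define the
+ * per-entry macros just before expanding a CONTENTS_LIBDB macro,
+ * e.g. (in a configuration whose contents are nonempty; the entry
+ * type below is hypothetical):
+ *
+ *	#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1, \
+ *				    name,sregnum,bitmask,rsv2,rsv3) \
+ *		{ libdbnum, offset, size },
+ *	struct sa_entry cp0_layout[] = { XCHAL_CP0_SA_CONTENTS_LIBDB };
+ *	#undef CONTENTS_LIBDB_SREG
+ */
+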
+#define XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_EXTRA_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP0_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP0_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP1_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP1_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP2_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP2_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP3_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP3_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP4_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP4_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP5_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP5_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP6_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP6_SA_CONTENTS_LIBDB /* empty */
+
+#define XCHAL_CP7_SA_CONTENTS_LIBDB_NUM 0
+#define XCHAL_CP7_SA_CONTENTS_LIBDB /* empty */
+
+
+
+
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#if 0 /* is there something equivalent for user TIE? */
+#define XCHAL_CORE_ID "linux_be" /* configuration's alphanumeric core identifier
+ (CoreID) set in the Xtensa Processor Generator */
+
+#define XCHAL_BUILD_UNIQUE_ID 0x00003256 /* software build-unique ID (22-bit) */
+
+/* These definitions describe the hardware targeted by this software: */
+#define XCHAL_HW_CONFIGID0 0xC103D1FF /* config ID reg 0 value (upper 32 of 64 bits) */
+#define XCHAL_HW_CONFIGID1 0x00803256 /* config ID reg 1 value (lower 32 of 64 bits) */
+#define XCHAL_CONFIGID0 XCHAL_HW_CONFIGID0 /* for backward compatibility only -- don't use! */
+#define XCHAL_CONFIGID1 XCHAL_HW_CONFIGID1 /* for backward compatibility only -- don't use! */
+#define XCHAL_HW_RELEASE_MAJOR 1050 /* major release of targeted hardware */
+#define XCHAL_HW_RELEASE_MINOR 1 /* minor release of targeted hardware */
+#define XCHAL_HW_RELEASE_NAME "T1050.1" /* full release name of targeted hardware */
+#define XTHAL_HW_REL_T1050 1
+#define XTHAL_HW_REL_T1050_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+#endif /*0*/
+
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#if 0 /* these probably don't belong here, but are related to or implemented using TIE */
+#define XCHAL_HAVE_BOOLEANS 0 /* 1 if booleans option configured, 0 otherwise */
+/* Misc instructions: */
+#define XCHAL_HAVE_MUL32 0 /* 1 if 32-bit integer multiply option configured, 0 otherwise */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* 1 if MUL32 option includes MULUH and MULSH, 0 otherwise */
+
+#define XCHAL_HAVE_FP 0 /* 1 if floating point option configured, 0 otherwise */
+#endif /*0*/
+
+
+#endif /*XTENSA_CONFIG_TIE_H*/
+
diff --git a/include/asm-xtensa/xtensa/coreasm.h b/include/asm-xtensa/xtensa/coreasm.h
new file mode 100644
index 000000000000..a8cfb54c20a1
--- /dev/null
+++ b/include/asm-xtensa/xtensa/coreasm.h
@@ -0,0 +1,526 @@
+#ifndef XTENSA_COREASM_H
+#define XTENSA_COREASM_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/coreasm.h -- assembler-specific
+ * definitions that depend on CORE configuration.
+ *
+ * Source for configuration-independent binaries (which link in a
+ * configuration-specific HAL library) must NEVER include this file.
+ * It is perfectly normal, however, for the HAL itself to include this
+ * file.
+ *
+ * This file must NOT include xtensa/config/system.h. Any assembler
+ * header file that depends on system information should likely go in
+ * a new systemasm.h (or sysasm.h) header file.
+ *
+ * NOTE: macro beqi32 is NOT configuration-dependent, and is placed
+ * here until we have a configuration-independent header file.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/config/core.h>
+#include <xtensa/config/specreg.h>
+
+/*
+ * Assembly-language specific definitions (assembly macros, etc.).
+ */
+
+/*----------------------------------------------------------------------
+ * find_ms_setbit
+ *
+ * This macro finds the most significant bit that is set in <as>
+ * and returns its index + <base> in <ad>, or <base> - 1 if <as> is zero.
+ * The index counts starting at zero for the lsbit, so the return
+ * value ranges from <base>-1 (no bit set) to <base>+31 (msbit set).
+ *
+ * Parameters:
+ * <ad> destination address register (any register)
+ * <as> source address register
+ * <at> temporary address register (must be different than <as>)
+ * <base> constant value added to result (usually 0 or 1)
+ * On entry:
+ * <ad> = undefined if different than <as>
+ * <as> = value whose most significant set bit is to be found
+ * <at> = undefined
+ * no other registers are used by this macro.
+ * On exit:
+ * <ad> = <base> + index of msbit set in original <as>,
+ * = <base> - 1 if original <as> was zero.
+ * <as> clobbered (if not <ad>)
+ * <at> clobbered (if not <ad>)
+ * Example:
+ * find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4
+ */
+
+ .macro find_ms_setbit ad, as, at, base
+#if XCHAL_HAVE_NSA
+ movi \at, 31+\base
+ nsau \as, \as // get index of \as, numbered from msbit (32 if absent)
+ sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent)
+#else /* XCHAL_HAVE_NSA */
+ movi \at, \base // start with result of 0 (point to lsbit of 32)
+
+ beqz \as, 2f // special case for zero argument: return -1
+ bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits)
+ addi \at, \at, 16 // no, increment result to upper 16 bits (of 32)
+ //srli \as, \as, 16 // check upper half (shift right 16 bits)
+ extui \as, \as, 16, 16 // check upper half (shift right 16 bits)
+1: bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits)
+ addi \at, \at, 8 // no, increment result to upper 8 bits (of 16)
+ srli \as, \as, 8 // shift right to check upper 8 bits
+1: bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits)
+ addi \at, \at, 4 // no, increment result to upper 4 bits (of 8)
+ srli \as, \as, 4 // shift right 4 bits to check upper half
+1: bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits)
+ addi \at, \at, 2 // no, increment result to upper 2 bits (of 4)
+ srli \as, \as, 2 // shift right 2 bits to check upper half
+1: bltui \as, 0x2, 1f // is it the lsbit?
+ addi \at, \at, 2 // no, increment result to upper bit (of 2)
+2: addi \at, \at, -1 // (from just above: add 1; from beqz: return -1)
+ //srli \as, \as, 1
+1: // done! \at contains index of msbit set (or -1 if none set)
+ .if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15)
+ mov \ad, \at // then move result to \ad
+ .endif
+#endif /* XCHAL_HAVE_NSA */
+ .endm // find_ms_setbit
+
+/*----------------------------------------------------------------------
+ * find_ls_setbit
+ *
+ * This macro finds the least significant bit that is set in <as>,
+ * and returns its index in <ad>.
+ * Usage is the same as for the find_ms_setbit macro.
+ * Example:
+ * find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4
+ */
+
+ .macro find_ls_setbit ad, as, at, base
+ neg \at, \as // keep only the least-significant bit that is set...
+ and \as, \at, \as // ... in \as
+ find_ms_setbit \ad, \as, \at, \base
+ .endm // find_ls_setbit
+
+/*----------------------------------------------------------------------
+ * find_ls_one
+ *
+ * Same as find_ls_setbit with base zero.
+ * Source (as) and destination (ad) registers must be different.
+ * Provided for backward compatibility.
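+ * Example (illustrative, mirroring find_ls_setbit's usage):
+ *	find_ls_one a0, a4 -- return in a0 index of lsbit set in a4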
+ */
+
+ .macro find_ls_one ad, as
+ find_ls_setbit \ad, \as, \ad, 0
+ .endm // find_ls_one
+
+/*----------------------------------------------------------------------
+ * floop, floopnez, floopgtz, floopend
+ *
+ * These macros are used for fast inner loops that
+ * work whether or not the Loops option is configured.
+ * If the Loops option is configured, they simply use
+ * the zero-overhead LOOP instructions; otherwise
+ * they use explicit decrement and branch instructions.
+ *
+ * They are used in pairs, with floop, floopnez or floopgtz
+ * at the beginning of the loop, and floopend at the end.
+ *
+ * Each pair of loop macro calls must be given the loop count
+ * address register and a unique label for that loop.
+ *
+ * Example:
+ *
+ * movi a3, 16 // loop 16 times
+ * floop a3, myloop1
+ * :
+ * bnez a7, end1 // exit loop if a7 != 0
+ * :
+ * floopend a3, myloop1
+ * end1:
+ *
+ * Like the LOOP instructions, these macros cannot be
+ * nested, must include at least one instruction,
+ * cannot call functions inside the loop, etc.
+ * The loop can be exited by jumping to the instruction
+ * following floopend (or elsewhere outside the loop),
+ * or continued by jumping to a NOP instruction placed
+ * immediately before floopend.
+ *
+ * Unlike LOOP instructions, the register passed to floop*
+ * cannot be used inside the loop, because it is used as
+ * the loop counter if the Loops option is not configured.
+ * Its value is also undefined after exiting the loop.
+ * Because the loop counter register is live inside
+ * the loop, you can't easily use this construct to loop
+ * across a register file using ROTW as you might with LOOP
+ * instructions, unless you copy the loop register along.
+ */
+
+ /* Named label version of the macros: */
+
+ .macro floop ar, endlabel
+ floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopnez ar, endlabel
+ floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopgtz ar, endlabel
+ floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ .macro floopend ar, endlabel
+ floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel
+ .endm
+
+ /* Numbered local label version of the macros: */
+#if 0 /*UNTESTED*/
+ .macro floop89 ar
+ floop_ \ar, 8, 9f
+ .endm
+
+ .macro floopnez89 ar
+ floopnez_ \ar, 8, 9f
+ .endm
+
+ .macro floopgtz89 ar
+ floopgtz_ \ar, 8, 9f
+ .endm
+
+ .macro floopend89 ar
+ floopend_ \ar, 8b, 9
+ .endm
+#endif /*0*/
+
+ /* Underlying version of the macros: */
+
+ .macro floop_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floop cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loop \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floop_
+
+ .macro floopnez_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floopnez cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loopnez \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+ beqz \ar, \endlabelref
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floopnez_
+
+ .macro floopgtz_ ar, startlabel, endlabelref
+ .ifdef _infloop_
+ .if _infloop_
+ .err // Error: floopgtz cannot be nested
+ .endif
+ .endif
+ .set _infloop_, 1
+#if XCHAL_HAVE_LOOPS
+ loopgtz \ar, \endlabelref
+#else /* XCHAL_HAVE_LOOPS */
+ bltz \ar, \endlabelref
+ beqz \ar, \endlabelref
+\startlabel:
+ addi \ar, \ar, -1
+#endif /* XCHAL_HAVE_LOOPS */
+ .endm // floopgtz_
+
+
+ .macro floopend_ ar, startlabelref, endlabel
+ .ifndef _infloop_
+ .err // Error: floopend without matching floopXXX
+ .endif
+ .ifeq _infloop_
+ .err // Error: floopend without matching floopXXX
+ .endif
+ .set _infloop_, 0
+#if ! XCHAL_HAVE_LOOPS
+ bnez \ar, \startlabelref
+#endif /* XCHAL_HAVE_LOOPS */
+\endlabel:
+ .endm // floopend_
+
+/*----------------------------------------------------------------------
+ * crsil -- conditional RSIL (read/set interrupt level)
+ *
+ * Executes the RSIL instruction if it exists, else just reads PS.
+ * The RSIL instruction does not exist in the new exception architecture
+ * if the interrupt option is not selected.
+ */
+
+ .macro crsil ar, newlevel
+#if XCHAL_HAVE_OLD_EXC_ARCH || XCHAL_HAVE_INTERRUPTS
+ rsil \ar, \newlevel
+#else
+ rsr \ar, PS
+#endif
+ .endm // crsil
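+
+	// For illustration only (not part of this file): a critical-
+	// section sketch around crsil, using a2 as scratch and level 15
+	// as an assumed maximum interrupt level:
+	//	crsil	a2, 15		// a2 = old PS; ints masked if RSIL exists
+	//	...critical section...
+	//	wsr	a2, PS		// restore previous PS
+	//	rsync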
+
+/*----------------------------------------------------------------------
+ * window_spill{4,8,12}
+ *
+ * These macros spill callers' register windows to the stack.
+ * They work for both privileged and non-privileged tasks.
+ * Must be called from a windowed ABI context, eg. within
+ * a windowed ABI function (ie. valid stack frame, window
+ * exceptions enabled, not in exception mode, etc).
+ *
+ * This macro requires a single invocation of the window_spill_common
+ * macro in the same assembly unit and section.
+ *
+ * Note that using window_spill{4,8,12} macros is more efficient
+ * than calling a function implemented using window_spill_function,
+ * because the latter needs extra code to figure out the size of
+ * the call to the spilling function.
+ *
+ * Example usage:
+ *
+ * .text
+ * .align 4
+ * .global some_function
+ * .type some_function,@function
+ * some_function:
+ * entry a1, 16
+ * :
+ * :
+ *
+ * window_spill4 // spill windows of some_function's callers; preserves a0..a3 only;
+ * // to use window_spill{8,12} in this example function we'd have
+ * // to increase space allocated by the entry instruction, because
+ * // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed
+ * // for call8/window_spill8 or call12/window_spill12 respectively.
+ * :
+ *
+ * retw
+ *
+ * window_spill_common // instantiates code used by window_spill4
+ *
+ *
+ * On entry:
+ * none (if window_spill4)
+ * stack frame has enough space allocated for call8 (if window_spill8)
+ * stack frame has enough space allocated for call12 (if window_spill12)
+ * On exit:
+ * a4..a15 clobbered (if window_spill4)
+ * a8..a15 clobbered (if window_spill8)
+ * a12..a15 clobbered (if window_spill12)
+ * no caller windows are in live registers
+ */
+
+ .macro window_spill4
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill4
+
+ .macro window_spill8
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill8
+
+ .macro window_spill12
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 16
+ movi a15, 0 // for 16-register files, no need to call to reach the end
+# elif XCHAL_NUM_AREGS == 32
+ call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers
+# elif XCHAL_NUM_AREGS == 64
+ call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers
+# endif
+#endif
+ .endm // window_spill12
+
+/*----------------------------------------------------------------------
+ * window_spill_function
+ *
+ * This macro outputs a function that will spill its caller's callers'
+ * register windows to the stack. Eg. it could be used to implement
+ * a version of xthal_window_spill() that works in non-privileged tasks.
+ * This works for both privileged and non-privileged tasks.
+ *
+ * Typical usage:
+ *
+ * .text
+ * .align 4
+ * .global my_spill_function
+ * .type my_spill_function,@function
+ * my_spill_function:
+ * window_spill_function
+ *
+ * On entry to resulting function:
+ * none
+ * On exit from resulting function:
+ * none (no caller windows are in live registers)
+ */
+
+ .macro window_spill_function
+#if XCHAL_HAVE_WINDOWED
+# if XCHAL_NUM_AREGS == 32
+ entry sp, 48
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ call8 .L__wdwspill_assist16 // called with call8, only need another 8
+ retw
+1: call12 .L__wdwspill_assist16 // called with call4, only need another 12
+ retw
+2: call4 .L__wdwspill_assist16 // called with call12, only need another 4
+ retw
+# elif XCHAL_NUM_AREGS == 64
+ entry sp, 48
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ call4 .L__wdwspill_assist52 // called with call8, only need a call4
+ retw
+1: call8 .L__wdwspill_assist52 // called with call4, only need a call8
+ retw
+2: call12 .L__wdwspill_assist40 // called with call12, can skip a call12
+ retw
+# elif XCHAL_NUM_AREGS == 16
+ entry sp, 16
+ bbci.l a0, 31, 1f // branch if called with call4
+ bbsi.l a0, 30, 2f // branch if called with call12
+ movi a7, 0 // called with call8
+ retw
+1: movi a11, 0 // called with call4
+2: retw // if called with call12, everything already spilled
+
+// movi a15, 0 // trick to spill all but the direct caller
+// j 1f
+// // The entry instruction is magical in the assembler (gets auto-aligned)
+// // so we have to jump to it to avoid falling through the padding.
+// // We need entry/retw to know where to return.
+//1: entry sp, 16
+// retw
+# else
+# error "unrecognized address register file size"
+# endif
+#endif /* XCHAL_HAVE_WINDOWED */
+ window_spill_common
+ .endm // window_spill_function
+
+/*----------------------------------------------------------------------
+ * window_spill_common
+ *
+ * Common code used by any number of invocations of the window_spill##
+ * and window_spill_function macros.
+ *
+ * Must be instantiated exactly once within a given assembly unit,
+ * within call/j range of and same section as window_spill##
+ * macro invocations for that assembly unit.
+ * (Is automatically instantiated by the window_spill_function macro.)
+ */
+
+ .macro window_spill_common
+#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64)
+ .ifndef .L__wdwspill_defined
+# if XCHAL_NUM_AREGS >= 64
+.L__wdwspill_assist60:
+ entry sp, 32
+ call8 .L__wdwspill_assist52
+ retw
+.L__wdwspill_assist56:
+ entry sp, 16
+ call4 .L__wdwspill_assist52
+ retw
+.L__wdwspill_assist52:
+ entry sp, 48
+ call12 .L__wdwspill_assist40
+ retw
+.L__wdwspill_assist40:
+ entry sp, 48
+ call12 .L__wdwspill_assist28
+ retw
+# endif
+.L__wdwspill_assist28:
+ entry sp, 48
+ call12 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist24:
+ entry sp, 32
+ call8 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist20:
+ entry sp, 16
+ call4 .L__wdwspill_assist16
+ retw
+.L__wdwspill_assist16:
+ entry sp, 16
+ movi a15, 0
+ retw
+ .set .L__wdwspill_defined, 1
+ .endif
+#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */
+ .endm // window_spill_common
+
+/*----------------------------------------------------------------------
+ * beqi32
+ *
+ * This macro implements a version of beqi for an arbitrary 32-bit
+ * immediate value.
+ *
+ * beqi32 ax, ay, imm32, label
+ *
+ * Compares the value in register ax with the imm32 value and jumps to
+ * label if they are equal. Clobbers register ay if needed.
+ *
+ */
+ .macro beqi32 ax, ay, imm, label
+ .ifeq ((\imm-1) & ~7) // 1..8 ?
+ beqi \ax, \imm, \label
+ .else
+ .ifeq (\imm+1) // -1 ?
+ beqi \ax, \imm, \label
+ .else
+ .ifeq (\imm) // 0 ?
+ beqz \ax, \label
+ .else
+ // We could also handle immediates 10,12,16,32,64,128,256
+ // but it would be a long macro...
+ movi \ay, \imm
+ beq \ax, \ay, \label
+ .endif
+ .endif
+ .endif
+ .endm // beqi32
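+
+	// For illustration only: branch to .Lmatch if a3 equals an
+	// arbitrary 32-bit constant, using a4 as scratch (clobbered
+	// only when the constant doesn't fit a beqi immediate):
+	//	beqi32	a3, a4, 0x12345678, .Lmatch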
+
+#endif /*XTENSA_COREASM_H*/
+
diff --git a/include/asm-xtensa/xtensa/corebits.h b/include/asm-xtensa/xtensa/corebits.h
new file mode 100644
index 000000000000..e578ade41632
--- /dev/null
+++ b/include/asm-xtensa/xtensa/corebits.h
@@ -0,0 +1,77 @@
+#ifndef XTENSA_COREBITS_H
+#define XTENSA_COREBITS_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * xtensa/corebits.h - Xtensa Special Register field positions and masks.
+ *
+ * (In previous releases, these were defined in specreg.h, a generated file.
+ * This file is not generated, i.e. it is processor configuration independent.)
+ */
+
+
+/* EXCCAUSE register fields: */
+#define EXCCAUSE_EXCCAUSE_SHIFT 0
+#define EXCCAUSE_EXCCAUSE_MASK 0x3F
+/* Exception causes (mostly incomplete!): */
+#define EXCCAUSE_ILLEGAL 0
+#define EXCCAUSE_SYSCALL 1
+#define EXCCAUSE_IFETCHERROR 2
+#define EXCCAUSE_LOADSTOREERROR 3
+#define EXCCAUSE_LEVEL1INTERRUPT 4
+#define EXCCAUSE_ALLOCA 5
+
+/* PS register fields: */
+#define PS_WOE_SHIFT 18
+#define PS_WOE_MASK 0x00040000
+#define PS_WOE PS_WOE_MASK
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_MASK 0x00030000
+#define PS_CALLINC(n) (((n)&3)<<PS_CALLINC_SHIFT) /* n = 0..3 */
+#define PS_OWB_SHIFT 8
+#define PS_OWB_MASK 0x00000F00
+#define PS_OWB(n) (((n)&15)<<PS_OWB_SHIFT) /* n = 0..15 (or 0..7) */
+#define PS_RING_SHIFT 6
+#define PS_RING_MASK 0x000000C0
+#define PS_RING(n) (((n)&3)<<PS_RING_SHIFT) /* n = 0..3 */
+#define PS_UM_SHIFT 5
+#define PS_UM_MASK 0x00000020
+#define PS_UM PS_UM_MASK
+#define PS_EXCM_SHIFT 4
+#define PS_EXCM_MASK 0x00000010
+#define PS_EXCM PS_EXCM_MASK
+#define PS_INTLEVEL_SHIFT 0
+#define PS_INTLEVEL_MASK 0x0000000F
+#define PS_INTLEVEL(n) ((n)&PS_INTLEVEL_MASK) /* n = 0..15 */
+/* Backward compatibility (deprecated): */
+#define PS_PROGSTACK_SHIFT PS_UM_SHIFT
+#define PS_PROGSTACK_MASK PS_UM_MASK
+#define PS_PROG_SHIFT PS_UM_SHIFT
+#define PS_PROG_MASK PS_UM_MASK
+#define PS_PROG PS_UM
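+
+/*
+ * For illustration only (not part of this file): an OS might compose
+ * an initial PS value for a windowed, user-mode, ring-0, interrupts-
+ * enabled context from the fields above:
+ *
+ *	unsigned ps = PS_WOE | PS_UM | PS_RING(0) | PS_INTLEVEL(0);
+ */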
+
+/* DBREAKCn register fields: */
+#define DBREAKC_MASK_SHIFT 0
+#define DBREAKC_MASK_MASK 0x0000003F
+#define DBREAKC_LOADBREAK_SHIFT 30
+#define DBREAKC_LOADBREAK_MASK 0x40000000
+#define DBREAKC_STOREBREAK_SHIFT 31
+#define DBREAKC_STOREBREAK_MASK 0x80000000
+
+/* DEBUGCAUSE register fields: */
+#define DEBUGCAUSE_DEBUGINT_SHIFT 5
+#define DEBUGCAUSE_DEBUGINT_MASK 0x20 /* debug interrupt */
+#define DEBUGCAUSE_BREAKN_SHIFT 4
+#define DEBUGCAUSE_BREAKN_MASK 0x10 /* BREAK.N instruction */
+#define DEBUGCAUSE_BREAK_SHIFT 3
+#define DEBUGCAUSE_BREAK_MASK 0x08 /* BREAK instruction */
+#define DEBUGCAUSE_DBREAK_SHIFT 2
+#define DEBUGCAUSE_DBREAK_MASK 0x04 /* DBREAK match */
+#define DEBUGCAUSE_IBREAK_SHIFT 1
+#define DEBUGCAUSE_IBREAK_MASK 0x02 /* IBREAK match */
+#define DEBUGCAUSE_ICOUNT_SHIFT 0
+#define DEBUGCAUSE_ICOUNT_MASK 0x01 /* ICOUNT would increment to zero */
+
+#endif /*XTENSA_COREBITS_H*/
+
diff --git a/include/asm-xtensa/xtensa/hal.h b/include/asm-xtensa/xtensa/hal.h
new file mode 100644
index 000000000000..d10472505454
--- /dev/null
+++ b/include/asm-xtensa/xtensa/hal.h
@@ -0,0 +1,822 @@
+#ifndef XTENSA_HAL_H
+#define XTENSA_HAL_H
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/hal.h -- contains a definition of the
+ * Core HAL interface.
+ *
+ * All definitions in this header file are independent of any specific
+ * Xtensa processor configuration. Thus an OS or other software can
+ * include this header file and be compiled into configuration-
+ * independent objects that can be distributed and eventually linked
+ * to the HAL library (libhal.a) to create a configuration-specific
+ * final executable.
+ *
+ * Certain definitions, however, are release-specific -- such as the
+ * XTHAL_RELEASE_xxx macros (or additions made in later releases).
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*----------------------------------------------------------------------
+ Constant Definitions
+ (shared with assembly)
+ ----------------------------------------------------------------------*/
+
+/* Software release information (not configuration-specific!): */
+#define XTHAL_RELEASE_MAJOR 1050
+#define XTHAL_RELEASE_MINOR 0
+#define XTHAL_RELEASE_NAME "T1050.0-2002-08-06-eng0"
+#define XTHAL_RELEASE_INTERNAL "2002-08-06-eng0"
+#define XTHAL_REL_T1050 1
+#define XTHAL_REL_T1050_0 1
+#define XTHAL_REL_T1050_0_2002 1
+#define XTHAL_REL_T1050_0_2002_08 1
+#define XTHAL_REL_T1050_0_2002_08_06 1
+#define XTHAL_REL_T1050_0_2002_08_06_ENG0 1
+
+/* HAL version numbers (these names are for backward compatibility): */
+#define XTHAL_MAJOR_REV XTHAL_RELEASE_MAJOR
+#define XTHAL_MINOR_REV XTHAL_RELEASE_MINOR
+/*
+ * A bit of software release history on values of XTHAL_{MAJOR,MINOR}_REV:
+ *
+ * Release MAJOR MINOR Comment
+ * ======= ===== ===== =======
+ * T1015.n n/a n/a (HAL not yet available)
+ * T1020.{0,1,2} 0 1 (HAL beta)
+ * T1020.{3,4} 0 2 First release.
+ * T1020.n (n>4) 0 2 or >3 (TBD)
+ * T1030.0 0 1 (HAL beta)
+ * T1030.{1,2} 0 3 Equivalent to first release.
+ * T1030.n (n>=3) 0 >= 3 (TBD)
+ * T1040.n 1040 n Full CHAL available from T1040.2
+ * T1050.n 1050 n Current release.
+ *
+ *
+ * Note: there is a distinction between the software release with
+ * which something is compiled (accessible using XTHAL_RELEASE_* macros)
+ * and the software release with which the HAL library was compiled
+ * (accessible using Xthal_release_* global variables). This
+ * distinction is particularly relevant for vendors that distribute
+ * configuration-independent binaries (eg. an OS), where their customer
+ * might link it with a HAL of a different Xtensa software release.
+ * In this case, it may be appropriate for the OS to verify at run-time
+ * whether XTHAL_RELEASE_* and Xthal_release_* are compatible.
+ * [Guidelines as to which release is compatible with which are not
+ * currently provided explicitly, but might be inferred from reading
+ * OSKit documentation for all releases -- compatibility is also highly
+ * dependent on which HAL features are used. Each release is usually
+ * backward compatible, with very few exceptions if any.]
+ *
+ * Notes:
+ * Tornado 2.0 supported in T1020.3+, T1030.1+, and T1040.{0,1} only.
+ * Tornado 2.0.2 supported in T1040.2+, and T1050.
+ * Compile-time HAL port of NucleusPlus supported by T1040.2+ and T1050.
+ */
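+
+/*
+ * For illustration only: the run-time compatibility check suggested
+ * above might look like this in an OS (a sketch; the failure handler
+ * is hypothetical):
+ *
+ *	if (Xthal_release_major != XTHAL_RELEASE_MAJOR)
+ *		handle_hal_release_mismatch();
+ */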
+
+
+/*
+ * Architectural limits, independent of configuration.
+ * Note that these are ISA-defined limits, not micro-architecture implementation
+ * limits enforced by the Xtensa Processor Generator (which may be stricter than
+ * these below).
+ */
+#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */
+#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */
+#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */
+ /* (as of T1040, implementation limit is 7: 0..6) */
+#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */
+ /* (as of T1040, implementation limit is 3: 0..2) */
+
+/* Misc: */
+#define XTHAL_LITTLEENDIAN 0
+#define XTHAL_BIGENDIAN 1
+
+
+/* Interrupt types: */
+#define XTHAL_INTTYPE_UNCONFIGURED 0
+#define XTHAL_INTTYPE_SOFTWARE 1
+#define XTHAL_INTTYPE_EXTERN_EDGE 2
+#define XTHAL_INTTYPE_EXTERN_LEVEL 3
+#define XTHAL_INTTYPE_TIMER 4
+#define XTHAL_INTTYPE_NMI 5
+#define XTHAL_MAX_INTTYPES 6 /* number of interrupt types */
+
+/* Timer related: */
+#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */
+#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */
+
+
+/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */
+#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none - generate exception on any access (aka "illegal") */
+#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI) - use cache on hit -- way from tag match [or H HC, or U UC] (ISA: same, except for Isolate case) */
+#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none - refill cache on miss -- way from LRU [or F FI fill] (ISA: Read/Write Miss Refill) */
+#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT - store immediately to memory (ISA: same) */
+#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none - use cache regardless of hit-vs-miss -- way from vaddr (ISA: use-cache-on-miss+hit) */
+#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G * - non-speculative; spec/replay refs not permitted */
+#if 0
+#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G * - mem accesses cannot be out of order */
+#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none - allow combining/merging multiple writes (to same datapath data unit) into one (implied by writeback) */
+#define XTHAL_AMB_COHERENT x /* 000 M MC fl?: Mem/MP Coherent M - on reads, other CPUs/bus-masters may need to supply data */
+#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none - memory will not bus error (if it does, handle as fatal imprecise interrupt) */
+#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none - on refill, read line+1 into prefetch buffers */
+#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none - access one of N stream buffers */
+#endif /*0*/
+
+#define XTHAL_AM_EXCEPTION (1<<XTHAL_AMB_EXCEPTION)
+#define XTHAL_AM_HITCACHE (1<<XTHAL_AMB_HITCACHE)
+#define XTHAL_AM_ALLOCATE (1<<XTHAL_AMB_ALLOCATE)
+#define XTHAL_AM_WRITETHRU (1<<XTHAL_AMB_WRITETHRU)
+#define XTHAL_AM_ISOLATE (1<<XTHAL_AMB_ISOLATE)
+#define XTHAL_AM_GUARD (1<<XTHAL_AMB_GUARD)
+#if 0
+#define XTHAL_AM_ORDERED (1<<XTHAL_AMB_ORDERED)
+#define XTHAL_AM_FUSEWRITES (1<<XTHAL_AMB_FUSEWRITES)
+#define XTHAL_AM_COHERENT (1<<XTHAL_AMB_COHERENT)
+#define XTHAL_AM_TRUSTED (1<<XTHAL_AMB_TRUSTED)
+#define XTHAL_AM_PREFETCH (1<<XTHAL_AMB_PREFETCH)
+#define XTHAL_AM_STREAM (1<<XTHAL_AMB_STREAM)
+#endif /*0*/
+
+/*
+ * Allowed Access Modes (bit combinations).
+ *
+ * Columns are:
+ * "FOGIWACE"
+ * Access mode bits (see XTHAL_AMB_xxx above).
+ * <letter> = bit is set
+ * '-' = bit is clear
+ * '.' = bit is irrelevant / don't care, as follows:
+ * E=1 makes all others irrelevant
+ * W,F relevant only for stores
+ * "2345"
+ * Indicates which Xtensa releases support the corresponding
+ * access mode. Releases for each character column are:
+ * 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1
+ * 3 = T1020.2 and later: T1020.2+, T1030
+ * 4 = T1040
+ * 5 = T1050 (maybe)
+ * And the character column contents are:
+ * <number> = support by release(s)
+ * "." = unsupported by release(s)
+ * "?" = support unknown
+ */
+ /* FOGIWACE 2345 */
+/* For instruction fetch: */
+#define XTHAL_FAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_FAM_ISOLATE 0x012 /* .--I.-C- .... isolate */
+#define XTHAL_FAM_BYPASS 0x000 /* .---.--- 2345 bypass */
+#define XTHAL_FAM_NACACHED 0x002 /* .---.-C- .... cached no-allocate (frozen) */
+#define XTHAL_FAM_CACHED 0x006 /* .---.AC- 2345 cached */
+/* For data load: */
+#define XTHAL_LAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_LAM_ISOLATE 0x012 /* .--I.-C- 2345 isolate */
+#define XTHAL_LAM_BYPASS 0x000 /* .O--.--- 2... bypass speculative */
+#define XTHAL_LAM_BYPASSG 0x020 /* .OG-.--- .345 bypass guarded */
+#define XTHAL_LAM_NACACHED 0x002 /* .O--.-C- 2... cached no-allocate speculative */
+#define XTHAL_LAM_NACACHEDG 0x022 /* .OG-.-C- .345 cached no-allocate guarded */
+#define XTHAL_LAM_CACHED 0x006 /* .---.AC- 2345 cached speculative */
+#define XTHAL_LAM_CACHEDG 0x026 /* .?G-.AC- .... cached guarded */
+/* For data store: */
+#define XTHAL_SAM_EXCEPTION 0x001 /* .......E 2345 exception */
+#define XTHAL_SAM_ISOLATE 0x032 /* .-GI--C- 2345 isolate */
+#define XTHAL_SAM_BYPASS 0x028 /* -OG-W--- 2345 bypass */
+/*efine XTHAL_SAM_BYPASSF 0x028*/ /* F-G-W--- ...? bypass write-combined */
+#define XTHAL_SAM_WRITETHRU 0x02A /* -OG-W-C- 234? writethrough */
+/*efine XTHAL_SAM_WRITETHRUF 0x02A*/ /* F-G-W-C- ...5 writethrough write-combined */
+#define XTHAL_SAM_WRITEALLOC 0x02E /* -OG-WAC- ...? writethrough-allocate */
+/*efine XTHAL_SAM_WRITEALLOCF 0x02E*/ /* F-G-WAC- ...? writethrough-allocate write-combined */
+#define XTHAL_SAM_WRITEBACK 0x026 /* F-G--AC- ...5 writeback */
+
+#if 0
+/*
+ Cache attribute encoding for CACHEATTR (per ISA):
+ (Note: if this differs from ISA Ref Manual, ISA has precedence)
+
+ Inst-fetches Loads Stores
+ ------------- ------------ -------------
+0x0 FCA_EXCEPTION ?LCA_NACACHED_G* SCA_WRITETHRU "uncached"
+0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached
+0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass
+0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate
+ or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
+0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK write-back
+ or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented)
+0x5..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved)
+0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate
+0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal
+ * Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G)
+*/
+#endif /*0*/
+
+
+#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*----------------------------------------------------------------------
+ HAL
+ ----------------------------------------------------------------------*/
+
+/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */
+extern const unsigned int Xthal_rev_no;
+
+
+/*----------------------------------------------------------------------
+ Processor State
+ ----------------------------------------------------------------------*/
+/* save & restore the extra processor state */
+extern void xthal_save_extra(void *base);
+extern void xthal_restore_extra(void *base);
+
+extern void xthal_save_cpregs(void *base, int);
+extern void xthal_restore_cpregs(void *base, int);
+
+/*extern void xthal_save_all_extra(void *base);*/
+/*extern void xthal_restore_all_extra(void *base);*/
+
+/* space for processor state */
+extern const unsigned int Xthal_extra_size;
+extern const unsigned int Xthal_extra_align;
+/* space for TIE register files */
+extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS];
+extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS];
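+
+/*
+ * For illustration only (a sketch, not HAL-defined code): saving one
+ * coprocessor's registers into a hypothetical per-task buffer that
+ * was allocated with Xthal_cpregs_size[cp] bytes at alignment
+ * Xthal_cpregs_align[cp]:
+ *
+ *	xthal_save_cpregs(task->cp_save[cp], cp);
+ */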
+
+/* total space for the processor state (for Tor2) */
+extern const unsigned int Xthal_all_extra_size;
+extern const unsigned int Xthal_all_extra_align;
+
+/* initialize the extra processor state */
+/*extern void xthal_init_extra(void);*/
+/* initialize a TIE coprocessor's state */
+/*extern void xthal_init_cp(int);*/
+
+/* initialize the extra processor state save area in memory */
+extern void xthal_init_mem_extra(void *);
+/* initialize a TIE coprocessor's state save area in memory */
+extern void xthal_init_mem_cp(void *, int);
+
+/* validate & invalidate the TIE register file */
+extern void xthal_validate_cp(int);
+extern void xthal_invalidate_cp(int);
+
+/* the number of TIE coprocessors contiguous from zero (for Tor2) */
+extern const unsigned int Xthal_num_coprocessors;
+
+/* actual number of coprocessors */
+extern const unsigned char Xthal_cp_num;
+/* index of highest numbered coprocessor, plus one */
+extern const unsigned char Xthal_cp_max;
+/* index of highest allowed coprocessor number, per cfg, plus one */
+/*extern const unsigned char Xthal_cp_maxcfg;*/
+/* bitmask of which coprocessors are present */
+extern const unsigned int Xthal_cp_mask;
+
+/* read and write cpenable register */
+extern void xthal_set_cpenable(unsigned);
+extern unsigned xthal_get_cpenable(void);
+
+/* read & write extra state register */
+/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/
+/*extern int xthal_write_extra(void *base, unsigned reg, unsigned value);*/
+
+/* read & write a TIE coprocessor register */
+/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/
+/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/
+
+/* return coprocessor number based on register */
+/*extern int xthal_which_cp(unsigned reg);*/
+
+/*----------------------------------------------------------------------
+ Interrupts
+ ----------------------------------------------------------------------*/
+
+/* the number of interrupt levels */
+extern const unsigned char Xthal_num_intlevels;
+/* the number of interrupts */
+extern const unsigned char Xthal_num_interrupts;
+
+/* mask of interrupts at each level */
+extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS];
+/* mask of all interrupts at levels 0 through n */
+extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS];
+
+/* level of each interrupt */
+extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS];
+
+/* type per interrupt */
+extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS];
+
+/* masks of each type of interrupt */
+extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES];
+
+/* interrupt numbers assigned to each timer interrupt */
+extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS];
+
+/*** Virtual interrupt prioritization: ***/
+
+/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */
+extern unsigned xthal_vpri_to_intlevel(unsigned vpri);
+extern unsigned xthal_intlevel_to_vpri(unsigned intlevel);
+
+/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */
+extern unsigned xthal_int_enable(unsigned);
+extern unsigned xthal_int_disable(unsigned);
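+
+/*
+ * For illustration only: masking a single interrupt around a critical
+ * section (sketch; "intnum" is a caller-chosen interrupt number):
+ *
+ *	unsigned was = xthal_int_disable(1 << intnum);
+ *	...critical section...
+ *	xthal_int_enable(was & (1 << intnum));
+ */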
+
+/* Set/get virtual priority of an interrupt: */
+extern int xthal_set_int_vpri(int intnum, int vpri);
+extern int xthal_get_int_vpri(int intnum);
+
+/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */
+extern void xthal_set_vpri_locklevel(unsigned intlevel);
+extern unsigned xthal_get_vpri_locklevel(void);
+
+/* Set/get current virtual interrupt priority: */
+extern unsigned xthal_set_vpri(unsigned vpri);
+extern unsigned xthal_get_vpri(unsigned vpri);
+extern unsigned xthal_set_vpri_intlevel(unsigned intlevel);
+extern unsigned xthal_set_vpri_lock(void);
+
+
+
+/*----------------------------------------------------------------------
+ Generic Interrupt Trampolining Support
+ ----------------------------------------------------------------------*/
+
+typedef void (XtHalVoidFunc)(void);
+
+/*
+ * Bitmask of interrupts currently trampolining down:
+ */
+extern unsigned Xthal_tram_pending;
+
+/*
+ * Bitmask of which interrupts currently trampolining down
+ * synchronously are actually enabled; this bitmask is necessary
+ * because INTENABLE cannot hold that state (sync-trampolining
+ * interrupts must be kept disabled while trampolining);
+ * in the current implementation, any bit set here is not set
+ * in INTENABLE, and vice-versa; once a sync-trampoline is
+ * handled (at level one), its enable bit must be moved from
+ * here to INTENABLE:
+ */
+extern unsigned Xthal_tram_enabled;
+
+/*
+ * Bitmask of interrupts configured for sync trampolining:
+ */
+extern unsigned Xthal_tram_sync;
+
+
+/* Trampoline support functions: */
+extern unsigned xthal_tram_pending_to_service( void );
+extern void xthal_tram_done( unsigned serviced_mask );
+extern int xthal_tram_set_sync( int intnum, int sync );
+extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn );
+
+/* INTENABLE,INTREAD,INTSET,INTCLEAR register access functions: */
+extern unsigned xthal_get_intenable( void );
+extern void xthal_set_intenable( unsigned );
+extern unsigned xthal_get_intread( void );
+extern void xthal_set_intset( unsigned );
+extern void xthal_set_intclear( unsigned );
+
+
+/*----------------------------------------------------------------------
+ Register Windows
+ ----------------------------------------------------------------------*/
+
+/* number of address registers (size of the physical register file) */
+extern const unsigned int Xthal_num_aregs;
+extern const unsigned char Xthal_num_aregs_log2;
+
+/* This spills any live register windows (other than the caller's): */
+extern void xthal_window_spill( void );
+
+
+/*----------------------------------------------------------------------
+ Cache
+ ----------------------------------------------------------------------*/
+
+/* size of the cache lines in log2(bytes) */
+extern const unsigned char Xthal_icache_linewidth;
+extern const unsigned char Xthal_dcache_linewidth;
+/* size of the cache lines in bytes */
+extern const unsigned short Xthal_icache_linesize;
+extern const unsigned short Xthal_dcache_linesize;
+/* number of cache sets in log2(lines per way) */
+extern const unsigned char Xthal_icache_setwidth;
+extern const unsigned char Xthal_dcache_setwidth;
+/* cache set associativity (number of ways) */
+extern const unsigned int Xthal_icache_ways;
+extern const unsigned int Xthal_dcache_ways;
+/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */
+extern const unsigned int Xthal_icache_size;
+extern const unsigned int Xthal_dcache_size;
+/* cache features */
+extern const unsigned char Xthal_dcache_is_writeback;
+extern const unsigned char Xthal_icache_line_lockable;
+extern const unsigned char Xthal_dcache_line_lockable;
+
+/* cache attribute register control (used by other HAL routines) */
+extern unsigned xthal_get_cacheattr( void );
+extern unsigned xthal_get_icacheattr( void );
+extern unsigned xthal_get_dcacheattr( void );
+extern void xthal_set_cacheattr( unsigned );
+extern void xthal_set_icacheattr( unsigned );
+extern void xthal_set_dcacheattr( unsigned );
+
+/* initialize cache support (must be called once at startup, before all other cache calls) */
+/*extern void xthal_cache_startinit( void );*/
+/* reset caches */
+/*extern void xthal_icache_reset( void );*/
+/*extern void xthal_dcache_reset( void );*/
+/* enable caches */
+extern void xthal_icache_enable( void ); /* DEPRECATED */
+extern void xthal_dcache_enable( void ); /* DEPRECATED */
+/* disable caches */
+extern void xthal_icache_disable( void ); /* DEPRECATED */
+extern void xthal_dcache_disable( void ); /* DEPRECATED */
+
+/* invalidate the caches */
+extern void xthal_icache_all_invalidate( void );
+extern void xthal_dcache_all_invalidate( void );
+extern void xthal_icache_region_invalidate( void *addr, unsigned size );
+extern void xthal_dcache_region_invalidate( void *addr, unsigned size );
+extern void xthal_icache_line_invalidate(void *addr);
+extern void xthal_dcache_line_invalidate(void *addr);
+/* write dirty data back */
+extern void xthal_dcache_all_writeback( void );
+extern void xthal_dcache_region_writeback( void *addr, unsigned size );
+extern void xthal_dcache_line_writeback(void *addr);
+/* write dirty data back and invalidate */
+extern void xthal_dcache_all_writeback_inv( void );
+extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size );
+extern void xthal_dcache_line_writeback_inv(void *addr);
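+
+/*
+ * For illustration only: a typical DMA-buffer sequence built on the
+ * region calls above (sketch; "buf" and "len" are hypothetical):
+ *
+ *	xthal_dcache_region_writeback(buf, len);	// device sees CPU writes
+ *	...run DMA, wait for completion...
+ *	xthal_dcache_region_invalidate(buf, len);	// CPU sees device writes
+ */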
+/* prefetch and lock specified memory range into cache */
+extern void xthal_icache_region_lock( void *addr, unsigned size );
+extern void xthal_dcache_region_lock( void *addr, unsigned size );
+extern void xthal_icache_line_lock(void *addr);
+extern void xthal_dcache_line_lock(void *addr);
+/* unlock from cache */
+extern void xthal_icache_all_unlock( void );
+extern void xthal_dcache_all_unlock( void );
+extern void xthal_icache_region_unlock( void *addr, unsigned size );
+extern void xthal_dcache_region_unlock( void *addr, unsigned size );
+extern void xthal_icache_line_unlock(void *addr);
+extern void xthal_dcache_line_unlock(void *addr);
+
+
+/* sync icache and memory */
+extern void xthal_icache_sync( void );
+/* sync dcache and memory */
+extern void xthal_dcache_sync( void );
+
+/*----------------------------------------------------------------------
+ Debug
+ ----------------------------------------------------------------------*/
+
+/* 1 if debug option configured, 0 if not: */
+extern const int Xthal_debug_configured;
+
+/* Number of instruction and data break registers: */
+extern const int Xthal_num_ibreak;
+extern const int Xthal_num_dbreak;
+
+/* Set (plant) and remove software breakpoint, both synchronizing cache: */
+extern unsigned int xthal_set_soft_break(void *addr);
+extern void xthal_remove_soft_break(void *addr, unsigned int);
+
+
+/*----------------------------------------------------------------------
+ Disassembler
+ ----------------------------------------------------------------------*/
+
+/* Max expected size of the return buffer for a disassembled instruction (hint only): */
+#define XTHAL_DISASM_BUFSIZE 80
+
+/* Disassembly option bits for selecting what to return: */
+#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */
+#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */
+#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */
+#define XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */
+#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */
+
+/* routine to get a string for the disassembled instruction */
+extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr,
+ char *buffer, unsigned buflen, unsigned options );
+
+/* routine to get the size of the next instruction. Returns 0 for
+ illegal instruction */
+extern int xthal_disassemble_size( unsigned char *instr_buf );
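+
+/*
+ * For illustration only: disassembling the instruction at "pc" into a
+ * local buffer (sketch; assumes the bytes at pc are readable):
+ *
+ *	char buf[XTHAL_DISASM_BUFSIZE];
+ *	xthal_disassemble((unsigned char *)pc, (void *)pc, buf,
+ *			  sizeof(buf), XTHAL_DISASM_OPT_ALL);
+ */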
+
+
+/*----------------------------------------------------------------------
+ Core Counter
+ ----------------------------------------------------------------------*/
+
+/* counter info */
+extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */
+extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */
+
+/* get CCOUNT register (if not present return 0) */
+extern unsigned xthal_get_ccount(void);
+
+/* set and get CCOMPAREn registers (if not present, get returns 0) */
+extern void xthal_set_ccompare(int, unsigned);
+extern unsigned xthal_get_ccompare(int);
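+
+/*
+ * For illustration only: arming a tick on timer 0 (sketch;
+ * CYCLES_PER_TICK is a hypothetical constant):
+ *
+ *	xthal_set_ccompare(0, xthal_get_ccount() + CYCLES_PER_TICK);
+ */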
+
+
+/*----------------------------------------------------------------------
+ Instruction/Data RAM/ROM Access
+ ----------------------------------------------------------------------*/
+
+extern void* xthal_memcpy(void *dst, const void *src, unsigned len);
+extern void* xthal_bcopy(const void *src, void *dst, unsigned len);
+
+/*----------------------------------------------------------------------
+ MP Synchronization
+ ----------------------------------------------------------------------*/
+extern int xthal_compare_and_set( int *addr, int test_val, int compare_val );
+extern unsigned xthal_get_prid( void );
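+
+/*
+ * For illustration only: a busy-wait lock sketch on top of
+ * xthal_compare_and_set, assuming it atomically stores the third
+ * argument to *addr when *addr equals the second and returns the
+ * prior value -- semantics this header does not spell out:
+ *
+ *	int lock = 0;
+ *	...
+ *	while (xthal_compare_and_set(&lock, 0, 1) != 0)
+ *		;
+ */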
+
+/*extern const char Xthal_have_s32c1i;*/
+extern const unsigned char Xthal_have_prid;
+
+
+/*----------------------------------------------------------------------
+ Miscellaneous
+ ----------------------------------------------------------------------*/
+
+extern const unsigned int Xthal_release_major;
+extern const unsigned int Xthal_release_minor;
+extern const char * const Xthal_release_name;
+extern const char * const Xthal_release_internal;
+
+extern const unsigned char Xthal_memory_order;
+extern const unsigned char Xthal_have_windowed;
+extern const unsigned char Xthal_have_density;
+extern const unsigned char Xthal_have_booleans;
+extern const unsigned char Xthal_have_loops;
+extern const unsigned char Xthal_have_nsa;
+extern const unsigned char Xthal_have_minmax;
+extern const unsigned char Xthal_have_sext;
+extern const unsigned char Xthal_have_clamps;
+extern const unsigned char Xthal_have_mac16;
+extern const unsigned char Xthal_have_mul16;
+extern const unsigned char Xthal_have_fp;
+extern const unsigned char Xthal_have_speculation;
+extern const unsigned char Xthal_have_exceptions;
+extern const unsigned char Xthal_xea_version;
+extern const unsigned char Xthal_have_interrupts;
+extern const unsigned char Xthal_have_highlevel_interrupts;
+extern const unsigned char Xthal_have_nmi;
+
+extern const unsigned short Xthal_num_writebuffer_entries;
+
+extern const unsigned int Xthal_build_unique_id;
+/* Release info for hardware targeted by software upgrades: */
+extern const unsigned int Xthal_hw_configid0;
+extern const unsigned int Xthal_hw_configid1;
+extern const unsigned int Xthal_hw_release_major;
+extern const unsigned int Xthal_hw_release_minor;
+extern const char * const Xthal_hw_release_name;
+extern const char * const Xthal_hw_release_internal;
+
+
+/* Internal memories... */
+
+extern const unsigned char Xthal_num_instrom;
+extern const unsigned char Xthal_num_instram;
+extern const unsigned char Xthal_num_datarom;
+extern const unsigned char Xthal_num_dataram;
+extern const unsigned char Xthal_num_xlmi;
+extern const unsigned int Xthal_instrom_vaddr[1];
+extern const unsigned int Xthal_instrom_paddr[1];
+extern const unsigned int Xthal_instrom_size [1];
+extern const unsigned int Xthal_instram_vaddr[1];
+extern const unsigned int Xthal_instram_paddr[1];
+extern const unsigned int Xthal_instram_size [1];
+extern const unsigned int Xthal_datarom_vaddr[1];
+extern const unsigned int Xthal_datarom_paddr[1];
+extern const unsigned int Xthal_datarom_size [1];
+extern const unsigned int Xthal_dataram_vaddr[1];
+extern const unsigned int Xthal_dataram_paddr[1];
+extern const unsigned int Xthal_dataram_size [1];
+extern const unsigned int Xthal_xlmi_vaddr[1];
+extern const unsigned int Xthal_xlmi_paddr[1];
+extern const unsigned int Xthal_xlmi_size [1];
+
+
+
+/*----------------------------------------------------------------------
+ Memory Management Unit
+ ----------------------------------------------------------------------*/
+
+extern const unsigned char Xthal_have_spanning_way;
+extern const unsigned char Xthal_have_identity_map;
+extern const unsigned char Xthal_have_mimic_cacheattr;
+extern const unsigned char Xthal_have_xlt_cacheattr;
+extern const unsigned char Xthal_have_cacheattr;
+extern const unsigned char Xthal_have_tlbs;
+
+extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */
+extern const unsigned char Xthal_mmu_asid_kernel;
+extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */
+extern const unsigned char Xthal_mmu_ring_bits;
+extern const unsigned char Xthal_mmu_sr_bits;
+extern const unsigned char Xthal_mmu_ca_bits;
+extern const unsigned int Xthal_mmu_max_pte_page_size;
+extern const unsigned int Xthal_mmu_min_pte_page_size;
+
+extern const unsigned char Xthal_itlb_way_bits;
+extern const unsigned char Xthal_itlb_ways;
+extern const unsigned char Xthal_itlb_arf_ways;
+extern const unsigned char Xthal_dtlb_way_bits;
+extern const unsigned char Xthal_dtlb_ways;
+extern const unsigned char Xthal_dtlb_arf_ways;
+
+/* Convert between virtual and physical addresses (through static maps only): */
+/*** WARNING: these two functions may go away in a future release; don't depend on them! ***/
+extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp );
+extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached );
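+
+/*
+ * For illustration only (and mindful of the warning above): a lookup
+ * sketch, assuming a zero return means success, which this header
+ * does not state:
+ *
+ *	unsigned paddr;
+ *	if (xthal_static_v2p(vaddr, &paddr) == 0)
+ *		...use paddr...
+ */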
+
+#if 0
+/******************* EXPERIMENTAL AND TENTATIVE ONLY ********************/
+
+#define XTHAL_MMU_PAGESZ_COUNT_MAX 8 /* maximum number of different page sizes */
+extern const char Xthal_mmu_pagesz_count; /* 0 .. 8 number of different page sizes configured */
+
+/* Note: the following table doesn't necessarily have page sizes in increasing order: */
+extern const char Xthal_mmu_pagesz_log2[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 10 .. 28 (0 past count) */
+
+/* Sorted (increasing) table of page sizes, that indexes into the above table: */
+extern const char Xthal_mmu_pagesz_sorted[XTHAL_MMU_PAGESZ_COUNT_MAX]; /* 0 .. 7 (0 past count) */
+
+/*u32 Xthal_virtual_exceptions;*/ /* bitmask of which exceptions execute in virtual mode... */
+
+extern const char Xthal_mmu_pte_pagesz_log2_min; /* ?? minimum page size in PTEs */
+extern const char Xthal_mmu_pte_pagesz_log2_max; /* ?? maximum page size in PTEs */
+
+/* Cache Attribute Bits Implemented by the Cache (part of the cache abstraction) */
+extern const char Xthal_icache_fca_bits_implemented; /* ITLB/UTLB only! */
+extern const char Xthal_dcache_lca_bits_implemented; /* DTLB/UTLB only! */
+extern const char Xthal_dcache_sca_bits_implemented; /* DTLB/UTLB only! */
+
+/* Per TLB Parameters (Instruction, Data, Unified) */
+struct XtHalMmuTlb Xthal_itlb; /* description of MMU I-TLB generic features */
+struct XtHalMmuTlb Xthal_dtlb; /* description of MMU D-TLB generic features */
+struct XtHalMmuTlb Xthal_utlb; /* description of MMU U-TLB generic features */
+
+#define XTHAL_MMU_WAYS_MAX 8 /* maximum number of ways (associativities) for each TLB */
+
+/* Structure for common information described for each possible TLB (instruction, data and unified): */
+typedef struct XtHalMmuTlb {
+ u8 va_bits; /* 32 (number of virtual address bits) */
+ u8 pa_bits; /* 32 (number of physical address bits) */
+ bool tlb_va_indexed; /* 1 (set if TLB is indexed by virtual address) */
+ bool tlb_va_tagged; /* 0 (set if TLB is tagged by virtual address) */
+ bool cache_va_indexed; /* 1 (set if cache is indexed by virtual address) */
+ bool cache_va_tagged; /* 0 (set if cache is tagged by virtual address) */
+ /*bool (whether page tables are traversed in vaddr sorted order, paddr sorted order, ...) */
+ /*u8 (set of available page attribute bits, other than cache attribute bits defined above) */
+ /*u32 (various masks for pages, MMU table/TLB entries, etc.) */
+ u8 way_count; /* 0 .. 8 (number of ways, a.k.a. associativities, for this TLB) */
+ XtHalMmuTlbWay * ways[XTHAL_MMU_WAYS_MAX]; /* pointers to per-way parms for each way */
+} XtHalMmuTlb;
+
+/* Per TLB Way (Per Associativity) Parameters */
+typedef struct XtHalMmuTlbWay {
+ u32 index_count_log2; /* 0 .. 4 */
+ u32 pagesz_mask; /* 0 .. 2^pagesz_count - 1 (each bit corresponds to a size */
+ /* defined in the Xthal_mmu_pagesz_log2[] table) */
+ u32 vpn_const_mask;
+ u32 vpn_const_value;
+ u64 ppn_const_mask; /* future may support pa_bits > 32 */
+ u64 ppn_const_value;
+ u32 ppn_id_mask; /* paddr bits taken directly from vaddr */
+ bool backgnd_match; /* 0 or 1 */
+ /* These are defined in terms of the XTHAL_CACHE_xxx bits: */
+ u8 fca_const_mask; /* ITLB/UTLB only! */
+ u8 fca_const_value; /* ITLB/UTLB only! */
+ u8 lca_const_mask; /* DTLB/UTLB only! */
+ u8 lca_const_value; /* DTLB/UTLB only! */
+ u8 sca_const_mask; /* DTLB/UTLB only! */
+ u8 sca_const_value; /* DTLB/UTLB only! */
+ /* These define an encoding that map 5 bits in TLB and PTE entries to */
+ /* 8 bits (FCA, ITLB), 16 bits (LCA+SCA, DTLB) or 24 bits (FCA+LCA+SCA, UTLB): */
+ /* (they may be moved to struct XtHalMmuTlb) */
+ u8 ca_bits; /* number of bits in TLB/PTE entries for cache attributes */
+ u32 * ca_map; /* pointer to array of 2^ca_bits entries of FCA+LCA+SCA bits */
+} XtHalMmuTlbWay;
+
+/*
+ * The way to determine whether protection support is present in core
+ * is to [look at Xthal_mmu_rings ???].
+ * Give info on memory requirements for MMU tables and other in-memory
+ * data structures (globally, per task, base and per page, etc.) - whatever bounds can be calculated.
+ */
+
+
+/* Default vectors: */
+xthal_immu_fetch_miss_vector
+xthal_dmmu_load_miss_vector
+xthal_dmmu_store_miss_vector
+
+/* Functions called when a fault is detected: */
+typedef void (XtHalMmuFaultFunc)( unsigned vaddr, ...context... );
+/* Or, */
+/* a? = vaddr */
+/* a? = context... */
+/* PS.xxx = xxx */
+XtHalMmuFaultFunc *Xthal_immu_fetch_fault_func;
+XtHalMmuFaultFunc *Xthal_dmmu_load_fault_func;
+XtHalMmuFaultFunc *Xthal_dmmu_store_fault_func;
+
+/* Default Handlers: */
+/* The user and/or kernel exception handlers may jump to these handlers to handle the relevant exceptions,
+ * according to the value of EXCCAUSE. The exact register state on entry to these handlers is TBD. */
+/* When multiple TLB entries match (hit) on the same access: */
+xthal_immu_fetch_multihit_handler
+xthal_dmmu_load_multihit_handler
+xthal_dmmu_store_multihit_handler
+/* Protection violations according to cache attributes, and other cache attribute mismatches: */
+xthal_immu_fetch_attr_handler
+xthal_dmmu_load_attr_handler
+xthal_dmmu_store_attr_handler
+/* Protection violations due to insufficient ring level: */
+xthal_immu_fetch_priv_handler
+xthal_dmmu_load_priv_handler
+xthal_dmmu_store_priv_handler
+/* Alignment exception handlers (if supported by the particular Xtensa MMU configuration): */
+xthal_dmmu_load_align_handler
+xthal_dmmu_store_align_handler
+
+/* Or, alternatively, the OS user and/or kernel exception handlers may simply jump to the
+ * following entry points which will handle any values of EXCCAUSE not handled by the OS: */
+xthal_user_exc_default_handler
+xthal_kernel_exc_default_handler
+
+#endif /*0*/
+
+#ifdef INCLUDE_DEPRECATED_HAL_CODE
+extern const unsigned char Xthal_have_old_exc_arch;
+extern const unsigned char Xthal_have_mmu;
+extern const unsigned int Xthal_num_regs;
+extern const unsigned char Xthal_num_iroms;
+extern const unsigned char Xthal_num_irams;
+extern const unsigned char Xthal_num_droms;
+extern const unsigned char Xthal_num_drams;
+extern const unsigned int Xthal_configid0;
+extern const unsigned int Xthal_configid1;
+#endif
+
+#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE
+#define XTHAL_24_BIT_BREAK 0x80000000
+#define XTHAL_16_BIT_BREAK 0x40000000
+extern const unsigned short Xthal_ill_inst_16[16];
+#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */
+#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */
+#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */
+#define XTHAL_RFW_INST 0x00000800
+#define XTHAL_RFUE_INST 0x00000400
+#define XTHAL_RFI_INST 0x00000200
+#define XTHAL_RFE_INST 0x00000100
+#define XTHAL_RET_INST 0x00000080
+#define XTHAL_BREAK_INST 0x00000040
+#define XTHAL_SYSCALL_INST 0x00000020
+#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */
+#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */
+#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */
+#define XTHAL_24_BIT_INST 0x00000002
+#define XTHAL_16_BIT_INST 0x00000001
+typedef struct xthal_state {
+ unsigned pc;
+ unsigned ar[16];
+ unsigned lbeg;
+ unsigned lend;
+ unsigned lcount;
+ unsigned extra_ptr;
+ unsigned cpregs_ptr[XTHAL_MAX_CPS];
+} XTHAL_STATE;
+extern unsigned int xthal_inst_type(void *addr);
+extern unsigned int xthal_branch_addr(void *addr);
+extern unsigned int xthal_get_npc(XTHAL_STATE *user_state);
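+/* Hedged usage sketch (not in the original header): classifying the
+ * instruction at an address with the helpers above, eg.
+ *	unsigned t = xthal_inst_type( pc );
+ *	if( t & (XTHAL_JUMP_INST | XTHAL_BRANCH_INST) )
+ *		target = xthal_branch_addr( pc );
+ * where pc and target are hypothetical locals. */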
+#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */
+
+#ifdef __cplusplus
+}
+#endif
+#endif /*!__ASSEMBLY__ */
+
+#endif /*XTENSA_HAL_H*/
+
diff --git a/include/asm-xtensa/xtensa/simcall.h b/include/asm-xtensa/xtensa/simcall.h
new file mode 100644
index 000000000000..a2b868929a49
--- /dev/null
+++ b/include/asm-xtensa/xtensa/simcall.h
@@ -0,0 +1,130 @@
+#ifndef SIMCALL_INCLUDED
+#define SIMCALL_INCLUDED
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/simcall.h - Simulator call numbers
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+/*
+ * System-call-like services offered by the simulator host.
+ * These are modeled after the Linux 2.4 kernel system calls
+ * for Xtensa processors. However, not all system calls, and
+ * not all functionality of a given system call, are implemented,
+ * or necessarily have well-defined or equivalent semantics in
+ * the context of a simulation (as opposed to a Unix kernel).
+ *
+ * These services behave largely as if they had been invoked
+ * as a task in the simulator host's operating system
+ * (eg. files accessed are those of the simulator host).
+ * However, these SIMCALLs model a virtual operating system
+ * so that various definitions and bit assignments
+ * (eg. open mode bits, errno values, etc) are independent
+ * of the host operating system used to run the simulation.
+ * Rather these definitions are specific to the Xtensa ISS.
+ * This way Xtensa ISA code written to use these SIMCALLs
+ * can (in principle) be simulated on any host.
+ *
+ * Up to 6 parameters are passed in registers a3 to a8
+ * (note the 6th parameter isn't passed on the stack,
+ * unlike windowed function calling conventions).
+ * The return value is in a2. A negative value in the
+ * range -4096 to -1 indicates a negated error code to be
+ * reported in errno with a return value of -1, otherwise
+ * the value in a2 is returned as is.
+ */
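+
+/*
+ * Hedged sketch (not part of this generated header): one way to issue a
+ * SIMCALL from C under the convention above -- call number in a2,
+ * parameters in a3 and up, result back in a2.  The register-variable and
+ * constraint usage below is illustrative, not a guaranteed ABI:
+ *
+ *	static inline int simc( int nr, int p1, int p2, int p3 )
+ *	{
+ *		register int a2 asm ("a2") = nr;
+ *		register int a3 asm ("a3") = p1;
+ *		register int a4 asm ("a4") = p2;
+ *		register int a5 asm ("a5") = p3;
+ *		__asm__ __volatile__ ( "simcall"
+ *				       : "+a" (a2)
+ *				       : "a" (a3), "a" (a4), "a" (a5)
+ *				       : "memory" );
+ *		return a2;
+ *	}
+ *
+ * A return value in -4096..-1 is a negated errno, per the note above.
+ */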
+
+/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
+
+#define SYS_nop 0 /* n/a - setup; used to flush register windows */
+#define SYS_exit 1 /*x*/
+#define SYS_fork 2
+#define SYS_read 3 /*x*/
+#define SYS_write 4 /*x*/
+#define SYS_open 5 /*x*/
+#define SYS_close 6 /*x*/
+#define SYS_rename 7 /*x 38 - waitpid */
+#define SYS_creat 8 /*x*/
+#define SYS_link 9 /*x (not implemented on WIN32) */
+#define SYS_unlink 10 /*x*/
+#define SYS_execv 11 /* n/a - execve */
+#define SYS_execve 12 /* 11 - chdir */
+#define SYS_pipe 13 /* 42 - time */
+#define SYS_stat 14 /* 106 - mknod */
+#define SYS_chmod 15
+#define SYS_chown 16 /* 202 - lchown */
+#define SYS_utime 17 /* 30 - break */
+#define SYS_wait 18 /* n/a - oldstat */
+#define SYS_lseek 19 /*x*/
+#define SYS_getpid 20
+#define SYS_isatty 21 /* n/a - mount */
+#define SYS_fstat 22 /* 108 - oldumount */
+#define SYS_time 23 /* 13 - setuid */
+#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
+#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
+#define SYS_socket 26
+#define SYS_sendto 27
+#define SYS_recvfrom 28
+#define SYS_select_one 29 /* not a compatible select; handles one file descriptor at a time */
+#define SYS_bind 30
+#define SYS_ioctl 31
+
+/*
+ * Other...
+ */
+#define SYS_iss_argc 1000 /* returns value of argc */
+#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
+#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
+
+/*
+ * SIMCALLs for the ferret memory debugger. All are invoked by
+ * libferret.a ... ( Xtensa/Target-Libs/ferret )
+ */
+#define SYS_ferret 1010
+#define SYS_malloc 1011
+#define SYS_free 1012
+#define SYS_more_heap 1013
+#define SYS_no_heap 1014
+
+
+/*
+ * Extra SIMCALLs for GDB:
+ */
+#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
+ to a break.n/break, regardless of cause! */
+#define SYS_xmon_out -2 /* invoked by XMON: ... */
+#define SYS_xmon_in -3 /* invoked by XMON: ... */
+#define SYS_xmon_flush -4 /* invoked by XMON: ... */
+#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
+#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
+#define SYS_xmon_init -7 /* invoked by XMON: ... */
+#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
+
+/*
+ * SIMCALLs for vxWorks xtiss BSP:
+ */
+#define SYS_setup_ppp_pipes -83
+#define SYS_log_msg -84
+
+/*
+ * Test SIMCALLs:
+ */
+#define SYS_test_write_state -100
+#define SYS_test_read_state -101
+
+/*
+ * SYS_select_one specifiers
+ */
+#define XTISS_SELECT_ONE_READ 1
+#define XTISS_SELECT_ONE_WRITE 2
+#define XTISS_SELECT_ONE_EXCEPT 3
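+
+/* Hedged usage note: with a helper like the simc() sketch earlier in this
+ * file, polling a file descriptor for readability might look like
+ *	simc( SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv );
+ * the type of the timeout argument tv is an assumption here. */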
+
+#endif /* !SIMCALL_INCLUDED */
diff --git a/include/asm-xtensa/xtensa/xt2000-uart.h b/include/asm-xtensa/xtensa/xt2000-uart.h
new file mode 100644
index 000000000000..0154460f0ed8
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xt2000-uart.h
@@ -0,0 +1,155 @@
+#ifndef _uart_h_included_
+#define _uart_h_included_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/xt2000-uart.h -- NatSemi PC16552D DUART
+ * definitions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/xt2000.h>
+
+
+/* 16550 UART DEVICE REGISTERS
+ The XT2000 board aligns each register to a 32-bit word, but the UART device only
+ uses one byte of each word: the least-significant byte, regardless of the
+ endianness of the core (ie. byte offset 0 for little-endian and 3 for big-endian).
+ So word accesses are endianness-independent; the macros provided here use them.
+*/
+struct uart_dev_s {
+ union {
+ unsigned int rxb; /* DLAB=0: receive buffer, read-only */
+ unsigned int txb; /* DLAB=0: transmit buffer, write-only */
+ unsigned int dll; /* DLAB=1: divisor, least-significant byte latch (was write-only?) */
+ } w0;
+ union {
+ unsigned int ier; /* DLAB=0: interrupt-enable register (was write-only?) */
+ unsigned int dlm; /* DLAB=1: divisor, most-significant byte latch (was write-only?) */
+ } w1;
+
+ union {
+ unsigned int isr; /* DLAB=0: interrupt status register, read-only */
+ unsigned int fcr; /* DLAB=0: FIFO control register, write-only */
+ unsigned int afr; /* DLAB=1: alternate function register */
+ } w2;
+
+ unsigned int lcr; /* line control register, write-only */
+ unsigned int mcr; /* modem control register, write-only */
+ unsigned int lsr; /* line status register, read-only */
+ unsigned int msr; /* modem status register, read-only */
+ unsigned int scr; /* scratch register, read/write */
+};
+
+#define _RXB(u) ((u)->w0.rxb)
+#define _TXB(u) ((u)->w0.txb)
+#define _DLL(u) ((u)->w0.dll)
+#define _IER(u) ((u)->w1.ier)
+#define _DLM(u) ((u)->w1.dlm)
+#define _ISR(u) ((u)->w2.isr)
+#define _FCR(u) ((u)->w2.fcr)
+#define _AFR(u) ((u)->w2.afr)
+#define _LCR(u) ((u)->lcr)
+#define _MCR(u) ((u)->mcr)
+#define _LSR(u) ((u)->lsr)
+#define _MSR(u) ((u)->msr)
+#define _SCR(u) ((u)->scr)
+
+typedef volatile struct uart_dev_s uart_dev_t;
+
+/* IER bits */
+#define RCVR_DATA_REG_INTENABLE 0x01
+#define XMIT_HOLD_REG_INTENABLE 0x02
+#define RCVR_STATUS_INTENABLE 0x04
+#define MODEM_STATUS_INTENABLE 0x08
+
+/* FCR bits */
+#define _FIFO_ENABLE 0x01
+#define RCVR_FIFO_RESET 0x02
+#define XMIT_FIFO_RESET 0x04
+#define DMA_MODE_SELECT 0x08
+#define RCVR_TRIGGER_LSB 0x40
+#define RCVR_TRIGGER_MSB 0x80
+
+/* AFR bits */
+#define AFR_CONC_WRITE 0x01
+#define AFR_BAUDOUT_SEL 0x02
+#define AFR_RXRDY_SEL 0x04
+
+/* ISR bits */
+#define INT_STATUS(r) ((r)&1)
+#define INT_PRIORITY(r) (((r)>>1)&0x7)
+
+/* LCR bits */
+#define WORD_LENGTH(n) (((n)-5)&0x3)
+#define STOP_BIT_ENABLE 0x04
+#define PARITY_ENABLE 0x08
+#define EVEN_PARITY 0x10
+#define FORCE_PARITY 0x20
+#define XMIT_BREAK 0x40
+#define DLAB_ENABLE 0x80
+
+/* MCR bits */
+#define _DTR 0x01
+#define _RTS 0x02
+#define _OP1 0x04
+#define _OP2 0x08
+#define LOOP_BACK 0x10
+
+/* LSR Bits */
+#define RCVR_DATA_READY 0x01
+#define OVERRUN_ERROR 0x02
+#define PARITY_ERROR 0x04
+#define FRAMING_ERROR 0x08
+#define BREAK_INTERRUPT 0x10
+#define XMIT_HOLD_EMPTY 0x20
+#define XMIT_EMPTY 0x40
+#define FIFO_ERROR 0x80
+#define RCVR_READY(u) (_LSR(u)&RCVR_DATA_READY)
+#define XMIT_READY(u) (_LSR(u)&XMIT_HOLD_EMPTY)
+
+/* MSR bits */
+#define _RDR 0x01
+#define DELTA_DSR 0x02
+#define DELTA_RI 0x04
+#define DELTA_CD 0x08
+#define _CTS 0x10
+#define _DSR 0x20
+#define _RI 0x40
+#define _CD 0x80
+
+/* prototypes */
+void uart_init( uart_dev_t *u, int bitrate );
+void uart_out( uart_dev_t *u, char c );
+void uart_puts( uart_dev_t *u, char *s );
+char uart_in( uart_dev_t *u );
+void uart_enable_rcvr_int( uart_dev_t *u );
+void uart_disable_rcvr_int( uart_dev_t *u );
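+
+/* Hedged usage sketch (not part of the original header): a minimal polled
+ * transmit built on the accessor macros above, assuming uart_init() has
+ * already programmed the divisor and line parameters:
+ *
+ *	static void uart_putc_poll( uart_dev_t *u, char c )
+ *	{
+ *		while( !XMIT_READY(u) )
+ *			;
+ *		_TXB(u) = c;
+ *	}
+ *
+ * The spin waits for XMIT_HOLD_EMPTY (LSR bit 5); the store is a 32-bit
+ * access of which only the least-significant byte reaches the device.
+ */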
+
+#ifdef DUART16552_1_VADDR
+/* DUART present. */
+#define DUART_1_BASE (*(uart_dev_t*)DUART16552_1_VADDR)
+#define DUART_2_BASE (*(uart_dev_t*)DUART16552_2_VADDR)
+#define UART1_PUTS(s) uart_puts( &DUART_1_BASE, s )
+#define UART2_PUTS(s) uart_puts( &DUART_2_BASE, s )
+#else
+/* DUART not configured, use dummy placeholders to allow compiles to work. */
+#define DUART_1_BASE (*(uart_dev_t*)0)
+#define DUART_2_BASE (*(uart_dev_t*)0)
+#define UART1_PUTS(s)
+#define UART2_PUTS(s)
+#endif
+
+/* Compute 16-bit divisor for baudrate generator, with rounding: */
+#define DUART_DIVISOR(crystal,speed) (((crystal)/16 + (speed)/2)/(speed))
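+
+/* Hedged example: with the XT2000's 18.432 MHz crystal (DUART16552_XTAL_FREQ),
+ * 38400 baud gives DUART_DIVISOR(18432000,38400)
+ * = (1152000 + 19200) / 38400 = 30 (integer division). */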
+
+#endif /*_uart_h_included_*/
+
diff --git a/include/asm-xtensa/xtensa/xt2000.h b/include/asm-xtensa/xtensa/xt2000.h
new file mode 100644
index 000000000000..703a45002f8f
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xt2000.h
@@ -0,0 +1,408 @@
+#ifndef _INC_XT2000_H_
+#define _INC_XT2000_H_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * include/asm-xtensa/xtensa/xt2000.h - Definitions specific to the
+ * Tensilica XT2000 Emulation Board
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/config/core.h>
+#include <xtensa/config/system.h>
+
+
+/*
+ * Default assignment of XT2000 devices to external interrupts.
+ */
+
+/* Ethernet interrupt: */
+#ifdef XCHAL_EXTINT3_NUM
+#define SONIC83934_INTNUM XCHAL_EXTINT3_NUM
+#define SONIC83934_INTLEVEL XCHAL_EXTINT3_LEVEL
+#define SONIC83934_INTMASK XCHAL_EXTINT3_MASK
+#else
+#define SONIC83934_INTMASK 0
+#endif
+
+/* DUART channel 1 interrupt (P1 - console): */
+#ifdef XCHAL_EXTINT4_NUM
+#define DUART16552_1_INTNUM XCHAL_EXTINT4_NUM
+#define DUART16552_1_INTLEVEL XCHAL_EXTINT4_LEVEL
+#define DUART16552_1_INTMASK XCHAL_EXTINT4_MASK
+#else
+#define DUART16552_1_INTMASK 0
+#endif
+
+/* DUART channel 2 interrupt (P2 - 2nd serial port): */
+#ifdef XCHAL_EXTINT5_NUM
+#define DUART16552_2_INTNUM XCHAL_EXTINT5_NUM
+#define DUART16552_2_INTLEVEL XCHAL_EXTINT5_LEVEL
+#define DUART16552_2_INTMASK XCHAL_EXTINT5_MASK
+#else
+#define DUART16552_2_INTMASK 0
+#endif
+
+/* FPGA-combined PCI/etc interrupts: */
+#ifdef XCHAL_EXTINT6_NUM
+#define XT2000_FPGAPCI_INTNUM XCHAL_EXTINT6_NUM
+#define XT2000_FPGAPCI_INTLEVEL XCHAL_EXTINT6_LEVEL
+#define XT2000_FPGAPCI_INTMASK XCHAL_EXTINT6_MASK
+#else
+#define XT2000_FPGAPCI_INTMASK 0
+#endif
+
+
+
+/*
+ * Device addresses.
+ *
+ * Note: for endianness-independence, use 32-bit loads and stores for all
+ * register accesses to Ethernet, DUART and LED devices. Undefined bits
+ * may need to be masked out when reading, if the actual register
+ * size is smaller than 32 bits.
+ *
+ * Note: XT2000 bus byte lanes are defined in terms of msbyte and lsbyte
+ * relative to the processor. So 32-bit registers are accessed consistently
+ * from both big and little endian processors. However, this means byte
+ * sequences are not consistent between big and little endian processors.
+ * This is fine for RAM, and for ROM if ROM is created for a specific
+ * processor (and thus has correct byte sequences). However this may be
+ * unexpected for Flash, which might contain a file-system that one wants
+ * to use for multiple processor configurations (eg. the Flash might contain
+ * the Ethernet card's address, endianness-independent application data, etc).
+ * That is, byte sequences written in Flash by a core of a given endianness
+ * will be byte-swapped when seen by a core of the other endianness.
+ * Someone implementing an endianness-independent Flash file system will
+ * likely handle this byte-swapping issue in the Flash driver software.
+ */
+
+#define DUART16552_XTAL_FREQ 18432000 /* crystal frequency in Hz */
+#define XTBOARD_FLASH_MAXSIZE 0x4000000 /* 64 MB (max; depends on what is socketed!) */
+#define XTBOARD_EPROM_MAXSIZE 0x0400000 /* 4 MB (max; depends on what is socketed!) */
+#define XTBOARD_EEPROM_MAXSIZE 0x0080000 /* 512 kB (max; depends on what is socketed!) */
+#define XTBOARD_ASRAM_SIZE 0x0100000 /* 1 MB */
+#define XTBOARD_PCI_MEM_SIZE 0x8000000 /* 128 MB (allocated) */
+#define XTBOARD_PCI_IO_SIZE 0x1000000 /* 16 MB (allocated) */
+
+#ifdef XSHAL_IOBLOCK_BYPASS_PADDR
+/* PCI memory space: */
+# define XTBOARD_PCI_MEM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x0000000)
+/* Socketed Flash (eg. 2 x 16-bit devices): */
+# define XTBOARD_FLASH_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0x8000000)
+/* PCI I/O space: */
+# define XTBOARD_PCI_IO_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xC000000)
+/* V3 PCI interface chip register/config space: */
+# define XTBOARD_V3PCI_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD000000)
+/* Bus Interface registers: */
+# define XTBOARD_BUSINT_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD010000)
+/* FPGA registers: */
+# define XT2000_FPGAREGS_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD020000)
+/* SONIC SN83934 Ethernet controller/transceiver: */
+# define SONIC83934_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD030000)
+/* 8-character bitmapped LED display: */
+# define XTBOARD_LED_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD040000)
+/* National-Semi PC16552D DUART: */
+# define DUART16552_1_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050020) /* channel 1 (P1 - console) */
+# define DUART16552_2_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD050000) /* channel 2 (P2) */
+/* Asynchronous Static RAM: */
+# define XTBOARD_ASRAM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD400000)
+/* 8-bit EEPROM: */
+# define XTBOARD_EEPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD600000)
+/* 2 x 16-bit EPROMs: */
+# define XTBOARD_EPROM_PADDR (XSHAL_IOBLOCK_BYPASS_PADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_BYPASS_PADDR */
+
+/* These devices might be accessed cached: */
+#ifdef XSHAL_IOBLOCK_CACHED_PADDR
+# define XTBOARD_PCI_MEM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x0000000)
+# define XTBOARD_FLASH_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0x8000000)
+# define XTBOARD_ASRAM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD400000)
+# define XTBOARD_EEPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD600000)
+# define XTBOARD_EPROM_CACHED_PADDR (XSHAL_IOBLOCK_CACHED_PADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_CACHED_PADDR */
+
+
+/*** Same thing over again, this time with virtual addresses: ***/
+
+#ifdef XSHAL_IOBLOCK_BYPASS_VADDR
+/* PCI memory space: */
+# define XTBOARD_PCI_MEM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x0000000)
+/* Socketed Flash (eg. 2 x 16-bit devices): */
+# define XTBOARD_FLASH_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0x8000000)
+/* PCI I/O space: */
+# define XTBOARD_PCI_IO_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xC000000)
+/* V3 PCI interface chip register/config space: */
+# define XTBOARD_V3PCI_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD000000)
+/* Bus Interface registers: */
+# define XTBOARD_BUSINT_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD010000)
+/* FPGA registers: */
+# define XT2000_FPGAREGS_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD020000)
+/* SONIC SN83934 Ethernet controller/transceiver: */
+# define SONIC83934_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD030000)
+/* 8-character bitmapped LED display: */
+# define XTBOARD_LED_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD040000)
+/* National-Semi PC16552D DUART: */
+# define DUART16552_1_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050020) /* channel 1 (P1 - console) */
+# define DUART16552_2_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD050000) /* channel 2 (P2) */
+/* Asynchronous Static RAM: */
+# define XTBOARD_ASRAM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD400000)
+/* 8-bit EEPROM: */
+# define XTBOARD_EEPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD600000)
+/* 2 x 16-bit EPROMs: */
+# define XTBOARD_EPROM_VADDR (XSHAL_IOBLOCK_BYPASS_VADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_BYPASS_VADDR */
+
+/* These devices might be accessed cached: */
+#ifdef XSHAL_IOBLOCK_CACHED_VADDR
+# define XTBOARD_PCI_MEM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x0000000)
+# define XTBOARD_FLASH_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0x8000000)
+# define XTBOARD_ASRAM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD400000)
+# define XTBOARD_EEPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD600000)
+# define XTBOARD_EPROM_CACHED_VADDR (XSHAL_IOBLOCK_CACHED_VADDR+0xD800000)
+#endif /* XSHAL_IOBLOCK_CACHED_VADDR */
+
+
+/* System ROM: */
+#define XTBOARD_ROM_SIZE XSHAL_ROM_SIZE
+#ifdef XSHAL_ROM_VADDR
+#define XTBOARD_ROM_VADDR XSHAL_ROM_VADDR
+#endif
+#ifdef XSHAL_ROM_PADDR
+#define XTBOARD_ROM_PADDR XSHAL_ROM_PADDR
+#endif
+
+/* System RAM: */
+#define XTBOARD_RAM_SIZE XSHAL_RAM_SIZE
+#ifdef XSHAL_RAM_VADDR
+#define XTBOARD_RAM_VADDR XSHAL_RAM_VADDR
+#endif
+#ifdef XSHAL_RAM_PADDR
+#define XTBOARD_RAM_PADDR XSHAL_RAM_PADDR
+#endif
+#define XTBOARD_RAM_BYPASS_VADDR XSHAL_RAM_BYPASS_VADDR
+#define XTBOARD_RAM_BYPASS_PADDR XSHAL_RAM_BYPASS_PADDR
+
+
+
+/*
+ * Things that depend on device addresses.
+ */
+
+
+#define XTBOARD_CACHEATTR_WRITEBACK XSHAL_XT2000_CACHEATTR_WRITEBACK
+#define XTBOARD_CACHEATTR_WRITEALLOC XSHAL_XT2000_CACHEATTR_WRITEALLOC
+#define XTBOARD_CACHEATTR_WRITETHRU XSHAL_XT2000_CACHEATTR_WRITETHRU
+#define XTBOARD_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS
+#define XTBOARD_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT
+
+#define XTBOARD_BUSINT_PIPE_REGIONS XSHAL_XT2000_PIPE_REGIONS
+#define XTBOARD_BUSINT_SDRAM_REGIONS XSHAL_XT2000_SDRAM_REGIONS
+
+
+
+/*
+ * BusLogic (FPGA) registers.
+ * All these registers are normally accessed using 32-bit loads/stores.
+ */
+
+/* Register offsets: */
+#define XT2000_DATECD_OFS 0x00 /* date code (read-only) */
+#define XT2000_STSREG_OFS 0x04 /* status (read-only) */
+#define XT2000_SYSLED_OFS 0x08 /* system LED */
+#define XT2000_WRPROT_OFS 0x0C /* write protect */
+#define XT2000_SWRST_OFS 0x10 /* software reset */
+#define XT2000_SYSRST_OFS 0x14 /* system (peripherals) reset */
+#define XT2000_IMASK_OFS 0x18 /* interrupt mask */
+#define XT2000_ISTAT_OFS 0x1C /* interrupt status */
+#define XT2000_V3CFG_OFS 0x20 /* V3 config (V320 PCI) */
+
+/* Physical register addresses: */
+#ifdef XT2000_FPGAREGS_PADDR
+#define XT2000_DATECD_PADDR (XT2000_FPGAREGS_PADDR+XT2000_DATECD_OFS)
+#define XT2000_STSREG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_STSREG_OFS)
+#define XT2000_SYSLED_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSLED_OFS)
+#define XT2000_WRPROT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_WRPROT_OFS)
+#define XT2000_SWRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SWRST_OFS)
+#define XT2000_SYSRST_PADDR (XT2000_FPGAREGS_PADDR+XT2000_SYSRST_OFS)
+#define XT2000_IMASK_PADDR (XT2000_FPGAREGS_PADDR+XT2000_IMASK_OFS)
+#define XT2000_ISTAT_PADDR (XT2000_FPGAREGS_PADDR+XT2000_ISTAT_OFS)
+#define XT2000_V3CFG_PADDR (XT2000_FPGAREGS_PADDR+XT2000_V3CFG_OFS)
+#endif
+
+/* Virtual register addresses: */
+#ifdef XT2000_FPGAREGS_VADDR
+#define XT2000_DATECD_VADDR (XT2000_FPGAREGS_VADDR+XT2000_DATECD_OFS)
+#define XT2000_STSREG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_STSREG_OFS)
+#define XT2000_SYSLED_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSLED_OFS)
+#define XT2000_WRPROT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_WRPROT_OFS)
+#define XT2000_SWRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SWRST_OFS)
+#define XT2000_SYSRST_VADDR (XT2000_FPGAREGS_VADDR+XT2000_SYSRST_OFS)
+#define XT2000_IMASK_VADDR (XT2000_FPGAREGS_VADDR+XT2000_IMASK_OFS)
+#define XT2000_ISTAT_VADDR (XT2000_FPGAREGS_VADDR+XT2000_ISTAT_OFS)
+#define XT2000_V3CFG_VADDR (XT2000_FPGAREGS_VADDR+XT2000_V3CFG_OFS)
+/* Register access (for C code): */
+#define XT2000_DATECD_REG (*(volatile unsigned*) XT2000_DATECD_VADDR)
+#define XT2000_STSREG_REG (*(volatile unsigned*) XT2000_STSREG_VADDR)
+#define XT2000_SYSLED_REG (*(volatile unsigned*) XT2000_SYSLED_VADDR)
+#define XT2000_WRPROT_REG (*(volatile unsigned*) XT2000_WRPROT_VADDR)
+#define XT2000_SWRST_REG (*(volatile unsigned*) XT2000_SWRST_VADDR)
+#define XT2000_SYSRST_REG (*(volatile unsigned*) XT2000_SYSRST_VADDR)
+#define XT2000_IMASK_REG (*(volatile unsigned*) XT2000_IMASK_VADDR)
+#define XT2000_ISTAT_REG (*(volatile unsigned*) XT2000_ISTAT_VADDR)
+#define XT2000_V3CFG_REG (*(volatile unsigned*) XT2000_V3CFG_VADDR)
+#endif
+
+/* DATECD (date code) bit fields: */
+
+/* BCD-coded month (01..12): */
+#define XT2000_DATECD_MONTH_SHIFT 24
+#define XT2000_DATECD_MONTH_BITS 8
+#define XT2000_DATECD_MONTH_MASK 0xFF000000
+/* BCD-coded day (01..31): */
+#define XT2000_DATECD_DAY_SHIFT 16
+#define XT2000_DATECD_DAY_BITS 8
+#define XT2000_DATECD_DAY_MASK 0x00FF0000
+/* BCD-coded year (2001..9999): */
+#define XT2000_DATECD_YEAR_SHIFT 0
+#define XT2000_DATECD_YEAR_BITS 16
+#define XT2000_DATECD_YEAR_MASK 0x0000FFFF
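+
+/* Hedged example (not in the original header): unpacking the date code read
+ * from XT2000_DATECD_REG, assuming the field layout above:
+ *	month = (datecd & XT2000_DATECD_MONTH_MASK) >> XT2000_DATECD_MONTH_SHIFT;
+ *	day   = (datecd & XT2000_DATECD_DAY_MASK)   >> XT2000_DATECD_DAY_SHIFT;
+ *	year  = (datecd & XT2000_DATECD_YEAR_MASK)  >> XT2000_DATECD_YEAR_SHIFT;
+ * each result remains BCD-coded (eg. year 0x2002 means 2002). */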
+
+/* STSREG (status) bit fields: */
+
+/* Switch SW3 setting bit fields (0=off/up, 1=on/down): */
+#define XT2000_STSREG_SW3_SHIFT 0
+#define XT2000_STSREG_SW3_BITS 4
+#define XT2000_STSREG_SW3_MASK 0x0000000F
+/* Boot-select bits of switch SW3: */
+#define XT2000_STSREG_BOOTSEL_SHIFT 0
+#define XT2000_STSREG_BOOTSEL_BITS 2
+#define XT2000_STSREG_BOOTSEL_MASK 0x00000003
+/* Boot-select values: */
+#define XT2000_STSREG_BOOTSEL_FLASH 0
+#define XT2000_STSREG_BOOTSEL_EPROM16 1
+#define XT2000_STSREG_BOOTSEL_PROM8 2
+#define XT2000_STSREG_BOOTSEL_ASRAM 3
+/* User-defined bits of switch SW3: */
+#define XT2000_STSREG_SW3_2_SHIFT 2
+#define XT2000_STSREG_SW3_2_MASK 0x00000004
+#define XT2000_STSREG_SW3_3_SHIFT 3
+#define XT2000_STSREG_SW3_3_MASK 0x00000008
+
+/* SYSLED (system LED) bit fields: */
+
+/* LED control bit (0=off, 1=on): */
+#define XT2000_SYSLED_LEDON_SHIFT 0
+#define XT2000_SYSLED_LEDON_MASK 0x00000001
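+
+/* Hedged usage sketch: turning the system LED on through the 32-bit
+ * accessor defined earlier, assuming the FPGA registers are mapped:
+ *	XT2000_SYSLED_REG = XT2000_SYSLED_LEDON_MASK;
+ */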
+
+/* WRPROT (write protect) bit fields (0=writable, 1=write-protected [default]): */
+
+/* Flash write protect: */
+#define XT2000_WRPROT_FLWP_SHIFT 0
+#define XT2000_WRPROT_FLWP_MASK 0x00000001
+/* Reserved but present write protect bits: */
+#define XT2000_WRPROT_WRP_SHIFT 1
+#define XT2000_WRPROT_WRP_BITS 7
+#define XT2000_WRPROT_WRP_MASK 0x000000FE
+
+/* SWRST (software reset; allows s/w to generate power-on equivalent reset): */
+
+/* Software reset bits: */
+#define XT2000_SWRST_SWR_SHIFT 0
+#define XT2000_SWRST_SWR_BITS 16
+#define XT2000_SWRST_SWR_MASK 0x0000FFFF
+/* Software reset value -- writing this value resets the board: */
+#define XT2000_SWRST_RESETVALUE 0x0000DEAD
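+
+/* Hedged usage sketch: writing the magic value through the accessor defined
+ * earlier performs a power-on-equivalent reset of the whole board:
+ *	XT2000_SWRST_REG = XT2000_SWRST_RESETVALUE;
+ */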
+
+/* SYSRST (system reset; controls reset of individual peripherals): */
+
+/* All-device reset: */
+#define XT2000_SYSRST_ALL_SHIFT 0
+#define XT2000_SYSRST_ALL_BITS 4
+#define XT2000_SYSRST_ALL_MASK 0x0000000F
+/* HDSP-2534 LED display reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_LED_SHIFT 0
+#define XT2000_SYSRST_LED_MASK 0x00000001
+/* Sonic DP83934 Ethernet controller reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_SONIC_SHIFT 1
+#define XT2000_SYSRST_SONIC_MASK 0x00000002
+/* DP16552 DUART reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_DUART_SHIFT 2
+#define XT2000_SYSRST_DUART_MASK 0x00000004
+/* V3 V320 PCI bridge controller reset (1=reset, 0=nothing): */
+#define XT2000_SYSRST_V3_SHIFT 3
+#define XT2000_SYSRST_V3_MASK 0x00000008
+
+/* IMASK (interrupt mask; 0=disable, 1=enable): */
+/* ISTAT (interrupt status; 0=inactive, 1=pending): */
+
+/* PCI INTP interrupt: */
+#define XT2000_INTMUX_PCI_INTP_SHIFT 2
+#define XT2000_INTMUX_PCI_INTP_MASK 0x00000004
+/* PCI INTS interrupt: */
+#define XT2000_INTMUX_PCI_INTS_SHIFT 3
+#define XT2000_INTMUX_PCI_INTS_MASK 0x00000008
+/* PCI INTD interrupt: */
+#define XT2000_INTMUX_PCI_INTD_SHIFT 4
+#define XT2000_INTMUX_PCI_INTD_MASK 0x00000010
+/* V320 PCI controller interrupt: */
+#define XT2000_INTMUX_V3_SHIFT 5
+#define XT2000_INTMUX_V3_MASK 0x00000020
+/* PCI ENUM interrupt: */
+#define XT2000_INTMUX_PCI_ENUM_SHIFT 6
+#define XT2000_INTMUX_PCI_ENUM_MASK 0x00000040
+/* PCI DEG interrupt: */
+#define XT2000_INTMUX_PCI_DEG_SHIFT 7
+#define XT2000_INTMUX_PCI_DEG_MASK 0x00000080
+
+/* V3CFG (V3 config, V320 PCI controller): */
+
+/* V3 address control (0=pass-thru, 1=V3 address bits 31:28 set to 4'b0001 [default]): */
+#define XT2000_V3CFG_V3ADC_SHIFT 0
+#define XT2000_V3CFG_V3ADC_MASK 0x00000001
+
+/* I2C Devices */
+
+#define XT2000_I2C_RTC_ID 0x68
+#define XT2000_I2C_NVRAM0_ID 0x56 /* 1st 256 byte block */
+#define XT2000_I2C_NVRAM1_ID 0x57 /* 2nd 256 byte block */
+
+/* NVRAM Board Info structure: */
+
+#define XT2000_NVRAM_SIZE 512
+
+#define XT2000_NVRAM_BINFO_START 0x100
+#define XT2000_NVRAM_BINFO_SIZE 0x20
+#define XT2000_NVRAM_BINFO_VERSION 0x10 /* version 1.0 */
+#if 0
+#define XT2000_NVRAM_BINFO_VERSION_OFFSET 0x00
+#define XT2000_NVRAM_BINFO_VERSION_SIZE 0x1
+#define XT2000_NVRAM_BINFO_ETH_ADDR_OFFSET 0x02
+#define XT2000_NVRAM_BINFO_ETH_ADDR_SIZE 0x6
+#define XT2000_NVRAM_BINFO_SN_OFFSET 0x10
+#define XT2000_NVRAM_BINFO_SN_SIZE 0xE
+#define XT2000_NVRAM_BINFO_CRC_OFFSET 0x1E
+#define XT2000_NVRAM_BINFO_CRC_SIZE 0x2
+#endif /*0*/
+
+#if !defined(__ASSEMBLY__) && !defined(_NOCLANGUAGE)
+typedef struct xt2000_nvram_binfo {
+ unsigned char version;
+ unsigned char reserved1;
+ unsigned char eth_addr[6];
+ unsigned char reserved8[8];
+ unsigned char serialno[14];
+ unsigned char crc[2]; /* 16-bit CRC */
+} xt2000_nvram_binfo;
+#endif /*!__ASSEMBLY__ && !_NOCLANGUAGE*/
+
+
+#endif /*_INC_XT2000_H_*/
+
diff --git a/include/asm-xtensa/xtensa/xtboard.h b/include/asm-xtensa/xtensa/xtboard.h
new file mode 100644
index 000000000000..22469c175307
--- /dev/null
+++ b/include/asm-xtensa/xtensa/xtboard.h
@@ -0,0 +1,120 @@
+#ifndef _xtboard_h_included_
+#define _xtboard_h_included_
+
+/*
+ * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
+ *
+ * xtboard.h -- Routines for getting useful information from the board.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002 Tensilica Inc.
+ */
+
+
+#include <xtensa/xt2000.h>
+
+#define XTBOARD_RTC_ERROR -1
+#define XTBOARD_RTC_STOPPED -2
+
+
+/* xt2000-i2cdev.c: */
+typedef void XtboardDelayFunc( unsigned );
+extern XtboardDelayFunc* xtboard_set_nsdelay_func( XtboardDelayFunc *delay_fn );
+extern int xtboard_i2c_read (unsigned id, unsigned char *buf, unsigned addr, unsigned size);
+extern int xtboard_i2c_write(unsigned id, unsigned char *buf, unsigned addr, unsigned size);
+extern int xtboard_i2c_wait_nvram_ack(unsigned id, unsigned swtimer);
+
+/* xtboard.c: */
+extern int xtboard_nvram_read (unsigned addr, unsigned len, unsigned char *buf);
+extern int xtboard_nvram_write(unsigned addr, unsigned len, unsigned char *buf);
+extern int xtboard_nvram_binfo_read (xt2000_nvram_binfo *buf);
+extern int xtboard_nvram_binfo_write(xt2000_nvram_binfo *buf);
+extern int xtboard_nvram_binfo_valid(xt2000_nvram_binfo *buf);
+extern int xtboard_ethermac_get(unsigned char *buf);
+extern int xtboard_ethermac_set(unsigned char *buf);
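+
+/* Hedged usage sketch (not in the original header): fetching the board's
+ * Ethernet MAC with the helper above; a 0-on-success convention is an
+ * assumption here:
+ *	unsigned char mac[6];
+ *	if( xtboard_ethermac_get( mac ) == 0 )
+ *		... use mac[0..5] ...
+ */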
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_rtc_time
+/
+/ Description: Get time stored in real-time clock.
+/
+/ Returns: time in seconds stored in real-time clock.
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_get_rtc_time(void);
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_set_rtc_time
+/
+/ Description: Set time stored in real-time clock.
+/
+/ Parameters: time -- time in seconds to store to real-time clock
+/
+/ Returns: 0 on success, xtboard_i2c_write() error code otherwise.
+/-**----------------------------------------------------------------------------*/
+
+extern int xtboard_set_rtc_time(unsigned time);
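+
+/* Hedged usage sketch: checking RTC health with the call above, assuming the
+ * error codes are returned as (unsigned) casts of the negative values:
+ *	unsigned t = xtboard_get_rtc_time();
+ *	if( t == (unsigned)XTBOARD_RTC_ERROR || t == (unsigned)XTBOARD_RTC_STOPPED )
+ *		... RTC unavailable ...
+ */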
+
+
+/* xtfreq.c: */
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_measure_sys_clk
+/
+/ Description: Get frequency of system clock.
+/
+/ Parameters: none
+/
+/ Returns: frequency of system clock.
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_measure_sys_clk(void);
+
+
+#if 0 /* old stuff from xtboard.c: */
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_nvram_valid
+/
+/ Description: Determines if data in NVRAM is valid.
+/
+/ Parameters: delay -- 10us delay function
+/
+/ Returns: 1 if NVRAM is valid, 0 otherwise
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_nvram_valid(void (*delay)( void ));
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_nvram_contents
+/
+/ Description: Returns contents of NVRAM.
+/
+/ Parameters: buf -- buffer to receive NVRAM contents.
+/ delay -- 10us delay function
+/
+/ Returns: 1 if NVRAM is valid, 0 otherwise
+/-**----------------------------------------------------------------------------*/
+
+extern unsigned xtboard_get_nvram_contents(unsigned char *buf, void (*delay)( void ));
+
+/*+*----------------------------------------------------------------------------
+/ Function: xtboard_get_ether_addr
+/
+/ Description: Returns ethernet address of board.
+/
+/ Parameters: buf -- buffer to store ethernet address
+/ delay -- 10us delay function
+/
+/ Returns: nothing.
+/-**----------------------------------------------------------------------------*/
+
+extern void xtboard_get_ether_addr(unsigned char *buf, void (*delay)( void ));
+
+#endif /*0*/
+
+
+#endif /*_xtboard_h_included_*/
+