Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/Kbuild | 3
-rw-r--r--  arch/mips/include/asm/addrspace.h | 9
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 4
-rw-r--r--  arch/mips/include/asm/atomic.h | 570
-rw-r--r--  arch/mips/include/asm/barrier.h | 228
-rw-r--r--  arch/mips/include/asm/bitops.h | 430
-rw-r--r--  arch/mips/include/asm/bmips.h | 10
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 22
-rw-r--r--  arch/mips/include/asm/bugs.h | 18
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 76
-rw-r--r--  arch/mips/include/asm/compat.h | 18
-rw-r--r--  arch/mips/include/asm/cop2.h | 2
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 23
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 23
-rw-r--r--  arch/mips/include/asm/cpu.h | 31
-rw-r--r--  arch/mips/include/asm/dma-direct.h | 8
-rw-r--r--  arch/mips/include/asm/fixmap.h | 2
-rw-r--r--  arch/mips/include/asm/futex.h | 15
-rw-r--r--  arch/mips/include/asm/gio_device.h | 2
-rw-r--r--  arch/mips/include/asm/hazards.h | 4
-rw-r--r--  arch/mips/include/asm/io.h | 47
-rw-r--r--  arch/mips/include/asm/irqflags.h | 8
-rw-r--r--  arch/mips/include/asm/llsc.h | 19
-rw-r--r--  arch/mips/include/asm/local.h | 4
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h | 1
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 9
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-dec/cpu-feature-overrides.h | 1
-rw-r--r--  arch/mips/include/asm/mach-generic/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ip22/spaces.h | 12
-rw-r--r--  arch/mips/include/asm/mach-ip22/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ip27/kernel-entry-init.h | 12
-rw-r--r--  arch/mips/include/asm/mach-ip27/mangle-port.h | 4
-rw-r--r--  arch/mips/include/asm/mach-ip27/mmzone.h | 6
-rw-r--r--  arch/mips/include/asm/mach-ip27/topology.h | 7
-rw-r--r--  arch/mips/include/asm/mach-ip27/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ip28/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h | 83
-rw-r--r--  arch/mips/include/asm/mach-ip30/irq.h | 87
-rw-r--r--  arch/mips/include/asm/mach-ip30/kernel-entry-init.h | 13
-rw-r--r--  arch/mips/include/asm/mach-ip30/mangle-port.h | 22
-rw-r--r--  arch/mips/include/asm/mach-ip30/spaces.h | 20
-rw-r--r--  arch/mips/include/asm/mach-ip30/war.h | 26
-rw-r--r--  arch/mips/include/asm/mach-ip32/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-jz4740/gpio.h | 15
-rw-r--r--  arch/mips/include/asm/mach-jz4740/jz4740_fb.h | 58
-rw-r--r--  arch/mips/include/asm/mach-jz4740/jz4740_mmc.h | 12
-rw-r--r--  arch/mips/include/asm/mach-jz4740/platform.h | 26
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h | 44
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h (renamed from arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h) | 0
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/loongson.h | 326
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/machine.h (renamed from arch/mips/include/asm/mach-loongson64/machine.h) | 12
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h | 36
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/mem.h (renamed from arch/mips/include/asm/mach-loongson64/mem.h) | 6
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/pci.h | 46
-rw-r--r--  arch/mips/include/asm/mach-loongson2ef/spaces.h | 10
-rw-r--r--  arch/mips/include/asm/mach-loongson32/prom.h | 20
-rw-r--r--  arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h | 6
-rw-r--r--  arch/mips/include/asm/mach-loongson64/irq.h | 4
-rw-r--r--  arch/mips/include/asm/mach-loongson64/kernel-entry-init.h | 32
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson.h | 115
-rw-r--r--  arch/mips/include/asm/mach-loongson64/loongson_regs.h | 227
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h | 29
-rw-r--r--  arch/mips/include/asm/mach-loongson64/pci.h | 31
-rw-r--r--  arch/mips/include/asm/mach-loongson64/topology.h | 4
-rw-r--r--  arch/mips/include/asm/mach-malta/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-rc32434/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-rm/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-sibyte/war.h | 1
-rw-r--r--  arch/mips/include/asm/mach-tx49xx/war.h | 1
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 13
-rw-r--r--  arch/mips/include/asm/module.h | 18
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ipd.h | 1
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-sli-defs.h | 1
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 4
-rw-r--r--  arch/mips/include/asm/pci.h | 1
-rw-r--r--  arch/mips/include/asm/pci/bridge.h | 4
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 8
-rw-r--r--  arch/mips/include/asm/pgtable-32.h | 68
-rw-r--r--  arch/mips/include/asm/pgtable-64.h | 44
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h | 34
-rw-r--r--  arch/mips/include/asm/pgtable.h | 41
-rw-r--r--  arch/mips/include/asm/pmon.h | 46
-rw-r--r--  arch/mips/include/asm/processor.h | 7
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 362
-rw-r--r--  arch/mips/include/asm/serial.h | 18
-rw-r--r--  arch/mips/include/asm/sgi/heart.h | 272
-rw-r--r--  arch/mips/include/asm/sgi/sgi.h | 48
-rw-r--r--  arch/mips/include/asm/sgialib.h | 22
-rw-r--r--  arch/mips/include/asm/sgiarcs.h | 103
-rw-r--r--  arch/mips/include/asm/sn/agent.h | 2
-rw-r--r--  arch/mips/include/asm/sn/arch.h | 34
-rw-r--r--  arch/mips/include/asm/sn/gda.h | 4
-rw-r--r--  arch/mips/include/asm/sn/hub.h | 17
-rw-r--r--  arch/mips/include/asm/sn/intr.h | 17
-rw-r--r--  arch/mips/include/asm/sn/ioc3.h | 370
-rw-r--r--  arch/mips/include/asm/sn/klconfig.h | 4
-rw-r--r--  arch/mips/include/asm/sn/kldir.h | 193
-rw-r--r--  arch/mips/include/asm/sn/mapped_kernel.h | 4
-rw-r--r--  arch/mips/include/asm/sn/sn0/arch.h | 18
-rw-r--r--  arch/mips/include/asm/sn/sn0/hub.h | 22
-rw-r--r--  arch/mips/include/asm/sn/sn0/hubni.h | 8
-rw-r--r--  arch/mips/include/asm/sn/sn0/ip27.h | 85
-rw-r--r--  arch/mips/include/asm/sn/sn0/kldir.h | 186
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h | 20
-rw-r--r--  arch/mips/include/asm/sn/types.h | 8
-rw-r--r--  arch/mips/include/asm/string.h | 121
-rw-r--r--  arch/mips/include/asm/sync.h | 207
-rw-r--r--  arch/mips/include/asm/syscall.h | 21
-rw-r--r--  arch/mips/include/asm/thread_info.h | 20
-rw-r--r--  arch/mips/include/asm/unistd.h | 1
-rw-r--r--  arch/mips/include/asm/unroll.h | 77
-rw-r--r--  arch/mips/include/asm/vdso.h | 78
-rw-r--r--  arch/mips/include/asm/vdso/gettimeofday.h | 209
-rw-r--r--  arch/mips/include/asm/vdso/vdso.h | 75
-rw-r--r--  arch/mips/include/asm/vdso/vsyscall.h | 36
-rw-r--r--  arch/mips/include/asm/vmalloc.h | 4
-rw-r--r--  arch/mips/include/asm/war.h | 13
122 files changed, 3006 insertions, 2913 deletions
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index c8b595c60910..4ebd8ce254ce 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -6,20 +6,19 @@ generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generic-y += current.h
generic-y += device.h
-generic-y += dma-contiguous.h
generic-y += emergency-restart.h
generic-y += export.h
generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += msi.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += sections.h
+generic-y += serial.h
generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += user.h
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 4856adc8906e..59a48c60a065 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -135,18 +135,9 @@
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
-#ifndef CONFIG_CPU_R8000
-
-/*
- * The R8000 doesn't have the 32-bit compat spaces so we don't define them
- * in order to catch bugs in the source code.
- */
-
#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
-#endif
-
#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK)
#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE)
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index feb069cbf44e..655f40ddb6d1 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -63,7 +63,7 @@
.endm
.macro local_irq_disable reg=t0
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, 1
sw \reg, TI_PRE_COUNT($28)
@@ -73,7 +73,7 @@
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, -1
sw \reg, TI_PRE_COUNT($28)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 9a82dd11c0e9..e5ac88392d1f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -20,157 +20,176 @@
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
#include <asm/war.h>
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_OPS(pfx, type) \
+static __always_inline type pfx##_read(const pfx##_t *v) \
+{ \
+ return READ_ONCE(v->counter); \
+} \
+ \
+static __always_inline void pfx##_set(pfx##_t *v, type i) \
+{ \
+ WRITE_ONCE(v->counter, i); \
+} \
+ \
+static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+{ \
+ return cmpxchg(&v->counter, o, n); \
+} \
+ \
+static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+{ \
+ return xchg(&v->counter, n); \
+}
-/*
- * atomic_read - read atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically reads the value of @v.
- */
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define ATOMIC_INIT(i) { (i) }
+ATOMIC_OPS(atomic, int)
-/*
- * atomic_set - set atomic variable
- * @v: pointer of type atomic_t
- * @i: required value
- *
- * Atomically sets the value of @v to @i.
- */
-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
+#ifdef CONFIG_64BIT
+# define ATOMIC64_INIT(i) { (i) }
+ATOMIC_OPS(atomic64, s64)
+#endif
-#define ATOMIC_OP(op, c_op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %0, %1 # atomic_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " sc %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
+#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+{ \
+ type temp; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
+ " " #asm_op " %0, %2 \n" \
+ " " #sc " %0, %1 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
}
-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_" #op "_return \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result c_op i; \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
-{ \
- int result; \
- \
- if (kernel_uses_llsc) { \
- int temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: ll %1, %2 # atomic_fetch_" #op " \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " sc %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- " move %0, %1 \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
+#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+{ \
+ int temp, result; \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " " #sc " %0, %2 \n" \
+ "\t" __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ " move %0, %1 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) : __LLSC_CLOBBER); \
+ \
+ return result; \
}
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(add, +=, addu)
-ATOMIC_OPS(sub, -=, subu)
+ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
+ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
+ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
+# define atomic64_add_return_relaxed atomic64_add_return_relaxed
+# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif /* CONFIG_64BIT */
+
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op) \
- ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
+ ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
+ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
+ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#ifdef CONFIG_64BIT
+ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
+ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
+ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
+# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -184,257 +203,66 @@ ATOMIC_OPS(xor, ^=, xor)
* Atomically test @v and subtract @i if @v is greater or equal than @i.
* The function returns the old value of @v minus @i.
*/
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-{
- int result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc) {
- int temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
- " .set pop \n"
- " subu %0, %1, %3 \n"
- " move %1, %0 \n"
- " bltz %0, 1f \n"
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- " sc %1, %2 \n"
- "\t" __scbeqz " %1, 1b \n"
- "1: \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- if (result >= 0)
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
+#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
+static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+{ \
+ type temp, result; \
+ \
+ smp_mb__before_atomic(); \
+ \
+ if (!kernel_uses_llsc) { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ result -= i; \
+ if (result >= 0) \
+ v->counter = result; \
+ raw_local_irq_restore(flags); \
+ smp_mb__after_atomic(); \
+ return result; \
+ } \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
+ " .set pop \n" \
+ " " #op " %0, %1, %3 \n" \
+ " move %1, %0 \n" \
+ " bltz %0, 2f \n" \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " #sc " %1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
+ " .set pop \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i) \
+ : __LLSC_CLOBBER); \
+ \
+ /* \
+ * In the Loongson3 workaround case we already have a \
+ * completion barrier at 2: above, which is needed due to the \
+ * bltz that can branch to code outside of the LL/SC loop. As \
+ * such, we don't need to emit another barrier here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__after_atomic(); \
+ \
+ return result; \
}
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic_t
- */
+ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
-
-#define ATOMIC64_INIT(i) { (i) }
-
-/*
- * atomic64_read - read atomic variable
- * @v: pointer of type atomic64_t
- *
- */
-#define atomic64_read(v) READ_ONCE((v)->counter)
-
-/*
- * atomic64_set - set atomic variable
- * @v: pointer of type atomic64_t
- * @i: required value
- */
-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
-
-#define ATOMIC64_OP(op, c_op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
-{ \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %0, %1 # atomic64_" #op " \n" \
- " " #asm_op " %0, %2 \n" \
- " scd %0, %1 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " .set pop \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
-}
-
-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_" #op "_return\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " " #asm_op " %0, %1, %3 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- result c_op i; \
- v->counter = result; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
-{ \
- s64 result; \
- \
- if (kernel_uses_llsc) { \
- s64 temp; \
- \
- loongson_llsc_mb(); \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set "MIPS_ISA_LEVEL" \n" \
- "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
- " " #asm_op " %0, %1, %3 \n" \
- " scd %0, %2 \n" \
- "\t" __scbeqz " %0, 1b \n" \
- " move %0, %1 \n" \
- " .set pop \n" \
- : "=&r" (result), "=&r" (temp), \
- "+" GCC_OFF_SMALL_ASM() (v->counter) \
- : "Ir" (i)); \
- } else { \
- unsigned long flags; \
- \
- raw_local_irq_save(flags); \
- result = v->counter; \
- v->counter c_op i; \
- raw_local_irq_restore(flags); \
- } \
- \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(add, +=, daddu)
-ATOMIC64_OPS(sub, -=, dsubu)
-
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-
-#undef ATOMIC64_OPS
-#define ATOMIC64_OPS(op, c_op, asm_op) \
- ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_FETCH_OP(op, c_op, asm_op)
-
-ATOMIC64_OPS(and, &=, and)
-ATOMIC64_OPS(or, |=, or)
-ATOMIC64_OPS(xor, ^=, xor)
-
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-/*
- * atomic64_sub_if_positive - conditionally subtract integer from atomic
- * variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically test @v and subtract @i if @v is greater or equal than @i.
- * The function returns the old value of @v minus @i.
- */
-static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
-{
- s64 result;
-
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc) {
- s64 temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_LEVEL" \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
- " dsubu %0, %1, %3 \n"
- " move %1, %0 \n"
- " bltz %0, 1f \n"
- " scd %1, %2 \n"
- "\t" __scbeqz " %1, 1b \n"
- "1: \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp),
- "+" GCC_OFF_SMALL_ASM() (v->counter)
- : "Ir" (i));
- } else {
- unsigned long flags;
-
- raw_local_irq_save(flags);
- result = v->counter;
- result -= i;
- if (result >= 0)
- v->counter = result;
- raw_local_irq_restore(flags);
- }
-
- smp_llsc_mb();
-
- return result;
-}
-
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
-
-/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
- * @v: pointer of type atomic64_t
- */
+ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#endif
-#endif /* CONFIG_64BIT */
+#undef ATOMIC_SIP_OP
#endif /* _ASM_ATOMIC_H */
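As an aside on the consolidation above: the new ATOMIC_OPS(pfx, type) accessor template generates the same four trivial wrappers for both atomic_t and atomic64_t. Roughly, ATOMIC_OPS(atomic, int) expands to the following (an illustrative expansion, not part of the commit):

/* Illustrative expansion of ATOMIC_OPS(atomic, int) from the hunk above. */
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}

static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return cmpxchg(&v->counter, o, n);
}

static __always_inline int atomic_xchg(atomic_t *v, int n)
{
	return xchg(&v->counter, n);
}

The same template invoked as ATOMIC_OPS(atomic64, s64) yields the atomic64_* accessors, which is why the separate ATOMIC64_* copies could be deleted.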
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index b865e317a14f..49ff172a72b9 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -9,131 +9,26 @@
#define __ASM_BARRIER_H
#include <asm/addrspace.h>
+#include <asm/sync.h>
-/*
- * Sync types defined by the MIPS architecture (document MD00087 table 6.5)
- * These values are used with the sync instruction to perform memory barriers.
- * Types of ordering guarantees available through the SYNC instruction:
- * - Completion Barriers
- * - Ordering Barriers
- * As compared to the completion barrier, the ordering barrier is a
- * lighter-weight operation as it does not require the specified instructions
- * before the SYNC to be already completed. Instead it only requires that those
- * specified instructions which are subsequent to the SYNC in the instruction
- * stream are never re-ordered for processing ahead of the specified
- * instructions which are before the SYNC in the instruction stream.
- * This potentially reduces how many cycles the barrier instruction must stall
- * before it completes.
- * Implementations that do not use any of the non-zero values of stype to define
- * different barriers, such as ordering barriers, must make those stype values
- * act the same as stype zero.
- */
-
-/*
- * Completion barriers:
- * - Every synchronizable specified memory instruction (loads or stores or both)
- * that occurs in the instruction stream before the SYNC instruction must be
- * already globally performed before any synchronizable specified memory
- * instructions that occur after the SYNC are allowed to be performed, with
- * respect to any other processor or coherent I/O module.
- *
- * - The barrier does not guarantee the order in which instruction fetches are
- * performed.
- *
- * - A stype value of zero will always be defined such that it performs the most
- * complete set of synchronization operations that are defined.This means
- * stype zero always does a completion barrier that affects both loads and
- * stores preceding the SYNC instruction and both loads and stores that are
- * subsequent to the SYNC instruction. Non-zero values of stype may be defined
- * by the architecture or specific implementations to perform synchronization
- * behaviors that are less complete than that of stype zero. If an
- * implementation does not use one of these non-zero values to define a
- * different synchronization behavior, then that non-zero value of stype must
- * act the same as stype zero completion barrier. This allows software written
- * for an implementation with a lighter-weight barrier to work on another
- * implementation which only implements the stype zero completion barrier.
- *
- * - A completion barrier is required, potentially in conjunction with SSNOP (in
- * Release 1 of the Architecture) or EHB (in Release 2 of the Architecture),
- * to guarantee that memory reference results are visible across operating
- * mode changes. For example, a completion barrier is required on some
- * implementations on entry to and exit from Debug Mode to guarantee that
- * memory effects are handled correctly.
- */
-
-/*
- * stype 0 - A completion barrier that affects preceding loads and stores and
- * subsequent loads and stores.
- * Older instructions which must reach the load/store ordering point before the
- * SYNC instruction completes: Loads, Stores
- * Younger instructions which must reach the load/store ordering point only
- * after the SYNC instruction completes: Loads, Stores
- * Older instructions which must be globally performed when the SYNC instruction
- * completes: Loads, Stores
- */
-#define STYPE_SYNC 0x0
-
-/*
- * Ordering barriers:
- * - Every synchronizable specified memory instruction (loads or stores or both)
- * that occurs in the instruction stream before the SYNC instruction must
- * reach a stage in the load/store datapath after which no instruction
- * re-ordering is possible before any synchronizable specified memory
- * instruction which occurs after the SYNC instruction in the instruction
- * stream reaches the same stage in the load/store datapath.
- *
- * - If any memory instruction before the SYNC instruction in program order,
- * generates a memory request to the external memory and any memory
- * instruction after the SYNC instruction in program order also generates a
- * memory request to external memory, the memory request belonging to the
- * older instruction must be globally performed before the time the memory
- * request belonging to the younger instruction is globally performed.
- *
- * - The barrier does not guarantee the order in which instruction fetches are
- * performed.
- */
+static inline void __sync(void)
+{
+ asm volatile(__SYNC(full, always) ::: "memory");
+}
-/*
- * stype 0x10 - An ordering barrier that affects preceding loads and stores and
- * subsequent loads and stores.
- * Older instructions which must reach the load/store ordering point before the
- * SYNC instruction completes: Loads, Stores
- * Younger instructions which must reach the load/store ordering point only
- * after the SYNC instruction completes: Loads, Stores
- * Older instructions which must be globally performed when the SYNC instruction
- * completes: N/A
- */
-#define STYPE_SYNC_MB 0x10
+static inline void rmb(void)
+{
+ asm volatile(__SYNC(rmb, always) ::: "memory");
+}
+#define rmb rmb
-/*
- * stype 0x14 - A completion barrier specific to global invalidations
- *
- * When a sync instruction of this type completes any preceding GINVI or GINVT
- * operation has been globalized & completed on all coherent CPUs. Anything
- * that the GINV* instruction should invalidate will have been invalidated on
- * all coherent CPUs when this instruction completes. It is implementation
- * specific whether the GINV* instructions themselves will ensure completion,
- * or this sync type will.
- *
- * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3)
- * this sync type also requires that previous SYNCI operations have completed.
- */
-#define STYPE_GINV 0x14
+static inline void wmb(void)
+{
+ asm volatile(__SYNC(wmb, always) ::: "memory");
+}
+#define wmb wmb
-#ifdef CONFIG_CPU_HAS_SYNC
-#define __sync() \
- __asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- ".set mips2\n\t" \
- "sync\n\t" \
- ".set pop" \
- : /* no output */ \
- : /* no input */ \
- : "memory")
-#else
-#define __sync() do { } while(0)
-#endif
+#define fast_mb() __sync()
#define __fast_iob() \
__asm__ __volatile__( \
@@ -146,17 +41,8 @@
: "m" (*(int *)CKSEG1) \
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
-
-# define fast_wmb() __syncw()
-# define fast_rmb() barrier()
-# define fast_mb() __sync()
# define fast_iob() do { } while (0)
#else /* ! CONFIG_CPU_CAVIUM_OCTEON */
-# define fast_wmb() __sync()
-# define fast_rmb() __sync()
-# define fast_mb() __sync()
# ifdef CONFIG_SGI_IP28
# define fast_iob() \
__asm__ __volatile__( \
@@ -192,33 +78,33 @@
#endif /* !CONFIG_CPU_HAS_WB */
-#define wmb() fast_wmb()
-#define rmb() fast_rmb()
-
#if defined(CONFIG_WEAK_ORDERING)
-# ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define __smp_mb() __sync()
-# define __smp_rmb() barrier()
-# define __smp_wmb() __syncw()
-# else
-# define __smp_mb() __asm__ __volatile__("sync" : : :"memory")
-# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory")
-# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory")
-# endif
+# define __smp_mb() __sync()
+# define __smp_rmb() rmb()
+# define __smp_wmb() wmb()
#else
-#define __smp_mb() barrier()
-#define __smp_rmb() barrier()
-#define __smp_wmb() barrier()
+# define __smp_mb() barrier()
+# define __smp_rmb() barrier()
+# define __smp_wmb() barrier()
#endif
+/*
+ * When LL/SC does imply order, it must also be a compiler barrier to avoid the
+ * compiler from reordering where the CPU will not. When it does not imply
+ * order, the compiler is also free to reorder across the LL/SC loop and
+ * ordering will be done by smp_llsc_mb() and friends.
+ */
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB " sync \n"
+# define __WEAK_LLSC_MB sync
+# define smp_llsc_mb() \
+ __asm__ __volatile__(__stringify(__WEAK_LLSC_MB) : : :"memory")
+# define __LLSC_CLOBBER
#else
-#define __WEAK_LLSC_MB " \n"
+# define __WEAK_LLSC_MB
+# define smp_llsc_mb() do { } while (0)
+# define __LLSC_CLOBBER "memory"
#endif
-#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
-
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define smp_mb__before_llsc() smp_wmb()
#define __smp_mb__before_llsc() __smp_wmb()
@@ -233,48 +119,22 @@
#define nudge_writes() mb()
#endif
-#define __smp_mb__before_atomic() __smp_mb__before_llsc()
-#define __smp_mb__after_atomic() smp_llsc_mb()
-
/*
- * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
- * store or pref) in between an ll & sc can cause the sc instruction to
- * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
- * containing such sequences, this bug bites harder than we might otherwise
- * expect due to reordering & speculation:
- *
- * 1) A memory access appearing prior to the ll in program order may actually
- * be executed after the ll - this is the reordering case.
- *
- * In order to avoid this we need to place a memory barrier (ie. a sync
- * instruction) prior to every ll instruction, in between it & any earlier
- * memory access instructions. Many of these cases are already covered by
- * smp_mb__before_llsc() but for the remaining cases, typically ones in
- * which multiple CPUs may operate on a memory location but ordering is not
- * usually guaranteed, we use loongson_llsc_mb() below.
- *
- * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
- *
- * 2) If a conditional branch exists between an ll & sc with a target outside
- * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg()
- * or similar, then misprediction of the branch may allow speculative
- * execution of memory accesses from outside of the ll-sc loop.
- *
- * In order to avoid this we need a memory barrier (ie. a sync instruction)
- * at each affected branch target, for which we also use loongson_llsc_mb()
- * defined below.
- *
- * This case affects all current Loongson 3 CPUs.
+ * In the Loongson3 LL/SC workaround case, all of our LL/SC loops already have
+ * a completion barrier immediately preceding the LL instruction. Therefore we
+ * can skip emitting a barrier from __smp_mb__before_atomic().
*/
-#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */
-#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __smp_mb__before_atomic()
#else
-#define loongson_llsc_mb() do { } while (0)
+# define __smp_mb__before_atomic() __smp_mb__before_llsc()
#endif
+#define __smp_mb__after_atomic() smp_llsc_mb()
+
static inline void sync_ginv(void)
{
- asm volatile("sync\t%0" :: "i"(STYPE_GINV));
+ asm volatile(__SYNC(ginv, always));
}
#include <asm-generic/barrier.h>
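For reference while reading the barrier.h hunks: the SYNC stype values documented by the deleted comment block (0x0, 0x10, 0x14) are now selected through __SYNC() from asm/sync.h, which this diff does not show. A minimal sketch of what those barriers emit at the instruction level, reusing the removed STYPE_* names purely for illustration:

/* Sketch only -- the real selection logic lives in asm/sync.h. */
#define STYPE_SYNC	0x00	/* completion barrier: preceding + subsequent loads/stores */
#define STYPE_SYNC_MB	0x10	/* lighter-weight ordering barrier */
#define STYPE_GINV	0x14	/* completion barrier for GINVI/GINVT globalization */

static inline void ordering_barrier_sketch(void)
{
	/* Emits "sync 0x10"; roughly what __SYNC(mb, always) boils down to
	 * on CPUs that implement the non-zero stypes. */
	asm volatile("sync\t%0" : : "i" (STYPE_SYNC_MB) : "memory");
}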
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 9a466dde9b96..a74769940fbd 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -13,16 +13,55 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
+#define __bit_op(mem, insn, inputs...) do { \
+ unsigned long temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL "%0, %1 \n" \
+ " " insn " \n" \
+ " " __SC "%0, %1 \n" \
+ " " __SC_BEQZ "%0, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+} while (0)
+
+#define __test_bit_op(mem, ll_dst, insn, inputs...) ({ \
+ unsigned long orig, temp; \
+ \
+ asm volatile( \
+ " .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
+ "1: " __LL ll_dst ", %2 \n" \
+ " " insn " \n" \
+ " " __SC "%1, %2 \n" \
+ " " __SC_BEQZ "%1, 1b \n" \
+ " .set pop \n" \
+ : "=&r"(orig), "=&r"(temp), \
+ "+" GCC_OFF_SMALL_ASM()(mem) \
+ : inputs \
+ : __LLSC_CLOBBER); \
+ \
+ orig; \
+})
+
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
@@ -30,8 +69,6 @@
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
-int __mips_test_and_set_bit(unsigned long nr,
- volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
@@ -52,48 +89,20 @@ int __mips_test_and_change_bit(unsigned long nr,
*/
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- int bit = nr & SZLONG_MASK;
- unsigned long temp;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # set_bit \n"
- " " __INS "%0, %3, %2, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (bit), "r" (~0));
- } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
- } else if (kernel_uses_llsc) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # set_bit \n"
- " or %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit));
- } while (unlikely(!temp));
- } else
+ if (!kernel_uses_llsc) {
__mips_set_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
+ __bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
+ return;
+ }
+
+ __bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}
/*
@@ -108,48 +117,20 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
*/
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- int bit = nr & SZLONG_MASK;
- unsigned long temp;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (~(1UL << bit)));
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # clear_bit \n"
- " " __INS "%0, $0, %2, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (bit));
- } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
- } else if (kernel_uses_llsc) {
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # clear_bit \n"
- " and %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (~(1UL << bit)));
- } while (unlikely(!temp));
- } else
+ if (!kernel_uses_llsc) {
__mips_clear_bit(nr, addr);
+ return;
+ }
+
+ if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
+ __bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
+ return;
+ }
+
+ __bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}
/*
@@ -177,155 +158,61 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " beqzl %0, 1b \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit));
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- loongson_llsc_mb();
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # change_bit \n"
- " xor %0, %2 \n"
- " " __SC "%0, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
- : "ir" (1UL << bit));
- } while (unlikely(!temp));
- } else
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+
+ if (!kernel_uses_llsc) {
__mips_change_bit(nr, addr);
+ return;
+ }
+
+ __bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}
/*
- * test_and_set_bit - Set a bit and return its old value
+ * test_and_set_bit_lock - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * This operation is atomic and implies acquire ordering semantics
+ * after the memory operation.
*/
-static inline int test_and_set_bit(unsigned long nr,
+static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
- res = __mips_test_and_set_bit(nr, addr);
+ if (!kernel_uses_llsc) {
+ res = __mips_test_and_set_bit_lock(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
/*
- * test_and_set_bit_lock - Set a bit and return its old value
+ * test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
- * This operation is atomic and implies acquire ordering semantics
- * after the memory operation.
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
*/
-static inline int test_and_set_bit_lock(unsigned long nr,
+static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+m" (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
- " or %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
- res = __mips_test_and_set_bit_lock(nr, addr);
-
- smp_llsc_mb();
-
- return res != 0;
+ smp_mb__before_atomic();
+ return test_and_set_bit_lock(nr, addr);
}
+
/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
@@ -337,69 +224,30 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
-
- smp_mb__before_llsc();
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
+ smp_mb__before_atomic();
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- do {
- __asm__ __volatile__(
- " " __LL "%0, %1 # test_and_clear_bit \n"
- " " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "ir" (bit)
- : "memory");
- } while (unlikely(!temp));
-#endif
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
- " or %2, %0, %3 \n"
- " xor %2, %3 \n"
- " " __SC "%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
+ if (!kernel_uses_llsc) {
res = __mips_test_and_clear_bit(nr, addr);
+ } else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
+ res = __test_bit_op(*m, "%1",
+ __EXT "%0, %1, %3, 1;"
+ __INS "%1, $0, %3, 1",
+ "i"(bit));
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "or\t%1, %0, %3;"
+ "xor\t%1, %1, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
/*
@@ -413,53 +261,29 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
- int bit = nr & SZLONG_MASK;
- unsigned long res;
+ volatile unsigned long *m = &addr[BIT_WORD(nr)];
+ int bit = nr % BITS_PER_LONG;
+ unsigned long res, orig;
- smp_mb__before_llsc();
-
- if (kernel_uses_llsc && R10000_LLSC_WAR) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
+ smp_mb__before_atomic();
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- "1: " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "%2, %1 \n"
- " beqzl %2, 1b \n"
- " and %2, %0, %3 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } else if (kernel_uses_llsc) {
- unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
- unsigned long temp;
-
- do {
- __asm__ __volatile__(
- " .set push \n"
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
- " xor %2, %0, %3 \n"
- " " __SC "\t%2, %1 \n"
- " .set pop \n"
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
- : "r" (1UL << bit)
- : "memory");
- } while (unlikely(!res));
-
- res = temp & (1UL << bit);
- } else
+ if (!kernel_uses_llsc) {
res = __mips_test_and_change_bit(nr, addr);
+ } else {
+ orig = __test_bit_op(*m, "%0",
+ "xor\t%1, %0, %3",
+ "ir"(BIT(bit)));
+ res = (orig & BIT(bit)) != 0;
+ }
smp_llsc_mb();
- return res != 0;
+ return res;
}
+#undef __bit_op
+#undef __test_bit_op
+
#include <asm-generic/bitops/non-atomic.h>
/*
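A quick worked example of the BIT_WORD()/BITS_PER_LONG indexing that the rewritten bit operations above rely on (illustration only; the helpers come from <linux/bits.h>):

#include <linux/bits.h>

/* On a 64-bit kernel, bit number 70 of a bitmap lives in word
 * BIT_WORD(70) == 1, at bit position 70 % BITS_PER_LONG == 6.
 */
static void set_bit70_example(unsigned long *bitmap)
{
	volatile unsigned long *m = &bitmap[BIT_WORD(70)];	/* word 1 */
	int bit = 70 % BITS_PER_LONG;				/* bit 6  */

	/* Non-atomic equivalent of the "or %0, %2" step performed inside
	 * set_bit()'s LL/SC loop. */
	*m |= BIT(bit);
}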
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index bf6a8afd7ad2..581a6a3c66e4 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
#endif
}
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
extern int bmips_smp_enabled;
extern int bmips_cpu_offset;
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 235bc2f52113..61727785a247 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -61,7 +61,7 @@
/*
* Valid machtype for Loongson family
*/
-enum loongson_machine_type {
+enum loongson2ef_machine_type {
MACH_LOONGSON_UNKNOWN,
MACH_LEMOTE_FL2E,
MACH_LEMOTE_FL2F,
@@ -70,7 +70,6 @@ enum loongson_machine_type {
MACH_DEXXON_GDIUM2F10,
MACH_LEMOTE_NAS,
MACH_LEMOTE_LL2F,
- MACH_LOONGSON_GENERIC,
MACH_LOONGSON_END
};
@@ -81,39 +80,26 @@ enum loongson_machine_type {
#define MACH_INGENIC_JZ4740 1 /* JZ4740 SOC */
#define MACH_INGENIC_JZ4770 2 /* JZ4770 SOC */
#define MACH_INGENIC_JZ4780 3 /* JZ4780 SOC */
+#define MACH_INGENIC_X1000 4 /* X1000 SOC */
+#define MACH_INGENIC_X1830 5 /* X1830 SOC */
extern char *system_type;
const char *get_system_type(void);
extern unsigned long mips_machtype;
-#define BOOT_MEM_MAP_MAX 32
#define BOOT_MEM_RAM 1
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
#define BOOT_MEM_NOMAP 5
-/*
- * A memory map that's built upon what was determined
- * or specified on the command line.
- */
-struct boot_mem_map {
- int nr_map;
- struct boot_mem_map_entry {
- phys_addr_t addr; /* start of memory segment */
- phys_addr_t size; /* size of memory segment */
- long type; /* type of memory segment */
- } map[BOOT_MEM_MAP_MAX];
-};
-
-extern struct boot_mem_map boot_mem_map;
-
extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type);
extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
extern void prom_init(void);
extern void prom_free_prom_memory(void);
+extern void prom_cleanup(void);
extern void free_init_pages(const char *what,
unsigned long begin, unsigned long end);
diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h
index d8ab8b7129b5..d72dc6e1cf3c 100644
--- a/arch/mips/include/asm/bugs.h
+++ b/arch/mips/include/asm/bugs.h
@@ -26,9 +26,8 @@ extern void check_bugs64(void);
static inline void check_bugs_early(void)
{
-#ifdef CONFIG_64BIT
- check_bugs64_early();
-#endif
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64_early();
}
static inline void check_bugs(void)
@@ -37,19 +36,18 @@ static inline void check_bugs(void)
cpu_data[cpu].udelay_val = loops_per_jiffy;
check_bugs32();
-#ifdef CONFIG_64BIT
- check_bugs64();
-#endif
+
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64();
}
static inline int r4k_daddiu_bug(void)
{
-#ifdef CONFIG_64BIT
+ if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ return 0;
+
WARN_ON(daddiu_bug < 0);
return daddiu_bug != 0;
-#else
- return 0;
-#endif
}
#endif /* _ASM_BUGS_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index f345a873742d..5b0b3a6777ea 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -11,20 +11,11 @@
#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/compiler.h>
+#include <asm/llsc.h>
+#include <asm/sync.h>
#include <asm/war.h>
/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-/*
* These functions doesn't exist, so if they are called you'll either:
*
* - Get an error at compile-time due to __compiletime_error, if supported by
@@ -51,16 +42,17 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
" .set noat \n" \
" .set push \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: " ld " %0, %2 # __xchg_asm \n" \
" .set pop \n" \
" move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \
- "\t" __scbeqz " $1, 1b \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
" .set pop \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
- : "memory"); \
+ : __LLSC_CLOBBER); \
} else { \
unsigned long __flags; \
\
@@ -76,8 +68,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
unsigned int size);
-static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
- int size)
+static __always_inline
+unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
{
switch (size) {
case 1:
@@ -102,7 +94,13 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
({ \
__typeof__(*(ptr)) __res; \
\
- smp_mb__before_llsc(); \
+ /* \
+ * In the Loongson3 workaround case __xchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__before_llsc(); \
\
__res = (__typeof__(*(ptr))) \
__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
@@ -122,18 +120,19 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
" .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set pop \n" \
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
- "\t" __scbeqz " $1, 1b \n" \
+ "\t" __SC_BEQZ "$1, 1b \n" \
" .set pop \n" \
- "2: \n" \
+ "2: " __SYNC(full, loongson3_war) " \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
- : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
- : "memory"); \
+ : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
+ : __LLSC_CLOBBER); \
} else { \
unsigned long __flags; \
\
@@ -150,8 +149,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size);
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, unsigned int size)
+static __always_inline
+unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
{
switch (size) {
case 1:
@@ -186,9 +186,23 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
({ \
__typeof__(*(ptr)) __res; \
\
- smp_mb__before_llsc(); \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier prior to the LL, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_mb__before_llsc(); \
+ \
__res = cmpxchg_local((ptr), (old), (new)); \
- smp_llsc_mb(); \
+ \
+ /* \
+ * In the Loongson3 workaround case __cmpxchg_asm() already \
+ * contains a completion barrier after the SC, so we don't \
+ * need to emit an extra one here. \
+ */ \
+ if (!__SYNC_loongson3_war) \
+ smp_llsc_mb(); \
\
__res; \
})
@@ -233,6 +247,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
" .set push \n"
" .set " MIPS_ISA_ARCH_LEVEL " \n"
/* Load 64 bits from ptr */
+ " " __SYNC(full, loongson3_war) " \n"
"1: lld %L0, %3 # __cmpxchg64 \n"
/*
* Split the 64 bit value we loaded into the 2 registers that hold the
@@ -264,9 +279,9 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
/* Attempt to store new at ptr */
" scd %L1, %2 \n"
/* If we failed, loop! */
- "\t" __scbeqz " %L1, 1b \n"
+ "\t" __SC_BEQZ "%L1, 1b \n"
" .set pop \n"
- "2: \n"
+ "2: " __SYNC(full, loongson3_war) " \n"
: "=&r"(ret),
"=&r"(tmp),
"=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
@@ -290,10 +305,13 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
* will cause a build error unless cpu_has_64bits is a \
* compile-time constant 1. \
*/ \
- if (cpu_has_64bits && kernel_uses_llsc) \
+ if (cpu_has_64bits && kernel_uses_llsc) { \
+ smp_mb__before_llsc(); \
__res = __cmpxchg64((ptr), __old, __new); \
- else \
+ smp_llsc_mb(); \
+ } else { \
__res = __cmpxchg64_unsupported(); \
+ } \
\
__res; \
})
@@ -303,6 +321,4 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
-#undef __scbeqz
-
#endif /* __ASM_CMPXCHG_H */
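For illustration only, not part of the patch: a C11 sketch of the barrier-placement rule spelled out in the cmpxchg.h comments above. When the inner LL/SC sequence already begins and ends with its own completion barriers (the Loongson3 workaround case), the wrapper must not emit a second pair. Every name below is an illustrative stand-in, and the stand-in cmpxchg uses C11 atomics rather than the kernel's inline asm.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for __SYNC_loongson3_war: non-zero when the inner sequence
 * already carries a completion barrier before the LL and after the SC. */
#define INNER_SEQUENCE_HAS_BARRIERS 1

static void smp_mb_before_llsc(void) { atomic_thread_fence(memory_order_seq_cst); }
static void smp_llsc_mb(void)        { atomic_thread_fence(memory_order_seq_cst); }

/* Stand-in for the inline-asm LL/SC loop; returns the previous value. */
static int cmpxchg_asm(_Atomic int *p, int old, int new)
{
	atomic_compare_exchange_strong(p, &old, new);
	return old;
}

static int cmpxchg_wrapped(_Atomic int *p, int old, int new)
{
	int ret;

	if (!INNER_SEQUENCE_HAS_BARRIERS)
		smp_mb_before_llsc();	/* barrier before the LL */

	ret = cmpxchg_asm(p, old, new);

	if (!INNER_SEQUENCE_HAS_BARRIERS)
		smp_llsc_mb();		/* barrier after the SC */

	return ret;
}

int main(void)
{
	_Atomic int v = 1;

	printf("previous: %d, now: %d\n", cmpxchg_wrapped(&v, 1, 2), (int)v);
	return 0;
}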
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index c99166eadbde..255afcdd79c9 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -100,24 +100,6 @@ typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
-/*
- * A pointer passed in from user mode. This should not
- * be used for syscall parameters, just declare them
- * as pointers because the syscall entry code will have
- * appropriately converted them already.
- */
-
-static inline void __user *compat_ptr(compat_uptr_t uptr)
-{
- /* cast to a __user pointer via "unsigned long" makes sparse happy */
- return (void __user *)(unsigned long)(long)uptr;
-}
-
-static inline compat_uptr_t ptr_to_compat(void __user *uptr)
-{
- return (u32)(unsigned long)uptr;
-}
-
static inline void __user *arch_compat_alloc_user_space(long len)
{
struct pt_regs *regs = (struct pt_regs *)
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index 63b3468ede4c..6b7396a6a115 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -33,7 +33,7 @@ extern void nlm_cop2_restore(struct nlm_cop2_state *);
#define cop2_present 1
#define cop2_lazy_restore 0
-#elif defined(CONFIG_CPU_LOONGSON3)
+#elif defined(CONFIG_CPU_LOONGSON64)
#define cop2_present 1
#define cop2_lazy_restore 1
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 6998a9796499..de44c92b1c1f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -243,9 +243,6 @@
#ifndef cpu_has_pindexed_dcache
#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
-#ifndef cpu_has_local_ebase
-#define cpu_has_local_ebase 1
-#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
@@ -397,6 +394,22 @@
#define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
#endif
+#ifndef cpu_has_loongson_mmi
+#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
+#endif
+
+#ifndef cpu_has_loongson_cam
+#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
+#endif
+
+#ifndef cpu_has_loongson_ext
+#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
+#endif
+
+#ifndef cpu_has_loongson_ext2
+#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
+#endif
+
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
#endif
@@ -542,6 +555,10 @@
# define cpu_has_perf __opt(MIPS_CPU_PERF)
#endif
+#ifndef cpu_has_mac2008_only
+# define cpu_has_mac2008_only __opt(MIPS_CPU_MAC_2008_ONLY)
+#endif
+
#ifdef CONFIG_SMP
/*
* Some systems share FTLB RAMs between threads within a core (siblings in
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index a45af3de075d..49f0061a6051 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -17,16 +17,16 @@ static inline int __pure __get_cpu_type(const int cpu_type)
switch (cpu_type) {
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
- case CPU_LOONGSON3:
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON64
+ case CPU_LOONGSON64:
#endif
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON1B) || \
defined(CONFIG_SYS_HAS_CPU_LOONGSON1C)
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R1
@@ -38,7 +38,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R1) || \
defined(CONFIG_SYS_HAS_CPU_MIPS32_R2)
case CPU_4KEC:
- case CPU_JZRISC:
+ case CPU_XBURST:
#endif
#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R2
@@ -116,11 +116,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_VR4181A:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_R4300
- case CPU_R4300:
- case CPU_R4310:
-#endif
-
#ifdef CONFIG_SYS_HAS_CPU_R4X00
case CPU_R4000PC:
case CPU_R4000SC:
@@ -143,10 +138,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_R5000:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_R5432
- case CPU_R5432:
-#endif
-
#ifdef CONFIG_SYS_HAS_CPU_R5500
case CPU_R5500:
#endif
@@ -155,10 +146,6 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_NEVADA:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_R8000
- case CPU_R8000:
-#endif
-
#ifdef CONFIG_SYS_HAS_CPU_R10000
case CPU_R10000:
case CPU_R12000:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 290369fa44a4..216a22916740 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -46,8 +46,8 @@
#define PRID_COMP_NETLOGIC 0x0c0000
#define PRID_COMP_CAVIUM 0x0d0000
#define PRID_COMP_LOONGSON 0x140000
-#define PRID_COMP_INGENIC_D0 0xd00000 /* JZ4740, JZ4750 */
-#define PRID_COMP_INGENIC_D1 0xd10000 /* JZ4770, JZ4775 */
+#define PRID_COMP_INGENIC_D0 0xd00000 /* JZ4740, JZ4750, X1830 */
+#define PRID_COMP_INGENIC_D1 0xd10000 /* JZ4770, JZ4775, X1000 */
#define PRID_COMP_INGENIC_E1 0xe10000 /* JZ4780 */
/*
@@ -91,7 +91,9 @@
#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
#define PRID_IMP_R5432 0x5400
#define PRID_IMP_R5500 0x5500
-#define PRID_IMP_LOONGSON_64 0x6300 /* Loongson-2/3 */
+#define PRID_IMP_LOONGSON_64R 0x6100 /* Reduced Loongson-2 */
+#define PRID_IMP_LOONGSON_64C 0x6300 /* Classic Loongson-2 and Loongson-3 */
+#define PRID_IMP_LOONGSON_64G 0xc000 /* Generic Loongson-2 and Loongson-3 */
#define PRID_IMP_UNKNOWN 0xff00
@@ -183,7 +185,8 @@
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC_*
*/
-#define PRID_IMP_JZRISC 0x0200
+#define PRID_IMP_XBURST_REV1 0x0200 /* XBurst with MXU SIMD ISA */
+#define PRID_IMP_XBURST_REV2 0x0100 /* XBurst with MXU2 SIMD ISA */
/*
* These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
@@ -293,19 +296,14 @@ enum cpu_type_enum {
/*
* R4000 class processors
*/
- CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
+ CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
- CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
+ CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_TX49XX,
/*
- * R8000 class processors
- */
- CPU_R8000,
-
- /*
* TX3900 class processors
*/
CPU_TX3912, CPU_TX3922, CPU_TX3927,
@@ -315,15 +313,15 @@ enum cpu_type_enum {
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
- CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_XBURST, CPU_LOONGSON32, CPU_M14KC,
CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K,
CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250,
/*
* MIPS64 class processors
*/
- CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
- CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+ CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2EF,
+ CPU_LOONGSON64, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
CPU_QEMU_GENERIC,
@@ -418,6 +416,7 @@ enum cpu_type_enum {
#define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \
BIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */
#define MIPS_CPU_MMID BIT_ULL(57) /* CPU supports MemoryMapIDs */
+#define MIPS_CPU_MAC_2008_ONLY BIT_ULL(58) /* CPU only supports MAC2008 fused multiply-add instructions */
/*
* CPU ASE encodings
@@ -433,5 +432,9 @@ enum cpu_type_enum {
#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
#define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
+#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */
+#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
+#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
+#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dma-direct.h b/arch/mips/include/asm/dma-direct.h
index b5c240806e1b..14e352651ce9 100644
--- a/arch/mips/include/asm/dma-direct.h
+++ b/arch/mips/include/asm/dma-direct.h
@@ -2,14 +2,6 @@
#ifndef _MIPS_DMA_DIRECT_H
#define _MIPS_DMA_DIRECT_H 1
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 6842ffafd1e7..1784d4348c36 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -70,7 +70,7 @@ enum fixed_addresses {
#include <asm-generic/fixmap.h>
#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+ pte_offset_kernel(pmd_offset(pud_offset(p4d_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)), (vaddr))
/*
* Called from pgtable_init()
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index b83b0397462d..110220705e97 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -16,6 +16,7 @@
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/errno.h>
+#include <asm/sync.h>
#include <asm/war.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
@@ -32,7 +33,7 @@
" .set arch=r4000 \n" \
"2: sc $1, %2 \n" \
" beqzl $1, 1b \n" \
- __WEAK_LLSC_MB \
+ __stringify(__WEAK_LLSC_MB) " \n" \
"3: \n" \
" .insn \n" \
" .set pop \n" \
@@ -50,19 +51,19 @@
"i" (-EFAULT) \
: "memory"); \
} else if (cpu_has_llsc) { \
- loongson_llsc_mb(); \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
+ " " __SYNC(full, loongson3_war) " \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
" .set pop \n" \
" " insn " \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"2: "user_sc("$1", "%2")" \n" \
" beqz $1, 1b \n" \
- __WEAK_LLSC_MB \
+ __stringify(__WEAK_LLSC_MB) " \n" \
"3: \n" \
" .insn \n" \
" .set pop \n" \
@@ -147,7 +148,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set arch=r4000 \n"
"2: sc $1, %2 \n"
" beqzl $1, 1b \n"
- __WEAK_LLSC_MB
+ __stringify(__WEAK_LLSC_MB) " \n"
"3: \n"
" .insn \n"
" .set pop \n"
@@ -164,13 +165,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"i" (-EFAULT)
: "memory");
} else if (cpu_has_llsc) {
- loongson_llsc_mb();
__asm__ __volatile__(
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " " __SYNC(full, loongson3_war) " \n"
"1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
" .set pop \n"
@@ -178,8 +179,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"2: "user_sc("$1", "%2")" \n"
" beqz $1, 1b \n"
- __WEAK_LLSC_MB
- "3: \n"
+ "3: " __SYNC_ELSE(full, loongson3_war, __WEAK_LLSC_MB) "\n"
" .insn \n"
" .set pop \n"
" .section .fixup,\"ax\" \n"
@@ -194,7 +194,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
"i" (-EFAULT)
: "memory");
- loongson_llsc_mb();
} else
return -ENOSYS;
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index c52948f9ca95..159087f5386e 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -32,8 +32,6 @@ struct gio_driver {
};
#define to_gio_driver(drv) container_of(drv, struct gio_driver, driver)
-extern const struct gio_device_id *gio_match_device(const struct gio_device_id *,
- const struct gio_device *);
extern struct gio_device *gio_dev_get(struct gio_device *);
extern void gio_dev_put(struct gio_device *);
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 0fa27446869a..a0b92205f933 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -23,7 +23,7 @@
* TLB hazards
*/
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
- !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)
+ !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_CPU_LOONGSON64)
/*
* MIPSR2 defines ehb for hazard avoidance
@@ -158,7 +158,7 @@ do { \
} while (0)
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
- defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
+ defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_CPU_LOONGSON64) || \
defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 97a280640daf..cf1f2a4a2418 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -63,21 +63,11 @@
* instruction, so the lower 16 bits must be zero. Should be true on
* any sane architecture; generic code does not use this assumption.
*/
-extern const unsigned long mips_io_port_base;
+extern unsigned long mips_io_port_base;
-/*
- * Gcc will generate code to load the value of mips_io_port_base after each
- * function call which may be fairly wasteful in some cases. So we don't
- * play quite by the book. We tell gcc mips_io_port_base is a long variable
- * which solves the code generation issue. Now we need to violate the
- * aliasing rules a little to make initialization possible and finally we
- * will need the barrier() to fight side effects of the aliasing chat.
- * This trickery will eventually collapse under gcc's optimizer. Oh well.
- */
static inline void set_io_port_base(unsigned long base)
{
- * (unsigned long *) &mips_io_port_base = base;
- barrier();
+ mips_io_port_base = base;
}
/*
@@ -237,36 +227,14 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset,
*/
#define ioremap(offset, size) \
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
+#define ioremap_uc ioremap
/*
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- */
-#define ioremap_nocache(offset, size) \
- __ioremap_mode((offset), (size), _CACHE_UNCACHED)
-#define ioremap_uc ioremap_nocache
-
-/*
- * ioremap_cachable - map bus memory into CPU space
+ * ioremap_cache - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
- * ioremap_nocache performs a platform specific sequence of operations to
+ * ioremap_cache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
@@ -276,9 +244,8 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset,
* the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
-#define ioremap_cachable(offset, size) \
+#define ioremap_cache(offset, size) \
__ioremap_mode((offset), (size), _page_cachable_default)
-#define ioremap_cache ioremap_cachable
/*
* ioremap_wc - map bus memory into CPU space
@@ -317,7 +284,7 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
-#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON64)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() barrier()
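For illustration only, not part of the patch: a kernel-context fragment (not a standalone program) showing the mapping interface after the renames in the io.h hunks above, where callers use plain ioremap() for uncached MMIO and ioremap_cache() replaces ioremap_cachable(). The device addresses and sizes below are hypothetical.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

static void __iomem *ctrl_regs;
static void __iomem *frame_buf;

static int example_map(void)
{
	ctrl_regs = ioremap(0x1f000000, SZ_4K);		/* MMIO registers: uncached */
	frame_buf = ioremap_cache(0x20000000, SZ_1M);	/* memory-like region: cached */
	if (!ctrl_regs || !frame_buf)
		return -ENOMEM;

	return 0;
}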
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index f0b862a83816..47a8ffc0b413 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -18,7 +18,7 @@
#include <asm/compiler.h>
#include <asm/hazards.h>
-#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6)
+#if defined(CONFIG_CPU_HAS_DIEI)
static inline void arch_local_irq_disable(void)
{
@@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1)
+#if defined(CONFIG_CPU_LOONGSON64) || defined(CONFIG_CPU_LOONGSON32)
" mfc0 %[flags], $12 \n"
" di \n"
#else
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_DIEI */
static inline void arch_local_irq_enable(void)
{
@@ -102,7 +102,7 @@ static inline void arch_local_irq_enable(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+#if defined(CONFIG_CPU_HAS_DIEI)
" ei \n"
#else
" mfc0 $1,$12 \n"
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
index c6d17d171147..c49738bc3bda 100644
--- a/arch/mips/include/asm/llsc.h
+++ b/arch/mips/include/asm/llsc.h
@@ -9,20 +9,31 @@
#ifndef __ASM_LLSC_H
#define __ASM_LLSC_H
+#include <asm/isa-rev.h>
+
#if _MIPS_SZLONG == 32
-#define SZLONG_LOG 5
-#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define __INS "ins "
#define __EXT "ext "
#elif _MIPS_SZLONG == 64
-#define SZLONG_LOG 6
-#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define __INS "dins "
#define __EXT "dext "
#endif
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __SC_BEQZ "beqzl "
+#elif MIPS_ISA_REV >= 6
+# define __SC_BEQZ "beqzc "
+#else
+# define __SC_BEQZ "beqz "
+#endif
+
#endif /* __ASM_LLSC_H */
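For illustration only, not part of the patch: a host-side sketch of how a string macro like __SC_BEQZ gets pasted into an LL/SC asm template through string-literal concatenation. The configuration values are hard-coded stand-ins (the real ones come from <asm/war.h> and <asm/isa-rev.h>), and the resulting "asm" is only printed, never executed.

#include <stdio.h>

#define R10000_LLSC_WAR 0	/* stand-in; really provided by <asm/war.h> */
#define MIPS_ISA_REV    6	/* stand-in; really provided by <asm/isa-rev.h> */

#if R10000_LLSC_WAR
# define __SC_BEQZ "beqzl "	/* branch-likely: pre-3.0 R10000 erratum workaround */
#elif MIPS_ISA_REV >= 6
# define __SC_BEQZ "beqzc "	/* compact branch: MIPS R6 removed beqzl */
#else
# define __SC_BEQZ "beqz "
#endif

int main(void)
{
	/* Adjacent string literals concatenate, exactly as in the asm templates. */
	const char *retry_loop =
		"1:	ll	$1, 0($2)\n"
		"	addu	$1, $1, $3\n"
		"	sc	$1, 0($2)\n"
		"	" __SC_BEQZ "$1, 1b\n";

	fputs(retry_loop, stdout);
	return 0;
}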
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 02783e141c32..fef0fda8f82f 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -37,6 +37,7 @@ static __inline__ long local_add_return(long i, local_t * l)
__asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n"
+ __SYNC(full, loongson3_war) " \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -52,6 +53,7 @@ static __inline__ long local_add_return(long i, local_t * l)
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
+ __SYNC(full, loongson3_war) " \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -84,6 +86,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
__asm__ __volatile__(
" .set push \n"
" .set arch=r4000 \n"
+ __SYNC(full, loongson3_war) " \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
@@ -99,6 +102,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
+ __SYNC(full, loongson3_war) " \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 0ef8893e07f8..f879be3e8099 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -98,6 +98,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_MOTOROLA_WR850GP,
BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
+ BCM47XX_BOARD_NETGEAR_R6200_V1,
BCM47XX_BOARD_NETGEAR_WGR614V8,
BCM47XX_BOARD_NETGEAR_WGR614V9,
BCM47XX_BOARD_NETGEAR_WGR614_V10,
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index a4f798629c3d..513270c8adb9 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -45,7 +45,6 @@
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_64bits 1
#define cpu_has_octeon_cache 1
-#define cpu_has_saa octeon_has_saa()
#define cpu_has_mips32r1 1
#define cpu_has_mips32r2 1
#define cpu_has_mips64r1 1
@@ -60,7 +59,6 @@
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
-#define ARCH_HAS_IRQ_PER_CPU 1
#define ARCH_HAS_SPINLOCK_PREFETCH 1
#define spin_lock_prefetch(x) prefetch(x)
#define PREFETCH_STRIDE 128
@@ -73,13 +71,6 @@
#define ARCH_HAS_USABLE_BUILTIN_POPCOUNT 1
#endif
-static inline int octeon_has_saa(void)
-{
- int id;
- asm volatile ("mfc0 %0, $15,0" : "=r" (id));
- return id >= 0x000d0300;
-}
-
/*
* The last 256MB are reserved for device to device mappings and the
* BAR1 hole.
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
index 35c80be92207..2421411b7636 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/war.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -12,7 +12,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
index 1c11310bc8ad..00beb69bfab9 100644
--- a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
@@ -32,7 +32,6 @@
#define cpu_has_vtag_icache 0
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_pindexed_dcache 0
-#define cpu_has_local_ebase 0
#define cpu_icache_snoops_remote_store 1
#define cpu_has_mips_4 0
#define cpu_has_mips_5 0
diff --git a/arch/mips/include/asm/mach-generic/war.h b/arch/mips/include/asm/mach-generic/war.h
index a1bc2e71f983..f0f4a35d0870 100644
--- a/arch/mips/include/asm/mach-generic/war.h
+++ b/arch/mips/include/asm/mach-generic/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-ip22/spaces.h b/arch/mips/include/asm/mach-ip22/spaces.h
index 7f9fa6f66059..24fe92cb5313 100644
--- a/arch/mips/include/asm/mach-ip22/spaces.h
+++ b/arch/mips/include/asm/mach-ip22/spaces.h
@@ -10,17 +10,7 @@
#ifndef _ASM_MACH_IP22_SPACES_H
#define _ASM_MACH_IP22_SPACES_H
-
-#ifdef CONFIG_64BIT
-
-#define PAGE_OFFSET 0xffffffff80000000UL
-
-#define CAC_BASE 0xffffffff80000000
-#define IO_BASE 0xffffffffa0000000
-#define UNCAC_BASE 0xffffffffa0000000
-#define MAP_BASE 0xc000000000000000
-
-#endif /* CONFIG_64BIT */
+#define PHYS_OFFSET _AC(0x08000000, UL)
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mach-ip22/war.h b/arch/mips/include/asm/mach-ip22/war.h
index fba640517f4f..b48eb4ac362d 100644
--- a/arch/mips/include/asm/mach-ip22/war.h
+++ b/arch/mips/include/asm/mach-ip22/war.h
@@ -15,7 +15,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 1
#define R4600_V1_HIT_CACHEOP_WAR 1
#define R4600_V2_HIT_CACHEOP_WAR 1
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index f992c1db876b..3e54f605a70b 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -10,20 +10,10 @@
#define __ASM_MACH_IP27_KERNEL_ENTRY_H
#include <asm/sn/addrs.h>
-#include <asm/sn/sn0/hubni.h>
+#include <asm/sn/agent.h>
#include <asm/sn/klkernvars.h>
/*
- * Returns the local nasid into res.
- */
- .macro GET_NASID_ASM res
- dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
- ld \res, (\res)
- and \res, NSRI_NODEID_MASK
- dsrl \res, NSRI_NODEID_SHFT
- .endm
-
-/*
* TLB bits
*/
#define PAGE_GLOBAL (1 << 6)
diff --git a/arch/mips/include/asm/mach-ip27/mangle-port.h b/arch/mips/include/asm/mach-ip27/mangle-port.h
index f6e4912ea062..27c56efa519f 100644
--- a/arch/mips/include/asm/mach-ip27/mangle-port.h
+++ b/arch/mips/include/asm/mach-ip27/mangle-port.h
@@ -8,7 +8,7 @@
#ifndef __ASM_MACH_IP27_MANGLE_PORT_H
#define __ASM_MACH_IP27_MANGLE_PORT_H
-#define __swizzle_addr_b(port) (port)
+#define __swizzle_addr_b(port) ((port) ^ 3)
#define __swizzle_addr_w(port) ((port) ^ 2)
#define __swizzle_addr_l(port) (port)
#define __swizzle_addr_q(port) (port)
@@ -20,6 +20,6 @@
# define ioswabl(a, x) (x)
# define __mem_ioswabl(a, x) cpu_to_le32(x)
# define ioswabq(a, x) (x)
-# define __mem_ioswabq(a, x) cpu_to_le32(x)
+# define __mem_ioswabq(a, x) cpu_to_le64(x)
#endif /* __ASM_MACH_IP27_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 1cd6a23a84f2..08c36e50a860 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -4,15 +4,15 @@
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
-#include <asm/sn/hub.h>
+#include <asm/sn/agent.h>
+#include <asm/sn/klkernvars.h>
-#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
+#define pa_to_nid(addr) NASID_GET(addr)
struct hub_data {
kern_vars_t kern_vars;
DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW);
cpumask_t h_cpus;
- unsigned long slice_map;
};
struct node_data {
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index 965f0793a5f9..d66cc53feab8 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -2,19 +2,18 @@
#ifndef _ASM_MACH_TOPOLOGY_H
#define _ASM_MACH_TOPOLOGY_H 1
-#include <asm/sn/hub.h>
#include <asm/sn/types.h>
#include <asm/mmzone.h>
struct cpuinfo_ip27 {
- cnodeid_t p_nodeid; /* my node ID in compact-id-space */
nasid_t p_nasid; /* my node ID in numa-as-id-space */
+ unsigned short p_speed; /* cpu speed in MHz */
unsigned char p_slice; /* Physical position on node board */
};
extern struct cpuinfo_ip27 sn_cpu_info[NR_CPUS];
-#define cpu_to_node(cpu) (sn_cpu_info[(cpu)].p_nodeid)
+#define cpu_to_node(cpu) (cputonasid(cpu))
#define cpumask_of_node(node) ((node) == -1 ? \
cpu_all_mask : \
&hub_data(node)->h_cpus)
@@ -23,7 +22,7 @@ extern int pcibus_to_node(struct pci_bus *);
#define cpumask_of_pcibus(bus) (cpumask_of_node(pcibus_to_node(bus)))
-extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+extern unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
#define node_distance(from, to) (__node_distances[(from)][(to)])
diff --git a/arch/mips/include/asm/mach-ip27/war.h b/arch/mips/include/asm/mach-ip27/war.h
index 4ee0e4bdf4fb..ef3efce0094a 100644
--- a/arch/mips/include/asm/mach-ip27/war.h
+++ b/arch/mips/include/asm/mach-ip27/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-ip28/war.h b/arch/mips/include/asm/mach-ip28/war.h
index 4821c7b7a38c..61cd67354829 100644
--- a/arch/mips/include/asm/mach-ip28/war.h
+++ b/arch/mips/include/asm/mach-ip28/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
new file mode 100644
index 000000000000..cfa02f3d25df
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/cpu-feature-overrides.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IP30/Octane cpu-features overrides.
+ *
+ * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org>
+ * 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2015 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H
+
+#include <asm/cpu.h>
+
+/*
+ * IP30 only supports R1[024]000 processors, all using the same config
+ */
+#define cpu_has_tlb 1
+#define cpu_has_tlbinv 0
+#define cpu_has_segments 0
+#define cpu_has_eva 0
+#define cpu_has_htw 0
+#define cpu_has_rixiex 0
+#define cpu_has_maar 0
+#define cpu_has_rw_llb 0
+#define cpu_has_3kex 0
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_6k_cache 0
+#define cpu_has_8k_cache 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 1
+#define cpu_has_nofpuex 0
+#define cpu_has_32fpr 1
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_64bits 1
+#define cpu_has_divec 0
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+#define cpu_has_mcheck 0
+#define cpu_has_ejtag 0
+#define cpu_has_llsc 1
+#define cpu_has_mips16 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips3d 0
+#define cpu_has_smartmips 0
+#define cpu_has_rixi 0
+#define cpu_has_xpa 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+
+#define cpu_icache_snoops_remote_store 1
+
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+#define cpu_has_mips32r6 0
+#define cpu_has_mips64r6 0
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_hwrena_impl_bits 0
+#define cpu_has_perf_cntr_intr_bit 0
+#define cpu_has_vz 0
+#define cpu_has_fre 0
+#define cpu_has_cdmm 0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 64
+#define cpu_scache_line_size() 128
+
+#endif /* __ASM_MACH_IP30_CPU_FEATURE_OVERRIDES_H */
+
diff --git a/arch/mips/include/asm/mach-ip30/irq.h b/arch/mips/include/asm/mach-ip30/irq.h
new file mode 100644
index 000000000000..e5c3dd965266
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/irq.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEART IRQ defines
+ *
+ * Copyright (C) 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2014-2016 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+
+#ifndef __ASM_MACH_IP30_IRQ_H
+#define __ASM_MACH_IP30_IRQ_H
+
+/*
+ * HEART has 64 hardware interrupts, but we use 128 to leave room for a few
+ * software interrupts as well (such as the CPU timer interrupt).
+ */
+#define NR_IRQS 128
+
+extern void __init ip30_install_ipi(void);
+
+/*
+ * HEART has 64 interrupt vectors available to it, subdivided into five
+ * priority levels. They are numbered 0 to 63.
+ */
+#define HEART_NUM_IRQS 64
+
+/*
+ * These are the five interrupt priority levels and their corresponding
+ * CPU IPx interrupt pins.
+ *
+ * Level 4 - Error Interrupts.
+ * Level 3 - HEART timer interrupt.
+ * Level 2 - CPU IPI, CPU debug, power button, general device interrupts.
+ * Level 1 - General device interrupts.
+ * Level 0 - General device GFX flow control interrupts.
+ */
+#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */
+#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */
+#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */
+#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */
+#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */
+
+/* HEART L0 Interrupts (Low Priority) */
+#define HEART_L0_INT_GENERIC 0
+#define HEART_L0_INT_FLOW_CTRL_HWTR_0 1
+#define HEART_L0_INT_FLOW_CTRL_HWTR_1 2
+
+/* HEART L2 Interrupts (High Priority) */
+#define HEART_L2_INT_RESCHED_CPU_0 46
+#define HEART_L2_INT_RESCHED_CPU_1 47
+#define HEART_L2_INT_CALL_CPU_0 48
+#define HEART_L2_INT_CALL_CPU_1 49
+
+/* HEART L3 Interrupts (Compare/Counter Timer) */
+#define HEART_L3_INT_TIMER 50
+
+/* HEART L4 Interrupts (Errors) */
+#define HEART_L4_INT_XWID_ERR_9 51
+#define HEART_L4_INT_XWID_ERR_A 52
+#define HEART_L4_INT_XWID_ERR_B 53
+#define HEART_L4_INT_XWID_ERR_C 54
+#define HEART_L4_INT_XWID_ERR_D 55
+#define HEART_L4_INT_XWID_ERR_E 56
+#define HEART_L4_INT_XWID_ERR_F 57
+#define HEART_L4_INT_XWID_ERR_XBOW 58
+#define HEART_L4_INT_CPU_BUS_ERR_0 59
+#define HEART_L4_INT_CPU_BUS_ERR_1 60
+#define HEART_L4_INT_CPU_BUS_ERR_2 61
+#define HEART_L4_INT_CPU_BUS_ERR_3 62
+#define HEART_L4_INT_HEART_EXCP 63
+
+/*
+ * Power Switch is wired via BaseIO BRIDGE slot #6.
+ *
+ * ACFail is wired via BaseIO BRIDGE slot #7.
+ */
+#define IP30_POWER_IRQ HEART_L2_INT_POWER_BTN
+
+#include_next <irq.h>
+
+#define IP30_HEART_L0_IRQ (MIPS_CPU_IRQ_BASE + 2)
+#define IP30_HEART_L1_IRQ (MIPS_CPU_IRQ_BASE + 3)
+#define IP30_HEART_L2_IRQ (MIPS_CPU_IRQ_BASE + 4)
+#define IP30_HEART_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 5)
+#define IP30_HEART_ERR_IRQ (MIPS_CPU_IRQ_BASE + 6)
+
+#endif /* __ASM_MACH_IP30_IRQ_H */
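For illustration only, not part of the patch: a host-side sketch that classifies a HEART interrupt number (0..63) into one of the five priority levels described above by testing the level masks. The mask values are copied from this header; everything else is made up for the example.

#include <stdio.h>

#define HEART_L4_INT_MASK 0xfff8000000000000ULL /* IP6 */
#define HEART_L3_INT_MASK 0x0004000000000000ULL /* IP5 */
#define HEART_L2_INT_MASK 0x0003ffff00000000ULL /* IP4 */
#define HEART_L1_INT_MASK 0x00000000ffff0000ULL /* IP3 */
#define HEART_L0_INT_MASK 0x000000000000ffffULL /* IP2 */

static int heart_irq_level(unsigned int irq)
{
	unsigned long long bit = 1ULL << irq;

	if (bit & HEART_L4_INT_MASK) return 4;
	if (bit & HEART_L3_INT_MASK) return 3;
	if (bit & HEART_L2_INT_MASK) return 2;
	if (bit & HEART_L1_INT_MASK) return 1;
	return 0;
}

int main(void)
{
	printf("HEART irq 50 -> level %d\n", heart_irq_level(50)); /* timer */
	printf("HEART irq 63 -> level %d\n", heart_irq_level(63)); /* HEART exception */
	return 0;
}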
diff --git a/arch/mips/include/asm/mach-ip30/kernel-entry-init.h b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
new file mode 100644
index 000000000000..be0472c977d8
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/kernel-entry-init.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_MACH_IP30_KERNEL_ENTRY_H
+#define __ASM_MACH_IP30_KERNEL_ENTRY_H
+
+ .macro kernel_entry_setup
+ .endm
+
+ .macro smp_slave_setup
+ move gp, a0
+ .endm
+
+#endif /* __ASM_MACH_IP30_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-ip30/mangle-port.h b/arch/mips/include/asm/mach-ip30/mangle-port.h
new file mode 100644
index 000000000000..f3e1262a2d5e
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/mangle-port.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2003, 2004 Ralf Baechle
+ */
+#ifndef __ASM_MACH_IP30_MANGLE_PORT_H
+#define __ASM_MACH_IP30_MANGLE_PORT_H
+
+#define __swizzle_addr_b(port) ((port)^3)
+#define __swizzle_addr_w(port) ((port)^2)
+#define __swizzle_addr_l(port) (port)
+#define __swizzle_addr_q(port) (port)
+
+#define ioswabb(a, x) (x)
+#define __mem_ioswabb(a, x) (x)
+#define ioswabw(a, x) (x)
+#define __mem_ioswabw(a, x) cpu_to_le16(x)
+#define ioswabl(a, x) (x)
+#define __mem_ioswabl(a, x) cpu_to_le32(x)
+#define ioswabq(a, x) (x)
+#define __mem_ioswabq(a, x) cpu_to_le64(x)
+
+#endif /* __ASM_MACH_IP30_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-ip30/spaces.h b/arch/mips/include/asm/mach-ip30/spaces.h
new file mode 100644
index 000000000000..c8a302dfbe05
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/spaces.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Joshua Kinard <kumba@gentoo.org>
+ *
+ */
+#ifndef _ASM_MACH_IP30_SPACES_H
+#define _ASM_MACH_IP30_SPACES_H
+
+/*
+ * Memory in IP30/Octane is offset 512MB in the physical address space.
+ */
+#define PHYS_OFFSET _AC(0x20000000, UL)
+
+#ifdef CONFIG_64BIT
+#define CAC_BASE _AC(0xA800000000000000, UL)
+#endif
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* _ASM_MACH_IP30_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ip30/war.h b/arch/mips/include/asm/mach-ip30/war.h
new file mode 100644
index 000000000000..a98ba204f183
--- /dev/null
+++ b/arch/mips/include/asm/mach-ip30/war.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_IP30_WAR_H
+#define __ASM_MIPS_MACH_IP30_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+
+#ifdef CONFIG_CPU_R10000
+#define R10000_LLSC_WAR 1
+#else
+#define R10000_LLSC_WAR 0
+#endif
+
+#endif /* __ASM_MIPS_MACH_IP30_WAR_H */
diff --git a/arch/mips/include/asm/mach-ip32/war.h b/arch/mips/include/asm/mach-ip32/war.h
index 9807ecda5a88..e77b9d1b6c96 100644
--- a/arch/mips/include/asm/mach-ip32/war.h
+++ b/arch/mips/include/asm/mach-ip32/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
deleted file mode 100644
index 2092a3597734..000000000000
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 GPIO pin definitions
- */
-
-#ifndef _JZ_GPIO_H
-#define _JZ_GPIO_H
-
-#define JZ_GPIO_PORTA(x) ((x) + 32 * 0)
-#define JZ_GPIO_PORTB(x) ((x) + 32 * 1)
-#define JZ_GPIO_PORTC(x) ((x) + 32 * 2)
-#define JZ_GPIO_PORTD(x) ((x) + 32 * 3)
-
-#endif
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_fb.h b/arch/mips/include/asm/mach-jz4740/jz4740_fb.h
deleted file mode 100644
index e84a48f73285..000000000000
--- a/arch/mips/include/asm/mach-jz4740/jz4740_fb.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009, Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __ASM_MACH_JZ4740_JZ4740_FB_H__
-#define __ASM_MACH_JZ4740_JZ4740_FB_H__
-
-#include <linux/fb.h>
-
-enum jz4740_fb_lcd_type {
- JZ_LCD_TYPE_GENERIC_16_BIT = 0,
- JZ_LCD_TYPE_GENERIC_18_BIT = 0 | (1 << 4),
- JZ_LCD_TYPE_SPECIAL_TFT_1 = 1,
- JZ_LCD_TYPE_SPECIAL_TFT_2 = 2,
- JZ_LCD_TYPE_SPECIAL_TFT_3 = 3,
- JZ_LCD_TYPE_NON_INTERLACED_CCIR656 = 5,
- JZ_LCD_TYPE_INTERLACED_CCIR656 = 7,
- JZ_LCD_TYPE_SINGLE_COLOR_STN = 8,
- JZ_LCD_TYPE_SINGLE_MONOCHROME_STN = 9,
- JZ_LCD_TYPE_DUAL_COLOR_STN = 10,
- JZ_LCD_TYPE_DUAL_MONOCHROME_STN = 11,
- JZ_LCD_TYPE_8BIT_SERIAL = 12,
-};
-
-#define JZ4740_FB_SPECIAL_TFT_CONFIG(start, stop) (((start) << 16) | (stop))
-
-/*
-* width: width of the lcd display in mm
-* height: height of the lcd display in mm
-* num_modes: size of modes
-* modes: list of valid video modes
-* bpp: bits per pixel for the lcd
-* lcd_type: lcd type
-*/
-
-struct jz4740_fb_platform_data {
- unsigned int width;
- unsigned int height;
-
- size_t num_modes;
- struct fb_videomode *modes;
-
- unsigned int bpp;
- enum jz4740_fb_lcd_type lcd_type;
-
- struct {
- uint32_t spl;
- uint32_t cls;
- uint32_t ps;
- uint32_t rev;
- } special_tft_config;
-
- unsigned pixclk_falling_edge:1;
- unsigned date_enable_active_low:1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
deleted file mode 100644
index 9a7de47c7c79..000000000000
--- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_MMC_JZ4740_MMC
-#define __LINUX_MMC_JZ4740_MMC
-
-struct jz4740_mmc_platform_data {
- unsigned card_detect_active_low:1;
- unsigned read_only_active_low:1;
-
- unsigned data_1bit:1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
deleted file mode 100644
index 241270d3ea14..000000000000
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 platform device definitions
- */
-
-
-#ifndef __JZ4740_PLATFORM_H
-#define __JZ4740_PLATFORM_H
-
-#include <linux/platform_device.h>
-
-extern struct platform_device jz4740_udc_device;
-extern struct platform_device jz4740_udc_xceiv_device;
-extern struct platform_device jz4740_mmc_device;
-extern struct platform_device jz4740_i2c_device;
-extern struct platform_device jz4740_nand_device;
-extern struct platform_device jz4740_framebuffer_device;
-extern struct platform_device jz4740_i2s_device;
-extern struct platform_device jz4740_pcm_device;
-extern struct platform_device jz4740_codec_device;
-extern struct platform_device jz4740_adc_device;
-extern struct platform_device jz4740_pwm_device;
-extern struct platform_device jz4740_dma_device;
-
-#endif
diff --git a/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
new file mode 100644
index 000000000000..b2ee859ca0b7
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/cpu-feature-overrides.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ * Copyright (C) 2009 Philippe Vachon <philippe@cowpig.ca>
+ * Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
+ *
+ * reference: /proc/cpuinfo,
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_32fpr 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 1
+#define cpu_has_4kex 1
+#define cpu_has_64bits 1
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_counter 1
+#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
+#define cpu_has_divec 0
+#define cpu_has_ejtag 0
+#define cpu_has_inclusive_pcaches 1
+#define cpu_has_llsc 1
+#define cpu_has_mcheck 0
+#define cpu_has_mdmx 0
+#define cpu_has_mips16 0
+#define cpu_has_mips16e2 0
+#define cpu_has_mips3d 0
+#define cpu_has_mipsmt 0
+#define cpu_has_smartmips 0
+#define cpu_has_tlb 1
+#define cpu_has_tx39_cache 0
+#define cpu_has_vce 0
+#define cpu_has_veic 0
+#define cpu_has_vint 0
+#define cpu_has_vtag_icache 0
+#define cpu_has_watch 1
+
+#endif /* __ASM_MACH_LOONGSON2EF_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
index 9795b3361532..9795b3361532 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
index 52e8bb0fc04d..52e8bb0fc04d 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_mfgpt.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_mfgpt.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
index a0d4b752899e..a0d4b752899e 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_pci.h
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
index 70d0153cccc3..70d0153cccc3 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_vsm.h
+++ b/arch/mips/include/asm/mach-loongson2ef/cs5536/cs5536_vsm.h
diff --git a/arch/mips/include/asm/mach-loongson2ef/loongson.h b/arch/mips/include/asm/mach-loongson2ef/loongson.h
new file mode 100644
index 000000000000..5008af0a1a19
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/loongson.h
@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_LOONGSON_H
+#define __ASM_MACH_LOONGSON2EF_LOONGSON_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+/* loongson internal northbridge initialization */
+extern void bonito_irq_init(void);
+
+/* machine-specific reboot/halt operation */
+extern void mach_prepare_reboot(void);
+extern void mach_prepare_shutdown(void);
+
+/* environment arguments from bootloader */
+extern u32 cpu_clock_freq;
+extern u32 memsize, highmemsize;
+
+/* loongson-specific command line, env and memory initialization */
+extern void __init prom_init_memory(void);
+extern void __init prom_init_machtype(void);
+extern void __init prom_init_env(void);
+#ifdef CONFIG_LOONGSON_UART_BASE
+extern unsigned long _loongson_uart_base, loongson_uart_base;
+extern void prom_init_loongson_uart_base(void);
+#endif
+
+static inline void prom_init_uart_base(void)
+{
+#ifdef CONFIG_LOONGSON_UART_BASE
+ prom_init_loongson_uart_base();
+#endif
+}
+
+/* irq operation functions */
+extern void bonito_irqdispatch(void);
+extern void __init bonito_irq_init(void);
+extern void __init mach_init_irq(void);
+extern void mach_irq_dispatch(unsigned int pending);
+extern int mach_i8259_irq(void);
+
+/* We need this in some places... */
+#define delay() ({ \
+ int x; \
+ for (x = 0; x < 100000; x++) \
+ __asm__ __volatile__(""); \
+})
+
+#define LOONGSON_REG(x) \
+ (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
+
+#define LOONGSON_IRQ_BASE 32
+#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
+
+#include <linux/interrupt.h>
+static inline void do_perfcnt_IRQ(void)
+{
+#if IS_ENABLED(CONFIG_OPROFILE)
+ do_IRQ(LOONGSON2_PERFCNT_IRQ);
+#endif
+}
+
+#define LOONGSON_FLASH_BASE 0x1c000000
+#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
+#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
+
+#define LOONGSON_LIO0_BASE 0x1e000000
+#define LOONGSON_LIO0_SIZE 0x01C00000 /* 28M */
+#define LOONGSON_LIO0_TOP (LOONGSON_LIO0_BASE+LOONGSON_LIO0_SIZE-1)
+
+#define LOONGSON_BOOT_BASE 0x1fc00000
+#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
+
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
+
+#define LOONGSON_PCILO0_BASE 0x10000000
+#define LOONGSON_PCILO1_BASE 0x14000000
+#define LOONGSON_PCILO2_BASE 0x18000000
+#define LOONGSON_PCILO_BASE LOONGSON_PCILO0_BASE
+#define LOONGSON_PCILO_SIZE 0x0c000000 /* 64M * 3 */
+#define LOONGSON_PCILO_TOP (LOONGSON_PCILO0_BASE+LOONGSON_PCILO_SIZE-1)
+
+#define LOONGSON_PCICFG_BASE 0x1fe80000
+#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
+#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
+#define LOONGSON_PCIIO_BASE 0x1fd00000
+
+#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
+#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
+
+/* Loongson Register Bases */
+
+#define LOONGSON_PCICONFIGBASE 0x00
+#define LOONGSON_REGBASE 0x100
+
+/* PCI Configuration Registers */
+
+#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
+#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
+#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
+#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
+
+#define LOONGSON_PCI_ISR4C LOONGSON_PCI_REG(0x4c)
+
+#define LOONGSON_PCICMD_PERR_CLR 0x80000000
+#define LOONGSON_PCICMD_SERR_CLR 0x40000000
+#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
+#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
+#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
+#define LOONGSON_PCICMD_ASTEPEN 0x00000080
+#define LOONGSON_PCICMD_SERREN 0x00000100
+#define LOONGSON_PCILTIMER_BUSLATENCY 0x0000ff00
+#define LOONGSON_PCILTIMER_BUSLATENCY_SHIFT 8
+
+/* Loongson h/w Configuration */
+
+#define LOONGSON_GENCFG_OFFSET 0x4
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+
+#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
+#define LOONGSON_GENCFG_SNOOPEN 0x00000002
+#define LOONGSON_GENCFG_CPUSELFRESET 0x00000004
+
+#define LOONGSON_GENCFG_FORCE_IRQA 0x00000008
+#define LOONGSON_GENCFG_IRQA_ISOUT 0x00000010
+#define LOONGSON_GENCFG_IRQA_FROM_INT1 0x00000020
+#define LOONGSON_GENCFG_BYTESWAP 0x00000040
+
+#define LOONGSON_GENCFG_UNCACHED 0x00000080
+#define LOONGSON_GENCFG_PREFETCHEN 0x00000100
+#define LOONGSON_GENCFG_WBEHINDEN 0x00000200
+#define LOONGSON_GENCFG_CACHEALG 0x00000c00
+#define LOONGSON_GENCFG_CACHEALG_SHIFT 10
+#define LOONGSON_GENCFG_PCIQUEUE 0x00001000
+#define LOONGSON_GENCFG_CACHESTOP 0x00002000
+#define LOONGSON_GENCFG_MSTRBYTESWAP 0x00004000
+#define LOONGSON_GENCFG_BUSERREN 0x00008000
+#define LOONGSON_GENCFG_NORETRYTIMEOUT 0x00010000
+#define LOONGSON_GENCFG_SHORTCOPYTIMEOUT 0x00020000
+
+/* PCI address map control */
+
+#define LOONGSON_PCIMAP LOONGSON_REG(LOONGSON_REGBASE + 0x10)
+#define LOONGSON_PCIMEMBASECFG LOONGSON_REG(LOONGSON_REGBASE + 0x14)
+#define LOONGSON_PCIMAP_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x18)
+
+/* GPIO Regs - r/w */
+
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
+
+/* ICU Configuration Regs - r/w */
+
+#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
+
+/* ICU Enable Regs - IntEn & IntISR are r/o. */
+
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
+#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
+
+/* ICU */
+#define LOONGSON_ICU_MBOXES 0x0000000f
+#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_DMARDY 0x00000010
+#define LOONGSON_ICU_DMAEMPTY 0x00000020
+#define LOONGSON_ICU_COPYRDY 0x00000040
+#define LOONGSON_ICU_COPYEMPTY 0x00000080
+#define LOONGSON_ICU_COPYERR 0x00000100
+#define LOONGSON_ICU_PCIIRQ 0x00000200
+#define LOONGSON_ICU_MASTERERR 0x00000400
+#define LOONGSON_ICU_SYSTEMERR 0x00000800
+#define LOONGSON_ICU_DRAMPERR 0x00001000
+#define LOONGSON_ICU_RETRYERR 0x00002000
+#define LOONGSON_ICU_GPIOS 0x01ff0000
+#define LOONGSON_ICU_GPIOS_SHIFT 16
+#define LOONGSON_ICU_GPINS 0x7e000000
+#define LOONGSON_ICU_GPINS_SHIFT 25
+#define LOONGSON_ICU_MBOX(N) (1<<(LOONGSON_ICU_MBOXES_SHIFT+(N)))
+#define LOONGSON_ICU_GPIO(N) (1<<(LOONGSON_ICU_GPIOS_SHIFT+(N)))
+#define LOONGSON_ICU_GPIN(N) (1<<(LOONGSON_ICU_GPINS_SHIFT+(N)))
+
+/* PCI prefetch window base & mask */
+
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+
+/* PCI_Hit*_Sel_* */
+
+#define LOONGSON_PCI_HIT0_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x50)
+#define LOONGSON_PCI_HIT0_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x54)
+#define LOONGSON_PCI_HIT1_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x58)
+#define LOONGSON_PCI_HIT1_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x5c)
+#define LOONGSON_PCI_HIT2_SEL_L LOONGSON_REG(LOONGSON_REGBASE + 0x60)
+#define LOONGSON_PCI_HIT2_SEL_H LOONGSON_REG(LOONGSON_REGBASE + 0x64)
+
+/* PXArb Config & Status */
+
+#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
+#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
+
+/* Chip Config register of each physical CPU package, PRId >= Loongson-2F */
+#define LOONGSON_CHIPCFG (void __iomem *)TO_UNCAC(0x1fc00180)
+
+/* pcimap */
+
+#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
+#define LOONGSON_PCIMAP_PCIMAP_LO0_SHIFT 0
+#define LOONGSON_PCIMAP_PCIMAP_LO1 0x00000fc0
+#define LOONGSON_PCIMAP_PCIMAP_LO1_SHIFT 6
+#define LOONGSON_PCIMAP_PCIMAP_LO2 0x0003f000
+#define LOONGSON_PCIMAP_PCIMAP_LO2_SHIFT 12
+#define LOONGSON_PCIMAP_PCIMAP_2 0x00040000
+#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
+ ((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
+
+#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
+#include <linux/cpufreq.h>
+extern struct cpufreq_frequency_table loongson2_clockmod_table[];
+#endif
+
+/*
+ * address windows configuration module
+ *
+ * Loongson-2E does not have this module
+ */
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/* address window config module base address */
+#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
+#define LOONGSON_ADDRWINCFG_SIZE 0x180
+
+extern unsigned long _loongson_addrwincfg_base;
+#define LOONGSON_ADDRWINCFG(offset) \
+ (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
+
+#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
+#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
+#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
+#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
+
+#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
+#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
+#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
+#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
+
+#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
+#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
+#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
+#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
+
+#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
+#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
+#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
+#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
+
+#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
+#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
+#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
+#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
+
+#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
+#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
+#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
+#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
+
+#define ADDRWIN_WIN0 0
+#define ADDRWIN_WIN1 1
+#define ADDRWIN_WIN2 2
+#define ADDRWIN_WIN3 3
+
+#define ADDRWIN_MAP_DST_DDR 0
+#define ADDRWIN_MAP_DST_PCI 1
+#define ADDRWIN_MAP_DST_LIO 1
+
+/*
+ * s: CPU, PCIDMA
+ * d: DDR, PCI, LIO
+ * win: 0, 1, 2, 3
+ * src: map source
+ * dst: map destination
+ * size: ~mask + 1
+ */
+#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
+ s##_WIN##w##_BASE = (src); \
+ s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
+ s##_WIN##w##_MASK = ~(size-1); \
+} while (0)
+
+#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
+#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
+#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
+ LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
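A hedged sketch of the intended call pattern for these helpers, mirroring the constants defined in <asm/mach-loongson2ef/pci.h> further down in this patch (the wrapper function itself is illustrative, not part of the patch):

/*
 * Route CPU accesses in [1G, 2G) straight through to PCI via window 2.
 * The size argument must be a power of two, since the hardware mask is
 * derived as ~(size - 1).
 */
static inline void example_setup_cpu_to_pci_window(void)
{
	LOONGSON_ADDRWIN_CPUTOPCI(ADDRWIN_WIN2, LOONGSON_CPU_MEM_SRC,
				  LOONGSON_PCI_MEM_DST, MMAP_CPUTOPCI_SIZE);
}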
+
+#endif /* ! CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* __ASM_MACH_LOONGSON2EF_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson64/machine.h b/arch/mips/include/asm/mach-loongson2ef/machine.h
index 8ef7ea94a26d..4097267ef186 100644
--- a/arch/mips/include/asm/mach-loongson64/machine.h
+++ b/arch/mips/include/asm/mach-loongson2ef/machine.h
@@ -4,8 +4,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef __ASM_MACH_LOONGSON64_MACHINE_H
-#define __ASM_MACH_LOONGSON64_MACHINE_H
+#ifndef __ASM_MACH_LOONGSON2EF_MACHINE_H
+#define __ASM_MACH_LOONGSON2EF_MACHINE_H
#ifdef CONFIG_LEMOTE_FULOONG2E
@@ -20,10 +20,4 @@
#endif
-#ifdef CONFIG_LOONGSON_MACH3X
-
-#define LOONGSON_MACHTYPE MACH_LOONGSON_GENERIC
-
-#endif /* CONFIG_LOONGSON_MACH3X */
-
-#endif /* __ASM_MACH_LOONGSON64_MACHINE_H */
+#endif /* __ASM_MACH_LOONGSON2EF_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h b/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h
new file mode 100644
index 000000000000..00d602629a55
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/mc146818rtc.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998, 2001, 03, 07 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * RTC routines for PC style attached Dallas chip.
+ */
+#ifndef __ASM_MACH_LOONGSON2EF_MC146818RTC_H
+#define __ASM_MACH_LOONGSON2EF_MC146818RTC_H
+
+#include <linux/io.h>
+
+#define RTC_PORT(x) (0x70 + (x))
+#define RTC_IRQ 8
+
+static inline unsigned char CMOS_READ(unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ return inb_p(RTC_PORT(1));
+}
+
+static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
+{
+ outb_p(addr, RTC_PORT(0));
+ outb_p(data, RTC_PORT(1));
+}
+
+#define RTC_ALWAYS_BCD 0
+
+#ifndef mc146818_decode_year
+#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1970)
+#endif
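A minimal usage sketch, assuming the standard register indices and BCD helpers from <linux/mc146818rtc.h> and <linux/bcd.h>; none of the names below are introduced by this patch:

/*
 * Read the RTC year and convert it with the platform decode helper.
 */
static inline unsigned int example_read_rtc_year(void)
{
	unsigned int year = CMOS_READ(RTC_YEAR);

	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		year = bcd2bin(year);

	return mc146818_decode_year(year);
}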
+
+#endif /* __ASM_MACH_LOONGSON2EF_MC146818RTC_H */
diff --git a/arch/mips/include/asm/mach-loongson64/mem.h b/arch/mips/include/asm/mach-loongson2ef/mem.h
index ce33c174c04d..d1d759b8974e 100644
--- a/arch/mips/include/asm/mach-loongson64/mem.h
+++ b/arch/mips/include/asm/mach-loongson2ef/mem.h
@@ -4,8 +4,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef __ASM_MACH_LOONGSON64_MEM_H
-#define __ASM_MACH_LOONGSON64_MEM_H
+#ifndef __ASM_MACH_LOONGSON2EF_MEM_H
+#define __ASM_MACH_LOONGSON2EF_MEM_H
/*
* high memory space
@@ -34,4 +34,4 @@
#define LOONGSON_MMIO_MEM_END 0x80000000
#endif
-#endif /* __ASM_MACH_LOONGSON64_MEM_H */
+#endif /* __ASM_MACH_LOONGSON2EF_MEM_H */
diff --git a/arch/mips/include/asm/mach-loongson2ef/pci.h b/arch/mips/include/asm/mach-loongson2ef/pci.h
new file mode 100644
index 000000000000..5588c5bc5395
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/pci.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2008 Zhang Le <r0bertz@gentoo.org>
+ * Copyright (c) 2009 Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#ifndef __ASM_MACH_LOONGSON2EF_PCI_H_
+#define __ASM_MACH_LOONGSON2EF_PCI_H_
+
+extern struct pci_ops loongson_pci_ops;
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
+
+/*
+ * We use address window 2 to map the CPU address space onto PCI space:
+ * window2: cpu [1G, 2G] -> pci [1G, 2G]
+ * Windows 0 & 1 are not used for this because the CPU relies on them at boot:
+ * window0: cpu [0, 256M] -> ddr [0, 256M]
+ * window1: cpu [256M, 512M] -> pci [256M, 512M]
+ */
+
+/* LOONGSON_CPU_MEM_SRC must be at least 512M */
+#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
+#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
+
+#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
+#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
+
+#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
+ LOONGSON_PCI_MEM_START + 1)
+
+#else /* loongson2f/32bit & loongson2e */
+
+/* this pci memory space is mapped by pcimap in pci.c */
+#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
+#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
+
+/* this is an offset from mips_io_port_base */
+#define LOONGSON_PCI_IO_START 0x00004000UL
+
+#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
+
+#endif /* !__ASM_MACH_LOONGSON2EF_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-loongson2ef/spaces.h b/arch/mips/include/asm/mach-loongson2ef/spaces.h
new file mode 100644
index 000000000000..ba4e8e9b618e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson2ef/spaces.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_LOONGSON2EF_SPACES_H_
+#define __ASM_MACH_LOONGSON2EF_SPACES_H_
+
+#if defined(CONFIG_64BIT)
+#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif /* CONFIG_64BIT */
+
+#include <asm/mach-generic/spaces.h>
+#endif
diff --git a/arch/mips/include/asm/mach-loongson32/prom.h b/arch/mips/include/asm/mach-loongson32/prom.h
deleted file mode 100644
index cb789f18d790..000000000000
--- a/arch/mips/include/asm/mach-loongson32/prom.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
- */
-
-#ifndef __ASM_MACH_LOONGSON32_PROM_H
-#define __ASM_MACH_LOONGSON32_PROM_H
-
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-
-/* environment arguments from bootloader */
-extern unsigned long memsize, highmemsize;
-
-/* loongson-specific command line, env and memory initialization */
-extern char *prom_getenv(char *name);
-extern void __init prom_init_cmdline(void);
-
-#endif /* __ASM_MACH_LOONGSON32_PROM_H */
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 581915ce231c..4fab38c743dd 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -43,12 +43,10 @@
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
-#define cpu_has_local_ebase 0
-
-#ifdef CONFIG_CPU_LOONGSON3
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
-#endif
+#define cpu_has_mac2008_only 1
+#define cpu_has_mips_r2_exec_hazard 0
#endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index be9f727a9328..73a89913dc38 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -4,8 +4,6 @@
#include <boot_param.h>
-#ifdef CONFIG_CPU_LOONGSON3
-
/* cpu core interrupt numbers */
#define MIPS_CPU_IRQ_BASE 56
@@ -35,8 +33,6 @@
#define LOONGSON_INT_COREx_INTy(x, y) (1<<(x) | 1<<(y+4)) /* route to int y of core x */
-#endif
-
extern void fixup_irqs(void);
extern void loongson3_ipi_interrupt(struct pt_regs *regs);
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index b5e288a12dfe..87a5bfbf8cfe 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -17,7 +17,6 @@
* Override macros used in arch/mips/kernel/head.S.
*/
.macro kernel_entry_setup
-#ifdef CONFIG_CPU_LOONGSON3
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
@@ -30,23 +29,29 @@
mtc0 t0, CP0_PAGEGRAIN
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
- bnez t0, 1f
+ slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-1:
+2:
_ehb
.set pop
-#endif
.endm
/*
* Do SMP slave processor setup.
*/
.macro smp_slave_setup
-#ifdef CONFIG_CPU_LOONGSON3
.set push
.set mips64
/* Set LPA on LOONGSON3 config3 */
@@ -59,16 +64,23 @@
mtc0 t0, CP0_PAGEGRAIN
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
+ /* Loongson-3A R4+ */
+ andi t1, t0, PRID_IMP_MASK
+ li t2, PRID_IMP_LOONGSON_64G
+ beq t1, t2, 1f
+ nop
+ /* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
- bnez t0, 1f
+ slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ bnez t0, 2f
+ nop
+1:
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-1:
+2:
_ehb
.set pop
-#endif
.endm
#endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index 694a58574ec0..a8fce112a9b0 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -12,8 +12,6 @@
#include <linux/irq.h>
#include <boot_param.h>
-/* loongson internal northbridge initialization */
-extern void bonito_irq_init(void);
/* machine-specific reboot/halt operation */
extern void mach_prepare_reboot(void);
@@ -26,25 +24,9 @@ extern const struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
extern void __init prom_init_memory(void);
-extern void __init prom_init_cmdline(void);
-extern void __init prom_init_machtype(void);
extern void __init prom_init_env(void);
-#ifdef CONFIG_LOONGSON_UART_BASE
-extern unsigned long _loongson_uart_base[], loongson_uart_base[];
-extern void prom_init_loongson_uart_base(void);
-#endif
-
-static inline void prom_init_uart_base(void)
-{
-#ifdef CONFIG_LOONGSON_UART_BASE
- prom_init_loongson_uart_base();
-#endif
-}
/* irq operation functions */
-extern void bonito_irqdispatch(void);
-extern void __init bonito_irq_init(void);
-extern void __init mach_init_irq(void);
extern void mach_irq_dispatch(unsigned int pending);
extern int mach_i8259_irq(void);
@@ -64,17 +46,6 @@ extern int mach_i8259_irq(void);
#define LOONGSON3_REG32(base, x) \
(*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
-#define LOONGSON_IRQ_BASE 32
-#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
-
-#include <linux/interrupt.h>
-static inline void do_perfcnt_IRQ(void)
-{
-#if IS_ENABLED(CONFIG_OPROFILE)
- do_IRQ(LOONGSON2_PERFCNT_IRQ);
-#endif
-}
-
#define LOONGSON_FLASH_BASE 0x1c000000
#define LOONGSON_FLASH_SIZE 0x02000000 /* 32M */
#define LOONGSON_FLASH_TOP (LOONGSON_FLASH_BASE+LOONGSON_FLASH_SIZE-1)
@@ -109,11 +80,7 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
-#ifdef CONFIG_CPU_LOONGSON3
#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
-#else
-#define LOONGSON_PCIIO_BASE 0x1fd00000
-#endif
#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
@@ -270,86 +237,4 @@ extern u64 loongson_freqctrl[MAX_PACKAGES];
#define LOONGSON_PCIMAP_WIN(WIN, ADDR) \
((((ADDR)>>26) & LOONGSON_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
-#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
-#include <linux/cpufreq.h>
-extern struct cpufreq_frequency_table loongson2_clockmod_table[];
-#endif
-
-/*
- * address windows configuration module
- *
- * loongson2e do not have this module
- */
-#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
-
-/* address window config module base address */
-#define LOONGSON_ADDRWINCFG_BASE 0x3ff00000ul
-#define LOONGSON_ADDRWINCFG_SIZE 0x180
-
-extern unsigned long _loongson_addrwincfg_base;
-#define LOONGSON_ADDRWINCFG(offset) \
- (*(volatile u64 *)(_loongson_addrwincfg_base + (offset)))
-
-#define CPU_WIN0_BASE LOONGSON_ADDRWINCFG(0x00)
-#define CPU_WIN1_BASE LOONGSON_ADDRWINCFG(0x08)
-#define CPU_WIN2_BASE LOONGSON_ADDRWINCFG(0x10)
-#define CPU_WIN3_BASE LOONGSON_ADDRWINCFG(0x18)
-
-#define CPU_WIN0_MASK LOONGSON_ADDRWINCFG(0x20)
-#define CPU_WIN1_MASK LOONGSON_ADDRWINCFG(0x28)
-#define CPU_WIN2_MASK LOONGSON_ADDRWINCFG(0x30)
-#define CPU_WIN3_MASK LOONGSON_ADDRWINCFG(0x38)
-
-#define CPU_WIN0_MMAP LOONGSON_ADDRWINCFG(0x40)
-#define CPU_WIN1_MMAP LOONGSON_ADDRWINCFG(0x48)
-#define CPU_WIN2_MMAP LOONGSON_ADDRWINCFG(0x50)
-#define CPU_WIN3_MMAP LOONGSON_ADDRWINCFG(0x58)
-
-#define PCIDMA_WIN0_BASE LOONGSON_ADDRWINCFG(0x60)
-#define PCIDMA_WIN1_BASE LOONGSON_ADDRWINCFG(0x68)
-#define PCIDMA_WIN2_BASE LOONGSON_ADDRWINCFG(0x70)
-#define PCIDMA_WIN3_BASE LOONGSON_ADDRWINCFG(0x78)
-
-#define PCIDMA_WIN0_MASK LOONGSON_ADDRWINCFG(0x80)
-#define PCIDMA_WIN1_MASK LOONGSON_ADDRWINCFG(0x88)
-#define PCIDMA_WIN2_MASK LOONGSON_ADDRWINCFG(0x90)
-#define PCIDMA_WIN3_MASK LOONGSON_ADDRWINCFG(0x98)
-
-#define PCIDMA_WIN0_MMAP LOONGSON_ADDRWINCFG(0xa0)
-#define PCIDMA_WIN1_MMAP LOONGSON_ADDRWINCFG(0xa8)
-#define PCIDMA_WIN2_MMAP LOONGSON_ADDRWINCFG(0xb0)
-#define PCIDMA_WIN3_MMAP LOONGSON_ADDRWINCFG(0xb8)
-
-#define ADDRWIN_WIN0 0
-#define ADDRWIN_WIN1 1
-#define ADDRWIN_WIN2 2
-#define ADDRWIN_WIN3 3
-
-#define ADDRWIN_MAP_DST_DDR 0
-#define ADDRWIN_MAP_DST_PCI 1
-#define ADDRWIN_MAP_DST_LIO 1
-
-/*
- * s: CPU, PCIDMA
- * d: DDR, PCI, LIO
- * win: 0, 1, 2, 3
- * src: map source
- * dst: map destination
- * size: ~mask + 1
- */
-#define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
- s##_WIN##w##_BASE = (src); \
- s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
- s##_WIN##w##_MASK = ~(size-1); \
-} while (0)
-
-#define LOONGSON_ADDRWIN_CPUTOPCI(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(CPU, PCI, win, src, dst, size)
-#define LOONGSON_ADDRWIN_CPUTODDR(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(CPU, DDR, win, src, dst, size)
-#define LOONGSON_ADDRWIN_PCITODDR(win, src, dst, size) \
- LOONGSON_ADDRWIN_CFG(PCIDMA, DDR, win, src, dst, size)
-
-#endif /* ! CONFIG_CPU_SUPPORTS_ADDRWINCFG */
-
#endif /* __ASM_MACH_LOONGSON64_LOONGSON_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_regs.h b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
new file mode 100644
index 000000000000..363a47a5d26e
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson64/loongson_regs.h
@@ -0,0 +1,227 @@
+/*
+ * Read/Write Loongson Extension Registers
+ */
+
+#ifndef _LOONGSON_REGS_H_
+#define _LOONGSON_REGS_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+#include <asm/mipsregs.h>
+#include <asm/cpu.h>
+
+static inline bool cpu_has_cfg(void)
+{
+ return ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G);
+}
+
+static inline u32 read_cpucfg(u32 reg)
+{
+ u32 __res;
+
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8080118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+/* Bit Domains for CFG registers */
+#define LOONGSON_CFG0 0x0
+#define LOONGSON_CFG0_PRID GENMASK(31, 0)
+
+#define LOONGSON_CFG1 0x1
+#define LOONGSON_CFG1_FP BIT(0)
+#define LOONGSON_CFG1_FPREV GENMASK(3, 1)
+#define LOONGSON_CFG1_MMI BIT(4)
+#define LOONGSON_CFG1_MSA1 BIT(5)
+#define LOONGSON_CFG1_MSA2 BIT(6)
+#define LOONGSON_CFG1_CGP BIT(7)
+#define LOONGSON_CFG1_WRP BIT(8)
+#define LOONGSON_CFG1_LSX1 BIT(9)
+#define LOONGSON_CFG1_LSX2 BIT(10)
+#define LOONGSON_CFG1_LASX BIT(11)
+#define LOONGSON_CFG1_R6FXP BIT(12)
+#define LOONGSON_CFG1_R6CRCP BIT(13)
+#define LOONGSON_CFG1_R6FPP BIT(14)
+#define LOONGSON_CFG1_CNT64 BIT(15)
+#define LOONGSON_CFG1_LSLDR0 BIT(16)
+#define LOONGSON_CFG1_LSPREF BIT(17)
+#define LOONGSON_CFG1_LSPREFX BIT(18)
+#define LOONGSON_CFG1_LSSYNCI BIT(19)
+#define LOONGSON_CFG1_LSUCA BIT(20)
+#define LOONGSON_CFG1_LLSYNC BIT(21)
+#define LOONGSON_CFG1_TGTSYNC BIT(22)
+#define LOONGSON_CFG1_LLEXC BIT(23)
+#define LOONGSON_CFG1_SCRAND BIT(24)
+#define LOONGSON_CFG1_MUALP BIT(25)
+#define LOONGSON_CFG1_KMUALEN BIT(26)
+#define LOONGSON_CFG1_ITLBT BIT(27)
+#define LOONGSON_CFG1_LSUPERF BIT(28)
+#define LOONGSON_CFG1_SFBP BIT(29)
+#define LOONGSON_CFG1_CDMAP BIT(30)
+
+#define LOONGSON_CFG2 0x2
+#define LOONGSON_CFG2_LEXT1 BIT(0)
+#define LOONGSON_CFG2_LEXT2 BIT(1)
+#define LOONGSON_CFG2_LEXT3 BIT(2)
+#define LOONGSON_CFG2_LSPW BIT(3)
+#define LOONGSON_CFG2_LBT1 BIT(4)
+#define LOONGSON_CFG2_LBT2 BIT(5)
+#define LOONGSON_CFG2_LBT3 BIT(6)
+#define LOONGSON_CFG2_LBTMMU BIT(7)
+#define LOONGSON_CFG2_LPMP BIT(8)
+#define LOONGSON_CFG2_LPMPREV GENMASK(11, 9)
+#define LOONGSON_CFG2_LAMO BIT(12)
+#define LOONGSON_CFG2_LPIXU BIT(13)
+#define LOONGSON_CFG2_LPIXUN BIT(14)
+#define LOONGSON_CFG2_LZVP BIT(15)
+#define LOONGSON_CFG2_LZVREV GENMASK(18, 16)
+#define LOONGSON_CFG2_LGFTP BIT(19)
+#define LOONGSON_CFG2_LGFTPREV GENMASK(22, 20)
+#define LOONGSON_CFG2_LLFTP BIT(23)
+#define LOONGSON_CFG2_LLFTPREV GENMASK(26, 24)
+#define LOONGSON_CFG2_LCSRP BIT(27)
+#define LOONGSON_CFG2_LDISBLIKELY BIT(28)
+
+#define LOONGSON_CFG3 0x3
+#define LOONGSON_CFG3_LCAMP BIT(0)
+#define LOONGSON_CFG3_LCAMREV GENMASK(3, 1)
+#define LOONGSON_CFG3_LCAMNUM GENMASK(11, 4)
+#define LOONGSON_CFG3_LCAMKW GENMASK(19, 12)
+#define LOONGSON_CFG3_LCAMVW GENMASK(27, 20)
+
+#define LOONGSON_CFG4 0x4
+#define LOONGSON_CFG4_CCFREQ GENMASK(31, 0)
+
+#define LOONGSON_CFG5 0x5
+#define LOONGSON_CFG5_CFM GENMASK(15, 0)
+#define LOONGSON_CFG5_CFD GENMASK(31, 16)
+
+#define LOONGSON_CFG6 0x6
+
+#define LOONGSON_CFG7 0x7
+#define LOONGSON_CFG7_GCCAEQRP BIT(0)
+#define LOONGSON_CFG7_UCAWINP BIT(1)
+
+static inline bool cpu_has_csr(void)
+{
+ if (cpu_has_cfg())
+ return (read_cpucfg(LOONGSON_CFG2) & LOONGSON_CFG2_LCSRP);
+
+ return false;
+}
+
+static inline u32 csr_readl(u32 reg)
+{
+ u32 __res;
+
+ /* RDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8000118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline u64 csr_readq(u32 reg)
+{
+ u64 __res;
+
+ /* DRDCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r __res,%0\n\t"
+ "parse_r reg,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8020118 | (reg << 21) | (__res << 11))\n\t"
+ :"=r"(__res)
+ :"r"(reg)
+ :
+ );
+ return __res;
+}
+
+static inline void csr_writel(u32 val, u32 reg)
+{
+ /* WRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8010118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+static inline void csr_writeq(u64 val, u32 reg)
+{
+ /* DWRCSR reg, val */
+ __asm__ __volatile__(
+ "parse_r reg,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8030118 | (reg << 21) | (val << 11))\n\t"
+ :
+ :"r"(reg),"r"(val)
+ :
+ );
+}
+
+/* Public CSR registers can also be accessed with regular addresses */
+#define CSR_PUBLIC_MMIO_BASE 0x1fe00000
+
+#define MMIO_CSR(x) (void *)TO_UNCAC(CSR_PUBLIC_MMIO_BASE + x)
+
+#define LOONGSON_CSR_FEATURES 0x8
+#define LOONGSON_CSRF_TEMP BIT(0)
+#define LOONGSON_CSRF_NODECNT BIT(1)
+#define LOONGSON_CSRF_MSI BIT(2)
+#define LOONGSON_CSRF_EXTIOI BIT(3)
+#define LOONGSON_CSRF_IPI BIT(4)
+#define LOONGSON_CSRF_FREQ BIT(5)
+
+#define LOONGSON_CSR_VENDOR 0x10 /* Vendor name string, should be "Loongson" */
+#define LOONGSON_CSR_CPUNAME 0x20 /* Processor name string */
+#define LOONGSON_CSR_NODECNT 0x408
+#define LOONGSON_CSR_CPUTEMP 0x428
+
+/* Per-core CSRs, only accessible by the local core */
+#define LOONGSON_CSR_IPI_STATUS 0x1000
+#define LOONGSON_CSR_IPI_EN 0x1004
+#define LOONGSON_CSR_IPI_SET 0x1008
+#define LOONGSON_CSR_IPI_CLEAR 0x100c
+#define LOONGSON_CSR_IPI_SEND 0x1040
+#define CSR_IPI_SEND_IP_SHIFT 0
+#define CSR_IPI_SEND_CPU_SHIFT 16
+#define CSR_IPI_SEND_BLOCK BIT(31)
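A short sketch of how the CSR detection and accessors above fit together; the helper name is made up for illustration:

/*
 * Probe for the CSR block, then test one bit of the feature word.
 */
static inline bool example_loongson_has_extioi(void)
{
	if (!cpu_has_csr())
		return false;

	return csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_EXTIOI;
}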
+
+static inline u64 drdtime(void)
+{
+ int rID = 0;
+ u64 val = 0;
+
+ __asm__ __volatile__(
+ "parse_r rID,%0\n\t"
+ "parse_r val,%1\n\t"
+ ".insn \n\t"
+ ".word (0xc8090118 | (rID << 21) | (val << 11))\n\t"
+ :"=r"(rID),"=r"(val)
+ :
+ );
+ return val;
+}
+
+#endif
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 62073d60739f..3a25dbd3b3e9 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -6,8 +6,8 @@
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
*/
-#ifndef _ASM_MACH_MMZONE_H
-#define _ASM_MACH_MMZONE_H
+#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
+#define _ASM_MACH_LOONGSON64_MMZONE_H
#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
@@ -19,30 +19,9 @@
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
-#define LEVELS_PER_SLICE 128
+extern struct pglist_data *__node_data[];
-struct slice_data {
- unsigned long irq_enable_mask[2];
- int level_to_irq[LEVELS_PER_SLICE];
-};
-
-struct hub_data {
- cpumask_t h_cpus;
- unsigned long slice_map;
- unsigned long irq_alloc_mask[2];
- struct slice_data slice[2];
-};
-
-struct node_data {
- struct pglist_data pglist;
- struct hub_data hub;
- cpumask_t cpumask;
-};
-
-extern struct node_data *__node_data[];
-
-#define NODE_DATA(n) (&__node_data[(n)]->pglist)
-#define hub_data(n) (&__node_data[(n)]->hub)
+#define NODE_DATA(n) (__node_data[n])
extern void setup_zero_pages(void);
extern void __init prom_init_numa_memory(void);
diff --git a/arch/mips/include/asm/mach-loongson64/pci.h b/arch/mips/include/asm/mach-loongson64/pci.h
index 97f807fb2117..8b59d64a23e8 100644
--- a/arch/mips/include/asm/mach-loongson64/pci.h
+++ b/arch/mips/include/asm/mach-loongson64/pci.h
@@ -12,39 +12,8 @@ extern struct pci_ops loongson_pci_ops;
/* this is an offset from mips_io_port_base */
#define LOONGSON_PCI_IO_START 0x00004000UL
-#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
-
-/*
- * we use address window2 to map cpu address space to pci space
- * window2: cpu [1G, 2G] -> pci [1G, 2G]
- * why not use window 0 & 1? because they are used by cpu when booting.
- * window0: cpu [0, 256M] -> ddr [0, 256M]
- * window1: cpu [256M, 512M] -> pci [256M, 512M]
- */
-
-/* the smallest LOONGSON_CPU_MEM_SRC can be 512M */
-#define LOONGSON_CPU_MEM_SRC 0x40000000ul /* 1G */
-#define LOONGSON_PCI_MEM_DST LOONGSON_CPU_MEM_SRC
-
-#define LOONGSON_PCI_MEM_START LOONGSON_PCI_MEM_DST
-#define LOONGSON_PCI_MEM_END (0x80000000ul-1) /* 2G */
-
-#define MMAP_CPUTOPCI_SIZE (LOONGSON_PCI_MEM_END - \
- LOONGSON_PCI_MEM_START + 1)
-
-#else /* loongson2f/32bit & loongson2e */
-
-/* this pci memory space is mapped by pcimap in pci.c */
-#ifdef CONFIG_CPU_LOONGSON3
#define LOONGSON_PCI_MEM_START 0x40000000UL
#define LOONGSON_PCI_MEM_END 0x7effffffUL
-#else
-#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
-#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
-#endif
-/* this is an offset from mips_io_port_base */
-#define LOONGSON_PCI_IO_START 0x00004000UL
-#endif /* !CONFIG_CPU_SUPPORTS_ADDRWINCFG */
#endif /* !__ASM_MACH_LOONGSON64_PCI_H_ */
diff --git a/arch/mips/include/asm/mach-loongson64/topology.h b/arch/mips/include/asm/mach-loongson64/topology.h
index 7ff819ab308a..3414a1fd1783 100644
--- a/arch/mips/include/asm/mach-loongson64/topology.h
+++ b/arch/mips/include/asm/mach-loongson64/topology.h
@@ -5,7 +5,9 @@
#ifdef CONFIG_NUMA
#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
-#define cpumask_of_node(node) (&__node_data[(node)]->cpumask)
+
+extern cpumask_t __node_cpumask[];
+#define cpumask_of_node(node) (&__node_cpumask[node])
struct pci_bus;
extern int pcibus_to_node(struct pci_bus *);
diff --git a/arch/mips/include/asm/mach-malta/war.h b/arch/mips/include/asm/mach-malta/war.h
index d068fc411f47..d62d2ffe515e 100644
--- a/arch/mips/include/asm/mach-malta/war.h
+++ b/arch/mips/include/asm/mach-malta/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 1
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/war.h b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
index a60bf9dd14ae..31c546f58bb5 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-rc32434/war.h b/arch/mips/include/asm/mach-rc32434/war.h
index 1bfd489a3708..af430d26f713 100644
--- a/arch/mips/include/asm/mach-rc32434/war.h
+++ b/arch/mips/include/asm/mach-rc32434/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 1
diff --git a/arch/mips/include/asm/mach-rm/war.h b/arch/mips/include/asm/mach-rm/war.h
index a3dde98549bb..eca16d167c2f 100644
--- a/arch/mips/include/asm/mach-rm/war.h
+++ b/arch/mips/include/asm/mach-rm/war.h
@@ -15,7 +15,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 1
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 520f8fc2c806..4755b6116807 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
diff --git a/arch/mips/include/asm/mach-tx49xx/war.h b/arch/mips/include/asm/mach-tx49xx/war.h
index a8e2c586a18c..445abb4eb769 100644
--- a/arch/mips/include/asm/mach-tx49xx/war.h
+++ b/arch/mips/include/asm/mach-tx49xx/war.h
@@ -11,7 +11,6 @@
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 1e6966e8527e..796fe47cfd17 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -689,6 +689,12 @@
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+/* Ingenic HPTLB off bits */
+#define XBURST_PAGECTRL_HPTLB_DIS 0xa9000000
+
+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN (_ULCAST_(1) << 4)
+
/* Config7 Bits specific to MIPS Technologies. */
/* Performance counters implemented Per TC */
@@ -1095,9 +1101,12 @@
/*
* Bits 22:20 of the FPU Status Register will be read as 0,
* and should be written as zero.
+ * MAC2008 was removed in Release 5 so we still treat it as
+ * reserved.
*/
#define FPU_CSR_RSVD (_ULCAST_(7) << 20)
+#define FPU_CSR_MAC2008 (_ULCAST_(1) << 20)
#define FPU_CSR_ABS2008 (_ULCAST_(1) << 19)
#define FPU_CSR_NAN2008 (_ULCAST_(1) << 18)
@@ -1968,6 +1977,9 @@ do { \
#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7)
#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
+/* Ingenic page ctrl register */
+#define write_c0_page_ctrl(val) __write_32bit_c0_register($5, 4, val)
+
/*
* Macros to access the guest system control coprocessor
*/
@@ -2813,6 +2825,7 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 6dc0b21b8acd..9846047b3d3d 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -103,34 +103,28 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "TX39XX "
#elif defined CONFIG_CPU_VR41XX
#define MODULE_PROC_FAMILY "VR41XX "
-#elif defined CONFIG_CPU_R4300
-#define MODULE_PROC_FAMILY "R4300 "
#elif defined CONFIG_CPU_R4X00
#define MODULE_PROC_FAMILY "R4X00 "
#elif defined CONFIG_CPU_TX49XX
#define MODULE_PROC_FAMILY "TX49XX "
#elif defined CONFIG_CPU_R5000
#define MODULE_PROC_FAMILY "R5000 "
-#elif defined CONFIG_CPU_R5432
-#define MODULE_PROC_FAMILY "R5432 "
#elif defined CONFIG_CPU_R5500
#define MODULE_PROC_FAMILY "R5500 "
#elif defined CONFIG_CPU_NEVADA
#define MODULE_PROC_FAMILY "NEVADA "
-#elif defined CONFIG_CPU_R8000
-#define MODULE_PROC_FAMILY "R8000 "
#elif defined CONFIG_CPU_R10000
#define MODULE_PROC_FAMILY "R10000 "
#elif defined CONFIG_CPU_RM7000
#define MODULE_PROC_FAMILY "RM7000 "
#elif defined CONFIG_CPU_SB1
#define MODULE_PROC_FAMILY "SB1 "
-#elif defined CONFIG_CPU_LOONGSON1
-#define MODULE_PROC_FAMILY "LOONGSON1 "
-#elif defined CONFIG_CPU_LOONGSON2
-#define MODULE_PROC_FAMILY "LOONGSON2 "
-#elif defined CONFIG_CPU_LOONGSON3
-#define MODULE_PROC_FAMILY "LOONGSON3 "
+#elif defined CONFIG_CPU_LOONGSON32
+#define MODULE_PROC_FAMILY "LOONGSON32 "
+#elif defined CONFIG_CPU_LOONGSON2EF
+#define MODULE_PROC_FAMILY "LOONGSON2EF "
+#elif defined CONFIG_CPU_LOONGSON64
+#define MODULE_PROC_FAMILY "LOONGSON64 "
#elif defined CONFIG_CPU_CAVIUM_OCTEON
#define MODULE_PROC_FAMILY "OCTEON "
#elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index cbdc14b77435..adab7b54c3b4 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -36,6 +36,7 @@
#include <asm/octeon/octeon-feature.h>
#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
enum cvmx_ipd_mode {
CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
return 0x0000000000003CB0ull;
+ /* Else, fall through */
default:
return 0x0000000000023CB0ull;
}
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 60481502826a..a2e2876357ce 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -51,7 +51,7 @@ extern void octeon_setup_delays(void);
extern void octeon_io_clk_delay(unsigned long);
#define OCTEON_ARGV_MAX_ARGS 64
-#define OCTOEN_SERIAL_LEN 20
+#define OCTEON_SERIAL_LEN 20
struct octeon_boot_descriptor {
#ifdef __BIG_ENDIAN_BITFIELD
@@ -102,7 +102,7 @@ struct octeon_boot_descriptor {
uint16_t chip_type;
uint8_t chip_rev_major;
uint8_t chip_rev_minor;
- char board_serial_number[OCTOEN_SERIAL_LEN];
+ char board_serial_number[OCTEON_SERIAL_LEN];
uint8_t mac_addr_base[6];
uint8_t mac_addr_count;
uint64_t cvmx_desc_vaddr;
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 436099883022..6f48649201c5 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -108,7 +108,6 @@ extern unsigned long PCIBIOS_MIN_MEM;
#define HAVE_PCI_MMAP
#define ARCH_GENERIC_PCI_MMAP_RESOURCE
-#define HAVE_ARCH_PCI_RESOURCE_TO_USER
/*
* Dynamic DMA mapping stuff.
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index a92cd30b48c9..9c476a0400e0 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -806,7 +806,9 @@ struct bridge_controller {
unsigned long baddr;
unsigned long intr_addr;
struct irq_domain *domain;
- unsigned int pci_int[8];
+ unsigned int pci_int[8][2];
+ unsigned int int_mapping[8][2];
+ u32 ioc3_sid[8];
nasid_t nasid;
};
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index aa16b85ddffc..fa77cb71f303 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pte_free_tlb(tlb,pte,address) \
do { \
- pgtable_page_dtor(pte); \
+ pgtable_pte_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
} while (0)
@@ -96,17 +96,15 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
free_pages((unsigned long)pud, PUD_ORDER);
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
- set_pgd(pgd, __pgd((unsigned long)pud));
+ set_p4d(p4d, __p4d((unsigned long)pud));
}
#define __pud_free_tlb(tlb, x, addr) pud_free((tlb)->mm, x)
#endif /* __PAGETABLE_PUD_FOLDED */
-#define check_pgt_cache() do { } while (0)
-
extern void pagetable_init(void);
#endif /* _ASM_PGALLOC_H */
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 74afe8c76bdd..1945c8970141 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -16,13 +16,30 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif
+/*
+ * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
+ *
+ * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
+ * our 2-level table layout would normally have a PGD entry cover a contiguous
+ * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
+ * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
+ * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
+ * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
+ * To correct for this, when huge pages are enabled, we halve the number of
+ * pointers a PTE page holds, making its last half go to waste. Correspondingly,
+ * we double the number of PGD pages. Overall, page table memory overhead
+ * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
+ *
+ * NOTE: We don't yet support huge pages if extended-addressing is enabled
+ * (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
+ */
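Plugging the common 4KB-page configuration into the formulas below makes the tradeoff concrete (a worked example, assuming PTE_ORDER = 0 and PTE_T_LOG2 = PGD_T_LOG2 = 2 as on a plain 32-bit build):

/*
 * With PAGE_SHIFT = 12 and huge pages enabled:
 *
 *   PGDIR_SHIFT  = 2*12 + 0 - 2 - 1    = 21  -> each PGD entry spans 2MB
 *   PTRS_PER_PTE = 4096 / 4 / 2        = 512 -> half of each PTE page used
 *   __PGD_ORDER  = 32 - 36 + 2 + 2 + 1 = 1   -> the PGD grows to two pages
 *                                              (2048 entries * 2MB = 4GB)
 *
 * A 2MB huge mapping then occupies one EntryLo0/EntryLo1 pair of 1MB
 * hardware pages, which is a size the TLB actually supports.
 */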
+
extern int temp_tlb_entry;
/*
@@ -44,7 +61,12 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
*/
/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+#else
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+#endif
+
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -52,14 +74,23 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
* Entries per page directory level: we use two-level, so
* we don't really have any PUD/PMD directory physically.
*/
-#define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+#else
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+#endif
+
#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 1
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
#define PTE_ORDER 0
#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+#else
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
@@ -87,7 +118,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
extern void load_pgd(unsigned long pg_dir);
-extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
/*
* Empty pgd/pmd entries point to the invalid_pte_table.
@@ -97,7 +128,19 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
-#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ /* pmd_huge(pmd) but inline */
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return 0;
+#endif
+
+ if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+ return 1;
+
+ return 0;
+}
static inline int pmd_present(pmd_t pmd)
{
@@ -146,19 +189,18 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
@@ -175,7 +217,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3K_TLB)
/* Swap entries must have VALID bit cleared. */
#define __swp_type(x) (((x).val >> 10) & 0x1f)
@@ -220,6 +262,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
-#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
+#endif /* defined(CONFIG_CPU_R3K_TLB) */
#endif /* _ASM_PGTABLE_32_H */
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 93a9dce31f25..f92716cfa4f4 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,11 +17,12 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#define __ARCH_USE_5LEVEL_HACK
-#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
+#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
-#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
+#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
+#else
+#include <asm-generic/pgtable-nop4d.h>
#endif
/*
@@ -186,44 +187,49 @@ extern pud_t invalid_pud_table[PTRS_PER_PUD];
/*
* Empty pgd entries point to the invalid_pud_table.
*/
-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
{
- return pgd_val(pgd) == (unsigned long)invalid_pud_table;
+ return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}
-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
{
- if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
+ if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
return 1;
return 0;
}
-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
{
- return pgd_val(pgd) != (unsigned long)invalid_pud_table;
+ return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
+ p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
- return pgd_val(pgd);
+ return p4d_val(p4d);
}
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+#define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d))
+#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
+
+#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
- return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+ return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}
-static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
+static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
- *pgd = pgdval;
+ *p4d = p4dval;
}
#endif
@@ -314,10 +320,6 @@ static inline void pud_clear(pud_t *pudp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
-#define __pgd_offset(address) pgd_index(address)
-#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address) pmd_index(address)
-
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index f88a48cd68b2..4da79b85c179 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -52,6 +52,9 @@ enum pgtable_bits {
_PAGE_WRITE_SHIFT,
_PAGE_ACCESSED_SHIFT,
_PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
};
/*
@@ -78,9 +81,12 @@ enum pgtable_bits {
_PAGE_WRITE_SHIFT,
_PAGE_ACCESSED_SHIFT,
_PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
};
-#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#elif defined(CONFIG_CPU_R3K_TLB)
/* Page table bits used for r3k systems */
enum pgtable_bits {
@@ -90,6 +96,9 @@ enum pgtable_bits {
_PAGE_WRITE_SHIFT,
_PAGE_ACCESSED_SHIFT,
_PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
/* Used by TLB hardware (placed in EntryLo) */
_PAGE_GLOBAL_SHIFT = 8,
@@ -110,9 +119,12 @@ enum pgtable_bits {
_PAGE_WRITE_SHIFT,
_PAGE_ACCESSED_SHIFT,
_PAGE_MODIFIED_SHIFT,
-#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
_PAGE_HUGE_SHIFT,
#endif
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+ _PAGE_SPECIAL_SHIFT,
+#endif
/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_CPU_HAS_RIXI)
@@ -132,9 +144,14 @@ enum pgtable_bits {
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
# define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#endif
+#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
+# define _PAGE_SPECIAL (1 << _PAGE_SPECIAL_SHIFT)
+#else
+# define _PAGE_SPECIAL 0
+#endif
/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_XPA)
@@ -146,7 +163,7 @@ enum pgtable_bits {
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3K_TLB)
# define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
# define _CACHE_MASK _CACHE_UNCACHED
# define _PFN_SHIFT PAGE_SHIFT
@@ -204,7 +221,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
/*
* Cache attributes
*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3K_TLB)
#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED
@@ -216,13 +233,6 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
-#elif defined(CONFIG_CPU_LOONGSON3)
-
-/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
-
-#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
-#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
-
#elif defined(CONFIG_MACH_INGENIC)
/* Ingenic uses the WA bit to achieve write-combine memory writes */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 7d27194e3b45..aef5378f909c 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -199,7 +199,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
-#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+#if !defined(CONFIG_CPU_R3K_TLB)
if (pte_val(pteval) & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
@@ -218,7 +218,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
htw_stop();
-#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
+#if !defined(CONFIG_CPU_R3K_TLB)
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
@@ -277,6 +277,7 @@ extern pgd_t swapper_pg_dir[];
static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return pte.pte_low & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -337,10 +338,17 @@ static inline pte_t pte_mkyoung(pte_t pte)
}
return pte;
}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte.pte_low |= _PAGE_SPECIAL;
+ return pte;
+}
#else
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte)
{
@@ -384,6 +392,12 @@ static inline pte_t pte_mkyoung(pte_t pte)
return pte;
}
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= _PAGE_SPECIAL;
+ return pte;
+}
+
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
@@ -394,8 +408,6 @@ static inline pte_t pte_mkhuge(pte_t pte)
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
-static inline int pte_special(pte_t pte) { return 0; }
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
* Macro to make mark a page protection value as "uncacheable". Note
@@ -627,31 +639,20 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifdef _PAGE_HUGE
+#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+#endif
+
#define gup_fast_permitted(start, end) (!cpu_has_dc_aliases)
#include <asm-generic/pgtable.h>
/*
- * uncached accelerated TLB map for video memory access
- */
-#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#endif
-
-/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init() do { } while (0)
-
#endif /* _ASM_PGTABLE_H */
diff --git a/arch/mips/include/asm/pmon.h b/arch/mips/include/asm/pmon.h
deleted file mode 100644
index 6ad519189ce2..000000000000
--- a/arch/mips/include/asm/pmon.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * The cpustart method is a PMC-Sierra's function to start the secondary CPU.
- * Stock PMON 2000 has the smpfork, semlock and semunlock methods instead.
- */
-#ifndef _ASM_PMON_H
-#define _ASM_PMON_H
-
-struct callvectors {
- int (*open) (char*, int, int);
- int (*close) (int);
- int (*read) (int, void*, int);
- int (*write) (int, void*, int);
- off_t (*lseek) (int, off_t, int);
- int (*printf) (const char*, ...);
- void (*cacheflush) (void);
- char* (*gets) (char*);
- union {
- int (*smpfork) (unsigned long cp, char *sp);
- int (*cpustart) (long, void (*)(void), void *, long);
- } _s;
- int (*semlock) (int sem);
- void (*semunlock) (int sem);
-};
-
-extern struct callvectors *debug_vectors;
-
-#define pmon_open(name, flags, mode) debug_vectors->open(name, flage, mode)
-#define pmon_close(fd) debug_vectors->close(fd)
-#define pmon_read(fd, buf, count) debug_vectors->read(fd, buf, count)
-#define pmon_write(fd, buf, count) debug_vectors->write(fd, buf, count)
-#define pmon_lseek(fd, off, whence) debug_vectors->lseek(fd, off, whence)
-#define pmon_printf(fmt...) debug_vectors->printf(fmt)
-#define pmon_cacheflush() debug_vectors->cacheflush()
-#define pmon_gets(s) debug_vectors->gets(s)
-#define pmon_cpustart(n, f, sp, gp) debug_vectors->_s.cpustart(n, f, sp, gp)
-#define pmon_smpfork(cp, sp) debug_vectors->_s.smpfork(cp, sp)
-#define pmon_semlock(sem) debug_vectors->semlock(sem)
-#define pmon_semunlock(sem) debug_vectors->semunlock(sem)
-
-#endif /* _ASM_PMON_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index aca909bd7841..7619ad319400 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -29,11 +29,6 @@
extern unsigned int vced_count, vcei_count;
-/*
- * MIPS does have an arch_pick_mmap_layout()
- */
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
-
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
/* User space process size is limited to 1GB in KVM Guest Mode */
@@ -390,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
-#ifdef CONFIG_CPU_LOONGSON3
+#ifdef CONFIG_CPU_LOONGSON64
/*
* Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
* tight read loop is executed, because reads take priority over writes & the
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 7f4a32d3345a..15ab16f99f28 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -15,12 +15,14 @@
#include <linux/stringify.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
+#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
@@ -39,16 +41,19 @@ extern void (*r4k_blast_icache)(void);
*/
#define INDEX_BASE CKSEG0
-#define cache_op(op,addr) \
+#define _cache_op(insn, op, addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
- " cache %0, %1 \n" \
+ " " insn("%0", "%1") " \n" \
" .set pop \n" \
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
+#define cache_op(op, addr) \
+ _cache_op(kernel_cache, op, addr)
+
static inline void flush_icache_line_indexed(unsigned long addr)
{
cache_op(Index_Invalidate_I, addr);
@@ -67,7 +72,7 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
@@ -149,7 +154,7 @@ static inline void flush_scache_line(unsigned long addr)
static inline int protected_flush_icache_line(unsigned long addr)
{
switch (boot_cpu_type()) {
- case CPU_LOONGSON2:
+ case CPU_LOONGSON2EF:
return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
default:
@@ -193,338 +198,10 @@ static inline void invalidate_tcache_page(unsigned long addr)
cache_op(Page_Invalidate_T, addr);
}
-#ifndef CONFIG_CPU_MIPSR6
-#define cache16_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
- " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
- " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
- " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
- " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
- " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
- " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
- " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
- " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
- " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
- " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
- " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
- " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
- " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
- " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
- " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
- " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
- " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
- " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
- " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
- " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
- " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
- " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
- " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
- " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
- " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
- " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
- " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
- " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
- " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
- " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
- " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
- " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache128_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips3 \n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
- " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
- " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
- " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
- " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
- " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
- " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
- " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
- " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
- " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
- " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
- " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
- " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
- " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
- " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
- " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#else
-/*
- * MIPS R6 changed the cache opcode and moved to a 8-bit offset field.
- * This means we now need to increment the base register before we flush
- * more cache lines
- */
-#define cache16_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
- " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
- " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
- " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
- " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
- " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
- " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
- " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
- " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
- " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
- " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
- " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
- " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
- " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
- " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
- " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
- " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache128_unroll32(base,op) \
- __asm__ __volatile__( \
- " .set push\n" \
- " .set noreorder\n" \
- " .set mips64r6\n" \
- " .set noat\n" \
- " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
- " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
- " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
- " .set pop\n" \
- : \
- : "r" (base), \
- "i" (op));
-#endif /* CONFIG_CPU_MIPSR6 */
-
-/*
- * Perform the cache operation specified by op using a user mode virtual
- * address while in kernel mode.
- */
-#define cache16_unroll32_user(base,op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
- " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
- " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
- " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
- " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
- " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
- " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
- " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
- " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
- " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
- " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
- " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
- " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache32_unroll32_user(base, op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
- " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
- " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
- " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
- " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
- " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
- " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
- " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
- " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
- " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
- " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
- " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
- " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
-
-#define cache64_unroll32_user(base, op) \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noreorder \n" \
- " .set mips0 \n" \
- " .set eva \n" \
- " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
- " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
- " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
- " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
- " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
- " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
- " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
- " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
- " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
- " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
- " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
- " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
- " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
- " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
- " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
- " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
- " .set pop \n" \
- : \
- : "r" (base), \
- "i" (op));
+#define cache_unroll(times, insn, op, addr, lsize) do { \
+ int i = 0; \
+ unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
+} while (0)
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
@@ -539,7 +216,8 @@ static inline void extra##blast_##pfx##cache##lsize(void) \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
@@ -548,7 +226,7 @@ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
unsigned long end = page + PAGE_SIZE; \
\
do { \
- cache##lsize##_unroll32(start, hitop); \
+ cache_unroll(32, kernel_cache, hitop, start, lsize); \
start += lsize * 32; \
} while (start < end); \
} \
@@ -565,7 +243,8 @@ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
@@ -596,7 +275,7 @@ static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
unsigned long end = page + PAGE_SIZE; \
\
do { \
- cache##lsize##_unroll32_user(start, hitop); \
+ cache_unroll(32, user_cache, hitop, start, lsize); \
start += lsize * 32; \
} while (start < end); \
}
@@ -688,7 +367,8 @@ static inline void blast_##pfx##cache##lsize##_node(long node) \
\
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
- cache##lsize##_unroll32(addr|ws, indexop); \
+ cache_unroll(32, kernel_cache, indexop, \
+ addr | ws, lsize); \
}
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
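
The hand-unrolled cache16/32/64/128_unroll32 macro family above is collapsed into the single cache_unroll() helper, which leans on unroll() and _cache_op() from <asm/unroll.h> (not part of this hunk). As a rough, host-runnable sketch only (stub cache op, made-up operand values), the expansion is equivalent to the loop below.

#include <stdio.h>

/* Stand-in for the real _cache_op()/kernel_cache asm wrappers. */
static void cache_op_stub(int op, unsigned long addr)
{
	printf("cache 0x%02x, 0x%lx\n", op, addr);
}

/* Loop-equivalent of cache_unroll(32, kernel_cache, op, addr, lsize). */
static void cache_unroll_sketch(int times, int op, unsigned long addr,
				unsigned long lsize)
{
	for (int i = 0; i < times; i++)
		cache_op_stub(op, addr + i * lsize);
}

int main(void)
{
	cache_unroll_sketch(32, 0x15, 0x9fc00000UL, 32);	/* hypothetical values */
	return 0;
}
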
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
deleted file mode 100644
index 2777148dbfc5..000000000000
--- a/arch/mips/include/asm/serial.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017 MIPS Tech, LLC
- */
-#ifndef __ASM__SERIAL_H
-#define __ASM__SERIAL_H
-
-#ifdef CONFIG_MIPS_GENERIC
-/*
- * Generic kernels cannot know a correct value for all platforms at
- * compile time. Set it to 0 to prevent 8250_early using it
- */
-#define BASE_BAUD 0
-#else
-#include <asm-generic/serial.h>
-#endif
-
-#endif /* __ASM__SERIAL_H */
diff --git a/arch/mips/include/asm/sgi/heart.h b/arch/mips/include/asm/sgi/heart.h
new file mode 100644
index 000000000000..c423221b4792
--- /dev/null
+++ b/arch/mips/include/asm/sgi/heart.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * HEART chip definitions
+ *
+ * Copyright (C) 2004-2007 Stanislaw Skowronek <skylark@unaligned.org>
+ * 2009 Johannes Dickgreber <tanzy@gmx.de>
+ * 2007-2015 Joshua Kinard <kumba@gentoo.org>
+ */
+#ifndef __ASM_SGI_HEART_H
+#define __ASM_SGI_HEART_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+
+/*
+ * There are 8 DIMM slots on an IP30 system
+ * board, which are grouped into four banks
+ */
+#define HEART_MEMORY_BANKS 4
+
+/* HEART can support up to four CPUs */
+#define HEART_MAX_CPUS 4
+
+#define HEART_XKPHYS_BASE ((void *)(IO_BASE | 0x000000000ff00000ULL))
+
+/**
+ * struct ip30_heart_regs - struct that maps IP30 HEART registers.
+ * @mode: HEART_MODE - purpose unknown, machine reset called from here.
+ * @sdram_mode: HEART_SDRAM_MODE - purpose unknown.
+ * @mem_refresh: HEART_MEM_REF - purpose unknown.
+ * @mem_req_arb: HEART_MEM_REQ_ARB - purpose unknown.
+ * @mem_cfg.q: union for 64bit access to HEART_MEMCFG - 4x 64bit registers.
+ * @mem_cfg.l: union for 32bit access to HEART_MEMCFG - 8x 32bit registers.
+ * @fc_mode: HEART_FC_MODE - purpose unknown, possibly for GFX flow control.
+ * @fc_timer_limit: HEART_FC_TIMER_LIMIT - purpose unknown.
+ * @fc_addr: HEART_FC0_ADDR, HEART_FC1_ADDR - purpose unknown.
+ * @fc_credit_cnt: HEART_FC0_CR_CNT, HEART_FC1_CR_CNT - purpose unknown.
+ * @fc_timer: HEART_FC0_TIMER, HEART_FC1_TIMER - purpose unknown.
+ * @status: HEART_STATUS - HEART status information.
+ * @bus_err_addr: HEART_BERR_ADDR - likely contains addr of recent SIGBUS.
+ * @bus_err_misc: HEART_BERR_MISC - purpose unknown.
+ * @mem_err_addr: HEART_MEMERR_ADDR - likely contains addr of recent mem err.
+ * @mem_err_data: HEART_MEMERR_DATA - purpose unknown.
+ * @piur_acc_err: HEART_PIUR_ACC_ERR - likely for access err to HEART regs.
+ * @mlan_clock_div: HEART_MLAN_CLK_DIV - MicroLAN clock divider.
+ * @mlan_ctrl: HEART_MLAN_CTL - MicroLAN control.
+ * @__pad0: 0x0f40 bytes of padding -> next HEART register 0x01000.
+ * @undefined: Undefined/diag register, write to it triggers PIUR_ACC_ERR.
+ * @__pad1: 0xeff8 bytes of padding -> next HEART register 0x10000.
+ * @imr: HEART_IMR0 to HEART_IMR3 - per-cpu interrupt mask register.
+ * @set_isr: HEART_SET_ISR - set interrupt status register.
+ * @clear_isr: HEART_CLR_ISR - clear interrupt status register.
+ * @isr: HEART_ISR - interrupt status register (read-only).
+ * @imsr: HEART_IMSR - purpose unknown.
+ * @cause: HEART_CAUSE - HEART cause information.
+ * @__pad2: 0xffb8 bytes of padding -> next HEART register 0x20000.
+ * @count: HEART_COUNT - 52-bit counter.
+ * @__pad3: 0xfff8 bytes of padding -> next HEART register 0x30000.
+ * @compare: HEART_COMPARE - 24-bit compare.
+ * @__pad4: 0xfff8 bytes of padding -> next HEART register 0x40000.
+ * @trigger: HEART_TRIGGER - purpose unknown.
+ * @__pad5: 0xfff8 bytes of padding -> next HEART register 0x50000.
+ * @cpuid: HEART_PRID - contains CPU ID of CPU currently accessing HEART.
+ * @__pad6: 0xfff8 bytes of padding -> next HEART register 0x60000.
+ * @sync: HEART_SYNC - purpose unknown.
+ *
+ * HEART is the main system controller ASIC for the IP30 system. It incorporates
+ * a memory controller, interrupt status/cause/set/clear management, basic
+ * timer with count/compare, and other functionality. For Linux, not all of
+ * HEART's functions are fully understood.
+ *
+ * Implementation note: all HEART registers are 64 bits wide, but the mem_cfg
+ * register only reports correct values if queried in 32-bit units. Hence the need
+ * for a union. Even though mem_cfg.l has 8 array slots, we only ever query
+ * up to 4 of those. IP30 has 8 DIMM slots arranged into 4 banks, w/ 2 DIMMs
+ * per bank. Each 32bit read accesses one of these banks. Perhaps HEART was
+ * designed to address up to 8 banks (16 DIMMs)? We may never know.
+ */
+struct ip30_heart_regs { /* 0x0ff00000 */
+ u64 mode; /* + 0x00000 */
+ /* Memory */
+ u64 sdram_mode; /* + 0x00008 */
+ u64 mem_refresh; /* + 0x00010 */
+ u64 mem_req_arb; /* + 0x00018 */
+ union {
+ u64 q[HEART_MEMORY_BANKS]; /* readq() */
+ u32 l[HEART_MEMORY_BANKS * 2]; /* readl() */
+ } mem_cfg; /* + 0x00020 */
+ /* Flow control (gfx?) */
+ u64 fc_mode; /* + 0x00040 */
+ u64 fc_timer_limit; /* + 0x00048 */
+ u64 fc_addr[2]; /* + 0x00050 */
+ u64 fc_credit_cnt[2]; /* + 0x00060 */
+ u64 fc_timer[2]; /* + 0x00070 */
+ /* Status */
+ u64 status; /* + 0x00080 */
+ /* Bus error */
+ u64 bus_err_addr; /* + 0x00088 */
+ u64 bus_err_misc; /* + 0x00090 */
+ /* Memory error */
+ u64 mem_err_addr; /* + 0x00098 */
+ u64 mem_err_data; /* + 0x000a0 */
+ /* Misc */
+ u64 piur_acc_err; /* + 0x000a8 */
+ u64 mlan_clock_div; /* + 0x000b0 */
+ u64 mlan_ctrl; /* + 0x000b8 */
+ u64 __pad0[0x01e8]; /* + 0x000c0 + 0x0f40 */
+ /* Undefined */
+ u64 undefined; /* + 0x01000 */
+ u64 __pad1[0x1dff]; /* + 0x01008 + 0xeff8 */
+ /* Interrupts */
+ u64 imr[HEART_MAX_CPUS]; /* + 0x10000 */
+ u64 set_isr; /* + 0x10020 */
+ u64 clear_isr; /* + 0x10028 */
+ u64 isr; /* + 0x10030 */
+ u64 imsr; /* + 0x10038 */
+ u64 cause; /* + 0x10040 */
+ u64 __pad2[0x1ff7]; /* + 0x10048 + 0xffb8 */
+ /* Timer */
+ u64 count; /* + 0x20000 */
+ u64 __pad3[0x1fff]; /* + 0x20008 + 0xfff8 */
+ u64 compare; /* + 0x30000 */
+ u64 __pad4[0x1fff]; /* + 0x30008 + 0xfff8 */
+ u64 trigger; /* + 0x40000 */
+ u64 __pad5[0x1fff]; /* + 0x40008 + 0xfff8 */
+ /* Misc */
+ u64 cpuid; /* + 0x50000 */
+ u64 __pad6[0x1fff]; /* + 0x50008 + 0xfff8 */
+ u64 sync; /* + 0x60000 */
+};
+
+
+/* For timer-related bits. */
+#define HEART_NS_PER_CYCLE 80
+#define HEART_CYCLES_PER_SEC (NSEC_PER_SEC / HEART_NS_PER_CYCLE)
+
+
+/*
+ * XXX: Everything below this comment will either go away or be cleaned
+ * up to fit in better with Linux. A lot of the bit definitions for
+ * HEART were derived from IRIX's sys/RACER/heart.h header file.
+ */
+
+/* HEART Masks */
+#define HEART_ATK_MASK 0x0007ffffffffffff /* HEART attack mask */
+#define HEART_ACK_ALL_MASK 0xffffffffffffffff /* Ack everything */
+#define HEART_CLR_ALL_MASK 0x0000000000000000 /* Clear all */
+#define HEART_BR_ERR_MASK 0x7ff8000000000000 /* BRIDGE error mask */
+#define HEART_CPU0_ERR_MASK 0x8ff8000000000000 /* CPU0 error mask */
+#define HEART_CPU1_ERR_MASK 0x97f8000000000000 /* CPU1 error mask */
+#define HEART_CPU2_ERR_MASK 0xa7f8000000000000 /* CPU2 error mask */
+#define HEART_CPU3_ERR_MASK 0xc7f8000000000000 /* CPU3 error mask */
+#define HEART_ERR_MASK 0x1ff /* HEART error mask */
+#define HEART_ERR_MASK_START 51 /* HEART error start */
+#define HEART_ERR_MASK_END 63 /* HEART error end */
+
+/* Bits in the HEART_MODE register. */
+#define HM_PROC_DISABLE_SHFT 60
+#define HM_PROC_DISABLE_MSK (0xfUL << HM_PROC_DISABLE_SHFT)
+#define HM_PROC_DISABLE(x)	(0x1UL << ((x) + HM_PROC_DISABLE_SHFT))
+#define HM_MAX_PSR (0x7UL << 57)
+#define HM_MAX_IOSR (0x7UL << 54)
+#define HM_MAX_PEND_IOSR (0x7UL << 51)
+#define HM_TRIG_SRC_SEL_MSK (0x7UL << 48)
+#define HM_TRIG_HEART_EXC (0x0UL << 48)
+#define HM_TRIG_REG_BIT (0x1UL << 48)
+#define HM_TRIG_SYSCLK (0x2UL << 48)
+#define HM_TRIG_MEMCLK_2X (0x3UL << 48)
+#define HM_TRIG_MEMCLK (0x4UL << 48)
+#define HM_TRIG_IOCLK (0x5UL << 48)
+#define HM_PIU_TEST_MODE (0xfUL << 40)
+#define HM_GP_FLAG_MSK (0xfUL << 36)
+#define HM_GP_FLAG(x) BIT((x) + 36)
+#define HM_MAX_PROC_HYST (0xfUL << 32)
+#define HM_LLP_WRST_AFTER_RST BIT(28)
+#define HM_LLP_LINK_RST BIT(27)
+#define HM_LLP_WARM_RST BIT(26)
+#define HM_COR_ECC_LCK BIT(25)
+#define HM_REDUCED_PWR BIT(24)
+#define HM_COLD_RST BIT(23)
+#define HM_SW_RST BIT(22)
+#define HM_MEM_FORCE_WR BIT(21)
+#define HM_DB_ERR_GEN BIT(20)
+#define HM_SB_ERR_GEN BIT(19)
+#define HM_CACHED_PIO_EN BIT(18)
+#define HM_CACHED_PROM_EN BIT(17)
+#define HM_PE_SYS_COR_ERE BIT(16)
+#define HM_GLOBAL_ECC_EN BIT(15)
+#define HM_IO_COH_EN BIT(14)
+#define HM_INT_EN BIT(13)
+#define HM_DATA_CHK_EN BIT(12)
+#define HM_REF_EN BIT(11)
+#define HM_BAD_SYSWR_ERE BIT(10)
+#define HM_BAD_SYSRD_ERE BIT(9)
+#define HM_SYSSTATE_ERE BIT(8)
+#define HM_SYSCMD_ERE BIT(7)
+#define HM_NCOR_SYS_ERE BIT(6)
+#define HM_COR_SYS_ERE BIT(5)
+#define HM_DATA_ELMNT_ERE BIT(4)
+#define HM_MEM_ADDR_PROC_ERE BIT(3)
+#define HM_MEM_ADDR_IO_ERE BIT(2)
+#define HM_NCOR_MEM_ERE BIT(1)
+#define HM_COR_MEM_ERE BIT(0)
+
+/* Bits in the HEART_MEM_REF register. */
+#define HEART_MEMREF_REFS(x) ((0xfUL & (x)) << 16)
+#define HEART_MEMREF_PERIOD(x) ((0xffffUL & (x)))
+#define HEART_MEMREF_REFS_VAL HEART_MEMREF_REFS(8)
+#define HEART_MEMREF_PERIOD_VAL HEART_MEMREF_PERIOD(0x4000)
+#define HEART_MEMREF_VAL (HEART_MEMREF_REFS_VAL | \
+ HEART_MEMREF_PERIOD_VAL)
+
+/* Bits in the HEART_MEM_REQ_ARB register. */
+#define HEART_MEMARB_IODIS (1 << 20)
+#define HEART_MEMARB_MAXPMWRQS (15 << 16)
+#define HEART_MEMARB_MAXPMRRQS (15 << 12)
+#define HEART_MEMARB_MAXPMRQS (15 << 8)
+#define HEART_MEMARB_MAXRRRQS (15 << 4)
+#define HEART_MEMARB_MAXGBRRQS (15)
+
+/* Bits in the HEART_MEMCFG<x> registers. */
+#define HEART_MEMCFG_VALID 0x80000000 /* Bank is valid */
+#define HEART_MEMCFG_DENSITY 0x01c00000 /* Mem density */
+#define HEART_MEMCFG_SIZE_MASK 0x003f0000 /* Mem size mask */
+#define HEART_MEMCFG_ADDR_MASK 0x000001ff /* Base addr mask */
+#define HEART_MEMCFG_SIZE_SHIFT 16 /* Mem size shift */
+#define HEART_MEMCFG_DENSITY_SHIFT 22 /* Density Shift */
+#define HEART_MEMCFG_UNIT_SHIFT 25 /* Unit Shift, 32MB */
+
+/* Bits in the HEART_STATUS register */
+#define HEART_STAT_HSTL_SDRV BIT(14)
+#define HEART_STAT_FC_CR_OUT(x) BIT((x) + 12)
+#define HEART_STAT_DIR_CNNCT BIT(11)
+#define HEART_STAT_TRITON BIT(10)
+#define HEART_STAT_R4K BIT(9)
+#define HEART_STAT_BIG_ENDIAN BIT(8)
+#define HEART_STAT_PROC_SHFT 4
+#define HEART_STAT_PROC_MSK (0xfUL << HEART_STAT_PROC_SHFT)
+#define HEART_STAT_PROC_ACTIVE(x) (0x1UL << ((x) + HEART_STAT_PROC_SHFT))
+#define HEART_STAT_WIDGET_ID 0xf
+
+/* Bits in the HEART_CAUSE register */
+#define HC_PE_SYS_COR_ERR_MSK (0xfUL << 60)
+#define HC_PE_SYS_COR_ERR(x) BIT((x) + 60)
+#define HC_PIOWDB_OFLOW BIT(44)
+#define HC_PIORWRB_OFLOW BIT(43)
+#define HC_PIUR_ACC_ERR BIT(42)
+#define HC_BAD_SYSWR_ERR BIT(41)
+#define HC_BAD_SYSRD_ERR BIT(40)
+#define HC_SYSSTATE_ERR_MSK (0xfUL << 36)
+#define HC_SYSSTATE_ERR(x) BIT((x) + 36)
+#define HC_SYSCMD_ERR_MSK (0xfUL << 32)
+#define HC_SYSCMD_ERR(x) BIT((x) + 32)
+#define HC_NCOR_SYSAD_ERR_MSK (0xfUL << 28)
+#define HC_NCOR_SYSAD_ERR(x) BIT((x) + 28)
+#define HC_COR_SYSAD_ERR_MSK (0xfUL << 24)
+#define HC_COR_SYSAD_ERR(x) BIT((x) + 24)
+#define HC_DATA_ELMNT_ERR_MSK (0xfUL << 20)
+#define HC_DATA_ELMNT_ERR(x) BIT((x) + 20)
+#define HC_WIDGET_ERR BIT(16)
+#define HC_MEM_ADDR_ERR_PROC_MSK (0xfUL << 4)
+#define HC_MEM_ADDR_ERR_PROC(x) BIT((x) + 4)
+#define HC_MEM_ADDR_ERR_IO BIT(2)
+#define HC_NCOR_MEM_ERR BIT(1)
+#define HC_COR_MEM_ERR BIT(0)
+
+extern struct ip30_heart_regs __iomem *heart_regs;
+
+#define heart_read ____raw_readq
+#define heart_write ____raw_writeq
+
+#endif /* __ASM_SGI_HEART_H */
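
For orientation only, and not part of this patch: a minimal sketch of sampling the free-running HEART counter with the accessors defined above, assuming heart_regs has already been pointed at HEART_XKPHYS_BASE during platform setup.

#include <asm/sgi/heart.h>

/* Sketch: read the 52-bit HEART counter and scale it to nanoseconds. */
static u64 heart_counter_ns(void)
{
	return heart_read(&heart_regs->count) * HEART_NS_PER_CYCLE;
}
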
diff --git a/arch/mips/include/asm/sgi/sgi.h b/arch/mips/include/asm/sgi/sgi.h
deleted file mode 100644
index b61557151e3f..000000000000
--- a/arch/mips/include/asm/sgi/sgi.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * sgi.h: Definitions specific to SGI machines.
- *
- * Copyright (C) 1996 David S. Miller (dm@sgi.com)
- */
-#ifndef _ASM_SGI_SGI_H
-#define _ASM_SGI_SGI_H
-
-/* UP=UniProcessor MP=MultiProcessor(capable) */
-enum sgi_mach {
- ip4, /* R2k UP */
- ip5, /* R2k MP */
- ip6, /* R3k UP */
- ip7, /* R3k MP */
- ip9, /* R3k UP */
- ip12, /* R3kA UP, Indigo */
- ip15, /* R3kA MP */
- ip17, /* R4K UP */
- ip19, /* R4K MP */
- ip20, /* R4K UP, Indigo */
- ip21, /* R8k/TFP MP */
- ip22, /* R4x00 UP, Indy, Indigo2 */
- ip25, /* R10k MP */
- ip26, /* R8k/TFP UP, Indigo2 */
- ip27, /* R10k MP, R12k MP, R14k MP, Origin 200/2k, Onyx2 */
- ip28, /* R10k UP, Indigo2 Impact R10k */
- ip30, /* R10k MP, R12k MP, R14k MP, Octane */
- ip32, /* R5k UP, RM5200 UP, RM7k UP, R10k UP, R12k UP, O2 */
- ip35, /* R14k MP, R16k MP, Origin 300/3k, Onyx3, Fuel, Tezro */
-};
-
-extern enum sgi_mach sgimach;
-extern void sgi_sysinit(void);
-
-/* Many I/O space registers are byte sized and are contained within
- * one byte per word, specifically the MSB, this macro helps out.
- */
-#ifdef __MIPSEL__
-#define SGI_MSB(regaddr) (regaddr)
-#else
-#define SGI_MSB(regaddr) ((regaddr) | 0x3)
-#endif
-
-#endif /* _ASM_SGI_SGI_H */
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index 0d9fad5915fe..80f900417f7e 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -15,14 +15,6 @@
#include <asm/sgiarcs.h>
extern struct linux_romvec *romvec;
-extern int prom_argc;
-
-extern LONG *_prom_argv, *_prom_envp;
-
-/* A 32-bit ARC PROM pass arguments and environment as 32-bit pointer.
- These macros take care of sign extension. */
-#define prom_argv(index) ((char *) (long) _prom_argv[(index)])
-#define prom_argc(index) ((char *) (long) _prom_argc[(index)])
extern int prom_flags;
@@ -47,12 +39,6 @@ extern void prom_meminit(void);
/* PROM device tree library routines. */
#define PROM_NULL_COMPONENT ((pcomponent *) 0)
-/* Get sibling component of THIS. */
-extern pcomponent *ArcGetPeer(pcomponent *this);
-
-/* Get child component of THIS. */
-extern pcomponent *ArcGetChild(pcomponent *this);
-
/* This is called at prom_init time to identify the
* ARC architecture we are running on
*/
@@ -60,22 +46,16 @@ extern void prom_identify_arch(void);
/* Environment variable routines. */
extern PCHAR ArcGetEnvironmentVariable(PCHAR name);
-extern LONG ArcSetEnvironmentVariable(PCHAR name, PCHAR value);
/* ARCS command line parsing. */
-extern void prom_init_cmdline(void);
+extern void prom_init_cmdline(int argc, LONG *argv);
/* File operations. */
extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
/* Misc. routines. */
-extern VOID ArcHalt(VOID) __noreturn;
-extern VOID ArcPowerDown(VOID) __noreturn;
-extern VOID ArcRestart(VOID) __noreturn;
-extern VOID ArcReboot(VOID) __noreturn;
extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
-extern VOID ArcFlushAllCaches(VOID);
extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
#endif /* _ASM_SGIALIB_H */
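
With the _prom_argv/_prom_envp globals removed, callers now hand the ARC argument block to prom_init_cmdline() directly. A minimal sketch, assuming argc/argv arrive in the usual fw_arg0/fw_arg1 bootloader registers; the surrounding prom_init() body is illustrative and not taken from this patch.

#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/sgialib.h>

/* Sketch: feed the firmware-provided argument block straight through. */
void __init prom_init(void)
{
	prom_init_cmdline((int)fw_arg0, (LONG *)fw_arg1);
	prom_identify_arch();
	prom_meminit();
}
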
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 105a9479ac5f..e1512cab180b 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -12,6 +12,8 @@
#ifndef _ASM_SGIARCS_H
#define _ASM_SGIARCS_H
+#include <linux/kernel.h>
+
#include <asm/types.h>
#include <asm/fw/arc/types.h>
@@ -368,110 +370,65 @@ struct linux_smonblock {
#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
-#define __arc_clobbers \
- "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
- "$12", "$13", "$14", "$15", "$16", "$24", "$25", "$31"
+extern long call_o32(long vec, void *stack, ...);
+
+extern u64 o32_stk[4096];
+#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define ARC_CALL0(dest) \
({ long __res; \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec) \
- : __arc_clobbers, "$4", "$5", "$6", "$7"); \
- (unsigned long) __res; \
+ __res = call_o32(__vec, O32_STK); \
+ __res; \
})
#define ARC_CALL1(dest, a1) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
+ int __a1 = (int) (long) (a1); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1) \
- : __arc_clobbers, "$5", "$6", "$7"); \
- (unsigned long) __res; \
+ __res = call_o32(__vec, O32_STK, __a1); \
+ __res; \
})
#define ARC_CALL2(dest, a1, a2) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2) \
- : __arc_clobbers, "$6", "$7"); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2); \
__res; \
})
#define ARC_CALL3(dest, a1, a2, a3) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3) \
- : __arc_clobbers, "$7"); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3); \
__res; \
})
#define ARC_CALL4(dest, a1, a2, a3, a4) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
- register signed int __a4 __asm__("$7") = (int) (long) (a4); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
- "r" (__a4) \
- : __arc_clobbers); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4); \
__res; \
})
-#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
+#define ARC_CALL5(dest, a1, a2, a3, a4, a5) \
({ long __res; \
- register signed int __a1 __asm__("$4") = (int) (long) (a1); \
- register signed int __a2 __asm__("$5") = (int) (long) (a2); \
- register signed int __a3 __asm__("$6") = (int) (long) (a3); \
- register signed int __a4 __asm__("$7") = (int) (long) (a4); \
- register signed int __a5 = (int) (long) (a5); \
+ int __a1 = (int) (long) (a1); \
+ int __a2 = (int) (long) (a2); \
+ int __a3 = (int) (long) (a3); \
+ int __a4 = (int) (long) (a4); \
+ int __a5 = (int) (long) (a5); \
long __vec = (long) romvec->dest; \
- __asm__ __volatile__( \
- "dsubu\t$29, 32\n\t" \
- "sw\t%7, 16($29)\n\t" \
- "jalr\t%1\n\t" \
- "daddu\t$29, 32\n\t" \
- "move\t%0, $2" \
- : "=r" (__res), "=r" (__vec) \
- : "1" (__vec), \
- "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
- "r" (__a5) \
- : __arc_clobbers); \
+ __res = call_o32(__vec, O32_STK, __a1, __a2, __a3, __a4, __a5); \
__res; \
})
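
All five ARC_CALLn() variants now funnel through call_o32(), which switches to the dedicated o32_stk and handles the 64-to-32-bit calling convention in one place instead of per-macro inline asm. A usage sketch follows; the 'write' romvec member name is an assumption, as it is not shown in this hunk.

#include <asm/sgialib.h>
#include <asm/sgiarcs.h>

/* Sketch: a PROM file-I/O wrapper reduces to one ARC_CALL4() expansion,
 * i.e. a call_o32() trampoline onto the o32 stack ('write' member assumed). */
LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt)
{
	return ARC_CALL4(write, fd, buf, num, cnt);
}
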
diff --git a/arch/mips/include/asm/sn/agent.h b/arch/mips/include/asm/sn/agent.h
index e33d09293019..7e9b3271737a 100644
--- a/arch/mips/include/asm/sn/agent.h
+++ b/arch/mips/include/asm/sn/agent.h
@@ -26,7 +26,7 @@
#if defined(CONFIG_SGI_IP27)
#define HUB_NIC_ADDR(_cpuid) \
- REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
+ REMOTE_HUB_ADDR(cpu_to_node(_cpuid), \
MD_MLAN_CTL)
#endif
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index 3f1fb1454749..9a9682543e89 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -19,44 +19,10 @@
#define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid)
#define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice)
-#define makespnum(_nasid, _slice) \
- (((_nasid) << CPUS_PER_NODE_SHFT) | (_slice))
#define INVALID_NASID (nasid_t)-1
-#define INVALID_CNODEID (cnodeid_t)-1
#define INVALID_PNODEID (pnodeid_t)-1
#define INVALID_MODULE (moduleid_t)-1
#define INVALID_PARTID (partid_t)-1
-extern nasid_t get_nasid(void);
-extern cnodeid_t get_cpu_cnode(cpuid_t);
-extern int get_cpu_slice(cpuid_t);
-
-/*
- * NO ONE should access these arrays directly. The only reason we refer to
- * them here is to avoid the procedure call that would be required in the
- * macros below. (Really want private data members here :-)
- */
-extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
-extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
-
-/*
- * These macros are used by various parts of the kernel to convert
- * between the three different kinds of node numbering. At least some
- * of them may change to procedure calls in the future, but the macros
- * will continue to work. Don't use the arrays above directly.
- */
-
-#define NASID_TO_REGION(nnode) \
- ((nnode) >> \
- (is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
-
-extern cnodeid_t nasid_to_compact_node[MAX_NASIDS];
-extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
-extern cnodeid_t cpuid_to_compact_node[MAXCPUS];
-
-#define NASID_TO_COMPACT_NODEID(nnode) (nasid_to_compact_node[nnode])
-#define COMPACT_TO_NASID_NODEID(cnode) (compact_to_nasid_node[cnode])
-#define CPUID_TO_COMPACT_NODEID(cpu) (cpuid_to_compact_node[(cpu)])
-
#endif /* _ASM_SN_ARCH_H */
diff --git a/arch/mips/include/asm/sn/gda.h b/arch/mips/include/asm/sn/gda.h
index 85fa1b5f639d..d52f81620661 100644
--- a/arch/mips/include/asm/sn/gda.h
+++ b/arch/mips/include/asm/sn/gda.h
@@ -60,9 +60,7 @@ typedef struct gda {
/* Pointer to a mask of nodes with copies
* of the kernel. */
char g_padding[56]; /* pad out to 128 bytes */
- nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
- * indexed by cnodeid.
- */
+ nasid_t g_nasidtable[MAX_NUMNODES]; /* NASID of each node */
} gda_t;
#define GDA ((gda_t*) GDA_ADDR(get_nasid()))
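
Since g_nasidtable is now indexed by the generic NUMA node id, a node's NASID can be looked up directly from the GDA. A minimal sketch, not taken from this patch:

#include <asm/sn/gda.h>

/* Sketch: map a NUMA node id to its NASID via the global data area. */
static nasid_t gda_node_to_nasid(int node)
{
	return GDA->g_nasidtable[node];
}
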
diff --git a/arch/mips/include/asm/sn/hub.h b/arch/mips/include/asm/sn/hub.h
deleted file mode 100644
index 338f7eed74f1..000000000000
--- a/arch/mips/include/asm/sn/hub.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SN_HUB_H
-#define __ASM_SN_HUB_H
-
-#include <linux/types.h>
-#include <linux/cpumask.h>
-#include <asm/sn/types.h>
-#include <asm/sn/io.h>
-#include <asm/sn/klkernvars.h>
-#include <asm/xtalk/xtalk.h>
-
-/* ip27-hubio.c */
-extern unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
- unsigned long xtalk_addr, size_t size);
-extern void hub_pio_init(cnodeid_t cnode);
-
-#endif /* __ASM_SN_HUB_H */
diff --git a/arch/mips/include/asm/sn/intr.h b/arch/mips/include/asm/sn/intr.h
index fc1348193957..3d6954d370dc 100644
--- a/arch/mips/include/asm/sn/intr.h
+++ b/arch/mips/include/asm/sn/intr.h
@@ -8,15 +8,6 @@
#ifndef __ASM_SN_INTR_H
#define __ASM_SN_INTR_H
-/* Number of interrupt levels associated with each interrupt register. */
-#define N_INTPEND_BITS 64
-
-#define INT_PEND0_BASELVL 0
-#define INT_PEND1_BASELVL 64
-
-#define N_INTPENDJUNK_BITS 8
-#define INTPENDJUNK_CLRBIT 0x80
-
/*
* Macros to manipulate the interrupt register on the calling hub chip.
*/
@@ -84,14 +75,6 @@ do { \
#define CPU_RESCHED_B_IRQ 8
#define CPU_CALL_A_IRQ 9
#define CPU_CALL_B_IRQ 10
-#define MSC_MESG_INTR 11
-#define BASE_PCI_IRQ 12
-
-/*
- * INT_PEND0 again, bits determined by hardware / hardcoded:
- */
-#define SDISK_INTR 63 /* SABLE name */
-#define IP_PEND0_6_63 63 /* What is this bit? */
/*
* INT_PEND1 hard-coded bits:
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index 25c8dccab51f..2c09c17cadcd 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -3,169 +3,161 @@
* Copyright (C) 1999, 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
-#ifndef _IOC3_H
-#define _IOC3_H
+#ifndef MIPS_SN_IOC3_H
+#define MIPS_SN_IOC3_H
#include <linux/types.h>
+/* serial port register map */
+struct ioc3_serialregs {
+ u32 sscr;
+ u32 stpir;
+ u32 stcir;
+ u32 srpir;
+ u32 srcir;
+ u32 srtr;
+ u32 shadow;
+};
+
/* SUPERIO uart register map */
-typedef volatile struct ioc3_uartregs {
+struct ioc3_uartregs {
+ u8 iu_lcr;
union {
- volatile u8 rbr; /* read only, DLAB == 0 */
- volatile u8 thr; /* write only, DLAB == 0 */
- volatile u8 dll; /* DLAB == 1 */
- } u1;
+ u8 iu_iir; /* read only */
+ u8 iu_fcr; /* write only */
+ };
union {
- volatile u8 ier; /* DLAB == 0 */
- volatile u8 dlm; /* DLAB == 1 */
- } u2;
+ u8 iu_ier; /* DLAB == 0 */
+ u8 iu_dlm; /* DLAB == 1 */
+ };
union {
- volatile u8 iir; /* read only */
- volatile u8 fcr; /* write only */
- } u3;
- volatile u8 iu_lcr;
- volatile u8 iu_mcr;
- volatile u8 iu_lsr;
- volatile u8 iu_msr;
- volatile u8 iu_scr;
-} ioc3_uregs_t;
-
-#define iu_rbr u1.rbr
-#define iu_thr u1.thr
-#define iu_dll u1.dll
-#define iu_ier u2.ier
-#define iu_dlm u2.dlm
-#define iu_iir u3.iir
-#define iu_fcr u3.fcr
+ u8 iu_rbr; /* read only, DLAB == 0 */
+ u8 iu_thr; /* write only, DLAB == 0 */
+ u8 iu_dll; /* DLAB == 1 */
+ };
+ u8 iu_scr;
+ u8 iu_msr;
+ u8 iu_lsr;
+ u8 iu_mcr;
+};
struct ioc3_sioregs {
- volatile u8 fill[0x141]; /* starts at 0x141 */
+ u8 fill[0x141]; /* starts at 0x141 */
- volatile u8 uartc;
- volatile u8 kbdcg;
+ u8 kbdcg;
+ u8 uartc;
- volatile u8 fill0[0x150 - 0x142 - 1];
+ u8 fill0[0x151 - 0x142 - 1];
- volatile u8 pp_data;
- volatile u8 pp_dsr;
- volatile u8 pp_dcr;
+ u8 pp_dcr;
+ u8 pp_dsr;
+ u8 pp_data;
- volatile u8 fill1[0x158 - 0x152 - 1];
+ u8 fill1[0x159 - 0x153 - 1];
- volatile u8 pp_fifa;
- volatile u8 pp_cfgb;
- volatile u8 pp_ecr;
+ u8 pp_ecr;
+ u8 pp_cfgb;
+ u8 pp_fifa;
- volatile u8 fill2[0x168 - 0x15a - 1];
+ u8 fill2[0x16a - 0x15b - 1];
- volatile u8 rtcad;
- volatile u8 rtcdat;
+ u8 rtcdat;
+ u8 rtcad;
- volatile u8 fill3[0x170 - 0x169 - 1];
+ u8 fill3[0x170 - 0x16b - 1];
struct ioc3_uartregs uartb; /* 0x20170 */
struct ioc3_uartregs uarta; /* 0x20178 */
};
+struct ioc3_ethregs {
+ u32 emcr; /* 0x000f0 */
+ u32 eisr; /* 0x000f4 */
+ u32 eier; /* 0x000f8 */
+ u32 ercsr; /* 0x000fc */
+ u32 erbr_h; /* 0x00100 */
+ u32 erbr_l; /* 0x00104 */
+ u32 erbar; /* 0x00108 */
+ u32 ercir; /* 0x0010c */
+ u32 erpir; /* 0x00110 */
+ u32 ertr; /* 0x00114 */
+ u32 etcsr; /* 0x00118 */
+ u32 ersr; /* 0x0011c */
+ u32 etcdc; /* 0x00120 */
+ u32 ebir; /* 0x00124 */
+ u32 etbr_h; /* 0x00128 */
+ u32 etbr_l; /* 0x0012c */
+ u32 etcir; /* 0x00130 */
+ u32 etpir; /* 0x00134 */
+ u32 emar_h; /* 0x00138 */
+ u32 emar_l; /* 0x0013c */
+ u32 ehar_h; /* 0x00140 */
+ u32 ehar_l; /* 0x00144 */
+ u32 micr; /* 0x00148 */
+ u32 midr_r; /* 0x0014c */
+ u32 midr_w; /* 0x00150 */
+};
+
+struct ioc3_serioregs {
+ u32 km_csr; /* 0x0009c */
+ u32 k_rd; /* 0x000a0 */
+ u32 m_rd; /* 0x000a4 */
+ u32 k_wd; /* 0x000a8 */
+ u32 m_wd; /* 0x000ac */
+};
+
/* Register layout of IOC3 in configuration space. */
struct ioc3 {
- volatile u32 pad0[7]; /* 0x00000 */
- volatile u32 sio_ir; /* 0x0001c */
- volatile u32 sio_ies; /* 0x00020 */
- volatile u32 sio_iec; /* 0x00024 */
- volatile u32 sio_cr; /* 0x00028 */
- volatile u32 int_out; /* 0x0002c */
- volatile u32 mcr; /* 0x00030 */
+ /* PCI Config Space registers */
+ u32 pci_id; /* 0x00000 */
+ u32 pci_scr; /* 0x00004 */
+ u32 pci_rev; /* 0x00008 */
+ u32 pci_lat; /* 0x0000c */
+ u32 pci_addr; /* 0x00010 */
+ u32 pci_err_addr_l; /* 0x00014 */
+ u32 pci_err_addr_h; /* 0x00018 */
+
+ u32 sio_ir; /* 0x0001c */
+ u32 sio_ies; /* 0x00020 */
+ u32 sio_iec; /* 0x00024 */
+ u32 sio_cr; /* 0x00028 */
+ u32 int_out; /* 0x0002c */
+ u32 mcr; /* 0x00030 */
/* General Purpose I/O registers */
- volatile u32 gpcr_s; /* 0x00034 */
- volatile u32 gpcr_c; /* 0x00038 */
- volatile u32 gpdr; /* 0x0003c */
- volatile u32 gppr_0; /* 0x00040 */
- volatile u32 gppr_1; /* 0x00044 */
- volatile u32 gppr_2; /* 0x00048 */
- volatile u32 gppr_3; /* 0x0004c */
- volatile u32 gppr_4; /* 0x00050 */
- volatile u32 gppr_5; /* 0x00054 */
- volatile u32 gppr_6; /* 0x00058 */
- volatile u32 gppr_7; /* 0x0005c */
- volatile u32 gppr_8; /* 0x00060 */
- volatile u32 gppr_9; /* 0x00064 */
- volatile u32 gppr_10; /* 0x00068 */
- volatile u32 gppr_11; /* 0x0006c */
- volatile u32 gppr_12; /* 0x00070 */
- volatile u32 gppr_13; /* 0x00074 */
- volatile u32 gppr_14; /* 0x00078 */
- volatile u32 gppr_15; /* 0x0007c */
+ u32 gpcr_s; /* 0x00034 */
+ u32 gpcr_c; /* 0x00038 */
+ u32 gpdr; /* 0x0003c */
+ u32 gppr[16]; /* 0x00040 */
/* Parallel Port Registers */
- volatile u32 ppbr_h_a; /* 0x00080 */
- volatile u32 ppbr_l_a; /* 0x00084 */
- volatile u32 ppcr_a; /* 0x00088 */
- volatile u32 ppcr; /* 0x0008c */
- volatile u32 ppbr_h_b; /* 0x00090 */
- volatile u32 ppbr_l_b; /* 0x00094 */
- volatile u32 ppcr_b; /* 0x00098 */
+ u32 ppbr_h_a; /* 0x00080 */
+ u32 ppbr_l_a; /* 0x00084 */
+ u32 ppcr_a; /* 0x00088 */
+ u32 ppcr; /* 0x0008c */
+ u32 ppbr_h_b; /* 0x00090 */
+ u32 ppbr_l_b; /* 0x00094 */
+ u32 ppcr_b; /* 0x00098 */
/* Keyboard and Mouse Registers */
- volatile u32 km_csr; /* 0x0009c */
- volatile u32 k_rd; /* 0x000a0 */
- volatile u32 m_rd; /* 0x000a4 */
- volatile u32 k_wd; /* 0x000a8 */
- volatile u32 m_wd; /* 0x000ac */
+ struct ioc3_serioregs serio;
/* Serial Port Registers */
- volatile u32 sbbr_h; /* 0x000b0 */
- volatile u32 sbbr_l; /* 0x000b4 */
- volatile u32 sscr_a; /* 0x000b8 */
- volatile u32 stpir_a; /* 0x000bc */
- volatile u32 stcir_a; /* 0x000c0 */
- volatile u32 srpir_a; /* 0x000c4 */
- volatile u32 srcir_a; /* 0x000c8 */
- volatile u32 srtr_a; /* 0x000cc */
- volatile u32 shadow_a; /* 0x000d0 */
- volatile u32 sscr_b; /* 0x000d4 */
- volatile u32 stpir_b; /* 0x000d8 */
- volatile u32 stcir_b; /* 0x000dc */
- volatile u32 srpir_b; /* 0x000e0 */
- volatile u32 srcir_b; /* 0x000e4 */
- volatile u32 srtr_b; /* 0x000e8 */
- volatile u32 shadow_b; /* 0x000ec */
-
- /* Ethernet Registers */
- volatile u32 emcr; /* 0x000f0 */
- volatile u32 eisr; /* 0x000f4 */
- volatile u32 eier; /* 0x000f8 */
- volatile u32 ercsr; /* 0x000fc */
- volatile u32 erbr_h; /* 0x00100 */
- volatile u32 erbr_l; /* 0x00104 */
- volatile u32 erbar; /* 0x00108 */
- volatile u32 ercir; /* 0x0010c */
- volatile u32 erpir; /* 0x00110 */
- volatile u32 ertr; /* 0x00114 */
- volatile u32 etcsr; /* 0x00118 */
- volatile u32 ersr; /* 0x0011c */
- volatile u32 etcdc; /* 0x00120 */
- volatile u32 ebir; /* 0x00124 */
- volatile u32 etbr_h; /* 0x00128 */
- volatile u32 etbr_l; /* 0x0012c */
- volatile u32 etcir; /* 0x00130 */
- volatile u32 etpir; /* 0x00134 */
- volatile u32 emar_h; /* 0x00138 */
- volatile u32 emar_l; /* 0x0013c */
- volatile u32 ehar_h; /* 0x00140 */
- volatile u32 ehar_l; /* 0x00144 */
- volatile u32 micr; /* 0x00148 */
- volatile u32 midr_r; /* 0x0014c */
- volatile u32 midr_w; /* 0x00150 */
- volatile u32 pad1[(0x20000 - 0x00154) / 4];
+ u32 sbbr_h; /* 0x000b0 */
+ u32 sbbr_l; /* 0x000b4 */
+ struct ioc3_serialregs port_a;
+ struct ioc3_serialregs port_b;
+
+ /* Ethernet Registers */
+ struct ioc3_ethregs eth;
+ u32 pad1[(0x20000 - 0x00154) / 4];
/* SuperIO Registers XXX */
struct ioc3_sioregs sregs; /* 0x20000 */
- volatile u32 pad2[(0x40000 - 0x20180) / 4];
+ u32 pad2[(0x40000 - 0x20180) / 4];
/* SSRAM Diagnostic Access */
- volatile u32 ssram[(0x80000 - 0x40000) / 4];
+ u32 ssram[(0x80000 - 0x40000) / 4];
/* Bytebus device offsets
0x80000 - Access to the generic devices selected with DEV0
@@ -178,6 +170,20 @@ struct ioc3 {
0xFFFFF bytebus DEV_SEL_3 */
};
+
+#define PCI_LAT 0xc /* Latency Timer */
+#define PCI_SCR_DROP_MODE_EN 0x00008000 /* drop pios on parity err */
+#define UARTA_BASE 0x178
+#define UARTB_BASE 0x170
+
+/*
+ * Bytebus device space
+ */
+#define IOC3_BYTEBUS_DEV0 0x80000L
+#define IOC3_BYTEBUS_DEV1 0xa0000L
+#define IOC3_BYTEBUS_DEV2 0xc0000L
+#define IOC3_BYTEBUS_DEV3 0xe0000L
+
/*
* Ethernet RX Buffer
*/
@@ -233,28 +239,20 @@ struct ioc3_etxd {
#define ETXD_B2CNT_MASK 0x7ff00000
#define ETXD_B2CNT_SHIFT 20
-/*
- * Bytebus device space
- */
-#define IOC3_BYTEBUS_DEV0 0x80000L
-#define IOC3_BYTEBUS_DEV1 0xa0000L
-#define IOC3_BYTEBUS_DEV2 0xc0000L
-#define IOC3_BYTEBUS_DEV3 0xe0000L
-
/* ------------------------------------------------------------------------- */
/* Superio Registers (PIO Access) */
#define IOC3_SIO_BASE 0x20000
#define IOC3_SIO_UARTC (IOC3_SIO_BASE+0x141) /* UART Config */
#define IOC3_SIO_KBDCG (IOC3_SIO_BASE+0x142) /* KBD Config */
-#define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */
+#define IOC3_SIO_PP_BASE (IOC3_SIO_BASE+PP_BASE) /* Parallel Port */
#define IOC3_SIO_RTC_BASE (IOC3_SIO_BASE+0x168) /* Real Time Clock */
#define IOC3_SIO_UB_BASE (IOC3_SIO_BASE+UARTB_BASE) /* UART B */
#define IOC3_SIO_UA_BASE (IOC3_SIO_BASE+UARTA_BASE) /* UART A */
/* SSRAM Diagnostic Access */
#define IOC3_SSRAM IOC3_RAM_OFF /* base of SSRAM diagnostic access */
-#define IOC3_SSRAM_LEN 0x40000 /* 256kb (address space size, may not be fully populated) */
+#define IOC3_SSRAM_LEN 0x40000 /* 256kb (addrspc sz, may not be populated) */
#define IOC3_SSRAM_DM 0x0000ffff /* data mask */
#define IOC3_SSRAM_PM 0x00010000 /* parity mask */
@@ -294,10 +292,10 @@ struct ioc3_etxd {
SIO_IR to assert */
#define KM_CSR_M_TO_EN 0x00080000 /* KM_CSR_M_TO + KM_CSR_M_TO_EN = cause
SIO_IR to assert */
-#define KM_CSR_K_CLAMP_ONE 0x00100000 /* Pull K_CLK low after rec. one char */
-#define KM_CSR_M_CLAMP_ONE 0x00200000 /* Pull M_CLK low after rec. one char */
-#define KM_CSR_K_CLAMP_THREE 0x00400000 /* Pull K_CLK low after rec. three chars */
-#define KM_CSR_M_CLAMP_THREE 0x00800000 /* Pull M_CLK low after rec. three char */
+#define KM_CSR_K_CLAMP_1 0x00100000 /* Pull K_CLK low aft recv 1 char */
+#define KM_CSR_M_CLAMP_1 0x00200000 /* Pull M_CLK low aft recv 1 char */
+#define KM_CSR_K_CLAMP_3 0x00400000 /* Pull K_CLK low aft recv 3 chars */
+#define KM_CSR_M_CLAMP_3 0x00800000 /* Pull M_CLK low aft recv 3 chars */
/* bitmasks for IOC3_K_RD and IOC3_M_RD */
#define KM_RD_DATA_2 0x000000ff /* 3rd char recvd since last read */
@@ -440,10 +438,6 @@ struct ioc3_etxd {
SIO_IR_PP_INTB | SIO_IR_PP_MEMERR)
#define SIO_IR_RT (SIO_IR_RT_INT | SIO_IR_GEN_INT1)
-/* macro to load pending interrupts */
-#define IOC3_PENDING_INTRS(mem) (PCI_INW(&((mem)->sio_ir)) & \
- PCI_INW(&((mem)->sio_ies_ro)))
-
/* bitmasks for SIO_CR */
#define SIO_CR_SIO_RESET 0x00000001 /* reset the SIO */
#define SIO_CR_SER_A_BASE 0x000000fe /* DMA poll addr port A */
@@ -500,10 +494,11 @@ struct ioc3_etxd {
#define GPCR_UARTB_MODESEL 0x40 /* pin is output to port B mode sel */
#define GPCR_UARTA_MODESEL 0x80 /* pin is output to port A mode sel */
-#define GPPR_PHY_RESET_PIN 5 /* GIO pin controlling phy reset */
-#define GPPR_UARTB_MODESEL_PIN 6 /* GIO pin controlling uart b mode select */
-#define GPPR_UARTA_MODESEL_PIN 7 /* GIO pin controlling uart a mode select */
+#define GPPR_PHY_RESET_PIN	5	/* GIO pin controls phy reset */
+#define GPPR_UARTB_MODESEL_PIN	6	/* GIO pin controls uart b mode sel */
+#define GPPR_UARTA_MODESEL_PIN	7	/* GIO pin controls uart a mode sel */
+/* ethernet */
#define EMCR_DUPLEX 0x00000001
#define EMCR_PROMISC 0x00000002
#define EMCR_PADEN 0x00000004
@@ -595,70 +590,17 @@ struct ioc3_etxd {
#define MIDR_DATA_MASK 0x0000ffff
-#define ERXBUF_IPCKSUM_MASK 0x0000ffff
-#define ERXBUF_BYTECNT_MASK 0x07ff0000
-#define ERXBUF_BYTECNT_SHIFT 16
-#define ERXBUF_V 0x80000000
-
-#define ERXBUF_CRCERR 0x00000001 /* aka RSV15 */
-#define ERXBUF_FRAMERR 0x00000002 /* aka RSV14 */
-#define ERXBUF_CODERR 0x00000004 /* aka RSV13 */
-#define ERXBUF_INVPREAMB 0x00000008 /* aka RSV18 */
-#define ERXBUF_LOLEN 0x00007000 /* aka RSV2_0 */
-#define ERXBUF_HILEN 0x03ff0000 /* aka RSV12_3 */
-#define ERXBUF_MULTICAST 0x04000000 /* aka RSV16 */
-#define ERXBUF_BROADCAST 0x08000000 /* aka RSV17 */
-#define ERXBUF_LONGEVENT 0x10000000 /* aka RSV19 */
-#define ERXBUF_BADPKT 0x20000000 /* aka RSV20 */
-#define ERXBUF_GOODPKT 0x40000000 /* aka RSV21 */
-#define ERXBUF_CARRIER 0x80000000 /* aka RSV22 */
-
-#define ETXD_BYTECNT_MASK 0x000007ff /* total byte count */
-#define ETXD_INTWHENDONE 0x00001000 /* intr when done */
-#define ETXD_D0V 0x00010000 /* data 0 valid */
-#define ETXD_B1V 0x00020000 /* buf 1 valid */
-#define ETXD_B2V 0x00040000 /* buf 2 valid */
-#define ETXD_DOCHECKSUM 0x00080000 /* insert ip cksum */
-#define ETXD_CHKOFF_MASK 0x07f00000 /* cksum byte offset */
-#define ETXD_CHKOFF_SHIFT 20
-
-#define ETXD_D0CNT_MASK 0x0000007f
-#define ETXD_B1CNT_MASK 0x0007ff00
-#define ETXD_B1CNT_SHIFT 8
-#define ETXD_B2CNT_MASK 0x7ff00000
-#define ETXD_B2CNT_SHIFT 20
-
-typedef enum ioc3_subdevs_e {
- ioc3_subdev_ether,
- ioc3_subdev_generic,
- ioc3_subdev_nic,
- ioc3_subdev_kbms,
- ioc3_subdev_ttya,
- ioc3_subdev_ttyb,
- ioc3_subdev_ecpp,
- ioc3_subdev_rt,
- ioc3_nsubdevs
-} ioc3_subdev_t;
-
-/* subdevice disable bits,
- * from the standard INFO_LBL_SUBDEVS
- */
-#define IOC3_SDB_ETHER (1<<ioc3_subdev_ether)
-#define IOC3_SDB_GENERIC (1<<ioc3_subdev_generic)
-#define IOC3_SDB_NIC (1<<ioc3_subdev_nic)
-#define IOC3_SDB_KBMS (1<<ioc3_subdev_kbms)
-#define IOC3_SDB_TTYA (1<<ioc3_subdev_ttya)
-#define IOC3_SDB_TTYB (1<<ioc3_subdev_ttyb)
-#define IOC3_SDB_ECPP (1<<ioc3_subdev_ecpp)
-#define IOC3_SDB_RT (1<<ioc3_subdev_rt)
-
-#define IOC3_ALL_SUBDEVS ((1<<ioc3_nsubdevs)-1)
-
-#define IOC3_SDB_SERIAL (IOC3_SDB_TTYA|IOC3_SDB_TTYB)
-
-#define IOC3_STD_SUBDEVS IOC3_ALL_SUBDEVS
-
-#define IOC3_INTA_SUBDEVS IOC3_SDB_ETHER
-#define IOC3_INTB_SUBDEVS (IOC3_SDB_GENERIC|IOC3_SDB_KBMS|IOC3_SDB_SERIAL|IOC3_SDB_ECPP|IOC3_SDB_RT)
-
-#endif /* _IOC3_H */
+/* subsystem IDs supplied by card detection in pci-xtalk-bridge */
+#define IOC3_SUBSYS_IP27_BASEIO6G 0xc300
+#define IOC3_SUBSYS_IP27_MIO 0xc301
+#define IOC3_SUBSYS_IP27_BASEIO 0xc302
+#define IOC3_SUBSYS_IP29_SYSBOARD 0xc303
+#define IOC3_SUBSYS_IP30_SYSBOARD 0xc304
+#define IOC3_SUBSYS_MENET 0xc305
+#define IOC3_SUBSYS_MENET4 0xc306
+#define IOC3_SUBSYS_IO7 0xc307
+#define IOC3_SUBSYS_IO8 0xc308
+#define IOC3_SUBSYS_IO9 0xc309
+#define IOC3_SUBSYS_IP34_SYSBOARD 0xc30A
+
+#endif /* MIPS_SN_IOC3_H */
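
With the volatile qualifiers gone, drivers are expected to reach these registers through the MMIO accessors rather than plain structure loads. A minimal sketch, loosely mirroring the removed IOC3_PENDING_INTRS() macro; the sio_ir/sio_ies pairing is an assumption here.

#include <linux/io.h>
#include <asm/sn/ioc3.h>

/* Sketch: pending SuperIO interrupts = raised bits that are also enabled. */
static u32 ioc3_pending_irqs(struct ioc3 __iomem *regs)
{
	return readl(&regs->sio_ir) & readl(&regs->sio_ies);
}
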
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
index 467c313d5767..117f85e4bef5 100644
--- a/arch/mips/include/asm/sn/klconfig.h
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -889,10 +889,6 @@ typedef union {
extern lboard_t *find_lboard(lboard_t *start, unsigned char type);
extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
-extern klcpu_t *nasid_slice_to_cpuinfo(nasid_t, int);
extern lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_class);
-
-extern klcpu_t *sn_get_cpuinfo(cpuid_t cpu);
-
#endif /* _ASM_SN_KLCONFIG_H */
diff --git a/arch/mips/include/asm/sn/kldir.h b/arch/mips/include/asm/sn/kldir.h
index bfb3aec94539..245f59bf3845 100644
--- a/arch/mips/include/asm/sn/kldir.h
+++ b/arch/mips/include/asm/sn/kldir.h
@@ -1,201 +1,16 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
- *
- * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 1999, 2000 by Ralf Baechle
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+
#ifndef _ASM_SN_KLDIR_H
#define _ASM_SN_KLDIR_H
-
-/*
- * The kldir memory area resides at a fixed place in each node's memory and
- * provides pointers to most other IP27 memory areas. This allows us to
- * resize and/or relocate memory areas at a later time without breaking all
- * firmware and kernels that use them. Indices in the array are
- * permanently dedicated to areas listed below. Some memory areas (marked
- * below) reside at a permanently fixed location, but are included in the
- * directory for completeness.
- */
-
#define KLDIR_MAGIC 0x434d5f53505f5357
-/*
- * The upper portion of the memory map applies during boot
- * only and is overwritten by IRIX/SYMMON.
- *
- * MEMORY MAP PER NODE
- *
- * 0x2000000 (32M) +-----------------------------------------+
- * | IO6 BUFFERS FOR FLASH ENET IOC3 |
- * 0x1F80000 (31.5M) +-----------------------------------------+
- * | IO6 TEXT/DATA/BSS/stack |
- * 0x1C00000 (30M) +-----------------------------------------+
- * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
- * 0x0800000 (28M) +-----------------------------------------+
- * | IP27 PROM TEXT/DATA/BSS/stack |
- * 0x1B00000 (27M) +-----------------------------------------+
- * | IP27 CFG |
- * 0x1A00000 (26M) +-----------------------------------------+
- * | Graphics PROM |
- * 0x1800000 (24M) +-----------------------------------------+
- * | 3rd Party PROM drivers |
- * 0x1600000 (22M) +-----------------------------------------+
- * | |
- * | Free |
- * | |
- * +-----------------------------------------+
- * | UNIX DEBUG Version |
- * 0x190000 (2M--) +-----------------------------------------+
- * | SYMMON |
- * | (For UNIX Debug only) |
- * 0x34000 (208K) +-----------------------------------------+
- * | SYMMON STACK [NUM_CPU_PER_NODE] |
- * | (For UNIX Debug only) |
- * 0x25000 (148K) +-----------------------------------------+
- * | KLCONFIG - II (temp) |
- * | |
- * | ---------------------------- |
- * | |
- * | UNIX NON-DEBUG Version |
- * 0x19000 (100K) +-----------------------------------------+
- *
- *
- * The lower portion of the memory map contains information that is
- * permanent and is used by the IP27PROM, IO6PROM and IRIX.
- *
- * 0x19000 (100K) +-----------------------------------------+
- * | |
- * | PI Error Spools (32K) |
- * | |
- * 0x12000 (72K) +-----------------------------------------+
- * | Unused |
- * 0x11c00 (71K) +-----------------------------------------+
- * | CPU 1 NMI Eframe area |
- * 0x11a00 (70.5K) +-----------------------------------------+
- * | CPU 0 NMI Eframe area |
- * 0x11800 (70K) +-----------------------------------------+
- * | CPU 1 NMI Register save area |
- * 0x11600 (69.5K) +-----------------------------------------+
- * | CPU 0 NMI Register save area |
- * 0x11400 (69K) +-----------------------------------------+
- * | GDA (1k) |
- * 0x11000 (68K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
- * 0x10800 (66k) +-----------------------------------------+
- * | cache error eframe |
- * 0x10400 (65K) +-----------------------------------------+
- * | Exception Handlers (UALIAS copy) |
- * 0x10000 (64K) +-----------------------------------------+
- * | |
- * | |
- * | KLCONFIG - I (permanent) (48K) |
- * | |
- * | |
- * | |
- * 0x4000 (16K) +-----------------------------------------+
- * | NMI Handler (Protected Page) |
- * 0x3000 (12K) +-----------------------------------------+
- * | ARCS PVECTORS (master node only) |
- * 0x2c00 (11K) +-----------------------------------------+
- * | ARCS TVECTORS (master node only) |
- * 0x2800 (10K) +-----------------------------------------+
- * | LAUNCH [NUM_CPU] |
- * 0x2400 (9K) +-----------------------------------------+
- * | Low memory directory (KLDIR) |
- * 0x2000 (8K) +-----------------------------------------+
- * | ARCS SPB (1K) |
- * 0x1000 (4K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
- * 0x800 (2k) +-----------------------------------------+
- * | cache error eframe |
- * 0x400 (1K) +-----------------------------------------+
- * | Exception Handlers |
- * 0x0 (0K) +-----------------------------------------+
- */
-
-#ifdef __ASSEMBLY__
#define KLDIR_OFF_MAGIC 0x00
#define KLDIR_OFF_OFFSET 0x08
#define KLDIR_OFF_POINTER 0x10
#define KLDIR_OFF_SIZE 0x18
#define KLDIR_OFF_COUNT 0x20
#define KLDIR_OFF_STRIDE 0x28
-#endif /* __ASSEMBLY__ */
-
-/*
- * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
- * we define here. Since it's set up in the prom. We can't redefine it later
- * and expect more space to be allocated. The way to find out the true size
- * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
- * for a particular node.
- */
-#define SYMMON_STACK_SIZE 0x8000
-
-#if defined(PROM)
-
-/*
- * These defines are prom version dependent. No code other than the IP27
- * prom should attempt to use these values.
- */
-#define IP27_LAUNCH_OFFSET 0x2400
-#define IP27_LAUNCH_SIZE 0x400
-#define IP27_LAUNCH_COUNT 2
-#define IP27_LAUNCH_STRIDE 0x200
-
-#define IP27_KLCONFIG_OFFSET 0x4000
-#define IP27_KLCONFIG_SIZE 0xc000
-#define IP27_KLCONFIG_COUNT 1
-#define IP27_KLCONFIG_STRIDE 0
-
-#define IP27_NMI_OFFSET 0x3000
-#define IP27_NMI_SIZE 0x40
-#define IP27_NMI_COUNT 2
-#define IP27_NMI_STRIDE 0x40
-
-#define IP27_PI_ERROR_OFFSET 0x12000
-#define IP27_PI_ERROR_SIZE 0x4000
-#define IP27_PI_ERROR_COUNT 1
-#define IP27_PI_ERROR_STRIDE 0
-
-#define IP27_SYMMON_STK_OFFSET 0x25000
-#define IP27_SYMMON_STK_SIZE 0xe000
-#define IP27_SYMMON_STK_COUNT 2
-/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
-#define IP27_SYMMON_STK_STRIDE 0x7000
-
-#define IP27_FREEMEM_OFFSET 0x19000
-#define IP27_FREEMEM_SIZE -1
-#define IP27_FREEMEM_COUNT 1
-#define IP27_FREEMEM_STRIDE 0
-
-#endif /* PROM */
-/*
- * There will be only one of these in a partition so the IO6 must set it up.
- */
-#define IO6_GDA_OFFSET 0x11000
-#define IO6_GDA_SIZE 0x400
-#define IO6_GDA_COUNT 1
-#define IO6_GDA_STRIDE 0
-
-/*
- * save area of kernel nmi regs in the prom format
- */
-#define IP27_NMI_KREGS_OFFSET 0x11400
-#define IP27_NMI_KREGS_CPU_SIZE 0x200
-/*
- * save area of kernel nmi regs in eframe format
- */
-#define IP27_NMI_EFRAME_OFFSET 0x11800
-#define IP27_NMI_EFRAME_SIZE 0x200
#define KLDIR_ENT_SIZE 0x40
#define KLDIR_MAX_ENTRIES (0x400 / 0x40)
@@ -214,4 +29,8 @@ typedef struct kldir_ent_s {
} kldir_ent_t;
#endif /* !__ASSEMBLY__ */
+#ifdef CONFIG_SGI_IP27
+#include <asm/sn/sn0/kldir.h>
+#endif
+
#endif /* _ASM_SN_KLDIR_H */
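The KLDIR_OFF_* values kept above are the assembly-visible byte offsets into a single kldir entry, with KLDIR_ENT_SIZE and KLDIR_MAX_ENTRIES bounding the directory itself. A minimal sketch of keeping the C structure in step with these offsets follows; the field names (magic, offset, pointer, size, count, stride) follow the usual kldir_ent_s layout and are assumptions for illustration only, not part of this patch.

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <asm/sn/kldir.h>

static inline void kldir_layout_check(void)
{
	/* Each C field must sit at its assembly-visible offset. */
	BUILD_BUG_ON(offsetof(kldir_ent_t, magic)   != KLDIR_OFF_MAGIC);
	BUILD_BUG_ON(offsetof(kldir_ent_t, offset)  != KLDIR_OFF_OFFSET);
	BUILD_BUG_ON(offsetof(kldir_ent_t, pointer) != KLDIR_OFF_POINTER);
	BUILD_BUG_ON(offsetof(kldir_ent_t, size)    != KLDIR_OFF_SIZE);
	BUILD_BUG_ON(offsetof(kldir_ent_t, count)   != KLDIR_OFF_COUNT);
	BUILD_BUG_ON(offsetof(kldir_ent_t, stride)  != KLDIR_OFF_STRIDE);

	/* An entry is 0x40 bytes; 0x400 bytes of directory hold 16 entries. */
	BUILD_BUG_ON(sizeof(kldir_ent_t) != KLDIR_ENT_SIZE);
}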
diff --git a/arch/mips/include/asm/sn/mapped_kernel.h b/arch/mips/include/asm/sn/mapped_kernel.h
index 2f3efa91c16e..3f1049807018 100644
--- a/arch/mips/include/asm/sn/mapped_kernel.h
+++ b/arch/mips/include/asm/sn/mapped_kernel.h
@@ -37,10 +37,10 @@
#define MAPPED_KERN_RO_TO_PHYS(x) \
((unsigned long)MAPPED_ADDR_RO_TO_PHYS(x) | \
- MAPPED_KERN_RO_PHYSBASE(get_compact_nodeid()))
+ MAPPED_KERN_RO_PHYSBASE(get_nasid()))
#define MAPPED_KERN_RW_TO_PHYS(x) \
((unsigned long)MAPPED_ADDR_RW_TO_PHYS(x) | \
- MAPPED_KERN_RW_PHYSBASE(get_compact_nodeid()))
+ MAPPED_KERN_RW_PHYSBASE(get_nasid()))
#else /* CONFIG_MAPPED_KERNEL */
diff --git a/arch/mips/include/asm/sn/sn0/arch.h b/arch/mips/include/asm/sn/sn0/arch.h
index 425a67e6a947..12f4c4649ff0 100644
--- a/arch/mips/include/asm/sn/sn0/arch.h
+++ b/arch/mips/include/asm/sn/sn0/arch.h
@@ -12,25 +12,11 @@
#define _ASM_SN_SN0_ARCH_H
-#ifndef SN0XXL /* 128 cpu SMP max */
-/*
- * This is the maximum number of nodes that can be part of a kernel.
- * Effectively, it's the maximum number of compact node ids (cnodeid_t).
- */
-#define MAX_COMPACT_NODES 64
-
/*
* MAXCPUS refers to the maximum number of CPUs in a single kernel.
* This is not necessarily the same as MAXNODES * CPUS_PER_NODE
*/
-#define MAXCPUS 128
-
-#else /* SN0XXL system */
-
-#define MAX_COMPACT_NODES 128
-#define MAXCPUS 256
-
-#endif /* SN0XXL */
+#define MAXCPUS (MAX_NUMNODES * CPUS_PER_NODE)
/*
* This is the maximum number of NASIDS that can be present in a system.
@@ -66,7 +52,5 @@
#define SLOT_MIN_MEM_SIZE (32*1024*1024)
#define CPUS_PER_NODE 2 /* CPUs on a single hub */
-#define CPUS_PER_NODE_SHFT 1 /* Bits to shift in the node number */
-#define CPUS_PER_SUBNODE 2 /* CPUs on a single hub PI */
#endif /* _ASM_SN_SN0_ARCH_H */
diff --git a/arch/mips/include/asm/sn/sn0/hub.h b/arch/mips/include/asm/sn/sn0/hub.h
index d78dd76d5dcf..c84adde36d41 100644
--- a/arch/mips/include/asm/sn/sn0/hub.h
+++ b/arch/mips/include/asm/sn/sn0/hub.h
@@ -37,4 +37,26 @@
#define UATTR_MSPEC 2
#define UATTR_UNCAC 3
+#ifdef __ASSEMBLY__
+/*
+ * Returns the local nasid into res.
+ */
+ .macro GET_NASID_ASM res
+ dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
+ ld \res, (\res)
+ and \res, NSRI_NODEID_MASK
+ dsrl \res, NSRI_NODEID_SHFT
+ .endm
+#else
+
+/*
+ * get_nasid() returns the physical node id number of the caller.
+ */
+static inline nasid_t get_nasid(void)
+{
+ return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
+ >> NSRI_NODEID_SHFT);
+}
+#endif
+
#endif /* _ASM_SN_SN0_HUB_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubni.h b/arch/mips/include/asm/sn/sn0/hubni.h
index b73c4bee65f2..b8253142cb83 100644
--- a/arch/mips/include/asm/sn/sn0/hubni.h
+++ b/arch/mips/include/asm/sn/sn0/hubni.h
@@ -250,6 +250,14 @@ typedef union hubni_port_error_u {
#define NI_LLP_CB_MAX 0xff
#define NI_LLP_SN_MAX 0xff
+static inline int get_region_shift(void)
+{
+ if (LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
+ return NASID_TO_FINEREG_SHFT;
+
+ return NASID_TO_COARSEREG_SHFT;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_SGI_SN0_HUBNI_H */
diff --git a/arch/mips/include/asm/sn/sn0/ip27.h b/arch/mips/include/asm/sn/sn0/ip27.h
deleted file mode 100644
index 3b5efeefcc3f..000000000000
--- a/arch/mips/include/asm/sn/sn0/ip27.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Derived from IRIX <sys/SN/SN0/IP27.h>.
- *
- * Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
- * Copyright (C) 1999, 2006 by Ralf Baechle
- */
-#ifndef _ASM_SN_SN0_IP27_H
-#define _ASM_SN_SN0_IP27_H
-
-#include <asm/mipsregs.h>
-
-/*
- * Simple definitions for the masks which remove SW bits from pte.
- */
-
-#define TLBLO_HWBITSHIFT 0 /* Shift value, for masking */
-
-#ifndef __ASSEMBLY__
-
-#define CAUSE_BERRINTR IE_IRQ5
-
-#define ECCF_CACHE_ERR 0
-#define ECCF_TAGLO 1
-#define ECCF_ECC 2
-#define ECCF_ERROREPC 3
-#define ECCF_PADDR 4
-#define ECCF_SIZE (5 * sizeof(long))
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef __ASSEMBLY__
-
-/*
- * KL_GET_CPUNUM (similar to EV_GET_SPNUM for EVEREST platform) reads
- * the processor number of the calling processor. The proc parameters
- * must be a register.
- */
-#define KL_GET_CPUNUM(proc) \
- dli proc, LOCAL_HUB(0); \
- ld proc, PI_CPU_NUM(proc)
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * R10000 status register interrupt bit mask usage for IP27.
- */
-#define SRB_SWTIMO IE_SW0 /* 0x0100 */
-#define SRB_NET IE_SW1 /* 0x0200 */
-#define SRB_DEV0 IE_IRQ0 /* 0x0400 */
-#define SRB_DEV1 IE_IRQ1 /* 0x0800 */
-#define SRB_TIMOCLK IE_IRQ2 /* 0x1000 */
-#define SRB_PROFCLK IE_IRQ3 /* 0x2000 */
-#define SRB_ERR IE_IRQ4 /* 0x4000 */
-#define SRB_SCHEDCLK IE_IRQ5 /* 0x8000 */
-
-#define SR_IBIT_HI SRB_DEV0
-#define SR_IBIT_PROF SRB_PROFCLK
-
-#define SRB_SWTIMO_IDX 0
-#define SRB_NET_IDX 1
-#define SRB_DEV0_IDX 2
-#define SRB_DEV1_IDX 3
-#define SRB_TIMOCLK_IDX 4
-#define SRB_PROFCLK_IDX 5
-#define SRB_ERR_IDX 6
-#define SRB_SCHEDCLK_IDX 7
-
-#define NUM_CAUSE_INTRS 8
-
-#define SCACHE_LINESIZE 128
-#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
-
-#include <asm/sn/addrs.h>
-
-#define LED_CYCLE_MASK 0x0f
-#define LED_CYCLE_SHFT 4
-
-#define SEND_NMI(_nasid, _slice) \
- REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
-
-#endif /* _ASM_SN_SN0_IP27_H */
diff --git a/arch/mips/include/asm/sn/sn0/kldir.h b/arch/mips/include/asm/sn/sn0/kldir.h
new file mode 100644
index 000000000000..1b10af6cbd5e
--- /dev/null
+++ b/arch/mips/include/asm/sn/sn0/kldir.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Derived from IRIX <sys/SN/kldir.h>, revision 1.21.
+ *
+ * Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000 by Ralf Baechle
+ */
+#ifndef _ASM_SN_SN0_KLDIR_H
+#define _ASM_SN_SN0_KLDIR_H
+
+
+/*
+ * The kldir memory area resides at a fixed place in each node's memory and
+ * provides pointers to most other IP27 memory areas. This allows us to
+ * resize and/or relocate memory areas at a later time without breaking all
+ * firmware and kernels that use them. Indices in the array are
+ * permanently dedicated to areas listed below. Some memory areas (marked
+ * below) reside at a permanently fixed location, but are included in the
+ * directory for completeness.
+ */
+
+/*
+ * The upper portion of the memory map applies during boot
+ * only and is overwritten by IRIX/SYMMON.
+ *
+ * MEMORY MAP PER NODE
+ *
+ * 0x2000000 (32M) +-----------------------------------------+
+ * | IO6 BUFFERS FOR FLASH ENET IOC3 |
+ * 0x1F80000 (31.5M) +-----------------------------------------+
+ * | IO6 TEXT/DATA/BSS/stack |
+ * 0x1C00000 (30M) +-----------------------------------------+
+ * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M) +-----------------------------------------+
+ * | IP27 PROM TEXT/DATA/BSS/stack |
+ * 0x1B00000 (27M) +-----------------------------------------+
+ * | IP27 CFG |
+ * 0x1A00000 (26M) +-----------------------------------------+
+ * | Graphics PROM |
+ * 0x1800000 (24M) +-----------------------------------------+
+ * | 3rd Party PROM drivers |
+ * 0x1600000 (22M) +-----------------------------------------+
+ * | |
+ * | Free |
+ * | |
+ * +-----------------------------------------+
+ * | UNIX DEBUG Version |
+ * 0x190000 (2M--) +-----------------------------------------+
+ * | SYMMON |
+ * | (For UNIX Debug only) |
+ * 0x34000 (208K) +-----------------------------------------+
+ * | SYMMON STACK [NUM_CPU_PER_NODE] |
+ * | (For UNIX Debug only) |
+ * 0x25000 (148K) +-----------------------------------------+
+ * | KLCONFIG - II (temp) |
+ * | |
+ * | ---------------------------- |
+ * | |
+ * | UNIX NON-DEBUG Version |
+ * 0x19000 (100K) +-----------------------------------------+
+ *
+ *
+ * The lower portion of the memory map contains information that is
+ * permanent and is used by the IP27PROM, IO6PROM and IRIX.
+ *
+ * 0x19000 (100K) +-----------------------------------------+
+ * | |
+ * | PI Error Spools (32K) |
+ * | |
+ * 0x12000 (72K) +-----------------------------------------+
+ * | Unused |
+ * 0x11c00 (71K) +-----------------------------------------+
+ * | CPU 1 NMI Eframe area |
+ * 0x11a00 (70.5K) +-----------------------------------------+
+ * | CPU 0 NMI Eframe area |
+ * 0x11800 (70K) +-----------------------------------------+
+ * | CPU 1 NMI Register save area |
+ * 0x11600 (69.5K) +-----------------------------------------+
+ * | CPU 0 NMI Register save area |
+ * 0x11400 (69K) +-----------------------------------------+
+ * | GDA (1k) |
+ * 0x11000 (68K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x10800 (66k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x10400 (65K) +-----------------------------------------+
+ * | Exception Handlers (UALIAS copy) |
+ * 0x10000 (64K) +-----------------------------------------+
+ * | |
+ * | |
+ * | KLCONFIG - I (permanent) (48K) |
+ * | |
+ * | |
+ * | |
+ * 0x4000 (16K) +-----------------------------------------+
+ * | NMI Handler (Protected Page) |
+ * 0x3000 (12K) +-----------------------------------------+
+ * | ARCS PVECTORS (master node only) |
+ * 0x2c00 (11K) +-----------------------------------------+
+ * | ARCS TVECTORS (master node only) |
+ * 0x2800 (10K) +-----------------------------------------+
+ * | LAUNCH [NUM_CPU] |
+ * 0x2400 (9K) +-----------------------------------------+
+ * | Low memory directory (KLDIR) |
+ * 0x2000 (8K) +-----------------------------------------+
+ * | ARCS SPB (1K) |
+ * 0x1000 (4K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x800 (2k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x400 (1K) +-----------------------------------------+
+ * | Exception Handlers |
+ * 0x0 (0K) +-----------------------------------------+
+ */
+
+/*
+ * This is defined here because IP27_SYMMON_STK_SIZE must be at least what
+ * we define here. Since it's set up in the prom, we can't redefine it later
+ * and expect more space to be allocated. The way to find out the true size
+ * of the symmon stacks is to divide SYMMON_STK_SIZE by SYMMON_STK_STRIDE
+ * for a particular node.
+ */
+#define SYMMON_STACK_SIZE 0x8000
+
+#if defined(PROM)
+
+/*
+ * These defines are prom version dependent. No code other than the IP27
+ * prom should attempt to use these values.
+ */
+#define IP27_LAUNCH_OFFSET 0x2400
+#define IP27_LAUNCH_SIZE 0x400
+#define IP27_LAUNCH_COUNT 2
+#define IP27_LAUNCH_STRIDE 0x200
+
+#define IP27_KLCONFIG_OFFSET 0x4000
+#define IP27_KLCONFIG_SIZE 0xc000
+#define IP27_KLCONFIG_COUNT 1
+#define IP27_KLCONFIG_STRIDE 0
+
+#define IP27_NMI_OFFSET 0x3000
+#define IP27_NMI_SIZE 0x40
+#define IP27_NMI_COUNT 2
+#define IP27_NMI_STRIDE 0x40
+
+#define IP27_PI_ERROR_OFFSET 0x12000
+#define IP27_PI_ERROR_SIZE 0x4000
+#define IP27_PI_ERROR_COUNT 1
+#define IP27_PI_ERROR_STRIDE 0
+
+#define IP27_SYMMON_STK_OFFSET 0x25000
+#define IP27_SYMMON_STK_SIZE 0xe000
+#define IP27_SYMMON_STK_COUNT 2
+/* IP27_SYMMON_STK_STRIDE must be >= SYMMON_STACK_SIZE */
+#define IP27_SYMMON_STK_STRIDE 0x7000
+
+#define IP27_FREEMEM_OFFSET 0x19000
+#define IP27_FREEMEM_SIZE -1
+#define IP27_FREEMEM_COUNT 1
+#define IP27_FREEMEM_STRIDE 0
+
+#endif /* PROM */
+/*
+ * There will be only one of these in a partition so the IO6 must set it up.
+ */
+#define IO6_GDA_OFFSET 0x11000
+#define IO6_GDA_SIZE 0x400
+#define IO6_GDA_COUNT 1
+#define IO6_GDA_STRIDE 0
+
+/*
+ * save area of kernel nmi regs in the prom format
+ */
+#define IP27_NMI_KREGS_OFFSET 0x11400
+#define IP27_NMI_KREGS_CPU_SIZE 0x200
+/*
+ * save area of kernel nmi regs in eframe format
+ */
+#define IP27_NMI_EFRAME_OFFSET 0x11800
+#define IP27_NMI_EFRAME_SIZE 0x200
+
+#endif /* _ASM_SN_SN0_KLDIR_H */
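Each of the areas listed above is located purely by an (offset, size, count, stride) tuple within a node's low memory, so per-CPU instances are found by stepping the stride. Below is a small sketch of that arithmetic, assuming a PROM build where the IP27_SYMMON_STK_* values are visible; turning these offsets into addresses additionally needs the node's base address (a NODE_OFFSET-style macro), which is deliberately left out here.

/* Offset of CPU slice 'slice's SYMMON stack within its node's memory. */
static inline unsigned long ip27_symmon_stk_off(int slice)
{
	return IP27_SYMMON_STK_OFFSET + slice * IP27_SYMMON_STK_STRIDE;
}

/* Offset of the PROM-format NMI register save area for a CPU slice. */
static inline unsigned long ip27_nmi_kregs_off(int slice)
{
	return IP27_NMI_KREGS_OFFSET + slice * IP27_NMI_KREGS_CPU_SIZE;
}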
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
deleted file mode 100644
index f09ba846c644..000000000000
--- a/arch/mips/include/asm/sn/sn_private.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SN_SN_PRIVATE_H
-#define __ASM_SN_SN_PRIVATE_H
-
-#include <asm/sn/types.h>
-
-extern nasid_t master_nasid;
-
-extern void cpu_node_probe(void);
-extern cnodeid_t get_compact_nodeid(void);
-extern void hub_rtc_init(cnodeid_t);
-extern void cpu_time_init(void);
-extern void per_cpu_init(void);
-extern void install_cpu_nmi_handler(int slice);
-extern void install_ipi(void);
-extern void setup_replication_mask(void);
-extern void replicate_kernel_text(void);
-extern unsigned long node_getfirstfree(cnodeid_t);
-
-#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index 6d24d4e8b9ed..451ba1ee41ad 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -11,15 +11,15 @@
#include <linux/types.h>
+#ifndef __ASSEMBLY__
+
typedef unsigned long cpuid_t;
-typedef unsigned long cnodemask_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
-typedef signed short cnodeid_t; /* node id in compact-id space */
typedef signed char partid_t; /* partition ID type */
typedef signed short moduleid_t; /* user-visible module number type */
-typedef signed short cmoduleid_t; /* kernel compact module id type */
-typedef unsigned char clusterid_t; /* Clusterid of the cell */
typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
+#endif
+
#endif /* _ASM_SN_TYPES_H */
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
index 29030cb398ee..1de3bbce8e88 100644
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -10,127 +10,6 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
-
-/*
- * Most of the inline functions are rather naive implementations so I just
- * didn't bother updating them for 64-bit ...
- */
-#ifdef CONFIG_32BIT
-
-#ifndef IN_STRING_C
-
-#define __HAVE_ARCH_STRCPY
-static __inline__ char *strcpy(char *__dest, __const__ char *__src)
-{
- char *__xdest = __dest;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "addiu\t%1,1\n\t"
- "sb\t$1,(%0)\n\t"
- "bnez\t$1,1b\n\t"
- "addiu\t%0,1\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__dest), "=r" (__src)
- : "0" (__dest), "1" (__src)
- : "memory");
-
- return __xdest;
-}
-
-#define __HAVE_ARCH_STRNCPY
-static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
-{
- char *__xdest = __dest;
-
- if (__n == 0)
- return __xdest;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "subu\t%2,1\n\t"
- "sb\t$1,(%0)\n\t"
- "beqz\t$1,2f\n\t"
- "addiu\t%0,1\n\t"
- "bnez\t%2,1b\n\t"
- "addiu\t%1,1\n"
- "2:\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__dest), "=r" (__src), "=r" (__n)
- : "0" (__dest), "1" (__src), "2" (__n)
- : "memory");
-
- return __xdest;
-}
-
-#define __HAVE_ARCH_STRCMP
-static __inline__ int strcmp(__const__ char *__cs, __const__ char *__ct)
-{
- int __res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "lbu\t%2,(%0)\n"
- "1:\tlbu\t$1,(%1)\n\t"
- "addiu\t%0,1\n\t"
- "bne\t$1,%2,2f\n\t"
- "addiu\t%1,1\n\t"
- "bnez\t%2,1b\n\t"
- "lbu\t%2,(%0)\n\t"
-#if defined(CONFIG_CPU_R3000)
- "nop\n\t"
-#endif
- "move\t%2,$1\n"
- "2:\tsubu\t%2,$1\n"
- "3:\t.set\tat\n\t"
- ".set\treorder"
- : "=r" (__cs), "=r" (__ct), "=r" (__res)
- : "0" (__cs), "1" (__ct));
-
- return __res;
-}
-
-#endif /* !defined(IN_STRING_C) */
-
-#define __HAVE_ARCH_STRNCMP
-static __inline__ int
-strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count)
-{
- int __res;
-
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n"
- "1:\tlbu\t%3,(%0)\n\t"
- "beqz\t%2,2f\n\t"
- "lbu\t$1,(%1)\n\t"
- "subu\t%2,1\n\t"
- "bne\t$1,%3,3f\n\t"
- "addiu\t%0,1\n\t"
- "bnez\t%3,1b\n\t"
- "addiu\t%1,1\n"
- "2:\n\t"
-#if defined(CONFIG_CPU_R3000)
- "nop\n\t"
-#endif
- "move\t%3,$1\n"
- "3:\tsubu\t%3,$1\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : "=r" (__cs), "=r" (__ct), "=r" (__count), "=r" (__res)
- : "0" (__cs), "1" (__ct), "2" (__count));
-
- return __res;
-}
-#endif /* CONFIG_32BIT */
-
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h
new file mode 100644
index 000000000000..7c6a1095f556
--- /dev/null
+++ b/arch/mips/include/asm/sync.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __MIPS_ASM_SYNC_H__
+#define __MIPS_ASM_SYNC_H__
+
+/*
+ * sync types are defined by the MIPS64 Instruction Set documentation in Volume
+ * II-A of the MIPS Architecture Reference Manual, which can be found here:
+ *
+ * https://www.mips.com/?do-download=the-mips64-instruction-set-v6-06
+ *
+ * Two types of barrier are provided:
+ *
+ * 1) Completion barriers, which ensure that a memory operation has actually
+ * completed & often involve stalling the CPU pipeline to do so.
+ *
+ * 2) Ordering barriers, which only ensure that affected memory operations
+ * won't be reordered in the CPU pipeline in a manner that violates the
+ * restrictions imposed by the barrier.
+ *
+ * Ordering barriers can be more efficient than completion barriers, since:
+ *
+ * a) Ordering barriers only require memory access instructions which precede
+ * them in program order (older instructions) to reach a point in the
+ * load/store datapath beyond which reordering is not possible before
+ * allowing memory access instructions which follow them (younger
+ * instructions) to be performed. That is, older instructions don't
+ * actually need to complete - they just need to get far enough that all
+ * other coherent CPUs will observe their completion before they observe
+ * the effects of younger instructions.
+ *
+ * b) Multiple variants of ordering barrier are provided which allow the
+ * effects to be restricted to different combinations of older or younger
+ * loads or stores. By way of example, if we only care that stores older
+ * than a barrier are observed prior to stores that are younger than a
+ * barrier & don't care about the ordering of loads then the 'wmb'
+ * ordering barrier can be used. Limiting the barrier's effects to stores
+ * allows loads to continue unaffected & potentially allows the CPU to
+ * make progress faster than if younger loads had to wait for older stores
+ * to complete.
+ */
+
+/*
+ * No sync instruction at all; used to allow code to nullify the effect of the
+ * __SYNC() macro without needing lots of #ifdefery.
+ */
+#define __SYNC_none -1
+
+/*
+ * A full completion barrier; all memory accesses appearing prior to this sync
+ * instruction in program order must complete before any memory accesses
+ * appearing after this sync instruction in program order.
+ */
+#define __SYNC_full 0x00
+
+/*
+ * For now we use a full completion barrier to implement all sync types, until
+ * we're satisfied that lightweight ordering barriers defined by MIPSr6 are
+ * sufficient to uphold our desired memory model.
+ */
+#define __SYNC_aq __SYNC_full
+#define __SYNC_rl __SYNC_full
+#define __SYNC_mb __SYNC_full
+
+/*
+ * ...except on Cavium Octeon CPUs, which have been using the 'wmb' ordering
+ * barrier since 2010 & omit 'rmb' barriers because the CPUs don't perform
+ * speculative reads.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rmb __SYNC_none
+# define __SYNC_wmb 0x04
+#else
+# define __SYNC_rmb __SYNC_full
+# define __SYNC_wmb __SYNC_full
+#endif
+
+/*
+ * A GINV sync is a little different; it doesn't relate directly to loads or
+ * stores, but instead causes synchronization of an icache or TLB global
+ * invalidation operation triggered by the ginvi or ginvt instructions
+ * respectively. In cases where we need to know that a ginvi or ginvt operation
+ * has been performed by all coherent CPUs, we must issue a sync instruction of
+ * this type. Once this instruction graduates all coherent CPUs will have
+ * observed the invalidation.
+ */
+#define __SYNC_ginv 0x14
+
+/* Trivial; indicate that we always need this sync instruction. */
+#define __SYNC_always (1 << 0)
+
+/*
+ * Indicate that we need this sync instruction only on systems with weakly
+ * ordered memory access. In general this is most MIPS systems, but there are
+ * exceptions which provide strongly ordered memory.
+ */
+#ifdef CONFIG_WEAK_ORDERING
+# define __SYNC_weak_ordering (1 << 1)
+#else
+# define __SYNC_weak_ordering 0
+#endif
+
+/*
+ * Indicate that we need this sync instruction only on systems where LL/SC
+ * don't implicitly provide a memory barrier. In general this is most MIPS
+ * systems.
+ */
+#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+# define __SYNC_weak_llsc (1 << 2)
+#else
+# define __SYNC_weak_llsc 0
+#endif
+
+/*
+ * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load,
+ * store or prefetch) in between an LL & SC can cause the SC instruction to
+ * erroneously succeed, breaking atomicity. Whilst it's unusual to write code
+ * containing such sequences, this bug bites harder than we might otherwise
+ * expect due to reordering & speculation:
+ *
+ * 1) A memory access appearing prior to the LL in program order may actually
+ * be executed after the LL - this is the reordering case.
+ *
+ * In order to avoid this we need to place a memory barrier (ie. a SYNC
+ * instruction) prior to every LL instruction, in between it and any earlier
+ * memory access instructions.
+ *
+ * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later.
+ *
+ * 2) If a conditional branch exists between an LL & SC with a target outside
+ * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg()
+ * or similar, then misprediction of the branch may allow speculative
+ * execution of memory accesses from outside of the LL-SC loop.
+ *
+ * In order to avoid this we need a memory barrier (ie. a SYNC instruction)
+ * at each affected branch target.
+ *
+ * This case affects all current Loongson 3 CPUs.
+ *
+ * The above described cases cause an error in the cache coherence protocol;
+ * such that the Invalidate of a competing LL-SC goes 'missing' and SC
+ * erroneously observes its core still has Exclusive state and lets the SC
+ * proceed.
+ *
+ * Therefore the error only occurs on SMP systems.
+ */
+#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS
+# define __SYNC_loongson3_war (1 << 31)
+#else
+# define __SYNC_loongson3_war 0
+#endif
+
+/*
+ * Some Cavium Octeon CPUs suffer from a bug that causes a single wmb ordering
+ * barrier to be ineffective, requiring the use of 2 in sequence to provide an
+ * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
+ * optimized memory barrier primitives."). Here we specify that the affected
+ * sync instructions should be emitted twice.
+ */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb))
+#else
+# define __SYNC_rpt(type) 1
+#endif
+
+/*
+ * The main event. Here we actually emit a sync instruction of a given type, if
+ * reason is non-zero.
+ *
+ * In future we have the option of emitting entries in a fixups-style table
+ * here that would allow us to opportunistically remove some sync instructions
+ * when we detect at runtime that we're running on a CPU that doesn't need
+ * them.
+ */
+#ifdef CONFIG_CPU_HAS_SYNC
+# define ____SYNC(_type, _reason, _else) \
+ .if (( _type ) != -1) && ( _reason ); \
+ .set push; \
+ .set MIPS_ISA_LEVEL_RAW; \
+ .rept __SYNC_rpt(_type); \
+ sync _type; \
+ .endr; \
+ .set pop; \
+ .else; \
+ _else; \
+ .endif
+#else
+# define ____SYNC(_type, _reason, _else)
+#endif
+
+/*
+ * Preprocessor magic to expand macros used as arguments before we insert them
+ * into assembly code.
+ */
+#ifdef __ASSEMBLY__
+# define ___SYNC(type, reason, else) \
+ ____SYNC(type, reason, else)
+#else
+# define ___SYNC(type, reason, else) \
+ __stringify(____SYNC(type, reason, else))
+#endif
+
+#define __SYNC(type, reason) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, )
+#define __SYNC_ELSE(type, reason, else) \
+ ___SYNC(__SYNC_##type, __SYNC_##reason, else)
+
+#endif /* __MIPS_ASM_SYNC_H__ */
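To make the intended use of the strings above concrete, here is a rough sketch of how __SYNC() is meant to be pasted into inline assembly: a completion barrier on its own, and a fetch-and-add loop with the Loongson 3 pre-LL barrier described above. The real call sites live in barrier.h, atomic.h and friends; the function names below are illustrative only.

#include <asm/sync.h>

static inline void example_full_barrier(void)
{
	/* Emits a full "sync" (repeated where required), or nothing at all. */
	asm volatile(__SYNC(full, always) ::: "memory");
}

static inline int example_fetch_add(int *p, int v)
{
	int old, tmp;

	asm volatile(
	/* Case 1 above: order earlier accesses before the LL. */
	"	" __SYNC(full, loongson3_war) "	\n"
	"1:	ll	%[old], 0(%[ptr])	\n"
	"	addu	%[tmp], %[old], %[v]	\n"
	"	sc	%[tmp], 0(%[ptr])	\n"
	"	beqz	%[tmp], 1b		\n"
	: [old] "=&r" (old), [tmp] "=&r" (tmp)
	: [ptr] "r" (p), [v] "Ir" (v)
	: "memory");

	return old;
}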
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 83bb439597d8..25fa651c937d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -54,7 +54,7 @@ static inline void mips_syscall_update_nr(struct task_struct *task,
task_thread_info(task)->syscall = regs->regs[2];
}
-static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
+static inline void mips_get_syscall_arg(unsigned long *arg,
struct task_struct *task, struct pt_regs *regs, unsigned int n)
{
unsigned long usp __maybe_unused = regs->regs[29];
@@ -63,23 +63,24 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
case 0: case 1: case 2: case 3:
*arg = regs->regs[4 + n];
- return 0;
+ return;
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
- return get_user(*arg, (int *)usp + n);
+ get_user(*arg, (int *)usp + n);
+ return;
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
- return get_user(*arg, (int *)usp + n);
+ get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
- return 0;
+ return;
#endif
default:
@@ -126,21 +127,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
unsigned int i = 0;
unsigned int n = 6;
- int ret;
/* O32 ABI syscall() */
if (mips_syscall_is_indirect(task, regs))
i++;
while (n--)
- ret |= mips_get_syscall_arg(args++, task, regs, i++);
-
- /*
- * No way to communicate an error because this is a void function.
- */
-#if 0
- return ret;
-#endif
+ mips_get_syscall_arg(args++, task, regs, i++);
}
extern const unsigned long sys_call_table[];
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 4993db40482c..ee26f9a4575d 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -49,8 +49,26 @@ struct thread_info {
.addr_limit = KERNEL_DS, \
}
-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
register struct thread_info *__current_thread_info __asm__("$28");
+#endif
static inline struct thread_info *current_thread_info(void)
{
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 071053ece677..5d70babfc9ee 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -52,6 +52,7 @@
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
/* whitelists for checksyscalls */
#define __IGNORE_fadvise64_64
diff --git a/arch/mips/include/asm/unroll.h b/arch/mips/include/asm/unroll.h
new file mode 100644
index 000000000000..c628747d4ecd
--- /dev/null
+++ b/arch/mips/include/asm/unroll.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_UNROLL_H__
+#define __ASM_UNROLL_H__
+
+/*
+ * Explicitly unroll a loop, for use in cases where doing so is performance
+ * critical.
+ *
+ * Ideally we'd rely upon the compiler to provide this but there's no commonly
+ * available means to do so. For example GCC's "#pragma GCC unroll"
+ * functionality would be ideal but is only available from GCC 8 onwards. Using
+ * -funroll-loops is an option but GCC tends to make poor choices when
+ * compiling our string functions. -funroll-all-loops leads to massive code
+ * bloat, even if only applied to the string functions.
+ */
+#define unroll(times, fn, ...) do { \
+ extern void bad_unroll(void) \
+ __compiletime_error("Unsupported unroll"); \
+ \
+ /* \
+ * We can't unroll if the number of iterations isn't \
+ * compile-time constant. Unfortunately GCC versions \
+ * up until 4.6 tend to miss obvious constants & cause \
+ * this check to fail, even though they go on to \
+ * generate reasonable code for the switch statement, \
+ * so we skip the sanity check for those compilers. \
+ */ \
+ BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 || \
+ CONFIG_CLANG_VERSION >= 80000) && \
+ !__builtin_constant_p(times)); \
+ \
+ switch (times) { \
+ case 32: fn(__VA_ARGS__); /* fall through */ \
+ case 31: fn(__VA_ARGS__); /* fall through */ \
+ case 30: fn(__VA_ARGS__); /* fall through */ \
+ case 29: fn(__VA_ARGS__); /* fall through */ \
+ case 28: fn(__VA_ARGS__); /* fall through */ \
+ case 27: fn(__VA_ARGS__); /* fall through */ \
+ case 26: fn(__VA_ARGS__); /* fall through */ \
+ case 25: fn(__VA_ARGS__); /* fall through */ \
+ case 24: fn(__VA_ARGS__); /* fall through */ \
+ case 23: fn(__VA_ARGS__); /* fall through */ \
+ case 22: fn(__VA_ARGS__); /* fall through */ \
+ case 21: fn(__VA_ARGS__); /* fall through */ \
+ case 20: fn(__VA_ARGS__); /* fall through */ \
+ case 19: fn(__VA_ARGS__); /* fall through */ \
+ case 18: fn(__VA_ARGS__); /* fall through */ \
+ case 17: fn(__VA_ARGS__); /* fall through */ \
+ case 16: fn(__VA_ARGS__); /* fall through */ \
+ case 15: fn(__VA_ARGS__); /* fall through */ \
+ case 14: fn(__VA_ARGS__); /* fall through */ \
+ case 13: fn(__VA_ARGS__); /* fall through */ \
+ case 12: fn(__VA_ARGS__); /* fall through */ \
+ case 11: fn(__VA_ARGS__); /* fall through */ \
+ case 10: fn(__VA_ARGS__); /* fall through */ \
+ case 9: fn(__VA_ARGS__); /* fall through */ \
+ case 8: fn(__VA_ARGS__); /* fall through */ \
+ case 7: fn(__VA_ARGS__); /* fall through */ \
+ case 6: fn(__VA_ARGS__); /* fall through */ \
+ case 5: fn(__VA_ARGS__); /* fall through */ \
+ case 4: fn(__VA_ARGS__); /* fall through */ \
+ case 3: fn(__VA_ARGS__); /* fall through */ \
+ case 2: fn(__VA_ARGS__); /* fall through */ \
+ case 1: fn(__VA_ARGS__); /* fall through */ \
+ case 0: break; \
+ \
+ default: \
+ /* \
+ * Either the iteration count is unreasonable \
+ * or we need to add more cases above. \
+ */ \
+ bad_unroll(); \
+ break; \
+ } \
+} while (0)
+
+#endif /* __ASM_UNROLL_H__ */
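A usage sketch follows, with hypothetical names (zero_words() and the step_one() helper) that are not part of this patch: unroll() simply pastes fn(args) 'times' times via the switch above, so any per-iteration state has to live in the arguments themselves, here a post-incremented index.

#include <linux/build_bug.h>	/* BUILD_BUG_ON(), used inside unroll() */
#include <asm/unroll.h>

/* Hypothetical per-iteration helper: clear one word. */
#define step_one(p)	(*(p) = 0)

static inline void zero_words(unsigned long *buf)
{
	int i = 0;

	/* Expands to eight consecutive step_one() calls, i advancing each time. */
	unroll(8, step_one, &buf[i++]);
}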
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index a013fa4a3682..cc7b516129a8 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -8,6 +8,7 @@
#define __ASM_VDSO_H
#include <linux/mm_types.h>
+#include <vdso/datapage.h>
#include <asm/barrier.h>
@@ -49,84 +50,9 @@ extern struct mips_vdso_image vdso_image_o32;
extern struct mips_vdso_image vdso_image_n32;
#endif
-/**
- * union mips_vdso_data - Data provided by the kernel for the VDSO.
- * @xtime_sec: Current real time (seconds part).
- * @xtime_nsec: Current real time (nanoseconds part, shifted).
- * @wall_to_mono_sec: Wall-to-monotonic offset (seconds part).
- * @wall_to_mono_nsec: Wall-to-monotonic offset (nanoseconds part).
- * @seq_count: Counter to synchronise updates (odd = updating).
- * @cs_shift: Clocksource shift value.
- * @clock_mode: Clocksource to use for time functions.
- * @cs_mult: Clocksource multiplier value.
- * @cs_cycle_last: Clock cycle value at last update.
- * @cs_mask: Clocksource mask value.
- * @tz_minuteswest: Minutes west of Greenwich (from timezone).
- * @tz_dsttime: Type of DST correction (from timezone).
- *
- * This structure contains data needed by functions within the VDSO. It is
- * populated by the kernel and mapped read-only into user memory. The time
- * fields are mirrors of internal data from the timekeeping infrastructure.
- *
- * Note: Care should be taken when modifying as the layout must remain the same
- * for both 64- and 32-bit (for 32-bit userland on 64-bit kernel).
- */
union mips_vdso_data {
- struct {
- u64 xtime_sec;
- u64 xtime_nsec;
- u64 wall_to_mono_sec;
- u64 wall_to_mono_nsec;
- u32 seq_count;
- u32 cs_shift;
- u8 clock_mode;
- u32 cs_mult;
- u64 cs_cycle_last;
- u64 cs_mask;
- s32 tz_minuteswest;
- s32 tz_dsttime;
- };
-
+ struct vdso_data data[CS_BASES];
u8 page[PAGE_SIZE];
};
-static inline u32 vdso_data_read_begin(const union mips_vdso_data *data)
-{
- u32 seq;
-
- while (true) {
- seq = READ_ONCE(data->seq_count);
- if (likely(!(seq & 1))) {
- /* Paired with smp_wmb() in vdso_data_write_*(). */
- smp_rmb();
- return seq;
- }
-
- cpu_relax();
- }
-}
-
-static inline bool vdso_data_read_retry(const union mips_vdso_data *data,
- u32 start_seq)
-{
- /* Paired with smp_wmb() in vdso_data_write_*(). */
- smp_rmb();
- return unlikely(data->seq_count != start_seq);
-}
-
-static inline void vdso_data_write_begin(union mips_vdso_data *data)
-{
- ++data->seq_count;
-
- /* Ensure sequence update is written before other data page values. */
- smp_wmb();
-}
-
-static inline void vdso_data_write_end(union mips_vdso_data *data)
-{
- /* Ensure data values are written before updating sequence again. */
- smp_wmb();
- ++data->seq_count;
-}
-
#endif /* __ASM_VDSO_H */
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..a58687e26c5d
--- /dev/null
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2018 ARM Limited
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/time.h>
+
+#include <asm/vdso/vdso.h>
+#include <asm/clocksource.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/vdso.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+#define __VDSO_USE_SYSCALL ULLONG_MAX
+
+static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct timezone *tz asm("a1") = _tz;
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_gettimeofday;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (tv), "r" (tz), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+ register long nr asm("v0") = __NR_clock_gettime;
+#else
+ register long nr asm("v0") = __NR_clock_gettime64;
+#endif
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline int clock_getres_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+{
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+#if _MIPS_SIM == _MIPS_SIM_ABI64
+ register long nr asm("v0") = __NR_clock_getres;
+#else
+ register long nr asm("v0") = __NR_clock_getres_time64;
+#endif
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ return error ? -ret : ret;
+}
+
+#if _MIPS_SIM != _MIPS_SIM_ABI64
+
+static __always_inline long clock_gettime32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_clock_gettime;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ return error ? -ret : ret;
+}
+
+static __always_inline int clock_getres32_fallback(
+ clockid_t _clkid,
+ struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("a1") = _ts;
+ register clockid_t clkid asm("a0") = _clkid;
+ register long ret asm("v0");
+ register long nr asm("v0") = __NR_clock_getres;
+ register long error asm("a3");
+
+ asm volatile(
+ " syscall\n"
+ : "=r" (ret), "=r" (error)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
+ "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+
+ return error ? -ret : ret;
+}
+#endif
+
+#ifdef CONFIG_CSRC_R4K
+
+static __always_inline u64 read_r4k_count(void)
+{
+ unsigned int count;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+#endif
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+
+static __always_inline u64 read_gic_count(const struct vdso_data *data)
+{
+ void __iomem *gic = get_gic(data);
+ u32 hi, hi2, lo;
+
+ do {
+ hi = __raw_readl(gic + sizeof(lo));
+ lo = __raw_readl(gic);
+ hi2 = __raw_readl(gic + sizeof(lo));
+ } while (hi2 != hi);
+
+ return (((u64)hi) << 32) + lo;
+}
+
+#endif
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
+{
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+ const struct vdso_data *data = get_vdso_data();
+#endif
+ u64 cycle_now;
+
+ switch (clock_mode) {
+#ifdef CONFIG_CSRC_R4K
+ case VDSO_CLOCK_R4K:
+ cycle_now = read_r4k_count();
+ break;
+#endif
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+ case VDSO_CLOCK_GIC:
+ cycle_now = read_gic_count(data);
+ break;
+#endif
+ default:
+ cycle_now = __VDSO_USE_SYSCALL;
+ break;
+ }
+
+ return cycle_now;
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return get_vdso_data();
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/mips/include/asm/vdso/vdso.h b/arch/mips/include/asm/vdso/vdso.h
new file mode 100644
index 000000000000..737ddfc3411c
--- /dev/null
+++ b/arch/mips/include/asm/vdso/vdso.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <asm/sgidefs.h>
+
+#ifndef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+static inline unsigned long get_vdso_base(void)
+{
+ unsigned long addr;
+
+ /*
+ * We can't use cpu_has_mips_r6 since it needs the cpu_data[]
+ * kernel symbol.
+ */
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * lapc <symbol> is an alias to addiupc reg, <symbol> - .
+ *
+ * We can't use addiupc because there is no label-label
+ * support for the addiupc reloc
+ */
+ __asm__("lapc %0, _start \n"
+ : "=r" (addr) : :);
+#else
+ /*
+ * Get the base load address of the VDSO. We have to avoid generating
+	 * relocations and references to the GOT because ld.so does not perform
+ * relocations on the VDSO. We use the current offset from the VDSO base
+ * and perform a PC-relative branch which gives the absolute address in
+ * ra, and take the difference. The assembler chokes on
+ * "li %0, _start - .", so embed the offset as a word and branch over
+ * it.
+ *
+ */
+
+ __asm__(
+ " .set push \n"
+ " .set noreorder \n"
+ " bal 1f \n"
+ " nop \n"
+ " .word _start - . \n"
+ "1: lw %0, 0($31) \n"
+ " " STR(PTR_ADDU) " %0, $31, %0 \n"
+ " .set pop \n"
+ : "=r" (addr)
+ :
+ : "$31");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+ return addr;
+}
+
+static inline const struct vdso_data *get_vdso_data(void)
+{
+ return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+#ifdef CONFIG_CLKSRC_MIPS_GIC
+
+static inline void __iomem *get_gic(const struct vdso_data *data)
+{
+ return (void __iomem *)data - PAGE_SIZE;
+}
+
+#endif /* CONFIG_CLKSRC_MIPS_GIC */
+
+#endif /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/vdso/vsyscall.h b/arch/mips/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..00d41b94ba31
--- /dev/null
+++ b/arch/mips/include/asm/vdso/vsyscall.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__mips_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+#define __arch_get_k_vdso_data __mips_get_k_vdso_data
+
+static __always_inline
+int __mips_get_clock_mode(struct timekeeper *tk)
+{
+ u32 clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
+
+ return clock_mode;
+}
+#define __arch_get_clock_mode __mips_get_clock_mode
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/mips/include/asm/vmalloc.h b/arch/mips/include/asm/vmalloc.h
new file mode 100644
index 000000000000..25dc09b25eaf
--- /dev/null
+++ b/arch/mips/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_MIPS_VMALLOC_H
+#define _ASM_MIPS_VMALLOC_H
+
+#endif /* _ASM_MIPS_VMALLOC_H */
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
index 9344e247a6c8..1eedd596a064 100644
--- a/arch/mips/include/asm/war.h
+++ b/arch/mips/include/asm/war.h
@@ -129,19 +129,6 @@
#endif
/*
- * When an interrupt happens on a CP0 register read instruction, CPU may
- * lock up or read corrupted values of CP0 registers after it enters
- * the exception handler.
- *
- * This workaround makes sure that we read a "safe" CP0 register as the
- * first thing in the exception handler, which breaks one of the
- * pre-conditions for this problem.
- */
-#ifndef R5432_CP0_INTERRUPT_WAR
-#error Check setting of R5432_CP0_INTERRUPT_WAR for your platform
-#endif
-
-/*
* Workaround for the Sibyte M3 errata the text of which can be found at
*
* http://sibyte.broadcom.com/hw/bcm1250/docs/pass2errata.txt