Diffstat (limited to 'arch/m68k/include')
-rw-r--r--  arch/m68k/include/asm/atari_stram.h   |    3
-rw-r--r--  arch/m68k/include/asm/atarihw.h       |    4
-rw-r--r--  arch/m68k/include/asm/atomic.h        |   13
-rw-r--r--  arch/m68k/include/asm/bitops.h        |  531
-rw-r--r--  arch/m68k/include/asm/bitops_mm.h     |  501
-rw-r--r--  arch/m68k/include/asm/bitops_no.h     |  333
-rw-r--r--  arch/m68k/include/asm/delay.h         |   97
-rw-r--r--  arch/m68k/include/asm/delay_mm.h      |   57
-rw-r--r--  arch/m68k/include/asm/delay_no.h      |   76
-rw-r--r--  arch/m68k/include/asm/entry.h         |  255
-rw-r--r--  arch/m68k/include/asm/entry_mm.h      |  128
-rw-r--r--  arch/m68k/include/asm/entry_no.h      |  181
-rw-r--r--  arch/m68k/include/asm/hardirq.h       |   40
-rw-r--r--  arch/m68k/include/asm/hardirq_mm.h    |   16
-rw-r--r--  arch/m68k/include/asm/hardirq_no.h    |   19
-rw-r--r--  arch/m68k/include/asm/irq.h           |   80
-rw-r--r--  arch/m68k/include/asm/m520xsim.h      |   26
-rw-r--r--  arch/m68k/include/asm/machdep.h       |    1
-rw-r--r--  arch/m68k/include/asm/macintosh.h     |    2
-rw-r--r--  arch/m68k/include/asm/mcfqspi.h       |    8
-rw-r--r--  arch/m68k/include/asm/module.h        |   31
-rw-r--r--  arch/m68k/include/asm/page_mm.h       |    2
-rw-r--r--  arch/m68k/include/asm/page_no.h       |    3
-rw-r--r--  arch/m68k/include/asm/posix_types.h   |    2
-rw-r--r--  arch/m68k/include/asm/processor.h     |   10
-rw-r--r--  arch/m68k/include/asm/ptrace.h        |    1
-rw-r--r--  arch/m68k/include/asm/q40ints.h       |    3
-rw-r--r--  arch/m68k/include/asm/sections.h      |    2
-rw-r--r--  arch/m68k/include/asm/signal.h        |   15
-rw-r--r--  arch/m68k/include/asm/system.h        |  194
-rw-r--r--  arch/m68k/include/asm/system_mm.h     |  193
-rw-r--r--  arch/m68k/include/asm/system_no.h     |  153
-rw-r--r--  arch/m68k/include/asm/traps.h         |    1
33 files changed, 1179 insertions, 1802 deletions
diff --git a/arch/m68k/include/asm/atari_stram.h b/arch/m68k/include/asm/atari_stram.h
index 7546d13963be..62e27598af91 100644
--- a/arch/m68k/include/asm/atari_stram.h
+++ b/arch/m68k/include/asm/atari_stram.h
@@ -6,12 +6,11 @@
*/
/* public interface */
-void *atari_stram_alloc(long size, const char *owner);
+void *atari_stram_alloc(unsigned long size, const char *owner);
void atari_stram_free(void *);
/* functions called internally by other parts of the kernel */
void atari_stram_init(void);
void atari_stram_reserve_pages(void *start_mem);
-void atari_stram_mem_init_hook (void);
#endif /*_M68K_ATARI_STRAM_H */
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index f51f709bbf30..0392b28656ab 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -399,8 +399,8 @@ struct CODEC
#define CODEC_OVERFLOW_LEFT 2
u_char unused2, unused3, unused4, unused5;
u_char gpio_directions;
-#define GPIO_IN 0
-#define GPIO_OUT 1
+#define CODEC_GPIO_IN 0
+#define CODEC_GPIO_OUT 1
u_char unused6;
u_char gpio_data;
};
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 03ae3d14cd4a..65c6be6c8180 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -169,21 +169,21 @@ static inline int atomic_add_negative(int i, atomic_t *v)
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
: "=d" (c), "+m" (*v)
- : "id" (i));
+ : ASM_DI (i));
return c != 0;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
+ __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
- __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
+ __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
@@ -195,10 +195,9 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
break;
c = old;
}
- return c != (u);
+ return c;
}
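The rename to __atomic_add_unless() and the switch to returning the old value match the convention where the generic atomic layer supplies the user-facing wrappers; a sketch of how that wrapper is typically built on the arch hook (an assumption about the generic code, not part of this patch), which is also why the local atomic_inc_not_zero define below can go:

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	/* "add unless equal" succeeded iff the old value was not u */
	return __atomic_add_unless(v, a, u) != u;
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)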
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
@@ -206,6 +205,4 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
-#include <asm-generic/atomic-long.h>
-#include <asm-generic/atomic64.h>
#endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index ce163abddaba..c6baa913592a 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -1,5 +1,530 @@
-#ifdef __uClinux__
-#include "bitops_no.h"
+#ifndef _M68K_BITOPS_H
+#define _M68K_BITOPS_H
+/*
+ * Copyright 1992, Linus Torvalds.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <linux/compiler.h>
+
+/*
+ * Bit access functions vary across the ColdFire and 68k families.
+ * So we will break them out here, and then macro in the ones we want.
+ *
+ * ColdFire - supports standard bset/bclr/bchg with register operand only
+ * 68000 - supports standard bset/bclr/bchg with memory operand
+ * >= 68020 - also supports the bfset/bfclr/bfchg instructions
+ *
+ * Although it is possible to use only the bset/bclr/bchg with register
+ * operands on all platforms you end up with larger generated code.
+ * So we use the best form possible on a given platform.
+ */
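For orientation: m68k numbers bits big-endian within each 32-bit long, which is where the (nr ^ 31) arithmetic below comes from. A hypothetical C sketch of that addressing (illustrative only, not part of the header):

static inline void sketch_set_bit(int nr, unsigned long *word)
{
	/* bit nr of a big-endian 32-bit long is bit (nr & 7) of byte (nr ^ 31) / 8 */
	unsigned char *p = (unsigned char *)word + ((nr ^ 31) / 8);

	*p |= 1u << (nr & 7);	/* what "bset #(nr & 7)" does to that byte */
}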
+
+static inline void bset_reg_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bset %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bset %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfset_mem_set_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfset %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define set_bit(nr, vaddr) bset_reg_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define set_bit(nr, vaddr) bset_mem_set_bit(nr, vaddr)
+#else
+#define set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bset_mem_set_bit(nr, vaddr) : \
+ bfset_mem_set_bit(nr, vaddr))
+#endif
+
+#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
+
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+static inline void bclr_reg_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bclr %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bclr %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfclr_mem_clear_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfclr %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define clear_bit(nr, vaddr) bclr_reg_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define clear_bit(nr, vaddr) bclr_mem_clear_bit(nr, vaddr)
+#else
+#define clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bclr_mem_clear_bit(nr, vaddr) : \
+ bfclr_mem_clear_bit(nr, vaddr))
+#endif
+
+#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
+
+
+static inline void bchg_reg_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bchg %1,(%0)"
+ :
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+}
+
+static inline void bchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+
+ __asm__ __volatile__ ("bchg %1,%0"
+ : "+m" (*p)
+ : "di" (nr & 7));
+}
+
+static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr)
+{
+ __asm__ __volatile__ ("bfchg %1{%0:#1}"
+ :
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define change_bit(nr, vaddr) bchg_reg_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define change_bit(nr, vaddr) bchg_mem_change_bit(nr, vaddr)
+#else
+#define change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bchg_mem_change_bit(nr, vaddr) : \
+ bfchg_mem_change_bit(nr, vaddr))
+#endif
+
+#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
+
+
+static inline int test_bit(int nr, const unsigned long *vaddr)
+{
+ return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
+}
+
+
+static inline int bset_reg_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bset %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bset_mem_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bset %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfset_mem_test_and_set_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_set_bit(nr, vaddr) bset_reg_test_and_set_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_set_bit(nr, vaddr) bset_mem_test_and_set_bit(nr, vaddr)
+#else
+#define test_and_set_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bset_mem_test_and_set_bit(nr, vaddr) : \
+ bfset_mem_test_and_set_bit(nr, vaddr))
+#endif
+
+#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
+
+
+static inline int bclr_reg_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bclr %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bclr_mem_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bclr %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfclr_mem_test_and_clear_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_clear_bit(nr, vaddr) bclr_reg_test_and_clear_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_clear_bit(nr, vaddr) bclr_mem_test_and_clear_bit(nr, vaddr)
+#else
+#define test_and_clear_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bclr_mem_test_and_clear_bit(nr, vaddr) : \
+ bfclr_mem_test_and_clear_bit(nr, vaddr))
+#endif
+
+#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
+
+
+static inline int bchg_reg_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bchg %2,(%1); sne %0"
+ : "=d" (retval)
+ : "a" (p), "di" (nr & 7)
+ : "memory");
+ return retval;
+}
+
+static inline int bchg_mem_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char *p = (char *)vaddr + (nr ^ 31) / 8;
+ char retval;
+
+ __asm__ __volatile__ ("bchg %2,%1; sne %0"
+ : "=d" (retval), "+m" (*p)
+ : "di" (nr & 7));
+ return retval;
+}
+
+static inline int bfchg_mem_test_and_change_bit(int nr,
+ volatile unsigned long *vaddr)
+{
+ char retval;
+
+ __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
+ : "=d" (retval)
+ : "d" (nr ^ 31), "o" (*vaddr)
+ : "memory");
+ return retval;
+}
+
+#if defined(CONFIG_COLDFIRE)
+#define test_and_change_bit(nr, vaddr) bchg_reg_test_and_change_bit(nr, vaddr)
+#elif defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#define test_and_change_bit(nr, vaddr) bchg_mem_test_and_change_bit(nr, vaddr)
+#else
+#define test_and_change_bit(nr, vaddr) (__builtin_constant_p(nr) ? \
+ bchg_mem_test_and_change_bit(nr, vaddr) : \
+ bfchg_mem_test_and_change_bit(nr, vaddr))
+#endif
+
+#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
+
+
+/*
+ * The true 68020 and more advanced processors support the "bfffo"
+ * instruction for finding bits. ColdFire and simple 68000 parts
+ * (including CPU32) do not support this. They simply use the generic
+ * functions.
+ */
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/ffz.h>
+#else
+
+static inline int find_first_zero_bit(const unsigned long *vaddr,
+ unsigned size)
+{
+ const unsigned long *p = vaddr;
+ int res = 32;
+ unsigned int words;
+ unsigned long num;
+
+ if (!size)
+ return 0;
+
+ words = (size + 31) >> 5;
+ while (!(num = ~*p++)) {
+ if (!--words)
+ goto out;
+ }
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ res ^= 31;
+out:
+ res += ((long)p - (long)vaddr - 4) * 8;
+ return res < size ? res : size;
+}
+#define find_first_zero_bit find_first_zero_bit
+
+static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
+ int offset)
+{
+ const unsigned long *p = vaddr + (offset >> 5);
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ unsigned long num = ~*p++ & (~0UL << bit);
+ offset -= bit;
+
+ /* Look for zero in first longword */
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ if (res < 32) {
+ offset += res ^ 31;
+ return offset < size ? offset : size;
+ }
+ offset += 32;
+
+ if (offset >= size)
+ return size;
+ }
+ /* No zero yet, search remaining full bytes for a zero */
+ return offset + find_first_zero_bit(p, size - offset);
+}
+#define find_next_zero_bit find_next_zero_bit
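A typical caller chains these two helpers to walk every clear bit; a minimal usage sketch (hypothetical caller, not part of the header):

static inline void visit_clear_bits(const unsigned long *map, int size)
{
	int bit;

	for (bit = find_first_zero_bit(map, size); bit < size;
	     bit = find_next_zero_bit(map, size, bit + 1)) {
		/* bit is the index of a clear bit in map */
	}
}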
+
+static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
+{
+ const unsigned long *p = vaddr;
+ int res = 32;
+ unsigned int words;
+ unsigned long num;
+
+ if (!size)
+ return 0;
+
+ words = (size + 31) >> 5;
+ while (!(num = *p++)) {
+ if (!--words)
+ goto out;
+ }
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ res ^= 31;
+out:
+ res += ((long)p - (long)vaddr - 4) * 8;
+ return res < size ? res : size;
+}
+#define find_first_bit find_first_bit
+
+static inline int find_next_bit(const unsigned long *vaddr, int size,
+ int offset)
+{
+ const unsigned long *p = vaddr + (offset >> 5);
+ int bit = offset & 31UL, res;
+
+ if (offset >= size)
+ return size;
+
+ if (bit) {
+ unsigned long num = *p++ & (~0UL << bit);
+ offset -= bit;
+
+ /* Look for one in first longword */
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (num & -num));
+ if (res < 32) {
+ offset += res ^ 31;
+ return offset < size ? offset : size;
+ }
+ offset += 32;
+
+ if (offset >= size)
+ return size;
+ }
+ /* No one yet, search remaining full bytes for a one */
+ return offset + find_first_bit(p, size - offset);
+}
+#define find_next_bit find_next_bit
+
+/*
+ * ffz = Find First Zero in word. Undefined if no zero exists,
+ * so code should check against ~0UL first..
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ int res;
+
+ __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
+ : "=d" (res) : "d" (~word & -~word));
+ return res ^ 31;
+}
+
+#endif
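For comparison, the bfffo-based ffz() above isolates the lowest clear bit with ~word & -~word and converts bfffo's MSB-relative offset back with the ^ 31; an equivalent portable sketch (assumes GCC builtins, illustrative only):

static inline unsigned long ffz_portable(unsigned long word)
{
	/* index of the lowest clear bit; caller must ensure word != ~0UL */
	return __builtin_ctzl(~word);
}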
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
+
+/*
+ * The newer ColdFire family members support a "bitrev" instruction
+ * and we can use that to implement a fast ffs. Older Coldfire parts,
+ * and normal 68000 parts don't have anything special, so we use the
+ * generic functions for those.
+ */
+#if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
+ !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
+static inline int __ffs(int x)
+{
+ __asm__ __volatile__ ("bitrev %0; ff1 %0"
+ : "=d" (x)
+ : "0" (x));
+ return x;
+}
+
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+ return __ffs(x) + 1;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#endif
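Whichever variant is selected, ffs() keeps the 1-based libc convention with ffs(0) == 0, while __ffs() is 0-based and undefined for 0; a few illustrative checks (hypothetical, not part of the header):

static inline int ffs_conventions_hold(void)
{
	return ffs(0) == 0 && ffs(1) == 1 && ffs(0x8000) == 16 &&
	       ffs(12) == 3 && __ffs(12) == 2;	/* __ffs(0) is undefined */
}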
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+
#else
-#include "bitops_mm.h"
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int cnt;
+
+ __asm__ ("bfffo %1{#0:#0},%0"
+ : "=d" (cnt)
+ : "dm" (x & -x));
+ return 32 - cnt;
+}
+#define __ffs(x) (ffs(x) - 1)
+
+/*
+ * fls: find last bit set.
+ */
+static inline int fls(int x)
+{
+ int cnt;
+
+ __asm__ ("bfffo %1{#0,#0},%0"
+ : "=d" (cnt)
+ : "dm" (x));
+ return 32 - cnt;
+}
+
+static inline int __fls(int x)
+{
+ return fls(x) - 1;
+}
+
#endif
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h
deleted file mode 100644
index 89cf5b814a4d..000000000000
--- a/arch/m68k/include/asm/bitops_mm.h
+++ /dev/null
@@ -1,501 +0,0 @@
-#ifndef _M68K_BITOPS_H
-#define _M68K_BITOPS_H
-/*
- * Copyright 1992, Linus Torvalds.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#include <linux/compiler.h>
-
-/*
- * Require 68020 or better.
- *
- * They use the standard big-endian m680x0 bit ordering.
- */
-
-#define test_and_set_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_set_bit(nr, vaddr) : \
- __generic_test_and_set_bit(nr, vaddr))
-
-#define __test_and_set_bit(nr,vaddr) test_and_set_bit(nr,vaddr)
-
-static inline int __constant_test_and_set_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_set_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfset %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-#define set_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_set_bit(nr, vaddr) : \
- __generic_set_bit(nr, vaddr))
-
-#define __set_bit(nr,vaddr) set_bit(nr,vaddr)
-
-static inline void __constant_set_bit(int nr, volatile unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bset %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_set_bit(int nr, volatile unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfset %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_clear_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_clear_bit(nr, vaddr) : \
- __generic_test_and_clear_bit(nr, vaddr))
-
-#define __test_and_clear_bit(nr,vaddr) test_and_clear_bit(nr,vaddr)
-
-static inline int __constant_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_clear_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfclr %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-#define clear_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_clear_bit(nr, vaddr) : \
- __generic_clear_bit(nr, vaddr))
-#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
-
-static inline void __constant_clear_bit(int nr, volatile unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bclr %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_clear_bit(int nr, volatile unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfclr %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-#define test_and_change_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_test_and_change_bit(nr, vaddr) : \
- __generic_test_and_change_bit(nr, vaddr))
-
-#define __test_and_change_bit(nr,vaddr) test_and_change_bit(nr,vaddr)
-#define __change_bit(nr,vaddr) change_bit(nr,vaddr)
-
-static inline int __constant_test_and_change_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- char retval;
-
- __asm__ __volatile__ ("bchg %2,%1; sne %0"
- : "=d" (retval), "+m" (*p)
- : "di" (nr & 7));
-
- return retval;
-}
-
-static inline int __generic_test_and_change_bit(int nr, unsigned long *vaddr)
-{
- char retval;
-
- __asm__ __volatile__ ("bfchg %2{%1:#1}; sne %0"
- : "=d" (retval) : "d" (nr^31), "o" (*vaddr) : "memory");
-
- return retval;
-}
-
-#define change_bit(nr,vaddr) \
- (__builtin_constant_p(nr) ? \
- __constant_change_bit(nr, vaddr) : \
- __generic_change_bit(nr, vaddr))
-
-static inline void __constant_change_bit(int nr, unsigned long *vaddr)
-{
- char *p = (char *)vaddr + (nr ^ 31) / 8;
- __asm__ __volatile__ ("bchg %1,%0"
- : "+m" (*p) : "di" (nr & 7));
-}
-
-static inline void __generic_change_bit(int nr, unsigned long *vaddr)
-{
- __asm__ __volatile__ ("bfchg %1{%0:#1}"
- : : "d" (nr^31), "o" (*vaddr) : "memory");
-}
-
-static inline int test_bit(int nr, const unsigned long *vaddr)
-{
- return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
-}
-
-static inline int find_first_zero_bit(const unsigned long *vaddr,
- unsigned size)
-{
- const unsigned long *p = vaddr;
- int res = 32;
- unsigned int words;
- unsigned long num;
-
- if (!size)
- return 0;
-
- words = (size + 31) >> 5;
- while (!(num = ~*p++)) {
- if (!--words)
- goto out;
- }
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- res ^= 31;
-out:
- res += ((long)p - (long)vaddr - 4) * 8;
- return res < size ? res : size;
-}
-#define find_first_zero_bit find_first_zero_bit
-
-static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
- int offset)
-{
- const unsigned long *p = vaddr + (offset >> 5);
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- if (bit) {
- unsigned long num = ~*p++ & (~0UL << bit);
- offset -= bit;
-
- /* Look for zero in first longword */
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- if (res < 32) {
- offset += res ^ 31;
- return offset < size ? offset : size;
- }
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No zero yet, search remaining full bytes for a zero */
- return offset + find_first_zero_bit(p, size - offset);
-}
-#define find_next_zero_bit find_next_zero_bit
-
-static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr;
- int res = 32;
- unsigned int words;
- unsigned long num;
-
- if (!size)
- return 0;
-
- words = (size + 31) >> 5;
- while (!(num = *p++)) {
- if (!--words)
- goto out;
- }
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- res ^= 31;
-out:
- res += ((long)p - (long)vaddr - 4) * 8;
- return res < size ? res : size;
-}
-#define find_first_bit find_first_bit
-
-static inline int find_next_bit(const unsigned long *vaddr, int size,
- int offset)
-{
- const unsigned long *p = vaddr + (offset >> 5);
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- if (bit) {
- unsigned long num = *p++ & (~0UL << bit);
- offset -= bit;
-
- /* Look for one in first longword */
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (num & -num));
- if (res < 32) {
- offset += res ^ 31;
- return offset < size ? offset : size;
- }
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No one yet, search remaining full bytes for a one */
- return offset + find_first_bit(p, size - offset);
-}
-#define find_next_bit find_next_bit
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static inline unsigned long ffz(unsigned long word)
-{
- int res;
-
- __asm__ __volatile__ ("bfffo %1{#0,#0},%0"
- : "=d" (res) : "d" (~word & -~word));
- return res ^ 31;
-}
-
-#ifdef __KERNEL__
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-
-static inline int ffs(int x)
-{
- int cnt;
-
- asm ("bfffo %1{#0:#0},%0" : "=d" (cnt) : "dm" (x & -x));
-
- return 32 - cnt;
-}
-#define __ffs(x) (ffs(x) - 1)
-
-/*
- * fls: find last bit set.
- */
-
-static inline int fls(int x)
-{
- int cnt;
-
- asm ("bfffo %1{#0,#0},%0" : "=d" (cnt) : "dm" (x));
-
- return 32 - cnt;
-}
-
-static inline int __fls(int x)
-{
- return fls(x) - 1;
-}
-
-#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-/* Bitmap functions for the little endian bitmap. */
-
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(nr ^ 24, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
- return __test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
- return test_and_set_bit(nr ^ 24, addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
- return __test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
- return test_and_clear_bit(nr ^ 24, addr);
-}
-
-static inline int test_bit_le(int nr, const void *vaddr)
-{
- const unsigned char *p = vaddr;
- return (p[nr >> 3] & (1U << (nr & 7))) != 0;
-}
-
-static inline int find_first_zero_bit_le(const void *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr, *addr = vaddr;
- int res = 0;
- unsigned int words;
-
- if (!size)
- return 0;
-
- words = (size >> 5) + ((size & 31) > 0);
- while (*p++ == ~0UL) {
- if (--words == 0)
- goto out;
- }
-
- --p;
- for (res = 0; res < 32; res++)
- if (!test_bit_le(res, p))
- break;
-out:
- res += (p - addr) * 32;
- return res < size ? res : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- p += offset >> 5;
-
- if (bit) {
- offset -= bit;
- /* Look for zero in first longword */
- for (res = bit; res < 32; res++)
- if (!test_bit_le(res, p)) {
- offset += res;
- return offset < size ? offset : size;
- }
- p++;
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No zero yet, search remaining full bytes for a zero */
- return offset + find_first_zero_bit_le(p, size - offset);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline int find_first_bit_le(const void *vaddr, unsigned size)
-{
- const unsigned long *p = vaddr, *addr = vaddr;
- int res = 0;
- unsigned int words;
-
- if (!size)
- return 0;
-
- words = (size >> 5) + ((size & 31) > 0);
- while (*p++ == 0UL) {
- if (--words == 0)
- goto out;
- }
-
- --p;
- for (res = 0; res < 32; res++)
- if (test_bit_le(res, p))
- break;
-out:
- res += (p - addr) * 32;
- return res < size ? res : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- const unsigned long *p = addr;
- int bit = offset & 31UL, res;
-
- if (offset >= size)
- return size;
-
- p += offset >> 5;
-
- if (bit) {
- offset -= bit;
- /* Look for one in first longword */
- for (res = bit; res < 32; res++)
- if (test_bit_le(res, p)) {
- offset += res;
- return offset < size ? offset : size;
- }
- p++;
- offset += 32;
-
- if (offset >= size)
- return size;
- }
- /* No set bit yet, search remaining full bytes for a set bit */
- return offset + find_first_bit_le(p, size - offset);
-}
-#define find_next_bit_le find_next_bit_le
-
-/* Bitmap functions for the ext2 filesystem. */
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit_le(nr, addr)
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit_le(nr, addr)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h
deleted file mode 100644
index 72e85acdd7bd..000000000000
--- a/arch/m68k/include/asm/bitops_no.h
+++ /dev/null
@@ -1,333 +0,0 @@
-#ifndef _M68KNOMMU_BITOPS_H
-#define _M68KNOMMU_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-#include <linux/compiler.h>
-#include <asm/byteorder.h> /* swab32 */
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-#if defined (__mcfisaaplus__) || defined (__mcfisac__)
-static inline int ffs(unsigned int val)
-{
- if (!val)
- return 0;
-
- asm volatile(
- "bitrev %0\n\t"
- "ff1 %0\n\t"
- : "=d" (val)
- : "0" (val)
- );
- val++;
- return val;
-}
-
-static inline int __ffs(unsigned int val)
-{
- asm volatile(
- "bitrev %0\n\t"
- "ff1 %0\n\t"
- : "=d" (val)
- : "0" (val)
- );
- return val;
-}
-
-#else
-#include <asm-generic/bitops/ffs.h>
-#include <asm-generic/bitops/__ffs.h>
-#endif
-
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffz.h>
-
-static __inline__ void set_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bset %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __set_bit(nr, addr) set_bit(nr, addr)
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
-static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bclr %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __clear_bit(nr, addr) clear_bit(nr, addr)
-
-static __inline__ void change_bit(int nr, volatile unsigned long * addr)
-{
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0", "cc");
-#else
- __asm__ __volatile__ ("bchg %1,%0"
- : "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- : "cc");
-#endif
-}
-
-#define __change_bit(nr, addr) change_bit(nr, addr)
-
-static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-
-static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-
-static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bchg %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
-
-/*
- * This routine doesn't need to be atomic.
- */
-static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
-{
- return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
-}
-
-static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
-{
- int * a = (int *) addr;
- int mask;
-
- a += nr >> 5;
- mask = 1 << (nr & 0x1f);
- return ((mask & *a) != 0);
-}
-
-#define test_bit(nr,addr) \
-(__builtin_constant_p(nr) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)))
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-
-#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bset %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("bclr %2,%1; sne %0"
- : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
- : "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#include <asm-generic/bitops/ext2-atomic.h>
-
-static inline int test_bit_le(int nr, const volatile void *addr)
-{
- char retval;
-
-#ifdef CONFIG_COLDFIRE
- __asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
- : "=d" (retval)
- : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
- : "%a0");
-#else
- __asm__ __volatile__ ("btst %2,%1; sne %0"
- : "=d" (retval)
- : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
- /* No clobber */);
-#endif
-
- return retval;
-}
-
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-
-static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
-{
- unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
- unsigned long result = offset & ~31UL;
- unsigned long tmp;
-
- if (offset >= size)
- return size;
- size -= result;
- offset &= 31UL;
- if(offset) {
- /* We hold the little endian value in tmp, but then the
- * shift is illegal. So we could keep a big endian value
- * in tmp, like this:
- *
- * tmp = __swab32(*(p++));
- * tmp |= ~0UL >> (32-offset);
- *
- * but this would decrease performance, so we change the
- * shift:
- */
- tmp = *(p++);
- tmp |= __swab32(~0UL >> (32-offset));
- if(size < 32)
- goto found_first;
- if(~tmp)
- goto found_middle;
- size -= 32;
- result += 32;
- }
- while(size & ~31UL) {
- if(~(tmp = *(p++)))
- goto found_middle;
- result += 32;
- size -= 32;
- }
- if(!size)
- return result;
- tmp = *p;
-
-found_first:
- /* tmp is little endian, so we would have to swab the shift,
- * see above. But then we have to swab tmp below for ffz, so
- * we might as well do this here.
- */
- return result + ffz(__swab32(tmp) | (~0UL << size));
-found_middle:
- return result + ffz(__swab32(tmp));
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-extern unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset);
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _M68KNOMMU_BITOPS_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index d2598e3dd7b2..9c09becfd4c9 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -1,5 +1,96 @@
-#ifdef __uClinux__
-#include "delay_no.h"
+#ifndef _M68K_DELAY_H
+#define _M68K_DELAY_H
+
+#include <asm/param.h>
+
+/*
+ * Copyright (C) 1994 Hamish Macdonald
+ * Copyright (C) 2004 Greg Ungerer <gerg@uclinux.com>
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#if defined(CONFIG_COLDFIRE)
+/*
+ * The ColdFire runs the delay loop at significantly different speeds
+ * depending upon long word alignment or not. We'll pad it to
+ * long word alignment which is the faster version.
+ * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
+ * than using a NOP (0x4e71) instruction because it executes in one
+ * cycle not three and doesn't allow for an arbitrary delay waiting
+ * for bus cycles to finish. Also fp/a6 isn't likely to cause a
+ * stall waiting for the register to become valid if such is added
+ * to the coldfire at some stage.
+ */
+#define DELAY_ALIGN ".balignw 4, 0x4a8e\n\t"
#else
-#include "delay_mm.h"
+/*
+ * No instruction alignment required for other m68k types.
+ */
+#define DELAY_ALIGN
#endif
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__ (
+ DELAY_ALIGN
+ "1: subql #1,%0\n\t"
+ "jcc 1b"
+ : "=d" (loops)
+ : "0" (loops));
+}
+
+extern void __bad_udelay(void);
+
+
+#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+/*
+ * The simpler m68k and ColdFire processors do not have a 32*32->64
+ * multiply instruction. So we need to handle them a little differently.
+ * We use a bit of shifting and a single 32*32->32 multiply to get close.
+ * This is a macro so that the const version can factor out the first
+ * multiply and shift.
+ */
+#define HZSCALE (268435456 / (1000000 / HZ))
+
+#define __const_udelay(u) \
+ __delay(((((u) * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6)
+
+#else
+
+static inline void __xdelay(unsigned long xloops)
+{
+ unsigned long tmp;
+
+ __asm__ ("mulul %2,%0:%1"
+ : "=d" (xloops), "=d" (tmp)
+ : "d" (xloops), "1" (loops_per_jiffy));
+ __delay(xloops * HZ);
+}
+
+/*
+ * The definition of __const_udelay is specifically made a macro so that
+ * the const factor (4295 = 2**32 / 1000000) can be optimized out when
+ * the delay is a const.
+ */
+#define __const_udelay(n) (__xdelay((n) * 4295))
+
+#endif
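Either way the intent is loops ≈ usecs * HZ * loops_per_jiffy / 10^6; a numeric check of the 32-bit-only scaling used in the HZSCALE branch (illustrative, assuming HZ = 100 and loops_per_jiffy = 500000):

#include <stdio.h>

int main(void)
{
	/* assumed example values: HZ = 100, loops_per_jiffy = 500000, 10 usec delay */
	unsigned long hz = 100, lpj = 500000, usecs = 10;
	unsigned long hzscale = 268435456 / (1000000 / hz);
	unsigned long loops = (((usecs * hzscale) >> 11) * (lpj >> 11)) >> 6;

	printf("%lu\n", loops);	/* prints 499; exact usecs * hz * lpj / 1000000 = 500 */
	return 0;
}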
+
+static inline void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs);
+}
+
+/*
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplications gets optimized away if the delay is
+ * a constant)
+ */
+#define udelay(n) (__builtin_constant_p(n) ? \
+ ((n) > 20000 ? __bad_udelay() : __const_udelay(n)) : __udelay(n))
+
+
+#endif /* defined(_M68K_DELAY_H) */
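Note the constant-folding guard in the udelay() macro above: a compile-time constant above 20000 microseconds becomes a call to the never-defined __bad_udelay(), so the mistake surfaces as a link error. A usage sketch (hypothetical caller):

void delay_examples(unsigned long n)
{
	udelay(5);	/* constant <= 20000: compiles to __const_udelay(5) */
	udelay(n);	/* not a compile-time constant: uses __udelay(n) */
	/* udelay(50000);  -- constant > 20000: fails to link via __bad_udelay() */
}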
diff --git a/arch/m68k/include/asm/delay_mm.h b/arch/m68k/include/asm/delay_mm.h
deleted file mode 100644
index 5ed92851bc6f..000000000000
--- a/arch/m68k/include/asm/delay_mm.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#ifndef _M68K_DELAY_H
-#define _M68K_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- *
- * Delay routines, using a pre-computed "loops_per_jiffy" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
- __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
- : "=d" (loops) : "0" (loops));
-}
-
-extern void __bad_udelay(void);
-
-/*
- * Use only for very small delays ( < 1 msec). Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays. This is a "reasonable" implementation, though (and the
- * first constant multiplications gets optimized away if the delay is
- * a constant)
- */
-static inline void __const_udelay(unsigned long xloops)
-{
- unsigned long tmp;
-
- __asm__ ("mulul %2,%0:%1"
- : "=d" (xloops), "=d" (tmp)
- : "d" (xloops), "1" (loops_per_jiffy));
- __delay(xloops * HZ);
-}
-
-static inline void __udelay(unsigned long usecs)
-{
- __const_udelay(usecs * 4295); /* 2**32 / 1000000 */
-}
-
-#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 4295)) : \
- __udelay(n))
-
-static inline unsigned long muldiv(unsigned long a, unsigned long b,
- unsigned long c)
-{
- unsigned long tmp;
-
- __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
- : "=d" (tmp), "=d" (a)
- : "d" (b), "d" (c), "1" (a));
- return a;
-}
-
-#endif /* defined(_M68K_DELAY_H) */
diff --git a/arch/m68k/include/asm/delay_no.h b/arch/m68k/include/asm/delay_no.h
deleted file mode 100644
index c3a0edc90f21..000000000000
--- a/arch/m68k/include/asm/delay_no.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _M68KNOMMU_DELAY_H
-#define _M68KNOMMU_DELAY_H
-
-/*
- * Copyright (C) 1994 Hamish Macdonald
- * Copyright (C) 2004 Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/param.h>
-
-static inline void __delay(unsigned long loops)
-{
-#if defined(CONFIG_COLDFIRE)
- /* The coldfire runs this loop at significantly different speeds
- * depending upon long word alignment or not. We'll pad it to
- * long word alignment which is the faster version.
- * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
- * than using a NOP (0x4e71) instruction because it executes in one
- * cycle not three and doesn't allow for an arbitrary delay waiting
- * for bus cycles to finish. Also fp/a6 isn't likely to cause a
- * stall waiting for the register to become valid if such is added
- * to the coldfire at some stage.
- */
- __asm__ __volatile__ ( ".balignw 4, 0x4a8e\n\t"
- "1: subql #1, %0\n\t"
- "jcc 1b"
- : "=d" (loops) : "0" (loops));
-#else
- __asm__ __volatile__ ( "1: subql #1, %0\n\t"
- "jcc 1b"
- : "=d" (loops) : "0" (loops));
-#endif
-}
-
-/*
- * Ideally we use a 32*32->64 multiply to calculate the number of
- * loop iterations, but the older standard 68k and ColdFire do not
- * have this instruction. So for them we have a clsoe approximation
- * loop using 32*32->32 multiplies only. This calculation based on
- * the ARM version of delay.
- *
- * We want to implement:
- *
- * loops = (usecs * 0x10c6 * HZ * loops_per_jiffy) / 2^32
- */
-
-#define HZSCALE (268435456 / (1000000/HZ))
-
-extern unsigned long loops_per_jiffy;
-
-static inline void _udelay(unsigned long usecs)
-{
-#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
- defined(CONFIG_M68VZ328) || defined(CONFIG_M68360) || \
- defined(CONFIG_COLDFIRE)
- __delay((((usecs * HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6);
-#else
- unsigned long tmp;
-
- usecs *= 4295; /* 2**32 / 1000000 */
- __asm__ ("mulul %2,%0:%1"
- : "=d" (usecs), "=d" (tmp)
- : "d" (usecs), "1" (loops_per_jiffy*HZ));
- __delay(usecs);
-#endif
-}
-
-/*
- * Moved the udelay() function into library code, no longer inlined.
- * I had to change the algorithm because we are overflowing now on
- * the faster ColdFire parts. The code is a little bigger, so it makes
- * sense to library it.
- */
-extern void udelay(unsigned long usecs);
-
-#endif /* defined(_M68KNOMMU_DELAY_H) */
diff --git a/arch/m68k/include/asm/entry.h b/arch/m68k/include/asm/entry.h
index 876eec6f2b52..c3c5a8643e15 100644
--- a/arch/m68k/include/asm/entry.h
+++ b/arch/m68k/include/asm/entry.h
@@ -1,5 +1,254 @@
-#ifdef __uClinux__
-#include "entry_no.h"
+#ifndef __M68K_ENTRY_H
+#define __M68K_ENTRY_H
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#ifdef __ASSEMBLY__
+#include <asm/thread_info.h>
+#endif
+
+/*
+ * Stack layout in 'ret_from_exception':
+ *
+ * This allows access to the syscall arguments in registers d1-d5
+ *
+ * 0(sp) - d1
+ * 4(sp) - d2
+ * 8(sp) - d3
+ * C(sp) - d4
+ * 10(sp) - d5
+ * 14(sp) - a0
+ * 18(sp) - a1
+ * 1C(sp) - a2
+ * 20(sp) - d0
+ * 24(sp) - orig_d0
+ * 28(sp) - stack adjustment
+ * 2C(sp) - [ sr ] [ format & vector ]
+ * 2E(sp) - [ pc-hiword ] [ sr ]
+ * 30(sp) - [ pc-loword ] [ pc-hiword ]
+ * 32(sp) - [ format & vector ] [ pc-loword ]
+ * ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
+ * M68K COLDFIRE
+ */
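Read as a C structure, the layout above corresponds to the pt_regs frame saved by the macros that follow; a rough sketch for orientation (classic m68k column only; field names assumed, the real definition lives in <asm/ptrace.h>):

struct frame_sketch {			/* offsets from %sp, classic m68k */
	long d1, d2, d3, d4, d5;	/* 0x00 .. 0x10 */
	long a0, a1, a2;		/* 0x14 .. 0x1c */
	long d0;			/* 0x20 */
	long orig_d0;			/* 0x24 */
	long stkadj;			/* 0x28 */
	unsigned short sr;		/* 0x2c */
	unsigned long pc;		/* 0x2e */
	unsigned short format_vector;	/* 0x32: format & vector */
} __attribute__((packed));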
+
+/* the following macro is used when enabling interrupts */
+#if defined(MACH_ATARI_ONLY)
+ /* block out HSYNC on the atari */
+#define ALLOWINT (~0x400)
+#define MAX_NOINT_IPL 3
#else
-#include "entry_mm.h"
+ /* portable version */
+#define ALLOWINT (~0x700)
+#define MAX_NOINT_IPL 0
+#endif /* machine compilation types */
+
+#ifdef __ASSEMBLY__
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs a3-a6 and d6-d7 are preserved by C code
+ * the kernel doesn't mess with usp unless it needs to
+ */
+#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
+
+#ifdef CONFIG_COLDFIRE
+#ifdef CONFIG_COLDFIRE_SW_A7
+/*
+ * This is made a little more tricky on older ColdFires. There is no
+ * separate supervisor and user stack pointers. Need to artificially
+ * construct a usp in software... When doing this we need to disable
+ * interrupts, otherwise bad things will happen.
+ */
+.globl sw_usp
+.globl sw_ksp
+
+.macro SAVE_ALL_SYS
+ move #0x2700,%sr /* disable intrs */
+ btst #5,%sp@(2) /* from user? */
+ bnes 6f /* no, skip */
+ movel %sp,sw_usp /* save user sp */
+ addql #8,sw_usp /* remove exception */
+ movel sw_ksp,%sp /* kernel sp */
+ subql #8,%sp /* room for exception */
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+ movel sw_usp,%a0 /* get usp */
+ movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
+ movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
+ bra 7f
+ 6:
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+ 7:
+.endm
+
+.macro SAVE_ALL_INT
+ SAVE_ALL_SYS
+ moveq #-1,%d0 /* not system call entry */
+ movel %d0,%sp@(PT_OFF_ORIG_D0)
+.endm
+
+.macro RESTORE_USER
+ move #0x2700,%sr /* disable intrs */
+ movel sw_usp,%a0 /* get usp */
+ movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
+ movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
+ moveml %sp@,%d1-%d5/%a0-%a2
+ lea %sp@(32),%sp /* space for 8 regs */
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stkadj */
+ addql #8,%sp /* remove exception */
+ movel %sp,sw_ksp /* save ksp */
+ subql #8,sw_usp /* set exception */
+ movel sw_usp,%sp /* restore usp */
+ rte
+.endm
+
+.macro RDUSP
+ movel sw_usp,%a3
+.endm
+
+.macro WRUSP
+ movel %a3,sw_usp
+.endm
+
+#else /* !CONFIG_COLDFIRE_SW_A7 */
+/*
+ * Modern ColdFire parts have separate supervisor and user stack
+ * pointers. Simple load and restore macros for this case.
+ */
+.macro SAVE_ALL_SYS
+ move #0x2700,%sr /* disable intrs */
+ clrl %sp@- /* stkadj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+.endm
+
+.macro SAVE_ALL_INT
+ move #0x2700,%sr /* disable intrs */
+ clrl %sp@- /* stkadj */
+ pea -1:w /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ lea %sp@(-32),%sp /* space for 8 regs */
+ moveml %d1-%d5/%a0-%a2,%sp@
+.endm
+
+.macro RESTORE_USER
+ moveml %sp@,%d1-%d5/%a0-%a2
+ lea %sp@(32),%sp /* space for 8 regs */
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stkadj */
+ rte
+.endm
+
+.macro RDUSP
+ /*move %usp,%a3*/
+ .word 0x4e6b
+.endm
+
+.macro WRUSP
+ /*move %a3,%usp*/
+ .word 0x4e63
+.endm
+
+#endif /* !CONFIG_COLDFIRE_SW_A7 */
+
+.macro SAVE_SWITCH_STACK
+ lea %sp@(-24),%sp /* 6 regs */
+ moveml %a3-%a6/%d6-%d7,%sp@
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ moveml %sp@,%a3-%a6/%d6-%d7
+ lea %sp@(24),%sp /* 6 regs */
+.endm
+
+#else /* !CONFIG_COLDFIRE */
+
+/*
+ * All other types of m68k parts (68000, 680x0, CPU32) have the same
+ * entry and exit code.
+ */
+
+/*
+ * a -1 in the orig_d0 field signifies
+ * that the stack frame is NOT for syscall
+ */
+.macro SAVE_ALL_INT
+ clrl %sp@- /* stk_adj */
+ pea -1:w /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ moveml %d1-%d5/%a0-%a2,%sp@-
+.endm
+
+.macro SAVE_ALL_SYS
+ clrl %sp@- /* stk_adj */
+ movel %d0,%sp@- /* orig d0 */
+ movel %d0,%sp@- /* d0 */
+ moveml %d1-%d5/%a0-%a2,%sp@-
+.endm
+
+.macro RESTORE_ALL
+ moveml %sp@+,%a0-%a2/%d1-%d5
+ movel %sp@+,%d0
+ addql #4,%sp /* orig d0 */
+ addl %sp@+,%sp /* stk adj */
+ rte
+.endm
+
+
+.macro SAVE_SWITCH_STACK
+ moveml %a3-%a6/%d6-%d7,%sp@-
+.endm
+
+.macro RESTORE_SWITCH_STACK
+ moveml %sp@+,%a3-%a6/%d6-%d7
+.endm
+
+#endif /* !CONFIG_COLDFIRE */
+
+/*
+ * Register %a2 is reserved and set to current task on MMU enabled systems.
+ * Non-MMU systems do not reserve %a2 in this way, and this definition is
+ * not used for them.
+ */
+#define curptr a2
+
+#define GET_CURRENT(tmp) get_current tmp
+.macro get_current reg=%d0
+ movel %sp,\reg
+ andw #-THREAD_SIZE,\reg
+ movel \reg,%curptr
+ movel %curptr@,%curptr
+.endm
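get_current works because the kernel stack is THREAD_SIZE aligned with struct thread_info at its base and the task pointer as its first word; the same idea expressed as C (a sketch, assuming 'task' is the first member of struct thread_info):

static inline struct task_struct *get_current_sketch(void)
{
	unsigned long sp;

	__asm__ ("movel %%sp,%0" : "=d" (sp));
	/* mask down to the thread_info base and read its first member */
	return ((struct thread_info *)(sp & ~(THREAD_SIZE - 1)))->task;
}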
+
+#else /* C source */
+
+#define STR(X) STR1(X)
+#define STR1(X) #X
+
+#define SAVE_ALL_INT \
+ "clrl %%sp@-;" /* stk_adj */ \
+ "pea -1:w;" /* orig d0 = -1 */ \
+ "movel %%d0,%%sp@-;" /* d0 */ \
+ "moveml %%d1-%%d5/%%a0-%%a2,%%sp@-"
+
+#define GET_CURRENT(tmp) \
+ "movel %%sp,"#tmp"\n\t" \
+ "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
+ "movel "#tmp",%%a2\n\t" \
+ "movel %%a2@,%%a2"
+
#endif
+
+#endif /* __M68K_ENTRY_H */
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
deleted file mode 100644
index 73b8c8fbed9c..000000000000
--- a/arch/m68k/include/asm/entry_mm.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#ifndef __M68K_ENTRY_H
-#define __M68K_ENTRY_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#ifdef __ASSEMBLY__
-#include <asm/thread_info.h>
-#endif
-
-/*
- * Stack layout in 'ret_from_exception':
- *
- * This allows access to the syscall arguments in registers d1-d5
- *
- * 0(sp) - d1
- * 4(sp) - d2
- * 8(sp) - d3
- * C(sp) - d4
- * 10(sp) - d5
- * 14(sp) - a0
- * 18(sp) - a1
- * 1C(sp) - a2
- * 20(sp) - d0
- * 24(sp) - orig_d0
- * 28(sp) - stack adjustment
- * 2C(sp) - sr
- * 2E(sp) - pc
- * 32(sp) - format & vector
- */
-
-/*
- * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
- * the whole kernel.
- */
-
-/* the following macro is used when enabling interrupts */
-#if defined(MACH_ATARI_ONLY)
- /* block out HSYNC on the atari */
-#define ALLOWINT (~0x400)
-#define MAX_NOINT_IPL 3
-#else
- /* portable version */
-#define ALLOWINT (~0x700)
-#define MAX_NOINT_IPL 0
-#endif /* machine compilation types */
-
-#ifdef __ASSEMBLY__
-
-#define curptr a2
-
-LFLUSH_I_AND_D = 0x00000808
-
-#define SAVE_ALL_INT save_all_int
-#define SAVE_ALL_SYS save_all_sys
-#define RESTORE_ALL restore_all
-/*
- * This defines the normal kernel pt-regs layout.
- *
- * regs a3-a6 and d6-d7 are preserved by C code
- * the kernel doesn't mess with usp unless it needs to
- */
-
-/*
- * a -1 in the orig_d0 field signifies
- * that the stack frame is NOT for syscall
- */
-.macro save_all_int
- clrl %sp@- | stk_adj
- pea -1:w | orig d0
- movel %d0,%sp@- | d0
- moveml %d1-%d5/%a0-%a1/%curptr,%sp@-
-.endm
-
-.macro save_all_sys
- clrl %sp@- | stk_adj
- movel %d0,%sp@- | orig d0
- movel %d0,%sp@- | d0
- moveml %d1-%d5/%a0-%a1/%curptr,%sp@-
-.endm
-
-.macro restore_all
- moveml %sp@+,%a0-%a1/%curptr/%d1-%d5
- movel %sp@+,%d0
- addql #4,%sp | orig d0
- addl %sp@+,%sp | stk adj
- rte
-.endm
-
-#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
-
-#define SAVE_SWITCH_STACK save_switch_stack
-#define RESTORE_SWITCH_STACK restore_switch_stack
-#define GET_CURRENT(tmp) get_current tmp
-
-.macro save_switch_stack
- moveml %a3-%a6/%d6-%d7,%sp@-
-.endm
-
-.macro restore_switch_stack
- moveml %sp@+,%a3-%a6/%d6-%d7
-.endm
-
-.macro get_current reg=%d0
- movel %sp,\reg
- andw #-THREAD_SIZE,\reg
- movel \reg,%curptr
- movel %curptr@,%curptr
-.endm
-
-#else /* C source */
-
-#define STR(X) STR1(X)
-#define STR1(X) #X
-
-#define SAVE_ALL_INT \
- "clrl %%sp@-;" /* stk_adj */ \
- "pea -1:w;" /* orig d0 = -1 */ \
- "movel %%d0,%%sp@-;" /* d0 */ \
- "moveml %%d1-%%d5/%%a0-%%a2,%%sp@-"
-#define GET_CURRENT(tmp) \
- "movel %%sp,"#tmp"\n\t" \
- "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
- "movel "#tmp",%%a2\n\t" \
- "movel %%a2@,%%a2"
-
-#endif
-
-#endif /* __M68K_ENTRY_H */
diff --git a/arch/m68k/include/asm/entry_no.h b/arch/m68k/include/asm/entry_no.h
deleted file mode 100644
index 627d69bacc58..000000000000
--- a/arch/m68k/include/asm/entry_no.h
+++ /dev/null
@@ -1,181 +0,0 @@
-#ifndef __M68KNOMMU_ENTRY_H
-#define __M68KNOMMU_ENTRY_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-/*
- * Stack layout in 'ret_from_exception':
- *
- * This allows access to the syscall arguments in registers d1-d5
- *
- * 0(sp) - d1
- * 4(sp) - d2
- * 8(sp) - d3
- * C(sp) - d4
- * 10(sp) - d5
- * 14(sp) - a0
- * 18(sp) - a1
- * 1C(sp) - a2
- * 20(sp) - d0
- * 24(sp) - orig_d0
- * 28(sp) - stack adjustment
- * 2C(sp) - [ sr ] [ format & vector ]
- * 2E(sp) - [ pc-hiword ] [ sr ]
- * 30(sp) - [ pc-loword ] [ pc-hiword ]
- * 32(sp) - [ format & vector ] [ pc-loword ]
- * ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^
- * M68K COLDFIRE
- */
-
-#define ALLOWINT (~0x700)
-
-#ifdef __ASSEMBLY__
-
-#define SWITCH_STACK_SIZE (6*4+4) /* Includes return address */
-
-/*
- * This defines the normal kernel pt-regs layout.
- *
- * regs are a2-a6 and d6-d7 preserved by C code
- * the kernel doesn't mess with usp unless it needs to
- */
-
-#ifdef CONFIG_COLDFIRE
-#ifdef CONFIG_COLDFIRE_SW_A7
-/*
- * This is made a little more tricky on older ColdFires. There is no
- * separate supervisor and user stack pointers. Need to artificially
- * construct a usp in software... When doing this we need to disable
- * interrupts, otherwise bad things will happen.
- */
-.globl sw_usp
-.globl sw_ksp
-
-.macro SAVE_ALL
- move #0x2700,%sr /* disable intrs */
- btst #5,%sp@(2) /* from user? */
- bnes 6f /* no, skip */
- movel %sp,sw_usp /* save user sp */
- addql #8,sw_usp /* remove exception */
- movel sw_ksp,%sp /* kernel sp */
- subql #8,%sp /* room for exception */
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
- movel sw_usp,%a0 /* get usp */
- movel %a0@-,%sp@(PT_OFF_PC) /* copy exception program counter */
- movel %a0@-,%sp@(PT_OFF_FORMATVEC)/*copy exception format/vector/sr */
- bra 7f
- 6:
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
- 7:
-.endm
-
-.macro RESTORE_USER
- move #0x2700,%sr /* disable intrs */
- movel sw_usp,%a0 /* get usp */
- movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */
- movel %sp@(PT_OFF_FORMATVEC),%a0@-/*copy exception format/vector/sr */
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- addql #8,%sp /* remove exception */
- movel %sp,sw_ksp /* save ksp */
- subql #8,sw_usp /* set exception */
- movel sw_usp,%sp /* restore usp */
- rte
-.endm
-
-.macro RDUSP
- movel sw_usp,%a2
-.endm
-
-.macro WRUSP
- movel %a0,sw_usp
-.endm
-
-#else /* !CONFIG_COLDFIRE_SW_A7 */
-/*
- * Modern ColdFire parts have separate supervisor and user stack
- * pointers. Simple load and restore macros for this case.
- */
-.macro SAVE_ALL
- move #0x2700,%sr /* disable intrs */
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- lea %sp@(-32),%sp /* space for 8 regs */
- moveml %d1-%d5/%a0-%a2,%sp@
-.endm
-
-.macro RESTORE_USER
- moveml %sp@,%d1-%d5/%a0-%a2
- lea %sp@(32),%sp /* space for 8 regs */
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
-.endm
-
-.macro RDUSP
- /*move %usp,%a2*/
- .word 0x4e6a
-.endm
-
-.macro WRUSP
- /*move %a0,%usp*/
- .word 0x4e60
-.endm
-
-#endif /* !CONFIG_COLDFIRE_SW_A7 */
-
-.macro SAVE_SWITCH_STACK
- lea %sp@(-24),%sp /* 6 regs */
- moveml %a3-%a6/%d6-%d7,%sp@
-.endm
-
-.macro RESTORE_SWITCH_STACK
- moveml %sp@,%a3-%a6/%d6-%d7
- lea %sp@(24),%sp /* 6 regs */
-.endm
-
-#else /* !CONFIG_COLDFIRE */
-
-/*
- * Standard 68k interrupt entry and exit macros.
- */
-.macro SAVE_ALL
- clrl %sp@- /* stkadj */
- movel %d0,%sp@- /* orig d0 */
- movel %d0,%sp@- /* d0 */
- moveml %d1-%d5/%a0-%a2,%sp@-
-.endm
-
-.macro RESTORE_ALL
- moveml %sp@+,%a0-%a2/%d1-%d5
- movel %sp@+,%d0
- addql #4,%sp /* orig d0 */
- addl %sp@+,%sp /* stkadj */
- rte
-.endm
-
-.macro SAVE_SWITCH_STACK
- moveml %a3-%a6/%d6-%d7,%sp@-
-.endm
-
-.macro RESTORE_SWITCH_STACK
- moveml %sp@+,%a3-%a6/%d6-%d7
-.endm
-
-#endif /* !COLDFIRE_SW_A7 */
-#endif /* __ASSEMBLY__ */
-#endif /* __M68KNOMMU_ENTRY_H */
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
index 56d0d5db231c..db30ed276878 100644
--- a/arch/m68k/include/asm/hardirq.h
+++ b/arch/m68k/include/asm/hardirq.h
@@ -1,5 +1,39 @@
-#ifdef __uClinux__
-#include "hardirq_no.h"
+#ifndef __M68K_HARDIRQ_H
+#define __M68K_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/cache.h>
+#include <asm/irq.h>
+
+#define HARDIRQ_BITS 8
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#ifdef CONFIG_MMU
+
+static inline void ack_bad_irq(unsigned int irq)
+{
+ pr_crit("unexpected IRQ trap at vector %02x\n", irq);
+}
+
+/* entry.S is sensitive to the offsets of these fields */
+typedef struct {
+ unsigned int __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
#else
-#include "hardirq_mm.h"
+
+#include <asm-generic/hardirq.h>
+
+#endif /* !CONFIG_MMU */
+
#endif
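
For reference, HARDIRQ_BITS is the width of the hardirq field in the preempt count, so a value of 8 lets up to 1 << 8 = 256 distinct interrupt sources nest on one CPU. The build-time check above exists so that a platform whose NR_IRQS ever exceeded that (say a hypothetical value of 300) would fail to compile instead of silently overflowing the counter at run time.
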
diff --git a/arch/m68k/include/asm/hardirq_mm.h b/arch/m68k/include/asm/hardirq_mm.h
deleted file mode 100644
index 394ee946015c..000000000000
--- a/arch/m68k/include/asm/hardirq_mm.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
- unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
-
-#define HARDIRQ_BITS 8
-
-#endif
diff --git a/arch/m68k/include/asm/hardirq_no.h b/arch/m68k/include/asm/hardirq_no.h
deleted file mode 100644
index b44b14be87d9..000000000000
--- a/arch/m68k/include/asm/hardirq_no.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS 8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif /* __M68K_HARDIRQ_H */
diff --git a/arch/m68k/include/asm/irq.h b/arch/m68k/include/asm/irq.h
index 907eff1edd2f..6198df5ff245 100644
--- a/arch/m68k/include/asm/irq.h
+++ b/arch/m68k/include/asm/irq.h
@@ -27,20 +27,6 @@
#ifdef CONFIG_MMU
-#include <linux/linkage.h>
-#include <linux/hardirq.h>
-#include <linux/irqreturn.h>
-#include <linux/spinlock_types.h>
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
/*
* Interrupt source definitions
* General interrupt sources are the level 1-7.
@@ -63,10 +49,6 @@
#define IRQ_USER 8
-extern unsigned int irq_canonicalize(unsigned int irq);
-
-struct pt_regs;
-
/*
* various flags for request_irq() - the Amiga now uses the standard
* mechanism like all other architectures - IRQF_DISABLED and
@@ -80,55 +62,27 @@ struct pt_regs;
#define IRQ_FLG_STD (0x8000) /* internally used */
#endif
-/*
- * This structure is used to chain together the ISRs for a particular
- * interrupt source (if it supports chaining).
- */
-typedef struct irq_node {
- irqreturn_t (*handler)(int, void *);
- void *dev_id;
- struct irq_node *next;
- unsigned long flags;
- const char *devname;
-} irq_node_t;
+struct irq_data;
+struct irq_chip;
+struct irq_desc;
+extern unsigned int m68k_irq_startup(struct irq_data *data);
+extern unsigned int m68k_irq_startup_irq(unsigned int irq);
+extern void m68k_irq_shutdown(struct irq_data *data);
+extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int,
+ struct pt_regs *));
+extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt);
+extern void m68k_setup_irq_controller(struct irq_chip *,
+ void (*handle)(unsigned int irq,
+ struct irq_desc *desc),
+ unsigned int irq, unsigned int cnt);
-/*
- * This structure has only 4 elements for speed reasons
- */
-struct irq_handler {
- int (*handler)(int, void *);
- unsigned long flags;
- void *dev_id;
- const char *devname;
-};
-
-struct irq_controller {
- const char *name;
- spinlock_t lock;
- int (*startup)(unsigned int irq);
- void (*shutdown)(unsigned int irq);
- void (*enable)(unsigned int irq);
- void (*disable)(unsigned int irq);
-};
-
-extern int m68k_irq_startup(unsigned int);
-extern void m68k_irq_shutdown(unsigned int);
-
-/*
- * This function returns a new irq_node_t
- */
-extern irq_node_t *new_irq_node(void);
-
-extern void m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *));
-extern void m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt,
- void (*handler)(unsigned int, struct pt_regs *));
-extern void m68k_setup_irq_controller(struct irq_controller *, unsigned int, unsigned int);
-
-asmlinkage void m68k_handle_int(unsigned int);
-asmlinkage void __m68k_handle_int(unsigned int, struct pt_regs *);
+extern unsigned int irq_canonicalize(unsigned int irq);
#else
#define irq_canonicalize(irq) (irq)
#endif /* CONFIG_MMU */
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
+extern atomic_t irq_err_count;
+
#endif /* _M68K_IRQ_H_ */
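
With the old irq_node/irq_controller chaining gone, platform code registers its interrupt sources through the genirq-style helpers declared above. A minimal registration sketch, purely illustrative: the chip, the init function name and the choice of eight user vectors are assumptions; mac_irq_enable()/mac_irq_disable() are the hooks this series declares in macintosh.h, and handle_simple_irq() is the generic flow handler with the matching (irq, desc) signature.

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/macintosh.h>

static struct irq_chip example_irq_chip = {
	.name		= "example",		/* made-up chip name */
	.irq_enable	= mac_irq_enable,
	.irq_disable	= mac_irq_disable,
};

static void __init example_init_IRQ(void)
{
	/* Hand IRQ_USER .. IRQ_USER+7 (a hypothetical range) to the chip,
	 * dispatching through the generic simple flow handler. */
	m68k_setup_irq_controller(&example_irq_chip, handle_simple_irq,
				  IRQ_USER, 8);
}
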
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index b6bf2c518bac..eda62de7e607 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -90,15 +90,13 @@
#define MCFGPIO_PDDR_FECH 0xFC0A4013
#define MCFGPIO_PDDR_FECL 0xFC0A4014
-#define MCFGPIO_PPDSDR_BUSCTL 0xFC0A401A
-#define MCFGPIO_PPDSDR_BE 0xFC0A401B
-#define MCFGPIO_PPDSDR_CS 0xFC0A401C
-#define MCFGPIO_PPDSDR_FECI2C 0xFC0A401D
-#define MCFGPIO_PPDSDR_QSPI 0xFC0A401E
-#define MCFGPIO_PPDSDR_TIMER 0xFC0A401F
-#define MCFGPIO_PPDSDR_UART 0xFC0A4021
-#define MCFGPIO_PPDSDR_FECH 0xFC0A4021
-#define MCFGPIO_PPDSDR_FECL 0xFC0A4022
+#define MCFGPIO_PPDSDR_CS 0xFC0A401A
+#define MCFGPIO_PPDSDR_FECI2C 0xFC0A401B
+#define MCFGPIO_PPDSDR_QSPI 0xFC0A401C
+#define MCFGPIO_PPDSDR_TIMER 0xFC0A401D
+#define MCFGPIO_PPDSDR_UART 0xFC0A401E
+#define MCFGPIO_PPDSDR_FECH 0xFC0A401F
+#define MCFGPIO_PPDSDR_FECL 0xFC0A4020
#define MCFGPIO_PCLRR_BUSCTL 0xFC0A4024
#define MCFGPIO_PCLRR_BE 0xFC0A4025
@@ -113,11 +111,11 @@
/*
* Generic GPIO support
*/
-#define MCFGPIO_PODR MCFGPIO_PODR_BUSCTL
-#define MCFGPIO_PDDR MCFGPIO_PDDR_BUSCTL
-#define MCFGPIO_PPDR MCFGPIO_PPDSDR_BUSCTL
-#define MCFGPIO_SETR MCFGPIO_PPDSDR_BUSCTL
-#define MCFGPIO_CLRR MCFGPIO_PCLRR_BUSCTL
+#define MCFGPIO_PODR MCFGPIO_PODR_CS
+#define MCFGPIO_PDDR MCFGPIO_PDDR_CS
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_CS
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_CS
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_CS
#define MCFGPIO_PIN_MAX 80
#define MCFGPIO_IRQ_MAX 8
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 415d5484916c..789f3b2de0e9 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,6 +40,5 @@ extern unsigned long hw_timer_offset(void);
extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
extern void config_BSP(char *command, int len);
-extern void do_IRQ(int irq, struct pt_regs *fp);
#endif /* _M68K_MACHDEP_H */
diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
index c2a1c5eac1a6..12ebe43b008b 100644
--- a/arch/m68k/include/asm/macintosh.h
+++ b/arch/m68k/include/asm/macintosh.h
@@ -12,6 +12,8 @@ extern void mac_reset(void);
extern void mac_poweroff(void);
extern void mac_init_IRQ(void);
extern int mac_irq_pending(unsigned int);
+extern void mac_irq_enable(struct irq_data *data);
+extern void mac_irq_disable(struct irq_data *data);
/*
* Floppy driver magic hook - probably shouldn't be here
diff --git a/arch/m68k/include/asm/mcfqspi.h b/arch/m68k/include/asm/mcfqspi.h
index 39d90d51111d..7fe631972f1f 100644
--- a/arch/m68k/include/asm/mcfqspi.h
+++ b/arch/m68k/include/asm/mcfqspi.h
@@ -24,9 +24,11 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
#elif defined(CONFIG_M5249)
-#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
-#elif defined(CONFIG_M520x) || defined(CONFIG_M532x)
-#define MCFQSPI_IOBASE 0xFC058000
+#define MCFQSPI_IOBASE (MCF_MBAR + 0x300)
+#elif defined(CONFIG_M520x)
+#define MCFQSPI_IOBASE 0xFC05C000
+#elif defined(CONFIG_M532x)
+#define MCFQSPI_IOBASE 0xFC058000
#endif
#define MCFQSPI_IOSIZE 0x40
diff --git a/arch/m68k/include/asm/module.h b/arch/m68k/include/asm/module.h
index 5f21e11071bd..edffe66b7f49 100644
--- a/arch/m68k/include/asm/module.h
+++ b/arch/m68k/include/asm/module.h
@@ -1,46 +1,41 @@
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
-#ifdef CONFIG_MMU
+enum m68k_fixup_type {
+ m68k_fixup_memoffset,
+ m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+ enum m68k_fixup_type type;
+ void *addr;
+};
struct mod_arch_specific {
struct m68k_fixup_info *fixup_start, *fixup_end;
};
+#ifdef CONFIG_MMU
+
#define MODULE_ARCH_INIT { \
.fixup_start = __start_fixup, \
.fixup_end = __stop_fixup, \
}
-enum m68k_fixup_type {
- m68k_fixup_memoffset,
- m68k_fixup_vnode_shift,
-};
-
-struct m68k_fixup_info {
- enum m68k_fixup_type type;
- void *addr;
-};
-
#define m68k_fixup(type, addr) \
" .section \".m68k_fixup\",\"aw\"\n" \
" .long " #type "," #addr "\n" \
" .previous\n"
+#endif /* CONFIG_MMU */
+
extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
struct module;
extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end);
-#else
-
-struct mod_arch_specific {
-};
-
-#endif /* CONFIG_MMU */
-
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
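
On MMU kernels the m68k_fixup() string macro records a (type, address) pair in the ".m68k_fixup" section, which module_fixup() (or the equivalent early startup pass) later walks to patch instructions in place. A sketch in the spirit of the physical-address conversion helpers, illustrative only: the function name is made up, and the example assumes the fixup address points at the 32-bit immediate, two bytes into the addl instruction.

#include <asm/module.h>

static inline unsigned long example_virt_to_phys(void *vaddr)
{
	unsigned long paddr;

	/* The fixup entry records "1b+2", the address of the addl
	 * immediate; fixup code rewrites it with the real memory
	 * offset once that is known. */
	asm("1:	addl	#0,%0\n"
	    m68k_fixup(%c2, 1b+2)
	    : "=r" (paddr)
	    : "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
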
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 31d5570d6567..89f201434b5a 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -162,7 +162,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
})
#define page_to_pfn(_page) ({ \
- struct page *__p = (_page); \
+ const struct page *__p = (_page); \
struct pglist_data *pgdat; \
pgdat = &pg_data_map[page_to_nid(__p)]; \
((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 90595721185f..a8d1c60eb9ce 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -5,6 +5,9 @@
extern unsigned long memory_start;
extern unsigned long memory_end;
+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 63cdcc142d93..98d0970d9bad 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -51,7 +51,7 @@ typedef struct {
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#undef __FD_ISSET
-#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
+#define __FD_ISSET(d, set) (!!((set)->fds_bits[__FDELT(d)] & __FDMASK(d)))
#undef __FD_ZERO
#define __FD_ZERO(fdsetp) (memset (fdsetp, 0, sizeof(*(fd_set *)fdsetp)))
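
The added double negation matters because fds_bits entries are unsigned long: for descriptor 31 the mask is 1UL << 31, so the old macro returned 0x80000000, a value that is truthy as an unsigned long but truncates to 0 if a caller stores it in an 8- or 16-bit variable and never compares equal to 1. The !! collapses any set bit to exactly 0 or 1.
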
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index f111b02b704f..568facf30276 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -105,9 +105,6 @@ struct thread_struct {
static inline void start_thread(struct pt_regs * regs, unsigned long pc,
unsigned long usp)
{
- /* reads from user space */
- set_fs(USER_DS);
-
regs->pc = pc;
regs->sr &= ~0x2000;
wrusp(usp);
@@ -129,7 +126,6 @@ extern int handle_kernel_fault(struct pt_regs *regs);
#define start_thread(_regs, _pc, _usp) \
do { \
- set_fs(USER_DS); /* reads from user space */ \
(_regs)->pc = (_pc); \
((struct switch_stack *)(_regs))[-1].a6 = 0; \
reformat(_regs); \
@@ -139,6 +135,12 @@ do { \
wrusp(_usp); \
} while(0)
+static inline int handle_kernel_fault(struct pt_regs *regs)
+{
+ /* Any fault in kernel is fatal on non-mmu */
+ return 0;
+}
+
#endif
/* Forward declaration, a strange C thing */
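
With the non-MMU stub above, common fault paths can call handle_kernel_fault() without an #ifdef; the stub simply reports the fault as unhandled. A hypothetical caller might look like the sketch below (the function name and panic message are made up; the real callers live in the m68k trap handling code):

#include <linux/kernel.h>
#include <asm/processor.h>	/* handle_kernel_fault() */
#include <asm/ptrace.h>		/* struct pt_regs, user_mode() */

static void example_report_fault(struct pt_regs *regs)
{
	if (user_mode(regs))
		return;		/* user-space faults are signalled elsewhere */

	/* On MMU kernels this may resolve the fault through an exception
	 * fixup and return non-zero; the non-MMU stub always returns 0. */
	if (handle_kernel_fault(regs))
		return;

	panic("example: unhandled kernel fault");	/* illustrative only */
}
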
diff --git a/arch/m68k/include/asm/ptrace.h b/arch/m68k/include/asm/ptrace.h
index 6e6e3ac1d913..65322b17b6cf 100644
--- a/arch/m68k/include/asm/ptrace.h
+++ b/arch/m68k/include/asm/ptrace.h
@@ -85,7 +85,6 @@ struct switch_stack {
#define user_mode(regs) (!((regs)->sr & PS_S))
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
-extern void show_regs(struct pt_regs *);
#define arch_has_single_step() (1)
diff --git a/arch/m68k/include/asm/q40ints.h b/arch/m68k/include/asm/q40ints.h
index 3d970afb708f..22f12c9eb910 100644
--- a/arch/m68k/include/asm/q40ints.h
+++ b/arch/m68k/include/asm/q40ints.h
@@ -24,6 +24,3 @@
#define Q40_IRQ10_MASK (1<<5)
#define Q40_IRQ14_MASK (1<<6)
#define Q40_IRQ15_MASK (1<<7)
-
-extern unsigned long q40_probe_irq_on (void);
-extern int q40_probe_irq_off (unsigned long irqs);
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
index d64967ecfec6..5277e52715ec 100644
--- a/arch/m68k/include/asm/sections.h
+++ b/arch/m68k/include/asm/sections.h
@@ -3,4 +3,6 @@
#include <asm-generic/sections.h>
+extern char _sbss[], _ebss[];
+
#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h
index 5bc09c787a11..60e88660169c 100644
--- a/arch/m68k/include/asm/signal.h
+++ b/arch/m68k/include/asm/signal.h
@@ -150,7 +150,7 @@ typedef struct sigaltstack {
#ifdef __KERNEL__
#include <asm/sigcontext.h>
-#ifndef __uClinux__
+#ifndef CONFIG_CPU_HAS_NO_BITFIELDS
#define __HAVE_ARCH_SIG_BITOPS
static inline void sigaddset(sigset_t *set, int _sig)
@@ -199,15 +199,14 @@ static inline int sigfindinword(unsigned long word)
return word ^ 31;
}
-struct pt_regs;
-extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
+#endif /* !CONFIG_CPU_HAS_NO_BITFIELDS */
-#else
-
-#undef __HAVE_ARCH_SIG_BITOPS
+#ifdef __uClinux__
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
+#else
+struct pt_regs;
+extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
#endif /* __uClinux__ */
-#endif /* __KERNEL__ */
+#endif /* __KERNEL__ */
#endif /* _M68K_SIGNAL_H */
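
The word ^ 31 visible in the sigfindinword() context above is the tail of the header's bfffo-based implementation (not shown in this hunk). bfffo is one of the 680x0 bit-field instructions that ColdFire lacks, which is why the guard changes from __uClinux__ to CONFIG_CPU_HAS_NO_BITFIELDS. bfffo counts bit offsets from the most significant bit, so the lowest set bit at LSB position k is reported as offset 31 - k; because 31 is all ones in five bits, 31 - k equals 31 ^ k, and XORing with 31 once more recovers k. For example, a pending signal in bit 5 gives a bfffo offset of 26, and 26 ^ 31 = 5.
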
diff --git a/arch/m68k/include/asm/system.h b/arch/m68k/include/asm/system.h
index ccea925ff4f5..47b01f4726bc 100644
--- a/arch/m68k/include/asm/system.h
+++ b/arch/m68k/include/asm/system.h
@@ -1,5 +1,193 @@
-#ifdef __uClinux__
-#include "system_no.h"
+#ifndef _M68K_SYSTEM_H
+#define _M68K_SYSTEM_H
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <asm/segment.h>
+#include <asm/entry.h>
+
+#ifdef __KERNEL__
+
+/*
+ * switch_to(n) should switch tasks to task ptr, first checking that
+ * ptr isn't the current task, in which case it does nothing. This
+ * also clears the TS-flag if the task we switched to has used the
+ * math co-processor latest.
+ */
+/*
+ * switch_to() saves the extra registers, that are not saved
+ * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
+ * a0-a1. Some of these are used by schedule() and its predecessors
+ * and so we might get see unexpected behaviors when a task returns
+ * with unexpected register values.
+ *
+ * syscall stores these registers itself and none of them are used
+ * by syscall after the function in the syscall has been called.
+ *
+ * Beware that resume now expects *next to be in d1 and the offset of
+ * tss to be in a1. This saves a few instructions as we no longer have
+ * to push them onto the stack and read them back right after.
+ *
+ * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
+ *
+ * Changed 96/09/19 by Andreas Schwab
+ * pass prev in a0, next in a1
+ */
+asmlinkage void resume(void);
+#define switch_to(prev,next,last) do { \
+ register void *_prev __asm__ ("a0") = (prev); \
+ register void *_next __asm__ ("a1") = (next); \
+ register void *_last __asm__ ("d1"); \
+ __asm__ __volatile__("jbsr resume" \
+ : "=a" (_prev), "=a" (_next), "=d" (_last) \
+ : "0" (_prev), "1" (_next) \
+ : "d0", "d2", "d3", "d4", "d5"); \
+ (last) = _last; \
+} while (0)
+
+
+/*
+ * Force strict CPU ordering.
+ * Not really required on m68k...
+ */
+#define nop() do { asm volatile ("nop"); barrier(); } while (0)
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define read_barrier_depends() ((void)0)
+#define set_mb(var, value) ({ (var) = (value); wmb(); })
+
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_read_barrier_depends() ((void)0)
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+
+#ifndef CONFIG_RMW_INSNS
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ unsigned long flags, tmp;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ tmp = *(u8 *)ptr;
+ *(u8 *)ptr = x;
+ x = tmp;
+ break;
+ case 2:
+ tmp = *(u16 *)ptr;
+ *(u16 *)ptr = x;
+ x = tmp;
+ break;
+ case 4:
+ tmp = *(u32 *)ptr;
+ *(u32 *)ptr = x;
+ x = tmp;
+ break;
+ default:
+ BUG();
+ }
+
+ local_irq_restore(flags);
+ return x;
+}
#else
-#include "system_mm.h"
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__
+ ("moveb %2,%0\n\t"
+ "1:\n\t"
+ "casb %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__
+ ("movew %2,%0\n\t"
+ "1:\n\t"
+ "casw %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__
+ ("movel %2,%0\n\t"
+ "1:\n\t"
+ "casl %0,%1,%2\n\t"
+ "jne 1b"
+ : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
+ break;
+ }
+ return x;
+}
#endif
+
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#ifdef CONFIG_RMW_INSNS
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ ("casb %0,%2,%1"
+ : "=d" (old), "=m" (*(char *)p)
+ : "d" (new), "0" (old), "m" (*(char *)p));
+ break;
+ case 2:
+ __asm__ __volatile__ ("casw %0,%2,%1"
+ : "=d" (old), "=m" (*(short *)p)
+ : "d" (new), "0" (old), "m" (*(short *)p));
+ break;
+ case 4:
+ __asm__ __volatile__ ("casl %0,%2,%1"
+ : "=d" (old), "=m" (*(int *)p)
+ : "d" (new), "0" (old), "m" (*(int *)p));
+ break;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif /* _M68K_SYSTEM_H */
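
As the comment above __cmpxchg() notes, success is detected by comparing the returned value with the old one. The usual retry loop built on the cmpxchg() wrapper looks like the sketch below (the helper name is an assumption and the example is illustrative only; real code would normally use the existing atomic_t operations):

#include <asm/system.h>

/* Atomically add 'inc' to *p and return the new value. */
static inline unsigned long example_add_return(unsigned long *p,
					       unsigned long inc)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old + inc;
		/* cmpxchg() returns the value it found in *p; if that is
		 * not 'old', somebody else updated *p and we retry. */
	} while (cmpxchg(p, old, new) != old);

	return new;
}
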
diff --git a/arch/m68k/include/asm/system_mm.h b/arch/m68k/include/asm/system_mm.h
deleted file mode 100644
index 47b01f4726bc..000000000000
--- a/arch/m68k/include/asm/system_mm.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef _M68K_SYSTEM_H
-#define _M68K_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-#ifdef __KERNEL__
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) do { \
- register void *_prev __asm__ ("a0") = (prev); \
- register void *_next __asm__ ("a1") = (next); \
- register void *_last __asm__ ("d1"); \
- __asm__ __volatile__("jbsr resume" \
- : "=a" (_prev), "=a" (_next), "=d" (_last) \
- : "0" (_prev), "1" (_next) \
- : "d0", "d2", "d3", "d4", "d5"); \
- (last) = _last; \
-} while (0)
-
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define read_barrier_depends() ((void)0)
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() ((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long flags, tmp;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- tmp = *(u8 *)ptr;
- *(u8 *)ptr = x;
- x = tmp;
- break;
- case 2:
- tmp = *(u16 *)ptr;
- *(u16 *)ptr = x;
- x = tmp;
- break;
- case 4:
- tmp = *(u32 *)ptr;
- *(u32 *)ptr = x;
- x = tmp;
- break;
- default:
- BUG();
- }
-
- local_irq_restore(flags);
- return x;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "1:\n\t"
- "casb %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "1:\n\t"
- "casw %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "1:\n\t"
- "casl %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-#ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__ ("casb %0,%2,%1"
- : "=d" (old), "=m" (*(char *)p)
- : "d" (new), "0" (old), "m" (*(char *)p));
- break;
- case 2:
- __asm__ __volatile__ ("casw %0,%2,%1"
- : "=d" (old), "=m" (*(short *)p)
- : "d" (new), "0" (old), "m" (*(short *)p));
- break;
- case 4:
- __asm__ __volatile__ ("casl %0,%2,%1"
- : "=d" (old), "=m" (*(int *)p)
- : "d" (new), "0" (old), "m" (*(int *)p));
- break;
- }
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))))
-#else
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-
-#include <asm-generic/cmpxchg.h>
-
-#endif
-
-#define arch_align_stack(x) (x)
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_SYSTEM_H */
diff --git a/arch/m68k/include/asm/system_no.h b/arch/m68k/include/asm/system_no.h
deleted file mode 100644
index 6fe9f93bc3ff..000000000000
--- a/arch/m68k/include/asm/system_no.h
+++ /dev/null
@@ -1,153 +0,0 @@
-#ifndef _M68KNOMMU_SYSTEM_H
-#define _M68KNOMMU_SYSTEM_H
-
-#include <linux/linkage.h>
-#include <linux/irqflags.h>
-#include <asm/segment.h>
-#include <asm/entry.h>
-
-/*
- * switch_to(n) should switch tasks to task ptr, first checking that
- * ptr isn't the current task, in which case it does nothing. This
- * also clears the TS-flag if the task we switched to has used the
- * math co-processor latest.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might get see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- */
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) \
-{ \
- void *_last; \
- __asm__ __volatile__( \
- "movel %1, %%a0\n\t" \
- "movel %2, %%a1\n\t" \
- "jbsr resume\n\t" \
- "movel %%d1, %0\n\t" \
- : "=d" (_last) \
- : "d" (prev), "d" (next) \
- : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1"); \
- (last) = _last; \
-}
-
-#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")
-
-/*
- * Force strict CPU ordering.
- * Not really required on m68k...
- */
-#define nop() asm volatile ("nop"::)
-#define mb() asm volatile ("" : : :"memory")
-#define rmb() asm volatile ("" : : :"memory")
-#define wmb() asm volatile ("" : : :"memory")
-#define set_mb(var, value) ({ (var) = (value); wmb(); })
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while(0)
-
-#define read_barrier_depends() ((void)0)
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-#ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "moveb %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "movew %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "movel %1,%2"
- : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- local_irq_restore(flags);
- return tmp;
-}
-#else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("moveb %2,%0\n\t"
- "1:\n\t"
- "casb %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("movew %2,%0\n\t"
- "1:\n\t"
- "casw %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("movel %2,%0\n\t"
- "1:\n\t"
- "casl %0,%1,%2\n\t"
- "jne 1b"
- : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
- break;
- }
- return x;
-}
-#endif
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
- (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#include <asm-generic/cmpxchg.h>
-
-#define arch_align_stack(x) (x)
-
-
-#endif /* _M68KNOMMU_SYSTEM_H */
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 0bffb17d5db7..151068f64f44 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -22,7 +22,6 @@ extern e_vector vectors[];
asmlinkage void auto_inthandler(void);
asmlinkage void user_inthandler(void);
asmlinkage void bad_inthandler(void);
-extern void init_vectors(void);
#endif