author     Ingo Molnar <mingo@elte.hu>    2009-04-29 14:46:59 +0200
committer  Ingo Molnar <mingo@elte.hu>    2009-04-29 14:47:05 +0200
commit     e7fd5d4b3d240f42c30a9e3d20a4689c4d3a795a (patch)
tree       4ba588631dd8189a818a91c9e3976526071178b6 /arch/mn10300/include/asm/bitops.h
parent     1130b0296184bc21806225fd06d533515a99d2db (diff)
parent     56a50adda49b2020156616c4eb15353e0f9ad7de (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: This branch was on -rc1, refresh it to almost-rc4 to pick up the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/mn10300/include/asm/bitops.h')
-rw-r--r--    arch/mn10300/include/asm/bitops.h    240
1 file changed, 240 insertions(+), 0 deletions(-)
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
new file mode 100644
index 000000000000..0b610f482abb
--- /dev/null
+++ b/arch/mn10300/include/asm/bitops.h
@@ -0,0 +1,240 @@
+/* MN10300 bit operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ *
+ * These have to be done with inline assembly: that way the bit-setting
+ * is guaranteed to be atomic. All bit operations return 0 if the bit
+ * was cleared before the operation and != 0 if it was not.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+#ifndef __ASM_BITOPS_H
+#define __ASM_BITOPS_H
+
+#include <asm/cpu-regs.h>
+
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/*
+ * set bit
+ */
+#define __set_bit(nr, addr) \
+({ \
+ volatile unsigned char *_a = (unsigned char *)(addr); \
+ const unsigned shift = (nr) & 7; \
+ _a += (nr) >> 3; \
+ \
+ asm volatile("bset %2,(%1) # set_bit reg" \
+ : "=m"(*_a) \
+ : "a"(_a), "d"(1 << shift), "m"(*_a) \
+ : "memory", "cc"); \
+})
+
+#define set_bit(nr, addr) __set_bit((nr), (addr))
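/*
 * Illustrative sketch (editorial addition, not part of the patch above): the
 * set/clear macros split nr into a byte offset (nr >> 3) and a bit position
 * within that byte (nr & 7), matching the "bit 0 is the LSB of addr"
 * convention from the header comment.  The function name below is
 * hypothetical; in kernel code this header is normally reached through
 * <linux/bitops.h>.
 */
static void example_bit_addressing(void)
{
	unsigned long bits[2] = { 0, 0 };

	set_bit(35, bits);	/* byte offset 35 >> 3 == 4, bit 35 & 7 == 3 */
	/* ((unsigned char *)bits)[4] now reads 0x08 */
}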
+
+/*
+ * clear bit
+ */
+#define ___clear_bit(nr, addr) \
+({ \
+ volatile unsigned char *_a = (unsigned char *)(addr); \
+ const unsigned shift = (nr) & 7; \
+ _a += (nr) >> 3; \
+ \
+ asm volatile("bclr %2,(%1) # clear_bit reg" \
+ : "=m"(*_a) \
+ : "a"(_a), "d"(1 << shift), "m"(*_a) \
+ : "memory", "cc"); \
+})
+
+#define clear_bit(nr, addr) ___clear_bit((nr), (addr))
+
+
+static inline void __clear_bit(int nr, volatile void *addr)
+{
+ unsigned int *a = (unsigned int *) addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a &= ~mask;
+}
+
+/*
+ * test bit
+ */
+static inline int test_bit(int nr, const volatile void *addr)
+{
+ return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+}
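/*
 * Illustrative sketch (editorial addition, not part of the patch above):
 * typical use of the set/clear/test operations defined so far.  The function
 * name is hypothetical.
 */
static int example_flag_word(void)
{
	unsigned long flags = 0;

	set_bit(0, &flags);		/* bit 0 now set */
	set_bit(5, &flags);		/* bits 0 and 5 now set */
	clear_bit(0, &flags);		/* only bit 5 remains set */

	return test_bit(5, &flags);	/* non-zero */
}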
+
+/*
+ * change bit
+ */
+static inline void __change_bit(int nr, volatile void *addr)
+{
+ int mask;
+ unsigned int *a = (unsigned int *) addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a ^= mask;
+}
+
+extern void change_bit(int nr, volatile void *addr);
+
+/*
+ * test and set bit
+ */
+#define __test_and_set_bit(nr,addr) \
+({ \
+ volatile unsigned char *_a = (unsigned char *)(addr); \
+ const unsigned shift = (nr) & 7; \
+ unsigned epsw; \
+ _a += (nr) >> 3; \
+ \
+ asm volatile("bset %3,(%2) # test_set_bit reg\n" \
+ "mov epsw,%1" \
+ : "=m"(*_a), "=d"(epsw) \
+ : "a"(_a), "d"(1 << shift), "m"(*_a) \
+ : "memory", "cc"); \
+ \
+ !(epsw & EPSW_FLAG_Z); \
+})
+
+#define test_and_set_bit(nr, addr) __test_and_set_bit((nr), (addr))
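/*
 * Editorial note (not part of the patch above): test_and_set_bit() derives
 * its return value from the EPSW Z flag captured immediately after the bset,
 * so !(epsw & EPSW_FLAG_Z) evaluates to the bit's previous state.  A hedged
 * sketch of the usual try-claim idiom built on that return value; the
 * function name is hypothetical.
 */
static int example_try_claim(unsigned long *bitmap, int nr)
{
	/* non-zero if we just claimed bit nr, 0 if it was already set */
	return !test_and_set_bit(nr, bitmap);
}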
+
+/*
+ * test and clear bit
+ */
+#define __test_and_clear_bit(nr, addr) \
+({ \
+ volatile unsigned char *_a = (unsigned char *)(addr); \
+ const unsigned shift = (nr) & 7; \
+ unsigned epsw; \
+ _a += (nr) >> 3; \
+ \
+ asm volatile("bclr %3,(%2) # test_clear_bit reg\n" \
+ "mov epsw,%1" \
+ : "=m"(*_a), "=d"(epsw) \
+ : "a"(_a), "d"(1 << shift), "m"(*_a) \
+ : "memory", "cc"); \
+ \
+ !(epsw & EPSW_FLAG_Z); \
+})
+
+#define test_and_clear_bit(nr, addr) __test_and_clear_bit((nr), (addr))
+
+/*
+ * test and change bit
+ */
+static inline int __test_and_change_bit(int nr, volatile void *addr)
+{
+ int mask, retval;
+ unsigned int *a = (unsigned int *)addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+
+ return retval;
+}
+
+extern int test_and_change_bit(int nr, volatile void *addr);
+
+#include <asm-generic/bitops/lock.h>
+
+#ifdef __KERNEL__
+
+/**
+ * __ffs - find first bit set
+ * @x: the word to search
+ *
+ * - return 31..0 to indicate that bit 31..0 is the least significant bit set
+ * - if no bits are set in x, the result is undefined
+ */
+static inline __attribute__((const))
+unsigned long __ffs(unsigned long x)
+{
+ int bit;
+ asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x));
+ return bit;
+}
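/*
 * Illustrative sketch (editorial addition, not part of the patch above):
 * __ffs() isolates the lowest set bit with (x & -x) before handing it to
 * bsch, so the reported index is that of the least significant set bit.
 * Worked example using the same arithmetic in plain C:
 */
static void example_lowest_bit_isolation(void)
{
	unsigned long x = 0x28;			/* bits 3 and 5 set */
	unsigned long lowest = x & -x;		/* 0x08: only bit 3 survives */

	/* __ffs(0x28) therefore evaluates to 3 */
	(void)lowest;
}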
+
+/*
+ * special slimline version of fls() for calculating ilog2_u32()
+ * - note: no protection against n == 0
+ */
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ int bit;
+ asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n));
+ return bit;
+}
+
+/**
+ * fls - find last bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs:
+ * - return 32..1 to indicate that bit 31..0 is the most significant bit set
+ * - return 0 to indicate no bits set
+ */
+static inline __attribute__((const))
+int fls(int x)
+{
+ return (x != 0) ? __ilog2_u32(x) + 1 : 0;
+}
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ return __ilog2_u32(word);
+}
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * - return 32..1 to indicate that bit 31..0 is the least significant bit set
+ * - return 0 to indicate no bits set
+ */
+static inline __attribute__((const))
+int ffs(int x)
+{
+ /* Note: (x & -x) gives us a mask that is the least significant
+ * (rightmost) 1-bit of the value in x.
+ */
+ return fls(x & -x);
+}
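/*
 * Illustrative sketch (editorial addition, not part of the patch above):
 * worked values tying ffs()/fls() to __ilog2_u32().  The function name is
 * hypothetical.
 */
static void example_ffs_fls_values(void)
{
	int a = ffs(0x28);	/* 4: lowest set bit of 0b101000 is bit 3 */
	int b = fls(0x28);	/* 6: highest set bit is bit 5 */
	int c = ffs(0);		/* 0: no bits set */
	int d = fls(0);		/* 0: no bits set */

	(void)a; (void)b; (void)c; (void)d;
}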
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr) ^ 0x18, (addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr) ^ 0x18, (addr))
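/*
 * Editorial note (not part of the patch above): the "^ 0x18" only flips bits
 * 3 and 4 of the bit number, i.e. it mirrors the byte index inside each
 * 32-bit word (0<->3, 1<->2) while leaving the bit-within-byte position
 * alone; ext2 bit 0, for instance, maps to native bit 24 (byte 3, bit 0).
 * The helper below is hypothetical and only restates that index arithmetic.
 */
static unsigned int example_ext2_swizzle(unsigned int nr)
{
	/* byte: (nr >> 3) & 3 becomes 3 - ((nr >> 3) & 3); bit: nr & 7 unchanged */
	return nr ^ 0x18;
}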
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/minix-le.h>
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_BITOPS_H */