From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 include/asm-ppc64/bitops.h | 360 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 360 insertions(+)
 create mode 100644 include/asm-ppc64/bitops.h

diff --git a/include/asm-ppc64/bitops.h b/include/asm-ppc64/bitops.h
new file mode 100644
index 000000000000..a0f831224f96
--- /dev/null
+++ b/include/asm-ppc64/bitops.h
@@ -0,0 +1,360 @@
+/*
+ * PowerPC64 atomic bit operations.
+ * Dave Engebretsen, Todd Inglett, Don Reed, Pat McCarthy, Peter Bergner,
+ * Anton Blanchard
+ *
+ * Originally taken from the 32b PPC code.  Modified to use 64b values for
+ * the various counters & memory references.
+ *
+ * Bitops are odd when viewed on big-endian systems.  They were designed
+ * on little endian so the size of the bitset doesn't matter (low order bytes
+ * come first) as long as the bit in question is valid.
+ *
+ * Bits are "tested" often using the C expression (val & (1<<nr)) so we do
+ * our best to make the set/clear functions consistent with this
+ * expectation.
+ */
+
+#ifndef _PPC64_BITOPS_H
+#define _PPC64_BITOPS_H
+
+#ifdef __KERNEL__
+
+#include <asm/memory.h>
+
+/*
+ * clear_bit doesn't imply a memory barrier
+ */
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+
+static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
+{
+	return (1UL & (addr[nr >> 6] >> (nr & 63)));
+}
+
+static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# set_bit\n\
+	or	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# clear_bit\n\
+	andc	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# change_bit\n\
+	xor	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*p)
+	: "r" (mask), "r" (p), "m" (*p)
+	: "cc");
+}
+
+static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_set_bit\n\
+	or	%1,%0,%2 \n\
+	stdcx.	%1,0,%3 \n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_clear_bit\n\
+	andc	%1,%0,%2\n\
+	stdcx.	%1,0,%3\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long old, t;
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	__asm__ __volatile__(
+	EIEIO_ON_SMP
+"1:	ldarx	%0,0,%3		# test_and_change_bit\n\
+	xor	%1,%0,%2\n\
+	stdcx.	%1,0,%3\n\
+	bne-	1b"
+	ISYNC_ON_SMP
+	: "=&r" (old), "=&r" (t)
+	: "r" (mask), "r" (p)
+	: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
+{
+	unsigned long old;
+
+	__asm__ __volatile__(
+"1:	ldarx	%0,0,%3		# set_bits\n\
+	or	%0,%0,%2\n\
+	stdcx.	%0,0,%3\n\
+	bne-	1b"
+	: "=&r" (old), "=m" (*addr)
+	: "r" (mask), "r" (addr), "m" (*addr)
+	: "cc");
+}
+
+/*
+ * non-atomic versions
+ */
+static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p |= mask;
+}
+
+static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p &= ~mask;
+}
+
+static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+
+	*p ^= mask;
+}
+
+static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+{
+	unsigned long mask = 1UL << (nr & 0x3f);
+	unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/*
+ * Return the zero-based bit position (from RIGHT TO LEFT, 63 -> 0) of the
+ * most significant (left-most) 1-bit in a double word.
+ */
+static __inline__ int __ilog2(unsigned long x)
+{
+	int lz;
+
+	asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+	return 63 - lz;
+}
+
+/*
+ * Determines the bit position of the least significant (rightmost) 0 bit
+ * in the specified double word.  The returned bit position will be zero-based,
+ * starting from the right side (63 - 0).
+ */
+static __inline__ unsigned long ffz(unsigned long x)
+{
+	/* no zero exists anywhere in the 8 byte area. */
+	if ((x = ~x) == 0)
+		return 64;
+
+	/*
+	 * Calculate the bit position of the least significant '1' bit in x
+	 * (since x has been changed this will actually be the least significant
+	 * '0' bit in the original x).  Note: (x & -x) gives us a mask that
+	 * is the least significant (RIGHT-most) 1-bit of the value in x.
+	 */
+	return __ilog2(x & -x);
+}
+
+static __inline__ int __ffs(unsigned long x)
+{
+	return __ilog2(x & -x);
+}
+
+/*
+ * ffs: find first bit set. This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static __inline__ int ffs(int x)
+{
+	unsigned long i = (unsigned long)x;
+	return __ilog2(i & -i) + 1;
+}
+
+/*
+ * fls: find last (most-significant) bit set.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+#define fls(x) generic_fls(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+#define hweight64(x) generic_hweight64(x)
+#define hweight32(x) generic_hweight32(x)
+#define hweight16(x) generic_hweight16(x)
+#define hweight8(x) generic_hweight8(x)
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
+#define find_first_zero_bit(addr, size) \
+	find_next_zero_bit((addr), (size), 0)
+
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)
+
+extern unsigned long find_next_zero_le_bit(const unsigned long *addr, unsigned long size, unsigned long offset);
+#define find_first_zero_le_bit(addr, size) \
+	find_next_zero_le_bit((addr), (size), 0)
+
+static __inline__ int test_le_bit(unsigned long nr, __const__ unsigned long * addr)
+{
+	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;
+	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
+}
+
+#define test_and_clear_le_bit(nr, addr) \
+	test_and_clear_bit((nr) ^ 0x38, (addr))
+#define test_and_set_le_bit(nr, addr) \
+	test_and_set_bit((nr) ^ 0x38, (addr))
+
+/*
+ * non-atomic versions
+ */
+
+#define __set_le_bit(nr, addr) \
+	__set_bit((nr) ^ 0x38, (addr))
+#define __clear_le_bit(nr, addr) \
+	__clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_clear_le_bit(nr, addr) \
+	__test_and_clear_bit((nr) ^ 0x38, (addr))
+#define __test_and_set_le_bit(nr, addr) \
+	__test_and_set_bit((nr) ^ 0x38, (addr))
+
+#define ext2_set_bit(nr,addr) \
+	__test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit(nr, addr) \
+	__test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+	test_and_set_le_bit((nr), (unsigned long*)addr)
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+	test_and_clear_le_bit((nr), (unsigned long*)addr)
+
+#define ext2_test_bit(nr, addr)      test_le_bit((nr),(unsigned long*)addr)
+#define ext2_find_first_zero_bit(addr, size) \
+	find_first_zero_le_bit((unsigned long*)addr, size)
+#define ext2_find_next_zero_bit(addr, size, off) \
+	find_next_zero_le_bit((unsigned long*)addr, size, off)
+
+#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr)			set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr)			test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
+
+#endif /* __KERNEL__ */
+#endif /* _PPC64_BITOPS_H */
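
Every atomic primitive in this header is the same load-reserve/store-conditional
retry loop: ldarx loads the doubleword and establishes a reservation, the new
value is computed in a register, and stdcx. stores it only if the reservation
still holds, branching back (bne- 1b) on failure; the EIEIO_ON_SMP and
ISYNC_ON_SMP macros supply the ordering that the test_and_* variants must
provide. A rough portable analogue of test_and_set_bit using a C11
compare-and-swap loop - a sketch of the semantics only, not the kernel's
implementation, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

static bool test_and_set_bit_c11(unsigned long nr, _Atomic unsigned long *addr)
{
	_Atomic unsigned long *p = addr + (nr >> 6);
	unsigned long mask = 1UL << (nr & 0x3f);
	unsigned long old = atomic_load_explicit(p, memory_order_relaxed);

	/* Retry until the store succeeds, like the stdcx./bne- loop.
	 * acq_rel only approximates the eieio/isync pairing above. */
	while (!atomic_compare_exchange_weak_explicit(p, &old, old | mask,
						      memory_order_acq_rel,
						      memory_order_relaxed))
		;
	return (old & mask) != 0;
}

int main(void)
{
	_Atomic unsigned long map[2] = {0};

	bool was_set = test_and_set_bit_c11(70, map);	/* bit 6 of word 1 */
	return was_set;		/* 0: the bit was previously clear */
}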
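
The ffz()/__ffs() pair leans on two identities: x & -x isolates the lowest set
bit of x, and __ilog2() recovers its position as 63 minus the leading-zero
count (what cntlzd computes). A stand-alone check of the same logic with GCC's
__builtin_clzl standing in for the cntlzd instruction - hypothetical test
values, and it assumes a 64-bit unsigned long just as the ppc64 code does:

#include <assert.h>

static int ilog2_64(unsigned long x)	/* mirrors __ilog2: 63 - cntlzd(x) */
{
	return 63 - __builtin_clzl(x);	/* x must be nonzero */
}

static unsigned long ffz64(unsigned long x)	/* mirrors ffz() above */
{
	if ((x = ~x) == 0)		/* all ones: no zero bit anywhere */
		return 64;
	return ilog2_64(x & -x);	/* x & -x keeps only the lowest 1 bit */
}

int main(void)
{
	assert(ffz64(0x0UL) == 0);	/* bit 0 of zero is already clear */
	assert(ffz64(0xffUL) == 8);	/* first clear bit above a byte of ones */
	assert(ffz64(~0UL) == 64);
	assert(ilog2_64(1UL << 40) == 40);
	return 0;
}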
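
As for why the le_* variants XOR the bit number with 0x38: the ext2-style
bitmap layout puts bit nr at bit (nr & 7) of byte (nr >> 3), but the atomic
primitives above operate on whole 64-bit words, and on a big-endian machine
value byte k of a doubleword sits at memory byte 7 - k. Flipping bits 3-5 of
the index (nr ^ 0x38) mirrors the byte selection within the doubleword so the
two layouts coincide. A minimal stand-alone check of that identity (my reading
of the design, not part of the patch; the check itself runs on any host):

#include <assert.h>

int main(void)
{
	unsigned nr;

	for (nr = 0; nr < 64; nr++) {
		unsigned b = nr ^ 0x38;	/* word bit the le_* helpers touch */

		/* Big-endian: value bit b of a doubleword lands in
		 * memory byte 7 - (b >> 3), at bit b & 7 of that byte... */
		unsigned mem_byte = 7 - (b >> 3);
		unsigned mem_bit  = b & 7;

		/* ...which is exactly the byte-stream slot test_le_bit()
		 * reads: byte nr >> 3, bit nr & 7. */
		assert(mem_byte == (nr >> 3));
		assert(mem_bit  == (nr & 7));
	}
	return 0;
}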