Diffstat (limited to 'include'):

 -rw-r--r--   include/asm-alpha/bitops.h        |  51
 -rw-r--r--   include/asm-arm/arch-pxa/pm.h     |   2
 -rw-r--r--   include/asm-arm/bitops.h          |   1
 -rw-r--r--   include/asm-avr32/bitops.h        |   1
 -rw-r--r--   include/asm-blackfin/bitops.h     |   1
 -rw-r--r--   include/asm-blackfin/processor.h  |   4
 -rw-r--r--   include/asm-cris/bitops.h         |   1
 -rw-r--r--   include/asm-frv/bitops.h          |   1
 -rw-r--r--   include/asm-generic/bitops.h      |   1
 -rw-r--r--   include/asm-generic/bitops/lock.h |  45
 -rw-r--r--   include/asm-h8300/bitops.h        |   1
 -rw-r--r--   include/asm-ia64/bitops.h         |  41
 -rw-r--r--   include/asm-m32r/bitops.h         |   1
 -rw-r--r--   include/asm-m68k/bitops.h         |   1
 -rw-r--r--   include/asm-m68knommu/bitops.h    |   1
 -rw-r--r--   include/asm-mips/bitops.h         | 102
 -rw-r--r--   include/asm-parisc/bitops.h       |   1
 -rw-r--r--   include/asm-powerpc/bitops.h      |  45
 -rw-r--r--   include/asm-powerpc/mpc52xx.h     |   6
 -rw-r--r--   include/asm-powerpc/paca.h        |   3
 -rw-r--r--   include/asm-ppc/time.h            |   2
 -rw-r--r--   include/asm-s390/bitops.h         |   1
 -rw-r--r--   include/asm-sh/bitops.h           |   1
 -rw-r--r--   include/asm-sh64/bitops.h         |   1
 -rw-r--r--   include/asm-sparc/bitops.h        |   1
 -rw-r--r--   include/asm-sparc64/bitops.h      |   1
 -rw-r--r--   include/asm-v850/bitops.h         |   1
 -rw-r--r--   include/asm-x86/bitops_32.h       |   1
 -rw-r--r--   include/asm-x86/bitops_64.h       |   1
 -rw-r--r--   include/asm-x86/suspend_64.h      |   2
 -rw-r--r--   include/asm-xtensa/bitops.h       |   1
 -rw-r--r--   include/linux/aio.h               |  12
 -rw-r--r--   include/linux/bit_spinlock.h      |  26
 -rw-r--r--   include/linux/capability.h        |   4
 -rw-r--r--   include/linux/clocksource.h       |   5
 -rw-r--r--   include/linux/compiler-gcc.h      |  12
 -rw-r--r--   include/linux/compiler.h          |  14
 -rw-r--r--   include/linux/console.h           |   7
 -rw-r--r--   include/linux/cpu.h               |   5
 -rw-r--r--   include/linux/cyclades.h          |  27
 -rw-r--r--   include/linux/dma-mapping.h       |  30
 -rw-r--r--   include/linux/ext3_fs.h           |   4
 -rw-r--r--   include/linux/ext4_fs.h           | 103
 -rw-r--r--   include/linux/ext4_fs_extents.h   |   4
 -rw-r--r--   include/linux/ext4_fs_i.h         |   5
 -rw-r--r--   include/linux/ext4_fs_sb.h        |   3
 -rw-r--r--   include/linux/ext4_jbd2.h         |   6
 -rw-r--r--   include/linux/freezer.h           |  38
 -rw-r--r--   include/linux/fs.h                |   1
 -rw-r--r--   include/linux/fuse.h              |  65
 -rw-r--r--   include/linux/ipmi.h              |  10
 -rw-r--r--   include/linux/ipmi_smi.h          |  36
 -rw-r--r--   include/linux/jbd.h               |  17
 -rw-r--r--   include/linux/jbd2.h              |  49
 -rw-r--r--   include/linux/kernel_stat.h       |   2
 -rw-r--r--   include/linux/parport.h           |   1
 -rw-r--r--   include/linux/pm.h                |  98
 -rw-r--r--   include/linux/poison.h            |   3
 -rw-r--r--   include/linux/sched.h             |   9
 -rw-r--r--   include/linux/security.h          |   7
 -rw-r--r--   include/linux/suspend.h           | 169
 -rw-r--r--   include/linux/sysctl.h            |  58
 -rw-r--r--   include/linux/taskstats.h         |   7

 63 files changed, 812 insertions(+), 348 deletions(-)
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 9e71201000d5..381b4f5b4d5d 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -2,6 +2,7 @@
#define _ALPHA_BITOPS_H
#include <asm/compiler.h>
+#include <asm/barrier.h>
/*
* Copyright 1994, Linus Torvalds.
@@ -69,6 +70,13 @@ clear_bit(unsigned long nr, volatile void * addr)
:"Ir" (1UL << (nr & 31)), "m" (*m));
}
+static inline void
+clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+ smp_mb();
+ clear_bit(nr, addr);
+}
+
/*
* WARNING: non atomic version.
*/
@@ -81,6 +89,13 @@ __clear_bit(unsigned long nr, volatile void * addr)
}
static inline void
+__clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+ smp_mb();
+ __clear_bit(nr, addr);
+}
+
+static inline void
change_bit(unsigned long nr, volatile void * addr)
{
unsigned long temp;
@@ -117,6 +132,36 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
+ "1: ldl_l %0,%4\n"
+ " and %0,%3,%2\n"
+ " bne %2,2f\n"
+ " xor %0,%3,%0\n"
+ " stl_c %0,%1\n"
+ " beq %0,3f\n"
+ "2:\n"
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+ :"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+ return oldbit != 0;
+}
+
+static inline int
+test_and_set_bit_lock(unsigned long nr, volatile void *addr)
+{
+ unsigned long oldbit;
+ unsigned long temp;
+ int *m = ((int *) addr) + (nr >> 5);
+
+ __asm__ __volatile__(
"1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" bne %2,2f\n"
@@ -158,6 +203,9 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
"1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" beq %2,2f\n"
@@ -199,6 +247,9 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
int *m = ((int *) addr) + (nr >> 5);
__asm__ __volatile__(
+#ifdef CONFIG_SMP
+ " mb\n"
+#endif
"1: ldl_l %0,%4\n"
" and %0,%3,%2\n"
" xor %0,%3,%0\n"
diff --git a/include/asm-arm/arch-pxa/pm.h b/include/asm-arm/arch-pxa/pm.h
index 6903db7fae15..9d9f4b54b2ce 100644
--- a/include/asm-arm/arch-pxa/pm.h
+++ b/include/asm-arm/arch-pxa/pm.h
@@ -7,6 +7,8 @@
*
*/
+#include <linux/suspend.h>
+
struct pxa_cpu_pm_fns {
int save_size;
void (*save)(unsigned long *);
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index b41831b6432f..52fe05895deb 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
@@ -286,6 +286,7 @@ static inline int constant_fls(int x)
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
/*
* Ext2 is defined to use little-endian byte ordering.
diff --git a/include/asm-avr32/bitops.h b/include/asm-avr32/bitops.h
index 5299f8c8e11d..f3faddfd46a8 100644
--- a/include/asm-avr32/bitops.h
+++ b/include/asm-avr32/bitops.h
@@ -288,6 +288,7 @@ static inline int ffs(unsigned long word)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/include/asm-blackfin/bitops.h b/include/asm-blackfin/bitops.h
index 27c2d0e48e1b..03ecedc1f2a7 100644
--- a/include/asm-blackfin/bitops.h
+++ b/include/asm-blackfin/bitops.h
@@ -199,6 +199,7 @@ static __inline__ int __test_bit(int nr, const void *addr)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h
index 6bb3e0d4705d..c571e958558c 100644
--- a/include/asm-blackfin/processor.h
+++ b/include/asm-blackfin/processor.h
@@ -104,13 +104,13 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
/* Get the Silicon Revision of the chip */
-static inline __attribute_pure__ uint32_t bfin_revid(void)
+static inline uint32_t __pure bfin_revid(void)
{
/* stored in the upper 4 bits */
return bfin_read_CHIPID() >> 28;
}
-static inline __attribute_pure__ uint32_t bfin_compiled_revid(void)
+static inline uint32_t __pure bfin_compiled_revid(void)
{
#if defined(CONFIG_BF_REV_0_0)
return 0;
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index a569065113d9..617151b9b72b 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -154,6 +154,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index f8560edf59ff..8dba74b1a254 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
@@ -302,6 +302,7 @@ int __ilog2_u64(u64 n)
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index 1f9d99193df8..e022a0f59e6b 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -22,6 +22,7 @@
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
new file mode 100644
index 000000000000..308a9e22c802
--- /dev/null
+++ b/include/asm-generic/bitops/lock.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+#define clear_bit_unlock(nr, addr) \
+do { \
+ smp_mb__before_clear_bit(); \
+ clear_bit(nr, addr); \
+} while (0)
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock, however it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock, however it would only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ */
+#define __clear_bit_unlock(nr, addr) \
+do { \
+ smp_mb(); \
+ __clear_bit(nr, addr); \
+} while (0)
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+
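Illustrative usage sketch (not part of this patch; the flags word, bit number and function names below are hypothetical): the acquire-side test_and_set_bit_lock() spins until the bit was previously clear, and clear_bit_unlock() releases it with release semantics, which is exactly the pairing the new lock.h documents.

        /* Hypothetical caller of the new acquire/release bit primitives. */
        #define MY_LOCK_BIT     0
        static unsigned long my_flags;          /* bit 0 is the lock bit */

        static void my_lock(void)
        {
                /* acquire: loop until the bit was previously clear */
                while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
                        cpu_relax();
        }

        static void my_unlock(void)
        {
                /* release: prior stores are ordered before the bit is cleared */
                clear_bit_unlock(MY_LOCK_BIT, &my_flags);
        }
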
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h
index d76299c98b81..e64ad315656d 100644
--- a/include/asm-h8300/bitops.h
+++ b/include/asm-h8300/bitops.h
@@ -194,6 +194,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index 6cc517e212a9..2144f1a8ed6f 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -94,6 +94,38 @@ clear_bit (int nr, volatile void *addr)
}
/**
+ * clear_bit_unlock - Clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit_unlock() is atomic and may not be reordered. It does
+ * contain a memory barrier suitable for unlock type operations.
+ */
+static __inline__ void
+clear_bit_unlock (int nr, volatile void *addr)
+{
+ __u32 mask, old, new;
+ volatile __u32 *m;
+ CMPXCHG_BUGCHECK_DECL
+
+ m = (volatile __u32 *) addr + (nr >> 5);
+ mask = ~(1 << (nr & 31));
+ do {
+ CMPXCHG_BUGCHECK(m);
+ old = *m;
+ new = old & mask;
+ } while (cmpxchg_rel(m, old, new) != old);
+}
+
+/**
+ * __clear_bit_unlock - Non-atomically clear a bit with release
+ *
+ * This is like clear_bit_unlock, but the implementation may use a non-atomic
+ * store (this one uses an atomic, however).
+ */
+#define __clear_bit_unlock clear_bit_unlock
+
+/**
* __clear_bit - Clears a bit in memory (non-atomic version)
*/
static __inline__ void
@@ -170,6 +202,15 @@ test_and_set_bit (int nr, volatile void *addr)
}
/**
+ * test_and_set_bit_lock - Set a bit and return its old value for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is the same as test_and_set_bit on ia64
+ */
+#define test_and_set_bit_lock test_and_set_bit
+
+/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h
index 66ab672162cd..313a02c4a889 100644
--- a/include/asm-m32r/bitops.h
+++ b/include/asm-m32r/bitops.h
@@ -255,6 +255,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h
index 1a61fdb56aaf..da151f70cdc6 100644
--- a/include/asm-m68k/bitops.h
+++ b/include/asm-m68k/bitops.h
@@ -314,6 +314,7 @@ static inline int fls(int x)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
/* Bitmap functions for the minix filesystem */
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h
index 7d6075d9b5cb..b8b2770d6870 100644
--- a/include/asm-m68knommu/bitops.h
+++ b/include/asm-m68knommu/bitops.h
@@ -160,6 +160,7 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 899357a72ac4..77ed0c79830b 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -172,6 +172,20 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
}
/*
+ * clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock.
+ */
+static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(nr, addr);
+}
+
+/*
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
@@ -240,6 +254,8 @@ static inline int test_and_set_bit(unsigned long nr,
unsigned short bit = nr & SZLONG_MASK;
unsigned long res;
+ smp_llsc_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -295,6 +311,73 @@ static inline int test_and_set_bit(unsigned long nr,
}
/*
+ * test_and_set_bit_lock - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and implies acquire ordering semantics
+ * after the memory operation.
+ */
+static inline int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned short bit = nr & SZLONG_MASK;
+ unsigned long res;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " beqzl %2, 1b \n"
+ " and %2, %0, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << bit), "m" (*m)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips3 \n"
+ "1: " __LL "%0, %1 # test_and_set_bit \n"
+ " or %2, %0, %3 \n"
+ " " __SC "%2, %1 \n"
+ " beqz %2, 2f \n"
+ " and %2, %0, %3 \n"
+ " .subsection 2 \n"
+ "2: b 1b \n"
+ " nop \n"
+ " .previous \n"
+ " .set pop \n"
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << bit), "m" (*m)
+ : "memory");
+ } else {
+ volatile unsigned long *a = addr;
+ unsigned long mask;
+ unsigned long flags;
+
+ a += nr >> SZLONG_LOG;
+ mask = 1UL << bit;
+ raw_local_irq_save(flags);
+ res = (mask & *a);
+ *a |= mask;
+ raw_local_irq_restore(flags);
+ }
+
+ smp_llsc_mb();
+
+ return res != 0;
+}
+/*
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
@@ -308,6 +391,8 @@ static inline int test_and_clear_bit(unsigned long nr,
unsigned short bit = nr & SZLONG_MASK;
unsigned long res;
+ smp_llsc_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -396,6 +481,8 @@ static inline int test_and_change_bit(unsigned long nr,
unsigned short bit = nr & SZLONG_MASK;
unsigned long res;
+ smp_llsc_mb();
+
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -453,6 +540,21 @@ static inline int test_and_change_bit(unsigned long nr,
#include <asm-generic/bitops/non-atomic.h>
/*
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * __clear_bit() is non-atomic and implies release semantics before the memory
+ * operation. It can be used for an unlock if no other CPUs can concurrently
+ * modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
+{
+ smp_mb();
+ __clear_bit(nr, addr);
+}
+
+/*
* Return the bit position (0..63) of the most significant 1 bit in a word
* Returns -1 if no 1 bit exists
*/
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h
index 015cb0d379bd..03ae287baf89 100644
--- a/include/asm-parisc/bitops.h
+++ b/include/asm-parisc/bitops.h
@@ -208,6 +208,7 @@ static __inline__ int fls(int x)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 8144a2788db6..e85c3e078ba2 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -86,6 +86,24 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
: "cc" );
}
+static __inline__ void clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ unsigned long old;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1:" PPC_LLARX "%0,0,%3 # clear_bit_unlock\n"
+ "andc %0,%0,%2\n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%0,0,%3\n"
+ "bne- 1b"
+ : "=&r" (old), "+m" (*p)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+}
+
static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
unsigned long old;
@@ -125,6 +143,27 @@ static __inline__ int test_and_set_bit(unsigned long nr,
return (old & mask) != 0;
}
+static __inline__ int test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *addr)
+{
+ unsigned long old, t;
+ unsigned long mask = BITOP_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit_lock\n"
+ "or %1,%0,%2 \n"
+ PPC405_ERR77(0,%3)
+ PPC_STLCX "%1,0,%3 \n"
+ "bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+
+ return (old & mask) != 0;
+}
+
static __inline__ int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
@@ -185,6 +224,12 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
#include <asm-generic/bitops/non-atomic.h>
+static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
+{
+ __asm__ __volatile__(LWSYNC_ON_SMP "" ::: "memory");
+ __clear_bit(nr, addr);
+}
+
/*
* Return the zero-based bit position (LE, not IBM bit numbering) of
* the most significant 1-bit in a double word.
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
index 24751df791ac..568135fe52ea 100644
--- a/include/asm-powerpc/mpc52xx.h
+++ b/include/asm-powerpc/mpc52xx.h
@@ -18,6 +18,8 @@
#include <asm/prom.h>
#endif /* __ASSEMBLY__ */
+#include <linux/suspend.h>
+
/* ======================================================================== */
/* Structures mapping of some unit register set */
@@ -267,9 +269,9 @@ extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
extern int __init lite5200_pm_init(void);
/* lite5200 calls mpc5200 suspend functions, so here they are */
-extern int mpc52xx_pm_prepare(suspend_state_t);
+extern int mpc52xx_pm_prepare(void);
extern int mpc52xx_pm_enter(suspend_state_t);
-extern int mpc52xx_pm_finish(suspend_state_t);
+extern void mpc52xx_pm_finish(void);
extern char saved_sram[0x4000]; /* reuse buffer from mpc52xx suspend */
#endif
#endif /* CONFIG_PM */
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index fcd7b428ed0b..98c6bd5756b7 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -114,6 +114,9 @@ struct paca_struct {
u64 user_time; /* accumulated usermode TB ticks */
u64 system_time; /* accumulated system TB ticks */
u64 startpurr; /* PURR/TB value snapshot */
+ u64 startspurr; /* SPURR value snapshot */
+ u64 purrdelta; /* FIXME: document */
+ u64 spurrdelta; /* FIXME: document */
};
extern struct paca_struct paca[];
diff --git a/include/asm-ppc/time.h b/include/asm-ppc/time.h
index f7eadf6ac806..81dbcd43a501 100644
--- a/include/asm-ppc/time.h
+++ b/include/asm-ppc/time.h
@@ -57,7 +57,7 @@ static __inline__ void set_dec(unsigned int val)
/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
#ifdef CONFIG_6xx
-extern __inline__ int __attribute_pure__ __USE_RTC(void) {
+extern __inline__ int __pure __USE_RTC(void) {
return (mfspr(SPRN_PVR)>>16) == 1;
}
#else
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index f79c9b792af1..d756b34d25f3 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -746,6 +746,7 @@ static inline int sched_find_first_bit(unsigned long *b)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
/*
* ATTENTION: intel byte ordering convention for ext2 and minix !!
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h
index 1c16792cee1d..9d7021723a25 100644
--- a/include/asm-sh/bitops.h
+++ b/include/asm-sh/bitops.h
@@ -137,6 +137,7 @@ static inline unsigned long __ffs(unsigned long word)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h
index f3bdcdb5d046..444d5ea92ce9 100644
--- a/include/asm-sh64/bitops.h
+++ b/include/asm-sh64/bitops.h
@@ -136,6 +136,7 @@ static __inline__ unsigned long ffz(unsigned long word)
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index 329e696e7751..00bd0a679d70 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -96,6 +96,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 3d5e1af84723..dd4bfe993b61 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -81,6 +81,7 @@ static inline unsigned int hweight8(unsigned int w)
#include <asm-generic/bitops/hweight.h>
#endif
+#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/find.h>
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h
index 1fa99baf4e25..8eafdb1c08ba 100644
--- a/include/asm-v850/bitops.h
+++ b/include/asm-v850/bitops.h
@@ -145,6 +145,7 @@ static inline int __test_bit (int nr, const void *addr)
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a)
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
index a20fe9822f60..c96641f75022 100644
--- a/include/asm-x86/bitops_32.h
+++ b/include/asm-x86/bitops_32.h
@@ -402,6 +402,7 @@ static inline int fls(int x)
}
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index 1d7d9b4bcacb..525edf2ce5c2 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -408,6 +408,7 @@ static __inline__ int fls(int x)
#define ARCH_HAS_FAST_MULTIPLIER 1
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
diff --git a/include/asm-x86/suspend_64.h b/include/asm-x86/suspend_64.h
index b897e8cb55fb..9440a7a1b99a 100644
--- a/include/asm-x86/suspend_64.h
+++ b/include/asm-x86/suspend_64.h
@@ -53,3 +53,5 @@ extern unsigned long saved_rdi;
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
+extern char core_restore_code;
+extern char restore_registers;
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h
index 1c1e0d933eea..78db04cf6e48 100644
--- a/include/asm-xtensa/bitops.h
+++ b/include/asm-xtensa/bitops.h
@@ -108,6 +108,7 @@ static inline int fls (unsigned int x)
#endif
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/minix.h>
diff --git a/include/linux/aio.h b/include/linux/aio.h
index d10e608f232d..7ef8de662001 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -232,18 +232,6 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
__put_ioctx(kioctx); \
} while (0)
-#define in_aio() (unlikely(!is_sync_wait(current->io_wait)))
-
-/* may be used for debugging */
-#define warn_if_async() \
-do { \
- if (in_aio()) { \
- printk(KERN_ERR "%s(%s:%d) called in async context!\n", \
- __FUNCTION__, __FILE__, __LINE__); \
- dump_stack(); \
- } \
-} while (0)
-
#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
#include <linux/aio_abi.h>
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 6b20af0bbb79..7113a32a86ea 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -18,7 +18,7 @@ static inline void bit_spin_lock(int bitnum, unsigned long *addr)
*/
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- while (test_and_set_bit(bitnum, addr)) {
+ while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
while (test_bit(bitnum, addr)) {
preempt_enable();
cpu_relax();
@@ -36,7 +36,7 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- if (test_and_set_bit(bitnum, addr)) {
+ if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
preempt_enable();
return 0;
}
@@ -50,10 +50,28 @@ static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
*/
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(!test_bit(bitnum, addr));
+#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ clear_bit_unlock(bitnum, addr);
+#endif
+ preempt_enable();
+ __release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
BUG_ON(!test_bit(bitnum, addr));
- smp_mb__before_clear_bit();
- clear_bit(bitnum, addr);
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __clear_bit_unlock(bitnum, addr);
#endif
preempt_enable();
__release(bitlock);
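Illustrative sketch of the reworked bit-spinlock API (hypothetical word and state bit, not from this patch): bit_spin_unlock() remains the safe default, while __bit_spin_unlock() is only valid when the lock bit serializes every writer of the remaining bits in the same word, as its comment states.

        /* Hypothetical example: bit 0 locks the word, the other bits hold state. */
        static unsigned long my_word;

        static void update_state(void)
        {
                bit_spin_lock(0, &my_word);
                my_word |= 0x2;                 /* non-atomic update, protected by the bit lock */
                __bit_spin_unlock(0, &my_word); /* plain store + release barrier is enough here */
        }
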
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 8961e7fb755c..7a8d7ade28a0 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -310,10 +310,6 @@ typedef __u32 kernel_cap_t;
#define CAP_SETFCAP 31
#ifdef __KERNEL__
-/*
- * Bounding set
- */
-extern kernel_cap_t cap_bset;
/*
* Internal kernel functions only
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 16ea3374dddf..107787aacb64 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -221,10 +221,15 @@ extern void clocksource_resume(void);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
+extern void update_vsyscall_tz(void);
#else
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}
+
+static inline void update_vsyscall_tz(void)
+{
+}
#endif
#endif /* _LINUX_CLOCKSOURCE_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index acd583384bd9..fe23792f05c1 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -36,10 +36,20 @@
#define __weak __attribute__((weak))
#define __naked __attribute__((naked))
#define __noreturn __attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a,b) __attribute__((format(printf,a,b)))
#define noinline __attribute__((noinline))
-#define __attribute_pure__ __attribute__((pure))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 86f9a3a6137d..c811c8b979ac 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -132,20 +132,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __maybe_unused /* unimplemented */
#endif
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#ifndef __attribute_pure__
-# define __attribute_pure__ /* unimplemented */
-#endif
-
#ifndef noinline
#define noinline
#endif
diff --git a/include/linux/console.h b/include/linux/console.h
index 0a4542ddb73d..a5f88a6a259d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -122,14 +122,11 @@ extern void console_stop(struct console *);
extern void console_start(struct console *);
extern int is_console_locked(void);
-#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND
+extern int console_suspend_enabled;
+
/* Suspend and resume console messages over PM events */
extern void suspend_console(void);
extern void resume_console(void);
-#else
-static inline void suspend_console(void) {}
-static inline void resume_console(void) {}
-#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
int mda_console_init(void);
void prom_con_init(void);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0ad72c4cf312..b79c57569367 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -119,8 +119,9 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
#define lock_cpu_hotplug() do { } while (0)
#define unlock_cpu_hotplug() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
-#define register_hotcpu_notifier(nb) do { (void)(nb); } while (0)
-#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0)
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
static inline int cpu_is_offline(int cpu) { return 0; }
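The statement-expression form matters because the stubs are used in value context, which a do { } while (0) macro cannot satisfy; a minimal illustration (hypothetical notifier block, not part of the patch):

        /* Hypothetical caller; compiles the same with or without CONFIG_HOTPLUG_CPU. */
        int err = register_hotcpu_notifier(&my_cpu_nb);

        if (err)
                return err;
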
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 72aa00cc4b2d..8f3dcd30828f 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -512,11 +512,11 @@ struct cyclades_card {
void __iomem *base_addr;
void __iomem *ctl_addr;
int irq;
- int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- int first_line; /* minor number of first channel on card */
- int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
+ unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
+ unsigned int first_line; /* minor number of first channel on card */
+ unsigned int nports; /* Number of ports in the card */
+ int bus_index; /* address shift - 0 for ISA, 1 for PCI */
+ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
spinlock_t card_lock;
struct cyclades_port *ports;
};
@@ -566,10 +566,9 @@ struct cyclades_port {
int rtsdtr_inv;
int chip_rev;
int custom_divisor;
- int x_char; /* to be pushed out ASAP */
+ u8 x_char; /* to be pushed out ASAP */
int close_delay;
unsigned short closing_wait;
- unsigned long event;
int count; /* # of fd on device */
int breakon;
int breakoff;
@@ -584,7 +583,6 @@ struct cyclades_port {
struct cyclades_monitor mon;
struct cyclades_idle_stats idle_stats;
struct cyclades_icount icount;
- struct work_struct tqueue;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
struct completion shutdown_wait;
@@ -592,19 +590,6 @@ struct cyclades_port {
int throttle;
};
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at cy interrupt time.
- */
-#define Cy_EVENT_READ_PROCESS 0
-#define Cy_EVENT_WRITE_WAKEUP 1
-#define Cy_EVENT_HANGUP 2
-#define Cy_EVENT_BREAK 3
-#define Cy_EVENT_OPEN_WAKEUP 4
-#define Cy_EVENT_SHUTDOWN_WAKEUP 5
-#define Cy_EVENT_DELTA_WAKEUP 6
-#define Cy_EVENT_Z_RX_FULL 7
-
#define CLOSING_WAIT_DELAY 30*HZ
#define CY_CLOSING_WAIT_NONE 65535
#define CY_CLOSING_WAIT_INF 0
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0ebfafbd338c..101a2d4636be 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -13,16 +13,26 @@ enum dma_data_direction {
DMA_NONE = 3,
};
-#define DMA_64BIT_MASK 0xffffffffffffffffULL
-#define DMA_48BIT_MASK 0x0000ffffffffffffULL
-#define DMA_40BIT_MASK 0x000000ffffffffffULL
-#define DMA_39BIT_MASK 0x0000007fffffffffULL
-#define DMA_32BIT_MASK 0x00000000ffffffffULL
-#define DMA_31BIT_MASK 0x000000007fffffffULL
-#define DMA_30BIT_MASK 0x000000003fffffffULL
-#define DMA_29BIT_MASK 0x000000001fffffffULL
-#define DMA_28BIT_MASK 0x000000000fffffffULL
-#define DMA_24BIT_MASK 0x0000000000ffffffULL
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+/*
+ * NOTE: do not use the below macros in new code and do not add new definitions
+ * here.
+ *
+ * Instead, just open-code DMA_BIT_MASK(n) within your driver
+ */
+#define DMA_64BIT_MASK DMA_BIT_MASK(64)
+#define DMA_48BIT_MASK DMA_BIT_MASK(48)
+#define DMA_47BIT_MASK DMA_BIT_MASK(47)
+#define DMA_40BIT_MASK DMA_BIT_MASK(40)
+#define DMA_39BIT_MASK DMA_BIT_MASK(39)
+#define DMA_35BIT_MASK DMA_BIT_MASK(35)
+#define DMA_32BIT_MASK DMA_BIT_MASK(32)
+#define DMA_31BIT_MASK DMA_BIT_MASK(31)
+#define DMA_30BIT_MASK DMA_BIT_MASK(30)
+#define DMA_29BIT_MASK DMA_BIT_MASK(29)
+#define DMA_28BIT_MASK DMA_BIT_MASK(28)
+#define DMA_24BIT_MASK DMA_BIT_MASK(24)
#define DMA_MASK_NONE 0x0ULL
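As a quick sanity check on the new macro (illustrative only; the PCI device below is hypothetical): DMA_BIT_MASK(24) expands to 0x0000000000ffffffULL, and DMA_BIT_MASK(64) is special-cased to ~0ULL because shifting a 64-bit value by 64 is undefined in C. New code is expected to open-code the macro rather than the legacy named masks, e.g.:

        /* Hypothetical driver probe snippet using the open-coded form. */
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                dev_warn(&pdev->dev, "32-bit DMA not available\n");
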
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 589b0b355d84..64134456ed8c 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -72,8 +72,8 @@
* Macro-instructions used to manage several block sizes
*/
#define EXT3_MIN_BLOCK_SIZE 1024
-#define EXT3_MAX_BLOCK_SIZE 4096
-#define EXT3_MIN_BLOCK_LOG_SIZE 10
+#define EXT3_MAX_BLOCK_SIZE 65536
+#define EXT3_MIN_BLOCK_LOG_SIZE 10
#ifdef __KERNEL__
# define EXT3_BLOCK_SIZE(s) ((s)->s_blocksize)
#else
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index cdee7aaa57aa..97dd409d5f4a 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -36,10 +36,6 @@
/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
#define EXT4_MAX_RESERVE_BLOCKS 1027
#define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
-/*
- * Always enable hashed directories
- */
-#define CONFIG_EXT4_INDEX
/*
* Debug code
@@ -105,37 +101,29 @@
#define EXT4_BLOCK_ALIGN(size, blkbits) ALIGN((size), (1 << (blkbits)))
/*
- * Macro-instructions used to manage fragments
- */
-#define EXT4_MIN_FRAG_SIZE 1024
-#define EXT4_MAX_FRAG_SIZE 4096
-#define EXT4_MIN_FRAG_LOG_SIZE 10
-#ifdef __KERNEL__
-# define EXT4_FRAG_SIZE(s) (EXT4_SB(s)->s_frag_size)
-# define EXT4_FRAGS_PER_BLOCK(s) (EXT4_SB(s)->s_frags_per_block)
-#else
-# define EXT4_FRAG_SIZE(s) (EXT4_MIN_FRAG_SIZE << (s)->s_log_frag_size)
-# define EXT4_FRAGS_PER_BLOCK(s) (EXT4_BLOCK_SIZE(s) / EXT4_FRAG_SIZE(s))
-#endif
-
-/*
* Structure of a blocks group descriptor
*/
struct ext4_group_desc
{
- __le32 bg_block_bitmap; /* Blocks bitmap block */
- __le32 bg_inode_bitmap; /* Inodes bitmap block */
- __le32 bg_inode_table; /* Inodes table block */
+ __le32 bg_block_bitmap_lo; /* Blocks bitmap block */
+ __le32 bg_inode_bitmap_lo; /* Inodes bitmap block */
+ __le32 bg_inode_table_lo; /* Inodes table block */
__le16 bg_free_blocks_count; /* Free blocks count */
__le16 bg_free_inodes_count; /* Free inodes count */
__le16 bg_used_dirs_count; /* Directories count */
- __u16 bg_flags;
- __u32 bg_reserved[3];
+ __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */
+ __u32 bg_reserved[2]; /* Likely block/inode bitmap checksum */
+ __le16 bg_itable_unused; /* Unused inodes count */
+ __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
__le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */
__le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */
__le32 bg_inode_table_hi; /* Inodes table block MSB */
};
+#define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
+#define EXT4_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */
+#define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */
+
#ifdef __KERNEL__
#include <linux/ext4_fs_i.h>
#include <linux/ext4_fs_sb.h>
@@ -311,27 +299,24 @@ struct ext4_inode {
__le32 i_generation; /* File version (for NFS) */
__le32 i_file_acl; /* File ACL */
__le32 i_dir_acl; /* Directory ACL */
- __le32 i_faddr; /* Fragment address */
+ __le32 i_obso_faddr; /* Obsoleted fragment address */
union {
struct {
- __u8 l_i_frag; /* Fragment number */
- __u8 l_i_fsize; /* Fragment size */
+ __le16 l_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
__le16 l_i_file_acl_high;
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
__u32 l_i_reserved2;
} linux2;
struct {
- __u8 h_i_frag; /* Fragment number */
- __u8 h_i_fsize; /* Fragment size */
+ __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
__u16 h_i_mode_high;
__u16 h_i_uid_high;
__u16 h_i_gid_high;
__u32 h_i_author;
} hurd2;
struct {
- __u8 m_i_frag; /* Fragment number */
- __u8 m_i_fsize; /* Fragment size */
+ __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
__le16 m_i_file_acl_high;
__u32 m_i_reserved2[2];
} masix2;
@@ -419,8 +404,6 @@ do { \
#if defined(__KERNEL__) || defined(__linux__)
#define i_reserved1 osd1.linux1.l_i_reserved1
-#define i_frag osd2.linux2.l_i_frag
-#define i_fsize osd2.linux2.l_i_fsize
#define i_file_acl_high osd2.linux2.l_i_file_acl_high
#define i_uid_low i_uid
#define i_gid_low i_gid
@@ -431,8 +414,6 @@ do { \
#elif defined(__GNU__)
#define i_translator osd1.hurd1.h_i_translator
-#define i_frag osd2.hurd2.h_i_frag;
-#define i_fsize osd2.hurd2.h_i_fsize;
#define i_uid_high osd2.hurd2.h_i_uid_high
#define i_gid_high osd2.hurd2.h_i_gid_high
#define i_author osd2.hurd2.h_i_author
@@ -440,8 +421,6 @@ do { \
#elif defined(__masix__)
#define i_reserved1 osd1.masix1.m_i_reserved1
-#define i_frag osd2.masix2.m_i_frag
-#define i_fsize osd2.masix2.m_i_fsize
#define i_file_acl_high osd2.masix2.m_i_file_acl_high
#define i_reserved2 osd2.masix2.m_i_reserved2
@@ -522,15 +501,15 @@ do { \
*/
struct ext4_super_block {
/*00*/ __le32 s_inodes_count; /* Inodes count */
- __le32 s_blocks_count; /* Blocks count */
- __le32 s_r_blocks_count; /* Reserved blocks count */
- __le32 s_free_blocks_count; /* Free blocks count */
+ __le32 s_blocks_count_lo; /* Blocks count */
+ __le32 s_r_blocks_count_lo; /* Reserved blocks count */
+ __le32 s_free_blocks_count_lo; /* Free blocks count */
/*10*/ __le32 s_free_inodes_count; /* Free inodes count */
__le32 s_first_data_block; /* First Data Block */
__le32 s_log_block_size; /* Block size */
- __le32 s_log_frag_size; /* Fragment size */
+ __le32 s_obso_log_frag_size; /* Obsoleted fragment size */
/*20*/ __le32 s_blocks_per_group; /* # Blocks per group */
- __le32 s_frags_per_group; /* # Fragments per group */
+ __le32 s_obso_frags_per_group; /* Obsoleted fragments per group */
__le32 s_inodes_per_group; /* # Inodes per group */
__le32 s_mtime; /* Mount time */
/*30*/ __le32 s_wtime; /* Write time */
@@ -595,13 +574,13 @@ struct ext4_super_block {
/*150*/ __le32 s_blocks_count_hi; /* Blocks count */
__le32 s_r_blocks_count_hi; /* Reserved blocks count */
__le32 s_free_blocks_count_hi; /* Free blocks count */
- __u16 s_min_extra_isize; /* All inodes have at least # bytes */
- __u16 s_want_extra_isize; /* New inodes should reserve # bytes */
- __u32 s_flags; /* Miscellaneous flags */
- __u16 s_raid_stride; /* RAID stride */
- __u16 s_mmp_interval; /* # seconds to wait in MMP checking */
- __u64 s_mmp_block; /* Block for multi-mount protection */
- __u32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
+ __le16 s_min_extra_isize; /* All inodes have at least # bytes */
+ __le16 s_want_extra_isize; /* New inodes should reserve # bytes */
+ __le32 s_flags; /* Miscellaneous flags */
+ __le16 s_raid_stride; /* RAID stride */
+ __le16 s_mmp_interval; /* # seconds to wait in MMP checking */
+ __le64 s_mmp_block; /* Block for multi-mount protection */
+ __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
__u32 s_reserved[163]; /* Padding to the end of the block */
};
@@ -692,6 +671,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
#define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
+#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
@@ -702,15 +682,18 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
#define EXT4_FEATURE_INCOMPAT_META_BG 0x0010
#define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
+#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
#define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR
#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
EXT4_FEATURE_INCOMPAT_RECOVER| \
EXT4_FEATURE_INCOMPAT_META_BG| \
EXT4_FEATURE_INCOMPAT_EXTENTS| \
- EXT4_FEATURE_INCOMPAT_64BIT)
+ EXT4_FEATURE_INCOMPAT_64BIT| \
+ EXT4_FEATURE_INCOMPAT_FLEX_BG)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
@@ -789,17 +772,11 @@ struct ext4_dir_entry_2 {
* (c) Daniel Phillips, 2001
*/
-#ifdef CONFIG_EXT4_INDEX
- #define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
- EXT4_FEATURE_COMPAT_DIR_INDEX) && \
+#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
+ EXT4_FEATURE_COMPAT_DIR_INDEX) && \
(EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-#else
- #define is_dx(dir) 0
-#define EXT4_DIR_LINK_MAX(dir) ((dir)->i_nlink >= EXT4_LINK_MAX)
-#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2)
-#endif
/* Legal values for the dx_root hash_version field: */
@@ -1004,39 +981,39 @@ extern void ext4_inode_table_set(struct super_block *sb,
static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
{
return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_blocks_count);
+ le32_to_cpu(es->s_blocks_count_lo);
}
static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es)
{
return ((ext4_fsblk_t)le32_to_cpu(es->s_r_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_r_blocks_count);
+ le32_to_cpu(es->s_r_blocks_count_lo);
}
static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es)
{
return ((ext4_fsblk_t)le32_to_cpu(es->s_free_blocks_count_hi) << 32) |
- le32_to_cpu(es->s_free_blocks_count);
+ le32_to_cpu(es->s_free_blocks_count_lo);
}
static inline void ext4_blocks_count_set(struct ext4_super_block *es,
ext4_fsblk_t blk)
{
- es->s_blocks_count = cpu_to_le32((u32)blk);
+ es->s_blocks_count_lo = cpu_to_le32((u32)blk);
es->s_blocks_count_hi = cpu_to_le32(blk >> 32);
}
static inline void ext4_free_blocks_count_set(struct ext4_super_block *es,
ext4_fsblk_t blk)
{
- es->s_free_blocks_count = cpu_to_le32((u32)blk);
+ es->s_free_blocks_count_lo = cpu_to_le32((u32)blk);
es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32);
}
static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
ext4_fsblk_t blk)
{
- es->s_r_blocks_count = cpu_to_le32((u32)blk);
+ es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
}
diff --git a/include/linux/ext4_fs_extents.h b/include/linux/ext4_fs_extents.h
index 81406f3655d4..d2045a26195d 100644
--- a/include/linux/ext4_fs_extents.h
+++ b/include/linux/ext4_fs_extents.h
@@ -74,7 +74,7 @@ struct ext4_extent {
__le32 ee_block; /* first logical block extent covers */
__le16 ee_len; /* number of blocks covered by extent */
__le16 ee_start_hi; /* high 16 bits of physical block */
- __le32 ee_start; /* low 32 bits of physical block */
+ __le32 ee_start_lo; /* low 32 bits of physical block */
};
/*
@@ -83,7 +83,7 @@ struct ext4_extent {
*/
struct ext4_extent_idx {
__le32 ei_block; /* index covers logical blocks from 'block' */
- __le32 ei_leaf; /* pointer to the physical block of the next *
+ __le32 ei_leaf_lo; /* pointer to the physical block of the next *
* level. leaf or next index could be there */
__le16 ei_leaf_hi; /* high 16 bits of physical block */
__u16 ei_unused;
diff --git a/include/linux/ext4_fs_i.h b/include/linux/ext4_fs_i.h
index 1a511e9905aa..86ddfe2089f3 100644
--- a/include/linux/ext4_fs_i.h
+++ b/include/linux/ext4_fs_i.h
@@ -78,11 +78,6 @@ struct ext4_ext_cache {
struct ext4_inode_info {
__le32 i_data[15]; /* unconverted */
__u32 i_flags;
-#ifdef EXT4_FRAGMENTS
- __u32 i_faddr;
- __u8 i_frag_no;
- __u8 i_frag_size;
-#endif
ext4_fsblk_t i_file_acl;
__u32 i_dir_acl;
__u32 i_dtime;
diff --git a/include/linux/ext4_fs_sb.h b/include/linux/ext4_fs_sb.h
index 0a8e47d47c91..b40e827cd495 100644
--- a/include/linux/ext4_fs_sb.h
+++ b/include/linux/ext4_fs_sb.h
@@ -28,11 +28,8 @@
* third extended-fs super-block data in memory
*/
struct ext4_sb_info {
- unsigned long s_frag_size; /* Size of a fragment in bytes */
unsigned long s_desc_size; /* Size of a group descriptor in bytes */
- unsigned long s_frags_per_block;/* Number of fragments per block */
unsigned long s_inodes_per_block;/* Number of inodes per block */
- unsigned long s_frags_per_group;/* Number of fragments in a group */
unsigned long s_blocks_per_group;/* Number of blocks in a group */
unsigned long s_inodes_per_group;/* Number of inodes in a group */
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index d716e6392cf6..38c71d3c8dbf 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -12,8 +12,8 @@
* Ext4-specific journaling extensions.
*/
-#ifndef _LINUX_EXT4_JBD_H
-#define _LINUX_EXT4_JBD_H
+#ifndef _LINUX_EXT4_JBD2_H
+#define _LINUX_EXT4_JBD2_H
#include <linux/fs.h>
#include <linux/jbd2.h>
@@ -228,4 +228,4 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return 0;
}
-#endif /* _LINUX_EXT4_JBD_H */
+#endif /* _LINUX_EXT4_JBD2_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index efded00ad08c..08934995c7ab 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -4,6 +4,7 @@
#define FREEZER_H_INCLUDED
#include <linux/sched.h>
+#include <linux/wait.h>
#ifdef CONFIG_PM_SLEEP
/*
@@ -126,6 +127,36 @@ static inline void set_freezable(void)
current->flags &= ~PF_NOFREEZE;
}
+/*
+ * Freezer-friendly wrappers around wait_event_interruptible() and
+ * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
+ */
+
+#define wait_event_freezable(wq, condition) \
+({ \
+ int __retval; \
+ do { \
+ __retval = wait_event_interruptible(wq, \
+ (condition) || freezing(current)); \
+ if (__retval && !freezing(current)) \
+ break; \
+ else if (!(condition)) \
+ __retval = -ERESTARTSYS; \
+ } while (try_to_freeze()); \
+ __retval; \
+})
+
+
+#define wait_event_freezable_timeout(wq, condition, timeout) \
+({ \
+ long __retval = timeout; \
+ do { \
+ __retval = wait_event_interruptible_timeout(wq, \
+ (condition) || freezing(current), \
+ __retval); \
+ } while (try_to_freeze()); \
+ __retval; \
+})
#else /* !CONFIG_PM_SLEEP */
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
@@ -143,6 +174,13 @@ static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}
+
+#define wait_event_freezable(wq, condition) \
+ wait_event_interruptible(wq, condition)
+
+#define wait_event_freezable_timeout(wq, condition, timeout) \
+ wait_event_interruptible_timeout(wq, condition, timeout)
+
#endif /* !CONFIG_PM_SLEEP */
#endif /* FREEZER_H_INCLUDED */
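A minimal sketch of the intended caller pattern for the new wrappers (hypothetical kernel thread; the wait queue and condition names are made up): the thread stays freezable while it blocks, and the return value follows wait_event_interruptible().

        /* Hypothetical kthread using wait_event_freezable(). */
        static int my_thread(void *unused)
        {
                set_freezable();
                while (!kthread_should_stop()) {
                        wait_event_freezable(my_wq,
                                        my_work_pending || kthread_should_stop());
                        /* ... handle my_work_pending ... */
                }
                return 0;
        }
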
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e3fc5dbb2246..6a4d170ad9a5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -330,6 +330,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_KILL_SGID 4096
#define ATTR_FILE 8192
#define ATTR_KILL_PRIV 16384
+#define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */
/*
* This is the Inode Attributes structure, used for notify_change(). It
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fbe9d258e22..d0c437028c80 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -6,7 +6,17 @@
See the file COPYING.
*/
-/* This file defines the kernel interface of FUSE */
+/*
+ * This file defines the kernel interface of FUSE
+ *
+ * Protocol changelog:
+ *
+ * 7.9:
+ * - new fuse_getattr_in input argument of GETATTR
+ * - add lk_flags in fuse_lk_in
+ * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
+ * - add blksize field to fuse_attr
+ */
#include <asm/types.h>
#include <linux/major.h>
@@ -15,7 +25,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 8
+#define FUSE_KERNEL_MINOR_VERSION 9
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -44,6 +54,8 @@ struct fuse_attr {
__u32 uid;
__u32 gid;
__u32 rdev;
+ __u32 blksize;
+ __u32 padding;
};
struct fuse_kstatfs {
@@ -76,6 +88,9 @@ struct fuse_file_lock {
#define FATTR_ATIME (1 << 4)
#define FATTR_MTIME (1 << 5)
#define FATTR_FH (1 << 6)
+#define FATTR_ATIME_NOW (1 << 7)
+#define FATTR_MTIME_NOW (1 << 8)
+#define FATTR_LOCKOWNER (1 << 9)
/**
* Flags returned by the OPEN request
@@ -91,12 +106,38 @@ struct fuse_file_lock {
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
+#define FUSE_FILE_OPS (1 << 2)
+#define FUSE_ATOMIC_O_TRUNC (1 << 3)
/**
* Release flags
*/
#define FUSE_RELEASE_FLUSH (1 << 0)
+/**
+ * Getattr flags
+ */
+#define FUSE_GETATTR_FH (1 << 0)
+
+/**
+ * Lock flags
+ */
+#define FUSE_LK_FLOCK (1 << 0)
+
+/**
+ * WRITE flags
+ *
+ * FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
+ * FUSE_WRITE_LOCKOWNER: lock_owner field is valid
+ */
+#define FUSE_WRITE_CACHE (1 << 0)
+#define FUSE_WRITE_LOCKOWNER (1 << 1)
+
+/**
+ * Read flags
+ */
+#define FUSE_READ_LOCKOWNER (1 << 1)
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -139,6 +180,8 @@ enum fuse_opcode {
/* The read buffer is required to be at least 8k, but may be much larger */
#define FUSE_MIN_READ_BUFFER 8192
+#define FUSE_COMPAT_ENTRY_OUT_SIZE 120
+
struct fuse_entry_out {
__u64 nodeid; /* Inode ID */
__u64 generation; /* Inode generation: nodeid:gen must
@@ -154,6 +197,14 @@ struct fuse_forget_in {
__u64 nlookup;
};
+struct fuse_getattr_in {
+ __u32 getattr_flags;
+ __u32 dummy;
+ __u64 fh;
+};
+
+#define FUSE_COMPAT_ATTR_OUT_SIZE 96
+
struct fuse_attr_out {
__u64 attr_valid; /* Cache timeout for the attributes */
__u32 attr_valid_nsec;
@@ -184,7 +235,7 @@ struct fuse_setattr_in {
__u32 padding;
__u64 fh;
__u64 size;
- __u64 unused1;
+ __u64 lock_owner;
__u64 atime;
__u64 mtime;
__u64 unused2;
@@ -227,14 +278,18 @@ struct fuse_read_in {
__u64 fh;
__u64 offset;
__u32 size;
- __u32 padding;
+ __u32 read_flags;
+ __u64 lock_owner;
};
+#define FUSE_COMPAT_WRITE_IN_SIZE 24
+
struct fuse_write_in {
__u64 fh;
__u64 offset;
__u32 size;
__u32 write_flags;
+ __u64 lock_owner;
};
struct fuse_write_out {
@@ -273,6 +328,8 @@ struct fuse_lk_in {
__u64 fh;
__u64 owner;
struct fuse_file_lock lk;
+ __u32 lk_flags;
+ __u32 padding;
};
struct fuse_lk_out {
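For orientation, a hedged sketch of how a client might fill the new 7.9 GETATTR input argument (the condition and handle variable are hypothetical): when an open file handle is available, FUSE_GETATTR_FH tells the filesystem to use the handle rather than the inode.

        /* Illustrative only. */
        struct fuse_getattr_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        if (have_file_handle) {                 /* hypothetical condition */
                inarg.getattr_flags |= FUSE_GETATTR_FH;
                inarg.fh = fh;                  /* hypothetical open handle */
        }
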
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 7a9db390c56a..c5bd28b69aec 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -365,6 +365,16 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
int priority);
/*
+ * Poll the IPMI interface for the user. This causes the IPMI code to
+ * do an immediate check for information from the driver and handle
+ * anything that is immediately pending. This will not block in any
+ * way. This is useful if you need to implement polling from the user
+ * for things like modifying the watchdog timeout when a panic occurs
+ * or disabling the watchdog timer on a reboot.
+ */
+void ipmi_poll_interface(ipmi_user_t user);
+
+/*
* When commands come in to the SMS, the user can register to receive
* them. Only one user can be listening on a specific netfn/cmd/chan tuple
* at a time, you will get an EBUSY error if the command is already
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index c0633108d05d..efa292a52e7e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -148,26 +148,46 @@ struct ipmi_device_id {
/* Take a pointer to a raw data buffer and a length and extract device
id information from it. The first byte of data must point to the
- byte from the get device id response after the completion code.
- The caller is responsible for making sure the length is at least
- 11 and the command completed without error. */
-static inline void ipmi_demangle_device_id(unsigned char *data,
- unsigned int data_len,
- struct ipmi_device_id *id)
+ netfn << 2, the data should be of the format:
+ netfn << 2, cmd, completion code, data
+ as normally comes from a device interface. */
+static inline int ipmi_demangle_device_id(const unsigned char *data,
+ unsigned int data_len,
+ struct ipmi_device_id *id)
{
+ if (data_len < 9)
+ return -EINVAL;
+ if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
+ data[1] != IPMI_GET_DEVICE_ID_CMD)
+ /* Strange, didn't get the response we expected. */
+ return -EINVAL;
+ if (data[2] != 0)
+ /* That's odd, it shouldn't be able to fail. */
+ return -EINVAL;
+
+ data += 3;
+ data_len -= 3;
id->device_id = data[0];
id->device_revision = data[1];
id->firmware_revision_1 = data[2];
id->firmware_revision_2 = data[3];
id->ipmi_version = data[4];
id->additional_device_support = data[5];
- id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16);
- id->product_id = data[9] | (data[10] << 8);
+ if (data_len >= 11) {
+ id->manufacturer_id = (data[6] | (data[7] << 8) |
+ (data[8] << 16));
+ id->product_id = data[9] | (data[10] << 8);
+ } else {
+ id->manufacturer_id = 0;
+ id->product_id = 0;
+ }
if (data_len >= 15) {
memcpy(id->aux_firmware_revision, data+11, 4);
id->aux_firmware_revision_set = 1;
} else
id->aux_firmware_revision_set = 0;
+
+ return 0;
}
/* Add a low-level interface to the IPMI driver. Note that if the
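
Since the helper now validates the raw response itself and returns an error code, a caller (sketched hypothetically below, not from this patch) passes the untrimmed message and checks the result instead of pre-validating the length:

#include <linux/kernel.h>
#include <linux/ipmi_smi.h>

/* Hypothetical caller: resp starts at netfn << 2, followed by cmd and
 * the completion code, exactly as it arrives from the interface. */
static int report_device_id(const unsigned char *resp, unsigned int resp_len)
{
	struct ipmi_device_id id;
	int rv;

	rv = ipmi_demangle_device_id(resp, resp_len, &id);
	if (rv)
		return rv;	/* short or unexpected response */

	printk(KERN_INFO "IPMI device id 0x%2.2x, firmware %d.%d\n",
	       id.device_id, id.firmware_revision_1, id.firmware_revision_2);
	return 0;
}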
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 72f522372924..a3abf51e488f 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -72,14 +72,15 @@ extern int journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
-extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
-
-#define jbd_kmalloc(size, flags) \
- __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
-#define jbd_rep_kmalloc(size, flags) \
- __jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
+static inline void *jbd_alloc(size_t size, gfp_t flags)
+{
+ return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd_free(void *ptr, size_t size)
+{
+ free_pages((unsigned long)ptr, get_order(size));
+};
#define JFS_MIN_JOURNAL_BLOCKS 1024
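
The slab-based jbd_kmalloc()/jbd_slab_alloc() wrappers give way to page-order allocations. Illustratively (a sketch, not code from this patch), a caller that needs a temporary journal-block-sized buffer pairs the new helpers, passing the same size to both:

#include <linux/jbd.h>

/* Hypothetical caller: jbd_alloc()/jbd_free() round the request up to
 * whole pages via get_order(), so the free must see the same size as
 * the allocation. */
static int with_scratch_block(journal_t *journal)
{
	void *tmp = jbd_alloc(journal->j_blocksize, GFP_NOFS);

	if (!tmp)
		return -ENOMEM;
	/* ... use tmp as temporary journal block storage ... */
	jbd_free(tmp, journal->j_blocksize);
	return 0;
}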
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 260d6d76c5f3..06ef11457051 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -13,8 +13,8 @@
* filesystem journaling support.
*/
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
+#ifndef _LINUX_JBD2_H
+#define _LINUX_JBD2_H
/* Allow this file to be included directly into e2fsprogs */
#ifndef __KERNEL__
@@ -37,26 +37,26 @@
#define journal_oom_retry 1
/*
- * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds
+ * Define JBD2_PARANOID_IOFAIL to cause a kernel BUG() if ext4 finds
* certain classes of error which can occur due to failed IOs. Under
- * normal use we want ext3 to continue after such errors, because
+ * normal use we want ext4 to continue after such errors, because
* hardware _can_ fail, but for debugging purposes when running tests on
* known-good hardware we may want to trap these errors.
*/
-#undef JBD_PARANOID_IOFAIL
+#undef JBD2_PARANOID_IOFAIL
/*
* The default maximum commit age, in seconds.
*/
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
+#define JBD2_DEFAULT_MAX_COMMIT_AGE 5
#ifdef CONFIG_JBD2_DEBUG
/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
+ * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal
* consistency checks. By default we don't do this unless
* CONFIG_JBD2_DEBUG is on.
*/
-#define JBD_EXPENSIVE_CHECKING
+#define JBD2_EXPENSIVE_CHECKING
extern u8 jbd2_journal_enable_debug;
#define jbd_debug(n, f, a...) \
@@ -71,14 +71,15 @@ extern u8 jbd2_journal_enable_debug;
#define jbd_debug(f, a...) /**/
#endif
-extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
-extern void jbd2_slab_free(void *ptr, size_t size);
+static inline void *jbd2_alloc(size_t size, gfp_t flags)
+{
+ return (void *)__get_free_pages(flags, get_order(size));
+}
-#define jbd_kmalloc(size, flags) \
- __jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
-#define jbd_rep_kmalloc(size, flags) \
- __jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
+static inline void jbd2_free(void *ptr, size_t size)
+{
+ free_pages((unsigned long)ptr, get_order(size));
+};
#define JBD2_MIN_JOURNAL_BLOCKS 1024
@@ -162,8 +163,8 @@ typedef struct journal_block_tag_s
__be32 t_blocknr_high; /* most-significant high 32bits. */
} journal_block_tag_t;
-#define JBD_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
-#define JBD_TAG_SIZE64 (sizeof(journal_block_tag_t))
+#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
+#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
/*
* The revoke descriptor: used on disk to describe a series of blocks to
@@ -255,8 +256,8 @@ typedef struct journal_superblock_s
#include <linux/fs.h>
#include <linux/sched.h>
-#define JBD_ASSERTIONS
-#ifdef JBD_ASSERTIONS
+#define JBD2_ASSERTIONS
+#ifdef JBD2_ASSERTIONS
#define J_ASSERT(assert) \
do { \
if (!(assert)) { \
@@ -283,9 +284,9 @@ void buffer_assertion_failure(struct buffer_head *bh);
#else
#define J_ASSERT(assert) do { } while (0)
-#endif /* JBD_ASSERTIONS */
+#endif /* JBD2_ASSERTIONS */
-#if defined(JBD_PARANOID_IOFAIL)
+#if defined(JBD2_PARANOID_IOFAIL)
#define J_EXPECT(expr, why...) J_ASSERT(expr)
#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
@@ -959,12 +960,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
*/
extern struct kmem_cache *jbd2_handle_cache;
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
{
return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
}
-static inline void jbd_free_handle(handle_t *handle)
+static inline void jbd2_free_handle(handle_t *handle)
{
kmem_cache_free(jbd2_handle_cache, handle);
}
@@ -1103,4 +1104,4 @@ extern int jbd_blocks_per_page(struct inode *inode);
#endif /* __KERNEL__ */
-#endif /* _LINUX_JBD_H */
+#endif /* _LINUX_JBD2_H */
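
With the jbd_ prefixed helpers renamed, handle allocation in jbd2 pairs up as in this rough sketch (mirroring, not quoting, what jbd2_journal_start()/jbd2_journal_stop() do internally):

#include <linux/err.h>
#include <linux/jbd2.h>

/* Hypothetical sketch of the renamed handle helpers in use. */
static handle_t *new_handle_sketch(void)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);

	if (!handle)
		return ERR_PTR(-ENOMEM);
	/* ... fill in h_buffer_credits, attach to a transaction ... */
	return handle;
}

static void drop_handle_sketch(handle_t *handle)
{
	jbd2_free_handle(handle);
}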
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 12bf44f083f5..e8ffce898bf9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -53,7 +53,9 @@ static inline int kstat_irqs(int irq)
}
extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_user_time_scaled(struct task_struct *, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t);
+extern void account_system_time_scaled(struct task_struct *, cputime_t);
extern void account_steal_time(struct task_struct *, cputime_t);
#endif /* _LINUX_KERNEL_STAT_H */
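
The new hooks let an architecture report a frequency-scaled time figure alongside the normal tick accounting. A hedged sketch of how a tick handler might feed them; scale_run_time() and its scaling source are assumptions, not part of this patch:

#include <linux/kernel_stat.h>
#include <linux/hardirq.h>	/* HARDIRQ_OFFSET */
#include <linux/sched.h>

/* Placeholder: a real implementation would scale by a hardware counter
 * such as SPURR/PURR or the current CPU frequency. */
static cputime_t scale_run_time(cputime_t t)
{
	return t;
}

static void account_tick_sketch(struct task_struct *p, int user_tick)
{
	cputime_t one_tick = jiffies_to_cputime(1);
	cputime_t scaled = scale_run_time(one_tick);

	if (user_tick) {
		account_user_time(p, one_tick);
		account_user_time_scaled(p, scaled);
	} else {
		account_system_time(p, HARDIRQ_OFFSET, one_tick);
		account_system_time_scaled(p, scaled);
	}
}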
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 9cdd6943e01b..ec3f76598327 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -510,7 +510,6 @@ extern struct pardevice *parport_open (int devnum, const char *name,
int flags, void *handle);
extern void parport_close (struct pardevice *dev);
extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
-extern int parport_device_num (int parport, int mux, int daisy);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 48b71badfb4c..09a309b7b5d2 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -104,104 +104,6 @@ extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
-typedef int __bitwise suspend_state_t;
-
-#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
-#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
-#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
-#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
-
-/**
- * struct pm_ops - Callbacks for managing platform dependent system sleep
- * states.
- *
- * @valid: Callback to determine if given system sleep state is supported by
- * the platform.
- * Valid (ie. supported) states are advertised in /sys/power/state. Note
- * that it still may be impossible to enter given system sleep state if the
- * conditions aren't right.
- * There is the %pm_valid_only_mem function available that can be assigned
- * to this if the platform only supports mem sleep.
- *
- * @set_target: Tell the platform which system sleep state is going to be
- * entered.
- * @set_target() is executed right prior to suspending devices. The
- * information conveyed to the platform code by @set_target() should be
- * disregarded by the platform as soon as @finish() is executed and if
- * @prepare() fails. If @set_target() fails (ie. returns nonzero),
- * @prepare(), @enter() and @finish() will not be called by the PM core.
- * This callback is optional. However, if it is implemented, the argument
- * passed to @prepare(), @enter() and @finish() is meaningless and should
- * be ignored.
- *
- * @prepare: Prepare the platform for entering the system sleep state indicated
- * by @set_target() or represented by the argument if @set_target() is not
- * implemented.
- * @prepare() is called right after devices have been suspended (ie. the
- * appropriate .suspend() method has been executed for each device) and
- * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
- * This callback is optional. It returns 0 on success or a negative
- * error code otherwise, in which case the system cannot enter the desired
- * sleep state (@enter() and @finish() will not be called in that case).
- *
- * @enter: Enter the system sleep state indicated by @set_target() or
- * represented by the argument if @set_target() is not implemented.
- * This callback is mandatory. It returns 0 on success or a negative
- * error code otherwise, in which case the system cannot enter the desired
- * sleep state.
- *
- * @finish: Called when the system has just left a sleep state, right after
- * the nonboot CPUs have been enabled and before devices are resumed (it is
- * executed with IRQs enabled). If @set_target() is not implemented, the
- * argument represents the sleep state being left.
- * This callback is optional, but should be implemented by the platforms
- * that implement @prepare(). If implemented, it is always called after
- * @enter() (even if @enter() fails).
- */
-struct pm_ops {
- int (*valid)(suspend_state_t state);
- int (*set_target)(suspend_state_t state);
- int (*prepare)(suspend_state_t state);
- int (*enter)(suspend_state_t state);
- int (*finish)(suspend_state_t state);
-};
-
-#ifdef CONFIG_SUSPEND
-extern struct pm_ops *pm_ops;
-
-/**
- * pm_set_ops - set platform dependent power management ops
- * @pm_ops: The new power management operations to set.
- */
-extern void pm_set_ops(struct pm_ops *pm_ops);
-extern int pm_valid_only_mem(suspend_state_t state);
-
-/**
- * arch_suspend_disable_irqs - disable IRQs for suspend
- *
- * Disables IRQs (in the default case). This is a weak symbol in the common
- * code and thus allows architectures to override it if more needs to be
- * done. Not called for suspend to disk.
- */
-extern void arch_suspend_disable_irqs(void);
-
-/**
- * arch_suspend_enable_irqs - enable IRQs after suspend
- *
- * Enables IRQs (in the default case). This is a weak symbol in the common
- * code and thus allows architectures to override it if more needs to be
- * done. Not called for suspend to disk.
- */
-extern void arch_suspend_enable_irqs(void);
-
-extern int pm_suspend(suspend_state_t state);
-#else /* !CONFIG_SUSPEND */
-#define suspend_valid_only_mem NULL
-
-static inline void pm_set_ops(struct pm_ops *pm_ops) {}
-static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
-#endif /* !CONFIG_SUSPEND */
-
/*
* Device power management
*/
diff --git a/include/linux/poison.h b/include/linux/poison.h
index d93c300a3449..a9c31be7052c 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -36,7 +36,8 @@
*/
/********** fs/jbd/journal.c **********/
-#define JBD_POISON_FREE 0x5b
+#define JBD_POISON_FREE 0x5b
+#define JBD2_POISON_FREE 0x5c
/********** drivers/base/dmapool.c **********/
#define POOL_POISON_FREED 0xa7 /* !inuse */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c204ab0d4df1..7accc04e23ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -991,7 +991,7 @@ struct task_struct {
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
unsigned int rt_priority;
- cputime_t utime, stime;
+ cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time; /* monotonic time */
@@ -1110,13 +1110,6 @@ struct task_struct {
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
-/*
- * current io wait handle: wait queue entry to use for io waits
- * If this thread is processing aio, this points at the waitqueue
- * inside the currently handled kiocb. It may be NULL (i.e. default
- * to a stack based synchronous wait) if its doing sync IO.
- */
- wait_queue_t *io_wait;
#ifdef CONFIG_TASK_XACCT
/* i/o counters(bytes read/written, #syscalls */
u64 rchar, wchar, syscr, syscw;
diff --git a/include/linux/security.h b/include/linux/security.h
index 9b0b63c50f44..ff3f857f6957 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -34,6 +34,13 @@
#include <linux/xfrm.h>
#include <net/flow.h>
+/*
+ * Bounding set
+ */
+extern kernel_cap_t cap_bset;
+
+extern unsigned securebits;
+
struct ctl_table;
/*
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 388cace9751f..4360e0816956 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_SWSUSP_H
-#define _LINUX_SWSUSP_H
+#ifndef _LINUX_SUSPEND_H
+#define _LINUX_SUSPEND_H
#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
#include <asm/suspend.h>
@@ -9,6 +9,108 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
+#include <asm/errno.h>
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
+static inline int pm_prepare_console(void) { return 0; }
+static inline void pm_restore_console(void) {}
+#endif
+
+typedef int __bitwise suspend_state_t;
+
+#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
+#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
+#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
+#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
+
+/**
+ * struct platform_suspend_ops - Callbacks for managing platform dependent
+ * system sleep states.
+ *
+ * @valid: Callback to determine if given system sleep state is supported by
+ * the platform.
+ * Valid (ie. supported) states are advertised in /sys/power/state. Note
+ * that it still may be impossible to enter given system sleep state if the
+ * conditions aren't right.
+ * There is the %suspend_valid_only_mem function available that can be
+ * assigned to this if the platform only supports mem sleep.
+ *
+ * @set_target: Tell the platform which system sleep state is going to be
+ * entered.
+ * @set_target() is executed right prior to suspending devices. The
+ * information conveyed to the platform code by @set_target() should be
+ * disregarded by the platform as soon as @finish() is executed and if
+ * @prepare() fails. If @set_target() fails (ie. returns nonzero),
+ * @prepare(), @enter() and @finish() will not be called by the PM core.
+ * This callback is optional. However, if it is implemented, the argument
+ * passed to @enter() is meaningless and should be ignored.
+ *
+ * @prepare: Prepare the platform for entering the system sleep state indicated
+ * by @set_target().
+ * @prepare() is called right after devices have been suspended (ie. the
+ * appropriate .suspend() method has been executed for each device) and
+ * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
+ * This callback is optional. It returns 0 on success or a negative
+ * error code otherwise, in which case the system cannot enter the desired
+ * sleep state (@enter() and @finish() will not be called in that case).
+ *
+ * @enter: Enter the system sleep state indicated by @set_target() or
+ * represented by the argument if @set_target() is not implemented.
+ * This callback is mandatory. It returns 0 on success or a negative
+ * error code otherwise, in which case the system cannot enter the desired
+ * sleep state.
+ *
+ * @finish: Called when the system has just left a sleep state, right after
+ * the nonboot CPUs have been enabled and before devices are resumed (it is
+ * executed with IRQs enabled).
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @prepare(). If implemented, it is always called after
+ * @enter() (even if @enter() fails).
+ */
+struct platform_suspend_ops {
+ int (*valid)(suspend_state_t state);
+ int (*set_target)(suspend_state_t state);
+ int (*prepare)(void);
+ int (*enter)(suspend_state_t state);
+ void (*finish)(void);
+};
+
+#ifdef CONFIG_SUSPEND
+/**
+ * suspend_set_ops - set platform dependent suspend operations
+ * @ops: The new suspend operations to set.
+ */
+extern void suspend_set_ops(struct platform_suspend_ops *ops);
+extern int suspend_valid_only_mem(suspend_state_t state);
+
+/**
+ * arch_suspend_disable_irqs - disable IRQs for suspend
+ *
+ * Disables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_disable_irqs(void);
+
+/**
+ * arch_suspend_enable_irqs - enable IRQs after suspend
+ *
+ * Enables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_enable_irqs(void);
+
+extern int pm_suspend(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+#define suspend_valid_only_mem NULL
+
+static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
+static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+#endif /* !CONFIG_SUSPEND */
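
Under the new naming, a platform that only supports suspend-to-RAM registers itself roughly as follows; a hedged sketch in which the board_* functions are placeholders for real platform code:

#include <linux/suspend.h>

static int board_suspend_enter(suspend_state_t state)
{
	if (state != PM_SUSPEND_MEM)
		return -EINVAL;
	/* ... program the hardware for its low-power state ... */
	return 0;
}

static struct platform_suspend_ops board_suspend_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= board_suspend_enter,
};

static int __init board_pm_init(void)
{
	suspend_set_ops(&board_suspend_ops);
	return 0;
}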
/* struct pbe is used for creating lists of pages that should be restored
* atomically during the resume from disk, because the page frames they have
@@ -24,32 +126,57 @@ struct pbe {
extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
-#else
-static inline int pm_prepare_console(void) { return 0; }
-static inline void pm_restore_console(void) {}
-#endif
-
/**
- * struct hibernation_ops - hibernation platform support
+ * struct platform_hibernation_ops - hibernation platform support
*
* The methods in this structure allow a platform to override the default
* mechanism of shutting down the machine during a hibernation transition.
*
* All three methods must be assigned.
*
- * @prepare: prepare system for hibernation
- * @enter: shut down system after state has been saved to disk
- * @finish: finish/clean up after state has been reloaded
- * @pre_restore: prepare system for the restoration from a hibernation image
- * @restore_cleanup: clean up after a failing image restoration
+ * @start: Tell the platform driver that we're starting hibernation.
+ * Called right after shrinking memory and before freezing devices.
+ *
+ * @pre_snapshot: Prepare the platform for creating the hibernation image.
+ * Called right after devices have been frozen and before the nonboot
+ * CPUs are disabled (runs with IRQs on).
+ *
+ * @finish: Restore the previous state of the platform after the hibernation
+ * image has been created *or* put the platform into the normal operation
+ * mode after the hibernation (the same method is executed in both cases).
+ * Called right after the nonboot CPUs have been enabled and before
+ * thawing devices (runs with IRQs on).
+ *
+ * @prepare: Prepare the platform for entering the low power state.
+ * Called right after the hibernation image has been saved and before
+ * devices are prepared for entering the low power state.
+ *
+ * @enter: Put the system into the low power state after the hibernation image
+ * has been saved to disk.
+ * Called after the nonboot CPUs have been disabled and all of the low
+ * level devices have been shut down (runs with IRQs off).
+ *
+ * @leave: Perform the first stage of the cleanup after the system sleep state
+ * indicated by @set_target() has been left.
+ * Called right after the control has been passed from the boot kernel to
+ * the image kernel, before the nonboot CPUs are enabled and before devices
+ * are resumed. Executed with interrupts disabled.
+ *
+ * @pre_restore: Prepare system for the restoration from a hibernation image.
+ * Called right after devices have been frozen and before the nonboot
+ * CPUs are disabled (runs with IRQs on).
+ *
+ * @restore_cleanup: Clean up after a failing image restoration.
+ * Called right after the nonboot CPUs have been enabled and before
+ * thawing devices (runs with IRQs on).
*/
-struct hibernation_ops {
+struct platform_hibernation_ops {
+ int (*start)(void);
+ int (*pre_snapshot)(void);
+ void (*finish)(void);
int (*prepare)(void);
int (*enter)(void);
- void (*finish)(void);
+ void (*leave)(void);
int (*pre_restore)(void);
void (*restore_cleanup)(void);
};
@@ -70,14 +197,14 @@ extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);
-extern void hibernation_set_ops(struct hibernation_ops *ops);
+extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
#else /* CONFIG_HIBERNATION */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
-static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
+static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
#endif /* CONFIG_HIBERNATION */
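
Similarly, a hedged sketch of wiring up the renamed hibernation structure; the board_hib_* callbacks below are trivial placeholders standing in for real platform hooks:

#include <linux/suspend.h>

static int  board_hib_noop_int(void)  { return 0; }	/* placeholder */
static void board_hib_noop_void(void) { }		/* placeholder */

static struct platform_hibernation_ops board_hibernation_ops = {
	.start		 = board_hib_noop_int,
	.pre_snapshot	 = board_hib_noop_int,
	.finish		 = board_hib_noop_void,
	.prepare	 = board_hib_noop_int,
	.enter		 = board_hib_noop_int,
	.leave		 = board_hib_noop_void,
	.pre_restore	 = board_hib_noop_int,
	.restore_cleanup = board_hib_noop_void,
};

static int __init board_hibernation_init(void)
{
	hibernation_set_ops(&board_hibernation_ops);
	return 0;
}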
@@ -130,4 +257,4 @@ static inline void register_nosave_region_late(unsigned long b, unsigned long e)
}
#endif
-#endif /* _LINUX_SWSUSP_H */
+#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 483050c924c3..e99171f01b4c 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -238,6 +238,7 @@ enum
NET_LLC=18,
NET_NETFILTER=19,
NET_DCCP=20,
+ NET_IRDA=412,
};
/* /proc/sys/kernel/random */
@@ -795,6 +796,25 @@ enum {
NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};
+/* /proc/sys/net/irda */
+enum {
+ NET_IRDA_DISCOVERY=1,
+ NET_IRDA_DEVNAME=2,
+ NET_IRDA_DEBUG=3,
+ NET_IRDA_FAST_POLL=4,
+ NET_IRDA_DISCOVERY_SLOTS=5,
+ NET_IRDA_DISCOVERY_TIMEOUT=6,
+ NET_IRDA_SLOT_TIMEOUT=7,
+ NET_IRDA_MAX_BAUD_RATE=8,
+ NET_IRDA_MIN_TX_TURN_TIME=9,
+ NET_IRDA_MAX_TX_DATA_SIZE=10,
+ NET_IRDA_MAX_TX_WINDOW=11,
+ NET_IRDA_MAX_NOREPLY_TIME=12,
+ NET_IRDA_WARN_NOREPLY_TIME=13,
+ NET_IRDA_LAP_KEEPALIVE_TIME=14,
+};
+
+
/* CTL_FS names: */
enum
{
@@ -937,41 +957,42 @@ extern int sysctl_perm(struct ctl_table *table, int op);
typedef struct ctl_table ctl_table;
-typedef int ctl_handler (ctl_table *table, int __user *name, int nlen,
+typedef int ctl_handler (struct ctl_table *table, int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
-typedef int proc_handler (ctl_table *ctl, int write, struct file * filp,
+typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
-extern int proc_dostring(ctl_table *, int, struct file *,
+extern int proc_dostring(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec(ctl_table *, int, struct file *,
+extern int proc_dointvec(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_bset(ctl_table *, int, struct file *,
+extern int proc_dointvec_bset(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_minmax(ctl_table *, int, struct file *,
+extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_jiffies(ctl_table *, int, struct file *,
+extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_userhz_jiffies(ctl_table *, int, struct file *,
+extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_ms_jiffies(ctl_table *, int, struct file *,
+extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_minmax(ctl_table *, int, struct file *,
+extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
+extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
struct file *, void __user *, size_t *, loff_t *);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
-extern int do_sysctl_strategy (ctl_table *table,
+extern int do_sysctl_strategy (struct ctl_table *table,
int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
+extern ctl_handler sysctl_data;
extern ctl_handler sysctl_string;
extern ctl_handler sysctl_intvec;
extern ctl_handler sysctl_jiffies;
@@ -980,7 +1001,7 @@ extern ctl_handler sysctl_ms_jiffies;
/*
* Register a set of sysctl names by calling register_sysctl_table
- * with an initialised array of ctl_table's. An entry with zero
+ * with an initialised array of struct ctl_table's. An entry with zero
* ctl_name and NULL procname terminates the table. table->de will be
* set up by the registration and need not be initialised in advance.
*
@@ -1026,8 +1047,8 @@ struct ctl_table
void *data;
int maxlen;
mode_t mode;
- ctl_table *child;
- ctl_table *parent; /* Automatically set */
+ struct ctl_table *child;
+ struct ctl_table *parent; /* Automatically set */
proc_handler *proc_handler; /* Callback for text formatting */
ctl_handler *strategy; /* Callback function for all r/w */
void *extra1;
@@ -1035,18 +1056,19 @@ struct ctl_table
};
/* struct ctl_table_header is used to maintain dynamic lists of
- ctl_table trees. */
+ struct ctl_table trees. */
struct ctl_table_header
{
- ctl_table *ctl_table;
+ struct ctl_table *ctl_table;
struct list_head ctl_entry;
int used;
struct completion *unregistering;
};
-struct ctl_table_header * register_sysctl_table(ctl_table * table);
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
void unregister_sysctl_table(struct ctl_table_header * table);
+int sysctl_check_table(struct ctl_table *table);
#else /* __KERNEL__ */
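
With the typedef spelled out as struct ctl_table throughout, a minimal registration now reads as in this hypothetical sketch (my_value and its procname are placeholders); the table still ends with a zeroed sentinel entry as the comment above requires:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int my_value;

static struct ctl_table my_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "my_value",
		.data		= &my_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }	/* terminating entry */
};

static struct ctl_table_header *my_header;

static int __init my_sysctl_init(void)
{
	my_header = register_sysctl_table(my_table);
	return my_header ? 0 : -ENOMEM;
}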
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index dce1ed204972..5d69c0744fff 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -31,7 +31,7 @@
*/
-#define TASKSTATS_VERSION 5
+#define TASKSTATS_VERSION 6
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -152,6 +152,11 @@ struct taskstats {
__u64 nvcsw; /* voluntary_ctxt_switches */
__u64 nivcsw; /* nonvoluntary_ctxt_switches */
+
+ /* time accounting for SMT machines */
+ __u64 ac_utimescaled; /* utime scaled on frequency etc */
+ __u64 ac_stimescaled; /* stime scaled on frequency etc */
+ __u64 cpu_scaled_run_real_total; /* scaled cpu_run_real_total */
};