Diffstat (limited to 'include')
-rw-r--r--  include/asm-alpha/smp.h | 1
-rw-r--r--  include/asm-alpha/thread_info.h | 8
-rw-r--r--  include/asm-arm/arch-at91/cpu.h | 6
-rw-r--r--  include/asm-avr32/arch-at32ap/cpu.h | 33
-rw-r--r--  include/asm-avr32/setup.h | 2
-rw-r--r--  include/asm-avr32/unistd.h | 4
-rw-r--r--  include/asm-blackfin/processor.h | 6
-rw-r--r--  include/asm-blackfin/system.h | 4
-rw-r--r--  include/asm-frv/tlb.h | 4
-rw-r--r--  include/asm-i386/mmzone.h | 6
-rw-r--r--  include/asm-i386/msr.h | 56
-rw-r--r--  include/asm-i386/paravirt.h | 5
-rw-r--r--  include/asm-i386/smp.h | 37
-rw-r--r--  include/asm-i386/thread_info.h | 2
-rw-r--r--  include/asm-ia64/smp.h | 6
-rw-r--r--  include/asm-ia64/thread_info.h | 2
-rw-r--r--  include/asm-m32r/smp.h | 6
-rw-r--r--  include/asm-m68k/thread_info.h | 6
-rw-r--r--  include/asm-mips/system.h | 2
-rw-r--r--  include/asm-parisc/compat.h | 2
-rw-r--r--  include/asm-powerpc/smp.h | 1
-rw-r--r--  include/asm-s390/smp.h | 1
-rw-r--r--  include/asm-sh/cpu-sh3/dma.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma-sh7780.h | 2
-rw-r--r--  include/asm-sh/cpu-sh4/dma.h | 2
-rw-r--r--  include/asm-sparc/smp.h | 1
-rw-r--r--  include/asm-sparc64/smp.h | 1
-rw-r--r--  include/asm-um/required-features.h | 9
-rw-r--r--  include/asm-um/smp.h | 4
-rw-r--r--  include/asm-x86_64/smp.h | 14
-rw-r--r--  include/asm-x86_64/system.h | 2
-rw-r--r--  include/asm-x86_64/thread_info.h | 2
-rw-r--r--  include/linux/aio.h | 3
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/clocksource.h | 3
-rw-r--r--  include/linux/compat.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 1
-rw-r--r--  include/linux/compiler-gcc3.h | 6
-rw-r--r--  include/linux/compiler-gcc4.h | 3
-rw-r--r--  include/linux/compiler.h | 21
-rw-r--r--  include/linux/fb.h | 2
-rw-r--r--  include/linux/futex.h | 42
-rw-r--r--  include/linux/genhd.h | 1
-rw-r--r--  include/linux/gfp.h | 6
-rw-r--r--  include/linux/highmem.h | 27
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/kthread.h | 3
-rw-r--r--  include/linux/ktime.h | 6
-rw-r--r--  include/linux/mca.h | 2
-rw-r--r--  include/linux/mmzone.h | 3
-rw-r--r--  include/linux/module.h | 3
-rw-r--r--  include/linux/mutex.h | 5
-rw-r--r--  include/linux/nfs4_acl.h | 1
-rw-r--r--  include/linux/notifier.h | 66
-rw-r--r--  include/linux/pm.h | 31
-rw-r--r--  include/linux/raid/md_k.h | 1
-rw-r--r--  include/linux/relay.h | 3
-rw-r--r--  include/linux/sched.h | 9
-rw-r--r--  include/linux/signal.h | 125
-rw-r--r--  include/linux/smp.h | 1
-rw-r--r--  include/linux/sunrpc/svc.h | 19
-rw-r--r--  include/linux/sunrpc/svcsock.h | 3
-rw-r--r--  include/linux/suspend.h | 24
-rw-r--r--  include/linux/svga.h | 2
-rw-r--r--  include/linux/syscalls.h | 2
-rw-r--r--  include/linux/vmstat.h | 3
-rw-r--r--  include/linux/workqueue.h | 95
67 files changed, 515 insertions, 253 deletions
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index a1a1eca6be45..286e1d844f63 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -51,6 +51,7 @@ int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, in
#else /* CONFIG_SMP */
+#define hard_smp_processor_id() 0
#define smp_call_function_on_cpu(func,info,retry,wait,cpu) ({ 0; })
#endif /* CONFIG_SMP */
diff --git a/include/asm-alpha/thread_info.h b/include/asm-alpha/thread_info.h
index eeb3bef91e11..f4defc2bd3fb 100644
--- a/include/asm-alpha/thread_info.h
+++ b/include/asm-alpha/thread_info.h
@@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8");
1 << TIF_UAC_SIGBUS)
#define SET_UNALIGN_CTL(task,value) ({ \
- (task)->thread_info->flags = (((task)->thread_info->flags & \
+ task_thread_info(task)->flags = ((task_thread_info(task)->flags & \
~ALPHA_UAC_MASK) \
| (((value) << ALPHA_UAC_SHIFT) & (1<<TIF_UAC_NOPRINT))\
| (((value) << (ALPHA_UAC_SHIFT + 1)) & (1<<TIF_UAC_SIGBUS)) \
@@ -105,11 +105,11 @@ register struct thread_info *__current_thread_info __asm__("$8");
0; })
#define GET_UNALIGN_CTL(task,value) ({ \
- put_user(((task)->thread_info->flags & (1 << TIF_UAC_NOPRINT)) \
+ put_user((task_thread_info(task)->flags & (1 << TIF_UAC_NOPRINT))\
>> ALPHA_UAC_SHIFT \
- | ((task)->thread_info->flags & (1 << TIF_UAC_SIGBUS)) \
+ | (task_thread_info(task)->flags & (1 << TIF_UAC_SIGBUS))\
>> (ALPHA_UAC_SHIFT + 1) \
- | ((task)->thread_info->flags & (1 << TIF_UAC_NOFIX)) \
+ | (task_thread_info(task)->flags & (1 << TIF_UAC_NOFIX))\
>> (ALPHA_UAC_SHIFT - 1), \
(int __user *)(value)); \
})
diff --git a/include/asm-arm/arch-at91/cpu.h b/include/asm-arm/arch-at91/cpu.h
index d464ca58cdbc..7ef4eebe9f8e 100644
--- a/include/asm-arm/arch-at91/cpu.h
+++ b/include/asm-arm/arch-at91/cpu.h
@@ -68,4 +68,10 @@ static inline unsigned long at91_arch_identify(void)
#define cpu_is_at91sam9263() (0)
#endif
+/*
+ * Since this is ARM, we will never run on any AVR32 CPU. But these
+ * definitions may reduce clutter in common drivers.
+ */
+#define cpu_is_at32ap7000() (0)
+
#endif
diff --git a/include/asm-avr32/arch-at32ap/cpu.h b/include/asm-avr32/arch-at32ap/cpu.h
new file mode 100644
index 000000000000..2bdc5bd6f793
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/cpu.h
@@ -0,0 +1,33 @@
+/*
+ * AVR32 and (fake) AT91 CPU identification
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARCH_CPU_H
+#define __ASM_ARCH_CPU_H
+
+/*
+ * Only AT32AP7000 is defined for now. We can identify the specific
+ * chip at runtime, but I'm not sure if it's really worth it.
+ */
+#ifdef CONFIG_CPU_AT32AP7000
+# define cpu_is_at32ap7000() (1)
+#else
+# define cpu_is_at32ap7000() (0)
+#endif
+
+/*
+ * Since this is AVR32, we will never run on any AT91 CPU. But these
+ * definitions may reduce clutter in common drivers.
+ */
+#define cpu_is_at91rm9200() (0)
+#define cpu_is_at91sam9xe() (0)
+#define cpu_is_at91sam9260() (0)
+#define cpu_is_at91sam9261() (0)
+#define cpu_is_at91sam9263() (0)
+
+#endif /* __ASM_ARCH_CPU_H */
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 1ff1a217015d..b0828d43e110 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -110,7 +110,7 @@ struct tagtable {
int (*parse)(struct tag *);
};
-#define __tag __attribute_used__ __attribute__((__section__(".taglist")))
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
#define __tagtable(tag, fn) \
static struct tagtable __tagtable_##fn __tag = { tag, fn }
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index 8f5120471819..2418cce624cc 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -295,8 +295,10 @@
#define __NR_shmdt 276
#define __NR_shmctl 277
+#define __NR_utimensat 278
+
#ifdef __KERNEL__
-#define NR_syscalls 278
+#define NR_syscalls 279
#define __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/include/asm-blackfin/processor.h b/include/asm-blackfin/processor.h
index 997465c93e82..0336ff132c16 100644
--- a/include/asm-blackfin/processor.h
+++ b/include/asm-blackfin/processor.h
@@ -58,10 +58,10 @@ do { \
(_regs)->pc = (_pc); \
if (current->mm) \
(_regs)->p5 = current->mm->start_data; \
- current->thread_info->l1_task_info.stack_start \
+ task_thread_info(current)->l1_task_info.stack_start \
= (void *)current->mm->context.stack_start; \
- current->thread_info->l1_task_info.lowest_sp = (void *)(_usp); \
- memcpy(L1_SCRATCH_TASK_INFO, &current->thread_info->l1_task_info, \
+ task_thread_info(current)->l1_task_info.lowest_sp = (void *)(_usp); \
+ memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info, \
sizeof(*L1_SCRATCH_TASK_INFO)); \
wrusp(_usp); \
} while(0)
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index b5bf6e7cb5e8..5e5f1a0566c0 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -239,9 +239,9 @@ asmlinkage struct task_struct *resume(struct task_struct *prev, struct task_stru
#define switch_to(prev,next,last) \
do { \
- memcpy (&prev->thread_info->l1_task_info, L1_SCRATCH_TASK_INFO, \
+ memcpy (&task_thread_info(prev)->l1_task_info, L1_SCRATCH_TASK_INFO, \
sizeof *L1_SCRATCH_TASK_INFO); \
- memcpy (L1_SCRATCH_TASK_INFO, &next->thread_info->l1_task_info, \
+ memcpy (L1_SCRATCH_TASK_INFO, &task_thread_info(next)->l1_task_info, \
sizeof *L1_SCRATCH_TASK_INFO); \
(last) = resume (prev, next); \
} while (0)
diff --git a/include/asm-frv/tlb.h b/include/asm-frv/tlb.h
index f94fe5cb9b3a..cd458eb6d75e 100644
--- a/include/asm-frv/tlb.h
+++ b/include/asm-frv/tlb.h
@@ -3,7 +3,11 @@
#include <asm/tlbflush.h>
+#ifdef CONFIG_MMU
+extern void check_pgt_cache(void);
+#else
#define check_pgt_cache() do {} while(0)
+#endif
/*
* we don't need any special per-pte or per-vma handling...
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 3503ad66945e..118e9812778f 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -122,21 +122,21 @@ static inline int pfn_valid(int pfn)
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
({ \
- struct pglist_data __attribute__ ((unused)) \
+ struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
__pa(MAX_DMA_ADDRESS)); \
})
#define alloc_bootmem_pages_node(pgdat, x) \
({ \
- struct pglist_data __attribute__ ((unused)) \
+ struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
__pa(MAX_DMA_ADDRESS)) \
})
#define alloc_bootmem_low_pages_node(pgdat, x) \
({ \
- struct pglist_data __attribute__ ((unused)) \
+ struct pglist_data __maybe_unused \
*__alloc_bootmem_node__pgdat = (pgdat); \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
})
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 26861df52cc4..df21ea049369 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -86,62 +86,50 @@ static inline unsigned long long native_read_pmc(void)
#define rdmsr(msr,val1,val2) \
do { \
- unsigned long long __val = native_read_msr(msr); \
- val1 = __val; \
- val2 = __val >> 32; \
+ u64 __val = native_read_msr(msr); \
+ (val1) = (u32)__val; \
+ (val2) = (u32)(__val >> 32); \
} while(0)
-#define wrmsr(msr,val1,val2) \
- native_write_msr(msr, ((unsigned long long)val2 << 32) | val1)
-
-#define rdmsrl(msr,val) \
- do { \
- (val) = native_read_msr(msr); \
- } while(0)
-
-static inline void wrmsrl (unsigned long msr, unsigned long long val)
+static inline void wrmsr(u32 __msr, u32 __low, u32 __high)
{
- unsigned long lo, hi;
- lo = (unsigned long) val;
- hi = val >> 32;
- wrmsr (msr, lo, hi);
+ native_write_msr(__msr, ((u64)__high << 32) | __low);
}
+#define rdmsrl(msr,val) \
+ ((val) = native_read_msr(msr))
+
+#define wrmsrl(msr,val) native_write_msr(msr, val)
+
/* wrmsr with exception handling */
-#define wrmsr_safe(msr,val1,val2) \
- (native_write_msr_safe(msr, ((unsigned long long)val2 << 32) | val1))
+static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high)
+{
+ return native_write_msr_safe(__msr, ((u64)__high << 32) | __low);
+}
/* rdmsr with exception handling */
#define rdmsr_safe(msr,p1,p2) \
({ \
int __err; \
- unsigned long long __val = native_read_msr_safe(msr, &__err);\
- (*p1) = __val; \
- (*p2) = __val >> 32; \
+ u64 __val = native_read_msr_safe(msr, &__err); \
+ (*p1) = (u32)__val; \
+ (*p2) = (u32)(__val >> 32); \
__err; \
})
-#define rdtsc(low,high) \
- do { \
- u64 _l = native_read_tsc(); \
- (low) = (u32)_l; \
- (high) = _l >> 32; \
- } while(0)
-
#define rdtscl(low) \
- do { \
- (low) = native_read_tsc(); \
- } while(0)
+ ((low) = (u32)native_read_tsc())
-#define rdtscll(val) ((val) = native_read_tsc())
+#define rdtscll(val) \
+ ((val) = native_read_tsc())
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
#define rdpmc(counter,low,high) \
do { \
u64 _l = native_read_pmc(); \
- low = (u32)_l; \
- high = _l >> 32; \
+ (low) = (u32)_l; \
+ (high) = (u32)(_l >> 32); \
} while(0)
#endif /* !CONFIG_PARAVIRT */
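Not part of the patch: a minimal userspace C sketch of the (low, high) <-> 64-bit split that the rewritten rdmsr()/wrmsr() helpers perform, handy for sanity-checking the casts above.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t msr_val = 0x123456789abcdef0ULL;

        /* rdmsr(msr, val1, val2): low half into val1, high half into val2 */
        uint32_t low  = (uint32_t)msr_val;
        uint32_t high = (uint32_t)(msr_val >> 32);

        /* wrmsr(msr, low, high): recombine exactly the same way */
        uint64_t back = ((uint64_t)high << 32) | low;

        printf("low=%#x high=%#x roundtrip ok=%d\n",
               low, high, back == msr_val);
        return 0;
}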
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index e2e7f98723c5..bc5c12c13581 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -560,11 +560,6 @@ static inline u64 paravirt_read_tsc(void)
{
return PVOP_CALL0(u64, read_tsc);
}
-#define rdtsc(low,high) do { \
- u64 _l = paravirt_read_tsc(); \
- low = (u32)_l; \
- high = _l >> 32; \
-} while(0)
#define rdtscl(low) do { \
u64 _l = paravirt_read_tsc(); \
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 090abc1da32a..0c7132787062 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -124,20 +124,6 @@ static inline int num_booting_cpus(void)
return cpus_weight(cpu_callout_map);
}
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#ifdef APIC_DEFINITION
-extern int hard_smp_processor_id(void);
-#else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
-#endif
-#endif
-
extern int safe_smp_processor_id(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
@@ -152,10 +138,31 @@ extern unsigned int num_processors;
#define NO_PROC_ID 0xFF /* No processor magic marker */
-#endif
+#endif /* CONFIG_SMP */
#ifndef __ASSEMBLY__
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#ifdef APIC_DEFINITION
+extern int hard_smp_processor_id(void);
+#else
+#include <mach_apicdef.h>
+static inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
+}
+#endif /* APIC_DEFINITION */
+
+#else /* CONFIG_X86_LOCAL_APIC */
+
+#ifndef CONFIG_SMP
+#define hard_smp_processor_id() 0
+#endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
extern u8 apicid_2_node[];
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index bf01d4b342bd..4cb0f91ae64f 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -172,7 +172,7 @@ static inline struct thread_info *current_thread_info(void)
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* __KERNEL__ */
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 60fd4ae014f6..c60024989ebd 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,8 @@ ia64_get_lid (void)
return lid.f.id << 8 | lid.f.eid;
}
+#define hard_smp_processor_id() ia64_get_lid()
+
#ifdef CONFIG_SMP
#define XTP_OFFSET 0x1e0008
@@ -110,8 +112,6 @@ max_xtp (void)
writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */
}
-#define hard_smp_processor_id() ia64_get_lid()
-
/* Upping and downing of CPUs */
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
@@ -128,7 +128,7 @@ extern void unlock_ipi_calllock(void);
extern void identify_siblings (struct cpuinfo_ia64 *);
extern int is_multithreading_enabled(void);
-#else
+#else /* CONFIG_SMP */
#define cpu_logical_id(i) 0
#define cpu_physical_id(i) ia64_get_lid()
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 91698599f918..d28147506585 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -110,6 +110,6 @@ struct thread_info {
#define TS_POLLING 1 /* true if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index abd937ac5239..078e1a51a042 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -108,6 +108,10 @@ extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
#define IPI_SHIFT (0)
#define NR_IPIS (8)
-#endif /* CONFIG_SMP */
+#else /* CONFIG_SMP */
+
+#define hard_smp_processor_id() 0
+
+#endif /* CONFIG_SMP */
#endif /* _ASM_M32R_SMP_H */
diff --git a/include/asm-m68k/thread_info.h b/include/asm-m68k/thread_info.h
index c4d622a57dfb..d635a3752488 100644
--- a/include/asm-m68k/thread_info.h
+++ b/include/asm-m68k/thread_info.h
@@ -37,17 +37,17 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
#define task_thread_info(tsk) (&(tsk)->thread.info)
-#define task_stack_page(tsk) ((void *)(tsk)->thread_info)
+#define task_stack_page(tsk) ((tsk)->stack)
#define current_thread_info() task_thread_info(current)
#define __HAVE_THREAD_FUNCTIONS
#define setup_thread_stack(p, org) ({ \
- *(struct task_struct **)(p)->thread_info = (p); \
+ *(struct task_struct **)(p)->stack = (p); \
task_thread_info(p)->task = (p); \
})
-#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
+#define end_of_stack(p) ((unsigned long *)(p)->stack + 1)
/* entry.S relies on these definitions!
* bits 0-7 are tested at every exception exit
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 30f23a2b46ca..3713d256d369 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -55,7 +55,7 @@ do { \
if (cpu_has_dsp) \
__save_dsp(prev); \
next->thread.emulated_fp = 0; \
- (last) = resume(prev, next, next->thread_info); \
+ (last) = resume(prev, next, task_thread_info(next)); \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index fe8579023531..11f4222597a0 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
static inline int __is_compat_task(struct task_struct *t)
{
- return test_ti_thread_flag(t->thread_info, TIF_32BIT);
+ return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
}
static inline int is_compat_task(void)
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 01717f266dc9..d037f50580e2 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -83,6 +83,7 @@ extern void __cpu_die(unsigned int cpu);
#else
/* for UP */
+#define hard_smp_processor_id() 0
#define smp_setup_cpu_maps()
#endif /* CONFIG_SMP */
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 0a28e6d6ef40..76e424f718c6 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -110,6 +110,7 @@ static inline void smp_send_stop(void)
__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
}
+#define hard_smp_processor_id() 0
#define smp_cpu_not_running(cpu) 1
#define smp_setup_cpu_possible_map() do { } while (0)
#endif
diff --git a/include/asm-sh/cpu-sh3/dma.h b/include/asm-sh/cpu-sh3/dma.h
index 954801b46022..3a66dc458023 100644
--- a/include/asm-sh/cpu-sh3/dma.h
+++ b/include/asm-sh/cpu-sh3/dma.h
@@ -26,7 +26,7 @@ enum {
XMIT_SZ_128BIT,
};
-static unsigned int ts_shift[] __attribute__ ((used)) = {
+static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
[XMIT_SZ_32BIT] = 2,
diff --git a/include/asm-sh/cpu-sh4/dma-sh7780.h b/include/asm-sh/cpu-sh4/dma-sh7780.h
index 6c90d28331b2..71b426a6e482 100644
--- a/include/asm-sh/cpu-sh4/dma-sh7780.h
+++ b/include/asm-sh/cpu-sh4/dma-sh7780.h
@@ -28,7 +28,7 @@ enum {
/*
* The DMA count is defined as the number of bytes to transfer.
*/
-static unsigned int __attribute__ ((used)) ts_shift[] = {
+static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
[XMIT_SZ_32BIT] = 2,
diff --git a/include/asm-sh/cpu-sh4/dma.h b/include/asm-sh/cpu-sh4/dma.h
index c135e9cebd9c..36e26a964765 100644
--- a/include/asm-sh/cpu-sh4/dma.h
+++ b/include/asm-sh/cpu-sh4/dma.h
@@ -53,7 +53,7 @@ enum {
/*
* The DMA count is defined as the number of bytes to transfer.
*/
-static unsigned int ts_shift[] __attribute__ ((used)) = {
+static unsigned int ts_shift[] __maybe_unused = {
[XMIT_SZ_64BIT] = 3,
[XMIT_SZ_8BIT] = 0,
[XMIT_SZ_16BIT] = 1,
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index b9da9a600e35..b3f492208fd2 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -165,6 +165,7 @@ void smp_setup_cpu_possible_map(void);
#else /* SMP */
+#define hard_smp_processor_id() 0
#define smp_setup_cpu_possible_map() do { } while (0)
#endif /* !(SMP) */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index cca54804b722..869d16fb907b 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -48,6 +48,7 @@ extern unsigned char boot_cpu_id;
#else
+#define hard_smp_processor_id() 0
#define smp_setup_cpu_possible_map() do { } while (0)
#define boot_cpu_id (0)
diff --git a/include/asm-um/required-features.h b/include/asm-um/required-features.h
new file mode 100644
index 000000000000..dfb967b2d2f3
--- /dev/null
+++ b/include/asm-um/required-features.h
@@ -0,0 +1,9 @@
+#ifndef __UM_REQUIRED_FEATURES_H
+#define __UM_REQUIRED_FEATURES_H
+
+/*
+ * Nothing to see, just need something for the i386 and x86_64 asm
+ * headers to include.
+ */
+
+#endif
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index ca552261ed1f..84f8cf29324e 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -24,6 +24,10 @@ extern inline void smp_cpus_done(unsigned int maxcpus)
extern struct task_struct *idle_threads[NR_CPUS];
+#else
+
+#define hard_smp_processor_id() 0
+
#endif
#endif
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d5704421456b..3f303d2365ed 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -57,12 +57,6 @@ static inline int num_booting_cpus(void)
#define raw_smp_processor_id() read_pda(cpunumber)
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
-}
-
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
@@ -71,7 +65,13 @@ extern unsigned __cpuinitdata disabled_cpus;
#define NO_PROC_ID 0xFF /* No processor magic marker */
-#endif
+#endif /* CONFIG_SMP */
+
+static inline int hard_smp_processor_id(void)
+{
+ /* we don't want to mark this access volatile - bad code generation */
+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
+}
/*
* Some lowlevel functions might want to know about
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7b8021e8c43..ead9f9a56234 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -39,7 +39,7 @@
[threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)),\
[tif_fork] "i" (TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
+ [thread_info] "i" (offsetof(struct task_struct, stack)), \
[pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
: "memory", "cc" __EXTRA_CLOBBER)
diff --git a/include/asm-x86_64/thread_info.h b/include/asm-x86_64/thread_info.h
index 74a6c74397f7..10bb5a8ed688 100644
--- a/include/asm-x86_64/thread_info.h
+++ b/include/asm-x86_64/thread_info.h
@@ -162,7 +162,7 @@ static inline struct thread_info *stack_thread_info(void)
#define TS_COMPAT 0x0002 /* 32bit syscall active */
#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* __KERNEL__ */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index a30ef13c9e62..43dc2ebfaa0e 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -226,7 +226,8 @@ int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
__put_ioctx(kioctx); \
} while (0)
-#define in_aio() !is_sync_wait(current->io_wait)
+#define in_aio() (unlikely(!is_sync_wait(current->io_wait)))
+
/* may be used for debugging */
#define warn_if_async() \
do { \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a686eabe22d6..db5b00a792f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -854,7 +854,7 @@ static inline void put_dev_sector(Sector p)
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
-void kblockd_flush(void);
+void kblockd_flush_work(struct work_struct *work);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 2665ca04cf8f..bf297b03a4e4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -49,6 +49,7 @@ struct clocksource;
* @shift: cycle to nanosecond divisor (power of two)
* @flags: flags describing special properties
* @vread: vsyscall based read
+ * @resume: resume function for the clocksource, if necessary
* @cycle_interval: Used internally by timekeeping core, please ignore.
* @xtime_interval: Used internally by timekeeping core, please ignore.
*/
@@ -65,6 +66,7 @@ struct clocksource {
u32 shift;
unsigned long flags;
cycle_t (*vread)(void);
+ void (*resume)(void);
/* timekeeping specific data, ignore */
cycle_t cycle_interval;
@@ -209,6 +211,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
extern int clocksource_register(struct clocksource*);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_resume(void);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index ccd863dd77fa..70a157a130bb 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -253,5 +253,8 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
+ struct compat_timespec __user *t, int flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index a9f794716a81..03ec2311fb29 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -40,3 +40,4 @@
#define noinline __attribute__((noinline))
#define __attribute_pure__ __attribute__((pure))
#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
index ecd621fd27d2..a9e2863c2dbf 100644
--- a/include/linux/compiler-gcc3.h
+++ b/include/linux/compiler-gcc3.h
@@ -4,9 +4,11 @@
#include <linux/compiler-gcc.h>
#if __GNUC_MINOR__ >= 3
-# define __attribute_used__ __attribute__((__used__))
+# define __used __attribute__((__used__))
+# define __attribute_used__ __used /* deprecated */
#else
-# define __attribute_used__ __attribute__((__unused__))
+# define __used __attribute__((__unused__))
+# define __attribute_used__ __used /* deprecated */
#endif
#if __GNUC_MINOR__ >= 4
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index fd0cc7c4a636..a03e9398a6c2 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -12,7 +12,8 @@
# define __inline __inline __attribute__((always_inline))
#endif
-#define __attribute_used__ __attribute__((__used__))
+#define __used __attribute__((__used__))
+#define __attribute_used__ __used /* deprecated */
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3b6949b41745..498c35920762 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -108,15 +108,30 @@ extern void __chk_io_ptr(const void __iomem *);
* Allow us to avoid 'defined but not used' warnings on functions and data,
* as well as force them to be emitted to the assembly file.
*
- * As of gcc 3.3, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.3, static data not so
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
* marked will not be elided, but this may change in a future gcc version.
*
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
* In prior versions of gcc, such functions and data would be emitted, but
* would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
*/
#ifndef __attribute_used__
-# define __attribute_used__ /* unimplemented */
+# define __attribute_used__ /* deprecated */
+#endif
+
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
#endif
/*
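As a standalone gcc illustration (not part of this patch), the two attributes now wrapped as __used and __maybe_unused behave as follows; build with -Wall and inspect the object file with nm:

#include <stdio.h>

/* Kept in the object file even though nothing in C references it, e.g.
 * because it is reached only from inline asm (check with: nm file.o). */
static void __attribute__((__used__)) keep_me(void)
{
        puts("kept");
}

/* Silences -Wunused-variable, but the compiler may still discard it. */
static int __attribute__((unused)) debug_level;

int main(void)
{
        return 0;
}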
diff --git a/include/linux/fb.h b/include/linux/fb.h
index dff7a728948c..c654d0e9ce33 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -868,7 +868,7 @@ struct fb_info {
#define fb_writeq sbus_writeq
#define fb_memset sbus_memset_io
-#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__)
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__)
#define fb_readb __raw_readb
#define fb_readw __raw_readw
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 820125c628c1..899fc7f20edd 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -3,6 +3,8 @@
#include <linux/sched.h>
+union ktime;
+
/* Second argument to futex syscall */
@@ -15,6 +17,19 @@
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
+#define FUTEX_CMP_REQUEUE_PI 9
+
+#define FUTEX_PRIVATE_FLAG 128
+#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG
+
+#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_REQUEUE_PRIVATE (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAKE_OP_PRIVATE (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI_PRIVATE (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_UNLOCK_PI_PRIVATE (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
/*
* Support for robust futexes: the kernel cleans up held futexes at
@@ -83,9 +98,14 @@ struct robust_list_head {
#define FUTEX_OWNER_DIED 0x40000000
/*
+ * Some processes have been requeued on this PI-futex
+ */
+#define FUTEX_WAITER_REQUEUED 0x20000000
+
+/*
* The rest of the robust-futex field is for the TID:
*/
-#define FUTEX_TID_MASK 0x3fffffff
+#define FUTEX_TID_MASK 0x0fffffff
/*
* This limit protects against a deliberately circular list.
@@ -94,7 +114,7 @@ struct robust_list_head {
#define ROBUST_LIST_LIMIT 2048
#ifdef __KERNEL__
-long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
extern int
@@ -106,9 +126,20 @@ handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
* Don't rearrange members without looking at hash_futex().
*
* offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
- * We set bit 0 to indicate if it's an inode-based key.
- */
+ * We use the two low order bits of offset to tell what is the kind of key :
+ * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
+ * (no reference on an inode or mm)
+ * 01 : Shared futex (PTHREAD_PROCESS_SHARED)
+ * mapped on a file (reference on the underlying inode)
+ * 10 : Shared futex (PTHREAD_PROCESS_SHARED)
+ * (but private mapping on an mm, and reference taken on it)
+*/
+
+#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */
+#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
+
union futex_key {
+ u32 __user *uaddr;
struct {
unsigned long pgoff;
struct inode *inode;
@@ -125,7 +156,8 @@ union futex_key {
int offset;
} both;
};
-int get_futex_key(u32 __user *uaddr, union futex_key *key);
+int get_futex_key(u32 __user *uaddr, struct rw_semaphore *shared,
+ union futex_key *key);
void get_futex_key_refs(union futex_key *key);
void drop_futex_key_refs(union futex_key *key);
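For reference only (a hedged userspace sketch, not part of this patch): the new FUTEX_*_PRIVATE opcodes are the existing opcodes OR-ed with FUTEX_PRIVATE_FLAG, which tells the kernel the futex word is process-private so it can skip the shared-key (inode/mm) lookup. A minimal wait/wake pair, built with gcc -pthread:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

#ifndef FUTEX_PRIVATE_FLAG
#define FUTEX_PRIVATE_FLAG 128
#endif

static int futex_word;

static long futex(int *uaddr, int op, int val)
{
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waker(void *arg)
{
        __sync_lock_test_and_set(&futex_word, 1);
        futex(&futex_word, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        /* FUTEX_WAIT returns immediately if futex_word is no longer 0,
         * so this loop cannot miss the wake-up. */
        while (futex_word == 0)
                futex(&futex_word, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0);
        pthread_join(t, NULL);
        puts("woken");
        return 0;
}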
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 2c65da7cabb2..f589559cf070 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -413,6 +413,7 @@ char *disk_name (struct gendisk *hd, int part, char *buf);
extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
extern void add_partition(struct gendisk *, int, sector_t, sector_t, int);
extern void delete_partition(struct gendisk *, int);
+extern void printk_all_partitions(void);
extern struct gendisk *alloc_disk_node(int minors, int node_id);
extern struct gendisk *alloc_disk(int minors);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 97a36c3d96e2..0d2ef0b082a6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -176,10 +176,6 @@ extern void FASTCALL(free_cold_page(struct page *page));
#define free_page(addr) free_pages((addr),0)
void page_alloc_init(void);
-#ifdef CONFIG_NUMA
-void drain_node_pages(int node);
-#else
-static inline void drain_node_pages(int node) { };
-#endif
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index a515eb0afdfb..98e2cce996a4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -94,17 +94,26 @@ static inline void clear_highpage(struct page *page)
/*
* Same but also flushes aliased cache contents to RAM.
+ *
+ * This must be a macro because KM_USER0 and friends aren't defined if
+ * !CONFIG_HIGHMEM
*/
-static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
+#define zero_user_page(page, offset, size, km_type) \
+ do { \
+ void *kaddr; \
+ \
+ BUG_ON((offset) + (size) > PAGE_SIZE); \
+ \
+ kaddr = kmap_atomic(page, km_type); \
+ memset((char *)kaddr + (offset), 0, (size)); \
+ flush_dcache_page(page); \
+ kunmap_atomic(kaddr, (km_type)); \
+ } while (0)
+
+static inline void __deprecated memclear_highpage_flush(struct page *page,
+ unsigned int offset, unsigned int size)
{
- void *kaddr;
-
- BUG_ON(offset + size > PAGE_SIZE);
-
- kaddr = kmap_atomic(page, KM_USER0);
- memset((char *)kaddr + offset, 0, size);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_page(page, offset, size, KM_USER0);
}
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
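An illustrative in-kernel caller (a sketch assuming kernel context; the helper name is invented, not from this patch) of the zero_user_page() usage that replaces memclear_highpage_flush():

#include <linux/highmem.h>
#include <linux/mm.h>

/* Zero the unused tail of a block inside @page; KM_USER0 is the usual
 * atomic-kmap slot for process-context callers. */
static void my_zero_block_tail(struct page *page, unsigned int blocksize,
                               unsigned int bytes_used)
{
        zero_user_page(page, bytes_used, blocksize - bytes_used, KM_USER0);
}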
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 795102309bf1..45170b2fa253 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -95,7 +95,7 @@ extern struct group_info init_groups;
#define INIT_TASK(tsk) \
{ \
.state = 0, \
- .thread_info = &init_thread_info, \
+ .stack = &init_thread_info, \
.usage = ATOMIC_INIT(2), \
.flags = 0, \
.lock_depth = -1, \
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 1c65e7a9f186..00dd957e245b 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -30,4 +30,7 @@ void kthread_bind(struct task_struct *k, unsigned int cpu);
int kthread_stop(struct task_struct *k);
int kthread_should_stop(void);
+int kthreadd(void *unused);
+extern struct task_struct *kthreadd_task;
+
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 81bb9c7a4eb3..c762954bda14 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -43,7 +43,7 @@
* plain scalar nanosecond based representation can be selected by the
* config switch CONFIG_KTIME_SCALAR.
*/
-typedef union {
+union ktime {
s64 tv64;
#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
struct {
@@ -54,7 +54,9 @@ typedef union {
# endif
} tv;
#endif
-} ktime_t;
+};
+
+typedef union ktime ktime_t; /* Kill this */
#define KTIME_MAX ((s64)~((u64)1 << 63))
#if (BITS_PER_LONG == 64)
diff --git a/include/linux/mca.h b/include/linux/mca.h
index 5cff2923092b..37972704617f 100644
--- a/include/linux/mca.h
+++ b/include/linux/mca.h
@@ -94,6 +94,7 @@ struct mca_bus {
struct mca_driver {
const short *id_table;
void *driver_data;
+ int integrated_id;
struct device_driver driver;
};
#define to_mca_driver(mdriver) container_of(mdriver, struct mca_driver, driver)
@@ -125,6 +126,7 @@ extern enum MCA_AdapterStatus mca_device_status(struct mca_device *mca_dev);
extern struct bus_type mca_bus_type;
extern int mca_register_driver(struct mca_driver *drv);
+extern int mca_register_driver_integrated(struct mca_driver *, int);
extern void mca_unregister_driver(struct mca_driver *drv);
/* WARNING: only called by the boot time device setup */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2f1544e83042..d09b1345a3a1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -83,6 +83,9 @@ struct per_cpu_pages {
struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
+#ifdef CONFIG_NUMA
+ s8 expire;
+#endif
#ifdef CONFIG_SMP
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
diff --git a/include/linux/module.h b/include/linux/module.h
index 6d3dc9c4ff96..792d483c9af7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -356,6 +356,9 @@ struct module
keeping pointers to this stuff */
char *args;
};
+#ifndef MODULE_ARCH_INIT
+#define MODULE_ARCH_INIT {}
+#endif
/* FIXME: It'd be nice to isolate modules during init, too, so they
aren't used before they (may) fail. But presently too much code
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index b81bc2adaeff..0d50ea3df689 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -121,11 +121,12 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
* Also see Documentation/mutex-design.txt.
*/
extern void fastcall mutex_lock(struct mutex *lock);
-extern int fastcall mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
-extern int mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass);
+extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
+ unsigned int subclass);
#else
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
diff --git a/include/linux/nfs4_acl.h b/include/linux/nfs4_acl.h
index 409b6e02f337..c9c05a78e9bb 100644
--- a/include/linux/nfs4_acl.h
+++ b/include/linux/nfs4_acl.h
@@ -44,7 +44,6 @@
#define NFS4_ACL_MAX 170
struct nfs4_acl *nfs4_acl_new(int);
-void nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
int nfs4_acl_get_whotype(char *, u32);
int nfs4_acl_write_who(int who, char *p);
int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 10a43ed0527e..9431101bf876 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -112,32 +112,40 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#ifdef __KERNEL__
-extern int atomic_notifier_chain_register(struct atomic_notifier_head *,
- struct notifier_block *);
-extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
- struct notifier_block *);
-extern int raw_notifier_chain_register(struct raw_notifier_head *,
- struct notifier_block *);
-extern int srcu_notifier_chain_register(struct srcu_notifier_head *,
- struct notifier_block *);
-
-extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
- struct notifier_block *);
-extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
- struct notifier_block *);
-extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
- struct notifier_block *);
-extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *,
- struct notifier_block *);
-
-extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
-extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
+extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
-extern int raw_notifier_call_chain(struct raw_notifier_head *,
+extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
-extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
+extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
+extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
#define NOTIFY_DONE 0x0000 /* Don't care */
#define NOTIFY_OK 0x0001 /* Suits me */
@@ -186,6 +194,20 @@ extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
+#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */
+#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
+
+/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
+ * operation in progress
+ */
+#define CPU_TASKS_FROZEN 0x0010
+
+#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
+#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
+#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
+#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
#endif /* __KERNEL__ */
#endif /* _LINUX_NOTIFIER_H */
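A hedged kernel-context sketch (chain and callback names are invented) of what the new __*_notifier_call_chain() variants add: a caller can bound how many notifiers run and learn how many actually ran, which is what precise rollback on failure needs:

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(my_chain);

static int my_callback(struct notifier_block *nb, unsigned long event, void *v)
{
        return NOTIFY_OK;
}

static struct notifier_block my_nb = {
        .notifier_call = my_callback,
};

static int my_prepare(void *data)
{
        int nr_calls = 0;
        int ret;

        blocking_notifier_chain_register(&my_chain, &my_nb);

        /* nr_to_call == -1: no limit.  nr_calls reports how many entries
         * actually ran, so a NOTIFY_BAD result can be unwound by sending a
         * "cancel" event to exactly that many subscribers. */
        ret = __blocking_notifier_call_chain(&my_chain, 0, data, -1, &nr_calls);
        return (ret == NOTIFY_BAD) ? nr_calls : 0;
}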
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 6e8fa3049e5d..87545e0f0b58 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -107,26 +107,11 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
-#define PM_SUSPEND_DISK ((__force suspend_state_t) 4)
-#define PM_SUSPEND_MAX ((__force suspend_state_t) 5)
-
-typedef int __bitwise suspend_disk_method_t;
-
-/* invalid must be 0 so struct pm_ops initialisers can leave it out */
-#define PM_DISK_INVALID ((__force suspend_disk_method_t) 0)
-#define PM_DISK_PLATFORM ((__force suspend_disk_method_t) 1)
-#define PM_DISK_SHUTDOWN ((__force suspend_disk_method_t) 2)
-#define PM_DISK_REBOOT ((__force suspend_disk_method_t) 3)
-#define PM_DISK_TEST ((__force suspend_disk_method_t) 4)
-#define PM_DISK_TESTPROC ((__force suspend_disk_method_t) 5)
-#define PM_DISK_MAX ((__force suspend_disk_method_t) 6)
+#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
/**
* struct pm_ops - Callbacks for managing platform dependent suspend states.
* @valid: Callback to determine whether the given state can be entered.
- * If %CONFIG_SOFTWARE_SUSPEND is set then %PM_SUSPEND_DISK is
- * always valid and never passed to this call. If not assigned,
- * no suspend states are valid.
* Valid states are advertised in /sys/power/state but can still
* be rejected by prepare or enter if the conditions aren't right.
* There is a %pm_valid_only_mem function available that can be assigned
@@ -140,24 +125,12 @@ typedef int __bitwise suspend_disk_method_t;
*
* @finish: Called when the system has left the given state and all devices
* are resumed. The return value is ignored.
- *
- * @pm_disk_mode: The generic code always allows one of the shutdown methods
- * %PM_DISK_SHUTDOWN, %PM_DISK_REBOOT, %PM_DISK_TEST and
- * %PM_DISK_TESTPROC. If this variable is set, the mode it is set
- * to is allowed in addition to those modes and is also made default.
- * When this mode is sent selected, the @prepare call will be called
- * before suspending to disk (if present), the @enter call should be
- * present and will be called after all state has been saved and the
- * machine is ready to be powered off; the @finish callback is called
- * after state has been restored. All these calls are called with
- * %PM_SUSPEND_DISK as the state.
*/
struct pm_ops {
int (*valid)(suspend_state_t state);
int (*prepare)(suspend_state_t state);
int (*enter)(suspend_state_t state);
int (*finish)(suspend_state_t state);
- suspend_disk_method_t pm_disk_mode;
};
/**
@@ -276,8 +249,6 @@ extern void device_power_up(void);
extern void device_resume(void);
#ifdef CONFIG_PM
-extern suspend_disk_method_t pm_disk_mode;
-
extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c8..a121f36f4437 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -201,7 +201,6 @@ struct mddev_s
struct mutex reconfig_mutex;
atomic_t active;
- int changed; /* true if we might need to reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 759a0f97bec2..6cd8c4425fc7 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -38,7 +39,7 @@ struct rchan_buf
size_t subbufs_consumed; /* count of sub-buffers consumed */
struct rchan *chan; /* associated channel */
wait_queue_head_t read_wait; /* reader wait queue */
- struct delayed_work wake_readers; /* reader wake-up work struct */
+ struct timer_list timer; /* reader wake-up timer */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
struct page **page_array; /* array of current buffer pages */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3d95c480f58d..17b72d88c4cb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -817,7 +817,7 @@ struct prio_array;
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
- struct thread_info *thread_info;
+ void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
@@ -1317,6 +1317,7 @@ extern int in_egroup_p(gid_t);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -1512,8 +1513,8 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
#ifndef __HAVE_THREAD_FUNCTIONS
-#define task_thread_info(task) (task)->thread_info
-#define task_stack_page(task) ((void*)((task)->thread_info))
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -1523,7 +1524,7 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
static inline unsigned long *end_of_stack(struct task_struct *p)
{
- return (unsigned long *)(p->thread_info + 1);
+ return (unsigned long *)(task_thread_info(p) + 1);
}
#endif
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 14749056dd63..3fa0fab4a04b 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -243,6 +243,131 @@ extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
extern struct kmem_cache *sighand_cachep;
+/*
+ * In POSIX a signal is sent either to a specific thread (Linux task)
+ * or to the process as a whole (Linux thread group). How the signal
+ * is sent determines whether it's to one thread or the whole group,
+ * which determines which signal mask(s) are involved in blocking it
+ * from being delivered until later. When the signal is delivered,
+ * either it's caught or ignored by a user handler or it has a default
+ * effect that applies to the whole thread group (POSIX process).
+ *
+ * The possible effects an unblocked signal set to SIG_DFL can have are:
+ * ignore - Nothing Happens
+ * terminate - kill the process, i.e. all threads in the group,
+ * similar to exit_group. The group leader (only) reports
+ * WIFSIGNALED status to its parent.
+ * coredump - write a core dump file describing all threads using
+ * the same mm and then kill all those threads
+ * stop - stop all the threads in the group, i.e. TASK_STOPPED state
+ *
+ * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
+ * Other signals when not blocked and set to SIG_DFL behaves as follows.
+ * The job control signals also have other special effects.
+ *
+ * +--------------------+------------------+
+ * | POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGHUP | terminate |
+ * | SIGINT | terminate |
+ * | SIGQUIT | coredump |
+ * | SIGILL | coredump |
+ * | SIGTRAP | coredump |
+ * | SIGABRT/SIGIOT | coredump |
+ * | SIGBUS | coredump |
+ * | SIGFPE | coredump |
+ * | SIGKILL | terminate(+) |
+ * | SIGUSR1 | terminate |
+ * | SIGSEGV | coredump |
+ * | SIGUSR2 | terminate |
+ * | SIGPIPE | terminate |
+ * | SIGALRM | terminate |
+ * | SIGTERM | terminate |
+ * | SIGCHLD | ignore |
+ * | SIGCONT | ignore(*) |
+ * | SIGSTOP | stop(*)(+) |
+ * | SIGTSTP | stop(*) |
+ * | SIGTTIN | stop(*) |
+ * | SIGTTOU | stop(*) |
+ * | SIGURG | ignore |
+ * | SIGXCPU | coredump |
+ * | SIGXFSZ | coredump |
+ * | SIGVTALRM | terminate |
+ * | SIGPROF | terminate |
+ * | SIGPOLL/SIGIO | terminate |
+ * | SIGSYS/SIGUNUSED | coredump |
+ * | SIGSTKFLT | terminate |
+ * | SIGWINCH | ignore |
+ * | SIGPWR | terminate |
+ * | SIGRTMIN-SIGRTMAX | terminate |
+ * +--------------------+------------------+
+ * | non-POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGEMT | coredump |
+ * +--------------------+------------------+
+ *
+ * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
+ * (*) Special job control effects:
+ * When SIGCONT is sent, it resumes the process (all threads in the group)
+ * from TASK_STOPPED state and also clears any pending/queued stop signals
+ * (any of those marked with "stop(*)"). This happens regardless of blocking,
+ * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
+ * any pending/queued SIGCONT signals; this happens regardless of blocking,
+ * catching, or ignored the stop signal, though (except for SIGSTOP) the
+ * default action of stopping the process may happen later or never.
+ */
+
+#ifdef SIGEMT
+#define SIGEMT_MASK rt_sigmask(SIGEMT)
+#else
+#define SIGEMT_MASK 0
+#endif
+
+#if SIGRTMIN > BITS_PER_LONG
+#define rt_sigmask(sig) (1ULL << ((sig)-1))
+#else
+#define rt_sigmask(sig) sigmask(sig)
+#endif
+#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
+
+#define SIG_KERNEL_ONLY_MASK (\
+ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
+
+#define SIG_KERNEL_STOP_MASK (\
+ rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \
+ rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) )
+
+#define SIG_KERNEL_COREDUMP_MASK (\
+ rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \
+ rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \
+ rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \
+ rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \
+ rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \
+ SIGEMT_MASK )
+
+#define SIG_KERNEL_IGNORE_MASK (\
+ rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
+ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
+
+#define sig_kernel_only(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
+#define sig_kernel_coredump(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
+#define sig_kernel_ignore(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
+#define sig_kernel_stop(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
+
+#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
+
+#define sig_user_defined(t, signr) \
+ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
+ ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
+
+#define sig_fatal(t, signr) \
+ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
+ (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SIGNAL_H */
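A userspace re-derivation (assuming x86 Linux signal numbers; this is not kernel code) of the classification idea behind rt_sigmask()/siginmask() and SIG_KERNEL_STOP_MASK above:

#include <signal.h>
#include <stdio.h>

#define my_sigmask(sig)      (1ULL << ((sig) - 1))
#define MY_STOP_MASK         (my_sigmask(SIGSTOP) | my_sigmask(SIGTSTP) | \
                              my_sigmask(SIGTTIN) | my_sigmask(SIGTTOU))

static int my_sig_kernel_stop(int sig)
{
        /* Real-time signals never get the special stop treatment. */
        return sig < SIGRTMIN && (my_sigmask(sig) & MY_STOP_MASK) != 0;
}

int main(void)
{
        printf("SIGTSTP is a stop signal: %d\n", my_sig_kernel_stop(SIGTSTP)); /* 1 */
        printf("SIGTERM is a stop signal: %d\n", my_sig_kernel_stop(SIGTERM)); /* 0 */
        return 0;
}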
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ba23ec8211b..3f70149eabbb 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -83,7 +83,6 @@ void smp_prepare_boot_cpu(void);
* These macros fold the SMP functionality into a single CPU system
*/
#define raw_smp_processor_id() 0
-#define hard_smp_processor_id() 0
static inline int up_smp_call_function(void)
{
return 0;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 35fa4d5aadd0..4a7ae8ab6eb8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -396,4 +396,23 @@ char * svc_print_addr(struct svc_rqst *, char *, size_t);
#define RPC_MAX_ADDRBUFLEN (63U)
+/*
+ * When we want to reduce the size of the reserved space in the response
+ * buffer, we need to take into account the size of any checksum data that
+ * may be at the end of the packet. This is difficult to determine exactly
+ * for all cases without actually generating the checksum, so we just use a
+ * static value.
+ */
+static inline void
+svc_reserve_auth(struct svc_rqst *rqstp, int space)
+{
+ int added_space = 0;
+
+ switch(rqstp->rq_authop->flavour) {
+ case RPC_AUTH_GSS:
+ added_space = RPC_MAX_AUTH_SIZE;
+ }
+ return svc_reserve(rqstp, space + added_space);
+}
+
#endif /* SUNRPC_SVC_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7909687557bf..e21dd93ac4b7 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -37,7 +37,8 @@ struct svc_sock {
atomic_t sk_reserved; /* space on outq that is reserved */
- spinlock_t sk_defer_lock; /* protects sk_deferred */
+ spinlock_t sk_lock; /* protects sk_deferred and
+ * sk_info_authunix */
struct list_head sk_deferred; /* deferred requests that need to
* be revisted */
struct mutex sk_mutex; /* to serialize sending data */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 9d2aa1a12aa0..d74da9122b60 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -32,6 +32,24 @@ static inline int pm_prepare_console(void) { return 0; }
static inline void pm_restore_console(void) {}
#endif
+/**
+ * struct hibernation_ops - hibernation platform support
+ *
+ * The methods in this structure allow a platform to override the default
+ * mechanism of shutting down the machine during a hibernation transition.
+ *
+ * All three methods must be assigned.
+ *
+ * @prepare: prepare system for hibernation
+ * @enter: shut down system after state has been saved to disk
+ * @finish: finish/clean up after state has been reloaded
+ */
+struct hibernation_ops {
+ int (*prepare)(void);
+ int (*enter)(void);
+ void (*finish)(void);
+};
+
#if defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND)
/* kernel/power/snapshot.c */
extern void __init register_nosave_region(unsigned long, unsigned long);
@@ -39,11 +57,17 @@ extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);
+
+extern void hibernation_set_ops(struct hibernation_ops *ops);
+extern int hibernate(void);
#else
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
+
+static inline void hibernation_set_ops(struct hibernation_ops *ops) {}
+static inline int hibernate(void) { return -ENOSYS; }
#endif /* defined(CONFIG_PM) && defined(CONFIG_SOFTWARE_SUSPEND) */
void save_processor_state(void);
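struct hibernation_ops is how a platform (ACPI, for instance) can take over the final power-off of a hibernation transition instead of the generic machine shutdown. A platform driver fills in all three callbacks and registers the structure with hibernation_set_ops(); the sketch below uses placeholder names and empty bodies purely for illustration:

static int example_hibernation_prepare(void)
{
	/* e.g. tell the firmware the next power-off is an S4 entry */
	return 0;
}

static int example_hibernation_enter(void)
{
	/* e.g. trigger the firmware-assisted power-off */
	return 0;
}

static void example_hibernation_finish(void)
{
	/* e.g. undo whatever prepare() set up, after the image is restored */
}

static struct hibernation_ops example_hibernation_ops = {
	.prepare = example_hibernation_prepare,
	.enter   = example_hibernation_enter,
	.finish  = example_hibernation_finish,
};

static int __init example_pm_init(void)
{
	hibernation_set_ops(&example_hibernation_ops);
	return 0;
}

When CONFIG_SOFTWARE_SUSPEND is not enabled, the stubs above make hibernation_set_ops() a no-op and hibernate() return -ENOSYS, so callers need no #ifdefs of their own.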
diff --git a/include/linux/svga.h b/include/linux/svga.h
index e1cc552e04fe..13ad0b82ac28 100644
--- a/include/linux/svga.h
+++ b/include/linux/svga.h
@@ -113,6 +113,8 @@ void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);
int svga_get_tilemax(struct fb_info *info);
+void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var);
int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 1912c6cbef55..3139f4412297 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -576,6 +576,8 @@ asmlinkage long sys_fstatat64(int dfd, char __user *filename,
struct stat64 __user *statbuf, int flag);
asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
int bufsiz);
+asmlinkage long sys_utimensat(int dfd, char __user *filename,
+ struct timespec __user *utimes, int flags);
asmlinkage long compat_sys_futimesat(unsigned int dfd, char __user *filename,
struct compat_timeval __user *t);
asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
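sys_utimensat() declared above follows the futimesat() pattern of a directory fd plus path, but takes two struct timespec entries for nanosecond resolution ([0] is the access time, [1] the modification time). A hedged user-space sketch of invoking it directly, assuming the architecture's unistd.h already defines __NR_utimensat (a libc wrapper would normally be used instead):

#include <fcntl.h>	/* AT_FDCWD */
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int example_set_times(const char *path, time_t atime, time_t mtime)
{
	struct timespec ts[2] = {
		{ .tv_sec = atime, .tv_nsec = 0 },	/* [0] = access time */
		{ .tv_sec = mtime, .tv_nsec = 0 },	/* [1] = modification time */
	};

	return syscall(__NR_utimensat, AT_FDCWD, path, ts, 0);
}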
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index acb1f105870c..d9325cf8a134 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -212,8 +212,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
#else /* CONFIG_SMP */
/*
@@ -260,7 +258,6 @@ static inline void __dec_zone_page_state(struct page *page,
#define mod_zone_page_state __mod_zone_page_state
static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
#endif
#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f16ba1e0687d..d555f31c0746 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -24,15 +24,13 @@ typedef void (*work_func_t)(struct work_struct *work);
struct work_struct {
atomic_long_t data;
#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
-#define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
};
-#define WORK_DATA_INIT(autorelease) \
- ATOMIC_LONG_INIT((autorelease) << WORK_STRUCT_NOAUTOREL)
+#define WORK_DATA_INIT() ATOMIC_LONG_INIT(0)
struct delayed_work {
struct work_struct work;
@@ -44,14 +42,8 @@ struct execute_work {
};
#define __WORK_INITIALIZER(n, f) { \
- .data = WORK_DATA_INIT(0), \
- .entry = { &(n).entry, &(n).entry }, \
- .func = (f), \
- }
-
-#define __WORK_INITIALIZER_NAR(n, f) { \
- .data = WORK_DATA_INIT(1), \
- .entry = { &(n).entry, &(n).entry }, \
+ .data = WORK_DATA_INIT(), \
+ .entry = { &(n).entry, &(n).entry }, \
.func = (f), \
}
@@ -60,23 +52,12 @@ struct execute_work {
.timer = TIMER_INITIALIZER(NULL, 0, 0), \
}
-#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
- .work = __WORK_INITIALIZER_NAR((n).work, (f)), \
- .timer = TIMER_INITIALIZER(NULL, 0, 0), \
- }
-
#define DECLARE_WORK(n, f) \
struct work_struct n = __WORK_INITIALIZER(n, f)
-#define DECLARE_WORK_NAR(n, f) \
- struct work_struct n = __WORK_INITIALIZER_NAR(n, f)
-
#define DECLARE_DELAYED_WORK(n, f) \
struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
-#define DECLARE_DELAYED_WORK_NAR(n, f) \
- struct dwork_struct n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
-
/*
* initialize a work item's function pointer
*/
@@ -95,16 +76,9 @@ struct execute_work {
* assignment of the work data initializer allows the compiler
* to generate better code.
*/
-#define INIT_WORK(_work, _func) \
- do { \
- (_work)->data = (atomic_long_t) WORK_DATA_INIT(0); \
- INIT_LIST_HEAD(&(_work)->entry); \
- PREPARE_WORK((_work), (_func)); \
- } while (0)
-
-#define INIT_WORK_NAR(_work, _func) \
+#define INIT_WORK(_work, _func) \
do { \
- (_work)->data = (atomic_long_t) WORK_DATA_INIT(1); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
INIT_LIST_HEAD(&(_work)->entry); \
PREPARE_WORK((_work), (_func)); \
} while (0)
@@ -115,12 +89,6 @@ struct execute_work {
init_timer(&(_work)->timer); \
} while (0)
-#define INIT_DELAYED_WORK_NAR(_work, _func) \
- do { \
- INIT_WORK_NAR(&(_work)->work, (_func)); \
- init_timer(&(_work)->timer); \
- } while (0)
-
#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
@@ -143,24 +111,10 @@ struct execute_work {
work_pending(&(w)->work)
/**
- * work_release - Release a work item under execution
- * @work: The work item to release
- *
- * This is used to release a work item that has been initialised with automatic
- * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
- * function the opportunity to grab auxiliary data from the container of the
- * work_struct before clearing the pending bit as the work_struct may be
- * subject to deallocation the moment the pending bit is cleared.
- *
- * In such a case, this should be called in the work function after it has
- * fetched any data it may require from the containter of the work_struct.
- * After this function has been called, the work_struct may be scheduled for
- * further execution or it may be deallocated unless other precautions are
- * taken.
- *
- * This should also be used to release a delayed work item.
+ * work_clear_pending - for internal use only, mark a work item as not pending
+ * @work: The work item in question
*/
-#define work_release(work) \
+#define work_clear_pending(work) \
clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
@@ -174,27 +128,28 @@ extern struct workqueue_struct *__create_workqueue(const char *name,
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
-extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
+extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
- struct delayed_work *work, unsigned long delay);
+ struct delayed_work *work, unsigned long delay);
+
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
+extern void flush_scheduled_work(void);
extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(run_scheduled_work(struct work_struct *work));
-extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
-
-extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
+extern int FASTCALL(schedule_delayed_work(struct delayed_work *work,
+ unsigned long delay));
+extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
+ unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
-extern void flush_scheduled_work(void);
extern int current_is_keventd(void);
extern int keventd_up(void);
extern void init_workqueues(void);
-void cancel_rearming_delayed_work(struct delayed_work *work);
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
- struct delayed_work *);
int execute_in_process_context(work_func_t fn, struct execute_work *);
+extern void cancel_work_sync(struct work_struct *work);
+
/*
* Kill off a pending schedule_delayed_work(). Note that the work callback
* function may still be running on return from cancel_delayed_work(), unless
@@ -207,8 +162,18 @@ static inline int cancel_delayed_work(struct delayed_work *work)
ret = del_timer(&work->timer);
if (ret)
- work_release(&work->work);
+ work_clear_pending(&work->work);
return ret;
}
+extern void cancel_rearming_delayed_work(struct delayed_work *work);
+
+/* Obsolete. Use cancel_rearming_delayed_work() */
+static inline
+void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
+ struct delayed_work *work)
+{
+ cancel_rearming_delayed_work(work);
+}
+
#endif
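The workqueue rework above drops the non-auto-release (_NAR) variants and work_release(): the core now clears the pending bit itself (work_clear_pending() is internal), and callers that must know a handler has finished use the new cancel_work_sync(), while cancel_rearming_delayed_work() keeps its old per-workqueue form only as a compatibility wrapper. A sketch of the usage pattern the reworked API expects, with illustrative driver names:

struct example_dev {
	struct work_struct irq_work;
	struct delayed_work poll_work;
};

static void example_irq_worker(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, irq_work);

	(void)dev;	/* real code would process the device here;
			 * no work_release() call is needed any more */
}

static void example_poll_worker(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, poll_work.work);

	/* poll the hardware, then rearm ourselves */
	schedule_delayed_work(&dev->poll_work, HZ);
}

static void example_setup(struct example_dev *dev)
{
	INIT_WORK(&dev->irq_work, example_irq_worker);
	INIT_DELAYED_WORK(&dev->poll_work, example_poll_worker);
	schedule_delayed_work(&dev->poll_work, HZ);
}

static void example_teardown(struct example_dev *dev)
{
	/* returns only once the handler is no longer running anywhere */
	cancel_work_sync(&dev->irq_work);
	/* same guarantee for the self-rearming delayed work */
	cancel_rearming_delayed_work(&dev->poll_work);
}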