Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--   include/asm-x86_64/futex.h      | 27
-rw-r--r--   include/asm-x86_64/kdebug.h     | 23
-rw-r--r--   include/asm-x86_64/mmzone.h     |  4
-rw-r--r--   include/asm-x86_64/page.h       |  3
-rw-r--r--   include/asm-x86_64/processor.h  |  7
-rw-r--r--   include/asm-x86_64/smp.h        |  1
-rw-r--r--   include/asm-x86_64/topology.h   |  2
-rw-r--r--   include/asm-x86_64/unistd.h     |  6
8 files changed, 51 insertions, 22 deletions
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89e..9804bf07b092 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }
 
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__(
+		"1:	" LOCK_PREFIX "cmpxchgl %3, %1		\n"
+
+	"2:	.section .fixup, \"ax\"			\n"
+	"3:	mov     %2, %0				\n"
+	"	jmp     2b				\n"
+	"	.previous				\n"
+
+	"	.section __ex_table, \"a\"		\n"
+	"	.align  8				\n"
+	"	.quad   1b,3b				\n"
+	"	.previous				\n"
+
+		: "=a" (oldval), "=m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "0" (oldval)
+		: "memory"
+	);
+
+	return oldval;
+}
+
 #endif
 #endif
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c8783..cf795631d9b4 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@
 
 struct pt_regs;
 
-struct die_args { 
+struct die_args {
 	struct pt_regs *regs;
 	const char *str;
-	long err; 
+	long err;
 	int trapnr;
 	int signr;
-}; 
+};
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;
 
-/* Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched - then free.
-  */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
 /* Grossly misnamed. */
-enum die_val { 
+enum die_val {
 	DIE_OOPS = 1,
 	DIE_INT3,
 	DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
 	DIE_CALL,
 	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
-}; 
-
+};
+
 static inline int notify_die(enum die_val val, const char *str,
 			struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
 		.trapnr	= trap,
 		.signr	= sig
 	};
-	return notifier_call_chain(&die_chain, val, &args); 
+	return atomic_notifier_call_chain(&die_chain, val, &args);
 }
 
 extern int printk_address(unsigned long address);
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 937f99b26883..6b18cd8f293d 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -44,12 +44,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
 #define kvaddr_to_nid(kaddr)	phys_to_nid(__pa(kaddr))
 
-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
 extern int pfn_valid(unsigned long pfn);
 #endif
 
-#define local_mapnr(kvaddr) \
-	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
 #endif
 #endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 615e3e494929..408185bac351 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __boot_va(x)		__va(x)
 #define __boot_pa(x)		__pa(x)
 #ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn)		((pfn) < end_pfn)
 #endif
 
@@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 
 #endif /* __KERNEL__ */
 
+#include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
 #endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 8c8d88c036ed..37a3ec433ee5 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
 #include <asm/mmsegment.h>
 #include <asm/percpu.h>
 #include <linux/personality.h>
+#include <linux/cpumask.h>
 
 #define TF_MASK		0x00000100
 #define IF_MASK		0x00000200
@@ -65,6 +66,9 @@ struct cpuinfo_x86 {
 	__u32	x86_power;
 	__u32	extended_cpuid_level;	/* Max extended CPUID function supported */
 	unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+#endif
 	__u8	apicid;
 	__u8	booted_cores;	/* number of cores as seen by OS */
 } ____cacheline_aligned;
@@ -354,9 +358,6 @@ struct extended_sigtable {
 	struct extended_signature sigs[0];
 };
 
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE	_IO('6',0)
-
 
 #define ASM_NOP1 K8_NOP1
 #define ASM_NOP2 K8_NOP2
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 9ccbb2cfd5c0..a4fdaeb5c397 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern u8 phys_proc_id[NR_CPUS];
 extern u8 cpu_core_id[NR_CPUS];
+extern u8 cpu_llc_id[NR_CPUS];
 
 #define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c642f5d9882d..9db54e9d17bb 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -68,4 +68,6 @@ extern int __node_distance(int, int);
 
 #include <asm-generic/topology.h>
 
+extern cpumask_t cpu_coregroup_map(int cpu);
+
 #endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c57949..fcc516353087 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall)	/* for now */
 __SYSCALL(__NR_ppoll,	sys_ni_syscall)	/* for now */
 #define __NR_unshare		272
 __SYSCALL(__NR_unshare,	sys_unshare)
+#define __NR_set_robust_list	273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list	274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
 
-#define __NR_syscall_max __NR_unshare
+#define __NR_syscall_max __NR_get_robust_list
 
 #ifndef __NO_STUBS
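
Usage note (not part of the patch): the futex.h hunk adds futex_atomic_cmpxchg_inatomic(), which compares the word at a user address against an expected value and, only if it matches, stores a new value; it returns whatever the word held before the operation, or -EFAULT if the user access faults. A minimal user-space sketch of the same compare-and-exchange semantics, using the GCC __sync builtin instead of the kernel's inline asm (names and values here are illustrative only):

#include <stdio.h>

/*
 * Illustrative analogue of futex_atomic_cmpxchg_inatomic(): if *uaddr
 * equals oldval, store newval; in all cases return the value that was
 * in *uaddr before the operation.  (The kernel version additionally
 * returns -EFAULT when the user-space access faults.)
 */
static int cmpxchg_int(int *uaddr, int oldval, int newval)
{
    return __sync_val_compare_and_swap(uaddr, oldval, newval);
}

int main(void)
{
    int lock_word = 42;

    printf("%d\n", cmpxchg_int(&lock_word, 42, 7));   /* prints 42, stores 7 */
    printf("%d\n", cmpxchg_int(&lock_word, 42, 99));  /* prints 7, no store  */
    printf("final: %d\n", lock_word);                 /* prints final: 7     */
    return 0;
}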
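
Usage note (not part of the patch): the kdebug.h hunk converts die_chain to an atomic notifier chain and exports unregister_die_notifier() next to register_die_notifier(), dropping the old comment that warned against unregistering. A rough sketch of a client of this interface, with a hypothetical module and handler name, might look like:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Illustrative handler: log oops events delivered over the die chain. */
static int example_die_handler(struct notifier_block *nb,
                               unsigned long val, void *data)
{
    struct die_args *args = data;

    if (val == DIE_OOPS)
        printk(KERN_INFO "die notifier: %s (trap %d)\n",
               args->str, args->trapnr);
    return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
    .notifier_call = example_die_handler,
};

static int __init example_init(void)
{
    return register_die_notifier(&example_die_nb);
}

static void __exit example_exit(void)
{
    /* With die_chain now an atomic notifier chain, unregistering is
     * supported through the newly exported unregister_die_notifier(). */
    unregister_die_notifier(&example_die_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");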