Diffstat (limited to 'include/asm-x86_64')
39 files changed, 225 insertions, 202 deletions
diff --git a/include/asm-x86_64/apicdef.h b/include/asm-x86_64/apicdef.h
index decaa2d540e8..1dd40067c67c 100644
--- a/include/asm-x86_64/apicdef.h
+++ b/include/asm-x86_64/apicdef.h
@@ -39,6 +39,7 @@
 #define APIC_SPIV_FOCUS_DISABLED (1<<9)
 #define APIC_SPIV_APIC_ENABLED (1<<8)
 #define APIC_ISR 0x100
+#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
 #define APIC_TMR 0x180
 #define APIC_IRR 0x200
 #define APIC_ESR 0x280
@@ -136,8 +137,6 @@
  */
 #define u32 unsigned int

-#define lapic ((volatile struct local_apic *)APIC_BASE)
-
 struct local_apic {
 /*000*/	struct { u32 __reserved[4]; } __reserved_01;
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 4b5cd553e772..cecbf7baa6aa 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -405,8 +405,14 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 ({ \
 	int c, old; \
 	c = atomic_read(v); \
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+	for (;;) { \
+		if (unlikely(c == (u))) \
+			break; \
+		old = atomic_cmpxchg((v), c, c + (a)); \
+		if (likely(old == c)) \
+			break; \
 		c = old; \
+	} \
 	c != (u); \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h
index eb4df23e1e41..79212128d0f7 100644
--- a/include/asm-x86_64/bitops.h
+++ b/include/asm-x86_64/bitops.h
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word)

 #ifdef __KERNEL__

-static inline int sched_find_first_bit(const unsigned long *b)
-{
-	if (b[0])
-		return __ffs(b[0]);
-	if (b[1])
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
-}
+#include <asm-generic/bitops/sched.h>

 /**
  * ffs - find first bit set
@@ -412,43 +405,20 @@ static __inline__ int fls(int x)
 	return r+1;
 }

-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-#define hweight64(x) generic_hweight64(x)
-#define hweight32(x) generic_hweight32(x)
-#define hweight16(x) generic_hweight16(x)
-#define hweight8(x) generic_hweight8(x)
+#include <asm-generic/bitops/hweight.h>

 #endif /* __KERNEL__ */

 #ifdef __KERNEL__

-#define ext2_set_bit(nr,addr) \
-	__test_and_set_bit((nr),(unsigned long*)addr)
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
 #define ext2_set_bit_atomic(lock,nr,addr) \
 	test_and_set_bit((nr),(unsigned long*)addr)
-#define ext2_clear_bit(nr, addr) \
-	__test_and_clear_bit((nr),(unsigned long*)addr)
 #define ext2_clear_bit_atomic(lock,nr,addr) \
 	test_and_clear_bit((nr),(unsigned long*)addr)
-#define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr)
-#define ext2_find_first_zero_bit(addr, size) \
-	find_first_zero_bit((unsigned long*)addr, size)
-#define ext2_find_next_zero_bit(addr, size, off) \
-	find_next_zero_bit((unsigned long*)addr, size, off)
-
-/* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr)
-#define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr)
-#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr)
-#define minix_test_bit(nr,addr) test_bit(nr,(void*)addr)
-#define minix_find_first_zero_bit(addr,size) \
-	find_first_zero_bit((void*)addr,size)
+
+#include <asm-generic/bitops/minix.h>

 #endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index 263f0a211ed7..f8dff1c67538 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -22,4 +22,6 @@

 #endif

+#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
 #endif
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index 76bb6193ae91..662964b74e34 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -64,6 +64,7 @@
 #define X86_FEATURE_REP_GOOD (3*32+ 4) /* rep microcode works well on this CPU */
 #define X86_FEATURE_CONSTANT_TSC (3*32+5) /* TSC runs at constant rate */
 #define X86_FEATURE_SYNC_RDTSC (3*32+6) /* RDTSC syncs CPU core */
+#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
diff --git a/include/asm-x86_64/dmi.h b/include/asm-x86_64/dmi.h
new file mode 100644
index 000000000000..93b2b15d4325
--- /dev/null
+++ b/include/asm-x86_64/dmi.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_DMI_H
+#define _ASM_DMI_H 1
+
+#include <asm/io.h>
+
+extern void *dmi_ioremap(unsigned long addr, unsigned long size);
+extern void dmi_iounmap(void *addr, unsigned long size);
+
+#define DMI_MAX_DATA 2048
+
+extern int dmi_alloc_index;
+extern char dmi_alloc_data[DMI_MAX_DATA];
+
+/* This is so early that there is no good way to allocate dynamic memory.
+   Allocate data in an BSS array. */
+static inline void *dmi_alloc(unsigned len)
+{
+	int idx = dmi_alloc_index;
+	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
+		return NULL;
+	return dmi_alloc_data + idx;
+}
+
+#define dmi_ioremap early_ioremap
+#define dmi_iounmap early_iounmap
+
+#endif
diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h
index 8dcc32665240..670a3388e70a 100644
--- a/include/asm-x86_64/e820.h
+++ b/include/asm-x86_64/e820.h
@@ -47,7 +47,8 @@ extern void contig_e820_setup(void);
 extern unsigned long e820_end_of_ram(void);
 extern void e820_reserve_resources(void);
 extern void e820_print_map(char *who);
-extern int e820_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_any_mapped(unsigned long start, unsigned long end, unsigned type);
+extern int e820_all_mapped(unsigned long start, unsigned long end, unsigned type);
 extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
 extern void e820_setup_gap(void);
@@ -58,6 +59,8 @@ extern void __init parse_memopt(char *p, char **end);
 extern void __init parse_memmapopt(char *p, char **end);

 extern struct e820map e820;
+
+extern unsigned ebda_addr, ebda_size;
 #endif/*!__ASSEMBLY__*/
 #endif/*__E820_HEADER*/
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index 43862cd6a569..b4f8f4a41a6e 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -8,6 +8,7 @@
 #include <asm/ptrace.h>
 #include <asm/user.h>
 #include <asm/processor.h>
+#include <asm/compat.h>

 /* x86-64 relocation types */
 #define R_X86_64_NONE 0 /* No reloc */
@@ -157,6 +158,9 @@ extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)

+/* 1GB for 64bit, 8MB for 32bit */
+#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
+
 #endif

 #endif
diff --git a/include/asm-x86_64/floppy.h b/include/asm-x86_64/floppy.h
index af7ded63b517..52825ce689f2 100644
--- a/include/asm-x86_64/floppy.h
+++ b/include/asm-x86_64/floppy.h
@@ -155,7 +155,7 @@ static int fd_request_irq(void)

 static unsigned long dma_mem_alloc(unsigned long size)
 {
-	return __get_dma_pages(GFP_KERNEL,get_order(size));
+	return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY,get_order(size));
 }
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 8602c09bf89e..9804bf07b092 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	return ret;
 }

+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	__asm__ __volatile__(
+		"1: " LOCK_PREFIX "cmpxchgl %3, %1 \n"
+		"2: .section .fixup, \"ax\" \n"
+		"3: mov %2, %0 \n"
+		" jmp 2b \n"
+		" .previous \n"
+		" .section __ex_table, \"a\" \n"
+		" .align 8 \n"
+		" .quad 1b,3b \n"
+		" .previous \n"
+		: "=a" (oldval), "=m" (*uaddr)
+		: "i" (-EFAULT), "r" (newval), "0" (oldval)
+		: "memory"
+	);
+
+	return oldval;
+}
+
 #endif
 #endif
diff --git a/include/asm-x86_64/hpet.h b/include/asm-x86_64/hpet.h
index c20c28f5c7a0..18ff7ee9e774 100644
--- a/include/asm-x86_64/hpet.h
+++ b/include/asm-x86_64/hpet.h
@@ -51,10 +51,14 @@
 #define HPET_TN_ROUTE_SHIFT 9

+#define HPET_TICK_RATE (HZ * 100000UL)
+
 extern int is_hpet_enabled(void);
 extern int hpet_rtc_timer_init(void);
 extern int oem_force_hpet_timer(void);

+extern int hpet_use_timer;
+
 #ifdef CONFIG_HPET_EMULATE_RTC
 extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
 extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
diff --git a/include/asm-x86_64/i387.h b/include/asm-x86_64/i387.h
index 876eb9a2fe78..cba8a3b0cded 100644
--- a/include/asm-x86_64/i387.h
+++ b/include/asm-x86_64/i387.h
@@ -72,6 +72,23 @@ extern int set_fpregs(struct task_struct *tsk,
 #define set_fpu_swd(t,val) ((t)->thread.i387.fxsave.swd = (val))
 #define set_fpu_fxsr_twd(t,val) ((t)->thread.i387.fxsave.twd = (val))

+#define X87_FSW_ES (1 << 7) /* Exception Summary */
+
+/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+   is pending. Clear the x87 state here by setting it to fixed
+   values. The kernel data segment can be sometimes 0 and sometimes
+   new user value. Both should be ok.
+   Use the PDA as safe address because it should be already in L1. */
+static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+{
+	if (unlikely(fx->swd & X87_FSW_ES))
+		asm volatile("fnclex");
+	alternative_input(ASM_NOP8 ASM_NOP2,
+		" emms\n"		/* clear stack tags */
+		" fildl %%gs:0",	/* load to clear state */
+		X86_FEATURE_FXSAVE_LEAK);
+}
+
 static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 {
 	int err;
@@ -119,6 +136,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
 #endif
 	if (unlikely(err))
 		__clear_user(fx, sizeof(struct i387_fxsave_struct));
+	/* No need to clear here because the caller clears USED_MATH */
 	return err;
 }

@@ -149,7 +167,7 @@ static inline void __fxsave_clear(struct task_struct *tsk)
 		"i" (offsetof(__typeof__(*tsk), thread.i387.fxsave)));
 #endif
-	__asm__ __volatile__("fnclex");
+	clear_fpu_state(&tsk->thread.i387.fxsave);
 }

 static inline void kernel_fpu_begin(void)
diff --git a/include/asm-x86_64/ia32_unistd.h b/include/asm-x86_64/ia32_unistd.h
index 20468983d453..b4f4b172b15a 100644
--- a/include/asm-x86_64/ia32_unistd.h
+++ b/include/asm-x86_64/ia32_unistd.h
@@ -305,7 +305,7 @@
 #define __NR_ia32_mknodat 297
 #define __NR_ia32_fchownat 298
 #define __NR_ia32_futimesat 299
-#define __NR_ia32_newfstatat 300
+#define __NR_ia32_fstatat64 300
 #define __NR_ia32_unlinkat 301
 #define __NR_ia32_renameat 302
 #define __NR_ia32_linkat 303
@@ -317,6 +317,4 @@
 #define __NR_ia32_ppoll 309
 #define __NR_ia32_unshare 310

-#define IA32_NR_syscalls 315 /* must be > than biggest syscall! */
-
 #endif /* _ASM_X86_64_IA32_UNISTD_H_ */
diff --git a/include/asm-x86_64/io.h b/include/asm-x86_64/io.h
index 9dac18db8291..a05da8a50bfd 100644
--- a/include/asm-x86_64/io.h
+++ b/include/asm-x86_64/io.h
@@ -135,6 +135,9 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 	return __ioremap(offset, size, 0);
 }

+extern void *early_ioremap(unsigned long addr, unsigned long size);
+extern void early_iounmap(void *addr, unsigned long size);
+
 /*
  * This one maps high address device memory and turns off caching for that area.
  * it's useful if some control registers are in such an area and write combining
@@ -143,11 +146,6 @@ static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);

-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
@@ -179,7 +177,7 @@ static inline __u16 __readw(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u16 *)addr;
 }
-static inline __u32 __readl(const volatile void __iomem *addr)
+static __always_inline __u32 __readl(const volatile void __iomem *addr)
 {
 	return *(__force volatile __u32 *)addr;
 }
@@ -202,23 +200,6 @@ static inline __u64 __readq(const volatile void __iomem *addr)

 #define mmiowb()

-#ifdef CONFIG_UNORDERED_IO
-static inline void __writel(__u32 val, volatile void __iomem *addr)
-{
-	volatile __u32 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		: "=m" (*target)
-		: "r" (val) : "memory");
-}
-
-static inline void __writeq(__u64 val, volatile void __iomem *addr)
-{
-	volatile __u64 __iomem *target = addr;
-	asm volatile("movnti %1,%0"
-		: "=m" (*target)
-		: "r" (val) : "memory");
-}
-#else
 static inline void __writel(__u32 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u32 *)addr = b;
@@ -227,7 +208,6 @@ static inline void __writeq(__u64 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u64 *)addr = b;
 }
-#endif
 static inline void __writeb(__u8 b, volatile void __iomem *addr)
 {
 	*(__force volatile __u8 *)addr = b;
@@ -269,23 +249,11 @@ void memset_io(volatile void __iomem *a, int b, size_t c);
  */
 #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

-#define isa_readb(a) readb(__ISA_IO_base + (a))
-#define isa_readw(a) readw(__ISA_IO_base + (a))
-#define isa_readl(a) readl(__ISA_IO_base + (a))
-#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
-#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
-#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
-#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c))
-#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
-#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c))
-
-
 /*
  * Again, x86-64 does not require mem IO specific function.
  */

 #define eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(b),(c),(d))
-#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(void *)(__ISA_IO_base + (b)),(c),(d))

 /**
  * check_signature - find BIOS signatures
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index ee1bc69aec9c..52484e82c641 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -205,6 +205,7 @@ extern int skip_ioapic_setup;
 extern int io_apic_get_version (int ioapic);
 extern int io_apic_get_redir_entries (int ioapic);
 extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
+extern int timer_uses_ioapic_pin_0;
 #endif

 extern int sis_apic_bug; /* dummy */
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index b9ed4c0c8783..cf795631d9b4 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,21 +5,20 @@

 struct pt_regs;

-struct die_args {
+struct die_args {
 	struct pt_regs *regs;
 	const char *str;
-	long err;
+	long err;
 	int trapnr;
 	int signr;
-};
+};
+
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
+extern struct atomic_notifier_head die_chain;

-/* Note - you should never unregister because that can race with NMIs.
-   If you really want to do it first unregister - then synchronize_sched
-   - then free. */
-int register_die_notifier(struct notifier_block *nb);
-extern struct notifier_block *die_chain;
 /* Grossly misnamed. */
-enum die_val {
+enum die_val {
 	DIE_OOPS = 1,
 	DIE_INT3,
 	DIE_DEBUG,
@@ -33,8 +32,8 @@ enum die_val {
 	DIE_CALL,
 	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
-};
-
+};
+
 static inline int notify_die(enum die_val val, const char *str,
 	struct pt_regs *regs, long err, int trap, int sig)
 {
@@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str,
 		.trapnr = trap,
 		.signr = sig
 	};
-	return notifier_call_chain(&die_chain, val, &args);
+	return atomic_notifier_call_chain(&die_chain, val, &args);
 }

 extern int printk_address(unsigned long address);
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index 3e72c41727c5..cd17945bf218 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -5,7 +5,7 @@

 typedef struct
 {
-	volatile unsigned int counter;
+	volatile long counter;
 } local_t;

 #define LOCAL_INIT(i) { (i) }
@@ -13,34 +13,34 @@ typedef struct
 #define local_read(v) ((v)->counter)
 #define local_set(v,i) (((v)->counter) = (i))

-static __inline__ void local_inc(local_t *v)
+static inline void local_inc(local_t *v)
 {
 	__asm__ __volatile__(
-		"incl %0"
+		"incq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }

-static __inline__ void local_dec(local_t *v)
+static inline void local_dec(local_t *v)
 {
 	__asm__ __volatile__(
-		"decl %0"
+		"decq %0"
 		:"=m" (v->counter)
 		:"m" (v->counter));
 }

-static __inline__ void local_add(unsigned int i, local_t *v)
+static inline void local_add(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"addl %1,%0"
+		"addq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }

-static __inline__ void local_sub(unsigned int i, local_t *v)
+static inline void local_sub(long i, local_t *v)
 {
 	__asm__ __volatile__(
-		"subl %1,%0"
+		"subq %1,%0"
 		:"=m" (v->counter)
 		:"ir" (i), "m" (v->counter));
 }
diff --git a/include/asm-x86_64/mce.h b/include/asm-x86_64/mce.h
index 5d298b799a9f..7229785094e3 100644
--- a/include/asm-x86_64/mce.h
+++ b/include/asm-x86_64/mce.h
@@ -70,6 +70,9 @@ struct mce_log {
 #define MCE_THRESHOLD_BASE      MCE_EXTENDED_BANK + 1 /* MCE_AMD */
 #define MCE_THRESHOLD_DRAM_ECC  MCE_THRESHOLD_BASE + 4

+#ifdef __KERNEL__
+#include <asm/atomic.h>
+
 void mce_log(struct mce *m);
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
@@ -87,4 +90,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c)
 }
 #endif

+extern atomic_t mce_entry;
+
+#endif
+
 #endif
diff --git a/include/asm-x86_64/mman.h b/include/asm-x86_64/mman.h
index d0e97b74f735..dd5cb0534d37 100644
--- a/include/asm-x86_64/mman.h
+++ b/include/asm-x86_64/mman.h
@@ -1,19 +1,8 @@
 #ifndef __X8664_MMAN_H__
 #define __X8664_MMAN_H__

-#define PROT_READ 0x1 /* page can be read */
-#define PROT_WRITE 0x2 /* page can be written */
-#define PROT_EXEC 0x4 /* page can be executed */
-#define PROT_NONE 0x0 /* page can not be accessed */
-#define PROT_SEM 0x8
-#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
-#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+#include <asm-generic/mman.h>

-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_TYPE 0x0f /* Mask for type of mapping */
-#define MAP_FIXED 0x10 /* Interpret addr exactly */
-#define MAP_ANONYMOUS 0x20 /* don't use a file */
 #define MAP_32BIT 0x40 /* only give out 32bit addresses */

 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */
@@ -24,22 +13,7 @@
 #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
 #define MAP_NONBLOCK 0x10000 /* do not block on IO */

-#define MS_ASYNC 1 /* sync memory asynchronously */
-#define MS_INVALIDATE 2 /* invalidate the caches */
-#define MS_SYNC 4 /* synchronous memory sync */
-
 #define MCL_CURRENT 1 /* lock all current mappings */
 #define MCL_FUTURE 2 /* lock all future mappings */

-#define MADV_NORMAL 0x0 /* default page-in behavior */
-#define MADV_RANDOM 0x1 /* page-in minimum required */
-#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
-#define MADV_WILLNEED 0x3 /* pre-fault pages */
-#define MADV_DONTNEED 0x4 /* discard these pages */
-#define MADV_REMOVE 0x5 /* remove these pages & resources */
-
-/* compatibility flags */
-#define MAP_ANON MAP_ANONYMOUS
-#define MAP_FILE 0
-
 #endif
diff --git a/include/asm-x86_64/mmu_context.h b/include/asm-x86_64/mmu_context.h
index 16e4be4de0c5..19f0c83d0792 100644
--- a/include/asm-x86_64/mmu_context.h
+++ b/include/asm-x86_64/mmu_context.h
@@ -34,12 +34,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		write_pda(mmu_state, TLBSTATE_OK);
 		write_pda(active_mm, next);
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);

 		if (unlikely(next->context.ldt != prev->context.ldt))
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			write_pda(mmu_state, TLBSTATE_OK);
 			if (read_pda(active_mm) != next)
 				out_of_line_bug();
-			if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+			if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 				/* We were in lazy tlb mode and leave_mm disabled
 				 * tlb flush IPI delivery. We must reload CR3
 				 * to make sure to use no freed page tables.
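The atomic.h hunk near the top of this diff unrolls the atomic_add_unless() loop into an explicit for (;;) so each exit path can carry an unlikely()/likely() hint. A minimal userspace sketch of the same retry pattern, using GCC's __sync_val_compare_and_swap in place of the kernel's atomic_cmpxchg() (the builtin choice and the demo values are assumptions for illustration, not part of the patch):

#include <stdio.h>

/* Same shape as the rewritten atomic_add_unless(): loop until either
   the excluded value u is observed or the compare-and-swap succeeds. */
static int atomic_add_unless(int *v, int a, int u)
{
	int c = *v;

	for (;;) {
		if (c == u)		/* excluded value seen: do nothing */
			break;
		/* returns what *v held at the moment of the CAS */
		int old = __sync_val_compare_and_swap(v, c, c + a);
		if (old == c)		/* CAS won the race: the add happened */
			break;
		c = old;		/* CAS lost: retry with the fresh value */
	}
	return c != u;			/* nonzero iff the add was performed */
}

int main(void)
{
	int v = 5;
	printf("%d v=%d\n", atomic_add_unless(&v, 1, 5), v);	/* 0 v=5 */
	printf("%d v=%d\n", atomic_add_unless(&v, 1, 0), v);	/* 1 v=6 */
	return 0;
}

Feeding the compare-and-swap's return value back into c means a lost race costs no extra load of *v, which is what makes the likely(old == c) fast-path annotation meaningful.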
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h
index 972c9359f7d7..6944e7122df5 100644
--- a/include/asm-x86_64/mmzone.h
+++ b/include/asm-x86_64/mmzone.h
@@ -12,11 +12,17 @@

 #include <asm/smp.h>

-#define NODEMAPSIZE 0xfff
+/* Should really switch to dynamic allocation at some point */
+#define NODEMAPSIZE 0x4fff

 /* Simple perfect hash to map physical addresses to node numbers */
-extern int memnode_shift;
-extern u8 memnodemap[NODEMAPSIZE];
+struct memnode {
+	int shift;
+	u8 map[NODEMAPSIZE];
+} ____cacheline_aligned;
+extern struct memnode memnode;
+#define memnode_shift memnode.shift
+#define memnodemap memnode.map

 extern struct pglist_data *node_data[];

@@ -39,12 +45,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT)
 #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr))

-extern struct page *pfn_to_page(unsigned long pfn);
-extern unsigned long page_to_pfn(struct page *page);
 extern int pfn_valid(unsigned long pfn);
 #endif

-#define local_mapnr(kvaddr) \
-	( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) )
 #endif
 #endif
diff --git a/include/asm-x86_64/numa.h b/include/asm-x86_64/numa.h
index dffe276ca2df..1cc92fe02503 100644
--- a/include/asm-x86_64/numa.h
+++ b/include/asm-x86_64/numa.h
@@ -2,13 +2,12 @@
 #define _ASM_X8664_NUMA_H 1

 #include <linux/nodemask.h>
-#include <asm/numnodes.h>

-struct node {
+struct bootnode {
 	u64 start,end;
 };

-extern int compute_hash_shift(struct node *nodes, int numnodes);
+extern int compute_hash_shift(struct bootnode *nodes, int numnodes);
 extern int pxm_to_node(int nid);

 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
@@ -18,6 +17,8 @@ extern void numa_init_array(void);
 extern int numa_off;

 extern void numa_set_node(int cpu, int node);
+extern void srat_reserve_add_area(int nodeid);
+extern int hotadd_percent;

 extern unsigned char apicid_to_node[256];
 #ifdef CONFIG_NUMA
diff --git a/include/asm-x86_64/numnodes.h b/include/asm-x86_64/numnodes.h
deleted file mode 100644
index 32be16b8ae96..000000000000
--- a/include/asm-x86_64/numnodes.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X8664_NUMNODES_H
-#define _ASM_X8664_NUMNODES_H 1
-
-#include <linux/config.h>
-
-#ifdef CONFIG_NUMA
-#define NODES_SHIFT 6
-#else
-#define NODES_SHIFT 0
-#endif
-
-#endif
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 615e3e494929..408185bac351 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 #define __boot_va(x) __va(x)
 #define __boot_pa(x) __pa(x)
 #ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn) (mem_map + (pfn))
-#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
 #define pfn_valid(pfn) ((pfn) < end_pfn)
 #endif

@@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;

 #endif /* __KERNEL__ */

+#include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>

 #endif /* _X86_64_PAGE_H */
diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h
index fd03e15d7ea6..8a05af264d18 100644
--- a/include/asm-x86_64/pci.h
+++ b/include/asm-x86_64/pci.h
@@ -19,8 +19,6 @@ extern unsigned int pcibios_assign_all_busses(void);
 #endif
 #define pcibios_scan_all_fns(a, b) 0

-extern int no_iommu, force_iommu;
-
 extern unsigned long pci_mem_start;
 #define PCIBIOS_MIN_IO 0x1000
 #define PCIBIOS_MIN_MEM (pci_mem_start)
diff --git a/include/asm-x86_64/pda.h b/include/asm-x86_64/pda.h
index c7ab38a601af..b47c3df9ed1d 100644
--- a/include/asm-x86_64/pda.h
+++ b/include/asm-x86_64/pda.h
@@ -22,8 +22,8 @@ struct x8664_pda {
 	int nodenumber;		/* number of current node */
 	unsigned int __softirq_pending;
 	unsigned int __nmi_count;	/* number of NMI on this CPUs */
-	struct mm_struct *active_mm;
 	int mmu_state;
+	struct mm_struct *active_mm;
 	unsigned apic_timer_irqs;
 } ____cacheline_aligned_in_smp;
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 29a6b0408f75..7f33aaf9f7b1 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -26,10 +26,9 @@
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_possible_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)

 extern void setup_per_cpu_areas(void);
diff --git a/include/asm-x86_64/pgalloc.h b/include/asm-x86_64/pgalloc.h
index 08cad2482bcb..43d4c333a8b1 100644
--- a/include/asm-x86_64/pgalloc.h
+++ b/include/asm-x86_64/pgalloc.h
@@ -45,12 +45,39 @@ static inline void pud_free (pud_t *pud)
 	free_page((unsigned long)pud);
 }

+static inline void pgd_list_add(pgd_t *pgd)
+{
+	struct page *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	page->index = (pgoff_t)pgd_list;
+	if (pgd_list)
+		pgd_list->private = (unsigned long)&page->index;
+	pgd_list = page;
+	page->private = (unsigned long)&pgd_list;
+	spin_unlock(&pgd_lock);
+}
+
+static inline void pgd_list_del(pgd_t *pgd)
+{
+	struct page *next, **pprev, *page = virt_to_page(pgd);
+
+	spin_lock(&pgd_lock);
+	next = (struct page *)page->index;
+	pprev = (struct page **)page->private;
+	*pprev = next;
+	if (next)
+		next->private = (unsigned long)pprev;
+	spin_unlock(&pgd_lock);
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 	if (!pgd)
 		return NULL;
+	pgd_list_add(pgd);
 	/*
 	 * Copy kernel pointers in from init.
 	 * Could keep a freelist or slab cache of those because the kernel
@@ -67,6 +94,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pgd_free(pgd_t *pgd)
 {
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	pgd_list_del(pgd);
 	free_page((unsigned long)pgd);
 }
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 8fbf4dd72115..31e83c3bd022 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -131,7 +131,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))

-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS 0

 #ifndef __ASSEMBLY__
@@ -273,7 +273,7 @@ static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte) { return (pte_val(pte) & __LARGE_PTE) == __LARGE_PTE; }
+static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_PSE; }

 static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
 static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
@@ -285,7 +285,7 @@ static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _
 static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
 static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | __LARGE_PTE)); return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }

 struct vm_area_struct;

@@ -293,19 +293,19 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
 {
 	if (!pte_dirty(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
 }

 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
 }

 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	clear_bit(_PAGE_BIT_RW, ptep);
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
 }

 /*
@@ -420,6 +420,10 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })

+extern spinlock_t pgd_lock;
+extern struct page *pgd_list;
+void vmalloc_sync_all(void);
+
 #endif /* !__ASSEMBLY__ */

 extern int kern_addr_valid(unsigned long addr);
diff --git a/include/asm-x86_64/poll.h b/include/asm-x86_64/poll.h
index c43cbba31913..c0475a9d8bb8 100644
--- a/include/asm-x86_64/poll.h
+++ b/include/asm-x86_64/poll.h
@@ -16,6 +16,7 @@
 #define POLLWRBAND 0x0200
 #define POLLMSG 0x0400
 #define POLLREMOVE 0x1000
+#define POLLRDHUP 0x2000

 struct pollfd {
 	int fd;
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 8c8d88c036ed..37a3ec433ee5 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -20,6 +20,7 @@
 #include <asm/mmsegment.h>
 #include <asm/percpu.h>
 #include <linux/personality.h>
+#include <linux/cpumask.h>

 #define TF_MASK 0x00000100
 #define IF_MASK 0x00000200
@@ -65,6 +66,9 @@ struct cpuinfo_x86 {
 	__u32 x86_power;
 	__u32 extended_cpuid_level;	/* Max extended CPUID function supported */
 	unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+#endif
 	__u8 apicid;
 	__u8 booted_cores;	/* number of cores as seen by OS */
 } ____cacheline_aligned;
@@ -354,9 +358,6 @@ struct extended_sigtable {
 	struct extended_signature sigs[0];
 };

-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
-
 #define ASM_NOP1 K8_NOP1
 #define ASM_NOP2 K8_NOP2
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index c99832e7bf3f..8abf2a43c944 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -39,7 +39,6 @@ extern void config_acpi_tables(void);
 extern void ia32_syscall(void);
 extern void iommu_hole_init(void);

-extern void time_init_gtod(void);
 extern int pmtimer_mark_offset(void);
 extern void pmtimer_resume(void);
 extern void pmtimer_wait(unsigned);
@@ -54,8 +53,6 @@ extern int sysctl_vsyscall;
 extern int nohpet;
 extern unsigned long vxtime_hz;

-extern void do_softirq_thunk(void);
-
 extern int numa_setup(char *opt);

 extern int setup_early_printk(char *);
@@ -130,9 +127,10 @@ extern int fix_aperture;
 #define iommu_aperture 0
 #define iommu_aperture_allowed 0
 #endif
-extern int force_iommu;

 extern int reboot_force;
+extern int notsc_setup(char *);
+extern int setup_additional_cpus(char *);

 extern void smp_local_timer_interrupt(struct pt_regs * regs);
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 9ccbb2cfd5c0..a4fdaeb5c397 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern u8 phys_proc_id[NR_CPUS];
 extern u8 cpu_core_id[NR_CPUS];
+extern u8 cpu_llc_id[NR_CPUS];

 #define SMP_TRAMPOLINE_BASE 0x6000
diff --git a/include/asm-x86_64/string.h b/include/asm-x86_64/string.h
index a3493ee282bb..ee6bf275349e 100644
--- a/include/asm-x86_64/string.h
+++ b/include/asm-x86_64/string.h
@@ -40,26 +40,15 @@ extern void *__memcpy(void *to, const void *from, size_t len);

 #define __HAVE_ARCH_MEMSET
-#define memset __builtin_memset
+void *memset(void *s, int c, size_t n);

 #define __HAVE_ARCH_MEMMOVE
 void * memmove(void * dest,const void *src,size_t count);

-/* Use C out of line version for memcmp */
-#define memcmp __builtin_memcmp
 int memcmp(const void * cs,const void * ct,size_t count);
-
-/* out of line string functions use always C versions */
-#define strlen __builtin_strlen
 size_t strlen(const char * s);
-
-#define strcpy __builtin_strcpy
-char * strcpy(char * dest,const char *src);
-
-#define strcat __builtin_strcat
-char * strcat(char * dest, const char * src);
-
-#define strcmp __builtin_strcmp
+char *strcpy(char * dest,const char *src);
+char *strcat(char * dest, const char * src);
 int strcmp(const char * cs,const char * ct);

 #endif /* __KERNEL__ */
diff --git a/include/asm-x86_64/suspend.h b/include/asm-x86_64/suspend.h
index bb9f40597d09..bc7f81715e5e 100644
--- a/include/asm-x86_64/suspend.h
+++ b/include/asm-x86_64/suspend.h
@@ -39,9 +39,7 @@ extern unsigned long saved_context_r12, saved_context_r13, saved_context_r14, sa
 extern unsigned long saved_context_eflags;

 #define loaddebug(thread,register) \
-	__asm__("movq %0,%%db" #register \
-		: /* no output */ \
-		:"r" ((thread)->debugreg##register))
+	set_debugreg((thread)->debugreg##register, register)

 extern void fix_processor_context(void);
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index b7f66034ae7a..397598980228 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -70,12 +70,6 @@ extern void load_gs_index(unsigned);
 		".previous" \
 		: :"r" (value), "r" (0))

-#define set_debug(value,register) \
-	__asm__("movq %0,%%db" #register \
-		: /* no output */ \
-		:"r" ((unsigned long) value))
-
-
 #ifdef __KERNEL__
 struct alt_instr {
 	__u8 *instr;	/* original instruction */
diff --git a/include/asm-x86_64/timex.h b/include/asm-x86_64/timex.h
index f18443fcdf04..b9e5320b7625 100644
--- a/include/asm-x86_64/timex.h
+++ b/include/asm-x86_64/timex.h
@@ -33,7 +33,7 @@ static __always_inline cycles_t get_cycles_sync(void)
 	unsigned eax;
 	/* Don't do an additional sync on CPUs where we know
 	   RDTSC is already synchronous. */
-	alternative_io(ASM_NOP2, "cpuid", X86_FEATURE_SYNC_RDTSC,
+	alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC,
 		       "=a" (eax), "0" (1) : "ebx","ecx","edx","memory");
 	rdtscll(ret);
 	return ret;
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c642f5d9882d..9db54e9d17bb 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -68,4 +68,6 @@ extern int __node_distance(int, int);

 #include <asm-generic/topology.h>

+extern cpumask_t cpu_coregroup_map(int cpu);
+
 #endif
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index da0341c57949..feb77cb8c044 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -605,8 +605,20 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */
 __SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */
 #define __NR_unshare 272
 __SYSCALL(__NR_unshare, sys_unshare)
-
-#define __NR_syscall_max __NR_unshare
+#define __NR_set_robust_list 273
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list 274
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
+#define __NR_splice 275
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee 276
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_sync_file_range 277
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
+#define __NR_vmsplice 278
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
+
+#define __NR_syscall_max __NR_vmsplice

 #ifndef __NO_STUBS
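The new dmi.h earlier in this diff handles allocation that happens before any real allocator exists by carving from a fixed BSS array. A standalone sketch of that bump allocator; the allocator body mirrors the header, while the demo main() is an illustrative assumption:

#include <stdio.h>

#define DMI_MAX_DATA 2048

static char dmi_alloc_data[DMI_MAX_DATA];	/* fixed BSS storage, zeroed */
static int dmi_alloc_index;

/* Same logic as the header: hand out bytes idx..idx+len-1, fail once
   the array is exhausted.  The index stays bumped after a failure,
   which is harmless here: every later call simply fails as well. */
static void *dmi_alloc(unsigned len)
{
	int idx = dmi_alloc_index;
	if ((dmi_alloc_index += len) > DMI_MAX_DATA)
		return NULL;
	return dmi_alloc_data + idx;
}

int main(void)
{
	char *a = dmi_alloc(16);
	char *b = dmi_alloc(16);
	printf("a=%p b=%p used=%d\n", (void *)a, (void *)b, dmi_alloc_index);
	return 0;
}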
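The pgalloc.h hunk threads every pgd's struct page onto a global list, storing the next pointer in page->index and, in page->private, a pointer back to whatever points at the page, so unlinking never walks from the head. A sketch of that pprev-style intrusive list with plain node types (the kernel version additionally holds pgd_lock around both operations; the names here are illustrative):

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* plays the role of page->index */
	struct node **pprev;	/* plays the role of page->private */
};

static struct node *list_head;	/* plays the role of pgd_list */

static void node_add(struct node *n)
{
	n->next = list_head;
	if (list_head)
		list_head->pprev = &n->next;
	list_head = n;
	n->pprev = &list_head;
}

static void node_del(struct node *n)
{
	*n->pprev = n->next;	/* unlink: works for head and middle alike */
	if (n->next)
		n->next->pprev = n->pprev;
}

int main(void)
{
	struct node a, b;
	node_add(&a);
	node_add(&b);		/* list is now b -> a */
	node_del(&b);		/* deleting the head needs no special case */
	assert(list_head == &a && a.pprev == &list_head);
	node_del(&a);
	assert(list_head == NULL);
	return 0;
}

Keeping a pointer to the incoming link rather than to the previous node removes the head-versus-middle distinction from deletion, the same trick the kernel's hlist uses.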
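The mmzone.h hunk packs memnode_shift and memnodemap[] into a single cacheline-aligned struct memnode so that the phys_to_nid() hot path, a shift plus a byte-table load, finds all its metadata in one cache line. A rough sketch of that lookup; the shift value, the aligned(64) attribute standing in for ____cacheline_aligned, and the node layout are made-up example numbers:

#include <stdint.h>
#include <stdio.h>

#define NODEMAPSIZE 0x4fff

struct memnode {
	int shift;			/* log2 of bytes covered per map entry */
	uint8_t map[NODEMAPSIZE];	/* chunk number -> node id */
} __attribute__((aligned(64)));

static struct memnode memnode = { .shift = 24 };	/* 16MB chunks: assumption */

static int phys_to_nid(unsigned long addr)
{
	/* one shift, one byte load */
	return memnode.map[addr >> memnode.shift];
}

int main(void)
{
	memnode.map[(4UL << 30) >> memnode.shift] = 1;	/* pretend 4GB is on node 1 */
	printf("node of 4GB = %d\n", phys_to_nid(4UL << 30));
	return 0;
}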