Diffstat (limited to 'include/asm-i386')
-rw-r--r--   include/asm-i386/atomic.h      | 21
-rw-r--r--   include/asm-i386/elf.h         |  2
-rw-r--r--   include/asm-i386/ide.h         |  6
-rw-r--r--   include/asm-i386/kprobes.h     | 17
-rw-r--r--   include/asm-i386/msi.h         |  9
-rw-r--r--   include/asm-i386/pgtable.h     |  3
-rw-r--r--   include/asm-i386/processor.h   |  6
-rw-r--r--   include/asm-i386/smp.h         |  6
-rw-r--r--   include/asm-i386/system.h      | 42
9 files changed, 101 insertions, 11 deletions
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 509720be772a..c68557aa04b2 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -215,6 +215,27 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
 #define atomic_dec_return(v)  (atomic_sub_return(1,v))
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index fa11117d3cfa..4153d80e4d2b 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -119,6 +119,8 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
  */
 #define elf_read_implies_exec(ex, executable_stack)	(executable_stack != EXSTACK_DISABLE_X)
 
+struct task_struct;
+
 extern int dump_task_regs (struct task_struct *, elf_gregset_t *);
 extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
 extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *);
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 79dfab87135d..454440193eac 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -41,6 +41,12 @@ static __inline__ int ide_default_irq(unsigned long base)
 
 static __inline__ unsigned long ide_default_io_base(int index)
 {
+	/*
+	 * If PCI is present then it is not safe to poke around
+	 * the other legacy IDE ports. Only 0x1f0 and 0x170 are
+	 * defined compatibility mode ports for PCI. A user can
+	 * override this using ide= but we must default safe.
+	 */
 	if (pci_find_device(PCI_ANY_ID, PCI_ANY_ID, NULL) == NULL) {
 		switch(index) {
 			case 2: return 0x1e8;
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8b6d3a90cd78..ca916a892877 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -49,6 +49,23 @@ struct arch_specific_insn {
 	kprobe_opcode_t insn[MAX_INSN_SIZE];
 };
 
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+	unsigned long old_eflags;
+	unsigned long saved_eflags;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+	unsigned long kprobe_status;
+	unsigned long kprobe_old_eflags;
+	unsigned long kprobe_saved_eflags;
+	long *jprobe_saved_esp;
+	struct pt_regs jprobe_saved_regs;
+	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
 
 /* trap3/1 are intr gates for kprobes.  So, restore the status of IF,
  * if necessary, before executing the original int3/1 (trap) handler.
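
Illustrative sketch (not part of the patch above): the atomic_inc_not_zero() helper added to atomic.h is typically used to take a reference only while an object's refcount is still non-zero. The struct and function names here are invented for the example.

#include <asm/atomic.h>

struct object {
	atomic_t refcount;	/* hits 0 when the object is being torn down */
};

/*
 * Try to take a reference: succeeds (returns non-zero) only if the
 * refcount had not already dropped to zero.
 */
static inline int object_get(struct object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}
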
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h
index b85393094c83..f041d4495faf 100644
--- a/include/asm-i386/msi.h
+++ b/include/asm-i386/msi.h
@@ -10,13 +10,6 @@
 #include <mach_apic.h>
 
 #define LAST_DEVICE_VECTOR	232
-#define MSI_DEST_MODE		MSI_LOGICAL_MODE
-#define MSI_TARGET_CPU_SHIFT	12
-
-#ifdef CONFIG_SMP
-#define MSI_TARGET_CPU		logical_smp_processor_id()
-#else
-#define MSI_TARGET_CPU	cpu_to_logical_apicid(first_cpu(cpu_online_map))
-#endif
+#define MSI_TARGET_CPU_SHIFT	12
 
 #endif /* ASM_MSI_H */
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 03f3c8ac6383..088a945bf26b 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -25,6 +25,9 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
+struct mm_struct;
+struct vm_area_struct;
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 9cd4a05234a1..5c96cf6dcb39 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -720,4 +720,10 @@ extern void mtrr_bp_init(void);
 #define mtrr_bp_init() do {} while (0)
 #endif
 
+#ifdef CONFIG_X86_MCE
+extern void mcheck_init(struct cpuinfo_x86 *c);
+#else
+#define mcheck_init(c) do {} while(0)
+#endif
+
 #endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 13250199976d..61d3ab9db70c 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -45,6 +45,8 @@ extern void unlock_ipi_call_lock(void);
 #define MAX_APICID 256
 extern u8 x86_cpu_to_apicid[];
 
+#define cpu_physical_id(cpu)	x86_cpu_to_apicid[cpu]
+
 #ifdef CONFIG_HOTPLUG_CPU
 extern void cpu_exit_clear(void);
 extern void cpu_uninit(void);
@@ -92,6 +94,10 @@ extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
 #endif /* !__ASSEMBLY__ */
 
+#else /* CONFIG_SMP */
+
+#define cpu_physical_id(cpu)	boot_cpu_physical_apicid
+
 #define NO_PROC_ID	0xFF		/* No processor magic marker */
 
 #endif
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 97d52ac49e46..772f85da1206 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -263,6 +263,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
+#endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
@@ -291,10 +295,42 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
-#define cmpxchg(ptr,o,n)\
-	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-					(unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable running on 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
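
Illustrative sketch (not part of the patch): with cpu_physical_id() now defined in smp.h for both SMP and UP builds, a caller can look up the local APIC ID without carrying its own #ifdef CONFIG_SMP. The helper name below is hypothetical, and the caller is assumed to run with preemption disabled.

#include <linux/smp.h>	/* pulls in asm/smp.h on i386 */

/* APIC ID of the CPU we are currently running on. */
static int current_cpu_apicid(void)
{
	return cpu_physical_id(smp_processor_id());
}
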
+ */
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+		return cmpxchg_386_u8(ptr, old, new);
+	case 2:
+		return cmpxchg_386_u16(ptr, old, new);
+	case 4:
+		return cmpxchg_386_u32(ptr, old, new);
+	}
+	return old;
+}
+
+#define cmpxchg(ptr,o,n)						\
+({									\
+	__typeof__(*(ptr)) __ret;					\
+	if (likely(boot_cpu_data.x86 > 3))				\
+		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	else								\
+		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
+				(unsigned long)(n), sizeof(*(ptr)));	\
+	__ret;								\
+})
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
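
Illustrative sketch (not part of the patch): a typical caller of the cmpxchg() macro as it now stands in system.h. The same retry loop works whether the native cmpxchg instruction or the 80386 emulation path is chosen at run time; the counter variable and function name are made up for the example.

#include <asm/system.h>

static unsigned long shared_counter;

/* Lock-free increment: retry until nobody changed the word between
 * our read and the compare-and-exchange. */
static void shared_counter_inc(void)
{
	unsigned long old;

	do {
		old = shared_counter;
	} while (cmpxchg(&shared_counter, old, old + 1) != old);
}
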