Diffstat (limited to 'include/asm-i386')
243 files changed, 0 insertions, 22778 deletions
diff --git a/include/asm-i386/8253pit.h b/include/asm-i386/8253pit.h deleted file mode 100644 index 96c7c3592daf..000000000000 --- a/include/asm-i386/8253pit.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * 8253/8254 Programmable Interval Timer - */ - -#ifndef _8253PIT_H -#define _8253PIT_H - -#include <asm/timex.h> - -#define PIT_TICK_RATE CLOCK_TICK_RATE - -#endif diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild deleted file mode 100644 index cbf6e8f1087b..000000000000 --- a/include/asm-i386/Kbuild +++ /dev/null @@ -1,12 +0,0 @@ -include include/asm-generic/Kbuild.asm - -header-y += boot.h -header-y += debugreg.h -header-y += ldt.h -header-y += msr-index.h -header-y += ptrace-abi.h -header-y += ucontext.h - -unifdef-y += msr.h -unifdef-y += mtrr.h -unifdef-y += vm86.h diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h deleted file mode 100644 index 851a60f8258c..000000000000 --- a/include/asm-i386/a.out.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef __I386_A_OUT_H__ -#define __I386_A_OUT_H__ - -struct exec -{ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ - unsigned a_text; /* length of text, in bytes */ - unsigned a_data; /* length of data, in bytes */ - unsigned a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned a_syms; /* length of symbol table data in file, in bytes */ - unsigned a_entry; /* start address */ - unsigned a_trsize; /* length of relocation info for text, in bytes */ - unsigned a_drsize; /* length of relocation info for data, in bytes */ -}; - -#define N_TRSIZE(a) ((a).a_trsize) -#define N_DRSIZE(a) ((a).a_drsize) -#define N_SYMSIZE(a) ((a).a_syms) - -#ifdef __KERNEL__ - -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - -#endif - -#endif /* __A_OUT_GNU_H__ */ diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h deleted file mode 100644 index 125179adf044..000000000000 --- a/include/asm-i386/acpi.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * asm-i386/acpi.h - * - * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> - * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org> - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - */ - -#ifndef _ASM_ACPI_H -#define _ASM_ACPI_H - -#ifdef __KERNEL__ - -#include <acpi/pdc_intel.h> - -#include <asm/system.h> /* defines cmpxchg */ - -#define COMPILER_DEPENDENT_INT64 long long -#define COMPILER_DEPENDENT_UINT64 unsigned long long - -/* - * Calling conventions: - * - * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) - * ACPI_EXTERNAL_XFACE - External ACPI interfaces - * ACPI_INTERNAL_XFACE - Internal ACPI interfaces - * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces - */ -#define ACPI_SYSTEM_XFACE -#define ACPI_EXTERNAL_XFACE -#define ACPI_INTERNAL_XFACE -#define ACPI_INTERNAL_VAR_XFACE - -/* Asm macros */ - -#define ACPI_ASM_MACROS -#define BREAKPOINT3 -#define ACPI_DISABLE_IRQS() local_irq_disable() -#define ACPI_ENABLE_IRQS() local_irq_enable() -#define ACPI_FLUSH_CPU_CACHE() wbinvd() - -int __acpi_acquire_global_lock(unsigned int *lock); -int __acpi_release_global_lock(unsigned int *lock); - -#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) - -#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ - ((Acq) = __acpi_release_global_lock(&facs->global_lock)) - -/* - * Math helper asm macros - */ -#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \ - asm("divl %2;" \ - :"=a"(q32), "=d"(r32) \ - :"r"(d32), \ - "0"(n_lo), "1"(n_hi)) - - -#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \ - asm("shrl $1,%2;" \ - "rcrl $1,%3;" \ - :"=r"(n_hi), "=r"(n_lo) \ - :"0"(n_hi), "1"(n_lo)) - -#ifdef CONFIG_X86_IO_APIC -extern void check_acpi_pci(void); -#else -static inline void check_acpi_pci(void) { } -#endif - -#ifdef CONFIG_ACPI -extern int acpi_lapic; -extern int acpi_ioapic; -extern int acpi_noirq; -extern int acpi_strict; -extern int acpi_disabled; -extern int acpi_ht; -extern int acpi_pci_disabled; -static inline void disable_acpi(void) -{ - acpi_disabled = 1; - acpi_ht = 0; - acpi_pci_disabled = 1; - acpi_noirq = 1; -} - -/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */ -#define FIX_ACPI_PAGES 4 - -extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); - -#ifdef CONFIG_X86_IO_APIC -extern int acpi_skip_timer_override; -extern int acpi_use_timer_override; -#endif - -static inline void acpi_noirq_set(void) { acpi_noirq = 1; } -static inline void acpi_disable_pci(void) -{ - acpi_pci_disabled = 1; - acpi_noirq_set(); -} -extern int acpi_irq_balance_set(char *str); - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -extern void acpi_restore_state_mem(void); - -extern unsigned long acpi_wakeup_address; - -/* early initialization routine */ -extern void acpi_reserve_bootmem(void); - -#else /* !CONFIG_ACPI */ - -#define acpi_lapic 0 -#define acpi_ioapic 0 -static inline void acpi_noirq_set(void) { } -static inline void acpi_disable_pci(void) { } -static inline void disable_acpi(void) { } - -#endif /* !CONFIG_ACPI */ - -#define ARCH_HAS_POWER_INIT 1 - -#endif /*__KERNEL__*/ - -#endif /*_ASM_ACPI_H*/ diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h deleted file mode 100644 index 6af173dbf123..000000000000 --- a/include/asm-i386/agp.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef AGP_H -#define AGP_H 1 - -#include <asm/pgtable.h> -#include 
<asm/cacheflush.h> - -/* - * Functions to keep the agpgart mappings coherent with the MMU. - * The GART gives the CPU a physical alias of pages in memory. The alias region is - * mapped uncacheable. Make sure there are no conflicting mappings - * with different cachability attributes for the same page. This avoids - * data corruption on some CPUs. - */ - -/* Caller's responsibility to call global_flush_tlb() for - * performance reasons */ -#define map_page_into_agp(page) change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) -#define unmap_page_from_agp(page) change_page_attr(page, 1, PAGE_KERNEL) -#define flush_agp_mappings() global_flush_tlb() - -/* Could use CLFLUSH here if the cpu supports it. But then it would - need to be called for each cacheline of the whole page so it may not be - worth it. Would need a page for it. */ -#define flush_agp_cache() wbinvd() - -/* Convert a physical address to an address suitable for the GART. */ -#define phys_to_gart(x) (x) -#define gart_to_phys(x) (x) - -/* GATT allocation. Returns/accepts GATT kernel virtual address. */ -#define alloc_gatt_pages(order) \ - ((char *)__get_free_pages(GFP_KERNEL, (order))) -#define free_gatt_pages(table, order) \ - free_pages((unsigned long)(table), (order)) - -#endif diff --git a/include/asm-i386/alternative-asm.i b/include/asm-i386/alternative-asm.i deleted file mode 100644 index f0510209ccbe..000000000000 --- a/include/asm-i386/alternative-asm.i +++ /dev/null @@ -1,12 +0,0 @@ -#ifdef CONFIG_SMP - .macro LOCK_PREFIX -1: lock - .section .smp_locks,"a" - .align 4 - .long 1b - .previous - .endm -#else - .macro LOCK_PREFIX - .endm -#endif diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h deleted file mode 100644 index bda6c810c0f4..000000000000 --- a/include/asm-i386/alternative.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef _I386_ALTERNATIVE_H -#define _I386_ALTERNATIVE_H - -#include <asm/types.h> -#include <linux/stddef.h> -#include <linux/types.h> - -struct alt_instr { - u8 *instr; /* original instruction */ - u8 *replacement; - u8 cpuid; /* cpuid bit set for replacement */ - u8 instrlen; /* length of original instruction */ - u8 replacementlen; /* length of new instruction, <= instrlen */ - u8 pad; -}; - -extern void alternative_instructions(void); -extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end); - -struct module; -#ifdef CONFIG_SMP -extern void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end); -extern void alternatives_smp_module_del(struct module *mod); -extern void alternatives_smp_switch(int smp); -#else -static inline void alternatives_smp_module_add(struct module *mod, char *name, - void *locks, void *locks_end, - void *text, void *text_end) {} -static inline void alternatives_smp_module_del(struct module *mod) {} -static inline void alternatives_smp_switch(int smp) {} -#endif /* CONFIG_SMP */ - -/* - * Alternative instructions for different CPU types or capabilities. - * - * This allows to use optimized instructions even on generic binary - * kernels. - * - * length of oldinstr must be longer or equal the length of newinstr - * It can be padded with nops as needed. - * - * For non barrier like inlines please define new variants - * without volatile and memory clobber. 
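For context, a minimal, self-contained model of the patching step that apply_alternatives() performs on this table: if the running CPU advertises the required feature, the replacement bytes are copied over the original instruction and any leftover space is padded with NOPs. All fake_-prefixed names below are hypothetical; this is a sketch of the mechanism, not the kernel's actual implementation.

    #include <string.h>

    struct fake_alt_instr {
        unsigned char *instr;          /* original instruction bytes */
        unsigned char *replacement;    /* replacement bytes */
        unsigned char cpuid;           /* feature bit required */
        unsigned char instrlen;        /* length of original */
        unsigned char replacementlen;  /* length of replacement, <= instrlen */
    };

    static void fake_apply_alternatives(struct fake_alt_instr *start,
                                        struct fake_alt_instr *end,
                                        unsigned long cpu_features)
    {
        struct fake_alt_instr *a;

        for (a = start; a < end; a++) {
            if (!((cpu_features >> a->cpuid) & 1UL))
                continue;  /* CPU lacks the feature: keep the old instruction */
            memcpy(a->instr, a->replacement, a->replacementlen);
            memset(a->instr + a->replacementlen, 0x90,  /* 0x90 = NOP */
                   a->instrlen - a->replacementlen);
        }
    }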
- */ -#define alternative(oldinstr, newinstr, feature) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */\ - ".previous" :: "i" (feature) : "memory") - -/* - * Alternative inline assembly with input. - * - * Pecularities: - * No memory clobber here. - * Argument numbers start with 1. - * Best is to use constraints that are fixed size (like (%1) ... "r") - * If you use variable sized constraints like "m" or "g" in the - * replacement maake sure to pad to the worst case length. - */ -#define alternative_input(oldinstr, newinstr, feature, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c0\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */\ - ".previous" :: "i" (feature), ##input) - -/* Like alternative_input, but with a single output argument */ -#define alternative_io(oldinstr, newinstr, feature, output, input...) \ - asm volatile ("661:\n\t" oldinstr "\n662:\n" \ - ".section .altinstructions,\"a\"\n" \ - " .align 4\n" \ - " .long 661b\n" /* label */ \ - " .long 663f\n" /* new instruction */ \ - " .byte %c[feat]\n" /* feature bit */ \ - " .byte 662b-661b\n" /* sourcelen */ \ - " .byte 664f-663f\n" /* replacementlen */ \ - ".previous\n" \ - ".section .altinstr_replacement,\"ax\"\n" \ - "663:\n\t" newinstr "\n664:\n" /* replacement */ \ - ".previous" : output : [feat] "i" (feature), ##input) - -/* - * use this macro(s) if you need more than one output parameter - * in alternative_io - */ -#define ASM_OUTPUT2(a, b) a, b - -/* - * Alternative inline assembly for SMP. - * - * The LOCK_PREFIX macro defined here replaces the LOCK and - * LOCK_PREFIX macros used everywhere in the source tree. - * - * SMP alternatives use the same data structures as the other - * alternatives and the X86_FEATURE_UP flag to indicate the case of a - * UP system running a SMP kernel. The existing apply_alternatives() - * works fine for patching a SMP kernel for UP. - * - * The SMP alternative tables can be kept after boot and contain both - * UP and SMP versions of the instructions to allow switching back to - * SMP at runtime, when hotplugging in a new CPU, which is especially - * useful in virtualized environments. - * - * The very common lock prefix is handled as special case in a - * separate table which is a pure address list without replacement ptr - * and size information. That keeps the table sizes small. - */ - -#ifdef CONFIG_SMP -#define LOCK_PREFIX \ - ".section .smp_locks,\"a\"\n" \ - " .align 4\n" \ - " .long 661f\n" /* address */ \ - ".previous\n" \ - "661:\n\tlock; " - -#else /* ! 
CONFIG_SMP */ -#define LOCK_PREFIX "" -#endif - -struct paravirt_patch_site; -#ifdef CONFIG_PARAVIRT -void apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end); -#else -static inline void -apply_paravirt(struct paravirt_patch_site *start, - struct paravirt_patch_site *end) -{} -#define __parainstructions NULL -#define __parainstructions_end NULL -#endif - -extern void text_poke(void *addr, unsigned char *opcode, int len); - -#endif /* _I386_ALTERNATIVE_H */ diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h deleted file mode 100644 index 4091b33dcb10..000000000000 --- a/include/asm-i386/apic.h +++ /dev/null @@ -1,126 +0,0 @@ -#ifndef __ASM_APIC_H -#define __ASM_APIC_H - -#include <linux/pm.h> -#include <linux/delay.h> -#include <asm/fixmap.h> -#include <asm/apicdef.h> -#include <asm/processor.h> -#include <asm/system.h> - -#define Dprintk(x...) - -/* - * Debugging macros - */ -#define APIC_QUIET 0 -#define APIC_VERBOSE 1 -#define APIC_DEBUG 2 - -extern int apic_verbosity; - -/* - * Define the default level of output to be very little - * This can be turned up by using apic=verbose for more - * information and apic=debug for _lots_ of information. - * apic_verbosity is defined in apic.c - */ -#define apic_printk(v, s, a...) do { \ - if ((v) <= apic_verbosity) \ - printk(s, ##a); \ - } while (0) - - -extern void generic_apic_probe(void); - -#ifdef CONFIG_X86_LOCAL_APIC - -/* - * Basic functions accessing APICs. - */ -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define apic_write native_apic_write -#define apic_write_atomic native_apic_write_atomic -#define apic_read native_apic_read -#define setup_boot_clock setup_boot_APIC_clock -#define setup_secondary_clock setup_secondary_APIC_clock -#endif - -static __inline fastcall void native_apic_write(unsigned long reg, - unsigned long v) -{ - *((volatile unsigned long *)(APIC_BASE+reg)) = v; -} - -static __inline fastcall void native_apic_write_atomic(unsigned long reg, - unsigned long v) -{ - xchg((volatile unsigned long *)(APIC_BASE+reg), v); -} - -static __inline fastcall unsigned long native_apic_read(unsigned long reg) -{ - return *((volatile unsigned long *)(APIC_BASE+reg)); -} - -void apic_wait_icr_idle(void); -unsigned long safe_apic_wait_icr_idle(void); -int get_physical_broadcast(void); - -#ifdef CONFIG_X86_GOOD_APIC -# define FORCE_READ_AROUND_WRITE 0 -# define apic_read_around(x) -# define apic_write_around(x,y) apic_write((x),(y)) -#else -# define FORCE_READ_AROUND_WRITE 1 -# define apic_read_around(x) apic_read(x) -# define apic_write_around(x,y) apic_write_atomic((x),(y)) -#endif - -static inline void ack_APIC_irq(void) -{ - /* - * ack_APIC_irq() actually gets compiled as a single instruction: - * - a single rmw on Pentium/82489DX - * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC) - * ... yummie. 
- */ - - /* Docs say use 0 for future compatibility */ - apic_write_around(APIC_EOI, 0); -} - -extern int lapic_get_maxlvt(void); -extern void clear_local_APIC(void); -extern void connect_bsp_APIC (void); -extern void disconnect_bsp_APIC (int virt_wire_setup); -extern void disable_local_APIC (void); -extern void lapic_shutdown (void); -extern int verify_local_APIC (void); -extern void cache_APIC_registers (void); -extern void sync_Arb_IDs (void); -extern void init_bsp_APIC (void); -extern void setup_local_APIC (void); -extern void init_apic_mappings (void); -extern void smp_local_timer_interrupt (void); -extern void setup_boot_APIC_clock (void); -extern void setup_secondary_APIC_clock (void); -extern int APIC_init_uniprocessor (void); - -extern void enable_NMI_through_LVT0 (void * dummy); - -#define ARCH_APICTIMER_STOPS_ON_C3 1 - -extern int timer_over_8254; -extern int local_apic_timer_c2_ok; - -extern int local_apic_timer_disabled; - -#else /* !CONFIG_X86_LOCAL_APIC */ -static inline void lapic_shutdown(void) { } - -#endif /* !CONFIG_X86_LOCAL_APIC */ - -#endif /* __ASM_APIC_H */ diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h deleted file mode 100644 index 9f6995341fdc..000000000000 --- a/include/asm-i386/apicdef.h +++ /dev/null @@ -1,375 +0,0 @@ -#ifndef __ASM_APICDEF_H -#define __ASM_APICDEF_H - -/* - * Constants for various Intel APICs. (local APIC, IOAPIC, etc.) - * - * Alan Cox <Alan.Cox@linux.org>, 1995. - * Ingo Molnar <mingo@redhat.com>, 1999, 2000 - */ - -#define APIC_DEFAULT_PHYS_BASE 0xfee00000 - -#define APIC_ID 0x20 -#define APIC_LVR 0x30 -#define APIC_LVR_MASK 0xFF00FF -#define GET_APIC_VERSION(x) ((x)&0xFF) -#define GET_APIC_MAXLVT(x) (((x)>>16)&0xFF) -#define APIC_INTEGRATED(x) ((x)&0xF0) -#define APIC_XAPIC(x) ((x) >= 0x14) -#define APIC_TASKPRI 0x80 -#define APIC_TPRI_MASK 0xFF -#define APIC_ARBPRI 0x90 -#define APIC_ARBPRI_MASK 0xFF -#define APIC_PROCPRI 0xA0 -#define APIC_EOI 0xB0 -#define APIC_EIO_ACK 0x0 /* Write this to the EOI register */ -#define APIC_RRR 0xC0 -#define APIC_LDR 0xD0 -#define APIC_LDR_MASK (0xFF<<24) -#define GET_APIC_LOGICAL_ID(x) (((x)>>24)&0xFF) -#define SET_APIC_LOGICAL_ID(x) (((x)<<24)) -#define APIC_ALL_CPUS 0xFF -#define APIC_DFR 0xE0 -#define APIC_DFR_CLUSTER 0x0FFFFFFFul -#define APIC_DFR_FLAT 0xFFFFFFFFul -#define APIC_SPIV 0xF0 -#define APIC_SPIV_FOCUS_DISABLED (1<<9) -#define APIC_SPIV_APIC_ENABLED (1<<8) -#define APIC_ISR 0x100 -#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. 
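APIC_ISR_NR above reflects that the 256-bit in-service, trigger-mode and request registers are exposed as eight 32-bit registers spaced 0x10 apart (see the isr[8], tmr[8] and irr[8] arrays in struct local_apic further down). A small hypothetical helper, for illustration only, showing how a vector number maps onto that layout:

    /* Hypothetical, not part of the header: which ISR word and bit hold
     * the state of interrupt vector v (0..255)? */
    static unsigned long fake_isr_reg_for_vector(unsigned int v)
    {
        return 0x100 /* APIC_ISR */ + (v / 32) * 0x10;
    }

    static unsigned int fake_isr_bit_for_vector(unsigned int v)
    {
        return v % 32;
    }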
*/ -#define APIC_TMR 0x180 -#define APIC_IRR 0x200 -#define APIC_ESR 0x280 -#define APIC_ESR_SEND_CS 0x00001 -#define APIC_ESR_RECV_CS 0x00002 -#define APIC_ESR_SEND_ACC 0x00004 -#define APIC_ESR_RECV_ACC 0x00008 -#define APIC_ESR_SENDILL 0x00020 -#define APIC_ESR_RECVILL 0x00040 -#define APIC_ESR_ILLREGA 0x00080 -#define APIC_ICR 0x300 -#define APIC_DEST_SELF 0x40000 -#define APIC_DEST_ALLINC 0x80000 -#define APIC_DEST_ALLBUT 0xC0000 -#define APIC_ICR_RR_MASK 0x30000 -#define APIC_ICR_RR_INVALID 0x00000 -#define APIC_ICR_RR_INPROG 0x10000 -#define APIC_ICR_RR_VALID 0x20000 -#define APIC_INT_LEVELTRIG 0x08000 -#define APIC_INT_ASSERT 0x04000 -#define APIC_ICR_BUSY 0x01000 -#define APIC_DEST_LOGICAL 0x00800 -#define APIC_DM_FIXED 0x00000 -#define APIC_DM_LOWEST 0x00100 -#define APIC_DM_SMI 0x00200 -#define APIC_DM_REMRD 0x00300 -#define APIC_DM_NMI 0x00400 -#define APIC_DM_INIT 0x00500 -#define APIC_DM_STARTUP 0x00600 -#define APIC_DM_EXTINT 0x00700 -#define APIC_VECTOR_MASK 0x000FF -#define APIC_ICR2 0x310 -#define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF) -#define SET_APIC_DEST_FIELD(x) ((x)<<24) -#define APIC_LVTT 0x320 -#define APIC_LVTTHMR 0x330 -#define APIC_LVTPC 0x340 -#define APIC_LVT0 0x350 -#define APIC_LVT_TIMER_BASE_MASK (0x3<<18) -#define GET_APIC_TIMER_BASE(x) (((x)>>18)&0x3) -#define SET_APIC_TIMER_BASE(x) (((x)<<18)) -#define APIC_TIMER_BASE_CLKIN 0x0 -#define APIC_TIMER_BASE_TMBASE 0x1 -#define APIC_TIMER_BASE_DIV 0x2 -#define APIC_LVT_TIMER_PERIODIC (1<<17) -#define APIC_LVT_MASKED (1<<16) -#define APIC_LVT_LEVEL_TRIGGER (1<<15) -#define APIC_LVT_REMOTE_IRR (1<<14) -#define APIC_INPUT_POLARITY (1<<13) -#define APIC_SEND_PENDING (1<<12) -#define APIC_MODE_MASK 0x700 -#define GET_APIC_DELIVERY_MODE(x) (((x)>>8)&0x7) -#define SET_APIC_DELIVERY_MODE(x,y) (((x)&~0x700)|((y)<<8)) -#define APIC_MODE_FIXED 0x0 -#define APIC_MODE_NMI 0x4 -#define APIC_MODE_EXTINT 0x7 -#define APIC_LVT1 0x360 -#define APIC_LVTERR 0x370 -#define APIC_TMICT 0x380 -#define APIC_TMCCT 0x390 -#define APIC_TDCR 0x3E0 -#define APIC_TDR_DIV_TMBASE (1<<2) -#define APIC_TDR_DIV_1 0xB -#define APIC_TDR_DIV_2 0x0 -#define APIC_TDR_DIV_4 0x1 -#define APIC_TDR_DIV_8 0x2 -#define APIC_TDR_DIV_16 0x3 -#define APIC_TDR_DIV_32 0x8 -#define APIC_TDR_DIV_64 0x9 -#define APIC_TDR_DIV_128 0xA - -#define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) - -#define MAX_IO_APICS 64 - -/* - * the local APIC register structure, memory mapped. Not terribly well - * tested, but we might eventually use this one in the future - the - * problem why we cannot use it right now is the P5 APIC, it has an - * errata which cannot take 8-bit reads and writes, only 32-bit ones ... 
- */ -#define u32 unsigned int - - -struct local_apic { - -/*000*/ struct { u32 __reserved[4]; } __reserved_01; - -/*010*/ struct { u32 __reserved[4]; } __reserved_02; - -/*020*/ struct { /* APIC ID Register */ - u32 __reserved_1 : 24, - phys_apic_id : 4, - __reserved_2 : 4; - u32 __reserved[3]; - } id; - -/*030*/ const - struct { /* APIC Version Register */ - u32 version : 8, - __reserved_1 : 8, - max_lvt : 8, - __reserved_2 : 8; - u32 __reserved[3]; - } version; - -/*040*/ struct { u32 __reserved[4]; } __reserved_03; - -/*050*/ struct { u32 __reserved[4]; } __reserved_04; - -/*060*/ struct { u32 __reserved[4]; } __reserved_05; - -/*070*/ struct { u32 __reserved[4]; } __reserved_06; - -/*080*/ struct { /* Task Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } tpr; - -/*090*/ const - struct { /* Arbitration Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } apr; - -/*0A0*/ const - struct { /* Processor Priority Register */ - u32 priority : 8, - __reserved_1 : 24; - u32 __reserved_2[3]; - } ppr; - -/*0B0*/ struct { /* End Of Interrupt Register */ - u32 eoi; - u32 __reserved[3]; - } eoi; - -/*0C0*/ struct { u32 __reserved[4]; } __reserved_07; - -/*0D0*/ struct { /* Logical Destination Register */ - u32 __reserved_1 : 24, - logical_dest : 8; - u32 __reserved_2[3]; - } ldr; - -/*0E0*/ struct { /* Destination Format Register */ - u32 __reserved_1 : 28, - model : 4; - u32 __reserved_2[3]; - } dfr; - -/*0F0*/ struct { /* Spurious Interrupt Vector Register */ - u32 spurious_vector : 8, - apic_enabled : 1, - focus_cpu : 1, - __reserved_2 : 22; - u32 __reserved_3[3]; - } svr; - -/*100*/ struct { /* In Service Register */ -/*170*/ u32 bitfield; - u32 __reserved[3]; - } isr [8]; - -/*180*/ struct { /* Trigger Mode Register */ -/*1F0*/ u32 bitfield; - u32 __reserved[3]; - } tmr [8]; - -/*200*/ struct { /* Interrupt Request Register */ -/*270*/ u32 bitfield; - u32 __reserved[3]; - } irr [8]; - -/*280*/ union { /* Error Status Register */ - struct { - u32 send_cs_error : 1, - receive_cs_error : 1, - send_accept_error : 1, - receive_accept_error : 1, - __reserved_1 : 1, - send_illegal_vector : 1, - receive_illegal_vector : 1, - illegal_register_address : 1, - __reserved_2 : 24; - u32 __reserved_3[3]; - } error_bits; - struct { - u32 errors; - u32 __reserved_3[3]; - } all_errors; - } esr; - -/*290*/ struct { u32 __reserved[4]; } __reserved_08; - -/*2A0*/ struct { u32 __reserved[4]; } __reserved_09; - -/*2B0*/ struct { u32 __reserved[4]; } __reserved_10; - -/*2C0*/ struct { u32 __reserved[4]; } __reserved_11; - -/*2D0*/ struct { u32 __reserved[4]; } __reserved_12; - -/*2E0*/ struct { u32 __reserved[4]; } __reserved_13; - -/*2F0*/ struct { u32 __reserved[4]; } __reserved_14; - -/*300*/ struct { /* Interrupt Command Register 1 */ - u32 vector : 8, - delivery_mode : 3, - destination_mode : 1, - delivery_status : 1, - __reserved_1 : 1, - level : 1, - trigger : 1, - __reserved_2 : 2, - shorthand : 2, - __reserved_3 : 12; - u32 __reserved_4[3]; - } icr1; - -/*310*/ struct { /* Interrupt Command Register 2 */ - union { - u32 __reserved_1 : 24, - phys_dest : 4, - __reserved_2 : 4; - u32 __reserved_3 : 24, - logical_dest : 8; - } dest; - u32 __reserved_4[3]; - } icr2; - -/*320*/ struct { /* LVT - Timer */ - u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - timer_mode : 1, - __reserved_3 : 14; - u32 __reserved_4[3]; - } lvt_timer; - -/*330*/ struct { /* LVT - Thermal Sensor */ - u32 vector : 8, - 
delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_thermal; - -/*340*/ struct { /* LVT - Performance Counter */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_pc; - -/*350*/ struct { /* LVT - LINT0 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint0; - -/*360*/ struct { /* LVT - LINT1 */ - u32 vector : 8, - delivery_mode : 3, - __reserved_1 : 1, - delivery_status : 1, - polarity : 1, - remote_irr : 1, - trigger : 1, - mask : 1, - __reserved_2 : 15; - u32 __reserved_3[3]; - } lvt_lint1; - -/*370*/ struct { /* LVT - Error */ - u32 vector : 8, - __reserved_1 : 4, - delivery_status : 1, - __reserved_2 : 3, - mask : 1, - __reserved_3 : 15; - u32 __reserved_4[3]; - } lvt_error; - -/*380*/ struct { /* Timer Initial Count Register */ - u32 initial_count; - u32 __reserved_2[3]; - } timer_icr; - -/*390*/ const - struct { /* Timer Current Count Register */ - u32 curr_count; - u32 __reserved_2[3]; - } timer_ccr; - -/*3A0*/ struct { u32 __reserved[4]; } __reserved_16; - -/*3B0*/ struct { u32 __reserved[4]; } __reserved_17; - -/*3C0*/ struct { u32 __reserved[4]; } __reserved_18; - -/*3D0*/ struct { u32 __reserved[4]; } __reserved_19; - -/*3E0*/ struct { /* Timer Divide Configuration Register */ - u32 divisor : 4, - __reserved_1 : 28; - u32 __reserved_2[3]; - } timer_dcr; - -/*3F0*/ struct { u32 __reserved[4]; } __reserved_20; - -} __attribute__ ((packed)); - -#undef u32 - -#endif diff --git a/include/asm-i386/arch_hooks.h b/include/asm-i386/arch_hooks.h deleted file mode 100644 index a8c1fca9726d..000000000000 --- a/include/asm-i386/arch_hooks.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _ASM_ARCH_HOOKS_H -#define _ASM_ARCH_HOOKS_H - -#include <linux/interrupt.h> - -/* - * linux/include/asm/arch_hooks.h - * - * define the architecture specific hooks - */ - -/* these aren't arch hooks, they are generic routines - * that can be used by the hooks */ -extern void init_ISA_irqs(void); -extern void apic_intr_init(void); -extern void smp_intr_init(void); -extern irqreturn_t timer_interrupt(int irq, void *dev_id); - -/* these are the defined hooks */ -extern void intr_init_hook(void); -extern void pre_intr_init_hook(void); -extern void pre_setup_arch_hook(void); -extern void trap_init_hook(void); -extern void time_init_hook(void); -extern void mca_nmi_hook(void); - -extern int setup_early_printk(char *); -extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2))); - -#endif diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h deleted file mode 100644 index 437aac801711..000000000000 --- a/include/asm-i386/atomic.h +++ /dev/null @@ -1,266 +0,0 @@ -#ifndef __ARCH_I386_ATOMIC__ -#define __ARCH_I386_ATOMIC__ - -#include <linux/compiler.h> -#include <asm/processor.h> -#include <asm/cmpxchg.h> - -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - */ - -/* - * Make sure gcc doesn't try to be clever and move things around - * on us. We need to use _exactly_ the address the user gave us, - * not some alias that contains the same information. 
- */ -typedef struct { int counter; } atomic_t; - -#define ATOMIC_INIT(i) { (i) } - -/** - * atomic_read - read atomic variable - * @v: pointer of type atomic_t - * - * Atomically reads the value of @v. - */ -#define atomic_read(v) ((v)->counter) - -/** - * atomic_set - set atomic variable - * @v: pointer of type atomic_t - * @i: required value - * - * Atomically sets the value of @v to @i. - */ -#define atomic_set(v,i) (((v)->counter) = (i)) - -/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v. - */ -static __inline__ void atomic_add(int i, atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"+m" (v->counter) - :"ir" (i)); -} - -/** - * atomic_sub - subtract integer from atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v. - */ -static __inline__ void atomic_sub(int i, atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"+m" (v->counter) - :"ir" (i)); -} - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"+m" (v->counter)); -} - -/** - * atomic_dec - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"+m" (v->counter)); -} - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. 
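A typical use of atomic_dec_and_test() is reference counting: only the caller that drops the last reference sees the counter hit zero, so exactly one path frees the object. An illustrative pattern, with a hypothetical fake_obj type, built on the operations declared in this header:

    #include <asm/atomic.h>
    #include <linux/slab.h>

    struct fake_obj {
        atomic_t refcnt;   /* set up with ATOMIC_INIT(1) at creation */
        /* ... payload ... */
    };

    static void fake_obj_put(struct fake_obj *o)
    {
        /* atomic_dec_and_test() returns true only for the 1 -> 0
         * transition, so exactly one caller reaches the kfree(). */
        if (atomic_dec_and_test(&o->refcnt))
            kfree(o);
    }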
- */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * atomic_add_return - add integer and return - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns @i + @v - */ -static __inline__ int atomic_add_return(int i, atomic_t *v) -{ - int __i; -#ifdef CONFIG_M386 - unsigned long flags; - if(unlikely(boot_cpu_data.x86 <= 3)) - goto no_xadd; -#endif - /* Modern 486+ processor */ - __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1" - :"+r" (i), "+m" (v->counter) - : : "memory"); - return i + __i; - -#ifdef CONFIG_M386 -no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); - __i = atomic_read(v); - atomic_set(v, i + __i); - local_irq_restore(flags); - return i + __i; -#endif -} - -/** - * atomic_sub_return - subtract integer and return - * @v: pointer of type atomic_t - * @i: integer value to subtract - * - * Atomically subtracts @i from @v and returns @v - @i - */ -static __inline__ int atomic_sub_return(int i, atomic_t *v) -{ - return atomic_add_return(-i,v); -} - -#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) - -/** - * atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns non-zero if @v was not @u, and zero otherwise. - */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; - c = atomic_read(v); - for (;;) { - if (unlikely(c == (u))) - break; - old = atomic_cmpxchg((v), c, c + (a)); - if (likely(old == c)) - break; - c = old; - } - return c != (u); -} - -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) - -/* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") - -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" (mask),"m" (*(addr)) : "memory") - -/* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - -#include <asm-generic/atomic.h> -#endif diff --git a/include/asm-i386/auxvec.h b/include/asm-i386/auxvec.h deleted file mode 100644 index 395e13016bfb..000000000000 --- a/include/asm-i386/auxvec.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef __ASMi386_AUXVEC_H -#define __ASMi386_AUXVEC_H - -/* - * Architecture-neutral AT_ values in 0-17, leave some room - * for more of them, start the x86-specific ones at 32. - */ -#define AT_SYSINFO 32 -#define AT_SYSINFO_EHDR 33 - -#endif diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h deleted file mode 100644 index a20fe9822f60..000000000000 --- a/include/asm-i386/bitops.h +++ /dev/null @@ -1,423 +0,0 @@ -#ifndef _I386_BITOPS_H -#define _I386_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. 
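Note that the xadd-based atomic_add_return() above returns the counter value after the addition: xadd itself hands back the pre-addition value, which is why the result is formed as i + __i. A portable C11 sketch of the same contract, using a hypothetical name, purely for illustration:

    #include <stdatomic.h>

    static int sketch_atomic_add_return(int i, atomic_int *v)
    {
        /* atomic_fetch_add() returns the old value; adding i yields the
         * post-addition value, mirroring "return i + __i" above. */
        return atomic_fetch_add(v, i) + i;
    }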
- */ - -#include <linux/compiler.h> -#include <asm/alternative.h> - -/* - * These have to be done with inline assembly: that way the bit-setting - * is guaranteed to be atomic. All bit operations return 0 if the bit - * was cleared before the operation and != 0 if it was not. - * - * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). - */ - -#define ADDR (*(volatile long *) addr) - -/** - * set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * - * Note: there are no guarantees that this function will not be reordered - * on non x86 architectures, so if you are writing portable code, - * make sure not to rely on its reordering guarantees. - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void set_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * __set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __set_bit(int nr, volatile unsigned long * addr) -{ - __asm__( - "btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * clear_bit() is atomic and may not be reordered. However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() - * in order to ensure changes are visible on other processors. - */ -static inline void clear_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -static inline void __clear_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( - "btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() - -/** - * __change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static inline void __change_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( - "btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. It may be - * reordered on other architectures than x86. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void change_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__( LOCK_PREFIX - "btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr)); -} - -/** - * test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. 
- * It may be reordered on other architectures than x86. - * It also implies a memory barrier. - */ -static inline int test_and_set_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * __test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_set_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__( - "btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr)); - return oldbit; -} - -/** - * test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It can be reorderdered on other architectures other than x86. - * It also implies a memory barrier. - */ -static inline int test_and_clear_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * __test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) -{ - int oldbit; - - __asm__( - "btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr)); - return oldbit; -} - -/* WARNING: non atomic and it can be reordered! */ -static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) -{ - int oldbit; - - __asm__ __volatile__( - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int test_and_change_bit(int nr, volatile unsigned long* addr) -{ - int oldbit; - - __asm__ __volatile__( LOCK_PREFIX - "btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -#if 0 /* Fool kernel-doc since it doesn't do macros yet */ -/** - * test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static int test_bit(int nr, const volatile void * addr); -#endif - -static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr) -{ - return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0; -} - -static inline int variable_test_bit(int nr, const volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__( - "btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (ADDR),"Ir" (nr)); - return oldbit; -} - -#define test_bit(nr,addr) \ -(__builtin_constant_p(nr) ? 
\ - constant_test_bit((nr),(addr)) : \ - variable_test_bit((nr),(addr))) - -#undef ADDR - -/** - * find_first_zero_bit - find the first zero bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first zero bit, not the number of the byte - * containing a bit. - */ -static inline int find_first_zero_bit(const unsigned long *addr, unsigned size) -{ - int d0, d1, d2; - int res; - - if (!size) - return 0; - /* This looks at memory. Mark it volatile to tell gcc not to move it around */ - __asm__ __volatile__( - "movl $-1,%%eax\n\t" - "xorl %%edx,%%edx\n\t" - "repe; scasl\n\t" - "je 1f\n\t" - "xorl -4(%%edi),%%eax\n\t" - "subl $4,%%edi\n\t" - "bsfl %%eax,%%edx\n" - "1:\tsubl %%ebx,%%edi\n\t" - "shll $3,%%edi\n\t" - "addl %%edi,%%edx" - :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2) - :"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory"); - return res; -} - -/** - * find_next_zero_bit - find the first zero bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -int find_next_zero_bit(const unsigned long *addr, int size, int offset); - -/** - * __ffs - find first bit in word. - * @word: The word to search - * - * Undefined if no bit exists, so code should check against 0 first. - */ -static inline unsigned long __ffs(unsigned long word) -{ - __asm__("bsfl %1,%0" - :"=r" (word) - :"rm" (word)); - return word; -} - -/** - * find_first_bit - find the first set bit in a memory region - * @addr: The address to start the search at - * @size: The maximum size to search - * - * Returns the bit-number of the first set bit, not the number of the byte - * containing a bit. - */ -static inline unsigned find_first_bit(const unsigned long *addr, unsigned size) -{ - unsigned x = 0; - - while (x < size) { - unsigned long val = *addr++; - if (val) - return __ffs(val) + x; - x += (sizeof(*addr)<<3); - } - return x; -} - -/** - * find_next_bit - find the first set bit in a memory region - * @addr: The address to base the search on - * @offset: The bitnumber to start searching at - * @size: The maximum size to search - */ -int find_next_bit(const unsigned long *addr, int size, int offset); - -/** - * ffz - find first zero in word. - * @word: The word to search - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ -static inline unsigned long ffz(unsigned long word) -{ - __asm__("bsfl %1,%0" - :"=r" (word) - :"r" (~word)); - return word; -} - -#ifdef __KERNEL__ - -#include <asm-generic/bitops/sched.h> - -/** - * ffs - find first bit set - * @x: the word to search - * - * This is defined the same way as - * the libc and compiler builtin ffs routines, therefore - * differs in spirit from the above ffz() (man ffs). - */ -static inline int ffs(int x) -{ - int r; - - __asm__("bsfl %1,%0\n\t" - "jnz 1f\n\t" - "movl $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r+1; -} - -/** - * fls - find last bit set - * @x: the word to search - * - * This is defined the same way as ffs(). 
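For reference, portable versions of the two contracts (hypothetical sketch_ names, illustration only): ffs() returns the 1-based position of the least significant set bit and 0 when no bit is set, and fls() returns the 1-based position of the most significant set bit, again 0 for zero input, which is exactly what the bsfl/bsrl sequences compute.

    static int sketch_ffs(unsigned int x)
    {
        int r = 1;

        if (!x)
            return 0;
        while (!(x & 1u)) {
            x >>= 1;
            r++;
        }
        return r;
    }

    static int sketch_fls(unsigned int x)
    {
        int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }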
- */ -static inline int fls(int x) -{ - int r; - - __asm__("bsrl %1,%0\n\t" - "jnz 1f\n\t" - "movl $-1,%0\n" - "1:" : "=r" (r) : "rm" (x)); - return r+1; -} - -#include <asm-generic/bitops/hweight.h> - -#endif /* __KERNEL__ */ - -#include <asm-generic/bitops/fls64.h> - -#ifdef __KERNEL__ - -#include <asm-generic/bitops/ext2-non-atomic.h> - -#define ext2_set_bit_atomic(lock,nr,addr) \ - test_and_set_bit((nr),(unsigned long*)addr) -#define ext2_clear_bit_atomic(lock,nr, addr) \ - test_and_clear_bit((nr),(unsigned long*)addr) - -#include <asm-generic/bitops/minix.h> - -#endif /* __KERNEL__ */ - -#endif /* _I386_BITOPS_H */ diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h deleted file mode 100644 index ed8affbf96cb..000000000000 --- a/include/asm-i386/boot.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_BOOT_H -#define _ASM_BOOT_H - -/* Don't touch these, unless you really know what you're doing. */ -#define DEF_INITSEG 0x9000 -#define DEF_SYSSEG 0x1000 -#define DEF_SETUPSEG 0x9020 -#define DEF_SYSSIZE 0x7F00 - -/* Internal svga startup constants */ -#define NORMAL_VGA 0xffff /* 80x25 mode */ -#define EXTENDED_VGA 0xfffe /* 80x50 mode */ -#define ASK_VGA 0xfffd /* ask for it at bootup */ - -/* Physical address where kernel should be loaded. */ -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ - + (CONFIG_PHYSICAL_ALIGN - 1)) \ - & ~(CONFIG_PHYSICAL_ALIGN - 1)) - -#endif /* _ASM_BOOT_H */ diff --git a/include/asm-i386/bootparam.h b/include/asm-i386/bootparam.h deleted file mode 100644 index b91b01783e4b..000000000000 --- a/include/asm-i386/bootparam.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef _ASM_BOOTPARAM_H -#define _ASM_BOOTPARAM_H - -#include <linux/types.h> -#include <linux/screen_info.h> -#include <linux/apm_bios.h> -#include <linux/edd.h> -#include <asm/e820.h> -#include <asm/ist.h> -#include <video/edid.h> - -struct setup_header { - u8 setup_sects; - u16 root_flags; - u32 syssize; - u16 ram_size; - u16 vid_mode; - u16 root_dev; - u16 boot_flag; - u16 jump; - u32 header; - u16 version; - u32 realmode_swtch; - u16 start_sys; - u16 kernel_version; - u8 type_of_loader; - u8 loadflags; -#define LOADED_HIGH 0x01 -#define CAN_USE_HEAP 0x80 - u16 setup_move_size; - u32 code32_start; - u32 ramdisk_image; - u32 ramdisk_size; - u32 bootsect_kludge; - u16 heap_end_ptr; - u16 _pad1; - u32 cmd_line_ptr; - u32 initrd_addr_max; - u32 kernel_alignment; - u8 relocatable_kernel; -} __attribute__((packed)); - -struct sys_desc_table { - u16 length; - u8 table[14]; -}; - -struct efi_info { - u32 _pad1; - u32 efi_systab; - u32 efi_memdesc_size; - u32 efi_memdesc_version; - u32 efi_memmap; - u32 efi_memmap_size; - u32 _pad2[2]; -}; - -/* The so-called "zeropage" */ -struct boot_params { - struct screen_info screen_info; /* 0x000 */ - struct apm_bios_info apm_bios_info; /* 0x040 */ - u8 _pad2[12]; /* 0x054 */ - struct ist_info ist_info; /* 0x060 */ - u8 _pad3[16]; /* 0x070 */ - u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ - u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ - struct sys_desc_table sys_desc_table; /* 0x0a0 */ - u8 _pad4[144]; /* 0x0b0 */ - struct edid_info edid_info; /* 0x140 */ - struct efi_info efi_info; /* 0x1c0 */ - u32 alt_mem_k; /* 0x1e0 */ - u32 scratch; /* Scratch field! 
*/ /* 0x1e4 */ - u8 e820_entries; /* 0x1e8 */ - u8 eddbuf_entries; /* 0x1e9 */ - u8 edd_mbr_sig_buf_entries; /* 0x1ea */ - u8 _pad6[6]; /* 0x1eb */ - struct setup_header hdr; /* setup header */ /* 0x1f1 */ - u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)]; - u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */ - struct e820entry e820_map[E820MAX]; /* 0x2d0 */ - u8 _pad8[48]; /* 0xcd0 */ - struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */ - u8 _pad9[276]; /* 0xeec */ -} __attribute__((packed)); - -#endif /* _ASM_BOOTPARAM_H */ diff --git a/include/asm-i386/bug.h b/include/asm-i386/bug.h deleted file mode 100644 index b0fd78ca2619..000000000000 --- a/include/asm-i386/bug.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef _I386_BUG_H -#define _I386_BUG_H - - -/* - * Tell the user there is some problem. - * The offending file and line are encoded encoded in the __bug_table section. - */ - -#ifdef CONFIG_BUG -#define HAVE_ARCH_BUG - -#ifdef CONFIG_DEBUG_BUGVERBOSE -#define BUG() \ - do { \ - asm volatile("1:\tud2\n" \ - ".pushsection __bug_table,\"a\"\n" \ - "2:\t.long 1b, %c0\n" \ - "\t.word %c1, 0\n" \ - "\t.org 2b+%c2\n" \ - ".popsection" \ - : : "i" (__FILE__), "i" (__LINE__), \ - "i" (sizeof(struct bug_entry))); \ - for(;;) ; \ - } while(0) - -#else -#define BUG() \ - do { \ - asm volatile("ud2"); \ - for(;;) ; \ - } while(0) -#endif -#endif - -#include <asm-generic/bug.h> -#endif diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h deleted file mode 100644 index d28979ff73be..000000000000 --- a/include/asm-i386/bugs.h +++ /dev/null @@ -1,12 +0,0 @@ -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ -#ifndef _ASM_I386_BUG_H -#define _ASM_I386_BUG_H - -void check_bugs(void); - -#endif /* _ASM_I386_BUG_H */ diff --git a/include/asm-i386/byteorder.h b/include/asm-i386/byteorder.h deleted file mode 100644 index a45470a8b74a..000000000000 --- a/include/asm-i386/byteorder.h +++ /dev/null @@ -1,58 +0,0 @@ -#ifndef _I386_BYTEORDER_H -#define _I386_BYTEORDER_H - -#include <asm/types.h> -#include <linux/compiler.h> - -#ifdef __GNUC__ - -/* For avoiding bswap on i386 */ -#ifdef __KERNEL__ -#endif - -static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) -{ -#ifdef CONFIG_X86_BSWAP - __asm__("bswap %0" : "=r" (x) : "0" (x)); -#else - __asm__("xchgb %b0,%h0\n\t" /* swap lower bytes */ - "rorl $16,%0\n\t" /* swap words */ - "xchgb %b0,%h0" /* swap higher bytes */ - :"=q" (x) - : "0" (x)); -#endif - return x; -} - -static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 val) -{ - union { - struct { __u32 a,b; } s; - __u64 u; - } v; - v.u = val; -#ifdef CONFIG_X86_BSWAP - asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" - : "=r" (v.s.a), "=r" (v.s.b) - : "0" (v.s.a), "1" (v.s.b)); -#else - v.s.a = ___arch__swab32(v.s.a); - v.s.b = ___arch__swab32(v.s.b); - asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); -#endif - return v.u; -} - -/* Do not define swab16. Gcc is smart enough to recognize "C" version and - convert it into rotation or exhange. 
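The inline-asm ___arch__swab32() above uses bswap when the CPU has it and the xchgb/rorl/xchgb sequence otherwise; both compute the plain byte reversal shown below (hypothetical name, given only as a portable reference):

    #include <stdint.h>

    static uint32_t sketch_swab32(uint32_t x)
    {
        return (x >> 24) |
               ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) |
               (x << 24);
    }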
*/ - -#define __arch__swab64(x) ___arch__swab64(x) -#define __arch__swab32(x) ___arch__swab32(x) - -#define __BYTEORDER_HAS_U64__ - -#endif /* __GNUC__ */ - -#include <linux/byteorder/little_endian.h> - -#endif /* _I386_BYTEORDER_H */ diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h deleted file mode 100644 index 57c62f414158..000000000000 --- a/include/asm-i386/cache.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * include/asm-i386/cache.h - */ -#ifndef __ARCH_I386_CACHE_H -#define __ARCH_I386_CACHE_H - - -/* L1 cache line size */ -#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) - -#define __read_mostly __attribute__((__section__(".data.read_mostly"))) - -#endif diff --git a/include/asm-i386/cacheflush.h b/include/asm-i386/cacheflush.h deleted file mode 100644 index 74e03c8f2e51..000000000000 --- a/include/asm-i386/cacheflush.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef _I386_CACHEFLUSH_H -#define _I386_CACHEFLUSH_H - -/* Keep includes the same across arches. */ -#include <linux/mm.h> - -/* Caches aren't brain-dead on the intel. */ -#define flush_cache_all() do { } while (0) -#define flush_cache_mm(mm) do { } while (0) -#define flush_cache_dup_mm(mm) do { } while (0) -#define flush_cache_range(vma, start, end) do { } while (0) -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) -#define flush_dcache_page(page) do { } while (0) -#define flush_dcache_mmap_lock(mapping) do { } while (0) -#define flush_dcache_mmap_unlock(mapping) do { } while (0) -#define flush_icache_range(start, end) do { } while (0) -#define flush_icache_page(vma,pg) do { } while (0) -#define flush_icache_user_range(vma,pg,adr,len) do { } while (0) -#define flush_cache_vmap(start, end) do { } while (0) -#define flush_cache_vunmap(start, end) do { } while (0) - -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ - memcpy(dst, src, len) - -void global_flush_tlb(void); -int change_page_attr(struct page *page, int numpages, pgprot_t prot); - -#ifdef CONFIG_DEBUG_PAGEALLOC -/* internal debugging function */ -void kernel_map_pages(struct page *page, int numpages, int enable); -#endif - -#ifdef CONFIG_DEBUG_RODATA -void mark_rodata_ro(void); -#endif - -#endif /* _I386_CACHEFLUSH_H */ diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h deleted file mode 100644 index 75194abbe8ee..000000000000 --- a/include/asm-i386/checksum.h +++ /dev/null @@ -1,191 +0,0 @@ -#ifndef _I386_CHECKSUM_H -#define _I386_CHECKSUM_H - -#include <linux/in6.h> - -#include <asm/uaccess.h> - -/* - * computes the checksum of a memory block at buff, length len, - * and adds in "sum" (32-bit) - * - * returns a 32-bit number suitable for feeding into itself - * or csum_tcpudp_magic - * - * this function must be called with even lengths, except - * for the last fragment, which may be odd - * - * it's best to have buff aligned on a 32-bit boundary - */ -asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); - -/* - * the same as csum_partial, but copies from src while it - * checksums, and handles user-space pointer exceptions correctly, when needed. 
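csum_partial() and csum_fold() together implement the RFC 1071 Internet checksum: 16-bit one's-complement sums with end-around carry, folded to 16 bits and complemented. A portable model of the combination (hypothetical name; the real code is the optimized assembly in this header and handles odd lengths as well):

    #include <stddef.h>
    #include <stdint.h>

    /* Illustration only: checksum an even number of bytes. */
    static uint16_t sketch_ip_compute_csum(const void *buff, size_t len)
    {
        const uint16_t *p = buff;
        uint32_t sum = 0;

        while (len > 1) {
            sum += *p++;
            len -= 2;
        }
        /* fold the carries back into the low 16 bits */
        while (sum >> 16)
            sum = (sum & 0xffffu) + (sum >> 16);
        return (uint16_t)~sum;
    }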
- * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ - -asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); - -/* - * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel address to one of these functions. - * - * If you use these functions directly please don't forget the - * access_ok(). - */ -static __inline__ -__wsum csum_partial_copy_nocheck (const void *src, void *dst, - int len, __wsum sum) -{ - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); -} - -static __inline__ -__wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) -{ - might_sleep(); - return csum_partial_copy_generic((__force void *)src, dst, - len, sum, err_ptr, NULL); -} - -/* - * This is a version of ip_compute_csum() optimized for IP headers, - * which always checksum on 4 octet boundaries. - * - * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by - * Arnt Gulbrandsen. - */ -static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) -{ - unsigned int sum; - - __asm__ __volatile__( - "movl (%1), %0 ;\n" - "subl $4, %2 ;\n" - "jbe 2f ;\n" - "addl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" -"1: adcl 16(%1), %0 ;\n" - "lea 4(%1), %1 ;\n" - "decl %2 ;\n" - "jne 1b ;\n" - "adcl $0, %0 ;\n" - "movl %0, %2 ;\n" - "shrl $16, %0 ;\n" - "addw %w2, %w0 ;\n" - "adcl $0, %0 ;\n" - "notl %0 ;\n" -"2: ;\n" - /* Since the input registers which are loaded with iph and ihl - are modified, we must also specify them as outputs, or gcc - will assume they contain their original values. */ - : "=r" (sum), "=r" (iph), "=r" (ihl) - : "1" (iph), "2" (ihl) - : "memory"); - return (__force __sum16)sum; -} - -/* - * Fold a partial checksum - */ - -static inline __sum16 csum_fold(__wsum sum) -{ - __asm__( - "addl %1, %0 ;\n" - "adcl $0xffff, %0 ;\n" - : "=r" (sum) - : "r" ((__force u32)sum << 16), - "0" ((__force u32)sum & 0xffff0000) - ); - return (__force __sum16)(~(__force u32)sum >> 16); -} - -static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ - __asm__( - "addl %1, %0 ;\n" - "adcl %2, %0 ;\n" - "adcl %3, %0 ;\n" - "adcl $0, %0 ;\n" - : "=r" (sum) - : "g" (daddr), "g"(saddr), "g"((len + proto) << 8), "0"(sum)); - return sum; -} - -/* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); -} - -/* - * this routine is used for miscellaneous IP-like checksums, mainly - * in icmp.c - */ - -static inline __sum16 ip_compute_csum(const void *buff, int len) -{ - return csum_fold (csum_partial(buff, len, 0)); -} - -#define _HAVE_ARCH_IPV6_CSUM -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) -{ - __asm__( - "addl 0(%1), %0 ;\n" - "adcl 4(%1), %0 ;\n" - "adcl 8(%1), %0 ;\n" - "adcl 12(%1), %0 ;\n" - "adcl 0(%2), %0 ;\n" - "adcl 4(%2), %0 ;\n" - "adcl 8(%2), %0 ;\n" - "adcl 12(%2), %0 ;\n" - "adcl %3, %0 ;\n" - "adcl %4, %0 ;\n" - "adcl $0, %0 ;\n" - : "=&r" (sum) - : "r" (saddr), "r" (daddr), - "r"(htonl(len)), "r"(htonl(proto)), 
"0"(sum)); - - return csum_fold(sum); -} - -/* - * Copy and checksum to user - */ -#define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user(const void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) -{ - might_sleep(); - if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, len, sum, NULL, err_ptr); - - if (len) - *err_ptr = -EFAULT; - - return (__force __wsum)-1; /* invalid checksum */ -} - -#endif diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h deleted file mode 100644 index f86ede28f6dc..000000000000 --- a/include/asm-i386/cmpxchg.h +++ /dev/null @@ -1,289 +0,0 @@ -#ifndef __ASM_CMPXCHG_H -#define __ASM_CMPXCHG_H - -#include <linux/bitops.h> /* for LOCK_PREFIX */ - -/* - * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you - * you need to test for the feature in boot_cpu_data. - */ - -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr)))) - -struct __xchg_dummy { unsigned long a[100]; }; -#define __xg(x) ((struct __xchg_dummy *)(x)) - -/* - * The semantics of XCHGCMP8B are a bit strange, this is why - * there is a loop and the loading of %%eax and %%edx has to - * be inside. This inlines well in most cases, the cached - * cost is around ~38 cycles. (in the future we might want - * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that - * might have an implicit FPU-save as a cost, so it's not - * clear which path to go.) - * - * cmpxchg8b must be used with the lock prefix here to allow - * the instruction to be executed atomically, see page 3-102 - * of the instruction set reference 24319102.pdf. We need - * the reader side to see the coherent 64bit value. - */ -static inline void __set_64bit (unsigned long long * ptr, - unsigned int low, unsigned int high) -{ - __asm__ __volatile__ ( - "\n1:\t" - "movl (%0), %%eax\n\t" - "movl 4(%0), %%edx\n\t" - LOCK_PREFIX "cmpxchg8b (%0)\n\t" - "jnz 1b" - : /* no outputs */ - : "D"(ptr), - "b"(low), - "c"(high) - : "ax","dx","memory"); -} - -static inline void __set_64bit_constant (unsigned long long *ptr, - unsigned long long value) -{ - __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL)); -} -#define ll_low(x) *(((unsigned int*)&(x))+0) -#define ll_high(x) *(((unsigned int*)&(x))+1) - -static inline void __set_64bit_var (unsigned long long *ptr, - unsigned long long value) -{ - __set_64bit(ptr,ll_low(value), ll_high(value)); -} - -#define set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit_constant(ptr, value) : \ - __set_64bit_var(ptr, value) ) - -#define _set_64bit(ptr,value) \ -(__builtin_constant_p(value) ? \ - __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \ - __set_64bit(ptr, ll_low(value), ll_high(value)) ) - -/* - * Note: no "lock" prefix even on SMP: xchg always implies lock anyway - * Note 2: xchg has side effect, so that attribute volatile is necessary, - * but generally the primitive is invalid, *ptr is output argument. 
--ANK - */ -static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size) -{ - switch (size) { - case 1: - __asm__ __volatile__("xchgb %b0,%1" - :"=q" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 2: - __asm__ __volatile__("xchgw %w0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - case 4: - __asm__ __volatile__("xchgl %0,%1" - :"=r" (x) - :"m" (*__xg(ptr)), "0" (x) - :"memory"); - break; - } - return x; -} - -/* - * Atomic compare and exchange. Compare OLD with MEM, if identical, - * store NEW in MEM. Return the initial value in MEM. Success is - * indicated by comparing RETURN with OLD. - */ - -#ifdef CONFIG_X86_CMPXCHG -#define __HAVE_ARCH_CMPXCHG 1 -#define cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) -#define sync_cmpxchg(ptr,o,n)\ - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) -#define cmpxchg_local(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\ - (unsigned long)(n),sizeof(*(ptr)))) -#endif - -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long prev; - switch (size) { - case 1: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 2: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 4: - __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - } - return old; -} - -/* - * Always use locked operations when touching memory shared with a - * hypervisor, since the system may be SMP even if the guest kernel - * isn't. - */ -static inline unsigned long __sync_cmpxchg(volatile void *ptr, - unsigned long old, - unsigned long new, int size) -{ - unsigned long prev; - switch (size) { - case 1: - __asm__ __volatile__("lock; cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 2: - __asm__ __volatile__("lock; cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 4: - __asm__ __volatile__("lock; cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - } - return old; -} - -static inline unsigned long __cmpxchg_local(volatile void *ptr, - unsigned long old, unsigned long new, int size) -{ - unsigned long prev; - switch (size) { - case 1: - __asm__ __volatile__("cmpxchgb %b1,%2" - : "=a"(prev) - : "q"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 2: - __asm__ __volatile__("cmpxchgw %w1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - case 4: - __asm__ __volatile__("cmpxchgl %1,%2" - : "=a"(prev) - : "r"(new), "m"(*__xg(ptr)), "0"(old) - : "memory"); - return prev; - } - return old; -} - -#ifndef CONFIG_X86_CMPXCHG -/* - * Building a kernel capable running on 80386. It may be necessary to - * simulate the cmpxchg on the 80386 CPU. For that purpose we define - * a function for each of the sizes we support. 
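Aside on the cmpxchg.h block above: cmpxchg(ptr, old, new) stores new into *ptr only if *ptr currently equals old, and always returns the value that was in *ptr beforehand, so the caller can tell whether it won. A userspace analogue built on the GCC/Clang __atomic builtins (a sketch of the contract, not the kernel macro itself; cas_u32 is an invented name):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the cmpxchg() contract: store new_val into *ptr only if
     * *ptr == old, and return whatever was in *ptr just before the attempt. */
    static uint32_t cas_u32(uint32_t *ptr, uint32_t old, uint32_t new_val)
    {
        uint32_t expected = old;

        __atomic_compare_exchange_n(ptr, &expected, new_val, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;        /* on failure this holds the current value */
    }

    int main(void)
    {
        uint32_t v = 5;
        uint32_t prev;

        prev = cas_u32(&v, 5, 7);   /* succeeds: 5 matches, v becomes 7 */
        printf("prev=%u v=%u\n", prev, v);

        prev = cas_u32(&v, 5, 9);   /* fails: v is 7, nothing is stored */
        printf("prev=%u v=%u\n", prev, v);
        return 0;
    }

The usual pattern around either interface is a retry loop: read the location, compute a new value, attempt the compare-and-swap, and retry if the returned previous value shows that another CPU got there first.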
- */ - -extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8); -extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16); -extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32); - -static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - switch (size) { - case 1: - return cmpxchg_386_u8(ptr, old, new); - case 2: - return cmpxchg_386_u16(ptr, old, new); - case 4: - return cmpxchg_386_u32(ptr, old, new); - } - return old; -} - -#define cmpxchg(ptr,o,n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - if (likely(boot_cpu_data.x86 > 3)) \ - __ret = __cmpxchg((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - else \ - __ret = cmpxchg_386((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - __ret; \ -}) -#define cmpxchg_local(ptr,o,n) \ -({ \ - __typeof__(*(ptr)) __ret; \ - if (likely(boot_cpu_data.x86 > 3)) \ - __ret = __cmpxchg_local((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - else \ - __ret = cmpxchg_386((ptr), (unsigned long)(o), \ - (unsigned long)(n), sizeof(*(ptr))); \ - __ret; \ -}) -#endif - -static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old, - unsigned long long new) -{ - unsigned long long prev; - __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); - return prev; -} - -static inline unsigned long long __cmpxchg64_local(volatile void *ptr, - unsigned long long old, unsigned long long new) -{ - unsigned long long prev; - __asm__ __volatile__("cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) - : "memory"); - return prev; -} - -#define cmpxchg64(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\ - (unsigned long long)(n))) -#define cmpxchg64_local(ptr,o,n)\ - ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\ - (unsigned long long)(n))) -#endif diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h deleted file mode 100644 index 9d914e1e4aad..000000000000 --- a/include/asm-i386/cpu.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASM_I386_CPU_H_ -#define _ASM_I386_CPU_H_ - -#include <linux/device.h> -#include <linux/cpu.h> -#include <linux/topology.h> -#include <linux/nodemask.h> -#include <linux/percpu.h> - -struct i386_cpu { - struct cpu cpu; -}; -extern int arch_register_cpu(int num); -#ifdef CONFIG_HOTPLUG_CPU -extern void arch_unregister_cpu(int); -extern int enable_cpu_hotplug; -#else -#define enable_cpu_hotplug 0 -#endif - -DECLARE_PER_CPU(int, cpu_state); -#endif /* _ASM_I386_CPU_H_ */ diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h deleted file mode 100644 index 7b3aa28ebc6e..000000000000 --- a/include/asm-i386/cpufeature.h +++ /dev/null @@ -1,175 +0,0 @@ -/* - * cpufeature.h - * - * Defines x86 CPU feature bits - */ - -#ifndef __ASM_I386_CPUFEATURE_H -#define __ASM_I386_CPUFEATURE_H - -#ifndef __ASSEMBLY__ -#include <linux/bitops.h> -#endif -#include <asm/required-features.h> - -#define NCAPINTS 8 /* N 32-bit words worth of info */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ -#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ -#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ -#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ -#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ 
-#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ -#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ -#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ -#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ -#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ -#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ -#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ -#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ -#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ -#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ -#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ -#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ -#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ -#define X86_FEATURE_PN (0*32+18) /* Processor serial number */ -#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ -#define X86_FEATURE_DS (0*32+21) /* Debug Store */ -#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ -#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ -#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ - /* of FPU context), and CR4.OSFXSR available */ -#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ -#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ -#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ -#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ -#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ -#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ - -/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ -/* Don't duplicate feature flags which are redundant with Intel! */ -#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ -#define X86_FEATURE_MP (1*32+19) /* MP Capable. */ -#define X86_FEATURE_NX (1*32+20) /* Execute Disable */ -#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ -#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ -#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ -#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ -#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! 
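Aside on the X86_FEATURE_* encoding above and below: each constant packs a (word, bit) pair, where word N of x86_capability holds feature bits N*32..N*32+31, and cpu_has() ultimately reduces to a test_bit() on that array. A portable stand-in for that lookup (the capability words below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define X86_FEATURE_PAE (0*32 + 6)   /* word 0, bit 6  */
    #define X86_FEATURE_NX  (1*32 + 20)  /* word 1, bit 20 */

    /* Same decoding cpu_has() performs: word = bit >> 5, mask = 1 << (bit & 31). */
    static int has_feature(const uint32_t *caps, unsigned int feature)
    {
        return (caps[feature >> 5] >> (feature & 31)) & 1;
    }

    int main(void)
    {
        uint32_t caps[8] = { 0 };    /* NCAPINTS == 8 words in this header  */

        caps[0] |= 1u << 6;          /* pretend CPUID level 1 reported PAE  */

        printf("PAE: %d  NX: %d\n",
               has_feature(caps, X86_FEATURE_PAE),
               has_feature(caps, X86_FEATURE_NX));
        return 0;
    }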
*/ - -/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ -#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ -#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ -#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ - -/* Other features, Linux-defined mapping, word 3 */ -/* This range is used for feature bits which conflict or are synthesized */ -#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ -#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ -#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ -#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ -/* cpu types for specific tunings: */ -#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ -#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ -#define X86_FEATURE_P3 (3*32+ 6) /* P3 */ -#define X86_FEATURE_P4 (3*32+ 7) /* P4 */ -#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ -#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ -#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ -#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ -#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ -#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ -/* 14 free */ -#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */ -#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ - -/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ -#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ -#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ -#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ -#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ -#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ -#define X86_FEATURE_CID (4*32+10) /* Context ID */ -#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ -#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ - -/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ -#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ -#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ -#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ -#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ -#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ -#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ -#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ -#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ -#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ -#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ - -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ - -/* - * Auxiliary flags: Linux defined - For features scattered in various - * CPUID levels like 0x6, 0xA etc - */ -#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ - -#define cpu_has(c, bit) \ - (__builtin_constant_p(bit) && \ - ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \ - (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \ - (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \ - (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \ - 
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \ - (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \ - (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ - (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ - ? 1 : \ - test_bit(bit, (c)->x86_capability)) -#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) - -#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) -#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) -#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) -#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE) -#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC) -#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE) -#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE) -#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC) -#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP) -#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR) -#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX) -#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR) -#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM) -#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2) -#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3) -#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) -#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) -#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) -#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) -#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) -#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR) -#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE) -#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN) -#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT) -#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN) -#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2) -#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN) -#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE) -#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) -#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) -#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) -#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) -#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) -#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) -#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) - -#endif /* __ASM_I386_CPUFEATURE_H */ - -/* - * Local Variables: - * mode:c - * comment-column:42 - * End: - */ diff --git a/include/asm-i386/cputime.h b/include/asm-i386/cputime.h deleted file mode 100644 index 398ed7cd171d..000000000000 --- a/include/asm-i386/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __I386_CPUTIME_H -#define __I386_CPUTIME_H - -#include <asm-generic/cputime.h> - -#endif /* __I386_CPUTIME_H */ diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h deleted file mode 100644 index d35248539912..000000000000 --- a/include/asm-i386/current.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _I386_CURRENT_H -#define _I386_CURRENT_H - -#include <linux/compiler.h> -#include <asm/percpu.h> - -struct task_struct; - -DECLARE_PER_CPU(struct task_struct *, current_task); -static __always_inline struct task_struct *get_current(void) -{ - return x86_read_percpu(current_task); -} - -#define current get_current() - -#endif /* !(_I386_CURRENT_H) */ diff --git a/include/asm-i386/debugreg.h b/include/asm-i386/debugreg.h deleted file mode 100644 index f0b2b06ae0f7..000000000000 --- a/include/asm-i386/debugreg.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef _I386_DEBUGREG_H -#define _I386_DEBUGREG_H - - -/* Indicate the register numbers for a number of the specific - 
debug registers. Registers 0-3 contain the addresses we wish to trap on */ -#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */ -#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */ - -#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */ -#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */ - -/* Define a few things for the status register. We can use this to determine - which debugging register was responsible for the trap. The other bits - are either reserved or not of interest to us. */ - -#define DR_TRAP0 (0x1) /* db0 */ -#define DR_TRAP1 (0x2) /* db1 */ -#define DR_TRAP2 (0x4) /* db2 */ -#define DR_TRAP3 (0x8) /* db3 */ - -#define DR_STEP (0x4000) /* single-step */ -#define DR_SWITCH (0x8000) /* task switch */ - -/* Now define a bunch of things for manipulating the control register. - The top two bytes of the control register consist of 4 fields of 4 - bits - each field corresponds to one of the four debug registers, - and indicates what types of access we trap on, and how large the data - field is that we are looking at */ - -#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */ -#define DR_CONTROL_SIZE 4 /* 4 control bits per register */ - -#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */ -#define DR_RW_WRITE (0x1) -#define DR_RW_READ (0x3) - -#define DR_LEN_1 (0x0) /* Settings for data length to trap on */ -#define DR_LEN_2 (0x4) -#define DR_LEN_4 (0xC) - -/* The low byte to the control register determine which registers are - enabled. There are 4 fields of two bits. One bit is "local", meaning - that the processor will reset the bit after a task switch and the other - is global meaning that we have to explicitly reset the bit. With linux, - you can use either one, since we explicitly zero the register when we enter - kernel mode. */ - -#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ -#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ -#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ - -#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ -#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */ - -/* The second byte to the control register has a few special things. - We can slow the instruction pipeline for instructions coming via the - gdt or the ldt if we want to. I am not sure why this is an advantage */ - -#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */ -#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ -#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ - -#endif diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h deleted file mode 100644 index 9ae5e3782ed8..000000000000 --- a/include/asm-i386/delay.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _I386_DELAY_H -#define _I386_DELAY_H - -/* - * Copyright (C) 1993 Linus Torvalds - * - * Delay routines calling functions in arch/i386/lib/delay.c - */ - -/* Undefined functions to get compile-time errors */ -extern void __bad_udelay(void); -extern void __bad_ndelay(void); - -extern void __udelay(unsigned long usecs); -extern void __ndelay(unsigned long nsecs); -extern void __const_udelay(unsigned long usecs); -extern void __delay(unsigned long loops); - -/* 0x10c7 is 2**32 / 1000000 (rounded up) */ -#define udelay(n) (__builtin_constant_p(n) ? \ - ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ - __udelay(n)) - -/* 0x5 is 2**32 / 1000000000 (rounded up) */ -#define ndelay(n) (__builtin_constant_p(n) ? \ - ((n) > 20000 ? 
__bad_ndelay() : __const_udelay((n) * 5ul)) : \ - __ndelay(n)) - -void use_tsc_delay(void); - -#endif /* defined(_I386_DELAY_H) */ diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h deleted file mode 100644 index c547403f341d..000000000000 --- a/include/asm-i386/desc.h +++ /dev/null @@ -1,244 +0,0 @@ -#ifndef __ARCH_DESC_H -#define __ARCH_DESC_H - -#include <asm/ldt.h> -#include <asm/segment.h> - -#ifndef __ASSEMBLY__ - -#include <linux/preempt.h> -#include <linux/smp.h> -#include <linux/percpu.h> - -#include <asm/mmu.h> - -struct Xgt_desc_struct { - unsigned short size; - unsigned long address __attribute__((packed)); - unsigned short pad; -} __attribute__ ((packed)); - -struct gdt_page -{ - struct desc_struct gdt[GDT_ENTRIES]; -} __attribute__((aligned(PAGE_SIZE))); -DECLARE_PER_CPU(struct gdt_page, gdt_page); - -static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) -{ - return per_cpu(gdt_page, cpu).gdt; -} - -extern struct Xgt_desc_struct idt_descr; -extern struct desc_struct idt_table[]; -extern void set_intr_gate(unsigned int irq, void * addr); - -static inline void pack_descriptor(__u32 *a, __u32 *b, - unsigned long base, unsigned long limit, unsigned char type, unsigned char flags) -{ - *a = ((base & 0xffff) << 16) | (limit & 0xffff); - *b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | - (limit & 0x000f0000) | ((type & 0xff) << 8) | ((flags & 0xf) << 20); -} - -static inline void pack_gate(__u32 *a, __u32 *b, - unsigned long base, unsigned short seg, unsigned char type, unsigned char flags) -{ - *a = (seg << 16) | (base & 0xffff); - *b = (base & 0xffff0000) | ((type & 0xff) << 8) | (flags & 0xff); -} - -#define DESCTYPE_LDT 0x82 /* present, system, DPL-0, LDT */ -#define DESCTYPE_TSS 0x89 /* present, system, DPL-0, 32-bit TSS */ -#define DESCTYPE_TASK 0x85 /* present, system, DPL-0, task gate */ -#define DESCTYPE_INT 0x8e /* present, system, DPL-0, interrupt gate */ -#define DESCTYPE_TRAP 0x8f /* present, system, DPL-0, trap gate */ -#define DESCTYPE_DPL3 0x60 /* DPL-3 */ -#define DESCTYPE_S 0x10 /* !system */ - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define load_TR_desc() native_load_tr_desc() -#define load_gdt(dtr) native_load_gdt(dtr) -#define load_idt(dtr) native_load_idt(dtr) -#define load_tr(tr) __asm__ __volatile("ltr %0"::"m" (tr)) -#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"m" (ldt)) - -#define store_gdt(dtr) native_store_gdt(dtr) -#define store_idt(dtr) native_store_idt(dtr) -#define store_tr(tr) (tr = native_store_tr()) -#define store_ldt(ldt) __asm__ ("sldt %0":"=m" (ldt)) - -#define load_TLS(t, cpu) native_load_tls(t, cpu) -#define set_ldt native_set_ldt - -#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) -#endif - -static inline void write_dt_entry(struct desc_struct *dt, - int entry, u32 entry_low, u32 entry_high) -{ - dt[entry].a = entry_low; - dt[entry].b = entry_high; -} - -static inline void native_set_ldt(const void *addr, unsigned int entries) -{ - if (likely(entries == 0)) - __asm__ __volatile__("lldt %w0"::"q" (0)); - else { - unsigned cpu = smp_processor_id(); - __u32 a, b; - - pack_descriptor(&a, &b, (unsigned long)addr, - entries * sizeof(struct desc_struct) - 1, - DESCTYPE_LDT, 0); - write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); - __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); - } -} - - -static 
inline void native_load_tr_desc(void) -{ - asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); -} - -static inline void native_load_gdt(const struct Xgt_desc_struct *dtr) -{ - asm volatile("lgdt %0"::"m" (*dtr)); -} - -static inline void native_load_idt(const struct Xgt_desc_struct *dtr) -{ - asm volatile("lidt %0"::"m" (*dtr)); -} - -static inline void native_store_gdt(struct Xgt_desc_struct *dtr) -{ - asm ("sgdt %0":"=m" (*dtr)); -} - -static inline void native_store_idt(struct Xgt_desc_struct *dtr) -{ - asm ("sidt %0":"=m" (*dtr)); -} - -static inline unsigned long native_store_tr(void) -{ - unsigned long tr; - asm ("str %0":"=r" (tr)); - return tr; -} - -static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) -{ - unsigned int i; - struct desc_struct *gdt = get_cpu_gdt_table(cpu); - - for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) - gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; -} - -static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) -{ - __u32 a, b; - pack_gate(&a, &b, (unsigned long)addr, seg, type, 0); - write_idt_entry(idt_table, gate, a, b); -} - -static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr) -{ - __u32 a, b; - pack_descriptor(&a, &b, (unsigned long)addr, - offsetof(struct tss_struct, __cacheline_filler) - 1, - DESCTYPE_TSS, 0); - write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); -} - - -#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) - -#define LDT_entry_a(info) \ - ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) - -#define LDT_entry_b(info) \ - (((info)->base_addr & 0xff000000) | \ - (((info)->base_addr & 0x00ff0000) >> 16) | \ - ((info)->limit & 0xf0000) | \ - (((info)->read_exec_only ^ 1) << 9) | \ - ((info)->contents << 10) | \ - (((info)->seg_not_present ^ 1) << 15) | \ - ((info)->seg_32bit << 22) | \ - ((info)->limit_in_pages << 23) | \ - ((info)->useable << 20) | \ - 0x7000) - -#define LDT_empty(info) (\ - (info)->base_addr == 0 && \ - (info)->limit == 0 && \ - (info)->contents == 0 && \ - (info)->read_exec_only == 1 && \ - (info)->seg_32bit == 0 && \ - (info)->limit_in_pages == 0 && \ - (info)->seg_not_present == 1 && \ - (info)->useable == 0 ) - -static inline void clear_LDT(void) -{ - set_ldt(NULL, 0); -} - -/* - * load one particular LDT into the current CPU - */ -static inline void load_LDT_nolock(mm_context_t *pc) -{ - set_ldt(pc->ldt, pc->size); -} - -static inline void load_LDT(mm_context_t *pc) -{ - preempt_disable(); - load_LDT_nolock(pc); - preempt_enable(); -} - -static inline unsigned long get_desc_base(unsigned long *desc) -{ - unsigned long base; - base = ((desc[0] >> 16) & 0x0000ffff) | - ((desc[1] << 16) & 0x00ff0000) | - (desc[1] & 0xff000000); - return base; -} - -#else /* __ASSEMBLY__ */ - -/* - * GET_DESC_BASE reads the descriptor base of the specified segment. - * - * Args: - * idx - descriptor index - * gdt - GDT pointer - * base - 32bit register to which the base will be written - * lo_w - lo word of the "base" register - * lo_b - lo byte of the "base" register - * hi_b - hi byte of the low word of the "base" register - * - * Example: - * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) - * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. 
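Aside on pack_descriptor() and get_desc_base() above: they only shuffle the base and limit bits into the two 32-bit halves of a GDT entry and back. A standalone round-trip sketch of the same bit layout (userspace illustration; pack_desc/desc_base are invented names):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* High word layout built by pack_descriptor(): base 31..24, flags 23..20,
     * limit 19..16, access/type byte 15..8, base 23..16.  Low word: base 15..0
     * over limit 15..0. */
    static void pack_desc(uint32_t *a, uint32_t *b, uint32_t base,
                          uint32_t limit, uint8_t type, uint8_t flags)
    {
        *a = ((base & 0xffff) << 16) | (limit & 0xffff);
        *b = (base & 0xff000000) | ((base & 0x00ff0000) >> 16) |
             (limit & 0x000f0000) | ((uint32_t)type << 8) |
             ((uint32_t)(flags & 0xf) << 20);
    }

    /* Reassemble the 32-bit base, as get_desc_base() does. */
    static uint32_t desc_base(const uint32_t *d)
    {
        return ((d[0] >> 16) & 0x0000ffff) |
               ((d[1] << 16) & 0x00ff0000) |
               (d[1] & 0xff000000);
    }

    int main(void)
    {
        uint32_t d[2];

        pack_desc(&d[0], &d[1], 0x12345678, 0xfffff, 0x89 /* 32-bit TSS */, 0);
        assert(desc_base(d) == 0x12345678);
        printf("descriptor %08x:%08x, base back = %08x\n", d[1], d[0], desc_base(d));
        return 0;
    }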
- */ -#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ - movb idx*8+4(gdt), lo_b; \ - movb idx*8+7(gdt), hi_b; \ - shll $16, base; \ - movw idx*8+2(gdt), lo_w; - -#endif /* !__ASSEMBLY__ */ - -#endif diff --git a/include/asm-i386/device.h b/include/asm-i386/device.h deleted file mode 100644 index 849604c70e6b..000000000000 --- a/include/asm-i386/device.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#ifndef _ASM_I386_DEVICE_H -#define _ASM_I386_DEVICE_H - -struct dev_archdata { -#ifdef CONFIG_ACPI - void *acpi_handle; -#endif -}; - -#endif /* _ASM_I386_DEVICE_H */ diff --git a/include/asm-i386/div64.h b/include/asm-i386/div64.h deleted file mode 100644 index 438e980068bd..000000000000 --- a/include/asm-i386/div64.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef __I386_DIV64 -#define __I386_DIV64 - -#include <linux/types.h> - -/* - * do_div() is NOT a C function. It wants to return - * two values (the quotient and the remainder), but - * since that doesn't work very well in C, what it - * does is: - * - * - modifies the 64-bit dividend _in_place_ - * - returns the 32-bit remainder - * - * This ends up being the most efficient "calling - * convention" on x86. - */ -#define do_div(n,base) ({ \ - unsigned long __upper, __low, __high, __mod, __base; \ - __base = (base); \ - asm("":"=a" (__low), "=d" (__high):"A" (n)); \ - __upper = __high; \ - if (__high) { \ - __upper = __high % (__base); \ - __high = __high / (__base); \ - } \ - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \ - asm("":"=A" (n):"a" (__low),"d" (__high)); \ - __mod; \ -}) - -/* - * (long)X = ((long long)divs) / (long)div - * (long)rem = ((long long)divs) % (long)div - * - * Warning, this will do an exception if X overflows. 
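Aside on do_div() above: it is a macro precisely because it must produce two results, replacing the 64-bit dividend with the quotient in place and handing back the 32-bit remainder as its value. A portable C sketch of that contract (do_div_sketch is an invented name; the real macro keeps everything in 32-bit divl operations):

    #include <stdint.h>
    #include <stdio.h>

    /* Portable stand-in for do_div(): divide *n in place, return the remainder. */
    static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
    {
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
    }

    int main(void)
    {
        uint64_t ns = 1234567890123ULL;     /* e.g. a nanosecond timestamp */
        uint32_t rem = do_div_sketch(&ns, 1000000000u);

        /* ns now holds whole seconds, rem the leftover nanoseconds. */
        printf("%llu s + %u ns\n", (unsigned long long)ns, rem);
        return 0;
    }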
- */ -#define div_long_long_rem(a,b,c) div_ll_X_l_rem(a,b,c) - -static inline long -div_ll_X_l_rem(long long divs, long div, long *rem) -{ - long dum2; - __asm__("divl %2":"=a"(dum2), "=d"(*rem) - : "rm"(div), "A"(divs)); - - return dum2; - -} - -extern uint64_t div64_64(uint64_t dividend, uint64_t divisor); -#endif diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h deleted file mode 100644 index f1d72d177f68..000000000000 --- a/include/asm-i386/dma-mapping.h +++ /dev/null @@ -1,186 +0,0 @@ -#ifndef _ASM_I386_DMA_MAPPING_H -#define _ASM_I386_DMA_MAPPING_H - -#include <linux/mm.h> - -#include <asm/cache.h> -#include <asm/io.h> -#include <asm/scatterlist.h> -#include <asm/bug.h> - -#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) -#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) - -void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag); - -void dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle); - -static inline dma_addr_t -dma_map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); - WARN_ON(size == 0); - flush_write_buffers(); - return virt_to_phys(ptr); -} - -static inline void -dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static inline int -dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction direction) -{ - int i; - - BUG_ON(!valid_dma_direction(direction)); - WARN_ON(nents == 0 || sg[0].length == 0); - - for (i = 0; i < nents; i++ ) { - BUG_ON(!sg[i].page); - - sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; - } - - flush_write_buffers(); - return nents; -} - -static inline dma_addr_t -dma_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); - return page_to_phys(page) + offset; -} - -static inline void -dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - - -static inline void -dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, - enum dma_data_direction direction) -{ - BUG_ON(!valid_dma_direction(direction)); -} - -static inline void -dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ -} - -static inline void -dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - flush_write_buffers(); -} - -static inline void -dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ -} - -static inline void -dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, - unsigned long offset, size_t size, - enum dma_data_direction direction) -{ - flush_write_buffers(); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ -} - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction direction) -{ - flush_write_buffers(); -} - -static inline int -dma_mapping_error(dma_addr_t 
dma_addr) -{ - return 0; -} - -extern int forbid_dac; - -static inline int -dma_supported(struct device *dev, u64 mask) -{ - /* - * we fall back to GFP_DMA when the mask isn't all 1s, - * so we can't guarantee allocations that must be - * within a tighter range than GFP_DMA.. - */ - if(mask < 0x00ffffff) - return 0; - - /* Work around chipset bugs */ - if (forbid_dac > 0 && mask > 0xffffffffULL) - return 0; - - return 1; -} - -static inline int -dma_set_mask(struct device *dev, u64 mask) -{ - if(!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - - *dev->dma_mask = mask; - - return 0; -} - -static inline int -dma_get_cache_alignment(void) -{ - /* no easy way to get cache size on all x86, so return the - * maximum possible, to be safe */ - return (1 << INTERNODE_CACHE_SHIFT); -} - -#define dma_is_consistent(d, h) (1) - -static inline void -dma_cache_sync(struct device *dev, void *vaddr, size_t size, - enum dma_data_direction direction) -{ - flush_write_buffers(); -} - -#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY -extern int -dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, - dma_addr_t device_addr, size_t size, int flags); - -extern void -dma_release_declared_memory(struct device *dev); - -extern void * -dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size); - -#endif diff --git a/include/asm-i386/dma.h b/include/asm-i386/dma.h deleted file mode 100644 index d23aac8e1a50..000000000000 --- a/include/asm-i386/dma.h +++ /dev/null @@ -1,297 +0,0 @@ -/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_DMA_H -#define _ASM_DMA_H - -#include <linux/spinlock.h> /* And spinlocks */ -#include <asm/io.h> /* need byte IO */ -#include <linux/delay.h> - - -#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER -#define dma_outb outb_p -#else -#define dma_outb outb -#endif - -#define dma_inb inb - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... 
\ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* The maximum address that we can perform a DMA transfer to on this platform */ -#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG 0x08 /* command register (w) */ -#define DMA1_STAT_REG 0x08 /* status register (r) */ -#define DMA1_REQ_REG 0x09 /* request register (w) */ -#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ -#define DMA1_MODE_REG 0x0B /* mode register (w) */ -#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ -#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ -#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ - -#define DMA2_CMD_REG 0xD0 /* command register (w) */ -#define DMA2_STAT_REG 0xD0 /* status register (r) */ -#define DMA2_REQ_REG 0xD2 /* request register (w) */ -#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ -#define DMA2_MODE_REG 0xD6 /* mode register (w) */ -#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ -#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ -#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ - -#define DMA_ADDR_0 0x00 /* DMA address registers */ -#define DMA_ADDR_1 0x02 -#define DMA_ADDR_2 0x04 -#define DMA_ADDR_3 0x06 -#define DMA_ADDR_4 0xC0 -#define DMA_ADDR_5 0xC4 -#define DMA_ADDR_6 0xC8 -#define DMA_ADDR_7 0xCC - -#define DMA_CNT_0 0x01 /* DMA count registers */ -#define DMA_CNT_1 0x03 -#define DMA_CNT_2 0x05 -#define DMA_CNT_3 0x07 -#define DMA_CNT_4 0xC2 -#define DMA_CNT_5 0xC6 -#define DMA_CNT_6 0xCA -#define DMA_CNT_7 0xCE - -#define DMA_PAGE_0 0x87 /* DMA page registers */ -#define DMA_PAGE_1 0x83 -#define DMA_PAGE_2 0x81 -#define DMA_PAGE_3 0x82 -#define DMA_PAGE_5 0x8B -#define DMA_PAGE_6 0x89 -#define DMA_PAGE_7 0x8A - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if 
(dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set only the page register bits of the transfer address. - * This is used for successive transfers when we know the contents of - * the lower 16 bits of the DMA current address register, but a 64k boundary - * may have been crossed. - */ -static __inline__ void set_dma_page(unsigned int dmanr, char pagenr) -{ - switch(dmanr) { - case 0: - dma_outb(pagenr, DMA_PAGE_0); - break; - case 1: - dma_outb(pagenr, DMA_PAGE_1); - break; - case 2: - dma_outb(pagenr, DMA_PAGE_2); - break; - case 3: - dma_outb(pagenr, DMA_PAGE_3); - break; - case 5: - dma_outb(pagenr & 0xfe, DMA_PAGE_5); - break; - case 6: - dma_outb(pagenr & 0xfe, DMA_PAGE_6); - break; - case 7: - dma_outb(pagenr & 0xfe, DMA_PAGE_7); - break; - } -} - - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - set_dma_page(dmanr, a>>16); - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? 
count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* From PCI */ - -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - -#endif /* _ASM_DMA_H */ diff --git a/include/asm-i386/dmi.h b/include/asm-i386/dmi.h deleted file mode 100644 index 38d4eeb7fc7e..000000000000 --- a/include/asm-i386/dmi.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_DMI_H -#define _ASM_DMI_H 1 - -#include <asm/io.h> - -/* Use early IO mappings for DMI because it's initialized early */ -#define dmi_ioremap bt_ioremap -#define dmi_iounmap bt_iounmap -#define dmi_alloc alloc_bootmem - -#endif diff --git a/include/asm-i386/dwarf2.h b/include/asm-i386/dwarf2.h deleted file mode 100644 index 6d66398a307d..000000000000 --- a/include/asm-i386/dwarf2.h +++ /dev/null @@ -1,61 +0,0 @@ -#ifndef _DWARF2_H -#define _DWARF2_H - -#ifndef __ASSEMBLY__ -#warning "asm/dwarf2.h should be only included in pure assembly files" -#endif - -/* - Macros for dwarf2 CFI unwind table entries. - See "as.info" for details on these pseudo ops. Unfortunately - they are only supported in very new binutils, so define them - away for older version. - */ - -#ifdef CONFIG_UNWIND_INFO - -#define CFI_STARTPROC .cfi_startproc -#define CFI_ENDPROC .cfi_endproc -#define CFI_DEF_CFA .cfi_def_cfa -#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register -#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset -#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset -#define CFI_OFFSET .cfi_offset -#define CFI_REL_OFFSET .cfi_rel_offset -#define CFI_REGISTER .cfi_register -#define CFI_RESTORE .cfi_restore -#define CFI_REMEMBER_STATE .cfi_remember_state -#define CFI_RESTORE_STATE .cfi_restore_state -#define CFI_UNDEFINED .cfi_undefined - -#ifdef CONFIG_AS_CFI_SIGNAL_FRAME -#define CFI_SIGNAL_FRAME .cfi_signal_frame -#else -#define CFI_SIGNAL_FRAME -#endif - -#else - -/* Due to the structure of pre-exisiting code, don't use assembler line - comment character # to ignore the arguments. Instead, use a dummy macro. */ -.macro ignore a=0, b=0, c=0, d=0 -.endm - -#define CFI_STARTPROC ignore -#define CFI_ENDPROC ignore -#define CFI_DEF_CFA ignore -#define CFI_DEF_CFA_REGISTER ignore -#define CFI_DEF_CFA_OFFSET ignore -#define CFI_ADJUST_CFA_OFFSET ignore -#define CFI_OFFSET ignore -#define CFI_REL_OFFSET ignore -#define CFI_REGISTER ignore -#define CFI_RESTORE ignore -#define CFI_REMEMBER_STATE ignore -#define CFI_RESTORE_STATE ignore -#define CFI_UNDEFINED ignore -#define CFI_SIGNAL_FRAME ignore - -#endif - -#endif diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h deleted file mode 100644 index cf67dbb1db79..000000000000 --- a/include/asm-i386/e820.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * structures and definitions for the int 15, ax=e820 memory map - * scheme. - * - * In a nutshell, arch/i386/boot/setup.S populates a scratch table - * in the empty_zero_block that contains a list of usable address/size - * duples. In arch/i386/kernel/setup.c, this information is - * transferred into the e820map, and in arch/i386/mm/init.c, that - * new information is used to mark pages reserved or not. 
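Aside on set_dma_addr()/set_dma_count() in dma.h above: programming an 8237 channel means splitting a 24-bit physical address across a page register plus LSB/MSB address bytes, with the word channels 5-7 programmed in units of 16-bit words (everything shifted right by one, page-register bit 0 unused). The arithmetic on its own, without any port I/O (helper names invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Bytes written for a byte channel (0-3): page gets A23..A16,
     * then the LSB and MSB of A15..A0. */
    static void split_byte_channel(uint32_t phys, uint8_t *page,
                                   uint8_t *lo, uint8_t *hi)
    {
        *page = (phys >> 16) & 0xff;
        *lo   = phys & 0xff;
        *hi   = (phys >> 8) & 0xff;
    }

    /* Word channels (5-7) transfer 16-bit words, so the address is programmed
     * shifted right by one and bit 0 of the page register is left clear. */
    static void split_word_channel(uint32_t phys, uint8_t *page,
                                   uint8_t *lo, uint8_t *hi)
    {
        *page = (phys >> 16) & 0xfe;
        *lo   = (phys >> 1) & 0xff;
        *hi   = (phys >> 9) & 0xff;
    }

    int main(void)
    {
        uint8_t page, lo, hi;

        split_byte_channel(0x00123456, &page, &lo, &hi);
        printf("ch0-3: page=%02x hi=%02x lo=%02x\n", page, hi, lo);

        split_word_channel(0x00123456, &page, &lo, &hi);
        printf("ch5-7: page=%02x hi=%02x lo=%02x\n", page, hi, lo);
        return 0;
    }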
- * - */ -#ifndef __E820_HEADER -#define __E820_HEADER - -#define E820MAP 0x2d0 /* our map */ -#define E820MAX 128 /* number of entries in E820MAP */ -#define E820NR 0x1e8 /* # entries in E820MAP */ - -#define E820_RAM 1 -#define E820_RESERVED 2 -#define E820_ACPI 3 -#define E820_NVS 4 - -#define HIGH_MEMORY (1024*1024) - -#ifndef __ASSEMBLY__ - -struct e820entry { - u64 addr; /* start of memory segment */ - u64 size; /* size of memory segment */ - u32 type; /* type of memory segment */ -} __attribute__((packed)); - -struct e820map { - u32 nr_map; - struct e820entry map[E820MAX]; -}; - -extern struct e820map e820; - -extern int e820_all_mapped(unsigned long start, unsigned long end, - unsigned type); -extern int e820_any_mapped(u64 start, u64 end, unsigned type); -extern void find_max_pfn(void); -extern void register_bootmem_low_pages(unsigned long max_low_pfn); -extern void e820_register_memory(void); -extern void limit_regions(unsigned long long size); -extern void print_memory_map(char *who); - -#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION) -extern void e820_mark_nosave_regions(void); -#else -static inline void e820_mark_nosave_regions(void) -{ -} -#endif - -#endif/*!__ASSEMBLY__*/ - -#endif/*__E820_HEADER*/ diff --git a/include/asm-i386/edac.h b/include/asm-i386/edac.h deleted file mode 100644 index 3e7dd0ab68ce..000000000000 --- a/include/asm-i386/edac.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef ASM_EDAC_H -#define ASM_EDAC_H - -/* ECC atomic, DMA, SMP and interrupt safe scrub function */ - -static __inline__ void atomic_scrub(void *va, u32 size) -{ - unsigned long *virt_addr = va; - u32 i; - - for (i = 0; i < size / 4; i++, virt_addr++) - /* Very carefully read and write to memory atomically - * so we are interrupt, DMA and SMP safe. - */ - __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr)); -} - -#endif diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h deleted file mode 100644 index b32df3a332da..000000000000 --- a/include/asm-i386/elf.h +++ /dev/null @@ -1,163 +0,0 @@ -#ifndef __ASMi386_ELF_H -#define __ASMi386_ELF_H - -/* - * ELF register definitions.. - */ - -#include <asm/ptrace.h> -#include <asm/user.h> -#include <asm/auxvec.h> - -#define R_386_NONE 0 -#define R_386_32 1 -#define R_386_PC32 2 -#define R_386_GOT32 3 -#define R_386_PLT32 4 -#define R_386_COPY 5 -#define R_386_GLOB_DAT 6 -#define R_386_JMP_SLOT 7 -#define R_386_RELATIVE 8 -#define R_386_GOTOFF 9 -#define R_386_GOTPC 10 -#define R_386_NUM 11 - -typedef unsigned long elf_greg_t; - -#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t)) -typedef elf_greg_t elf_gregset_t[ELF_NGREG]; - -typedef struct user_i387_struct elf_fpregset_t; -typedef struct user_fxsr_struct elf_fpxregset_t; - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) \ - (((x)->e_machine == EM_386) || ((x)->e_machine == EM_486)) - -/* - * These are used to set parameters in the core dumps. - */ -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2LSB -#define ELF_ARCH EM_386 - -#ifdef __KERNEL__ - -#include <asm/processor.h> -#include <asm/system.h> /* for savesegment */ -#include <asm/desc.h> - -/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx - contains a pointer to a function which might be registered using `atexit'. - This provides a mean for the dynamic linker to call DT_FINI functions for - shared libraries that have been loaded before the code runs. - - A value of 0 tells we have no such handler. 
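Aside on the e820.h definitions above: the BIOS hands over an array of (addr, size, type) entries and the kernel walks it to decide what counts as usable RAM. A self-contained sketch with a made-up three-entry map:

    #include <stdint.h>
    #include <stdio.h>

    #define E820_RAM      1
    #define E820_RESERVED 2

    struct e820entry {
        uint64_t addr;   /* start of memory segment */
        uint64_t size;   /* size of memory segment  */
        uint32_t type;   /* E820_* type             */
    } __attribute__((packed));

    int main(void)
    {
        /* Made-up map: 640 KiB of low RAM, a reserved hole, RAM above 1 MiB. */
        struct e820entry map[] = {
            { 0x00000000, 0x0009fc00, E820_RAM      },
            { 0x0009fc00, 0x00060400, E820_RESERVED },
            { 0x00100000, 0x07f00000, E820_RAM      },
        };
        uint64_t ram = 0;

        for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
            if (map[i].type == E820_RAM)
                ram += map[i].size;

        printf("usable RAM: %llu bytes\n", (unsigned long long)ram);
        return 0;
    }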
- - We might as well make sure everything else is cleared too (except for %esp), - just to make things more deterministic. - */ -#define ELF_PLAT_INIT(_r, load_addr) do { \ - _r->ebx = 0; _r->ecx = 0; _r->edx = 0; \ - _r->esi = 0; _r->edi = 0; _r->ebp = 0; \ - _r->eax = 0; \ -} while (0) - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) - -/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is - now struct_user_regs, they are different) */ - -#define ELF_CORE_COPY_REGS(pr_reg, regs) \ - pr_reg[0] = regs->ebx; \ - pr_reg[1] = regs->ecx; \ - pr_reg[2] = regs->edx; \ - pr_reg[3] = regs->esi; \ - pr_reg[4] = regs->edi; \ - pr_reg[5] = regs->ebp; \ - pr_reg[6] = regs->eax; \ - pr_reg[7] = regs->xds & 0xffff; \ - pr_reg[8] = regs->xes & 0xffff; \ - pr_reg[9] = regs->xfs & 0xffff; \ - savesegment(gs,pr_reg[10]); \ - pr_reg[11] = regs->orig_eax; \ - pr_reg[12] = regs->eip; \ - pr_reg[13] = regs->xcs & 0xffff; \ - pr_reg[14] = regs->eflags; \ - pr_reg[15] = regs->esp; \ - pr_reg[16] = regs->xss & 0xffff; - -/* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This could be done in user space, - but it's not easy, and we've already done it here. */ - -#define ELF_HWCAP (boot_cpu_data.x86_capability[0]) - -/* This yields a string that ld.so will use to load implementation - specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. - - For the moment, we have only optimizations for the Intel generations, - but that could change... */ - -#define ELF_PLATFORM (utsname()->machine) - -#define SET_PERSONALITY(ex, ibcs2) do { } while (0) - -/* - * An executable for which elf_read_implies_exec() returns TRUE will - * have the READ_IMPLIES_EXEC personality flag set automatically. 
- */ -#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) - -struct task_struct; - -extern int dump_task_regs (struct task_struct *, elf_gregset_t *); -extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); -extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct *); - -#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) -#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) -#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs) - -#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO)) -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) -#define VDSO_PRELINK 0 - -#define VDSO_SYM(x) \ - (VDSO_CURRENT_BASE + (unsigned long)(x) - VDSO_PRELINK) - -#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE) -#define VDSO_EHDR ((const struct elfhdr *) VDSO_CURRENT_BASE) - -extern void __kernel_vsyscall; - -#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall) - -struct linux_binprm; - -#define ARCH_HAS_SETUP_ADDITIONAL_PAGES -extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); - -extern unsigned int vdso_enabled; - -#define ARCH_DLINFO \ -do if (vdso_enabled) { \ - NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \ - NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \ -} while (0) - -#endif - -#endif diff --git a/include/asm-i386/emergency-restart.h b/include/asm-i386/emergency-restart.h deleted file mode 100644 index 680c39563345..000000000000 --- a/include/asm-i386/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -extern void machine_emergency_restart(void); - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h deleted file mode 100644 index 969b34374728..000000000000 --- a/include/asm-i386/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _I386_ERRNO_H -#define _I386_ERRNO_H - -#include <asm-generic/errno.h> - -#endif diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h deleted file mode 100644 index d1c6297d4a61..000000000000 --- a/include/asm-i386/fb.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include <linux/fb.h> -#include <linux/fs.h> -#include <asm/page.h> - -extern int fb_is_primary_device(struct fb_info *info); - -static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, - unsigned long off) -{ - if (boot_cpu_data.x86 > 3) - pgprot_val(vma->vm_page_prot) |= _PAGE_PCD; -} - -#endif /* _ASM_FB_H_ */ diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h deleted file mode 100644 index 46ab12db5739..000000000000 --- a/include/asm-i386/fcntl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/fcntl.h> diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h deleted file mode 100644 index 249e753ac805..000000000000 --- a/include/asm-i386/fixmap.h +++ /dev/null @@ -1,157 +0,0 @@ -/* - * fixmap.h: compile-time virtual memory allocation - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998 Ingo Molnar - * - * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 - */ - -#ifndef _ASM_FIXMAP_H -#define _ASM_FIXMAP_H - - -/* used by vmalloc.c, vsyscall.lds.S. - * - * Leave one empty page between vmalloc'ed areas and - * the start of the fixmap. 
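Aside on the elf.h block above: ELF_HWCAP becomes the AT_HWCAP auxv entry, ELF_PLATFORM becomes AT_PLATFORM, and the ARCH_DLINFO block exports the vDSO address as AT_SYSINFO_EHDR. From userspace those values can be read back with getauxval() (glibc 2.16 and later); for example:

    #include <stdio.h>
    #include <sys/auxv.h>   /* getauxval(), AT_HWCAP, AT_SYSINFO_EHDR, AT_PLATFORM */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);
        unsigned long vdso  = getauxval(AT_SYSINFO_EHDR);   /* 0 if no vDSO */
        const char *plat    = (const char *)getauxval(AT_PLATFORM);

        /* On 32-bit x86 of this era, AT_HWCAP is x86_capability[0], i.e. the
         * CPUID level 1 edx word, and AT_SYSINFO_EHDR points at the vDSO. */
        printf("AT_HWCAP        = %#lx\n", hwcap);
        printf("AT_SYSINFO_EHDR = %#lx\n", vdso);
        printf("AT_PLATFORM     = %s\n", plat ? plat : "(none)");
        return 0;
    }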
- */ -extern unsigned long __FIXADDR_TOP; -#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO) -#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1) - -#ifndef __ASSEMBLY__ -#include <linux/kernel.h> -#include <asm/acpi.h> -#include <asm/apicdef.h> -#include <asm/page.h> -#ifdef CONFIG_HIGHMEM -#include <linux/threads.h> -#include <asm/kmap_types.h> -#endif - -/* - * Here we define all the compile-time 'special' virtual - * addresses. The point is to have a constant address at - * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses - * from the end of virtual memory (0xfffff000) backwards. - * Also this lets us do fail-safe vmalloc(), we - * can guarantee that these special addresses and - * vmalloc()-ed addresses never overlap. - * - * these 'compile-time allocated' memory buffers are - * fixed-size 4k pages. (or larger if used with an increment - * highger than 1) use fixmap_set(idx,phys) to associate - * physical memory with fixmap indices. - * - * TLB entries of such buffers will not be flushed across - * task switches. - */ -enum fixed_addresses { - FIX_HOLE, - FIX_VDSO, - FIX_DBGP_BASE, - FIX_EARLYCON_MEM_BASE, -#ifdef CONFIG_X86_LOCAL_APIC - FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */ -#endif -#ifdef CONFIG_X86_IO_APIC - FIX_IO_APIC_BASE_0, - FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1, -#endif -#ifdef CONFIG_X86_VISWS_APIC - FIX_CO_CPU, /* Cobalt timer */ - FIX_CO_APIC, /* Cobalt APIC Redirection Table */ - FIX_LI_PCIA, /* Lithium PCI Bridge A */ - FIX_LI_PCIB, /* Lithium PCI Bridge B */ -#endif -#ifdef CONFIG_X86_F00F_BUG - FIX_F00F_IDT, /* Virtual mapping for IDT */ -#endif -#ifdef CONFIG_X86_CYCLONE_TIMER - FIX_CYCLONE_TIMER, /*cyclone timer register*/ -#endif -#ifdef CONFIG_HIGHMEM - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, -#endif -#ifdef CONFIG_ACPI - FIX_ACPI_BEGIN, - FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1, -#endif -#ifdef CONFIG_PCI_MMCONFIG - FIX_PCIE_MCFG, -#endif -#ifdef CONFIG_PARAVIRT - FIX_PARAVIRT_BOOTMAP, -#endif - __end_of_permanent_fixed_addresses, - /* temporary boot-time mappings, used before ioremap() is functional */ -#define NR_FIX_BTMAPS 16 - FIX_BTMAP_END = __end_of_permanent_fixed_addresses, - FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, - FIX_WP_TEST, - __end_of_fixed_addresses -}; - -extern void __set_fixmap (enum fixed_addresses idx, - unsigned long phys, pgprot_t flags); -extern void reserve_top_address(unsigned long reserve); - -#define set_fixmap(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL) -/* - * Some hardware wants to get fixmapped without caching. - */ -#define set_fixmap_nocache(idx, phys) \ - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) - -#define clear_fixmap(idx) \ - __set_fixmap(idx, 0, __pgprot(0)) - -#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP) - -#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE) -#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE) - -#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) -#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) - -extern void __this_fixmap_does_not_exist(void); - -/* - * 'index to address' translation. 
If anyone tries to use the idx - * directly without tranlation, we catch the bug with a NULL-deference - * kernel oops. Illegal ranges of incoming indices are caught too. - */ -static __always_inline unsigned long fix_to_virt(const unsigned int idx) -{ - /* - * this branch gets completely eliminated after inlining, - * except when someone tries to use fixaddr indices in an - * illegal way. (such as mixing up address types or using - * out-of-range indices). - * - * If it doesn't get removed, the linker will complain - * loudly with a reasonably clear error message.. - */ - if (idx >= __end_of_fixed_addresses) - __this_fixmap_does_not_exist(); - - return __fix_to_virt(idx); -} - -static inline unsigned long virt_to_fix(const unsigned long vaddr) -{ - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); - return __virt_to_fix(vaddr); -} - -#endif /* !__ASSEMBLY__ */ -#endif diff --git a/include/asm-i386/floppy.h b/include/asm-i386/floppy.h deleted file mode 100644 index 44ef2f55a8e9..000000000000 --- a/include/asm-i386/floppy.h +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Architecture specific parts of the Floppy driver - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1995 - */ -#ifndef __ASM_I386_FLOPPY_H -#define __ASM_I386_FLOPPY_H - -#include <linux/vmalloc.h> - - -/* - * The DMA channel used by the floppy controller cannot access data at - * addresses >= 16MB - * - * Went back to the 1MB limit, as some people had problems with the floppy - * driver otherwise. It doesn't matter much for performance anyway, as most - * floppy accesses go through the track buffer. - */ -#define _CROSS_64KB(a,s,vdma) \ -(!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) - -#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) - - -#define SW fd_routine[use_virtual_dma&1] -#define CSW fd_routine[can_use_virtual_dma & 1] - - -#define fd_inb(port) inb_p(port) -#define fd_outb(value,port) outb_p(value,port) - -#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy") -#define fd_free_dma() CSW._free_dma(FLOPPY_DMA) -#define fd_enable_irq() enable_irq(FLOPPY_IRQ) -#define fd_disable_irq() disable_irq(FLOPPY_IRQ) -#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) -#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA) -#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size) -#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io) - -#define FLOPPY_CAN_FALLBACK_ON_NODMA - -static int virtual_dma_count; -static int virtual_dma_residue; -static char *virtual_dma_addr; -static int virtual_dma_mode; -static int doing_pdma; - -static irqreturn_t floppy_hardint(int irq, void *dev_id) -{ - register unsigned char st; - -#undef TRACE_FLPY_INT - -#ifdef TRACE_FLPY_INT - static int calls=0; - static int bytes=0; - static int dma_wait=0; -#endif - if (!doing_pdma) - return floppy_interrupt(irq, dev_id); - -#ifdef TRACE_FLPY_INT - if(!calls) - bytes = virtual_dma_count; -#endif - - { - register int lcount; - register char *lptr; - - st = 1; - for(lcount=virtual_dma_count, lptr=virtual_dma_addr; - lcount; lcount--, lptr++) { - st=inb(virtual_dma_port+4) & 0xa0 ; - if(st != 0xa0) - break; - if(virtual_dma_mode) - outb_p(*lptr, virtual_dma_port+5); - else - *lptr = inb_p(virtual_dma_port+5); - } - virtual_dma_count = lcount; - virtual_dma_addr = lptr; - st = inb(virtual_dma_port+4); - } - -#ifdef TRACE_FLPY_INT - 
calls++; -#endif - if(st == 0x20) - return IRQ_HANDLED; - if(!(st & 0x20)) { - virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; -#ifdef TRACE_FLPY_INT - printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n", - virtual_dma_count, virtual_dma_residue, calls, bytes, - dma_wait); - calls = 0; - dma_wait=0; -#endif - doing_pdma = 0; - floppy_interrupt(irq, dev_id); - return IRQ_HANDLED; - } -#ifdef TRACE_FLPY_INT - if(!virtual_dma_count) - dma_wait++; -#endif - return IRQ_HANDLED; -} - -static void fd_disable_dma(void) -{ - if(! (can_use_virtual_dma & 1)) - disable_dma(FLOPPY_DMA); - doing_pdma = 0; - virtual_dma_residue += virtual_dma_count; - virtual_dma_count=0; -} - -static int vdma_request_dma(unsigned int dmanr, const char * device_id) -{ - return 0; -} - -static void vdma_nop(unsigned int dummy) -{ -} - - -static int vdma_get_dma_residue(unsigned int dummy) -{ - return virtual_dma_count + virtual_dma_residue; -} - - -static int fd_request_irq(void) -{ - if(can_use_virtual_dma) - return request_irq(FLOPPY_IRQ, floppy_hardint, - IRQF_DISABLED, "floppy", NULL); - else - return request_irq(FLOPPY_IRQ, floppy_interrupt, - IRQF_DISABLED, "floppy", NULL); - -} - -static unsigned long dma_mem_alloc(unsigned long size) -{ - return __get_dma_pages(GFP_KERNEL,get_order(size)); -} - - -static unsigned long vdma_mem_alloc(unsigned long size) -{ - return (unsigned long) vmalloc(size); - -} - -#define nodma_mem_alloc(size) vdma_mem_alloc(size) - -static void _fd_dma_mem_free(unsigned long addr, unsigned long size) -{ - if((unsigned int) addr >= (unsigned int) high_memory) - vfree((void *)addr); - else - free_pages(addr, get_order(size)); -} - -#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size) - -static void _fd_chose_dma_mode(char *addr, unsigned long size) -{ - if(can_use_virtual_dma == 2) { - if((unsigned int) addr >= (unsigned int) high_memory || - isa_virt_to_bus(addr) >= 0x1000000 || - _CROSS_64KB(addr, size, 0)) - use_virtual_dma = 1; - else - use_virtual_dma = 0; - } else { - use_virtual_dma = can_use_virtual_dma & 1; - } -} - -#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size) - - -static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io) -{ - doing_pdma = 1; - virtual_dma_port = io; - virtual_dma_mode = (mode == DMA_MODE_WRITE); - virtual_dma_addr = addr; - virtual_dma_count = size; - virtual_dma_residue = 0; - return 0; -} - -static int hard_dma_setup(char *addr, unsigned long size, int mode, int io) -{ -#ifdef FLOPPY_SANITY_CHECK - if (CROSS_64KB(addr, size)) { - printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size); - return -1; - } -#endif - /* actual, physical DMA */ - doing_pdma = 0; - clear_dma_ff(FLOPPY_DMA); - set_dma_mode(FLOPPY_DMA,mode); - set_dma_addr(FLOPPY_DMA,isa_virt_to_bus(addr)); - set_dma_count(FLOPPY_DMA,size); - enable_dma(FLOPPY_DMA); - return 0; -} - -static struct fd_routine_l { - int (*_request_dma)(unsigned int dmanr, const char * device_id); - void (*_free_dma)(unsigned int dmanr); - int (*_get_dma_residue)(unsigned int dummy); - unsigned long (*_dma_mem_alloc) (unsigned long size); - int (*_dma_setup)(char *addr, unsigned long size, int mode, int io); -} fd_routine[] = { - { - request_dma, - free_dma, - get_dma_residue, - dma_mem_alloc, - hard_dma_setup - }, - { - vdma_request_dma, - vdma_nop, - vdma_get_dma_residue, - vdma_mem_alloc, - vdma_dma_setup - } -}; - - -static int FDC1 = 0x3f0; -static int FDC2 = -1; - -/* - * Floppy types are stored in the rtc's CMOS RAM and so rtc_lock - 
* is needed to prevent corrupted CMOS RAM in case "insmod floppy" - * coincides with another rtc CMOS user. Paul G. - */ -#define FLOPPY0_TYPE ({ \ - unsigned long flags; \ - unsigned char val; \ - spin_lock_irqsave(&rtc_lock, flags); \ - val = (CMOS_READ(0x10) >> 4) & 15; \ - spin_unlock_irqrestore(&rtc_lock, flags); \ - val; \ -}) - -#define FLOPPY1_TYPE ({ \ - unsigned long flags; \ - unsigned char val; \ - spin_lock_irqsave(&rtc_lock, flags); \ - val = CMOS_READ(0x10) & 15; \ - spin_unlock_irqrestore(&rtc_lock, flags); \ - val; \ -}) - -#define N_FDC 2 -#define N_DRIVE 8 - -#define FLOPPY_MOTOR_MASK 0xf0 - -#define AUTO_DMA - -#define EXTRA_FLOPPY_PARAMS - -#endif /* __ASM_I386_FLOPPY_H */ diff --git a/include/asm-i386/frame.i b/include/asm-i386/frame.i deleted file mode 100644 index 03620251ae17..000000000000 --- a/include/asm-i386/frame.i +++ /dev/null @@ -1,23 +0,0 @@ -#include <asm/dwarf2.h> - -/* The annotation hides the frame from the unwinder and makes it look - like a ordinary ebp save/restore. This avoids some special cases for - frame pointer later */ -#ifdef CONFIG_FRAME_POINTER - .macro FRAME - pushl %ebp - CFI_ADJUST_CFA_OFFSET 4 - CFI_REL_OFFSET ebp,0 - movl %esp,%ebp - .endm - .macro ENDFRAME - popl %ebp - CFI_ADJUST_CFA_OFFSET -4 - CFI_RESTORE ebp - .endm -#else - .macro FRAME - .endm - .macro ENDFRAME - .endm -#endif diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h deleted file mode 100644 index 438ef0ec7101..000000000000 --- a/include/asm-i386/futex.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#ifdef __KERNEL__ - -#include <linux/futex.h> -#include <asm/errno.h> -#include <asm/system.h> -#include <asm/processor.h> -#include <asm/uaccess.h> - -#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: " insn "\n" \ -"2: .section .fixup,\"ax\"\n\ -3: mov %3, %1\n\ - jmp 2b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .long 1b,3b\n\ - .previous" \ - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ - : "i" (-EFAULT), "0" (oparg), "1" (0)) - -#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ - __asm__ __volatile ( \ -"1: movl %2, %0\n\ - movl %0, %3\n" \ - insn "\n" \ -"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\ - jnz 1b\n\ -3: .section .fixup,\"ax\"\n\ -4: mov %5, %1\n\ - jmp 3b\n\ - .previous\n\ - .section __ex_table,\"a\"\n\ - .align 8\n\ - .long 1b,4b,2b,4b\n\ - .previous" \ - : "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \ - "=&r" (tem) \ - : "r" (oparg), "i" (-EFAULT), "1" (0)) - -static inline int -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) -{ - int op = (encoded_op >> 28) & 7; - int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, ret, tem; - if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; - - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - pagefault_disable(); - - if (op == FUTEX_OP_SET) - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); - else { -#ifndef CONFIG_X86_BSWAP - if (boot_cpu_data.x86 == 3) - ret = -ENOSYS; - else -#endif - switch (op) { - case FUTEX_OP_ADD: - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, - oldval, uaddr, oparg); - break; - case FUTEX_OP_OR: - __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, - oparg); - break; - case FUTEX_OP_ANDN: - __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, - ~oparg); - break; - case FUTEX_OP_XOR: - __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, - oparg); - break; - default: - ret = -ENOSYS; - } - } - - pagefault_enable(); - - if (!ret) { - switch (cmp) { - case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; - case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; - case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; - case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; - case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; - case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; - default: ret = -ENOSYS; - } - } - return ret; -} - -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) -{ - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) - return -EFAULT; - - __asm__ __volatile__( - "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" - - "2: .section .fixup, \"ax\" \n" - "3: mov %2, %0 \n" - " jmp 2b \n" - " .previous \n" - - " .section __ex_table, \"a\" \n" - " .align 8 \n" - " .long 1b,3b \n" - " .previous \n" - - : "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "0" (oldval) - : "memory" - ); - - return oldval; -} - -#endif -#endif diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h deleted file mode 100644 index 33e3ffe1766c..000000000000 --- a/include/asm-i386/genapic.h +++ /dev/null @@ -1,127 +0,0 @@ -#ifndef _ASM_GENAPIC_H -#define _ASM_GENAPIC_H 1 - -#include <asm/mpspec.h> - -/* - * Generic APIC driver interface. - * - * An straight forward mapping of the APIC related parts of the - * x86 subarchitecture interface to a dynamic object. - * - * This is used by the "generic" x86 subarchitecture. - * - * Copyright 2003 Andi Kleen, SuSE Labs. 
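
The field extraction at the top of futex_atomic_op_inuser() above can be exercised in isolation. A stand-alone sketch that mirrors the same shift arithmetic, assuming the FUTEX_OP() macro and FUTEX_OP_* constants from <linux/futex.h>:

/* Sketch: unpack an encoded futex op the same way futex_atomic_op_inuser()
 * above does.  FUTEX_OP() packs: op<<28 | cmp<<24 | oparg<<12 | cmparg. */
#include <stdio.h>
#include <linux/futex.h>	/* FUTEX_OP(), FUTEX_OP_ADD, FUTEX_OP_CMP_GE */

int main(void)
{
	int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GE, 0);

	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extending shifts, as above */
	int cmparg = (encoded_op << 20) >> 20;

	/* Prints: op=1 cmp=3 oparg=1 cmparg=0 */
	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
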
- */ - -struct mpc_config_translation; -struct mpc_config_bus; -struct mp_config_table; -struct mpc_config_processor; - -struct genapic { - char *name; - int (*probe)(void); - - int (*apic_id_registered)(void); - cpumask_t (*target_cpus)(void); - int int_delivery_mode; - int int_dest_mode; - int ESR_DISABLE; - int apic_destination_logical; - unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); - int no_balance_irq; - int no_ioapic_check; - void (*init_apic_ldr)(void); - physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); - - void (*setup_apic_routing)(void); - int (*multi_timer_check)(int apic, int irq); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); - int (*cpu_present_to_apicid)(int mps_cpu); - physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - int (*mpc_apic_id)(struct mpc_config_processor *m, - struct mpc_config_translation *t); - void (*setup_portio_remap)(void); - int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); - void (*enable_apic_mode)(void); - u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); - - /* mpparse */ - void (*mpc_oem_bus_info)(struct mpc_config_bus *, char *, - struct mpc_config_translation *); - void (*mpc_oem_pci_bus)(struct mpc_config_bus *, - struct mpc_config_translation *); - - /* When one of the next two hooks returns 1 the genapic - is switched to this. Essentially they are additional probe - functions. */ - int (*mps_oem_check)(struct mp_config_table *mpc, char *oem, - char *productid); - int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); - - unsigned (*get_apic_id)(unsigned long x); - unsigned long apic_id_mask; - unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); - -#ifdef CONFIG_SMP - /* ipi */ - void (*send_IPI_mask)(cpumask_t mask, int vector); - void (*send_IPI_allbutself)(int vector); - void (*send_IPI_all)(int vector); -#endif -}; - -#define APICFUNC(x) .x = x, - -/* More functions could be probably marked IPIFUNC and save some space - in UP GENERICARCH kernels, but I don't have the nerve right now - to untangle this mess. 
-AK */ -#ifdef CONFIG_SMP -#define IPIFUNC(x) APICFUNC(x) -#else -#define IPIFUNC(x) -#endif - -#define APIC_INIT(aname, aprobe) { \ - .name = aname, \ - .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered) \ - APICFUNC(target_cpus) \ - APICFUNC(check_apicid_used) \ - APICFUNC(check_apicid_present) \ - APICFUNC(init_apic_ldr) \ - APICFUNC(ioapic_phys_id_map) \ - APICFUNC(setup_apic_routing) \ - APICFUNC(multi_timer_check) \ - APICFUNC(apicid_to_node) \ - APICFUNC(cpu_to_logical_apicid) \ - APICFUNC(cpu_present_to_apicid) \ - APICFUNC(apicid_to_cpu_present) \ - APICFUNC(mpc_apic_id) \ - APICFUNC(setup_portio_remap) \ - APICFUNC(check_phys_apicid_present) \ - APICFUNC(mpc_oem_bus_info) \ - APICFUNC(mpc_oem_pci_bus) \ - APICFUNC(mps_oem_check) \ - APICFUNC(get_apic_id) \ - .apic_id_mask = APIC_ID_MASK, \ - APICFUNC(cpu_mask_to_apicid) \ - APICFUNC(acpi_madt_oem_check) \ - IPIFUNC(send_IPI_mask) \ - IPIFUNC(send_IPI_allbutself) \ - IPIFUNC(send_IPI_all) \ - APICFUNC(enable_apic_mode) \ - APICFUNC(phys_pkg_id) \ - } - -extern struct genapic *genapic; - -#endif diff --git a/include/asm-i386/geode.h b/include/asm-i386/geode.h deleted file mode 100644 index 6da4bbbea3dc..000000000000 --- a/include/asm-i386/geode.h +++ /dev/null @@ -1,159 +0,0 @@ -/* - * AMD Geode definitions - * Copyright (C) 2006, Advanced Micro Devices, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public License - * as published by the Free Software Foundation. - */ - -#ifndef _ASM_GEODE_H_ -#define _ASM_GEODE_H_ - -#include <asm/processor.h> -#include <linux/io.h> - -/* Generic southbridge functions */ - -#define GEODE_DEV_PMS 0 -#define GEODE_DEV_ACPI 1 -#define GEODE_DEV_GPIO 2 -#define GEODE_DEV_MFGPT 3 - -extern int geode_get_dev_base(unsigned int dev); - -/* Useful macros */ -#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS) -#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI) -#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO) -#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT) - -/* MSRS */ - -#define GX_GLCP_SYS_RSTPLL 0x4C000014 - -#define MSR_LBAR_SMB 0x5140000B -#define MSR_LBAR_GPIO 0x5140000C -#define MSR_LBAR_MFGPT 0x5140000D -#define MSR_LBAR_ACPI 0x5140000E -#define MSR_LBAR_PMS 0x5140000F - -#define MSR_PIC_YSEL_LOW 0x51400020 -#define MSR_PIC_YSEL_HIGH 0x51400021 -#define MSR_PIC_ZSEL_LOW 0x51400022 -#define MSR_PIC_ZSEL_HIGH 0x51400023 - -#define MFGPT_IRQ_MSR 0x51400028 -#define MFGPT_NR_MSR 0x51400029 - -/* Resource Sizes */ - -#define LBAR_GPIO_SIZE 0xFF -#define LBAR_MFGPT_SIZE 0x40 -#define LBAR_ACPI_SIZE 0x40 -#define LBAR_PMS_SIZE 0x80 - -/* ACPI registers (PMS block) */ - -/* - * PM1_EN is only valid when VSA is enabled for 16 bit reads. 
- * When VSA is not enabled, *always* read both PM1_STS and PM1_EN - * with a 32 bit read at offset 0x0 - */ - -#define PM1_STS 0x00 -#define PM1_EN 0x02 -#define PM1_CNT 0x08 -#define PM2_CNT 0x0C -#define PM_TMR 0x10 -#define PM_GPE0_STS 0x18 -#define PM_GPE0_EN 0x1C - -/* PMC registers (PMS block) */ - -#define PM_SSD 0x00 -#define PM_SCXA 0x04 -#define PM_SCYA 0x08 -#define PM_OUT_SLPCTL 0x0C -#define PM_SCLK 0x10 -#define PM_SED 0x1 -#define PM_SCXD 0x18 -#define PM_SCYD 0x1C -#define PM_IN_SLPCTL 0x20 -#define PM_WKD 0x30 -#define PM_WKXD 0x34 -#define PM_RD 0x38 -#define PM_WKXA 0x3C -#define PM_FSD 0x40 -#define PM_TSD 0x44 -#define PM_PSD 0x48 -#define PM_NWKD 0x4C -#define PM_AWKD 0x50 -#define PM_SSC 0x54 - -/* GPIO */ - -#define GPIO_OUTPUT_VAL 0x00 -#define GPIO_OUTPUT_ENABLE 0x04 -#define GPIO_OUTPUT_OPEN_DRAIN 0x08 -#define GPIO_OUTPUT_INVERT 0x0C -#define GPIO_OUTPUT_AUX1 0x10 -#define GPIO_OUTPUT_AUX2 0x14 -#define GPIO_PULL_UP 0x18 -#define GPIO_PULL_DOWN 0x1C -#define GPIO_INPUT_ENABLE 0x20 -#define GPIO_INPUT_INVERT 0x24 -#define GPIO_INPUT_FILTER 0x28 -#define GPIO_INPUT_EVENT_COUNT 0x2C -#define GPIO_READ_BACK 0x30 -#define GPIO_INPUT_AUX1 0x34 -#define GPIO_EVENTS_ENABLE 0x38 -#define GPIO_LOCK_ENABLE 0x3C -#define GPIO_POSITIVE_EDGE_EN 0x40 -#define GPIO_NEGATIVE_EDGE_EN 0x44 -#define GPIO_POSITIVE_EDGE_STS 0x48 -#define GPIO_NEGATIVE_EDGE_STS 0x4C - -#define GPIO_MAP_X 0xE0 -#define GPIO_MAP_Y 0xE4 -#define GPIO_MAP_Z 0xE8 -#define GPIO_MAP_W 0xEC - -extern void geode_gpio_set(unsigned int, unsigned int); -extern void geode_gpio_clear(unsigned int, unsigned int); -extern int geode_gpio_isset(unsigned int, unsigned int); -extern void geode_gpio_setup_event(unsigned int, int, int); -extern void geode_gpio_set_irq(unsigned int, unsigned int); - -static inline void geode_gpio_event_irq(unsigned int gpio, int pair) -{ - geode_gpio_setup_event(gpio, pair, 0); -} - -static inline void geode_gpio_event_pme(unsigned int gpio, int pair) -{ - geode_gpio_setup_event(gpio, pair, 1); -} - -/* Specific geode tests */ - -static inline int is_geode_gx(void) -{ - return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) && - (boot_cpu_data.x86 == 5) && - (boot_cpu_data.x86_model == 5)); -} - -static inline int is_geode_lx(void) -{ - return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && - (boot_cpu_data.x86 == 5) && - (boot_cpu_data.x86_model == 10)); -} - -static inline int is_geode(void) -{ - return (is_geode_gx() || is_geode_lx()); -} - -#endif diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h deleted file mode 100644 index 0e358dc405f8..000000000000 --- a/include/asm-i386/hardirq.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __ASM_HARDIRQ_H -#define __ASM_HARDIRQ_H - -#include <linux/threads.h> -#include <linux/irq.h> - -typedef struct { - unsigned int __softirq_pending; - unsigned long idle_timestamp; - unsigned int __nmi_count; /* arch dependent */ - unsigned int apic_timer_irqs; /* arch dependent */ -} ____cacheline_aligned irq_cpustat_t; - -DECLARE_PER_CPU(irq_cpustat_t, irq_stat); -extern irq_cpustat_t irq_stat[]; - -#define __ARCH_IRQ_STAT -#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) - -void ack_bad_irq(unsigned int irq); -#include <linux/irq_cpustat.h> - -#endif /* __ASM_HARDIRQ_H */ diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h deleted file mode 100644 index 13cdcd66fff2..000000000000 --- a/include/asm-i386/highmem.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * highmem.h: virtual kernel memory mappings for high memory - * - 
* Used in CONFIG_HIGHMEM systems for memory pages which - * are not addressable by direct kernel virtual addresses. - * - * Copyright (C) 1999 Gerhard Wichert, Siemens AG - * Gerhard.Wichert@pdb.siemens.de - * - * - * Redesigned the x86 32-bit VM architecture to deal with - * up to 16 Terabyte physical memory. With current x86 CPUs - * we now support up to 64 Gigabytes physical RAM. - * - * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> - */ - -#ifndef _ASM_HIGHMEM_H -#define _ASM_HIGHMEM_H - -#ifdef __KERNEL__ - -#include <linux/interrupt.h> -#include <linux/threads.h> -#include <asm/kmap_types.h> -#include <asm/tlbflush.h> -#include <asm/paravirt.h> - -/* declarations for highmem.c */ -extern unsigned long highstart_pfn, highend_pfn; - -extern pte_t *kmap_pte; -extern pgprot_t kmap_prot; -extern pte_t *pkmap_page_table; - -/* - * Right now we initialize only a single pte table. It can be extended - * easily, subsequent pte tables have to be allocated in one physical - * chunk of RAM. - */ -#ifdef CONFIG_X86_PAE -#define LAST_PKMAP 512 -#else -#define LAST_PKMAP 1024 -#endif -/* - * Ordering is: - * - * FIXADDR_TOP - * fixed_addresses - * FIXADDR_START - * temp fixed addresses - * FIXADDR_BOOT_START - * Persistent kmap area - * PKMAP_BASE - * VMALLOC_END - * Vmalloc area - * VMALLOC_START - * high_memory - */ -#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK ) -#define LAST_PKMAP_MASK (LAST_PKMAP-1) -#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) -#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) - -extern void * FASTCALL(kmap_high(struct page *page)); -extern void FASTCALL(kunmap_high(struct page *page)); - -void *kmap(struct page *page); -void kunmap(struct page *page); -void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); -void *kmap_atomic(struct page *page, enum km_type type); -void kunmap_atomic(void *kvaddr, enum km_type type); -void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); -struct page *kmap_atomic_to_page(void *ptr); - -#ifndef CONFIG_PARAVIRT -#define kmap_atomic_pte(page, type) kmap_atomic(page, type) -#endif - -#define flush_cache_kmaps() do { } while (0) - -#endif /* __KERNEL__ */ - -#endif /* _ASM_HIGHMEM_H */ diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h deleted file mode 100644 index c82dc7ed96b3..000000000000 --- a/include/asm-i386/hpet.h +++ /dev/null @@ -1,90 +0,0 @@ - -#ifndef _I386_HPET_H -#define _I386_HPET_H - -#ifdef CONFIG_HPET_TIMER - -/* - * Documentation on HPET can be found at: - * http://www.intel.com/ial/home/sp/pcmmspec.htm - * ftp://download.intel.com/ial/home/sp/mmts098.pdf - */ - -#define HPET_MMAP_SIZE 1024 - -#define HPET_ID 0x000 -#define HPET_PERIOD 0x004 -#define HPET_CFG 0x010 -#define HPET_STATUS 0x020 -#define HPET_COUNTER 0x0f0 -#define HPET_T0_CFG 0x100 -#define HPET_T0_CMP 0x108 -#define HPET_T0_ROUTE 0x110 -#define HPET_T1_CFG 0x120 -#define HPET_T1_CMP 0x128 -#define HPET_T1_ROUTE 0x130 -#define HPET_T2_CFG 0x140 -#define HPET_T2_CMP 0x148 -#define HPET_T2_ROUTE 0x150 - -#define HPET_ID_REV 0x000000ff -#define HPET_ID_NUMBER 0x00001f00 -#define HPET_ID_64BIT 0x00002000 -#define HPET_ID_LEGSUP 0x00008000 -#define HPET_ID_VENDOR 0xffff0000 -#define HPET_ID_NUMBER_SHIFT 8 -#define HPET_ID_VENDOR_SHIFT 16 - -#define HPET_ID_VENDOR_8086 0x8086 - -#define HPET_CFG_ENABLE 0x001 -#define HPET_CFG_LEGACY 0x002 -#define HPET_LEGACY_8254 2 -#define HPET_LEGACY_RTC 8 - -#define HPET_TN_LEVEL 0x0002 -#define HPET_TN_ENABLE 0x0004 -#define 
HPET_TN_PERIODIC 0x0008 -#define HPET_TN_PERIODIC_CAP 0x0010 -#define HPET_TN_64BIT_CAP 0x0020 -#define HPET_TN_SETVAL 0x0040 -#define HPET_TN_32BIT 0x0100 -#define HPET_TN_ROUTE 0x3e00 -#define HPET_TN_FSB 0x4000 -#define HPET_TN_FSB_CAP 0x8000 -#define HPET_TN_ROUTE_SHIFT 9 - -/* Max HPET Period is 10^8 femto sec as in HPET spec */ -#define HPET_MAX_PERIOD 100000000UL -/* - * Min HPET period is 10^5 femto sec just for safety. If it is less than this, - * then 32 bit HPET counter wrapsaround in less than 0.5 sec. - */ -#define HPET_MIN_PERIOD 100000UL - -/* hpet memory map physical address */ -extern unsigned long hpet_address; -extern int is_hpet_enabled(void); -extern int hpet_enable(void); - -#ifdef CONFIG_HPET_EMULATE_RTC - -#include <linux/interrupt.h> - -extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask); -extern int hpet_set_rtc_irq_bit(unsigned long bit_mask); -extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, - unsigned char sec); -extern int hpet_set_periodic_freq(unsigned long freq); -extern int hpet_rtc_dropped_irq(void); -extern int hpet_rtc_timer_init(void); -extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id); - -#endif /* CONFIG_HPET_EMULATE_RTC */ - -#else - -static inline int hpet_enable(void) { return 0; } - -#endif /* CONFIG_HPET_TIMER */ -#endif /* _I386_HPET_H */ diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h deleted file mode 100644 index 0bedbdf5e907..000000000000 --- a/include/asm-i386/hw_irq.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef _ASM_HW_IRQ_H -#define _ASM_HW_IRQ_H - -/* - * linux/include/asm/hw_irq.h - * - * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar - * - * moved some of the old arch/i386/kernel/irq.h to here. VY - * - * IRQ/IPI changes taken from work by Thomas Radke - * <tomsoft@informatik.tu-chemnitz.de> - */ - -#include <linux/profile.h> -#include <asm/atomic.h> -#include <asm/irq.h> -#include <asm/sections.h> - -#define NMI_VECTOR 0x02 - -/* - * Various low-level irq details needed by irq.c, process.c, - * time.c, io_apic.c and smp.c - * - * Interrupt entry/exit code at both C and assembly level - */ - -extern void (*interrupt[NR_IRQS])(void); - -#ifdef CONFIG_SMP -fastcall void reschedule_interrupt(void); -fastcall void invalidate_interrupt(void); -fastcall void call_function_interrupt(void); -#endif - -#ifdef CONFIG_X86_LOCAL_APIC -fastcall void apic_timer_interrupt(void); -fastcall void error_interrupt(void); -fastcall void spurious_interrupt(void); -fastcall void thermal_interrupt(void); -#define platform_legacy_irq(irq) ((irq) < 16) -#endif - -void disable_8259A_irq(unsigned int irq); -void enable_8259A_irq(unsigned int irq); -int i8259A_irq_pending(unsigned int irq); -void make_8259A_irq(unsigned int irq); -void init_8259A(int aeoi); -void FASTCALL(send_IPI_self(int vector)); -void init_VISWS_APIC_irqs(void); -void setup_IO_APIC(void); -void disable_IO_APIC(void); -void print_IO_APIC(void); -int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); -void send_IPI(int dest, int vector); -void setup_ioapic_dest(void); - -extern unsigned long io_apic_irqs; - -extern atomic_t irq_err_count; -extern atomic_t irq_mis_count; - -#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs)) - -#endif /* _ASM_HW_IRQ_H */ diff --git a/include/asm-i386/hypertransport.h b/include/asm-i386/hypertransport.h deleted file mode 100644 index c16c6ff4bdd7..000000000000 --- a/include/asm-i386/hypertransport.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef ASM_HYPERTRANSPORT_H -#define ASM_HYPERTRANSPORT_H - 
-/* - * Constants for x86 Hypertransport Interrupts. - */ - -#define HT_IRQ_LOW_BASE 0xf8000000 - -#define HT_IRQ_LOW_VECTOR_SHIFT 16 -#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000 -#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK) - -#define HT_IRQ_LOW_DEST_ID_SHIFT 8 -#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00 -#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK) - -#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000 -#define HT_IRQ_LOW_DM_LOGICAL 0x0000040 - -#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000 -#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020 - - -#define HT_IRQ_LOW_MT_FIXED 0x0000000 -#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004 -#define HT_IRQ_LOW_MT_SMI 0x0000008 -#define HT_IRQ_LOW_MT_NMI 0x000000c -#define HT_IRQ_LOW_MT_INIT 0x0000010 -#define HT_IRQ_LOW_MT_STARTUP 0x0000014 -#define HT_IRQ_LOW_MT_EXTINT 0x0000018 -#define HT_IRQ_LOW_MT_LINT1 0x000008c -#define HT_IRQ_LOW_MT_LINT0 0x0000098 - -#define HT_IRQ_LOW_IRQ_MASKED 0x0000001 - - -#define HT_IRQ_HIGH_DEST_ID_SHIFT 0 -#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff -#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK) - -#endif /* ASM_HYPERTRANSPORT_H */ diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h deleted file mode 100644 index cdd1e248e3b4..000000000000 --- a/include/asm-i386/i387.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * include/asm-i386/i387.h - * - * Copyright (C) 1994 Linus Torvalds - * - * Pentium III FXSR, SSE support - * General FPU state handling cleanups - * Gareth Hughes <gareth@valinux.com>, May 2000 - */ - -#ifndef __ASM_I386_I387_H -#define __ASM_I386_I387_H - -#include <linux/sched.h> -#include <linux/init.h> -#include <linux/kernel_stat.h> -#include <asm/processor.h> -#include <asm/sigcontext.h> -#include <asm/user.h> - -extern void mxcsr_feature_mask_init(void); -extern void init_fpu(struct task_struct *); - -/* - * FPU lazy state save handling... - */ - -/* - * The "nop" is needed to make the instructions the same - * length. - */ -#define restore_fpu(tsk) \ - alternative_input( \ - "nop ; frstor %1", \ - "fxrstor %1", \ - X86_FEATURE_FXSR, \ - "m" ((tsk)->thread.i387.fxsave)) - -extern void kernel_fpu_begin(void); -#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) - -/* We need a safe address that is cheap to find and that is already - in L1 during context switch. The best choices are unfortunately - different for UP and SMP */ -#ifdef CONFIG_SMP -#define safe_address (__per_cpu_offset[0]) -#else -#define safe_address (kstat_cpu(0).cpustat.user) -#endif - -/* - * These must be called with preempt disabled - */ -static inline void __save_init_fpu( struct task_struct *tsk ) -{ - /* Use more nops than strictly needed in case the compiler - varies code */ - alternative_input( - "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4, - "fxsave %[fx]\n" - "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:", - X86_FEATURE_FXSR, - [fx] "m" (tsk->thread.i387.fxsave), - [fsw] "m" (tsk->thread.i387.fxsave.swd) : "memory"); - /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception - is pending. Clear the x87 state here by setting it to fixed - values. 
safe_address is a random variable that should be in L1 */ - alternative_input( - GENERIC_NOP8 GENERIC_NOP2, - "emms\n\t" /* clear stack tags */ - "fildl %[addr]", /* set F?P to defined value */ - X86_FEATURE_FXSAVE_LEAK, - [addr] "m" (safe_address)); - task_thread_info(tsk)->status &= ~TS_USEDFPU; -} - -#define __unlazy_fpu( tsk ) do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) { \ - __save_init_fpu(tsk); \ - stts(); \ - } else \ - tsk->fpu_counter = 0; \ -} while (0) - -#define __clear_fpu( tsk ) \ -do { \ - if (task_thread_info(tsk)->status & TS_USEDFPU) { \ - asm volatile("fnclex ; fwait"); \ - task_thread_info(tsk)->status &= ~TS_USEDFPU; \ - stts(); \ - } \ -} while (0) - - -/* - * These disable preemption on their own and are safe - */ -static inline void save_init_fpu( struct task_struct *tsk ) -{ - preempt_disable(); - __save_init_fpu(tsk); - stts(); - preempt_enable(); -} - -#define unlazy_fpu( tsk ) do { \ - preempt_disable(); \ - __unlazy_fpu(tsk); \ - preempt_enable(); \ -} while (0) - -#define clear_fpu( tsk ) do { \ - preempt_disable(); \ - __clear_fpu( tsk ); \ - preempt_enable(); \ -} while (0) - -/* - * FPU state interaction... - */ -extern unsigned short get_fpu_cwd( struct task_struct *tsk ); -extern unsigned short get_fpu_swd( struct task_struct *tsk ); -extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); -extern asmlinkage void math_state_restore(void); - -/* - * Signal frame handlers... - */ -extern int save_i387( struct _fpstate __user *buf ); -extern int restore_i387( struct _fpstate __user *buf ); - -/* - * ptrace request handers... - */ -extern int get_fpregs( struct user_i387_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpregs( struct task_struct *tsk, - struct user_i387_struct __user *buf ); - -extern int get_fpxregs( struct user_fxsr_struct __user *buf, - struct task_struct *tsk ); -extern int set_fpxregs( struct task_struct *tsk, - struct user_fxsr_struct __user *buf ); - -/* - * FPU state for core dumps... 
- */ -extern int dump_fpu( struct pt_regs *regs, - struct user_i387_struct *fpu ); - -#endif /* __ASM_I386_I387_H */ diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h deleted file mode 100644 index 7577d058d86e..000000000000 --- a/include/asm-i386/i8253.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __ASM_I8253_H__ -#define __ASM_I8253_H__ - -#include <linux/clockchips.h> - -/* i8253A PIT registers */ -#define PIT_MODE 0x43 -#define PIT_CH0 0x40 -#define PIT_CH2 0x42 - -extern spinlock_t i8253_lock; - -extern struct clock_event_device *global_clock_event; - -extern void setup_pit_timer(void); - -#endif /* __ASM_I8253_H__ */ diff --git a/include/asm-i386/i8259.h b/include/asm-i386/i8259.h deleted file mode 100644 index 29d8f9a6b3fc..000000000000 --- a/include/asm-i386/i8259.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __ASM_I8259_H__ -#define __ASM_I8259_H__ - -extern unsigned int cached_irq_mask; - -#define __byte(x,y) (((unsigned char *) &(y))[x]) -#define cached_master_mask (__byte(0, cached_irq_mask)) -#define cached_slave_mask (__byte(1, cached_irq_mask)) - -extern spinlock_t i8259A_lock; - -extern void init_8259A(int auto_eoi); -extern void enable_8259A_irq(unsigned int irq); -extern void disable_8259A_irq(unsigned int irq); -extern unsigned int startup_8259A_irq(unsigned int irq); - -#endif /* __ASM_I8259_H__ */ diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h deleted file mode 100644 index e7817a3d6578..000000000000 --- a/include/asm-i386/ide.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * linux/include/asm-i386/ide.h - * - * Copyright (C) 1994-1996 Linus Torvalds & authors - */ - -/* - * This file contains the i386 architecture specific IDE code. - */ - -#ifndef __ASMi386_IDE_H -#define __ASMi386_IDE_H - -#ifdef __KERNEL__ - - -#ifndef MAX_HWIFS -# ifdef CONFIG_BLK_DEV_IDEPCI -#define MAX_HWIFS 10 -# else -#define MAX_HWIFS 6 -# endif -#endif - -#define IDE_ARCH_OBSOLETE_DEFAULTS - -static __inline__ int ide_default_irq(unsigned long base) -{ - switch (base) { - case 0x1f0: return 14; - case 0x170: return 15; - case 0x1e8: return 11; - case 0x168: return 10; - case 0x1e0: return 8; - case 0x160: return 12; - default: - return 0; - } -} - -static __inline__ unsigned long ide_default_io_base(int index) -{ - /* - * If PCI is present then it is not safe to poke around - * the other legacy IDE ports. Only 0x1f0 and 0x170 are - * defined compatibility mode ports for PCI. A user can - * override this using ide= but we must default safe. 
- */ - if (no_pci_devices()) { - switch(index) { - case 2: return 0x1e8; - case 3: return 0x168; - case 4: return 0x1e0; - case 5: return 0x160; - } - } - switch (index) { - case 0: return 0x1f0; - case 1: return 0x170; - default: - return 0; - } -} - -#define IDE_ARCH_OBSOLETE_INIT -#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ - -#ifdef CONFIG_BLK_DEV_IDEPCI -#define ide_init_default_irq(base) (0) -#else -#define ide_init_default_irq(base) ide_default_irq(base) -#endif - -#include <asm-generic/ide_iops.h> - -#endif /* __KERNEL__ */ - -#endif /* __ASMi386_IDE_H */ diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h deleted file mode 100644 index b52cd60a075b..000000000000 --- a/include/asm-i386/intel_arch_perfmon.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef X86_INTEL_ARCH_PERFMON_H -#define X86_INTEL_ARCH_PERFMON_H 1 - -#define MSR_ARCH_PERFMON_PERFCTR0 0xc1 -#define MSR_ARCH_PERFMON_PERFCTR1 0xc2 - -#define MSR_ARCH_PERFMON_EVENTSEL0 0x186 -#define MSR_ARCH_PERFMON_EVENTSEL1 0x187 - -#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) -#define ARCH_PERFMON_EVENTSEL_INT (1 << 20) -#define ARCH_PERFMON_EVENTSEL_OS (1 << 17) -#define ARCH_PERFMON_EVENTSEL_USR (1 << 16) - -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) -#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ - (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) - -union cpuid10_eax { - struct { - unsigned int version_id:8; - unsigned int num_counters:8; - unsigned int bit_width:8; - unsigned int mask_length:8; - } split; - unsigned int full; -}; - -#endif /* X86_INTEL_ARCH_PERFMON_H */ diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h deleted file mode 100644 index e8e0bd641120..000000000000 --- a/include/asm-i386/io.h +++ /dev/null @@ -1,349 +0,0 @@ -#ifndef _ASM_IO_H -#define _ASM_IO_H - -#include <linux/string.h> -#include <linux/compiler.h> - -/* - * This file contains the definitions for the x86 IO instructions - * inb/inw/inl/outb/outw/outl and the "string versions" of the same - * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" - * versions of the single-IO instructions (inb_p/inw_p/..). - * - * This file is not meant to be obfuscating: it's just complicated - * to (a) handle it all in a way that makes gcc able to optimize it - * as well as possible and (b) trying to avoid writing the same thing - * over and over again with slight variations and possibly making a - * mistake somewhere. - */ - -/* - * Thanks to James van Artsdalen for a better timing-fix than - * the two short jumps: using outb's to a nonexistent port seems - * to guarantee better timings even on fast machines. - * - * On the other hand, I'd like to be sure of a non-existent port: - * I feel a bit unsafe about using 0x80 (should be safe, though) - * - * Linus - */ - - /* - * Bit simplified and optimized by Jan Hubicka - * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999. - * - * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, - * isa_read[wl] and isa_write[wl] fixed - * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> - */ - -#define IO_SPACE_LIMIT 0xffff - -#define XQUAD_PORTIO_BASE 0xfe400000 -#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. 
*/ - -#ifdef __KERNEL__ - -#include <asm-generic/iomap.h> - -#include <linux/vmalloc.h> - -/* - * Convert a physical pointer to a virtual kernel pointer for /dev/mem - * access - */ -#define xlate_dev_mem_ptr(p) __va(p) - -/* - * Convert a virtual cached pointer to an uncached pointer - */ -#define xlate_dev_kmem_ptr(p) p - -/** - * virt_to_phys - map virtual addresses to physical - * @address: address to remap - * - * The returned physical address is the physical (CPU) mapping for - * the memory address given. It is only valid to use this function on - * addresses directly mapped or allocated via kmalloc. - * - * This function does not give bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ - -static inline unsigned long virt_to_phys(volatile void * address) -{ - return __pa(address); -} - -/** - * phys_to_virt - map physical address to virtual - * @address: address to remap - * - * The returned virtual address is a current CPU mapping for - * the memory address given. It is only valid to use this function on - * addresses that have a kernel mapping - * - * This function does not handle bus mappings for DMA transfers. In - * almost all conceivable cases a device driver should not be using - * this function - */ - -static inline void * phys_to_virt(unsigned long address) -{ - return __va(address); -} - -/* - * Change "struct page" to physical address. - */ -#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) - -extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); - -/** - * ioremap - map bus memory into CPU space - * @offset: bus address of the memory - * @size: size of the resource to map - * - * ioremap performs a platform specific sequence of operations to - * make bus memory CPU accessible via the readb/readw/readl/writeb/ - * writew/writel functions and the other mmio helpers. The returned - * address is not guaranteed to be usable directly as a virtual - * address. - * - * If the area you are trying to map is a PCI BAR you should have a - * look at pci_iomap(). - */ - -static inline void __iomem * ioremap(unsigned long offset, unsigned long size) -{ - return __ioremap(offset, size, 0); -} - -extern void __iomem * ioremap_nocache(unsigned long offset, unsigned long size); -extern void iounmap(volatile void __iomem *addr); - -/* - * bt_ioremap() and bt_iounmap() are for temporary early boot-time - * mappings, before the real ioremap() is functional. - * A boot-time mapping is currently limited to at most 16 pages. - */ -extern void *bt_ioremap(unsigned long offset, unsigned long size); -extern void bt_iounmap(void *addr, unsigned long size); -extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys); - -/* Use early IO mappings for DMI because it's initialized early */ -#define dmi_ioremap bt_ioremap -#define dmi_iounmap bt_iounmap -#define dmi_alloc alloc_bootmem - -/* - * ISA I/O bus memory addresses are 1:1 with the physical address. - */ -#define isa_virt_to_bus virt_to_phys -#define isa_page_to_bus page_to_phys -#define isa_bus_to_virt phys_to_virt - -/* - * However PCI ones are not necessarily 1:1 and therefore these interfaces - * are forbidden in portable PCI drivers. - * - * Allow them on x86 for legacy drivers, though. - */ -#define virt_to_bus virt_to_phys -#define bus_to_virt phys_to_virt - -/* - * readX/writeX() are used to access memory mapped devices. 
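
The ioremap()/readl()/writel()/iounmap() interfaces above are normally used from a driver roughly as follows; a minimal module-style sketch, assuming a made-up device whose registers sit at EXAMPLE_MMIO_PHYS (that address, EXAMPLE_MMIO_SIZE and EXAMPLE_REG_OFF are all hypothetical):

/*
 * Sketch: typical driver-side use of the ioremap()/readl()/writel()
 * interfaces declared above.  EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE and
 * EXAMPLE_REG_OFF are made-up values for illustration only.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_MMIO_PHYS	0xfebf0000UL	/* hypothetical register block */
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_REG_OFF		0x10		/* hypothetical status register */

static void __iomem *example_base;

static int __init example_init(void)
{
	u32 val;

	example_base = ioremap(EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE);
	if (!example_base)
		return -ENOMEM;

	val = readl(example_base + EXAMPLE_REG_OFF);		/* MMIO read  */
	writel(val | 0x1, example_base + EXAMPLE_REG_OFF);	/* MMIO write */

	printk(KERN_INFO "example: status register = %#x\n", val);
	return 0;
}

static void __exit example_exit(void)
{
	iounmap(example_base);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
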
On some - * architectures the memory mapped IO stuff needs to be accessed - * differently. On the x86 architecture, we just read/write the - * memory location directly. - */ - -static inline unsigned char readb(const volatile void __iomem *addr) -{ - return *(volatile unsigned char __force *) addr; -} -static inline unsigned short readw(const volatile void __iomem *addr) -{ - return *(volatile unsigned short __force *) addr; -} -static inline unsigned int readl(const volatile void __iomem *addr) -{ - return *(volatile unsigned int __force *) addr; -} -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define __raw_readb readb -#define __raw_readw readw -#define __raw_readl readl - -static inline void writeb(unsigned char b, volatile void __iomem *addr) -{ - *(volatile unsigned char __force *) addr = b; -} -static inline void writew(unsigned short b, volatile void __iomem *addr) -{ - *(volatile unsigned short __force *) addr = b; -} -static inline void writel(unsigned int b, volatile void __iomem *addr) -{ - *(volatile unsigned int __force *) addr = b; -} -#define __raw_writeb writeb -#define __raw_writew writew -#define __raw_writel writel - -#define mmiowb() - -static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) -{ - memset((void __force *) addr, val, count); -} -static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) -{ - __memcpy(dst, (void __force *) src, count); -} -static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) -{ - __memcpy((void __force *) dst, src, count); -} - -/* - * ISA space is 'always mapped' on a typical x86 system, no need to - * explicitly ioremap() it. The fact that the ISA IO space is mapped - * to PAGE_OFFSET is pure coincidence - it does not mean ISA values - * are physical addresses. The following constant pointer can be - * used as the IO-area pointer (it can be iounmapped as well, so the - * analogy with PCI is quite large): - */ -#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) - -/* - * Cache management - * - * This needed for two cases - * 1. Out of order aware processors - * 2. 
Accidentally out of order processors (PPro errata #51) - */ - -#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) - -static inline void flush_write_buffers(void) -{ - __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory"); -} - -#define dma_cache_inv(_start,_size) flush_write_buffers() -#define dma_cache_wback(_start,_size) flush_write_buffers() -#define dma_cache_wback_inv(_start,_size) flush_write_buffers() - -#else - -/* Nothing to do */ - -#define dma_cache_inv(_start,_size) do { } while (0) -#define dma_cache_wback(_start,_size) do { } while (0) -#define dma_cache_wback_inv(_start,_size) do { } while (0) -#define flush_write_buffers() - -#endif - -#endif /* __KERNEL__ */ - -static inline void native_io_delay(void) -{ - asm volatile("outb %%al,$0x80" : : : "memory"); -} - -#if defined(CONFIG_PARAVIRT) -#include <asm/paravirt.h> -#else - -static inline void slow_down_io(void) { - native_io_delay(); -#ifdef REALLY_SLOW_IO - native_io_delay(); - native_io_delay(); - native_io_delay(); -#endif -} - -#endif - -#ifdef CONFIG_X86_NUMAQ -extern void *xquad_portio; /* Where the IO area was mapped */ -#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) -#define __BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \ - if (xquad_portio) \ - write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \ - else \ - out##bwl##_local(value, port); \ -} \ -static inline void out##bwl(unsigned type value, int port) { \ - out##bwl##_quad(value, port, 0); \ -} \ -static inline unsigned type in##bwl##_quad(int port, int quad) { \ - if (xquad_portio) \ - return read##bwl(XQUAD_PORT_ADDR(port, quad)); \ - else \ - return in##bwl##_local(port); \ -} \ -static inline unsigned type in##bwl(int port) { \ - return in##bwl##_quad(port, 0); \ -} -#else -#define __BUILDIO(bwl,bw,type) \ -static inline void out##bwl(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ -} \ -static inline unsigned type in##bwl(int port) { \ - return in##bwl##_local(port); \ -} -#endif - - -#define BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_local(unsigned type value, int port) { \ - __asm__ __volatile__("out" #bwl " %" #bw "0, %w1" : : "a"(value), "Nd"(port)); \ -} \ -static inline unsigned type in##bwl##_local(int port) { \ - unsigned type value; \ - __asm__ __volatile__("in" #bwl " %w1, %" #bw "0" : "=a"(value) : "Nd"(port)); \ - return value; \ -} \ -static inline void out##bwl##_local_p(unsigned type value, int port) { \ - out##bwl##_local(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_local_p(int port) { \ - unsigned type value = in##bwl##_local(port); \ - slow_down_io(); \ - return value; \ -} \ -__BUILDIO(bwl,bw,type) \ -static inline void out##bwl##_p(unsigned type value, int port) { \ - out##bwl(value, port); \ - slow_down_io(); \ -} \ -static inline unsigned type in##bwl##_p(int port) { \ - unsigned type value = in##bwl(port); \ - slow_down_io(); \ - return value; \ -} \ -static inline void outs##bwl(int port, const void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; outs" #bwl : "+S"(addr), "+c"(count) : "d"(port)); \ -} \ -static inline void ins##bwl(int port, void *addr, unsigned long count) { \ - __asm__ __volatile__("rep; ins" #bwl : "+D"(addr), "+c"(count) : "d"(port)); \ -} - -BUILDIO(b,b,char) -BUILDIO(w,w,short) -BUILDIO(l,,int) - -#endif diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h deleted file mode 100644 index 
dbe734ddf2af..000000000000 --- a/include/asm-i386/io_apic.h +++ /dev/null @@ -1,155 +0,0 @@ -#ifndef __ASM_IO_APIC_H -#define __ASM_IO_APIC_H - -#include <asm/types.h> -#include <asm/mpspec.h> -#include <asm/apicdef.h> - -/* - * Intel IO-APIC support for SMP and UP systems. - * - * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar - */ - -#ifdef CONFIG_X86_IO_APIC - -/* - * The structure of the IO-APIC: - */ -union IO_APIC_reg_00 { - u32 raw; - struct { - u32 __reserved_2 : 14, - LTS : 1, - delivery_type : 1, - __reserved_1 : 8, - ID : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_01 { - u32 raw; - struct { - u32 version : 8, - __reserved_2 : 7, - PRQ : 1, - entries : 8, - __reserved_1 : 8; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_02 { - u32 raw; - struct { - u32 __reserved_2 : 24, - arbitration : 4, - __reserved_1 : 4; - } __attribute__ ((packed)) bits; -}; - -union IO_APIC_reg_03 { - u32 raw; - struct { - u32 boot_DT : 1, - __reserved_1 : 31; - } __attribute__ ((packed)) bits; -}; - -/* - * # of IO-APICs and # of IRQ routing registers - */ -extern int nr_ioapics; -extern int nr_ioapic_registers[MAX_IO_APICS]; - -enum ioapic_irq_destination_types { - dest_Fixed = 0, - dest_LowestPrio = 1, - dest_SMI = 2, - dest__reserved_1 = 3, - dest_NMI = 4, - dest_INIT = 5, - dest__reserved_2 = 6, - dest_ExtINT = 7 -}; - -struct IO_APIC_route_entry { - __u32 vector : 8, - delivery_mode : 3, /* 000: FIXED - * 001: lowest prio - * 111: ExtINT - */ - dest_mode : 1, /* 0: physical, 1: logical */ - delivery_status : 1, - polarity : 1, - irr : 1, - trigger : 1, /* 0: edge, 1: level */ - mask : 1, /* 0: enabled, 1: disabled */ - __reserved_2 : 15; - - union { struct { __u32 - __reserved_1 : 24, - physical_dest : 4, - __reserved_2 : 4; - } physical; - - struct { __u32 - __reserved_1 : 24, - logical_dest : 8; - } logical; - } dest; - -} __attribute__ ((packed)); - -/* - * MP-BIOS irq configuration table structures: - */ - -/* I/O APIC entries */ -extern struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS]; - -/* # of MP IRQ source entries */ -extern int mp_irq_entries; - -/* MP IRQ source entries */ -extern struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES]; - -/* non-0 if default (table-less) MP configuration */ -extern int mpc_default_type; - -/* Older SiS APIC requires we rewrite the index register */ -extern int sis_apic_bug; - -/* 1 if "noapic" boot option passed */ -extern int skip_ioapic_setup; - -static inline void disable_ioapic_setup(void) -{ - skip_ioapic_setup = 1; -} - -static inline int ioapic_setup_disabled(void) -{ - return skip_ioapic_setup; -} - -/* - * If we use the IO-APIC for IRQ routing, disable automatic - * assignment of PCI IRQ's. 
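
The bitfield unions above map directly onto the raw 32-bit I/O APIC registers. A small stand-alone sketch with the same layout as union IO_APIC_reg_01, assuming GCC's little-endian bitfield ordering and an example raw value of 0x00170011:

/* Sketch: decode an IO-APIC "register 01" value with the same bitfield
 * layout as union IO_APIC_reg_01 above (little-endian GCC ordering). */
#include <stdio.h>
#include <stdint.h>

union io_apic_reg_01 {
	uint32_t raw;
	struct {
		uint32_t version      : 8,
			 __reserved_2 : 7,
			 PRQ          : 1,
			 entries      : 8,
			 __reserved_1 : 8;
	} __attribute__((packed)) bits;
};

int main(void)
{
	union io_apic_reg_01 reg = { .raw = 0x00170011 };	/* example value */

	/* "entries" holds the index of the last redirection entry, so add 1.
	 * Prints: version 0x11, 24 redirection entries */
	printf("version %#x, %u redirection entries\n",
	       reg.bits.version, reg.bits.entries + 1);
	return 0;
}
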
- */ -#define io_apic_assign_pci_irqs (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs) - -#ifdef CONFIG_ACPI -extern int io_apic_get_unique_id (int ioapic, int apic_id); -extern int io_apic_get_version (int ioapic); -extern int io_apic_get_redir_entries (int ioapic); -extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low); -extern int timer_uses_ioapic_pin_0; -#endif /* CONFIG_ACPI */ - -extern int (*ioapic_renumber_irq)(int ioapic, int irq); - -#else /* !CONFIG_X86_IO_APIC */ -#define io_apic_assign_pci_irqs 0 -#endif - -#endif diff --git a/include/asm-i386/ioctl.h b/include/asm-i386/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/include/asm-i386/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ioctl.h> diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h deleted file mode 100644 index ef5878762dc9..000000000000 --- a/include/asm-i386/ioctls.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef __ARCH_I386_IOCTLS_H__ -#define __ARCH_I386_IOCTLS_H__ - -#include <asm/ioctl.h> - -/* 0x54 is just a magic number to make these relatively unique ('T') */ - -#define TCGETS 0x5401 -#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ -#define TCSETSW 0x5403 -#define TCSETSF 0x5404 -#define TCGETA 0x5405 -#define TCSETA 0x5406 -#define TCSETAW 0x5407 -#define TCSETAF 0x5408 -#define TCSBRK 0x5409 -#define TCXONC 0x540A -#define TCFLSH 0x540B -#define TIOCEXCL 0x540C -#define TIOCNXCL 0x540D -#define TIOCSCTTY 0x540E -#define TIOCGPGRP 0x540F -#define TIOCSPGRP 0x5410 -#define TIOCOUTQ 0x5411 -#define TIOCSTI 0x5412 -#define TIOCGWINSZ 0x5413 -#define TIOCSWINSZ 0x5414 -#define TIOCMGET 0x5415 -#define TIOCMBIS 0x5416 -#define TIOCMBIC 0x5417 -#define TIOCMSET 0x5418 -#define TIOCGSOFTCAR 0x5419 -#define TIOCSSOFTCAR 0x541A -#define FIONREAD 0x541B -#define TIOCINQ FIONREAD -#define TIOCLINUX 0x541C -#define TIOCCONS 0x541D -#define TIOCGSERIAL 0x541E -#define TIOCSSERIAL 0x541F -#define TIOCPKT 0x5420 -#define FIONBIO 0x5421 -#define TIOCNOTTY 0x5422 -#define TIOCSETD 0x5423 -#define TIOCGETD 0x5424 -#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ -/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */ -#define TIOCSBRK 0x5427 /* BSD compatibility */ -#define TIOCCBRK 0x5428 /* BSD compatibility */ -#define TIOCGSID 0x5429 /* Return the session ID of FD */ -#define TCGETS2 _IOR('T',0x2A, struct termios2) -#define TCSETS2 _IOW('T',0x2B, struct termios2) -#define TCSETSW2 _IOW('T',0x2C, struct termios2) -#define TCSETSF2 _IOW('T',0x2D, struct termios2) -#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ -#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ - -#define FIONCLEX 0x5450 -#define FIOCLEX 0x5451 -#define FIOASYNC 0x5452 -#define TIOCSERCONFIG 0x5453 -#define TIOCSERGWILD 0x5454 -#define TIOCSERSWILD 0x5455 -#define TIOCGLCKTRMIOS 0x5456 -#define TIOCSLCKTRMIOS 0x5457 -#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ -#define TIOCSERGETLSR 0x5459 /* Get line status register */ -#define TIOCSERGETMULTI 0x545A /* Get multiport config */ -#define TIOCSERSETMULTI 0x545B /* Set multiport config */ - -#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ -#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ -#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ -#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ -#define FIOQSIZE 0x5460 - -/* Used for packet 
mode */ -#define TIOCPKT_DATA 0 -#define TIOCPKT_FLUSHREAD 1 -#define TIOCPKT_FLUSHWRITE 2 -#define TIOCPKT_STOP 4 -#define TIOCPKT_START 8 -#define TIOCPKT_NOSTOP 16 -#define TIOCPKT_DOSTOP 32 - -#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ - -#endif diff --git a/include/asm-i386/ipc.h b/include/asm-i386/ipc.h deleted file mode 100644 index a46e3d9c2a3f..000000000000 --- a/include/asm-i386/ipc.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/ipc.h> diff --git a/include/asm-i386/ipcbuf.h b/include/asm-i386/ipcbuf.h deleted file mode 100644 index 0dcad4f84c2a..000000000000 --- a/include/asm-i386/ipcbuf.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __i386_IPCBUF_H__ -#define __i386_IPCBUF_H__ - -/* - * The ipc64_perm structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 32-bit mode_t and seq - * - 2 miscellaneous 32-bit values - */ - -struct ipc64_perm -{ - __kernel_key_t key; - __kernel_uid32_t uid; - __kernel_gid32_t gid; - __kernel_uid32_t cuid; - __kernel_gid32_t cgid; - __kernel_mode_t mode; - unsigned short __pad1; - unsigned short seq; - unsigned short __pad2; - unsigned long __unused1; - unsigned long __unused2; -}; - -#endif /* __i386_IPCBUF_H__ */ diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h deleted file mode 100644 index 36f310632c49..000000000000 --- a/include/asm-i386/irq.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef _ASM_IRQ_H -#define _ASM_IRQ_H - -/* - * linux/include/asm/irq.h - * - * (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar - * - * IRQ/IPI changes taken from work by Thomas Radke - * <tomsoft@informatik.tu-chemnitz.de> - */ - -#include <linux/sched.h> -/* include comes from machine specific directory */ -#include "irq_vectors.h" -#include <asm/thread_info.h> - -static __inline__ int irq_canonicalize(int irq) -{ - return ((irq == 2) ? 9 : irq); -} - -#ifdef CONFIG_X86_LOCAL_APIC -# define ARCH_HAS_NMI_WATCHDOG /* See include/linux/nmi.h */ -#endif - -#ifdef CONFIG_4KSTACKS - extern void irq_ctx_init(int cpu); - extern void irq_ctx_exit(int cpu); -# define __ARCH_HAS_DO_SOFTIRQ -#else -# define irq_ctx_init(cpu) do { } while (0) -# define irq_ctx_exit(cpu) do { } while (0) -#endif - -#ifdef CONFIG_IRQBALANCE -extern int irqbalance_disable(char *str); -#endif - -#ifdef CONFIG_HOTPLUG_CPU -extern void fixup_irqs(cpumask_t map); -#endif - -unsigned int do_IRQ(struct pt_regs *regs); -void init_IRQ(void); -void __init native_init_IRQ(void); - -#endif /* _ASM_IRQ_H */ diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h deleted file mode 100644 index 3368b20c0b48..000000000000 --- a/include/asm-i386/irq_regs.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Per-cpu current frame pointer - the location of the last exception frame on - * the stack, stored in the per-cpu area. 
- * - * Jeremy Fitzhardinge <jeremy@goop.org> - */ -#ifndef _ASM_I386_IRQ_REGS_H -#define _ASM_I386_IRQ_REGS_H - -#include <asm/percpu.h> - -DECLARE_PER_CPU(struct pt_regs *, irq_regs); - -static inline struct pt_regs *get_irq_regs(void) -{ - return x86_read_percpu(irq_regs); -} - -static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) -{ - struct pt_regs *old_regs; - - old_regs = get_irq_regs(); - x86_write_percpu(irq_regs, new_regs); - - return old_regs; -} - -#endif /* _ASM_I386_IRQ_REGS_H */ diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h deleted file mode 100644 index eff8585cb741..000000000000 --- a/include/asm-i386/irqflags.h +++ /dev/null @@ -1,163 +0,0 @@ -/* - * include/asm-i386/irqflags.h - * - * IRQ flags handling - * - * This file gets included from lowlevel asm headers too, to provide - * wrapped versions of the local_irq_*() APIs, based on the - * raw_local_irq_*() functions from the lowlevel headers. - */ -#ifndef _ASM_IRQFLAGS_H -#define _ASM_IRQFLAGS_H -#include <asm/processor-flags.h> - -#ifndef __ASSEMBLY__ -static inline unsigned long native_save_fl(void) -{ - unsigned long f; - asm volatile("pushfl ; popl %0":"=g" (f): /* no input */); - return f; -} - -static inline void native_restore_fl(unsigned long f) -{ - asm volatile("pushl %0 ; popfl": /* no output */ - :"g" (f) - :"memory", "cc"); -} - -static inline void native_irq_disable(void) -{ - asm volatile("cli": : :"memory"); -} - -static inline void native_irq_enable(void) -{ - asm volatile("sti": : :"memory"); -} - -static inline void native_safe_halt(void) -{ - asm volatile("sti; hlt": : :"memory"); -} - -static inline void native_halt(void) -{ - asm volatile("hlt": : :"memory"); -} -#endif /* __ASSEMBLY__ */ - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#ifndef __ASSEMBLY__ - -static inline unsigned long __raw_local_save_flags(void) -{ - return native_save_fl(); -} - -static inline void raw_local_irq_restore(unsigned long flags) -{ - native_restore_fl(flags); -} - -static inline void raw_local_irq_disable(void) -{ - native_irq_disable(); -} - -static inline void raw_local_irq_enable(void) -{ - native_irq_enable(); -} - -/* - * Used in the idle loop; sti takes one instruction cycle - * to complete: - */ -static inline void raw_safe_halt(void) -{ - native_safe_halt(); -} - -/* - * Used when interrupts are already enabled or to - * shutdown the processor: - */ -static inline void halt(void) -{ - native_halt(); -} - -/* - * For spinlocks, etc: - */ -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long flags = __raw_local_save_flags(); - - raw_local_irq_disable(); - - return flags; -} - -#else -#define DISABLE_INTERRUPTS(clobbers) cli -#define ENABLE_INTERRUPTS(clobbers) sti -#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit -#define INTERRUPT_RETURN iret -#define GET_CR0_INTO_EAX movl %cr0, %eax -#endif /* __ASSEMBLY__ */ -#endif /* CONFIG_PARAVIRT */ - -#ifndef __ASSEMBLY__ -#define raw_local_save_flags(flags) \ - do { (flags) = __raw_local_save_flags(); } while (0) - -#define raw_local_irq_save(flags) \ - do { (flags) = __raw_local_irq_save(); } while (0) - -static inline int raw_irqs_disabled_flags(unsigned long flags) -{ - return !(flags & X86_EFLAGS_IF); -} - -static inline int raw_irqs_disabled(void) -{ - unsigned long flags = __raw_local_save_flags(); - - return raw_irqs_disabled_flags(flags); -} -#endif /* __ASSEMBLY__ */ - -/* - * Do the CPU's IRQ-state tracing from assembly code. 
We call a - * C function, so save all the C-clobbered registers: - */ -#ifdef CONFIG_TRACE_IRQFLAGS - -# define TRACE_IRQS_ON \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_on; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -# define TRACE_IRQS_OFF \ - pushl %eax; \ - pushl %ecx; \ - pushl %edx; \ - call trace_hardirqs_off; \ - popl %edx; \ - popl %ecx; \ - popl %eax; - -#else -# define TRACE_IRQS_ON -# define TRACE_IRQS_OFF -#endif - -#endif diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h deleted file mode 100644 index ef2003ebc6f9..000000000000 --- a/include/asm-i386/ist.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef _ASM_IST_H -#define _ASM_IST_H - -/* - * Include file for the interface to IST BIOS - * Copyright 2002 Andy Grover <andrew.grover@intel.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - - -#ifdef __KERNEL__ - -#include <linux/types.h> - -struct ist_info { - u32 signature; - u32 command; - u32 event; - u32 perf_level; -}; - -extern struct ist_info ist_info; - -#endif /* __KERNEL__ */ -#endif /* _ASM_IST_H */ diff --git a/include/asm-i386/k8.h b/include/asm-i386/k8.h deleted file mode 100644 index dfd88a6e6040..000000000000 --- a/include/asm-i386/k8.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-x86_64/k8.h> diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h deleted file mode 100644 index a185b5f73e7f..000000000000 --- a/include/asm-i386/kdebug.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef _I386_KDEBUG_H -#define _I386_KDEBUG_H 1 - -/* - * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com> - * from x86_64 architecture. - */ -#include <linux/notifier.h> - -struct pt_regs; - -extern int register_page_fault_notifier(struct notifier_block *); -extern int unregister_page_fault_notifier(struct notifier_block *); - - -/* Grossly misnamed. */ -enum die_val { - DIE_OOPS = 1, - DIE_INT3, - DIE_DEBUG, - DIE_PANIC, - DIE_NMI, - DIE_DIE, - DIE_NMIWATCHDOG, - DIE_KERNELDEBUG, - DIE_TRAP, - DIE_GPF, - DIE_CALL, - DIE_NMI_IPI, - DIE_PAGE_FAULT, -}; - -#endif diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h deleted file mode 100644 index 4b9dc9e6b701..000000000000 --- a/include/asm-i386/kexec.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef _I386_KEXEC_H -#define _I386_KEXEC_H - -#define PA_CONTROL_PAGE 0 -#define VA_CONTROL_PAGE 1 -#define PA_PGD 2 -#define VA_PGD 3 -#define PA_PTE_0 4 -#define VA_PTE_0 5 -#define PA_PTE_1 6 -#define VA_PTE_1 7 -#ifdef CONFIG_X86_PAE -#define PA_PMD_0 8 -#define VA_PMD_0 9 -#define PA_PMD_1 10 -#define VA_PMD_1 11 -#define PAGES_NR 12 -#else -#define PAGES_NR 8 -#endif - -#ifndef __ASSEMBLY__ - -#include <asm/ptrace.h> -#include <asm/string.h> - -/* - * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. - * I.e. Maximum page that is mapped directly into kernel memory, - * and kmap is not required. 
- */ - -/* Maximum physical address we can use pages from */ -#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) -/* Maximum address we can reach in physical address mode */ -#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) -/* Maximum address we can use for the control code buffer */ -#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE - -#define KEXEC_CONTROL_CODE_SIZE 4096 - -/* The native architecture */ -#define KEXEC_ARCH KEXEC_ARCH_386 - -/* We can also handle crash dumps from 64 bit kernel. */ -#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64) - -/* CPU does not save ss and esp on stack if execution is already - * running in kernel mode at the time of NMI occurrence. This code - * fixes it. - */ -static inline void crash_fixup_ss_esp(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - memcpy(newregs, oldregs, sizeof(*newregs)); - newregs->esp = (unsigned long)&(oldregs->esp); - __asm__ __volatile__( - "xorl %%eax, %%eax\n\t" - "movw %%ss, %%ax\n\t" - :"=a"(newregs->xss)); -} - -/* - * This function is responsible for capturing register states if coming - * via panic otherwise just fix up the ss and esp if coming via kernel - * mode exception. - */ -static inline void crash_setup_regs(struct pt_regs *newregs, - struct pt_regs *oldregs) -{ - if (oldregs) - crash_fixup_ss_esp(newregs, oldregs); - else { - __asm__ __volatile__("movl %%ebx,%0" : "=m"(newregs->ebx)); - __asm__ __volatile__("movl %%ecx,%0" : "=m"(newregs->ecx)); - __asm__ __volatile__("movl %%edx,%0" : "=m"(newregs->edx)); - __asm__ __volatile__("movl %%esi,%0" : "=m"(newregs->esi)); - __asm__ __volatile__("movl %%edi,%0" : "=m"(newregs->edi)); - __asm__ __volatile__("movl %%ebp,%0" : "=m"(newregs->ebp)); - __asm__ __volatile__("movl %%eax,%0" : "=m"(newregs->eax)); - __asm__ __volatile__("movl %%esp,%0" : "=m"(newregs->esp)); - __asm__ __volatile__("movw %%ss, %%ax;" :"=a"(newregs->xss)); - __asm__ __volatile__("movw %%cs, %%ax;" :"=a"(newregs->xcs)); - __asm__ __volatile__("movw %%ds, %%ax;" :"=a"(newregs->xds)); - __asm__ __volatile__("movw %%es, %%ax;" :"=a"(newregs->xes)); - __asm__ __volatile__("pushfl; popl %0" :"=m"(newregs->eflags)); - - newregs->eip = (unsigned long)current_text_addr(); - } -} -asmlinkage NORET_TYPE void -relocate_kernel(unsigned long indirection_page, - unsigned long control_page, - unsigned long start_address, - unsigned int has_pae) ATTRIB_NORET; - -#endif /* __ASSEMBLY__ */ - -#endif /* _I386_KEXEC_H */ diff --git a/include/asm-i386/kmap_types.h b/include/asm-i386/kmap_types.h deleted file mode 100644 index 806aae3c5338..000000000000 --- a/include/asm-i386/kmap_types.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef _ASM_KMAP_TYPES_H -#define _ASM_KMAP_TYPES_H - - -#ifdef CONFIG_DEBUG_HIGHMEM -# define D(n) __KM_FENCE_##n , -#else -# define D(n) -#endif - -enum km_type { -D(0) KM_BOUNCE_READ, -D(1) KM_SKB_SUNRPC_DATA, -D(2) KM_SKB_DATA_SOFTIRQ, -D(3) KM_USER0, -D(4) KM_USER1, -D(5) KM_BIO_SRC_IRQ, -D(6) KM_BIO_DST_IRQ, -D(7) KM_PTE0, -D(8) KM_PTE1, -D(9) KM_IRQ0, -D(10) KM_IRQ1, -D(11) KM_SOFTIRQ0, -D(12) KM_SOFTIRQ1, -D(13) KM_TYPE_NR -}; - -#undef D - -#endif diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h deleted file mode 100644 index 06f7303c30ca..000000000000 --- a/include/asm-i386/kprobes.h +++ /dev/null @@ -1,92 +0,0 @@ -#ifndef _ASM_KPROBES_H -#define _ASM_KPROBES_H -/* - * Kernel Probes (KProbes) - * include/asm-i386/kprobes.h - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as 
published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2002, 2004 - * - * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel - * Probes initial implementation ( includes suggestions from - * Rusty Russell). - */ -#include <linux/types.h> -#include <linux/ptrace.h> - -#define __ARCH_WANT_KPROBES_INSN_SLOT - -struct kprobe; -struct pt_regs; - -typedef u8 kprobe_opcode_t; -#define BREAKPOINT_INSTRUCTION 0xcc -#define RELATIVEJUMP_INSTRUCTION 0xe9 -#define MAX_INSN_SIZE 16 -#define MAX_STACK_SIZE 64 -#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ - (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ - ? (MAX_STACK_SIZE) \ - : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) - -#define ARCH_SUPPORTS_KRETPROBES -#define ARCH_INACTIVE_KPROBE_COUNT 0 -#define flush_insn_slot(p) do { } while (0) - -void arch_remove_kprobe(struct kprobe *p); -void kretprobe_trampoline(void); - -/* Architecture specific copy of original instruction*/ -struct arch_specific_insn { - /* copy of the original instruction */ - kprobe_opcode_t *insn; - /* - * If this flag is not 0, this kprobe can be boost when its - * post_handler and break_handler is not set. - */ - int boostable; -}; - -struct prev_kprobe { - struct kprobe *kp; - unsigned long status; - unsigned long old_eflags; - unsigned long saved_eflags; -}; - -/* per-cpu kprobe control block */ -struct kprobe_ctlblk { - unsigned long kprobe_status; - unsigned long kprobe_old_eflags; - unsigned long kprobe_saved_eflags; - long *jprobe_saved_esp; - struct pt_regs jprobe_saved_regs; - kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; - struct prev_kprobe prev_kprobe; -}; - -/* trap3/1 are intr gates for kprobes. So, restore the status of IF, - * if necessary, before executing the original int3/1 (trap) handler. - */ -static inline void restore_interrupts(struct pt_regs *regs) -{ - if (regs->eflags & IF_MASK) - local_irq_enable(); -} - -extern int kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data); -#endif /* _ASM_KPROBES_H */ diff --git a/include/asm-i386/ldt.h b/include/asm-i386/ldt.h deleted file mode 100644 index e9d3de1dee6c..000000000000 --- a/include/asm-i386/ldt.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * ldt.h - * - * Definitions of structures used with the modify_ldt system call. - */ -#ifndef _LINUX_LDT_H -#define _LINUX_LDT_H - -/* Maximum number of LDT entries supported. */ -#define LDT_ENTRIES 8192 -/* The size of each LDT entry. 
*/ -#define LDT_ENTRY_SIZE 8 - -#ifndef __ASSEMBLY__ -struct user_desc { - unsigned int entry_number; - unsigned long base_addr; - unsigned int limit; - unsigned int seg_32bit:1; - unsigned int contents:2; - unsigned int read_exec_only:1; - unsigned int limit_in_pages:1; - unsigned int seg_not_present:1; - unsigned int useable:1; -}; - -#define MODIFY_LDT_CONTENTS_DATA 0 -#define MODIFY_LDT_CONTENTS_STACK 1 -#define MODIFY_LDT_CONTENTS_CODE 2 - -#endif /* !__ASSEMBLY__ */ -#endif diff --git a/include/asm-i386/linkage.h b/include/asm-i386/linkage.h deleted file mode 100644 index f4a6ebac0247..000000000000 --- a/include/asm-i386/linkage.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef __ASM_LINKAGE_H -#define __ASM_LINKAGE_H - -#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0))) -#define FASTCALL(x) x __attribute__((regparm(3))) -#define fastcall __attribute__((regparm(3))) - -#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret)) - -#ifdef CONFIG_X86_ALIGNMENT_16 -#define __ALIGN .align 16,0x90 -#define __ALIGN_STR ".align 16,0x90" -#endif - -#endif diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h deleted file mode 100644 index 6e85975b9ed2..000000000000 --- a/include/asm-i386/local.h +++ /dev/null @@ -1,233 +0,0 @@ -#ifndef _ARCH_I386_LOCAL_H -#define _ARCH_I386_LOCAL_H - -#include <linux/percpu.h> -#include <asm/system.h> -#include <asm/atomic.h> - -typedef struct -{ - atomic_long_t a; -} local_t; - -#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } - -#define local_read(l) atomic_long_read(&(l)->a) -#define local_set(l,i) atomic_long_set(&(l)->a, (i)) - -static __inline__ void local_inc(local_t *l) -{ - __asm__ __volatile__( - "incl %0" - :"+m" (l->a.counter)); -} - -static __inline__ void local_dec(local_t *l) -{ - __asm__ __volatile__( - "decl %0" - :"+m" (l->a.counter)); -} - -static __inline__ void local_add(long i, local_t *l) -{ - __asm__ __volatile__( - "addl %1,%0" - :"+m" (l->a.counter) - :"ir" (i)); -} - -static __inline__ void local_sub(long i, local_t *l) -{ - __asm__ __volatile__( - "subl %1,%0" - :"+m" (l->a.counter) - :"ir" (i)); -} - -/** - * local_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @l: pointer of type local_t - * - * Atomically subtracts @i from @l and returns - * true if the result is zero, or false for all - * other cases. - */ -static __inline__ int local_sub_and_test(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "subl %2,%0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * local_dec_and_test - decrement and test - * @l: pointer of type local_t - * - * Atomically decrements @l by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int local_dec_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "decl %0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * local_inc_and_test - increment and test - * @l: pointer of type local_t - * - * Atomically increments @l by 1 - * and returns true if the result is zero, or false for all - * other cases. 
- */ -static __inline__ int local_inc_and_test(local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "incl %0; sete %1" - :"+m" (l->a.counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * local_add_negative - add and test if negative - * @l: pointer of type local_t - * @i: integer value to add - * - * Atomically adds @i to @l and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int local_add_negative(long i, local_t *l) -{ - unsigned char c; - - __asm__ __volatile__( - "addl %2,%0; sets %1" - :"+m" (l->a.counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * local_add_return - add and return - * @l: pointer of type local_t - * @i: integer value to add - * - * Atomically adds @i to @l and returns @i + @l - */ -static __inline__ long local_add_return(long i, local_t *l) -{ - long __i; -#ifdef CONFIG_M386 - unsigned long flags; - if(unlikely(boot_cpu_data.x86 <= 3)) - goto no_xadd; -#endif - /* Modern 486+ processor */ - __i = i; - __asm__ __volatile__( - "xaddl %0, %1;" - :"+r" (i), "+m" (l->a.counter) - : : "memory"); - return i + __i; - -#ifdef CONFIG_M386 -no_xadd: /* Legacy 386 processor */ - local_irq_save(flags); - __i = local_read(l); - local_set(l, i + __i); - local_irq_restore(flags); - return i + __i; -#endif -} - -static __inline__ long local_sub_return(long i, local_t *l) -{ - return local_add_return(-i,l); -} - -#define local_inc_return(l) (local_add_return(1,l)) -#define local_dec_return(l) (local_sub_return(1,l)) - -#define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -/* Always has a lock prefix */ -#define local_xchg(l, n) (xchg(&((l)->a.counter), (n))) - -/** - * local_add_unless - add unless the number is a given value - * @l: pointer of type local_t - * @a: the amount to add to l... - * @u: ...unless l is equal to u. - * - * Atomically adds @a to @l, so long as it was not @u. - * Returns non-zero if @l was not @u, and zero otherwise. - */ -#define local_add_unless(l, a, u) \ -({ \ - long c, old; \ - c = local_read(l); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = local_cmpxchg((l), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define local_inc_not_zero(l) local_add_unless((l), 1, 0) - -/* On x86, these are no better than the atomic variants. */ -#define __local_inc(l) local_inc(l) -#define __local_dec(l) local_dec(l) -#define __local_add(i,l) local_add((i),(l)) -#define __local_sub(i,l) local_sub((i),(l)) - -/* Use these for per-cpu local_t variables: on some archs they are - * much more efficient than these naive implementations. Note they take - * a variable, not an address. - */ - -/* Need to disable preemption for the cpu local counters otherwise we could - still access a variable of a previous CPU in a non atomic way. 
*/ -#define cpu_local_wrap_v(l) \ - ({ local_t res__; \ - preempt_disable(); \ - res__ = (l); \ - preempt_enable(); \ - res__; }) -#define cpu_local_wrap(l) \ - ({ preempt_disable(); \ - l; \ - preempt_enable(); }) \ - -#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l))) -#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i))) -#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l))) -#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l))) -#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l))) -#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l))) - -#define __cpu_local_inc(l) cpu_local_inc(l) -#define __cpu_local_dec(l) cpu_local_dec(l) -#define __cpu_local_add(i, l) cpu_local_add((i), (l)) -#define __cpu_local_sub(i, l) cpu_local_sub((i), (l)) - -#endif /* _ARCH_I386_LOCAL_H */ diff --git a/include/asm-i386/mach-bigsmp/mach_apic.h b/include/asm-i386/mach-bigsmp/mach_apic.h deleted file mode 100644 index ebd319f838ab..000000000000 --- a/include/asm-i386/mach-bigsmp/mach_apic.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) -#define esr_disable (1) - -static inline int apic_id_registered(void) -{ - return (1); -} - -/* Round robin the irqs amoung the online cpus */ -static inline cpumask_t target_cpus(void) -{ - static unsigned long cpu = NR_CPUS; - do { - if (cpu >= NR_CPUS) - cpu = first_cpu(cpu_online_map); - else - cpu = next_cpu(cpu, cpu_online_map); - } while (cpu >= NR_CPUS); - return cpumask_of_cpu(cpu); -} - -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0 -#define TARGET_CPUS (target_cpus()) -#define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target proc */ -#define NO_BALANCE_IRQ (0) -#define WAKE_SECONDARY_VIA_INIT - - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return (0); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return (1); -} - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = xapic_phys_to_log_apicid(cpu); - val |= SET_APIC_LOGICAL_ID(id); - return val; -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write_around(APIC_LDR, val); -} - -static inline void setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", - "Physflat", nr_ioapics); -} - -static inline int multi_timer_check(int apic, int irq) -{ - return (0); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return (0); -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; - - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - if (cpu >= NR_CPUS) - return BAD_APICID; - return cpu_physical_id(cpu); -} - -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return m->mpc_apicid; -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xFFL); -} - -static inline void setup_portio_remap(void) -{ -} - -static inline void enable_apic_mode(void) -{ -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return (1); -} - -/* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int cpu; - int apicid; - - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - return apicid; -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-bigsmp/mach_apicdef.h b/include/asm-i386/mach-bigsmp/mach_apicdef.h deleted file mode 100644 index a58ab5a75c8c..000000000000 --- a/include/asm-i386/mach-bigsmp/mach_apicdef.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -#define APIC_ID_MASK (0xFF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0xFF); -} - -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-bigsmp/mach_ipi.h b/include/asm-i386/mach-bigsmp/mach_ipi.h deleted file mode 100644 index 9404c535b7ec..000000000000 --- a/include/asm-i386/mach-bigsmp/mach_ipi.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -void send_IPI_mask_sequence(cpumask_t mask, int vector); - -static inline void send_IPI_mask(cpumask_t mask, int vector) -{ - send_IPI_mask_sequence(mask, vector); -} - -static inline void send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); -} - -static inline void send_IPI_all(int vector) -{ - send_IPI_mask(cpu_online_map, vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-bigsmp/mach_mpspec.h b/include/asm-i386/mach-bigsmp/mach_mpspec.h deleted file mode 100644 index 6b5dadcf1d0e..000000000000 --- a/include/asm-i386/mach-bigsmp/mach_mpspec.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -#define MAX_MP_BUSSES 32 - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-default/apm.h b/include/asm-i386/mach-default/apm.h deleted file mode 100644 
index 1f730b8bd1fd..000000000000 --- a/include/asm-i386/mach-default/apm.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * include/asm-i386/mach-default/apm.h - * - * Machine specific APM BIOS functions for generic. - * Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp> - */ - -#ifndef _ASM_APM_H -#define _ASM_APM_H - -#ifdef APM_ZERO_SEGS -# define APM_DO_ZERO_SEGS \ - "pushl %%ds\n\t" \ - "pushl %%es\n\t" \ - "xorl %%edx, %%edx\n\t" \ - "mov %%dx, %%ds\n\t" \ - "mov %%dx, %%es\n\t" \ - "mov %%dx, %%fs\n\t" \ - "mov %%dx, %%gs\n\t" -# define APM_DO_POP_SEGS \ - "popl %%es\n\t" \ - "popl %%ds\n\t" -#else -# define APM_DO_ZERO_SEGS -# define APM_DO_POP_SEGS -#endif - -static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, - u32 *eax, u32 *ebx, u32 *ecx, - u32 *edx, u32 *esi) -{ - /* - * N.B. We do NOT need a cld after the BIOS call - * because we always save and restore the flags. - */ - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" - "setc %%al\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" - APM_DO_POP_SEGS - : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx), - "=S" (*esi) - : "a" (func), "b" (ebx_in), "c" (ecx_in) - : "memory", "cc"); -} - -static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, - u32 ecx_in, u32 *eax) -{ - int cx, dx, si; - u8 error; - - /* - * N.B. We do NOT need a cld after the BIOS call - * because we always save and restore the flags. - */ - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" - "lcall *%%cs:apm_bios_entry\n\t" - "setc %%bl\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" - APM_DO_POP_SEGS - : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx), - "=S" (si) - : "a" (func), "b" (ebx_in), "c" (ecx_in) - : "memory", "cc"); - return error; -} - -#endif /* _ASM_APM_H */ diff --git a/include/asm-i386/mach-default/bios_ebda.h b/include/asm-i386/mach-default/bios_ebda.h deleted file mode 100644 index 9cbd9a668af8..000000000000 --- a/include/asm-i386/mach-default/bios_ebda.h +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _MACH_BIOS_EBDA_H -#define _MACH_BIOS_EBDA_H - -/* - * there is a real-mode segmented pointer pointing to the - * 4K EBDA area at 0x40E. - */ -static inline unsigned int get_bios_ebda(void) -{ - unsigned int address = *(unsigned short *)phys_to_virt(0x40E); - address <<= 4; - return address; /* 0 means none */ -} - -#endif /* _MACH_BIOS_EBDA_H */ diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h deleted file mode 100644 index 23ecda0b28a0..000000000000 --- a/include/asm-i386/mach-default/do_timer.h +++ /dev/null @@ -1,16 +0,0 @@ -/* defines for inline arch setup functions */ -#include <linux/clockchips.h> - -#include <asm/i8259.h> -#include <asm/i8253.h> - -/** - * do_timer_interrupt_hook - hook into timer tick - * - * Call the pit clock event handler. see asm/i8253.h - **/ - -static inline void do_timer_interrupt_hook(void) -{ - global_clock_event->event_handler(global_clock_event); -} diff --git a/include/asm-i386/mach-default/entry_arch.h b/include/asm-i386/mach-default/entry_arch.h deleted file mode 100644 index bc861469bdba..000000000000 --- a/include/asm-i386/mach-default/entry_arch.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * This file is designed to contain the BUILD_INTERRUPT specifications for - * all of the extra named interrupt vectors used by the architecture. 
- * Usually this is the Inter Process Interrupts (IPIs) - */ - -/* - * The following vectors are part of the Linux architecture, there - * is no hardware IRQ pin equivalent for them, they are triggered - * through the ICC by us (IPIs) - */ -#ifdef CONFIG_X86_SMP -BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) -BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) -BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) -#endif - -/* - * every pentium local APIC has two 'local interrupts', with a - * soft-definable vector attached to both interrupts, one of - * which is a timer interrupt, the other one is error counter - * overflow. Linux uses the local APIC timer interrupt to get - * a much simpler SMP time architecture: - */ -#ifdef CONFIG_X86_LOCAL_APIC -BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) -BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) -BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) - -#ifdef CONFIG_X86_MCE_P4THERMAL -BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR) -#endif - -#endif diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h deleted file mode 100644 index 48540ba97166..000000000000 --- a/include/asm-i386/mach-default/io_ports.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * arch/i386/mach-generic/io_ports.h - * - * Machine specific IO port address definition for generic. - * Written by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_IO_PORTS_H -#define _MACH_IO_PORTS_H - -/* i8259A PIC registers */ -#define PIC_MASTER_CMD 0x20 -#define PIC_MASTER_IMR 0x21 -#define PIC_MASTER_ISR PIC_MASTER_CMD -#define PIC_MASTER_POLL PIC_MASTER_ISR -#define PIC_MASTER_OCW3 PIC_MASTER_ISR -#define PIC_SLAVE_CMD 0xa0 -#define PIC_SLAVE_IMR 0xa1 - -/* i8259A PIC related value */ -#define PIC_CASCADE_IR 2 -#define MASTER_ICW4_DEFAULT 0x01 -#define SLAVE_ICW4_DEFAULT 0x01 -#define PIC_ICW4_AEOI 2 - -#endif /* !_MACH_IO_PORTS_H */ diff --git a/include/asm-i386/mach-default/irq_vectors.h b/include/asm-i386/mach-default/irq_vectors.h deleted file mode 100644 index 881c63ca61ad..000000000000 --- a/include/asm-i386/mach-default/irq_vectors.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * This file should contain #defines for all of the interrupt vector - * numbers used by this architecture. - * - * In addition, there are some standard defines: - * - * FIRST_EXTERNAL_VECTOR: - * The first free place for external interrupts - * - * SYSCALL_VECTOR: - * The IRQ vector a syscall makes the user to kernel transition - * under. - * - * TIMER_IRQ: - * The IRQ number the timer interrupt comes in at. - * - * NR_IRQS: - * The total number of interrupt vectors (including all the - * architecture specific interrupts) needed. - * - */ -#ifndef _ASM_IRQ_VECTORS_H -#define _ASM_IRQ_VECTORS_H - -/* - * IDT vectors usable for external interrupt sources start - * at 0x20: - */ -#define FIRST_EXTERNAL_VECTOR 0x20 - -#define SYSCALL_VECTOR 0x80 - -/* - * Vectors 0x20-0x2f are used for ISA interrupts. - */ - -/* - * Special IRQ vectors used by the SMP architecture, 0xf0-0xff - * - * some of the following vectors are 'rare', they are merged - * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. - * TLB, reschedule and local APIC vectors are performance-critical. - * - * Vectors 0xf0-0xfa are free (reserved for future Linux use). 
- */ -#define SPURIOUS_APIC_VECTOR 0xff -#define ERROR_APIC_VECTOR 0xfe -#define INVALIDATE_TLB_VECTOR 0xfd -#define RESCHEDULE_VECTOR 0xfc -#define CALL_FUNCTION_VECTOR 0xfb - -#define THERMAL_APIC_VECTOR 0xf0 -/* - * Local APIC timer IRQ vector is on a different priority level, - * to work around the 'lost local interrupt if more than 2 IRQ - * sources per level' errata. - */ -#define LOCAL_TIMER_VECTOR 0xef - -/* - * First APIC vector available to drivers: (vectors 0x30-0xee) - * we start at 0x31 to spread out vectors evenly between priority - * levels. (0x80 is the syscall vector) - */ -#define FIRST_DEVICE_VECTOR 0x31 -#define FIRST_SYSTEM_VECTOR 0xef - -#define TIMER_IRQ 0 - -/* - * 16 8259A IRQ's, 208 potential APIC interrupt sources. - * Right now the APIC is mostly only used for SMP. - * 256 vectors is an architectural limit. (we can have - * more than 256 devices theoretically, but they will - * have to use shared interrupts) - * Since vectors 0x00-0x1f are used/reserved for the CPU, - * the usable vector space is 0x20-0xff (224 vectors) - */ - -/* - * The maximum number of vectors supported by i386 processors - * is limited to 256. For processors other than i386, NR_VECTORS - * should be changed accordingly. - */ -#define NR_VECTORS 256 - -#include "irq_vectors_limits.h" - -#define FPU_IRQ 13 - -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 -#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) - - -#endif /* _ASM_IRQ_VECTORS_H */ diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h deleted file mode 100644 index a90c7a60109f..000000000000 --- a/include/asm-i386/mach-default/irq_vectors_limits.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_IRQ_VECTORS_LIMITS_H -#define _ASM_IRQ_VECTORS_LIMITS_H - -#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) -#define NR_IRQS 224 -# if (224 >= 32 * NR_CPUS) -# define NR_IRQ_VECTORS NR_IRQS -# else -# define NR_IRQ_VECTORS (32 * NR_CPUS) -# endif -#else -#define NR_IRQS 16 -#define NR_IRQ_VECTORS NR_IRQS -#endif - -#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff --git a/include/asm-i386/mach-default/mach_apic.h b/include/asm-i386/mach-default/mach_apic.h deleted file mode 100644 index 6db1c3babe9a..000000000000 --- a/include/asm-i386/mach-default/mach_apic.h +++ /dev/null @@ -1,131 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <mach_apicdef.h> -#include <asm/smp.h> - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline cpumask_t target_cpus(void) -{ -#ifdef CONFIG_SMP - return cpu_online_map; -#else - return cpumask_of_cpu(0); -#endif -} -#define TARGET_CPUS (target_cpus()) - -#define NO_BALANCE_IRQ (0) -#define esr_disable (0) - -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... 
- */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write_around(APIC_LDR, val); -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - return phys_map; -} - -static inline void setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "Flat", nr_ioapics); -} - -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - -static inline int apicid_to_node(int logical_apicid) -{ - return 0; -} - -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < get_physical_broadcast()) - return mps_cpu; - else - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); -} - -static inline void setup_portio_remap(void) -{ -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - -static inline int apic_id_registered(void) -{ - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return cpus_addr(cpumask)[0]; -} - -static inline void enable_apic_mode(void) -{ -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-default/mach_apicdef.h b/include/asm-i386/mach-default/mach_apicdef.h deleted file mode 100644 index 7bcb350c3ee8..000000000000 --- a/include/asm-i386/mach-default/mach_apicdef.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -#define APIC_ID_MASK (0xF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0xF); -} - -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-default/mach_ipi.h b/include/asm-i386/mach-default/mach_ipi.h deleted file mode 100644 index 0dba244c86db..000000000000 --- a/include/asm-i386/mach-default/mach_ipi.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -/* Avoid include hell */ -#define NMI_VECTOR 0x02 - -void send_IPI_mask_bitmask(cpumask_t mask, int vector); -void __send_IPI_shortcut(unsigned int shortcut, int vector); - -extern int no_broadcast; - -static inline void send_IPI_mask(cpumask_t mask, int vector) -{ - send_IPI_mask_bitmask(mask, vector); -} - -static inline void __local_send_IPI_allbutself(int vector) -{ - if (no_broadcast || vector == NMI_VECTOR) { - cpumask_t mask = cpu_online_map; - - cpu_clear(smp_processor_id(), mask); - send_IPI_mask(mask, vector); - } else - __send_IPI_shortcut(APIC_DEST_ALLBUT, vector); -} - -static inline void __local_send_IPI_all(int vector) -{ - if (no_broadcast || vector == NMI_VECTOR) - send_IPI_mask(cpu_online_map, vector); - else - __send_IPI_shortcut(APIC_DEST_ALLINC, vector); -} 
- -static inline void send_IPI_allbutself(int vector) -{ - /* - * if there are no other CPUs in the system then we get an APIC send - * error if we try to broadcast, thus avoid sending IPIs in this case. - */ - if (!(num_online_cpus() > 1)) - return; - - __local_send_IPI_allbutself(vector); - return; -} - -static inline void send_IPI_all(int vector) -{ - __local_send_IPI_all(vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-default/mach_mpparse.h b/include/asm-i386/mach-default/mach_mpparse.h deleted file mode 100644 index 1d3832482580..000000000000 --- a/include/asm-i386/mach-default/mach_mpparse.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef __ASM_MACH_MPPARSE_H -#define __ASM_MACH_MPPARSE_H - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ -// Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - -static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) -{ - return 0; -} - -/* Hook from generic ACPI tables.c */ -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - return 0; -} - - -#endif /* __ASM_MACH_MPPARSE_H */ diff --git a/include/asm-i386/mach-default/mach_mpspec.h b/include/asm-i386/mach-default/mach_mpspec.h deleted file mode 100644 index 51c9a9775932..000000000000 --- a/include/asm-i386/mach-default/mach_mpspec.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -#if CONFIG_BASE_SMALL == 0 -#define MAX_MP_BUSSES 256 -#else -#define MAX_MP_BUSSES 32 -#endif - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h deleted file mode 100644 index e23fd9fbebb3..000000000000 --- a/include/asm-i386/mach-default/mach_reboot.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * arch/i386/mach-generic/mach_reboot.h - * - * Machine specific reboot functions for generic. - * Split out from reboot.c by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_REBOOT_H -#define _MACH_REBOOT_H - -static inline void kb_wait(void) -{ - int i; - - for (i = 0; i < 0x10000; i++) - if ((inb_p(0x64) & 0x02) == 0) - break; -} - -static inline void mach_reboot(void) -{ - int i; - - /* old method, works on most machines */ - for (i = 0; i < 10; i++) { - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } - - /* New method: sets the "System flag" which, when set, indicates - * successful completion of the keyboard controller self-test (Basic - * Assurance Test, BAT). This is needed for some machines with no - * keyboard plugged in. 
This read-modify-write sequence sets only the - * system flag - */ - for (i = 0; i < 10; i++) { - int cmd; - - outb(0x20, 0x64); /* read Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - cmd = inb(0x60); - udelay(50); - kb_wait(); - udelay(50); - outb(0x60, 0x64); /* write Controller Command Byte */ - udelay(50); - kb_wait(); - udelay(50); - outb(cmd | 0x04, 0x60); /* set "System flag" */ - udelay(50); - kb_wait(); - udelay(50); - outb(0xfe, 0x64); /* pulse reset low */ - udelay(50); - } -} - -#endif /* !_MACH_REBOOT_H */ diff --git a/include/asm-i386/mach-default/mach_time.h b/include/asm-i386/mach-default/mach_time.h deleted file mode 100644 index 31eb5de6f3dc..000000000000 --- a/include/asm-i386/mach-default/mach_time.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * include/asm-i386/mach-default/mach_time.h - * - * Machine specific set RTC function for generic. - * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_TIME_H -#define _MACH_TIME_H - -#include <linux/mc146818rtc.h> - -/* for check timing call set_rtc_mmss() 500ms */ -/* used in arch/i386/time.c::do_timer_interrupt() */ -#define USEC_AFTER 500000 -#define USEC_BEFORE 500000 - -/* - * In order to set the CMOS clock precisely, set_rtc_mmss has to be - * called 500 ms after the second nowtime has started, because when - * nowtime is written into the registers of the CMOS clock, it will - * jump to the next second precisely 500 ms later. Check the Motorola - * MC146818A or Dallas DS12887 data sheet for details. - * - * BUG: This routine does not handle hour overflow properly; it just - * sets the minutes. Usually you'll only notice that after reboot! - */ -static inline int mach_set_rtc_mmss(unsigned long nowtime) -{ - int retval = 0; - int real_seconds, real_minutes, cmos_minutes; - unsigned char save_control, save_freq_select; - - save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ - CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - - cmos_minutes = CMOS_READ(RTC_MINUTES); - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - BCD_TO_BIN(cmos_minutes); - - /* - * since we're only adjusting minutes and seconds, - * don't interfere with hour overflow. This avoids - * messing with unknown time zones but requires your - * RTC not to be off by more than 15 minutes - */ - real_seconds = nowtime % 60; - real_minutes = nowtime / 60; - if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) - real_minutes += 30; /* correct for half hour time zone */ - real_minutes %= 60; - - if (abs(real_minutes - cmos_minutes) < 30) { - if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BIN_TO_BCD(real_seconds); - BIN_TO_BCD(real_minutes); - } - CMOS_WRITE(real_seconds,RTC_SECONDS); - CMOS_WRITE(real_minutes,RTC_MINUTES); - } else { - printk(KERN_WARNING - "set_rtc_mmss: can't update from %d to %d\n", - cmos_minutes, real_minutes); - retval = -1; - } - - /* The following flags have to be released exactly in this order, - * otherwise the DS12887 (popular MC146818A clone with integrated - * battery and quartz) will not reset the oscillator and will not - * update precisely 500 ms later. You won't find this mentioned in - * the Dallas Semiconductor data sheets, but who believes data - * sheets anyway ... 
-- Markus Kuhn - */ - CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); - - return retval; -} - -static inline unsigned long mach_get_cmos_time(void) -{ - unsigned int year, mon, day, hour, min, sec; - - do { - sec = CMOS_READ(RTC_SECONDS); - min = CMOS_READ(RTC_MINUTES); - hour = CMOS_READ(RTC_HOURS); - day = CMOS_READ(RTC_DAY_OF_MONTH); - mon = CMOS_READ(RTC_MONTH); - year = CMOS_READ(RTC_YEAR); - } while (sec != CMOS_READ(RTC_SECONDS)); - - if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { - BCD_TO_BIN(sec); - BCD_TO_BIN(min); - BCD_TO_BIN(hour); - BCD_TO_BIN(day); - BCD_TO_BIN(mon); - BCD_TO_BIN(year); - } - - year += 1900; - if (year < 1970) - year += 100; - - return mktime(year, mon, day, hour, min, sec); -} - -#endif /* !_MACH_TIME_H */ diff --git a/include/asm-i386/mach-default/mach_timer.h b/include/asm-i386/mach-default/mach_timer.h deleted file mode 100644 index 807992fd4171..000000000000 --- a/include/asm-i386/mach-default/mach_timer.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * include/asm-i386/mach-default/mach_timer.h - * - * Machine specific calibrate_tsc() for generic. - * Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp> - */ -/* ------ Calibrate the TSC ------- - * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset(). - * Too much 64-bit arithmetic here to do this cleanly in C, and for - * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2) - * output busy loop as low as possible. We avoid reading the CTC registers - * directly because of the awkward 8-bit access mechanism of the 82C54 - * device. - */ -#ifndef _MACH_TIMER_H -#define _MACH_TIMER_H - -#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */ -#define CALIBRATE_LATCH \ - ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000) - -static inline void mach_prepare_counter(void) -{ - /* Set the Gate high, disable speaker */ - outb((inb(0x61) & ~0x02) | 0x01, 0x61); - - /* - * Now let's take care of CTC channel 2 - * - * Set the Gate high, program CTC channel 2 for mode 0, - * (interrupt on terminal count mode), binary count, - * load 5 * LATCH count, (LSB and MSB) to begin countdown. - * - * Some devices need a delay here. - */ - outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */ - outb_p(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */ - outb_p(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */ -} - -static inline void mach_countup(unsigned long *count_p) -{ - unsigned long count = 0; - do { - count++; - } while ((inb_p(0x61) & 0x20) == 0); - *count_p = count; -} - -#endif /* !_MACH_TIMER_H */ diff --git a/include/asm-i386/mach-default/mach_traps.h b/include/asm-i386/mach-default/mach_traps.h deleted file mode 100644 index 625438b8a6eb..000000000000 --- a/include/asm-i386/mach-default/mach_traps.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * include/asm-i386/mach-default/mach_traps.h - * - * Machine specific NMI handling for generic. 
- * Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp> - */ -#ifndef _MACH_TRAPS_H -#define _MACH_TRAPS_H - -#include <asm/mc146818rtc.h> - -static inline void clear_mem_error(unsigned char reason) -{ - reason = (reason & 0xf) | 4; - outb(reason, 0x61); -} - -static inline unsigned char get_nmi_reason(void) -{ - return inb(0x61); -} - -static inline void reassert_nmi(void) -{ - int old_reg = -1; - - if (do_i_have_lock_cmos()) - old_reg = current_lock_cmos_reg(); - else - lock_cmos(0); /* register doesn't matter here */ - outb(0x8f, 0x70); - inb(0x71); /* dummy */ - outb(0x0f, 0x70); - inb(0x71); /* dummy */ - if (old_reg >= 0) - outb(old_reg, 0x70); - else - unlock_cmos(); -} - -#endif /* !_MACH_TRAPS_H */ diff --git a/include/asm-i386/mach-default/mach_wakecpu.h b/include/asm-i386/mach-default/mach_wakecpu.h deleted file mode 100644 index 3ebb17893aa5..000000000000 --- a/include/asm-i386/mach-default/mach_wakecpu.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef __ASM_MACH_WAKECPU_H -#define __ASM_MACH_WAKECPU_H - -/* - * This file copes with machines that wakeup secondary CPUs by the - * INIT, INIT, STARTUP sequence. - */ - -#define WAKE_SECONDARY_VIA_INIT - -#define TRAMPOLINE_LOW phys_to_virt(0x467) -#define TRAMPOLINE_HIGH phys_to_virt(0x469) - -#define boot_cpu_apicid boot_cpu_physical_apicid - -static inline void wait_for_init_deassert(atomic_t *deassert) -{ - while (!atomic_read(deassert)) - cpu_relax(); - return; -} - -/* Nothing to do for most platforms, since cleared by the INIT cycle */ -static inline void smp_callin_clear_local_apic(void) -{ -} - -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -#if APIC_DEBUG - #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) -#else - #define inquire_remote_apic(apicid) {} -#endif - -#endif /* __ASM_MACH_WAKECPU_H */ diff --git a/include/asm-i386/mach-default/pci-functions.h b/include/asm-i386/mach-default/pci-functions.h deleted file mode 100644 index ed0bab427354..000000000000 --- a/include/asm-i386/mach-default/pci-functions.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * PCI BIOS function numbering for conventional PCI BIOS - * systems - */ - -#define PCIBIOS_PCI_FUNCTION_ID 0xb1XX -#define PCIBIOS_PCI_BIOS_PRESENT 0xb101 -#define PCIBIOS_FIND_PCI_DEVICE 0xb102 -#define PCIBIOS_FIND_PCI_CLASS_CODE 0xb103 -#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106 -#define PCIBIOS_READ_CONFIG_BYTE 0xb108 -#define PCIBIOS_READ_CONFIG_WORD 0xb109 -#define PCIBIOS_READ_CONFIG_DWORD 0xb10a -#define PCIBIOS_WRITE_CONFIG_BYTE 0xb10b -#define PCIBIOS_WRITE_CONFIG_WORD 0xb10c -#define PCIBIOS_WRITE_CONFIG_DWORD 0xb10d -#define PCIBIOS_GET_ROUTING_OPTIONS 0xb10e -#define PCIBIOS_SET_PCI_HW_INT 0xb10f - diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h deleted file mode 100644 index 605e3ccb991b..000000000000 --- a/include/asm-i386/mach-default/setup_arch.h +++ /dev/null @@ -1,7 +0,0 @@ -/* Hook to call BIOS initialisation function */ - -/* no action for generic */ - -#ifndef ARCH_SETUP -#define ARCH_SETUP -#endif diff --git a/include/asm-i386/mach-default/smpboot_hooks.h b/include/asm-i386/mach-default/smpboot_hooks.h deleted file mode 100644 index 7f45f6311059..000000000000 --- a/include/asm-i386/mach-default/smpboot_hooks.h +++ /dev/null @@ -1,44 +0,0 @@ -/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws - * which needs to alter them. 
*/ - -static inline void smpboot_clear_io_apic_irqs(void) -{ - io_apic_irqs = 0; -} - -static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) -{ - CMOS_WRITE(0xa, 0xf); - local_flush_tlb(); - Dprintk("1.\n"); - *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; - Dprintk("2.\n"); - *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; - Dprintk("3.\n"); -} - -static inline void smpboot_restore_warm_reset_vector(void) -{ - /* - * Install writable page 0 entry to set BIOS data area. - */ - local_flush_tlb(); - - /* - * Paranoid: Set warm reset code and vector here back - * to default values. - */ - CMOS_WRITE(0, 0xf); - - *((volatile long *) phys_to_virt(0x467)) = 0; -} - -static inline void smpboot_setup_io_apic(void) -{ - /* - * Here we can be sure that there is an IO-APIC in the system. Let's - * go and set it up: - */ - if (!skip_ioapic_setup && nr_ioapics) - setup_IO_APIC(); -} diff --git a/include/asm-i386/mach-es7000/mach_apic.h b/include/asm-i386/mach-es7000/mach_apic.h deleted file mode 100644 index caec64be516d..000000000000 --- a/include/asm-i386/mach-es7000/mach_apic.h +++ /dev/null @@ -1,206 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) -#define esr_disable (1) - -static inline int apic_id_registered(void) -{ - return (1); -} - -static inline cpumask_t target_cpus(void) -{ -#if defined CONFIG_ES7000_CLUSTERED_APIC - return CPU_MASK_ALL; -#else - return cpumask_of_cpu(smp_processor_id()); -#endif -} -#define TARGET_CPUS (target_cpus()) - -#if defined CONFIG_ES7000_CLUSTERED_APIC -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ -#define NO_BALANCE_IRQ (1) -#undef WAKE_SECONDARY_VIA_INIT -#define WAKE_SECONDARY_VIA_MIP -#else -#define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target procs */ -#define NO_BALANCE_IRQ (0) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0x0 -#define WAKE_SECONDARY_VIA_INIT -#endif - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long id; - id = xapic_phys_to_log_apicid(cpu); - return (SET_APIC_LOGICAL_ID(id)); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LdR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write_around(APIC_LDR, val); -} - -#ifndef CONFIG_X86_GENERICARCH -extern void enable_apic_mode(void); -#endif - -extern int apic_version [MAX_APICS]; -static inline void setup_apic_routing(void) -{ - int apic = bios_cpu_apicid[smp_processor_id()]; - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", - (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); -} - -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - -static inline int apicid_to_node(int logical_apicid) -{ - return 0; -} - - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (!mps_cpu) - return boot_cpu_physical_apicid; - else if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - static int id = 0; - physid_mask_t mask; - mask = physid_mask_of_physid(id); - ++id; - return mask; -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *unused) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xff); -} - - -static inline void setup_portio_remap(void) -{ -} - -extern unsigned int boot_cpu_physical_apicid; -static inline int check_phys_apicid_present(int cpu_physical_apicid) -{ - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); - return (1); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int num_bits_set; - int cpus_found = 0; - int cpu; - int apicid; - - num_bits_set = cpus_weight(cpumask); - /* Return id to all */ - if (num_bits_set == NR_CPUS) -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. 
- */ - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n",__FUNCTION__); -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-es7000/mach_apicdef.h b/include/asm-i386/mach-es7000/mach_apicdef.h deleted file mode 100644 index a58ab5a75c8c..000000000000 --- a/include/asm-i386/mach-es7000/mach_apicdef.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -#define APIC_ID_MASK (0xFF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0xFF); -} - -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-es7000/mach_ipi.h b/include/asm-i386/mach-es7000/mach_ipi.h deleted file mode 100644 index 5e61bd220b06..000000000000 --- a/include/asm-i386/mach-es7000/mach_ipi.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -void send_IPI_mask_sequence(cpumask_t mask, int vector); - -static inline void send_IPI_mask(cpumask_t mask, int vector) -{ - send_IPI_mask_sequence(mask, vector); -} - -static inline void send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); -} - -static inline void send_IPI_all(int vector) -{ - send_IPI_mask(cpu_online_map, vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-es7000/mach_mpparse.h b/include/asm-i386/mach-es7000/mach_mpparse.h deleted file mode 100644 index 8aa10547b4b1..000000000000 --- a/include/asm-i386/mach-es7000/mach_mpparse.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __ASM_MACH_MPPARSE_H -#define __ASM_MACH_MPPARSE_H - -#include <linux/acpi.h> - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - -extern int parse_unisys_oem (char *oemptr); -extern int find_unisys_acpi_oem_table(unsigned long *oem_addr); -extern void setup_unisys(void); - -#ifndef CONFIG_X86_GENERICARCH -extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id); -extern int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid); -#endif - -#ifdef CONFIG_ACPI - -static inline int es7000_check_dsdt(void) -{ - struct acpi_table_header header; - memcpy(&header, 0, sizeof(struct acpi_table_header)); - acpi_get_table_header(ACPI_SIG_DSDT, 0, &header); - if (!strncmp(header.oem_id, "UNISYS", 6)) - return 1; - return 0; -} -#endif - -#endif /* __ASM_MACH_MPPARSE_H */ diff --git a/include/asm-i386/mach-es7000/mach_mpspec.h b/include/asm-i386/mach-es7000/mach_mpspec.h deleted file mode 100644 index b1f5039d4506..000000000000 --- a/include/asm-i386/mach-es7000/mach_mpspec.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -#define MAX_MP_BUSSES 256 - -#endif /* 
__ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-es7000/mach_wakecpu.h b/include/asm-i386/mach-es7000/mach_wakecpu.h deleted file mode 100644 index 84ff58314501..000000000000 --- a/include/asm-i386/mach-es7000/mach_wakecpu.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef __ASM_MACH_WAKECPU_H -#define __ASM_MACH_WAKECPU_H - -/* - * This file copes with machines that wakeup secondary CPUs by the - * INIT, INIT, STARTUP sequence. - */ - -#ifdef CONFIG_ES7000_CLUSTERED_APIC -#define WAKE_SECONDARY_VIA_MIP -#else -#define WAKE_SECONDARY_VIA_INIT -#endif - -#ifdef WAKE_SECONDARY_VIA_MIP -extern int es7000_start_cpu(int cpu, unsigned long eip); -static inline int -wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip) -{ - int boot_error = 0; - boot_error = es7000_start_cpu(phys_apicid, start_eip); - return boot_error; -} -#endif - -#define TRAMPOLINE_LOW phys_to_virt(0x467) -#define TRAMPOLINE_HIGH phys_to_virt(0x469) - -#define boot_cpu_apicid boot_cpu_physical_apicid - -static inline void wait_for_init_deassert(atomic_t *deassert) -{ -#ifdef WAKE_SECONDARY_VIA_INIT - while (!atomic_read(deassert)) - cpu_relax(); -#endif - return; -} - -/* Nothing to do for most platforms, since cleared by the INIT cycle */ -static inline void smp_callin_clear_local_apic(void) -{ -} - -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) -{ -} - -#if APIC_DEBUG - #define inquire_remote_apic(apicid) __inquire_remote_apic(apicid) -#else - #define inquire_remote_apic(apicid) {} -#endif - -#endif /* __ASM_MACH_WAKECPU_H */ diff --git a/include/asm-i386/mach-generic/irq_vectors_limits.h b/include/asm-i386/mach-generic/irq_vectors_limits.h deleted file mode 100644 index 890ce3f5e09a..000000000000 --- a/include/asm-i386/mach-generic/irq_vectors_limits.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_IRQ_VECTORS_LIMITS_H -#define _ASM_IRQ_VECTORS_LIMITS_H - -/* - * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, - * even with uni-proc kernels, so use a big array. - * - * This value should be the same in both the generic and summit subarches. - * Change one, change 'em both. 
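The TRAMPOLINE_LOW/HIGH pointers in the wakecpu headers above reference the BIOS warm-reset vector (offset word at 0x467, segment word at 0x469, as far as I can tell), which smpboot_setup_warm_reset_vector() earlier fills from the trampoline address. A standalone sketch of that split, assuming the usual segment*16+offset real-mode resume and an illustrative start_eip value:

#include <stdio.h>
#include <stdint.h>

/* Model of the warm-reset ("trampoline") vector used by the INIT/STARTUP
 * wakeup path: segment word at 0x469, offset word at 0x467, and the CPU
 * resumes in real mode at segment*16 + offset. */
int main(void)
{
    uint32_t start_eip = 0x9f000;              /* illustrative trampoline address */
    uint16_t seg = start_eip >> 4;             /* written to TRAMPOLINE_HIGH (0x469) */
    uint16_t off = start_eip & 0xf;            /* written to TRAMPOLINE_LOW  (0x467) */
    uint32_t resume = (uint32_t)seg * 16 + off;
    printf("seg=%04x off=%04x resume=%05x\n",
           (unsigned)seg, (unsigned)off, (unsigned)resume);  /* resume == start_eip */
    return 0;
}

The split only recovers addresses below 1MB exactly, which matches the low-memory trampoline these headers assume.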
- */ -#define NR_IRQS 224 -#define NR_IRQ_VECTORS 1024 - -#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff --git a/include/asm-i386/mach-generic/mach_apic.h b/include/asm-i386/mach-generic/mach_apic.h deleted file mode 100644 index a236e7021528..000000000000 --- a/include/asm-i386/mach-generic/mach_apic.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <asm/genapic.h> - -#define esr_disable (genapic->ESR_DISABLE) -#define NO_BALANCE_IRQ (genapic->no_balance_irq) -#define INT_DELIVERY_MODE (genapic->int_delivery_mode) -#define INT_DEST_MODE (genapic->int_dest_mode) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL (genapic->apic_destination_logical) -#define TARGET_CPUS (genapic->target_cpus()) -#define apic_id_registered (genapic->apic_id_registered) -#define init_apic_ldr (genapic->init_apic_ldr) -#define ioapic_phys_id_map (genapic->ioapic_phys_id_map) -#define setup_apic_routing (genapic->setup_apic_routing) -#define multi_timer_check (genapic->multi_timer_check) -#define apicid_to_node (genapic->apicid_to_node) -#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) -#define cpu_present_to_apicid (genapic->cpu_present_to_apicid) -#define apicid_to_cpu_present (genapic->apicid_to_cpu_present) -#define mpc_apic_id (genapic->mpc_apic_id) -#define setup_portio_remap (genapic->setup_portio_remap) -#define check_apicid_present (genapic->check_apicid_present) -#define check_phys_apicid_present (genapic->check_phys_apicid_present) -#define check_apicid_used (genapic->check_apicid_used) -#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) -#define enable_apic_mode (genapic->enable_apic_mode) -#define phys_pkg_id (genapic->phys_pkg_id) - -extern void generic_bigsmp_probe(void); - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-generic/mach_apicdef.h b/include/asm-i386/mach-generic/mach_apicdef.h deleted file mode 100644 index 28ed98972ca8..000000000000 --- a/include/asm-i386/mach-generic/mach_apicdef.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _GENAPIC_MACH_APICDEF_H -#define _GENAPIC_MACH_APICDEF_H 1 - -#ifndef APIC_DEFINITION -#include <asm/genapic.h> - -#define GET_APIC_ID (genapic->get_apic_id) -#define APIC_ID_MASK (genapic->apic_id_mask) -#endif - -#endif diff --git a/include/asm-i386/mach-generic/mach_ipi.h b/include/asm-i386/mach-generic/mach_ipi.h deleted file mode 100644 index 441b0fe3ed1d..000000000000 --- a/include/asm-i386/mach-generic/mach_ipi.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _MACH_IPI_H -#define _MACH_IPI_H 1 - -#include <asm/genapic.h> - -#define send_IPI_mask (genapic->send_IPI_mask) -#define send_IPI_allbutself (genapic->send_IPI_allbutself) -#define send_IPI_all (genapic->send_IPI_all) - -#endif diff --git a/include/asm-i386/mach-generic/mach_mpparse.h b/include/asm-i386/mach-generic/mach_mpparse.h deleted file mode 100644 index dbd9fce54f4d..000000000000 --- a/include/asm-i386/mach-generic/mach_mpparse.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _MACH_MPPARSE_H -#define _MACH_MPPARSE_H 1 - -#include <asm/genapic.h> - -#define mpc_oem_bus_info (genapic->mpc_oem_bus_info) -#define mpc_oem_pci_bus (genapic->mpc_oem_pci_bus) - -int mps_oem_check(struct mp_config_table *mpc, char *oem, char *productid); -int acpi_madt_oem_check(char *oem_id, char *oem_table_id); - -#endif diff --git a/include/asm-i386/mach-generic/mach_mpspec.h b/include/asm-i386/mach-generic/mach_mpspec.h deleted file mode 100644 index 9ef0b941bb22..000000000000 --- a/include/asm-i386/mach-generic/mach_mpspec.h +++ /dev/null @@ 
-1,10 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -/* Summit or generic (i.e. installer) kernels need lots of bus entries. */ -/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */ -#define MAX_MP_BUSSES 260 - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-numaq/mach_apic.h b/include/asm-i386/mach-numaq/mach_apic.h deleted file mode 100644 index 5e5e7dd2692e..000000000000 --- a/include/asm-i386/mach-numaq/mach_apic.h +++ /dev/null @@ -1,149 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <asm/io.h> -#include <linux/mmzone.h> -#include <linux/nodemask.h> - -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline cpumask_t target_cpus(void) -{ - return CPU_MASK_ALL; -} - -#define TARGET_CPUS (target_cpus()) - -#define NO_BALANCE_IRQ (1) -#define esr_disable (1) - -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */ - -#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) -#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline int apic_id_registered(void) -{ - return 1; -} - -static inline void init_apic_ldr(void) -{ - /* Already done in NUMA-Q firmware */ -} - -static inline void setup_apic_routing(void) -{ - printk("Enabling APIC mode: %s. Using %d I/O APICs\n", - "NUMA-Q", nr_ioapics); -} - -/* - * Skip adding the timer int on secondary nodes, which causes - * a small but painful rift in the time-space continuum. - */ -static inline int multi_timer_check(int apic, int irq) -{ - return apic != 0 && irq == 0; -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* We don't have a good way to do this yet - hack */ - return physids_promote(0xFUL); -} - -/* Mapping from cpu number to logical apicid */ -extern u8 cpu_2_logical_apicid[]; -static inline int cpu_to_logical_apicid(int cpu) -{ - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -} - -/* - * Supporting over 60 cpus on NUMA-Q requires a locality-dependent - * cpu to APIC ID relation to properly interact with the intelligent - * mode of the cluster controller. - */ -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < 60) - return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3)); - else - return BAD_APICID; -} - -static inline int generate_logical_apicid(int quad, int phys_apicid) -{ - return (quad << 4) + (phys_apicid ? 
phys_apicid << 1 : 1); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return logical_apicid >> 4; -} - -static inline physid_mask_t apicid_to_cpu_present(int logical_apicid) -{ - int node = apicid_to_node(logical_apicid); - int cpu = __ffs(logical_apicid & 0xf); - - return physid_mask_of_physid(cpu + 4*node); -} - -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - int quad = translation_record->trans_quad; - int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid); - - printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver, quad, logical_apicid); - return logical_apicid; -} - -static inline void setup_portio_remap(void) -{ - int num_quads = num_online_nodes(); - - if (num_quads <= 1) - return; - - printk("Remapping cross-quad port I/O for %d quads\n", num_quads); - xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD); - printk("xquad_portio vaddr 0x%08lx, len %08lx\n", - (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD); -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return (1); -} - -static inline void enable_apic_mode(void) -{ -} - -/* - * We use physical apicids here, not logical, so just return the default - * physical broadcast to stop people from breaking us - */ -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return (int) 0xF; -} - -/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */ -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-numaq/mach_apicdef.h b/include/asm-i386/mach-numaq/mach_apicdef.h deleted file mode 100644 index bf439d0690f5..000000000000 --- a/include/asm-i386/mach-numaq/mach_apicdef.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - - -#define APIC_ID_MASK (0xF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0x0F); -} - -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-numaq/mach_ipi.h b/include/asm-i386/mach-numaq/mach_ipi.h deleted file mode 100644 index c6044488e9e6..000000000000 --- a/include/asm-i386/mach-numaq/mach_ipi.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -void send_IPI_mask_sequence(cpumask_t, int vector); - -static inline void send_IPI_mask(cpumask_t mask, int vector) -{ - send_IPI_mask_sequence(mask, vector); -} - -static inline void send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); -} - -static inline void send_IPI_all(int vector) -{ - send_IPI_mask(cpu_online_map, vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-numaq/mach_mpparse.h b/include/asm-i386/mach-numaq/mach_mpparse.h deleted file mode 100644 index 51bbac8fc0c2..000000000000 --- a/include/asm-i386/mach-numaq/mach_mpparse.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __ASM_MACH_MPPARSE_H -#define __ASM_MACH_MPPARSE_H - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - 
mp_bus_id_to_node[m->mpc_busid] = quad; - mp_bus_id_to_local[m->mpc_busid] = local; - printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad); -} - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ - int quad = translation->trans_quad; - int local = translation->trans_local; - - quad_local_to_mp_bus_id[quad][local] = m->mpc_busid; -} - -/* Hook from generic ACPI tables.c */ -static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ -} - -#endif /* __ASM_MACH_MPPARSE_H */ diff --git a/include/asm-i386/mach-numaq/mach_mpspec.h b/include/asm-i386/mach-numaq/mach_mpspec.h deleted file mode 100644 index dffb09856f8f..000000000000 --- a/include/asm-i386/mach-numaq/mach_mpspec.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 512 - -#define MAX_MP_BUSSES 32 - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-numaq/mach_wakecpu.h b/include/asm-i386/mach-numaq/mach_wakecpu.h deleted file mode 100644 index 00530041a991..000000000000 --- a/include/asm-i386/mach-numaq/mach_wakecpu.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef __ASM_MACH_WAKECPU_H -#define __ASM_MACH_WAKECPU_H - -/* This file copes with machines that wakeup secondary CPUs by NMIs */ - -#define WAKE_SECONDARY_VIA_NMI - -#define TRAMPOLINE_LOW phys_to_virt(0x8) -#define TRAMPOLINE_HIGH phys_to_virt(0xa) - -#define boot_cpu_apicid boot_cpu_logical_apicid - -/* We don't do anything here because we use NMI's to boot instead */ -static inline void wait_for_init_deassert(atomic_t *deassert) -{ -} - -/* - * Because we use NMIs rather than the INIT-STARTUP sequence to - * bootstrap the CPUs, the APIC may be in a weird state. Kick it. - */ -static inline void smp_callin_clear_local_apic(void) -{ - clear_local_APIC(); -} - -static inline void store_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk("Storing NMI vector\n"); - *high = *((volatile unsigned short *) TRAMPOLINE_HIGH); - *low = *((volatile unsigned short *) TRAMPOLINE_LOW); -} - -static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) -{ - printk("Restoring NMI vector\n"); - *((volatile unsigned short *) TRAMPOLINE_HIGH) = *high; - *((volatile unsigned short *) TRAMPOLINE_LOW) = *low; -} - -#define inquire_remote_apic(apicid) {} - -#endif /* __ASM_MACH_WAKECPU_H */ diff --git a/include/asm-i386/mach-summit/irq_vectors_limits.h b/include/asm-i386/mach-summit/irq_vectors_limits.h deleted file mode 100644 index 890ce3f5e09a..000000000000 --- a/include/asm-i386/mach-summit/irq_vectors_limits.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_IRQ_VECTORS_LIMITS_H -#define _ASM_IRQ_VECTORS_LIMITS_H - -/* - * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, - * even with uni-proc kernels, so use a big array. - * - * This value should be the same in both the generic and summit subarches. - * Change one, change 'em both. - */ -#define NR_IRQS 224 -#define NR_IRQ_VECTORS 1024 - -#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ diff --git a/include/asm-i386/mach-summit/mach_apic.h b/include/asm-i386/mach-summit/mach_apic.h deleted file mode 100644 index 732f776aab8e..000000000000 --- a/include/asm-i386/mach-summit/mach_apic.h +++ /dev/null @@ -1,197 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <asm/smp.h> - -#define esr_disable (1) -#define NO_BALANCE_IRQ (0) - -/* In clustered mode, the high nibble of APIC ID is a cluster number. 
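The Summit comment being continued here describes the clustered xAPIC layout: the high nibble of a logical APIC ID names the cluster and the low nibble is a per-cluster CPU bitmap (the XAPIC_DEST_* masks defined just below). A toy encode/decode of that layout, using illustrative cluster and slot numbers:

#include <stdio.h>
#include <stdint.h>

#define XAPIC_DEST_CPUS_SHIFT   4
#define XAPIC_DEST_CPUS_MASK    ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

int main(void)
{
    unsigned cluster = 2, pos = 3;             /* 4th CPU slot in cluster 2 */
    uint8_t id = (cluster << XAPIC_DEST_CPUS_SHIFT) | (1u << pos);

    printf("logical id   = 0x%02x\n", id);                           /* 0x28 */
    printf("cluster part = 0x%02x\n", id & XAPIC_DEST_CLUSTER_MASK); /* 0x20 */
    printf("cpu bitmap   = 0x%02x\n", id & XAPIC_DEST_CPUS_MASK);    /* 0x08 */
    return 0;
}

With only four bitmap bits per cluster, a fifth CPU in one cluster cannot be represented, which is exactly what the BUG_ON in init_apic_ldr() below guards against.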
- * The low nibble is a 4-bit bitmap. */ -#define XAPIC_DEST_CPUS_SHIFT 4 -#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) -#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) - -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline cpumask_t target_cpus(void) -{ - /* CPU_MASK_ALL (0xff) has undefined behaviour with - * dest_LowestPrio mode logical clustered apic interrupt routing - * Just start on cpu 0. IRQ balancing will spread load - */ - return cpumask_of_cpu(0); -} -#define TARGET_CPUS (target_cpus()) - -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -/* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long check_apicid_present(int bit) -{ - return 1; -} - -#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) - -extern u8 bios_cpu_apicid[]; -extern u8 cpu_2_logical_apicid[]; - -static inline void init_apic_ldr(void) -{ - unsigned long val, id; - int count = 0; - u8 my_id = (u8)hard_smp_processor_id(); - u8 my_cluster = (u8)apicid_cluster(my_id); -#ifdef CONFIG_SMP - u8 lid; - int i; - - /* Create logical APIC IDs by counting CPUs already in cluster. */ - for (count = 0, i = NR_CPUS; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; - if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) - ++count; - } -#endif - /* We only have a 4 wide bitmap in cluster mode. If a deranged - * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ - BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(id); - apic_write_around(APIC_LDR, val); -} - -static inline int multi_timer_check(int apic, int irq) -{ - return 0; -} - -static inline int apic_id_registered(void) -{ - return 1; -} - -static inline void setup_apic_routing(void) -{ - printk("Enabling APIC mode: Summit. 
Using %d I/O APICs\n", - nr_ioapics); -} - -static inline int apicid_to_node(int logical_apicid) -{ -#ifdef CONFIG_SMP - return apicid_2_node[hard_smp_processor_id()]; -#else - return 0; -#endif -} - -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ -#ifdef CONFIG_SMP - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -#else - return logical_smp_processor_id(); -#endif -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0x0F); -} - -static inline physid_mask_t apicid_to_cpu_present(int apicid) -{ - return physid_mask_of_physid(0); -} - -static inline int mpc_apic_id(struct mpc_config_processor *m, - struct mpc_config_translation *translation_record) -{ - printk("Processor #%d %ld:%ld APIC version %d\n", - m->mpc_apicid, - (m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8, - (m->mpc_cpufeature & CPU_MODEL_MASK) >> 4, - m->mpc_apicver); - return (m->mpc_apicid); -} - -static inline void setup_portio_remap(void) -{ -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -static inline void enable_apic_mode(void) -{ -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int num_bits_set; - int cpus_found = 0; - int cpu; - int apicid; - - num_bits_set = cpus_weight(cpumask); - /* Return id to all */ - if (num_bits_set == NR_CPUS) - return (int) 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. - */ - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n",__FUNCTION__); - return 0xFF; - } - apicid = apicid | new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -/* cpuid returns the value latched in the HW at reset, not the APIC ID - * register's value. For any box whose BIOS changes APIC IDs, like - * clustered APIC systems, we must use hard_smp_processor_id. - * - * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 
- */ -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-summit/mach_apicdef.h b/include/asm-i386/mach-summit/mach_apicdef.h deleted file mode 100644 index a58ab5a75c8c..000000000000 --- a/include/asm-i386/mach-summit/mach_apicdef.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -#define APIC_ID_MASK (0xFF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0xFF); -} - -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-summit/mach_ipi.h b/include/asm-i386/mach-summit/mach_ipi.h deleted file mode 100644 index 9404c535b7ec..000000000000 --- a/include/asm-i386/mach-summit/mach_ipi.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -void send_IPI_mask_sequence(cpumask_t mask, int vector); - -static inline void send_IPI_mask(cpumask_t mask, int vector) -{ - send_IPI_mask_sequence(mask, vector); -} - -static inline void send_IPI_allbutself(int vector) -{ - cpumask_t mask = cpu_online_map; - cpu_clear(smp_processor_id(), mask); - - if (!cpus_empty(mask)) - send_IPI_mask(mask, vector); -} - -static inline void send_IPI_all(int vector) -{ - send_IPI_mask(cpu_online_map, vector); -} - -#endif /* __ASM_MACH_IPI_H */ diff --git a/include/asm-i386/mach-summit/mach_mpparse.h b/include/asm-i386/mach-summit/mach_mpparse.h deleted file mode 100644 index c2520539d934..000000000000 --- a/include/asm-i386/mach-summit/mach_mpparse.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef __ASM_MACH_MPPARSE_H -#define __ASM_MACH_MPPARSE_H - -#include <mach_apic.h> -#include <asm/tsc.h> - -extern int use_cyclone; - -#ifdef CONFIG_X86_SUMMIT_NUMA -extern void setup_summit(void); -#else -#define setup_summit() {} -#endif - -static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name, - struct mpc_config_translation *translation) -{ - Dprintk("Bus #%d is %s\n", m->mpc_busid, name); -} - -static inline void mpc_oem_pci_bus(struct mpc_config_bus *m, - struct mpc_config_translation *translation) -{ -} - -static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, - char *productid) -{ - if (!strncmp(oem, "IBM ENSW", 8) && - (!strncmp(productid, "VIGIL SMP", 9) - || !strncmp(productid, "EXA", 3) - || !strncmp(productid, "RUTHLESS SMP", 12))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -/* Hook from generic ACPI tables.c */ -static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) -{ - if (!strncmp(oem_id, "IBM", 3) && - (!strncmp(oem_table_id, "SERVIGIL", 8) - || !strncmp(oem_table_id, "EXA", 3))){ - mark_tsc_unstable("Summit based system"); - use_cyclone = 1; /*enable cyclone-timer*/ - setup_summit(); - return 1; - } - return 0; -} - -struct rio_table_hdr { - unsigned char version; /* Version number of this data structure */ - /* Version 3 adds chassis_num & WP_index */ - unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */ - unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */ -} __attribute__((packed)); - -struct scal_detail { - unsigned char node_id; /* Scalability Node ID */ - unsigned long CBAR; /* Address of 1MB register space */ - unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 
0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF = None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port2node; /* Node ID port connected to: 0xFF = None */ - unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */ -} __attribute__((packed)); - -struct rio_detail { - unsigned char node_id; /* RIO Node ID */ - unsigned long BBAR; /* Address of 1MB register space */ - unsigned char type; /* Type of device */ - unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/ - /* For CYC: Node ID of Twister that owns this CYC */ - unsigned char port0node; /* Node ID port connected to: 0xFF=None */ - unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char port1node; /* Node ID port connected to: 0xFF=None */ - unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */ - unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */ - /* For CYC: 0 */ - unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */ - /* = 0 : the XAPIC is not used, ie:*/ - /* ints fwded to another XAPIC */ - /* Bits1:7 Reserved */ - /* For CYC: Bits0:7 Reserved */ - unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */ - /* lower slot numbers/PCI bus numbers */ - /* For CYC: No meaning */ - unsigned char chassis_num; /* 1 based Chassis number */ - /* For LookOut WPEGs this field indicates the */ - /* Expansion Chassis #, enumerated from Boot */ - /* Node WPEG external port, then Boot Node CYC */ - /* external port, then Next Vigil chassis WPEG */ - /* external port, etc. */ - /* Shared Lookouts have only 1 chassis number (the */ - /* first one assigned) */ -} __attribute__((packed)); - - -typedef enum { - CompatTwister = 0, /* Compatibility Twister */ - AltTwister = 1, /* Alternate Twister of internal 8-way */ - CompatCyclone = 2, /* Compatibility Cyclone */ - AltCyclone = 3, /* Alternate Cyclone of internal 8-way */ - CompatWPEG = 4, /* Compatibility WPEG */ - AltWPEG = 5, /* Second Planar WPEG */ - LookOutAWPEG = 6, /* LookOut WPEG */ - LookOutBWPEG = 7, /* LookOut WPEG */ -} node_type; - -static inline int is_WPEG(struct rio_detail *rio){ - return (rio->type == CompatWPEG || rio->type == AltWPEG || - rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); -} - -#endif /* __ASM_MACH_MPPARSE_H */ diff --git a/include/asm-i386/mach-summit/mach_mpspec.h b/include/asm-i386/mach-summit/mach_mpspec.h deleted file mode 100644 index bd765523511a..000000000000 --- a/include/asm-i386/mach-summit/mach_mpspec.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_MACH_MPSPEC_H -#define __ASM_MACH_MPSPEC_H - -#define MAX_IRQ_SOURCES 256 - -/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. 
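The rio_table structures above describe the Summit/EXA chassis topology, and is_WPEG() merely classifies an entry by its type field. A stripped-down sketch that counts Winnipeg (WPEG) entries in a table; only the type field of struct rio_detail is modelled here, and the sample table is invented:

#include <stdio.h>

typedef enum {                  /* same values as the node_type enum above */
    CompatTwister = 0, AltTwister = 1,
    CompatCyclone = 2, AltCyclone = 3,
    CompatWPEG = 4, AltWPEG = 5,
    LookOutAWPEG = 6, LookOutBWPEG = 7,
} node_type;

struct rio_detail { unsigned char type; };   /* remaining fields omitted */

static int is_WPEG(const struct rio_detail *rio)
{
    return rio->type == CompatWPEG || rio->type == AltWPEG ||
           rio->type == LookOutAWPEG || rio->type == LookOutBWPEG;
}

int main(void)
{
    struct rio_detail table[] = {
        { CompatTwister }, { CompatWPEG }, { AltWPEG }, { CompatCyclone },
    };
    int wpegs = 0;
    for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        wpegs += is_WPEG(&table[i]);
    printf("%d WPEGs\n", wpegs);   /* 2 */
    return 0;
}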
*/ -#define MAX_MP_BUSSES 260 - -#endif /* __ASM_MACH_MPSPEC_H */ diff --git a/include/asm-i386/mach-visws/cobalt.h b/include/asm-i386/mach-visws/cobalt.h deleted file mode 100644 index 33c36225a042..000000000000 --- a/include/asm-i386/mach-visws/cobalt.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef __I386_SGI_COBALT_H -#define __I386_SGI_COBALT_H - -#include <asm/fixmap.h> - -/* - * Cobalt SGI Visual Workstation system ASIC - */ - -#define CO_CPU_NUM_PHYS 0x1e00 -#define CO_CPU_TAB_PHYS (CO_CPU_NUM_PHYS + 2) - -#define CO_CPU_MAX 4 - -#define CO_CPU_PHYS 0xc2000000 -#define CO_APIC_PHYS 0xc4000000 - -/* see set_fixmap() and asm/fixmap.h */ -#define CO_CPU_VADDR (fix_to_virt(FIX_CO_CPU)) -#define CO_APIC_VADDR (fix_to_virt(FIX_CO_APIC)) - -/* Cobalt CPU registers -- relative to CO_CPU_VADDR, use co_cpu_*() */ -#define CO_CPU_REV 0x08 -#define CO_CPU_CTRL 0x10 -#define CO_CPU_STAT 0x20 -#define CO_CPU_TIMEVAL 0x30 - -/* CO_CPU_CTRL bits */ -#define CO_CTRL_TIMERUN 0x04 /* 0 == disabled */ -#define CO_CTRL_TIMEMASK 0x08 /* 0 == unmasked */ - -/* CO_CPU_STATUS bits */ -#define CO_STAT_TIMEINTR 0x02 /* (r) 1 == int pend, (w) 0 == clear */ - -/* CO_CPU_TIMEVAL value */ -#define CO_TIME_HZ 100000000 /* Cobalt core rate */ - -/* Cobalt APIC registers -- relative to CO_APIC_VADDR, use co_apic_*() */ -#define CO_APIC_HI(n) (((n) * 0x10) + 4) -#define CO_APIC_LO(n) ((n) * 0x10) -#define CO_APIC_ID 0x0ffc - -/* CO_APIC_ID bits */ -#define CO_APIC_ENABLE 0x00000100 - -/* CO_APIC_LO bits */ -#define CO_APIC_MASK 0x00010000 /* 0 = enabled */ -#define CO_APIC_LEVEL 0x00008000 /* 0 = edge */ - -/* - * Where things are physically wired to Cobalt - * #defines with no board _<type>_<rev>_ are common to all (thus far) - */ -#define CO_APIC_IDE0 4 -#define CO_APIC_IDE1 2 /* Only on 320 */ - -#define CO_APIC_8259 12 /* serial, floppy, par-l-l */ - -/* Lithium PCI Bridge A -- "the one with 82557 Ethernet" */ -#define CO_APIC_PCIA_BASE0 0 /* and 1 */ /* slot 0, line 0 */ -#define CO_APIC_PCIA_BASE123 5 /* and 6 */ /* slot 0, line 1 */ - -#define CO_APIC_PIIX4_USB 7 /* this one is weird */ - -/* Lithium PCI Bridge B -- "the one with PIIX4" */ -#define CO_APIC_PCIB_BASE0 8 /* and 9-12 *//* slot 0, line 0 */ -#define CO_APIC_PCIB_BASE123 13 /* 14.15 */ /* slot 0, line 1 */ - -#define CO_APIC_VIDOUT0 16 -#define CO_APIC_VIDOUT1 17 -#define CO_APIC_VIDIN0 18 -#define CO_APIC_VIDIN1 19 - -#define CO_APIC_LI_AUDIO 22 - -#define CO_APIC_AS 24 -#define CO_APIC_RE 25 - -#define CO_APIC_CPU 28 /* Timer and Cache interrupt */ -#define CO_APIC_NMI 29 -#define CO_APIC_LAST CO_APIC_NMI - -/* - * This is how irqs are assigned on the Visual Workstation. - * Legacy devices get irq's 1-15 (system clock is 0 and is CO_APIC_CPU). - * All other devices (including PCI) go to Cobalt and are irq's 16 on up. - */ -#define CO_IRQ_APIC0 16 /* irq of apic entry 0 */ -#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0) -#define CO_IRQ(apic) (CO_IRQ_APIC0 + (apic)) /* apic ent to irq */ -#define CO_APIC(irq) ((irq) - CO_IRQ_APIC0) /* irq to apic ent */ -#define CO_IRQ_IDE0 14 /* knowledge of... */ -#define CO_IRQ_IDE1 15 /* ... ide driver defaults! 
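The CO_IRQ()/CO_APIC() macros above map Cobalt APIC entries onto Linux IRQ numbers by offsetting them past the 16 legacy IRQs. A small round-trip check of that mapping, reusing the constants from the header:

#include <stdio.h>

#define CO_IRQ_APIC0    16                        /* irq of apic entry 0 */
#define IS_CO_APIC(irq) ((irq) >= CO_IRQ_APIC0)
#define CO_IRQ(apic)    (CO_IRQ_APIC0 + (apic))   /* apic entry -> irq  */
#define CO_APIC(irq)    ((irq) - CO_IRQ_APIC0)    /* irq -> apic entry  */

#define CO_APIC_VIDOUT0 16                        /* sample Cobalt APIC entry */

int main(void)
{
    int irq = CO_IRQ(CO_APIC_VIDOUT0);            /* 32 */
    printf("irq=%d co_apic=%d legacy=%s\n",
           irq, CO_APIC(irq), IS_CO_APIC(irq) ? "no" : "yes");
    return 0;
}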
*/ -#define CO_IRQ_8259 CO_IRQ(CO_APIC_8259) - -#ifdef CONFIG_X86_VISWS_APIC -extern __inline void co_cpu_write(unsigned long reg, unsigned long v) -{ - *((volatile unsigned long *)(CO_CPU_VADDR+reg))=v; -} - -extern __inline unsigned long co_cpu_read(unsigned long reg) -{ - return *((volatile unsigned long *)(CO_CPU_VADDR+reg)); -} - -extern __inline void co_apic_write(unsigned long reg, unsigned long v) -{ - *((volatile unsigned long *)(CO_APIC_VADDR+reg))=v; -} - -extern __inline unsigned long co_apic_read(unsigned long reg) -{ - return *((volatile unsigned long *)(CO_APIC_VADDR+reg)); -} -#endif - -extern char visws_board_type; - -#define VISWS_320 0 -#define VISWS_540 1 - -extern char visws_board_rev; - -#endif /* __I386_SGI_COBALT_H */ diff --git a/include/asm-i386/mach-visws/entry_arch.h b/include/asm-i386/mach-visws/entry_arch.h deleted file mode 100644 index b183fa6d83d9..000000000000 --- a/include/asm-i386/mach-visws/entry_arch.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * The following vectors are part of the Linux architecture, there - * is no hardware IRQ pin equivalent for them, they are triggered - * through the ICC by us (IPIs) - */ -#ifdef CONFIG_X86_SMP -BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) -BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR) -BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) -#endif - -/* - * every pentium local APIC has two 'local interrupts', with a - * soft-definable vector attached to both interrupts, one of - * which is a timer interrupt, the other one is error counter - * overflow. Linux uses the local APIC timer interrupt to get - * a much simpler SMP time architecture: - */ -#ifdef CONFIG_X86_LOCAL_APIC -BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR) -BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR) -BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) -#endif diff --git a/include/asm-i386/mach-visws/irq_vectors.h b/include/asm-i386/mach-visws/irq_vectors.h deleted file mode 100644 index cb572d8db505..000000000000 --- a/include/asm-i386/mach-visws/irq_vectors.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef _ASM_IRQ_VECTORS_H -#define _ASM_IRQ_VECTORS_H - -/* - * IDT vectors usable for external interrupt sources start - * at 0x20: - */ -#define FIRST_EXTERNAL_VECTOR 0x20 - -#define SYSCALL_VECTOR 0x80 - -/* - * Vectors 0x20-0x2f are used for ISA interrupts. - */ - -/* - * Special IRQ vectors used by the SMP architecture, 0xf0-0xff - * - * some of the following vectors are 'rare', they are merged - * into a single vector (CALL_FUNCTION_VECTOR) to save vector space. - * TLB, reschedule and local APIC vectors are performance-critical. - * - * Vectors 0xf0-0xfa are free (reserved for future Linux use). - */ -#define SPURIOUS_APIC_VECTOR 0xff -#define ERROR_APIC_VECTOR 0xfe -#define INVALIDATE_TLB_VECTOR 0xfd -#define RESCHEDULE_VECTOR 0xfc -#define CALL_FUNCTION_VECTOR 0xfb - -#define THERMAL_APIC_VECTOR 0xf0 -/* - * Local APIC timer IRQ vector is on a different priority level, - * to work around the 'lost local interrupt if more than 2 IRQ - * sources per level' errata. - */ -#define LOCAL_TIMER_VECTOR 0xef - -/* - * First APIC vector available to drivers: (vectors 0x30-0xee) - * we start at 0x31 to spread out vectors evenly between priority - * levels. 
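The remark above about spreading device vectors "evenly between priority levels" refers, as I understand the IA-32 rules, to the local APIC treating bits 7:4 of a vector as its priority class, sixteen vectors per class. A quick check of where the vectors defined in this header land:

#include <stdio.h>

/* Local APIC priority class = high nibble of the vector (my reading of
 * the IA-32 documentation; illustrative only). */
static unsigned prio_class(unsigned vector) { return vector >> 4; }

int main(void)
{
    printf("SPURIOUS  0xff -> class %u\n", prio_class(0xff));  /* 15 */
    printf("LOCAL_TMR 0xef -> class %u\n", prio_class(0xef));  /* 14 */
    printf("SYSCALL   0x80 -> class %u\n", prio_class(0x80));  /*  8 */
    printf("FIRST_DEV 0x31 -> class %u\n", prio_class(0x31));  /*  3 */
    return 0;
}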
(0x80 is the syscall vector) - */ -#define FIRST_DEVICE_VECTOR 0x31 -#define FIRST_SYSTEM_VECTOR 0xef - -#define TIMER_IRQ 0 - -/* - * IRQ definitions - */ -#define NR_VECTORS 256 -#define NR_IRQS 224 -#define NR_IRQ_VECTORS NR_IRQS - -#define FPU_IRQ 13 - -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 -#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) - -#endif /* _ASM_IRQ_VECTORS_H */ diff --git a/include/asm-i386/mach-visws/lithium.h b/include/asm-i386/mach-visws/lithium.h deleted file mode 100644 index d443e68d0066..000000000000 --- a/include/asm-i386/mach-visws/lithium.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef __I386_SGI_LITHIUM_H -#define __I386_SGI_LITHIUM_H - -#include <asm/fixmap.h> - -/* - * Lithium is the SGI Visual Workstation I/O ASIC - */ - -#define LI_PCI_A_PHYS 0xfc000000 /* Enet is dev 3 */ -#define LI_PCI_B_PHYS 0xfd000000 /* PIIX4 is here */ - -/* see set_fixmap() and asm/fixmap.h */ -#define LI_PCIA_VADDR (fix_to_virt(FIX_LI_PCIA)) -#define LI_PCIB_VADDR (fix_to_virt(FIX_LI_PCIB)) - -/* Not a standard PCI? (not in linux/pci.h) */ -#define LI_PCI_BUSNUM 0x44 /* lo8: primary, hi8: sub */ -#define LI_PCI_INTEN 0x46 - -/* LI_PCI_INTENT bits */ -#define LI_INTA_0 0x0001 -#define LI_INTA_1 0x0002 -#define LI_INTA_2 0x0004 -#define LI_INTA_3 0x0008 -#define LI_INTA_4 0x0010 -#define LI_INTB 0x0020 -#define LI_INTC 0x0040 -#define LI_INTD 0x0080 - -/* More special purpose macros... */ -extern __inline void li_pcia_write16(unsigned long reg, unsigned short v) -{ - *((volatile unsigned short *)(LI_PCIA_VADDR+reg))=v; -} - -extern __inline unsigned short li_pcia_read16(unsigned long reg) -{ - return *((volatile unsigned short *)(LI_PCIA_VADDR+reg)); -} - -extern __inline void li_pcib_write16(unsigned long reg, unsigned short v) -{ - *((volatile unsigned short *)(LI_PCIB_VADDR+reg))=v; -} - -extern __inline unsigned short li_pcib_read16(unsigned long reg) -{ - return *((volatile unsigned short *)(LI_PCIB_VADDR+reg)); -} - -#endif - diff --git a/include/asm-i386/mach-visws/mach_apic.h b/include/asm-i386/mach-visws/mach_apic.h deleted file mode 100644 index efac6f0d139f..000000000000 --- a/include/asm-i386/mach-visws/mach_apic.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <mach_apicdef.h> -#include <asm/smp.h> - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -#define no_balance_irq (0) -#define esr_disable (0) - -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -#ifdef CONFIG_SMP - #define TARGET_CPUS cpu_online_map -#else - #define TARGET_CPUS cpumask_of_cpu(0) -#endif - -#define check_apicid_used(bitmap, apicid) physid_isset(apicid, bitmap) -#define check_apicid_present(bit) physid_isset(bit, phys_cpu_present_map) - -static inline int apic_id_registered(void) -{ - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... 
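On this subarchitecture the logical APIC ID of CPU n is simply bit n (see cpu_to_logical_apicid() and init_apic_ldr() further down), so the destination for a set of CPUs is just the OR of their bits, which is what cpu_mask_to_apicid() returns. A toy illustration with an invented CPU set:

#include <stdio.h>
#include <stdint.h>

/* Flat logical mode: cpu_to_logical_apicid(cpu) == 1 << cpu, so a set of
 * CPUs maps onto a destination bitmap by OR-ing the per-CPU bits. */
static uint8_t logical_dest(const int *cpus, int n)
{
    uint8_t dest = 0;
    for (int i = 0; i < n; i++)
        dest |= (uint8_t)(1u << cpus[i]);
    return dest;
}

int main(void)
{
    int cpus[] = { 0, 2, 3 };
    printf("dest=0x%02x\n", logical_dest(cpus, 3));   /* 0x0d */
    return 0;
}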
- */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write_around(APIC_LDR, val); -} - -static inline void summit_check(char *oem, char *productid) -{ -} - -static inline void setup_apic_routing(void) -{ -} - -static inline int apicid_to_node(int logical_apicid) -{ - return 0; -} - -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < get_physical_broadcast()) - return mps_cpu; - else - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int apicid) -{ - return physid_mask_of_physid(apicid); -} - -#define WAKE_SECONDARY_VIA_INIT - -static inline void setup_portio_remap(void) -{ -} - -static inline void enable_apic_mode(void) -{ -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return cpus_addr(cpumask)[0]; -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff --git a/include/asm-i386/mach-visws/mach_apicdef.h b/include/asm-i386/mach-visws/mach_apicdef.h deleted file mode 100644 index 826cfa97d778..000000000000 --- a/include/asm-i386/mach-visws/mach_apicdef.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef __ASM_MACH_APICDEF_H -#define __ASM_MACH_APICDEF_H - -#define APIC_ID_MASK (0xF<<24) - -static inline unsigned get_apic_id(unsigned long x) -{ - return (((x)>>24)&0xF); -} -#define GET_APIC_ID(x) get_apic_id(x) - -#endif diff --git a/include/asm-i386/mach-visws/piix4.h b/include/asm-i386/mach-visws/piix4.h deleted file mode 100644 index 83ea4f46e419..000000000000 --- a/include/asm-i386/mach-visws/piix4.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef __I386_SGI_PIIX_H -#define __I386_SGI_PIIX_H - -/* - * PIIX4 as used on SGI Visual Workstations - */ - -#define PIIX_PM_START 0x0F80 - -#define SIO_GPIO_START 0x0FC0 - -#define SIO_PM_START 0x0FC8 - -#define PMBASE PIIX_PM_START -#define GPIREG0 (PMBASE+0x30) -#define GPIREG(x) (GPIREG0+((x)/8)) -#define GPIBIT(x) (1 << ((x)%8)) - -#define PIIX_GPI_BD_ID1 18 -#define PIIX_GPI_BD_ID2 19 -#define PIIX_GPI_BD_ID3 20 -#define PIIX_GPI_BD_ID4 21 -#define PIIX_GPI_BD_REG GPIREG(PIIX_GPI_BD_ID1) -#define PIIX_GPI_BD_MASK (GPIBIT(PIIX_GPI_BD_ID1) | \ - GPIBIT(PIIX_GPI_BD_ID2) | \ - GPIBIT(PIIX_GPI_BD_ID3) | \ - GPIBIT(PIIX_GPI_BD_ID4) ) - -#define PIIX_GPI_BD_SHIFT (PIIX_GPI_BD_ID1 % 8) - -#define SIO_INDEX 0x2e -#define SIO_DATA 0x2f - -#define SIO_DEV_SEL 0x7 -#define SIO_DEV_ENB 0x30 -#define SIO_DEV_MSB 0x60 -#define SIO_DEV_LSB 0x61 - -#define SIO_GP_DEV 0x7 - -#define SIO_GP_BASE SIO_GPIO_START -#define SIO_GP_MSB (SIO_GP_BASE>>8) -#define SIO_GP_LSB (SIO_GP_BASE&0xff) - -#define SIO_GP_DATA1 (SIO_GP_BASE+0) - -#define SIO_PM_DEV 0x8 - -#define SIO_PM_BASE SIO_PM_START -#define SIO_PM_MSB (SIO_PM_BASE>>8) -#define SIO_PM_LSB (SIO_PM_BASE&0xff) -#define SIO_PM_INDEX (SIO_PM_BASE+0) -#define SIO_PM_DATA (SIO_PM_BASE+1) - -#define SIO_PM_FER2 0x1 - -#define SIO_PM_GP_EN 0x80 - - - -/* - * This is the dev/reg where generating a config cycle will - * result in a PCI special cycle. 
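The GPIREG()/GPIBIT() helpers defined earlier in this header locate a GPI line's status byte and bit, and PIIX_GPI_BD_MASK/_SHIFT carve the four board-ID lines (GPI 18-21) out of their register. A standalone sketch of that field extraction; the register value is made up, since only the bit arithmetic is being shown:

#include <stdio.h>
#include <stdint.h>

#define GPIBIT(x)          (1u << ((x) % 8))    /* bit of GPI line x within its byte */
#define PIIX_GPI_BD_ID1    18
#define PIIX_GPI_BD_MASK   (GPIBIT(18) | GPIBIT(19) | GPIBIT(20) | GPIBIT(21))
#define PIIX_GPI_BD_SHIFT  (PIIX_GPI_BD_ID1 % 8)

int main(void)
{
    uint8_t gpireg = 0x2c;   /* hypothetical value read from GPIREG(18) */
    unsigned board_id = (gpireg & PIIX_GPI_BD_MASK) >> PIIX_GPI_BD_SHIFT;
    printf("board id = %u\n", board_id);   /* bits 5:2 of the register -> 11 */
    return 0;
}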
- */ -#define SPECIAL_DEV 0xff -#define SPECIAL_REG 0x00 - -/* - * PIIX4 needs to see a special cycle with the following data - * to be convinced the processor has gone into the stop grant - * state. PIIX4 insists on seeing this before it will power - * down a system. - */ -#define PIIX_SPECIAL_STOP 0x00120002 - -#define PIIX4_RESET_PORT 0xcf9 -#define PIIX4_RESET_VAL 0x6 - -#define PMSTS_PORT 0xf80 // 2 bytes PM Status -#define PMEN_PORT 0xf82 // 2 bytes PM Enable -#define PMCNTRL_PORT 0xf84 // 2 bytes PM Control - -#define PM_SUSPEND_ENABLE 0x2000 // start sequence to suspend state - -/* - * PMSTS and PMEN I/O bit definitions. - * (Bits are the same in both registers) - */ -#define PM_STS_RSM (1<<15) // Resume Status -#define PM_STS_PWRBTNOR (1<<11) // Power Button Override -#define PM_STS_RTC (1<<10) // RTC status -#define PM_STS_PWRBTN (1<<8) // Power Button Pressed? -#define PM_STS_GBL (1<<5) // Global Status -#define PM_STS_BM (1<<4) // Bus Master Status -#define PM_STS_TMROF (1<<0) // Timer Overflow Status. - -/* - * Stop clock GPI register - */ -#define PIIX_GPIREG0 (0xf80 + 0x30) - -/* - * Stop clock GPI bit in GPIREG0 - */ -#define PIIX_GPI_STPCLK 0x4 // STPCLK signal routed back in - -#endif diff --git a/include/asm-i386/mach-visws/setup_arch.h b/include/asm-i386/mach-visws/setup_arch.h deleted file mode 100644 index 33f700ef6831..000000000000 --- a/include/asm-i386/mach-visws/setup_arch.h +++ /dev/null @@ -1,8 +0,0 @@ -/* Hook to call BIOS initialisation function */ - -extern unsigned long sgivwfb_mem_phys; -extern unsigned long sgivwfb_mem_size; - -/* no action for visws */ - -#define ARCH_SETUP diff --git a/include/asm-i386/mach-visws/smpboot_hooks.h b/include/asm-i386/mach-visws/smpboot_hooks.h deleted file mode 100644 index d926471fa359..000000000000 --- a/include/asm-i386/mach-visws/smpboot_hooks.h +++ /dev/null @@ -1,24 +0,0 @@ -static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) -{ - CMOS_WRITE(0xa, 0xf); - local_flush_tlb(); - Dprintk("1.\n"); - *((volatile unsigned short *) TRAMPOLINE_HIGH) = start_eip >> 4; - Dprintk("2.\n"); - *((volatile unsigned short *) TRAMPOLINE_LOW) = start_eip & 0xf; - Dprintk("3.\n"); -} - -/* for visws do nothing for any of these */ - -static inline void smpboot_clear_io_apic_irqs(void) -{ -} - -static inline void smpboot_restore_warm_reset_vector(void) -{ -} - -static inline void smpboot_setup_io_apic(void) -{ -} diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h deleted file mode 100644 index bc2b58926308..000000000000 --- a/include/asm-i386/mach-voyager/do_timer.h +++ /dev/null @@ -1,18 +0,0 @@ -/* defines for inline arch setup functions */ -#include <linux/clockchips.h> - -#include <asm/voyager.h> -#include <asm/i8253.h> - -/** - * do_timer_interrupt_hook - hook into timer tick - * @regs: standard registers from interrupt - * - * Call the pit clock event handler. 
see asm/i8253.h - **/ -static inline void do_timer_interrupt_hook(void) -{ - global_clock_event->event_handler(global_clock_event); - voyager_timer_interrupt(); -} - diff --git a/include/asm-i386/mach-voyager/entry_arch.h b/include/asm-i386/mach-voyager/entry_arch.h deleted file mode 100644 index 4a1e1e8c10b6..000000000000 --- a/include/asm-i386/mach-voyager/entry_arch.h +++ /dev/null @@ -1,26 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 2002 - * - * Author: James.Bottomley@HansenPartnership.com - * - * linux/arch/i386/voyager/entry_arch.h - * - * This file builds the VIC and QIC CPI gates - */ - -/* initialise the voyager interrupt gates - * - * This uses the macros in irq.h to set up assembly jump gates. The - * calls are then redirected to the same routine with smp_ prefixed */ -BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT) -BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT) -BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0); - -/* do all the QIC interrupts */ -BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI); -BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI); -BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI); -BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI); -BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI); - diff --git a/include/asm-i386/mach-voyager/irq_vectors.h b/include/asm-i386/mach-voyager/irq_vectors.h deleted file mode 100644 index 165421f5821c..000000000000 --- a/include/asm-i386/mach-voyager/irq_vectors.h +++ /dev/null @@ -1,79 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Copyright (C) 2002 - * - * Author: James.Bottomley@HansenPartnership.com - * - * linux/arch/i386/voyager/irq_vectors.h - * - * This file provides definitions for the VIC and QIC CPIs - */ - -#ifndef _ASM_IRQ_VECTORS_H -#define _ASM_IRQ_VECTORS_H - -/* - * IDT vectors usable for external interrupt sources start - * at 0x20: - */ -#define FIRST_EXTERNAL_VECTOR 0x20 - -#define SYSCALL_VECTOR 0x80 - -/* - * Vectors 0x20-0x2f are used for ISA interrupts. - */ - -/* These define the CPIs we use in linux */ -#define VIC_CPI_LEVEL0 0 -#define VIC_CPI_LEVEL1 1 -/* now the fake CPIs */ -#define VIC_TIMER_CPI 2 -#define VIC_INVALIDATE_CPI 3 -#define VIC_RESCHEDULE_CPI 4 -#define VIC_ENABLE_IRQ_CPI 5 -#define VIC_CALL_FUNCTION_CPI 6 - -/* Now the QIC CPIs: Since we don't need the two initial levels, - * these are 2 less than the VIC CPIs */ -#define QIC_CPI_OFFSET 1 -#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET) -#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET) -#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET) -#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET) -#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET) - -#define VIC_START_FAKE_CPI VIC_TIMER_CPI -#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_CPI - -/* this is the SYS_INT CPI. */ -#define VIC_SYS_INT 8 -#define VIC_CMN_INT 15 - -/* This is the boot CPI for alternate processors. 
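The QIC CPIs above are derived from the VIC ones by subtracting QIC_CPI_OFFSET; note that the comment speaks of "2 less" while the offset is defined as 1, so the sketch below simply follows the macros as written. A quick check of the resulting mapping:

#include <stdio.h>

#define QIC_CPI_OFFSET        1     /* as defined in the header above */
#define VIC_TIMER_CPI         2
#define VIC_CALL_FUNCTION_CPI 6

#define QIC_TIMER_CPI         (VIC_TIMER_CPI - QIC_CPI_OFFSET)
#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)

int main(void)
{
    /* VIC "fake" CPIs 2..6 map onto QIC CPIs 1..5 with these macros */
    printf("timer:         VIC %d -> QIC %d\n", VIC_TIMER_CPI, QIC_TIMER_CPI);
    printf("call_function: VIC %d -> QIC %d\n", VIC_CALL_FUNCTION_CPI, QIC_CALL_FUNCTION_CPI);
    return 0;
}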
It gets overwritten - * by the above once the system has activated all available processors */ -#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0 -#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8) - -#define NR_VECTORS 256 -#define NR_IRQS 224 -#define NR_IRQ_VECTORS NR_IRQS - -#define FPU_IRQ 13 - -#define FIRST_VM86_IRQ 3 -#define LAST_VM86_IRQ 15 -#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) - -#ifndef __ASSEMBLY__ -extern asmlinkage void vic_cpi_interrupt(void); -extern asmlinkage void vic_sys_interrupt(void); -extern asmlinkage void vic_cmn_interrupt(void); -extern asmlinkage void qic_timer_interrupt(void); -extern asmlinkage void qic_invalidate_interrupt(void); -extern asmlinkage void qic_reschedule_interrupt(void); -extern asmlinkage void qic_enable_irq_interrupt(void); -extern asmlinkage void qic_call_function_interrupt(void); -#endif /* !__ASSEMBLY__ */ - -#endif /* _ASM_IRQ_VECTORS_H */ diff --git a/include/asm-i386/mach-voyager/setup_arch.h b/include/asm-i386/mach-voyager/setup_arch.h deleted file mode 100644 index 84d01ad33459..000000000000 --- a/include/asm-i386/mach-voyager/setup_arch.h +++ /dev/null @@ -1,10 +0,0 @@ -#include <asm/voyager.h> -#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *)(PARAM+0x40)) - -/* Hook to call BIOS initialisation function */ - -/* for voyager, pass the voyager BIOS/SUS info area to the detection - * routines */ - -#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO); - diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h deleted file mode 100644 index a4b0aa3320e6..000000000000 --- a/include/asm-i386/math_emu.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _I386_MATH_EMU_H -#define _I386_MATH_EMU_H - -#include <asm/sigcontext.h> - -int restore_i387_soft(void *s387, struct _fpstate __user *buf); -int save_i387_soft(void *s387, struct _fpstate __user *buf); - -/* This structure matches the layout of the data saved to the stack - following a device-not-present interrupt, part of it saved - automatically by the 80386/80486. - */ -struct info { - long ___orig_eip; - long ___ebx; - long ___ecx; - long ___edx; - long ___esi; - long ___edi; - long ___ebp; - long ___eax; - long ___ds; - long ___es; - long ___fs; - long ___orig_eax; - long ___eip; - long ___cs; - long ___eflags; - long ___esp; - long ___ss; - long ___vm86_es; /* This and the following only in vm86 mode */ - long ___vm86_ds; - long ___vm86_fs; - long ___vm86_gs; -}; -#endif diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h deleted file mode 100644 index 1613b42eaf58..000000000000 --- a/include/asm-i386/mc146818rtc.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Machine dependent access functions for RTC registers. - */ -#ifndef _ASM_MC146818RTC_H -#define _ASM_MC146818RTC_H - -#include <asm/io.h> -#include <asm/system.h> -#include <asm/processor.h> -#include <linux/mc146818rtc.h> - -#ifndef RTC_PORT -#define RTC_PORT(x) (0x70 + (x)) -#define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ -#endif - -#ifdef __HAVE_ARCH_CMPXCHG -/* - * This lock provides nmi access to the CMOS/RTC registers. It has some - * special properties. It is owned by a CPU and stores the index register - * currently being accessed (if owned). The idea here is that it works - * like a normal lock (normally). However, in an NMI, the NMI code will - * first check to see if its CPU owns the lock, meaning that the NMI - * interrupted during the read/write of the device. If it does, it goes ahead - * and performs the access and then restores the index register. 
If it does - * not, it locks normally. - * - * Note that since we are working with NMIs, we need this lock even in - * a non-SMP machine just to mark that the lock is owned. - * - * This only works with compare-and-swap. There is no other way to - * atomically claim the lock and set the owner. - */ -#include <linux/smp.h> -extern volatile unsigned long cmos_lock; - -/* - * All of these below must be called with interrupts off, preempt - * disabled, etc. - */ - -static inline void lock_cmos(unsigned char reg) -{ - unsigned long new; - new = ((smp_processor_id()+1) << 8) | reg; - for (;;) { - if (cmos_lock) { - cpu_relax(); - continue; - } - if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0) - return; - } -} - -static inline void unlock_cmos(void) -{ - cmos_lock = 0; -} -static inline int do_i_have_lock_cmos(void) -{ - return (cmos_lock >> 8) == (smp_processor_id()+1); -} -static inline unsigned char current_lock_cmos_reg(void) -{ - return cmos_lock & 0xff; -} -#define lock_cmos_prefix(reg) \ - do { \ - unsigned long cmos_flags; \ - local_irq_save(cmos_flags); \ - lock_cmos(reg) -#define lock_cmos_suffix(reg) \ - unlock_cmos(); \ - local_irq_restore(cmos_flags); \ - } while (0) -#else -#define lock_cmos_prefix(reg) do {} while (0) -#define lock_cmos_suffix(reg) do {} while (0) -#define lock_cmos(reg) -#define unlock_cmos() -#define do_i_have_lock_cmos() 0 -#define current_lock_cmos_reg() 0 -#endif - -/* - * The yet supported machines all access the RTC index register via - * an ISA port access but the way to access the date register differs ... - */ -#define CMOS_READ(addr) rtc_cmos_read(addr) -#define CMOS_WRITE(val, addr) rtc_cmos_write(val, addr) -unsigned char rtc_cmos_read(unsigned char addr); -void rtc_cmos_write(unsigned char val, unsigned char addr); - -#define RTC_IRQ 8 - -#endif /* _ASM_MC146818RTC_H */ diff --git a/include/asm-i386/mca.h b/include/asm-i386/mca.h deleted file mode 100644 index 09adf2eac4dc..000000000000 --- a/include/asm-i386/mca.h +++ /dev/null @@ -1,43 +0,0 @@ -/* -*- mode: c; c-basic-offset: 8 -*- */ - -/* Platform specific MCA defines */ -#ifndef _ASM_MCA_H -#define _ASM_MCA_H - -/* Maximal number of MCA slots - actually, some machines have less, but - * they all have sufficient number of POS registers to cover 8. - */ -#define MCA_MAX_SLOT_NR 8 - -/* Most machines have only one MCA bus. The only multiple bus machines - * I know have at most two */ -#define MAX_MCA_BUSSES 2 - -#define MCA_PRIMARY_BUS 0 -#define MCA_SECONDARY_BUS 1 - -/* Dummy slot numbers on primary MCA for integrated functions */ -#define MCA_INTEGSCSI (MCA_MAX_SLOT_NR) -#define MCA_INTEGVIDEO (MCA_MAX_SLOT_NR+1) -#define MCA_MOTHERBOARD (MCA_MAX_SLOT_NR+2) - -/* Dummy POS values for integrated functions */ -#define MCA_DUMMY_POS_START 0x10000 -#define MCA_INTEGSCSI_POS (MCA_DUMMY_POS_START+1) -#define MCA_INTEGVIDEO_POS (MCA_DUMMY_POS_START+2) -#define MCA_MOTHERBOARD_POS (MCA_DUMMY_POS_START+3) - -/* MCA registers */ - -#define MCA_MOTHERBOARD_SETUP_REG 0x94 -#define MCA_ADAPTER_SETUP_REG 0x96 -#define MCA_POS_REG(n) (0x100+(n)) - -#define MCA_ENABLED 0x01 /* POS 2, set if adapter enabled */ - -/* Max number of adapters, including both slots and various integrated - * things. 
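The cmos_lock scheme above stores (cpu+1) in the high byte and the in-flight index register in the low byte, and claims the word with compare-and-swap so that NMI code can tell whether its own CPU already owns it. A user-space approximation with C11 atomics; smp_processor_id() is faked with a constant, and this only illustrates the encoding and the CAS, not real NMI behaviour:

#include <stdio.h>
#include <stdatomic.h>

static atomic_ulong cmos_lock;                    /* 0 == unlocked */
static int smp_processor_id(void) { return 0; }   /* stand-in for the kernel helper */

static void lock_cmos(unsigned char reg)
{
    unsigned long new = ((unsigned long)(smp_processor_id() + 1) << 8) | reg;
    unsigned long expected = 0;
    /* spin until we swap 0 -> (cpu+1)<<8 | reg */
    while (!atomic_compare_exchange_weak(&cmos_lock, &expected, new))
        expected = 0;
}

static void unlock_cmos(void)         { atomic_store(&cmos_lock, 0); }
static int  do_i_have_lock_cmos(void)
{
    return (atomic_load(&cmos_lock) >> 8) == (unsigned long)(smp_processor_id() + 1);
}
static unsigned char current_lock_cmos_reg(void)
{
    return atomic_load(&cmos_lock) & 0xff;
}

int main(void)
{
    lock_cmos(0x0f);
    printf("owned=%d reg=0x%02x\n", do_i_have_lock_cmos(), current_lock_cmos_reg()); /* 1 0x0f */
    unlock_cmos();
    printf("owned=%d\n", do_i_have_lock_cmos());                                     /* 0 */
    return 0;
}

Storing the register index inside the lock word is what lets reassert_nmi() in mach_traps.h restore the interrupted CMOS access instead of deadlocking.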
- */ -#define MCA_NUMADAPTERS (MCA_MAX_SLOT_NR+3) - -#endif diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h deleted file mode 100644 index fbb1f3b71279..000000000000 --- a/include/asm-i386/mca_dma.h +++ /dev/null @@ -1,201 +0,0 @@ -#ifndef MCA_DMA_H -#define MCA_DMA_H - -#include <asm/io.h> -#include <linux/ioport.h> - -/* - * Microchannel specific DMA stuff. DMA on an MCA machine is fairly similar to - * standard PC dma, but it certainly has its quirks. DMA register addresses - * are in a different place and there are some added functions. Most of this - * should be pretty obvious on inspection. Note that the user must divide - * count by 2 when using 16-bit dma; that is not handled by these functions. - * - * Ramen Noodles are yummy. - * - * 1998 Tymm Twillman <tymm@computer.org> - */ - -/* - * Registers that are used by the DMA controller; FN is the function register - * (tell the controller what to do) and EXE is the execution register (how - * to do it) - */ - -#define MCA_DMA_REG_FN 0x18 -#define MCA_DMA_REG_EXE 0x1A - -/* - * Functions that the DMA controller can do - */ - -#define MCA_DMA_FN_SET_IO 0x00 -#define MCA_DMA_FN_SET_ADDR 0x20 -#define MCA_DMA_FN_GET_ADDR 0x30 -#define MCA_DMA_FN_SET_COUNT 0x40 -#define MCA_DMA_FN_GET_COUNT 0x50 -#define MCA_DMA_FN_GET_STATUS 0x60 -#define MCA_DMA_FN_SET_MODE 0x70 -#define MCA_DMA_FN_SET_ARBUS 0x80 -#define MCA_DMA_FN_MASK 0x90 -#define MCA_DMA_FN_RESET_MASK 0xA0 -#define MCA_DMA_FN_MASTER_CLEAR 0xD0 - -/* - * Modes (used by setting MCA_DMA_FN_MODE in the function register) - * - * Note that the MODE_READ is read from memory (write to device), and - * MODE_WRITE is vice-versa. - */ - -#define MCA_DMA_MODE_XFER 0x04 /* read by default */ -#define MCA_DMA_MODE_READ 0x04 /* same as XFER */ -#define MCA_DMA_MODE_WRITE 0x08 /* OR with MODE_XFER to use */ -#define MCA_DMA_MODE_IO 0x01 /* DMA from IO register */ -#define MCA_DMA_MODE_16 0x40 /* 16 bit xfers */ - - -/** - * mca_enable_dma - channel to enable DMA on - * @dmanr: DMA channel - * - * Enable the MCA bus DMA on a channel. This can be called from - * IRQ context. - */ - -static __inline__ void mca_enable_dma(unsigned int dmanr) -{ - outb(MCA_DMA_FN_RESET_MASK | dmanr, MCA_DMA_REG_FN); -} - -/** - * mca_disble_dma - channel to disable DMA on - * @dmanr: DMA channel - * - * Enable the MCA bus DMA on a channel. This can be called from - * IRQ context. - */ - -static __inline__ void mca_disable_dma(unsigned int dmanr) -{ - outb(MCA_DMA_FN_MASK | dmanr, MCA_DMA_REG_FN); -} - -/** - * mca_set_dma_addr - load a 24bit DMA address - * @dmanr: DMA channel - * @a: 24bit bus address - * - * Load the address register in the DMA controller. This has a 24bit - * limitation (16Mb). - */ - -static __inline__ void mca_set_dma_addr(unsigned int dmanr, unsigned int a) -{ - outb(MCA_DMA_FN_SET_ADDR | dmanr, MCA_DMA_REG_FN); - outb(a & 0xff, MCA_DMA_REG_EXE); - outb((a >> 8) & 0xff, MCA_DMA_REG_EXE); - outb((a >> 16) & 0xff, MCA_DMA_REG_EXE); -} - -/** - * mca_get_dma_addr - load a 24bit DMA address - * @dmanr: DMA channel - * - * Read the address register in the DMA controller. This has a 24bit - * limitation (16Mb). The return is a bus address. 
- */ - -static __inline__ unsigned int mca_get_dma_addr(unsigned int dmanr) -{ - unsigned int addr; - - outb(MCA_DMA_FN_GET_ADDR | dmanr, MCA_DMA_REG_FN); - addr = inb(MCA_DMA_REG_EXE); - addr |= inb(MCA_DMA_REG_EXE) << 8; - addr |= inb(MCA_DMA_REG_EXE) << 16; - - return addr; -} - -/** - * mca_set_dma_count - load a 16bit transfer count - * @dmanr: DMA channel - * @count: count - * - * Set the DMA count for this channel. This can be up to 64Kbytes. - * Setting a count of zero will not do what you expect. - */ - -static __inline__ void mca_set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; /* transfers one more than count -- correct for this */ - - outb(MCA_DMA_FN_SET_COUNT | dmanr, MCA_DMA_REG_FN); - outb(count & 0xff, MCA_DMA_REG_EXE); - outb((count >> 8) & 0xff, MCA_DMA_REG_EXE); -} - -/** - * mca_get_dma_residue - get the remaining bytes to transfer - * @dmanr: DMA channel - * - * This function returns the number of bytes left to transfer - * on this DMA channel. - */ - -static __inline__ unsigned int mca_get_dma_residue(unsigned int dmanr) -{ - unsigned short count; - - outb(MCA_DMA_FN_GET_COUNT | dmanr, MCA_DMA_REG_FN); - count = 1 + inb(MCA_DMA_REG_EXE); - count += inb(MCA_DMA_REG_EXE) << 8; - - return count; -} - -/** - * mca_set_dma_io - set the port for an I/O transfer - * @dmanr: DMA channel - * @io_addr: an I/O port number - * - * Unlike the ISA bus DMA controllers the DMA on MCA bus can transfer - * with an I/O port target. - */ - -static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr) -{ - /* - * DMA from a port address -- set the io address - */ - - outb(MCA_DMA_FN_SET_IO | dmanr, MCA_DMA_REG_FN); - outb(io_addr & 0xff, MCA_DMA_REG_EXE); - outb((io_addr >> 8) & 0xff, MCA_DMA_REG_EXE); -} - -/** - * mca_set_dma_mode - set the DMA mode - * @dmanr: DMA channel - * @mode: mode to set - * - * The DMA controller supports several modes. The mode values you can - * set are- - * - * %MCA_DMA_MODE_READ when reading from the DMA device. - * - * %MCA_DMA_MODE_WRITE to writing to the DMA device. - * - * %MCA_DMA_MODE_IO to do DMA to or from an I/O port. - * - * %MCA_DMA_MODE_16 to do 16bit transfers. 
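Taken together, the helpers above give the usual program-then-enable sequence; a hypothetical driver fragment (function name and parameters are made up) showing a 16-bit transfer, including the divide-by-two for the word count noted at the top of the file:

static void example_start_mca_dma(unsigned int dmanr,
				  unsigned int bus_addr, unsigned int bytes)
{
	mca_disable_dma(dmanr);				/* mask the channel first */
	mca_set_dma_addr(dmanr, bus_addr);		/* 24-bit bus address */
	mca_set_dma_count(dmanr, bytes / 2);		/* 16-bit mode: count is in words */
	mca_set_dma_mode(dmanr, MCA_DMA_MODE_READ | MCA_DMA_MODE_16);
	mca_enable_dma(dmanr);				/* unmask: transfer may begin */
}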
- */ - -static __inline__ void mca_set_dma_mode(unsigned int dmanr, unsigned int mode) -{ - outb(MCA_DMA_FN_SET_MODE | dmanr, MCA_DMA_REG_FN); - outb(mode, MCA_DMA_REG_EXE); -} - -#endif /* MCA_DMA_H */ diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h deleted file mode 100644 index d56d89742e8f..000000000000 --- a/include/asm-i386/mce.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifdef CONFIG_X86_MCE -extern void mcheck_init(struct cpuinfo_x86 *c); -#else -#define mcheck_init(c) do {} while(0) -#endif - -extern int mce_disabled; - -extern void stop_mce(void); -extern void restart_mce(void); - diff --git a/include/asm-i386/mman.h b/include/asm-i386/mman.h deleted file mode 100644 index 8fd9d7ab7faf..000000000000 --- a/include/asm-i386/mman.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef __I386_MMAN_H__ -#define __I386_MMAN_H__ - -#include <asm-generic/mman.h> - -#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ -#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ -#define MAP_LOCKED 0x2000 /* pages are locked */ -#define MAP_NORESERVE 0x4000 /* don't check for reservations */ -#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ - -#endif /* __I386_MMAN_H__ */ diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h deleted file mode 100644 index 8358dd3df7aa..000000000000 --- a/include/asm-i386/mmu.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef __i386_MMU_H -#define __i386_MMU_H - -#include <asm/semaphore.h> -/* - * The i386 doesn't have a mmu context, but - * we put the segment information here. - * - * cpu_vm_mask is used to optimize ldt flushing. - */ -typedef struct { - int size; - struct semaphore sem; - void *ldt; - void *vdso; -} mm_context_t; - -#endif diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h deleted file mode 100644 index 7eb0b0b1fb3c..000000000000 --- a/include/asm-i386/mmu_context.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef __I386_SCHED_H -#define __I386_SCHED_H - -#include <asm/desc.h> -#include <asm/atomic.h> -#include <asm/pgalloc.h> -#include <asm/tlbflush.h> -#include <asm/paravirt.h> -#ifndef CONFIG_PARAVIRT -#include <asm-generic/mm_hooks.h> - -static inline void paravirt_activate_mm(struct mm_struct *prev, - struct mm_struct *next) -{ -} -#endif /* !CONFIG_PARAVIRT */ - - -/* - * Used for LDT copy/destruction. 
- */ -int init_new_context(struct task_struct *tsk, struct mm_struct *mm); -void destroy_context(struct mm_struct *mm); - - -static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) -{ -#ifdef CONFIG_SMP - unsigned cpu = smp_processor_id(); - if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY; -#endif -} - -void leave_mm(unsigned long cpu); - -static inline void switch_mm(struct mm_struct *prev, - struct mm_struct *next, - struct task_struct *tsk) -{ - int cpu = smp_processor_id(); - - if (likely(prev != next)) { - /* stop flush ipis for the previous mm */ - cpu_clear(cpu, prev->cpu_vm_mask); -#ifdef CONFIG_SMP - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; - per_cpu(cpu_tlbstate, cpu).active_mm = next; -#endif - cpu_set(cpu, next->cpu_vm_mask); - - /* Re-load page tables */ - load_cr3(next->pgd); - - /* - * load the LDT, if the LDT is different: - */ - if (unlikely(prev->context.ldt != next->context.ldt)) - load_LDT_nolock(&next->context); - } -#ifdef CONFIG_SMP - else { - per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; - BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next); - - if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) { - /* We were in lazy tlb mode and leave_mm disabled - * tlb flush IPI delivery. We must reload %cr3. - */ - load_cr3(next->pgd); - load_LDT_nolock(&next->context); - } - } -#endif -} - -#define deactivate_mm(tsk, mm) \ - asm("movl %0,%%gs": :"r" (0)); - -#define activate_mm(prev, next) \ - do { \ - paravirt_activate_mm(prev, next); \ - switch_mm((prev),(next),NULL); \ - } while(0); - -#endif diff --git a/include/asm-i386/mmx.h b/include/asm-i386/mmx.h deleted file mode 100644 index 46b71da99869..000000000000 --- a/include/asm-i386/mmx.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _ASM_MMX_H -#define _ASM_MMX_H - -/* - * MMX 3Dnow! helper operations - */ - -#include <linux/types.h> - -extern void *_mmx_memcpy(void *to, const void *from, size_t size); -extern void mmx_clear_page(void *page); -extern void mmx_copy_page(void *to, void *from); - -#endif diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h deleted file mode 100644 index 118e9812778f..000000000000 --- a/include/asm-i386/mmzone.h +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 - * - */ - -#ifndef _ASM_MMZONE_H_ -#define _ASM_MMZONE_H_ - -#include <asm/smp.h> - -#ifdef CONFIG_NUMA -extern struct pglist_data *node_data[]; -#define NODE_DATA(nid) (node_data[nid]) - -#ifdef CONFIG_X86_NUMAQ - #include <asm/numaq.h> -#elif defined(CONFIG_ACPI_SRAT)/* summit or generic arch */ - #include <asm/srat.h> -#endif - -extern int get_memcfg_numa_flat(void ); -/* - * This allows any one NUMA architecture to be compiled - * for, and still fall back to the flat function if it - * fails. 
- */ -static inline void get_memcfg_numa(void) -{ -#ifdef CONFIG_X86_NUMAQ - if (get_memcfg_numaq()) - return; -#elif defined(CONFIG_ACPI_SRAT) - if (get_memcfg_from_srat()) - return; -#endif - - get_memcfg_numa_flat(); -} - -extern int early_pfn_to_nid(unsigned long pfn); -extern void numa_kva_reserve(void); - -#else /* !CONFIG_NUMA */ - -#define get_memcfg_numa get_memcfg_numa_flat -#define get_zholes_size(n) (0) - -static inline void numa_kva_reserve(void) -{ -} -#endif /* CONFIG_NUMA */ - -#ifdef CONFIG_DISCONTIGMEM - -/* - * generic node memory support, the following assumptions apply: - * - * 1) memory comes in 256Mb contigious chunks which are either present or not - * 2) we will not have more than 64Gb in total - * - * for now assume that 64Gb is max amount of RAM for whole system - * 64Gb / 4096bytes/page = 16777216 pages - */ -#define MAX_NR_PAGES 16777216 -#define MAX_ELEMENTS 256 -#define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) - -extern s8 physnode_map[]; - -static inline int pfn_to_nid(unsigned long pfn) -{ -#ifdef CONFIG_NUMA - return((int) physnode_map[(pfn) / PAGES_PER_ELEMENT]); -#else - return 0; -#endif -} - -/* - * Following are macros that each numa implmentation must define. - */ - -#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) -#define node_end_pfn(nid) \ -({ \ - pg_data_t *__pgdat = NODE_DATA(nid); \ - __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ -}) - -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) - -#ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ -#define pfn_valid(pfn) ((pfn) < num_physpages) -#else -static inline int pfn_valid(int pfn) -{ - int nid = pfn_to_nid(pfn); - - if (nid >= 0) - return (pfn < node_end_pfn(nid)); - return 0; -} -#endif /* CONFIG_X86_NUMAQ */ - -#endif /* CONFIG_DISCONTIGMEM */ - -#ifdef CONFIG_NEED_MULTIPLE_NODES - -/* - * Following are macros that are specific to this numa platform. 
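A worked example of the physnode_map indexing above, with illustrative numbers:

/*
 * With 4 KB pages, PAGES_PER_ELEMENT is 16777216 / 256 = 65536 pages,
 * so each physnode_map[] element covers one 256 MB chunk.  For
 * pfn 0x50000 (physical address 0x50000000, i.e. 1.25 GB):
 *
 *	element = 0x50000 / 65536 = 5
 *	pfn_to_nid(0x50000) == physnode_map[5]
 *
 * i.e. whichever node the boot code recorded for the sixth 256 MB chunk.
 */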
- */ -#define reserve_bootmem(addr, size) \ - reserve_bootmem_node(NODE_DATA(0), (addr), (size)) -#define alloc_bootmem(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0) -#define alloc_bootmem_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) -#define alloc_bootmem_low_pages(x) \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0) -#define alloc_bootmem_node(pgdat, x) \ -({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \ - __pa(MAX_DMA_ADDRESS)); \ -}) -#define alloc_bootmem_pages_node(pgdat, x) \ -({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \ - __pa(MAX_DMA_ADDRESS)) \ -}) -#define alloc_bootmem_low_pages_node(pgdat, x) \ -({ \ - struct pglist_data __maybe_unused \ - *__alloc_bootmem_node__pgdat = (pgdat); \ - __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \ -}) -#endif /* CONFIG_NEED_MULTIPLE_NODES */ - -#endif /* _ASM_MMZONE_H_ */ diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h deleted file mode 100644 index 7e5fda6c3976..000000000000 --- a/include/asm-i386/module.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef _ASM_I386_MODULE_H -#define _ASM_I386_MODULE_H - -/* x86 is simple */ -struct mod_arch_specific -{ -}; - -#define Elf_Shdr Elf32_Shdr -#define Elf_Sym Elf32_Sym -#define Elf_Ehdr Elf32_Ehdr - -#ifdef CONFIG_M386 -#define MODULE_PROC_FAMILY "386 " -#elif defined CONFIG_M486 -#define MODULE_PROC_FAMILY "486 " -#elif defined CONFIG_M586 -#define MODULE_PROC_FAMILY "586 " -#elif defined CONFIG_M586TSC -#define MODULE_PROC_FAMILY "586TSC " -#elif defined CONFIG_M586MMX -#define MODULE_PROC_FAMILY "586MMX " -#elif defined CONFIG_MCORE2 -#define MODULE_PROC_FAMILY "CORE2 " -#elif defined CONFIG_M686 -#define MODULE_PROC_FAMILY "686 " -#elif defined CONFIG_MPENTIUMII -#define MODULE_PROC_FAMILY "PENTIUMII " -#elif defined CONFIG_MPENTIUMIII -#define MODULE_PROC_FAMILY "PENTIUMIII " -#elif defined CONFIG_MPENTIUMM -#define MODULE_PROC_FAMILY "PENTIUMM " -#elif defined CONFIG_MPENTIUM4 -#define MODULE_PROC_FAMILY "PENTIUM4 " -#elif defined CONFIG_MK6 -#define MODULE_PROC_FAMILY "K6 " -#elif defined CONFIG_MK7 -#define MODULE_PROC_FAMILY "K7 " -#elif defined CONFIG_MK8 -#define MODULE_PROC_FAMILY "K8 " -#elif defined CONFIG_X86_ELAN -#define MODULE_PROC_FAMILY "ELAN " -#elif defined CONFIG_MCRUSOE -#define MODULE_PROC_FAMILY "CRUSOE " -#elif defined CONFIG_MEFFICEON -#define MODULE_PROC_FAMILY "EFFICEON " -#elif defined CONFIG_MWINCHIPC6 -#define MODULE_PROC_FAMILY "WINCHIPC6 " -#elif defined CONFIG_MWINCHIP2 -#define MODULE_PROC_FAMILY "WINCHIP2 " -#elif defined CONFIG_MWINCHIP3D -#define MODULE_PROC_FAMILY "WINCHIP3D " -#elif defined CONFIG_MCYRIXIII -#define MODULE_PROC_FAMILY "CYRIXIII " -#elif defined CONFIG_MVIAC3_2 -#define MODULE_PROC_FAMILY "VIAC3-2 " -#elif defined CONFIG_MVIAC7 -#define MODULE_PROC_FAMILY "VIAC7 " -#elif defined CONFIG_MGEODEGX1 -#define MODULE_PROC_FAMILY "GEODEGX1 " -#elif defined CONFIG_MGEODE_LX -#define MODULE_PROC_FAMILY "GEODE " -#else -#error unknown processor family -#endif - -#ifdef CONFIG_4KSTACKS -#define MODULE_STACKSIZE "4KSTACKS " -#else -#define MODULE_STACKSIZE "" -#endif - -#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE - -#endif /* 
_ASM_I386_MODULE_H */ diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h deleted file mode 100644 index f21349399d14..000000000000 --- a/include/asm-i386/mpspec.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef __ASM_MPSPEC_H -#define __ASM_MPSPEC_H - -#include <linux/cpumask.h> -#include <asm/mpspec_def.h> -#include <mach_mpspec.h> - -extern int mp_bus_id_to_type [MAX_MP_BUSSES]; -extern int mp_bus_id_to_node [MAX_MP_BUSSES]; -extern int mp_bus_id_to_local [MAX_MP_BUSSES]; -extern int quad_local_to_mp_bus_id [NR_CPUS/4][4]; -extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES]; - -extern unsigned int def_to_bigsmp; -extern unsigned int boot_cpu_physical_apicid; -extern int smp_found_config; -extern void find_smp_config (void); -extern void get_smp_config (void); -extern int nr_ioapics; -extern int apic_version [MAX_APICS]; -extern int mp_irq_entries; -extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES]; -extern int mpc_default_type; -extern unsigned long mp_lapic_addr; -extern int pic_mode; - -#ifdef CONFIG_ACPI -extern void mp_register_lapic (u8 id, u8 enabled); -extern void mp_register_lapic_address (u64 address); -extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base); -extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi); -extern void mp_config_acpi_legacy_irqs (void); -extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low); -#endif /* CONFIG_ACPI */ - -#define PHYSID_ARRAY_SIZE BITS_TO_LONGS(MAX_APICS) - -struct physid_mask -{ - unsigned long mask[PHYSID_ARRAY_SIZE]; -}; - -typedef struct physid_mask physid_mask_t; - -#define physid_set(physid, map) set_bit(physid, (map).mask) -#define physid_clear(physid, map) clear_bit(physid, (map).mask) -#define physid_isset(physid, map) test_bit(physid, (map).mask) -#define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask) - -#define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS) -#define physids_clear(map) bitmap_zero((map).mask, MAX_APICS) -#define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS) -#define physids_empty(map) bitmap_empty((map).mask, MAX_APICS) -#define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS) -#define physids_weight(map) bitmap_weight((map).mask, MAX_APICS) -#define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS) -#define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS) -#define physids_coerce(map) ((map).mask[0]) - -#define physids_promote(physids) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - __physid_mask.mask[0] = physids; \ - __physid_mask; \ - }) - -#define physid_mask_of_physid(physid) \ - ({ \ - physid_mask_t __physid_mask = PHYSID_MASK_NONE; \ - physid_set(physid, __physid_mask); \ - __physid_mask; \ - }) - -#define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} } -#define PHYSID_MASK_NONE { {[0 ... 
PHYSID_ARRAY_SIZE-1] = 0UL} } - -extern physid_mask_t phys_cpu_present_map; - -#endif - diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h deleted file mode 100644 index 13bafb16e7af..000000000000 --- a/include/asm-i386/mpspec_def.h +++ /dev/null @@ -1,186 +0,0 @@ -#ifndef __ASM_MPSPEC_DEF_H -#define __ASM_MPSPEC_DEF_H - -/* - * Structure definitions for SMP machines following the - * Intel Multiprocessing Specification 1.1 and 1.4. - */ - -/* - * This tag identifies where the SMP configuration - * information is. - */ - -#define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_') - -#define MAX_MPC_ENTRY 1024 -#define MAX_APICS 256 - -struct intel_mp_floating -{ - char mpf_signature[4]; /* "_MP_" */ - unsigned long mpf_physptr; /* Configuration table address */ - unsigned char mpf_length; /* Our length (paragraphs) */ - unsigned char mpf_specification;/* Specification version */ - unsigned char mpf_checksum; /* Checksum (makes sum 0) */ - unsigned char mpf_feature1; /* Standard or configuration ? */ - unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */ - unsigned char mpf_feature3; /* Unused (0) */ - unsigned char mpf_feature4; /* Unused (0) */ - unsigned char mpf_feature5; /* Unused (0) */ -}; - -struct mp_config_table -{ - char mpc_signature[4]; -#define MPC_SIGNATURE "PCMP" - unsigned short mpc_length; /* Size of table */ - char mpc_spec; /* 0x01 */ - char mpc_checksum; - char mpc_oem[8]; - char mpc_productid[12]; - unsigned long mpc_oemptr; /* 0 if not present */ - unsigned short mpc_oemsize; /* 0 if not present */ - unsigned short mpc_oemcount; - unsigned long mpc_lapic; /* APIC address */ - unsigned long reserved; -}; - -/* Followed by entries */ - -#define MP_PROCESSOR 0 -#define MP_BUS 1 -#define MP_IOAPIC 2 -#define MP_INTSRC 3 -#define MP_LINTSRC 4 -#define MP_TRANSLATION 192 /* Used by IBM NUMA-Q to describe node locality */ - -struct mpc_config_processor -{ - unsigned char mpc_type; - unsigned char mpc_apicid; /* Local APIC number */ - unsigned char mpc_apicver; /* Its versions */ - unsigned char mpc_cpuflag; -#define CPU_ENABLED 1 /* Processor is available */ -#define CPU_BOOTPROCESSOR 2 /* Processor is the BP */ - unsigned long mpc_cpufeature; -#define CPU_STEPPING_MASK 0x0F -#define CPU_MODEL_MASK 0xF0 -#define CPU_FAMILY_MASK 0xF00 - unsigned long mpc_featureflag; /* CPUID feature value */ - unsigned long mpc_reserved[2]; -}; - -struct mpc_config_bus -{ - unsigned char mpc_type; - unsigned char mpc_busid; - unsigned char mpc_bustype[6]; -}; - -/* List of Bus Type string values, Intel MP Spec. 
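As a hypothetical illustration of how the floating pointer above is recognised (the function is not part of this header), a scanner checks the "_MP_" signature and that the whole structure sums to zero, mpf_length being given in 16-byte paragraphs:

static int example_mpf_valid(struct intel_mp_floating *mpf)
{
	unsigned char *p = (unsigned char *)mpf;
	unsigned char sum = 0;
	int len = mpf->mpf_length * 16;		/* paragraphs -> bytes */
	int i;

	if (memcmp(mpf->mpf_signature, "_MP_", 4))
		return 0;
	for (i = 0; i < len; i++)
		sum += p[i];			/* mpf_checksum makes this 0 */
	return sum == 0;
}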
*/ -#define BUSTYPE_EISA "EISA" -#define BUSTYPE_ISA "ISA" -#define BUSTYPE_INTERN "INTERN" /* Internal BUS */ -#define BUSTYPE_MCA "MCA" -#define BUSTYPE_VL "VL" /* Local bus */ -#define BUSTYPE_PCI "PCI" -#define BUSTYPE_PCMCIA "PCMCIA" -#define BUSTYPE_CBUS "CBUS" -#define BUSTYPE_CBUSII "CBUSII" -#define BUSTYPE_FUTURE "FUTURE" -#define BUSTYPE_MBI "MBI" -#define BUSTYPE_MBII "MBII" -#define BUSTYPE_MPI "MPI" -#define BUSTYPE_MPSA "MPSA" -#define BUSTYPE_NUBUS "NUBUS" -#define BUSTYPE_TC "TC" -#define BUSTYPE_VME "VME" -#define BUSTYPE_XPRESS "XPRESS" - -struct mpc_config_ioapic -{ - unsigned char mpc_type; - unsigned char mpc_apicid; - unsigned char mpc_apicver; - unsigned char mpc_flags; -#define MPC_APIC_USABLE 0x01 - unsigned long mpc_apicaddr; -}; - -struct mpc_config_intsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbus; - unsigned char mpc_srcbusirq; - unsigned char mpc_dstapic; - unsigned char mpc_dstirq; -}; - -enum mp_irq_source_types { - mp_INT = 0, - mp_NMI = 1, - mp_SMI = 2, - mp_ExtINT = 3 -}; - -#define MP_IRQDIR_DEFAULT 0 -#define MP_IRQDIR_HIGH 1 -#define MP_IRQDIR_LOW 3 - - -struct mpc_config_lintsrc -{ - unsigned char mpc_type; - unsigned char mpc_irqtype; - unsigned short mpc_irqflag; - unsigned char mpc_srcbusid; - unsigned char mpc_srcbusirq; - unsigned char mpc_destapic; -#define MP_APIC_ALL 0xFF - unsigned char mpc_destapiclint; -}; - -struct mp_config_oemtable -{ - char oem_signature[4]; -#define MPC_OEM_SIGNATURE "_OEM" - unsigned short oem_length; /* Size of table */ - char oem_rev; /* 0x01 */ - char oem_checksum; - char mpc_oem[8]; -}; - -struct mpc_config_translation -{ - unsigned char mpc_type; - unsigned char trans_len; - unsigned char trans_type; - unsigned char trans_quad; - unsigned char trans_global; - unsigned char trans_local; - unsigned short trans_reserved; -}; - -/* - * Default configurations - * - * 1 2 CPU ISA 82489DX - * 2 2 CPU EISA 82489DX neither IRQ 0 timer nor IRQ 13 DMA chaining - * 3 2 CPU EISA 82489DX - * 4 2 CPU MCA 82489DX - * 5 2 CPU ISA+PCI - * 6 2 CPU EISA+PCI - * 7 2 CPU MCA+PCI - */ - -enum mp_bustype { - MP_BUS_ISA = 1, - MP_BUS_EISA, - MP_BUS_PCI, - MP_BUS_MCA, -}; -#endif - diff --git a/include/asm-i386/msgbuf.h b/include/asm-i386/msgbuf.h deleted file mode 100644 index b8d659c157ae..000000000000 --- a/include/asm-i386/msgbuf.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _I386_MSGBUF_H -#define _I386_MSGBUF_H - -/* - * The msqid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct msqid64_ds { - struct ipc64_perm msg_perm; - __kernel_time_t msg_stime; /* last msgsnd time */ - unsigned long __unused1; - __kernel_time_t msg_rtime; /* last msgrcv time */ - unsigned long __unused2; - __kernel_time_t msg_ctime; /* last change time */ - unsigned long __unused3; - unsigned long msg_cbytes; /* current number of bytes on queue */ - unsigned long msg_qnum; /* number of messages in queue */ - unsigned long msg_qbytes; /* max number of bytes on queue */ - __kernel_pid_t msg_lspid; /* pid of last msgsnd */ - __kernel_pid_t msg_lrpid; /* last receive pid */ - unsigned long __unused4; - unsigned long __unused5; -}; - -#endif /* _I386_MSGBUF_H */ diff --git a/include/asm-i386/msidef.h b/include/asm-i386/msidef.h deleted file mode 100644 index 5b8acddb70fb..000000000000 --- a/include/asm-i386/msidef.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef ASM_MSIDEF_H -#define ASM_MSIDEF_H - -/* - * Constants for Intel APIC based MSI messages. - */ - -/* - * Shifts for MSI data - */ - -#define MSI_DATA_VECTOR_SHIFT 0 -#define MSI_DATA_VECTOR_MASK 0x000000ff -#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) - -#define MSI_DATA_DELIVERY_MODE_SHIFT 8 -#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) -#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) - -#define MSI_DATA_LEVEL_SHIFT 14 -#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) -#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) - -#define MSI_DATA_TRIGGER_SHIFT 15 -#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) -#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) - -/* - * Shift/mask fields for msi address - */ - -#define MSI_ADDR_BASE_HI 0 -#define MSI_ADDR_BASE_LO 0xfee00000 - -#define MSI_ADDR_DEST_MODE_SHIFT 2 -#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT) -#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT) - -#define MSI_ADDR_REDIRECTION_SHIFT 3 -#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */ -#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */ - -#define MSI_ADDR_DEST_ID_SHIFT 12 -#define MSI_ADDR_DEST_ID_MASK 0x00ffff0 -#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) - -#endif /* ASM_MSIDEF_H */ diff --git a/include/asm-i386/msr-index.h b/include/asm-i386/msr-index.h deleted file mode 100644 index a02eb2991349..000000000000 --- a/include/asm-i386/msr-index.h +++ /dev/null @@ -1,278 +0,0 @@ -#ifndef __ASM_MSR_INDEX_H -#define __ASM_MSR_INDEX_H - -/* CPU model specific register (MSR) numbers */ - -/* x86-64 specific MSRs */ -#define MSR_EFER 0xc0000080 /* extended feature register */ -#define MSR_STAR 0xc0000081 /* legacy mode SYSCALL target */ -#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ -#define MSR_CSTAR 0xc0000083 /* compat mode SYSCALL target */ -#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ -#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ -#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ -#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow */ - -/* EFER bits: */ -#define _EFER_SCE 0 /* SYSCALL/SYSRET */ -#define _EFER_LME 8 /* Long mode enable */ -#define _EFER_LMA 10 /* Long mode active (read-only) */ -#define _EFER_NX 11 /* No execute enable */ - -#define EFER_SCE (1<<_EFER_SCE) -#define 
EFER_LME (1<<_EFER_LME) -#define EFER_LMA (1<<_EFER_LMA) -#define EFER_NX (1<<_EFER_NX) - -/* Intel MSRs. Some also available on other CPUs */ -#define MSR_IA32_PERFCTR0 0x000000c1 -#define MSR_IA32_PERFCTR1 0x000000c2 -#define MSR_FSB_FREQ 0x000000cd - -#define MSR_MTRRcap 0x000000fe -#define MSR_IA32_BBL_CR_CTL 0x00000119 - -#define MSR_IA32_SYSENTER_CS 0x00000174 -#define MSR_IA32_SYSENTER_ESP 0x00000175 -#define MSR_IA32_SYSENTER_EIP 0x00000176 - -#define MSR_IA32_MCG_CAP 0x00000179 -#define MSR_IA32_MCG_STATUS 0x0000017a -#define MSR_IA32_MCG_CTL 0x0000017b - -#define MSR_IA32_PEBS_ENABLE 0x000003f1 -#define MSR_IA32_DS_AREA 0x00000600 -#define MSR_IA32_PERF_CAPABILITIES 0x00000345 - -#define MSR_MTRRfix64K_00000 0x00000250 -#define MSR_MTRRfix16K_80000 0x00000258 -#define MSR_MTRRfix16K_A0000 0x00000259 -#define MSR_MTRRfix4K_C0000 0x00000268 -#define MSR_MTRRfix4K_C8000 0x00000269 -#define MSR_MTRRfix4K_D0000 0x0000026a -#define MSR_MTRRfix4K_D8000 0x0000026b -#define MSR_MTRRfix4K_E0000 0x0000026c -#define MSR_MTRRfix4K_E8000 0x0000026d -#define MSR_MTRRfix4K_F0000 0x0000026e -#define MSR_MTRRfix4K_F8000 0x0000026f -#define MSR_MTRRdefType 0x000002ff - -#define MSR_IA32_DEBUGCTLMSR 0x000001d9 -#define MSR_IA32_LASTBRANCHFROMIP 0x000001db -#define MSR_IA32_LASTBRANCHTOIP 0x000001dc -#define MSR_IA32_LASTINTFROMIP 0x000001dd -#define MSR_IA32_LASTINTTOIP 0x000001de - -#define MSR_IA32_MC0_CTL 0x00000400 -#define MSR_IA32_MC0_STATUS 0x00000401 -#define MSR_IA32_MC0_ADDR 0x00000402 -#define MSR_IA32_MC0_MISC 0x00000403 - -#define MSR_P6_PERFCTR0 0x000000c1 -#define MSR_P6_PERFCTR1 0x000000c2 -#define MSR_P6_EVNTSEL0 0x00000186 -#define MSR_P6_EVNTSEL1 0x00000187 - -/* K7/K8 MSRs. Not complete. See the architecture manual for a more - complete list. */ -#define MSR_K7_EVNTSEL0 0xc0010000 -#define MSR_K7_PERFCTR0 0xc0010004 -#define MSR_K7_EVNTSEL1 0xc0010001 -#define MSR_K7_PERFCTR1 0xc0010005 -#define MSR_K7_EVNTSEL2 0xc0010002 -#define MSR_K7_PERFCTR2 0xc0010006 -#define MSR_K7_EVNTSEL3 0xc0010003 -#define MSR_K7_PERFCTR3 0xc0010007 -#define MSR_K8_TOP_MEM1 0xc001001a -#define MSR_K7_CLK_CTL 0xc001001b -#define MSR_K8_TOP_MEM2 0xc001001d -#define MSR_K8_SYSCFG 0xc0010010 - -#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */ -#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */ -#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */ - -#define MSR_K7_HWCR 0xc0010015 -#define MSR_K8_HWCR 0xc0010015 -#define MSR_K7_FID_VID_CTL 0xc0010041 -#define MSR_K7_FID_VID_STATUS 0xc0010042 -#define MSR_K8_ENABLE_C1E 0xc0010055 - -/* K6 MSRs */ -#define MSR_K6_EFER 0xc0000080 -#define MSR_K6_STAR 0xc0000081 -#define MSR_K6_WHCR 0xc0000082 -#define MSR_K6_UWCCR 0xc0000085 -#define MSR_K6_EPMR 0xc0000086 -#define MSR_K6_PSOR 0xc0000087 -#define MSR_K6_PFIR 0xc0000088 - -/* Centaur-Hauls/IDT defined MSRs. 
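An illustrative consumer of the EFER definitions above (a sketch assuming the rdmsrl() accessor from msr.h further below; real code would first confirm via CPUID that EFER exists at all):

static inline int example_nx_active(void)
{
	unsigned long long efer;

	rdmsrl(MSR_EFER, efer);			/* would fault on CPUs without EFER */
	return (efer & EFER_NX) != 0;
}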
*/ -#define MSR_IDT_FCR1 0x00000107 -#define MSR_IDT_FCR2 0x00000108 -#define MSR_IDT_FCR3 0x00000109 -#define MSR_IDT_FCR4 0x0000010a - -#define MSR_IDT_MCR0 0x00000110 -#define MSR_IDT_MCR1 0x00000111 -#define MSR_IDT_MCR2 0x00000112 -#define MSR_IDT_MCR3 0x00000113 -#define MSR_IDT_MCR4 0x00000114 -#define MSR_IDT_MCR5 0x00000115 -#define MSR_IDT_MCR6 0x00000116 -#define MSR_IDT_MCR7 0x00000117 -#define MSR_IDT_MCR_CTRL 0x00000120 - -/* VIA Cyrix defined MSRs*/ -#define MSR_VIA_FCR 0x00001107 -#define MSR_VIA_LONGHAUL 0x0000110a -#define MSR_VIA_RNG 0x0000110b -#define MSR_VIA_BCR2 0x00001147 - -/* Transmeta defined MSRs */ -#define MSR_TMTA_LONGRUN_CTRL 0x80868010 -#define MSR_TMTA_LONGRUN_FLAGS 0x80868011 -#define MSR_TMTA_LRTI_READOUT 0x80868018 -#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a - -/* Intel defined MSRs. */ -#define MSR_IA32_P5_MC_ADDR 0x00000000 -#define MSR_IA32_P5_MC_TYPE 0x00000001 -#define MSR_IA32_TSC 0x00000010 -#define MSR_IA32_PLATFORM_ID 0x00000017 -#define MSR_IA32_EBL_CR_POWERON 0x0000002a - -#define MSR_IA32_APICBASE 0x0000001b -#define MSR_IA32_APICBASE_BSP (1<<8) -#define MSR_IA32_APICBASE_ENABLE (1<<11) -#define MSR_IA32_APICBASE_BASE (0xfffff<<12) - -#define MSR_IA32_UCODE_WRITE 0x00000079 -#define MSR_IA32_UCODE_REV 0x0000008b - -#define MSR_IA32_PERF_STATUS 0x00000198 -#define MSR_IA32_PERF_CTL 0x00000199 - -#define MSR_IA32_MPERF 0x000000e7 -#define MSR_IA32_APERF 0x000000e8 - -#define MSR_IA32_THERM_CONTROL 0x0000019a -#define MSR_IA32_THERM_INTERRUPT 0x0000019b -#define MSR_IA32_THERM_STATUS 0x0000019c -#define MSR_IA32_MISC_ENABLE 0x000001a0 - -/* Intel Model 6 */ -#define MSR_P6_EVNTSEL0 0x00000186 -#define MSR_P6_EVNTSEL1 0x00000187 - -/* P4/Xeon+ specific */ -#define MSR_IA32_MCG_EAX 0x00000180 -#define MSR_IA32_MCG_EBX 0x00000181 -#define MSR_IA32_MCG_ECX 0x00000182 -#define MSR_IA32_MCG_EDX 0x00000183 -#define MSR_IA32_MCG_ESI 0x00000184 -#define MSR_IA32_MCG_EDI 0x00000185 -#define MSR_IA32_MCG_EBP 0x00000186 -#define MSR_IA32_MCG_ESP 0x00000187 -#define MSR_IA32_MCG_EFLAGS 0x00000188 -#define MSR_IA32_MCG_EIP 0x00000189 -#define MSR_IA32_MCG_RESERVED 0x0000018a - -/* Pentium IV performance counter MSRs */ -#define MSR_P4_BPU_PERFCTR0 0x00000300 -#define MSR_P4_BPU_PERFCTR1 0x00000301 -#define MSR_P4_BPU_PERFCTR2 0x00000302 -#define MSR_P4_BPU_PERFCTR3 0x00000303 -#define MSR_P4_MS_PERFCTR0 0x00000304 -#define MSR_P4_MS_PERFCTR1 0x00000305 -#define MSR_P4_MS_PERFCTR2 0x00000306 -#define MSR_P4_MS_PERFCTR3 0x00000307 -#define MSR_P4_FLAME_PERFCTR0 0x00000308 -#define MSR_P4_FLAME_PERFCTR1 0x00000309 -#define MSR_P4_FLAME_PERFCTR2 0x0000030a -#define MSR_P4_FLAME_PERFCTR3 0x0000030b -#define MSR_P4_IQ_PERFCTR0 0x0000030c -#define MSR_P4_IQ_PERFCTR1 0x0000030d -#define MSR_P4_IQ_PERFCTR2 0x0000030e -#define MSR_P4_IQ_PERFCTR3 0x0000030f -#define MSR_P4_IQ_PERFCTR4 0x00000310 -#define MSR_P4_IQ_PERFCTR5 0x00000311 -#define MSR_P4_BPU_CCCR0 0x00000360 -#define MSR_P4_BPU_CCCR1 0x00000361 -#define MSR_P4_BPU_CCCR2 0x00000362 -#define MSR_P4_BPU_CCCR3 0x00000363 -#define MSR_P4_MS_CCCR0 0x00000364 -#define MSR_P4_MS_CCCR1 0x00000365 -#define MSR_P4_MS_CCCR2 0x00000366 -#define MSR_P4_MS_CCCR3 0x00000367 -#define MSR_P4_FLAME_CCCR0 0x00000368 -#define MSR_P4_FLAME_CCCR1 0x00000369 -#define MSR_P4_FLAME_CCCR2 0x0000036a -#define MSR_P4_FLAME_CCCR3 0x0000036b -#define MSR_P4_IQ_CCCR0 0x0000036c -#define MSR_P4_IQ_CCCR1 0x0000036d -#define MSR_P4_IQ_CCCR2 0x0000036e -#define MSR_P4_IQ_CCCR3 0x0000036f -#define MSR_P4_IQ_CCCR4 0x00000370 -#define 
MSR_P4_IQ_CCCR5 0x00000371 -#define MSR_P4_ALF_ESCR0 0x000003ca -#define MSR_P4_ALF_ESCR1 0x000003cb -#define MSR_P4_BPU_ESCR0 0x000003b2 -#define MSR_P4_BPU_ESCR1 0x000003b3 -#define MSR_P4_BSU_ESCR0 0x000003a0 -#define MSR_P4_BSU_ESCR1 0x000003a1 -#define MSR_P4_CRU_ESCR0 0x000003b8 -#define MSR_P4_CRU_ESCR1 0x000003b9 -#define MSR_P4_CRU_ESCR2 0x000003cc -#define MSR_P4_CRU_ESCR3 0x000003cd -#define MSR_P4_CRU_ESCR4 0x000003e0 -#define MSR_P4_CRU_ESCR5 0x000003e1 -#define MSR_P4_DAC_ESCR0 0x000003a8 -#define MSR_P4_DAC_ESCR1 0x000003a9 -#define MSR_P4_FIRM_ESCR0 0x000003a4 -#define MSR_P4_FIRM_ESCR1 0x000003a5 -#define MSR_P4_FLAME_ESCR0 0x000003a6 -#define MSR_P4_FLAME_ESCR1 0x000003a7 -#define MSR_P4_FSB_ESCR0 0x000003a2 -#define MSR_P4_FSB_ESCR1 0x000003a3 -#define MSR_P4_IQ_ESCR0 0x000003ba -#define MSR_P4_IQ_ESCR1 0x000003bb -#define MSR_P4_IS_ESCR0 0x000003b4 -#define MSR_P4_IS_ESCR1 0x000003b5 -#define MSR_P4_ITLB_ESCR0 0x000003b6 -#define MSR_P4_ITLB_ESCR1 0x000003b7 -#define MSR_P4_IX_ESCR0 0x000003c8 -#define MSR_P4_IX_ESCR1 0x000003c9 -#define MSR_P4_MOB_ESCR0 0x000003aa -#define MSR_P4_MOB_ESCR1 0x000003ab -#define MSR_P4_MS_ESCR0 0x000003c0 -#define MSR_P4_MS_ESCR1 0x000003c1 -#define MSR_P4_PMH_ESCR0 0x000003ac -#define MSR_P4_PMH_ESCR1 0x000003ad -#define MSR_P4_RAT_ESCR0 0x000003bc -#define MSR_P4_RAT_ESCR1 0x000003bd -#define MSR_P4_SAAT_ESCR0 0x000003ae -#define MSR_P4_SAAT_ESCR1 0x000003af -#define MSR_P4_SSU_ESCR0 0x000003be -#define MSR_P4_SSU_ESCR1 0x000003bf /* guess: not in manual */ - -#define MSR_P4_TBPU_ESCR0 0x000003c2 -#define MSR_P4_TBPU_ESCR1 0x000003c3 -#define MSR_P4_TC_ESCR0 0x000003c4 -#define MSR_P4_TC_ESCR1 0x000003c5 -#define MSR_P4_U2L_ESCR0 0x000003b0 -#define MSR_P4_U2L_ESCR1 0x000003b1 - -/* Intel Core-based CPU performance counters */ -#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 -#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a -#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b -#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d -#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e -#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f -#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 - -/* Geode defined MSRs */ -#define MSR_GEODE_BUSCONT_CONF0 0x00001900 - -#endif /* __ASM_MSR_INDEX_H */ diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h deleted file mode 100644 index df21ea049369..000000000000 --- a/include/asm-i386/msr.h +++ /dev/null @@ -1,161 +0,0 @@ -#ifndef __ASM_MSR_H -#define __ASM_MSR_H - -#include <asm/msr-index.h> - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#include <asm/errno.h> - -static inline unsigned long long native_read_msr(unsigned int msr) -{ - unsigned long long val; - - asm volatile("rdmsr" : "=A" (val) : "c" (msr)); - return val; -} - -static inline unsigned long long native_read_msr_safe(unsigned int msr, - int *err) -{ - unsigned long long val; - - asm volatile("2: rdmsr ; xorl %0,%0\n" - "1:\n\t" - ".section .fixup,\"ax\"\n\t" - "3: movl %3,%0 ; jmp 1b\n\t" - ".previous\n\t" - ".section __ex_table,\"a\"\n" - " .align 4\n\t" - " .long 2b,3b\n\t" - ".previous" - : "=r" (*err), "=A" (val) - : "c" (msr), "i" (-EFAULT)); - - return val; -} - -static inline void native_write_msr(unsigned int msr, unsigned long long val) -{ - asm volatile("wrmsr" : : "c" (msr), "A"(val)); -} - -static inline int native_write_msr_safe(unsigned int msr, - unsigned long long val) -{ - int err; - asm volatile("2: wrmsr ; xorl %0,%0\n" - "1:\n\t" - ".section .fixup,\"ax\"\n\t" - "3: movl %4,%0 ; jmp 1b\n\t" - ".previous\n\t" - ".section __ex_table,\"a\"\n" - " 
.align 4\n\t" - " .long 2b,3b\n\t" - ".previous" - : "=a" (err) - : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)), - "i" (-EFAULT)); - return err; -} - -static inline unsigned long long native_read_tsc(void) -{ - unsigned long long val; - asm volatile("rdtsc" : "=A" (val)); - return val; -} - -static inline unsigned long long native_read_pmc(void) -{ - unsigned long long val; - asm volatile("rdpmc" : "=A" (val)); - return val; -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#include <linux/errno.h> -/* - * Access to machine-specific registers (available on 586 and better only) - * Note: the rd* operations modify the parameters directly (without using - * pointer indirection), this allows gcc to optimize better - */ - -#define rdmsr(msr,val1,val2) \ - do { \ - u64 __val = native_read_msr(msr); \ - (val1) = (u32)__val; \ - (val2) = (u32)(__val >> 32); \ - } while(0) - -static inline void wrmsr(u32 __msr, u32 __low, u32 __high) -{ - native_write_msr(__msr, ((u64)__high << 32) | __low); -} - -#define rdmsrl(msr,val) \ - ((val) = native_read_msr(msr)) - -#define wrmsrl(msr,val) native_write_msr(msr, val) - -/* wrmsr with exception handling */ -static inline int wrmsr_safe(u32 __msr, u32 __low, u32 __high) -{ - return native_write_msr_safe(__msr, ((u64)__high << 32) | __low); -} - -/* rdmsr with exception handling */ -#define rdmsr_safe(msr,p1,p2) \ - ({ \ - int __err; \ - u64 __val = native_read_msr_safe(msr, &__err); \ - (*p1) = (u32)__val; \ - (*p2) = (u32)(__val >> 32); \ - __err; \ - }) - -#define rdtscl(low) \ - ((low) = (u32)native_read_tsc()) - -#define rdtscll(val) \ - ((val) = native_read_tsc()) - -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) - -#define rdpmc(counter,low,high) \ - do { \ - u64 _l = native_read_pmc(); \ - (low) = (u32)_l; \ - (high) = (u32)(_l >> 32); \ - } while(0) -#endif /* !CONFIG_PARAVIRT */ - -#ifdef CONFIG_SMP -void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); -void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); -int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -#else /* CONFIG_SMP */ -static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) -{ - rdmsr(msr_no, *l, *h); -} -static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) -{ - wrmsr(msr_no, l, h); -} -static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) -{ - return rdmsr_safe(msr_no, l, h); -} -static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) -{ - return wrmsr_safe(msr_no, l, h); -} -#endif /* CONFIG_SMP */ -#endif -#endif -#endif /* __ASM_MSR_H */ diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h deleted file mode 100644 index 7e9c7ccbdcfe..000000000000 --- a/include/asm-i386/mtrr.h +++ /dev/null @@ -1,115 +0,0 @@ -/* Generic MTRR (Memory Type Range Register) ioctls. - - Copyright (C) 1997-1999 Richard Gooch - - This library is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2 of the License, or (at your option) any later version. - - This library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Library General Public License for more details. 
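A sketch of how the _safe variants above are meant to be used (the function and the -EIO mapping are illustrative): probe an MSR that may not exist without risking an unhandled #GP:

static int example_read_platform_id(u32 *lo, u32 *hi)
{
	if (rdmsr_safe(MSR_IA32_PLATFORM_ID, lo, hi))
		return -EIO;		/* #GP caught via the __ex_table fixup */
	return 0;
}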
- - You should have received a copy of the GNU Library General Public - License along with this library; if not, write to the Free - Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - - Richard Gooch may be reached by email at rgooch@atnf.csiro.au - The postal address is: - Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia. -*/ -#ifndef _LINUX_MTRR_H -#define _LINUX_MTRR_H - -#include <linux/ioctl.h> -#include <linux/errno.h> - -#define MTRR_IOCTL_BASE 'M' - -struct mtrr_sentry -{ - unsigned long base; /* Base address */ - unsigned int size; /* Size of region */ - unsigned int type; /* Type of region */ -}; - -struct mtrr_gentry -{ - unsigned int regnum; /* Register number */ - unsigned long base; /* Base address */ - unsigned int size; /* Size of region */ - unsigned int type; /* Type of region */ -}; - -/* These are the various ioctls */ -#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry) -#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry) -#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry) -#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry) -#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry) -#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry) -#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry) -#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry) -#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry) -#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry) - -/* These are the region types */ -#define MTRR_TYPE_UNCACHABLE 0 -#define MTRR_TYPE_WRCOMB 1 -/*#define MTRR_TYPE_ 2*/ -/*#define MTRR_TYPE_ 3*/ -#define MTRR_TYPE_WRTHROUGH 4 -#define MTRR_TYPE_WRPROT 5 -#define MTRR_TYPE_WRBACK 6 -#define MTRR_NUM_TYPES 7 - -#ifdef __KERNEL__ - -/* The following functions are for use by other drivers */ -# ifdef CONFIG_MTRR -extern void mtrr_save_fixed_ranges(void *); -extern void mtrr_save_state(void); -extern int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment); -extern int mtrr_del (int reg, unsigned long base, unsigned long size); -extern int mtrr_del_page (int reg, unsigned long base, unsigned long size); -extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi); -extern void mtrr_ap_init(void); -extern void mtrr_bp_init(void); -# else -#define mtrr_save_fixed_ranges(arg) do {} while (0) -#define mtrr_save_state() do {} while (0) -static __inline__ int mtrr_add (unsigned long base, unsigned long size, - unsigned int type, char increment) -{ - return -ENODEV; -} -static __inline__ int mtrr_add_page (unsigned long base, unsigned long size, - unsigned int type, char increment) -{ - return -ENODEV; -} -static __inline__ int mtrr_del (int reg, unsigned long base, - unsigned long size) -{ - return -ENODEV; -} -static __inline__ int mtrr_del_page (int reg, unsigned long base, - unsigned long size) -{ - return -ENODEV; -} - -static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;} - -#define mtrr_ap_init() do {} while (0) -#define mtrr_bp_init() do {} while (0) -# endif - -#endif - -#endif /* _LINUX_MTRR_H */ diff --git a/include/asm-i386/mutex.h b/include/asm-i386/mutex.h deleted file mode 100644 index 7a17d9e58ad6..000000000000 --- a/include/asm-i386/mutex.h +++ /dev/null @@ -1,130 
+0,0 @@ -/* - * Assembly implementation of the mutex fastpath, based on atomic - * decrement/increment. - * - * started by Ingo Molnar: - * - * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> - */ -#ifndef _ASM_MUTEX_H -#define _ASM_MUTEX_H - -#include "asm/alternative.h" - -/** - * __mutex_fastpath_lock - try to take the lock by moving the count - * from 1 to a 0 value - * @count: pointer of type atomic_t - * @fn: function to call if the original value was not 1 - * - * Change the count from 1 to a value lower than 1, and call <fn> if it - * wasn't 1 originally. This function MUST leave the value lower than 1 - * even when the "1" assertion wasn't true. - */ -#define __mutex_fastpath_lock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ - typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " decl (%%eax) \n" \ - " jns 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ -} while (0) - - -/** - * __mutex_fastpath_lock_retval - try to take the lock by moving the count - * from 1 to a 0 value - * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 1 - * - * Change the count from 1 to a value lower than 1, and call <fail_fn> if it - * wasn't 1 originally. This function returns 0 if the fastpath succeeds, - * or anything the slow path function returns - */ -static inline int -__mutex_fastpath_lock_retval(atomic_t *count, - int fastcall (*fail_fn)(atomic_t *)) -{ - if (unlikely(atomic_dec_return(count) < 0)) - return fail_fn(count); - else - return 0; -} - -/** - * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1 - * @count: pointer of type atomic_t - * @fail_fn: function to call if the original value was not 0 - * - * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>. - * In the failure case, this function is allowed to either set the value - * to 1, or to set it to a value lower than 1. - * - * If the implementation sets it to a value of lower than 1, the - * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs - * to return 0 otherwise. - */ -#define __mutex_fastpath_unlock(count, fail_fn) \ -do { \ - unsigned int dummy; \ - \ - typecheck(atomic_t *, count); \ - typecheck_fn(fastcall void (*)(atomic_t *), fail_fn); \ - \ - __asm__ __volatile__( \ - LOCK_PREFIX " incl (%%eax) \n" \ - " jg 1f \n" \ - " call "#fail_fn" \n" \ - "1: \n" \ - \ - :"=a" (dummy) \ - : "a" (count) \ - : "memory", "ecx", "edx"); \ -} while (0) - -#define __mutex_slowpath_needs_to_unlock() 1 - -/** - * __mutex_fastpath_trylock - try to acquire the mutex, without waiting - * - * @count: pointer of type atomic_t - * @fail_fn: fallback function - * - * Change the count from 1 to a value lower than 1, and return 0 (failure) - * if it wasn't 1 originally, or return 1 (success) otherwise. This function - * MUST leave the value lower than 1 even when the "1" assertion wasn't true. - * Additionally, if the value was < 0 originally, this function must not leave - * it to 0 on failure. - */ -static inline int -__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) -{ - /* - * We have two variants here. The cmpxchg based one is the best one - * because it never induce a false contention state. It is included - * here because architectures using the inc/dec algorithms over the - * xchg ones are much more likely to support cmpxchg natively. 
- * - * If not we fall back to the spinlock based variant - that is - * just as efficient (and simpler) as a 'destructive' probing of - * the mutex state would be. - */ -#ifdef __HAVE_ARCH_CMPXCHG - if (likely(atomic_cmpxchg(count, 1, 0) == 1)) - return 1; - return 0; -#else - return fail_fn(count); -#endif -} - -#endif diff --git a/include/asm-i386/namei.h b/include/asm-i386/namei.h deleted file mode 100644 index 814865088617..000000000000 --- a/include/asm-i386/namei.h +++ /dev/null @@ -1,17 +0,0 @@ -/* $Id: namei.h,v 1.1 1996/12/13 14:48:21 jj Exp $ - * linux/include/asm-i386/namei.h - * - * Included from linux/fs/namei.c - */ - -#ifndef __I386_NAMEI_H -#define __I386_NAMEI_H - -/* This dummy routine maybe changed to something useful - * for /usr/gnemul/ emulation stuff. - * Look at asm-sparc/namei.h for details. - */ - -#define __emul_prefix() NULL - -#endif /* __I386_NAMEI_H */ diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h deleted file mode 100644 index 70a958a8e381..000000000000 --- a/include/asm-i386/nmi.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * linux/include/asm-i386/nmi.h - */ -#ifndef ASM_NMI_H -#define ASM_NMI_H - -#include <linux/pm.h> -#include <asm/irq.h> - -#ifdef ARCH_HAS_NMI_WATCHDOG - -/** - * do_nmi_callback - * - * Check to see if a callback exists and execute it. Return 1 - * if the handler exists and was handled successfully. - */ -int do_nmi_callback(struct pt_regs *regs, int cpu); - -extern int nmi_watchdog_enabled; -extern int avail_to_resrv_perfctr_nmi_bit(unsigned int); -extern int avail_to_resrv_perfctr_nmi(unsigned int); -extern int reserve_perfctr_nmi(unsigned int); -extern void release_perfctr_nmi(unsigned int); -extern int reserve_evntsel_nmi(unsigned int); -extern void release_evntsel_nmi(unsigned int); - -extern void setup_apic_nmi_watchdog (void *); -extern void stop_apic_nmi_watchdog (void *); -extern void disable_timer_nmi_watchdog(void); -extern void enable_timer_nmi_watchdog(void); -extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason); - -extern atomic_t nmi_active; -extern unsigned int nmi_watchdog; -#define NMI_DISABLED -1 -#define NMI_NONE 0 -#define NMI_IO_APIC 1 -#define NMI_LOCAL_APIC 2 -#define NMI_INVALID 3 -#define NMI_DEFAULT NMI_DISABLED - -struct ctl_table; -struct file; -extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, - void __user *, size_t *, loff_t *); -extern int unknown_nmi_panic; - -void __trigger_all_cpu_backtrace(void); -#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() - -#endif - -void lapic_watchdog_stop(void); -int lapic_watchdog_init(unsigned nmi_hz); -int lapic_wd_event(unsigned nmi_hz); -unsigned lapic_adjust_nmi_hz(unsigned hz); -int lapic_watchdog_ok(void); -void disable_lapic_nmi_watchdog(void); -void enable_lapic_nmi_watchdog(void); -void stop_nmi(void); -void restart_nmi(void); - -#endif /* ASM_NMI_H */ diff --git a/include/asm-i386/numa.h b/include/asm-i386/numa.h deleted file mode 100644 index 96fcb157db1d..000000000000 --- a/include/asm-i386/numa.h +++ /dev/null @@ -1,3 +0,0 @@ - -int pxm_to_nid(int pxm); - diff --git a/include/asm-i386/numaq.h b/include/asm-i386/numaq.h deleted file mode 100644 index 38f710dc37f2..000000000000 --- a/include/asm-i386/numaq.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Written by: Patricia Gaughen, IBM Corporation - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to <gone@us.ibm.com> - */ - -#ifndef NUMAQ_H -#define NUMAQ_H - -#ifdef CONFIG_X86_NUMAQ - -extern int get_memcfg_numaq(void); - -/* - * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the - */ -#define SYS_CFG_DATA_PRIV_ADDR 0x0009d000 /* place for scd in private quad space */ - -/* - * Communication area for each processor on lynxer-processor tests. - * - * NOTE: If you change the size of this eachproc structure you need - * to change the definition for EACH_QUAD_SIZE. - */ -struct eachquadmem { - unsigned int priv_mem_start; /* Starting address of this */ - /* quad's private memory. */ - /* This is always 0. */ - /* In MB. */ - unsigned int priv_mem_size; /* Size of this quad's */ - /* private memory. */ - /* In MB. */ - unsigned int low_shrd_mem_strp_start;/* Starting address of this */ - /* quad's low shared block */ - /* (untranslated). */ - /* In MB. */ - unsigned int low_shrd_mem_start; /* Starting address of this */ - /* quad's low shared memory */ - /* (untranslated). */ - /* In MB. */ - unsigned int low_shrd_mem_size; /* Size of this quad's low */ - /* shared memory. */ - /* In MB. */ - unsigned int lmmio_copb_start; /* Starting address of this */ - /* quad's local memory */ - /* mapped I/O in the */ - /* compatibility OPB. */ - /* In MB. */ - unsigned int lmmio_copb_size; /* Size of this quad's local */ - /* memory mapped I/O in the */ - /* compatibility OPB. */ - /* In MB. */ - unsigned int lmmio_nopb_start; /* Starting address of this */ - /* quad's local memory */ - /* mapped I/O in the */ - /* non-compatibility OPB. */ - /* In MB. */ - unsigned int lmmio_nopb_size; /* Size of this quad's local */ - /* memory mapped I/O in the */ - /* non-compatibility OPB. */ - /* In MB. */ - unsigned int io_apic_0_start; /* Starting address of I/O */ - /* APIC 0. */ - unsigned int io_apic_0_sz; /* Size I/O APIC 0. */ - unsigned int io_apic_1_start; /* Starting address of I/O */ - /* APIC 1. */ - unsigned int io_apic_1_sz; /* Size I/O APIC 1. */ - unsigned int hi_shrd_mem_start; /* Starting address of this */ - /* quad's high shared memory.*/ - /* In MB. */ - unsigned int hi_shrd_mem_size; /* Size of this quad's high */ - /* shared memory. */ - /* In MB. */ - unsigned int mps_table_addr; /* Address of this quad's */ - /* MPS tables from BIOS, */ - /* in system space.*/ - unsigned int lcl_MDC_pio_addr; /* Port-I/O address for */ - /* local access of MDC. */ - unsigned int rmt_MDC_mmpio_addr; /* MM-Port-I/O address for */ - /* remote access of MDC. */ - unsigned int mm_port_io_start; /* Starting address of this */ - /* quad's memory mapped Port */ - /* I/O space. */ - unsigned int mm_port_io_size; /* Size of this quad's memory*/ - /* mapped Port I/O space. 
*/ - unsigned int mm_rmt_io_apic_start; /* Starting address of this */ - /* quad's memory mapped */ - /* remote I/O APIC space. */ - unsigned int mm_rmt_io_apic_size; /* Size of this quad's memory*/ - /* mapped remote I/O APIC */ - /* space. */ - unsigned int mm_isa_start; /* Starting address of this */ - /* quad's memory mapped ISA */ - /* space (contains MDC */ - /* memory space). */ - unsigned int mm_isa_size; /* Size of this quad's memory*/ - /* mapped ISA space (contains*/ - /* MDC memory space). */ - unsigned int rmt_qmi_addr; /* Remote addr to access QMI.*/ - unsigned int lcl_qmi_addr; /* Local addr to access QMI. */ -}; - -/* - * Note: This structure must be NOT be changed unless the multiproc and - * OS are changed to reflect the new structure. - */ -struct sys_cfg_data { - unsigned int quad_id; - unsigned int bsp_proc_id; /* Boot Strap Processor in this quad. */ - unsigned int scd_version; /* Version number of this table. */ - unsigned int first_quad_id; - unsigned int quads_present31_0; /* 1 bit for each quad */ - unsigned int quads_present63_32; /* 1 bit for each quad */ - unsigned int config_flags; - unsigned int boot_flags; - unsigned int csr_start_addr; /* Absolute value (not in MB) */ - unsigned int csr_size; /* Absolute value (not in MB) */ - unsigned int lcl_apic_start_addr; /* Absolute value (not in MB) */ - unsigned int lcl_apic_size; /* Absolute value (not in MB) */ - unsigned int low_shrd_mem_base; /* 0 or 512MB or 1GB */ - unsigned int low_shrd_mem_quad_offset; /* 0,128M,256M,512M,1G */ - /* may not be totally populated */ - unsigned int split_mem_enbl; /* 0 for no low shared memory */ - unsigned int mmio_sz; /* Size of total system memory mapped I/O */ - /* (in MB). */ - unsigned int quad_spin_lock; /* Spare location used for quad */ - /* bringup. */ - unsigned int nonzero55; /* For checksumming. */ - unsigned int nonzeroaa; /* For checksumming. */ - unsigned int scd_magic_number; - unsigned int system_type; - unsigned int checksum; - /* - * memory configuration area for each quad - */ - struct eachquadmem eq[MAX_NUMNODES]; /* indexed by quad id */ -}; - -static inline unsigned long *get_zholes_size(int nid) -{ - return NULL; -} -#endif /* CONFIG_X86_NUMAQ */ -#endif /* NUMAQ_H */ - diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h deleted file mode 100644 index 80ecc66b6d86..000000000000 --- a/include/asm-i386/page.h +++ /dev/null @@ -1,206 +0,0 @@ -#ifndef _I386_PAGE_H -#define _I386_PAGE_H - -/* PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#define PAGE_MASK (~(PAGE_SIZE-1)) - -#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) -#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) - -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - -#ifdef CONFIG_X86_USE_3DNOW - -#include <asm/mmx.h> - -#define clear_page(page) mmx_clear_page((void *)(page)) -#define copy_page(to,from) mmx_copy_page(to,from) - -#else - -/* - * On older X86 processors it's not a win to use MMX here it seems. - * Maybe the K6-III ? - */ - -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) - -#endif - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - -#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ - alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) -#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE - -/* - * These are used to make use of C type-checking.. 
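A worked example of the PAGE_* arithmetic defined at the top of this file (values are illustrative; PAGE_ALIGN is defined a little further down):

/*
 * PAGE_SHIFT 12  =>  PAGE_SIZE 4096, PAGE_MASK 0xfffff000.  For
 * addr = 0x12345:
 *
 *	addr & PAGE_MASK	== 0x12000	(start of the page)
 *	addr & ~PAGE_MASK	== 0x345	(offset within the page)
 *	PAGE_ALIGN(addr)	== 0x13000	(rounded up to the next page)
 */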
- */ -extern int nx_enabled; - -#ifdef CONFIG_X86_PAE -typedef struct { unsigned long pte_low, pte_high; } pte_t; -typedef struct { unsigned long long pmd; } pmd_t; -typedef struct { unsigned long long pgd; } pgd_t; -typedef struct { unsigned long long pgprot; } pgprot_t; - -static inline unsigned long long native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} - -static inline unsigned long long native_pmd_val(pmd_t pmd) -{ - return pmd.pmd; -} - -static inline unsigned long long native_pte_val(pte_t pte) -{ - return pte.pte_low | ((unsigned long long)pte.pte_high << 32); -} - -static inline pgd_t native_make_pgd(unsigned long long val) -{ - return (pgd_t) { val }; -} - -static inline pmd_t native_make_pmd(unsigned long long val) -{ - return (pmd_t) { val }; -} - -static inline pte_t native_make_pte(unsigned long long val) -{ - return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ; -} - -#ifndef CONFIG_PARAVIRT -#define pmd_val(x) native_pmd_val(x) -#define __pmd(x) native_make_pmd(x) -#endif - -#define HPAGE_SHIFT 21 -#include <asm-generic/pgtable-nopud.h> -#else /* !CONFIG_X86_PAE */ -typedef struct { unsigned long pte_low; } pte_t; -typedef struct { unsigned long pgd; } pgd_t; -typedef struct { unsigned long pgprot; } pgprot_t; -#define boot_pte_t pte_t /* or would you rather have a typedef */ - -static inline unsigned long native_pgd_val(pgd_t pgd) -{ - return pgd.pgd; -} - -static inline unsigned long native_pte_val(pte_t pte) -{ - return pte.pte_low; -} - -static inline pgd_t native_make_pgd(unsigned long val) -{ - return (pgd_t) { val }; -} - -static inline pte_t native_make_pte(unsigned long val) -{ - return (pte_t) { .pte_low = val }; -} - -#define HPAGE_SHIFT 22 -#include <asm-generic/pgtable-nopmd.h> -#endif /* CONFIG_X86_PAE */ - -#define PTE_MASK PAGE_MASK - -#ifdef CONFIG_HUGETLB_PAGE -#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) -#define HPAGE_MASK (~(HPAGE_SIZE - 1)) -#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA -#endif - -#define pgprot_val(x) ((x).pgprot) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#ifndef CONFIG_PARAVIRT -#define pgd_val(x) native_pgd_val(x) -#define __pgd(x) native_make_pgd(x) -#define pte_val(x) native_pte_val(x) -#define __pte(x) native_make_pte(x) -#endif - -#endif /* !__ASSEMBLY__ */ - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -/* - * This handles the memory map.. We could make this a config - * option, but too many people screw it up, and too few need - * it. - * - * A __PAGE_OFFSET of 0xC0000000 means that the kernel has - * a virtual address space of one gigabyte, which limits the - * amount of physical memory you can use to about 950MB. - * - * If you want more physical memory than this then see the CONFIG_HIGHMEM4G - * and CONFIG_HIGHMEM64G options in the kernel configuration. - */ - -#ifndef __ASSEMBLY__ - -struct vm_area_struct; - -/* - * This much address space is reserved for vmalloc() and iomap() - * as well as fixmap mappings. 
- */ -extern unsigned int __VMALLOC_RESERVE; - -extern int sysctl_legacy_va_layout; - -extern int page_is_ram(unsigned long pagenr); - -#endif /* __ASSEMBLY__ */ - -#ifdef __ASSEMBLY__ -#define __PAGE_OFFSET CONFIG_PAGE_OFFSET -#else -#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) -#endif - - -#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) -#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) -#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) -#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) -/* __pa_symbol should be used for C visible symbols. - This seems to be the official gcc blessed way to do such arithmetic. */ -#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) ((pfn) < max_mapnr) -#endif /* CONFIG_FLATMEM */ -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) - -#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) - -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#include <asm-generic/memory_model.h> -#include <asm-generic/page.h> - -#define __HAVE_ARCH_GATE_AREA 1 -#endif /* __KERNEL__ */ - -#endif /* _I386_PAGE_H */ diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h deleted file mode 100644 index 21b32466fcdc..000000000000 --- a/include/asm-i386/param.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _ASMi386_PARAM_H -#define _ASMi386_PARAM_H - -#ifdef __KERNEL__ -# define HZ CONFIG_HZ /* Internal kernel timer frequency */ -# define USER_HZ 100 /* .. some user interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* like times() */ -#endif - -#ifndef HZ -#define HZ 100 -#endif - -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -#define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 /* max length of hostname */ - -#endif diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h deleted file mode 100644 index 9fa3fa9e62d1..000000000000 --- a/include/asm-i386/paravirt.h +++ /dev/null @@ -1,1085 +0,0 @@ -#ifndef __ASM_PARAVIRT_H -#define __ASM_PARAVIRT_H -/* Various instructions on x86 need to be replaced for - * para-virtualization: those hooks are defined here. */ - -#ifdef CONFIG_PARAVIRT -#include <asm/page.h> - -/* Bitmask of what can be clobbered: usually at least eax. */ -#define CLBR_NONE 0x0 -#define CLBR_EAX 0x1 -#define CLBR_ECX 0x2 -#define CLBR_EDX 0x4 -#define CLBR_ANY 0x7 - -#ifndef __ASSEMBLY__ -#include <linux/types.h> -#include <linux/cpumask.h> -#include <asm/kmap_types.h> - -struct page; -struct thread_struct; -struct Xgt_desc_struct; -struct tss_struct; -struct mm_struct; -struct desc_struct; - -/* Lazy mode for batching updates / context switch */ -enum paravirt_lazy_mode { - PARAVIRT_LAZY_NONE = 0, - PARAVIRT_LAZY_MMU = 1, - PARAVIRT_LAZY_CPU = 2, - PARAVIRT_LAZY_FLUSH = 3, -}; - -struct paravirt_ops -{ - unsigned int kernel_rpl; - int shared_kernel_pmd; - int paravirt_enabled; - const char *name; - - /* - * Patch may replace one of the defined code sequences with arbitrary - * code, subject to the same register constraints. This generally - * means the code is not free to clobber any registers other than EAX. - * The patch function should return the number of bytes of code - * generated, as we nop pad the rest in generic code. 
- */ - unsigned (*patch)(u8 type, u16 clobber, void *insnbuf, - unsigned long addr, unsigned len); - - /* Basic arch-specific setup */ - void (*arch_setup)(void); - char *(*memory_setup)(void); - void (*post_allocator_init)(void); - - void (*init_IRQ)(void); - void (*time_init)(void); - - /* - * Called before/after init_mm pagetable setup. setup_start - * may reset %cr3, and may pre-install parts of the pagetable; - * pagetable setup is expected to preserve any existing - * mapping. - */ - void (*pagetable_setup_start)(pgd_t *pgd_base); - void (*pagetable_setup_done)(pgd_t *pgd_base); - - /* Print a banner to identify the environment */ - void (*banner)(void); - - /* Set and set time of day */ - unsigned long (*get_wallclock)(void); - int (*set_wallclock)(unsigned long); - - /* cpuid emulation, mostly so that caps bits can be disabled */ - void (*cpuid)(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx); - - /* hooks for various privileged instructions */ - unsigned long (*get_debugreg)(int regno); - void (*set_debugreg)(int regno, unsigned long value); - - void (*clts)(void); - - unsigned long (*read_cr0)(void); - void (*write_cr0)(unsigned long); - - unsigned long (*read_cr2)(void); - void (*write_cr2)(unsigned long); - - unsigned long (*read_cr3)(void); - void (*write_cr3)(unsigned long); - - unsigned long (*read_cr4_safe)(void); - unsigned long (*read_cr4)(void); - void (*write_cr4)(unsigned long); - - /* - * Get/set interrupt state. save_fl and restore_fl are only - * expected to use X86_EFLAGS_IF; all other bits - * returned from save_fl are undefined, and may be ignored by - * restore_fl. - */ - unsigned long (*save_fl)(void); - void (*restore_fl)(unsigned long); - void (*irq_disable)(void); - void (*irq_enable)(void); - void (*safe_halt)(void); - void (*halt)(void); - - void (*wbinvd)(void); - - /* MSR, PMC and TSR operations. - err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ - u64 (*read_msr)(unsigned int msr, int *err); - int (*write_msr)(unsigned int msr, u64 val); - - u64 (*read_tsc)(void); - u64 (*read_pmc)(void); - unsigned long long (*sched_clock)(void); - unsigned long (*get_cpu_khz)(void); - - /* Segment descriptor handling */ - void (*load_tr_desc)(void); - void (*load_gdt)(const struct Xgt_desc_struct *); - void (*load_idt)(const struct Xgt_desc_struct *); - void (*store_gdt)(struct Xgt_desc_struct *); - void (*store_idt)(struct Xgt_desc_struct *); - void (*set_ldt)(const void *desc, unsigned entries); - unsigned long (*store_tr)(void); - void (*load_tls)(struct thread_struct *t, unsigned int cpu); - void (*write_ldt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); - void (*write_gdt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); - void (*write_idt_entry)(struct desc_struct *, - int entrynum, u32 low, u32 high); - void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t); - - void (*set_iopl_mask)(unsigned mask); - void (*io_delay)(void); - - /* - * Hooks for intercepting the creation/use/destruction of an - * mm_struct. - */ - void (*activate_mm)(struct mm_struct *prev, - struct mm_struct *next); - void (*dup_mmap)(struct mm_struct *oldmm, - struct mm_struct *mm); - void (*exit_mmap)(struct mm_struct *mm); - -#ifdef CONFIG_X86_LOCAL_APIC - /* - * Direct APIC operations, principally for VMI. Ideally - * these shouldn't be in this interface. 
- */ - void (*apic_write)(unsigned long reg, unsigned long v); - void (*apic_write_atomic)(unsigned long reg, unsigned long v); - unsigned long (*apic_read)(unsigned long reg); - void (*setup_boot_clock)(void); - void (*setup_secondary_clock)(void); - - void (*startup_ipi_hook)(int phys_apicid, - unsigned long start_eip, - unsigned long start_esp); -#endif - - /* TLB operations */ - void (*flush_tlb_user)(void); - void (*flush_tlb_kernel)(void); - void (*flush_tlb_single)(unsigned long addr); - void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm, - unsigned long va); - - /* Hooks for allocating/releasing pagetable pages */ - void (*alloc_pt)(struct mm_struct *mm, u32 pfn); - void (*alloc_pd)(u32 pfn); - void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); - void (*release_pt)(u32 pfn); - void (*release_pd)(u32 pfn); - - /* Pagetable manipulation functions */ - void (*set_pte)(pte_t *ptep, pte_t pteval); - void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval); - void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); - void (*pte_update_defer)(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); - -#ifdef CONFIG_HIGHPTE - void *(*kmap_atomic_pte)(struct page *page, enum km_type type); -#endif - -#ifdef CONFIG_X86_PAE - void (*set_pte_atomic)(pte_t *ptep, pte_t pteval); - void (*set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); - void (*set_pud)(pud_t *pudp, pud_t pudval); - void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); - void (*pmd_clear)(pmd_t *pmdp); - - unsigned long long (*pte_val)(pte_t); - unsigned long long (*pmd_val)(pmd_t); - unsigned long long (*pgd_val)(pgd_t); - - pte_t (*make_pte)(unsigned long long pte); - pmd_t (*make_pmd)(unsigned long long pmd); - pgd_t (*make_pgd)(unsigned long long pgd); -#else - unsigned long (*pte_val)(pte_t); - unsigned long (*pgd_val)(pgd_t); - - pte_t (*make_pte)(unsigned long pte); - pgd_t (*make_pgd)(unsigned long pgd); -#endif - - /* Set deferred update mode, used for batching operations. */ - void (*set_lazy_mode)(enum paravirt_lazy_mode mode); - - /* These two are jmp to, not actually called. */ - void (*irq_enable_sysexit)(void); - void (*iret)(void); -}; - -extern struct paravirt_ops paravirt_ops; - -#define PARAVIRT_PATCH(x) \ - (offsetof(struct paravirt_ops, x) / sizeof(void *)) - -#define paravirt_type(type) \ - [paravirt_typenum] "i" (PARAVIRT_PATCH(type)) -#define paravirt_clobber(clobber) \ - [paravirt_clobber] "i" (clobber) - -/* - * Generate some code, and mark it as patchable by the - * apply_paravirt() alternate instruction patcher. - */ -#define _paravirt_alt(insn_string, type, clobber) \ - "771:\n\t" insn_string "\n" "772:\n" \ - ".pushsection .parainstructions,\"a\"\n" \ - " .long 771b\n" \ - " .byte " type "\n" \ - " .byte 772b-771b\n" \ - " .short " clobber "\n" \ - ".popsection\n" - -/* Generate patchable code, with the default asm parameters. 
*/ -#define paravirt_alt(insn_string) \ - _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]") - -unsigned paravirt_patch_nop(void); -unsigned paravirt_patch_ignore(unsigned len); -unsigned paravirt_patch_call(void *insnbuf, - const void *target, u16 tgt_clobbers, - unsigned long addr, u16 site_clobbers, - unsigned len); -unsigned paravirt_patch_jmp(const void *target, void *insnbuf, - unsigned long addr, unsigned len); -unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf, - unsigned long addr, unsigned len); - -unsigned paravirt_patch_insns(void *insnbuf, unsigned len, - const char *start, const char *end); - -int paravirt_disable_iospace(void); - -/* - * This generates an indirect call based on the operation type number. - * The type number, computed in PARAVIRT_PATCH, is derived from the - * offset into the paravirt_ops structure, and can therefore be freely - * converted back into a structure offset. - */ -#define PARAVIRT_CALL "call *(paravirt_ops+%c[paravirt_typenum]*4);" - -/* - * These macros are intended to wrap calls into a paravirt_ops - * operation, so that they can be later identified and patched at - * runtime. - * - * Normally, a call to a pv_op function is a simple indirect call: - * (paravirt_ops.operations)(args...). - * - * Unfortunately, this is a relatively slow operation for modern CPUs, - * because it cannot necessarily determine what the destination - * address is. In this case, the address is a runtime constant, so at - * the very least we can patch the call to e a simple direct call, or - * ideally, patch an inline implementation into the callsite. (Direct - * calls are essentially free, because the call and return addresses - * are completely predictable.) - * - * These macros rely on the standard gcc "regparm(3)" calling - * convention, in which the first three arguments are placed in %eax, - * %edx, %ecx (in that order), and the remaining arguments are placed - * on the stack. All caller-save registers (eax,edx,ecx) are expected - * to be modified (either clobbered or used for return values). - * - * The call instruction itself is marked by placing its start address - * and size into the .parainstructions section, so that - * apply_paravirt() in arch/i386/kernel/alternative.c can do the - * appropriate patching under the control of the backend paravirt_ops - * implementation. - * - * Unfortunately there's no way to get gcc to generate the args setup - * for the call, and then allow the call itself to be generated by an - * inline asm. Because of this, we must do the complete arg setup and - * return value handling from within these macros. This is fairly - * cumbersome. - * - * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments. - * It could be extended to more arguments, but there would be little - * to be gained from that. For each number of arguments, there are - * the two VCALL and CALL variants for void and non-void functions. - * - * When there is a return value, the invoker of the macro must specify - * the return type. The macro then uses sizeof() on that type to - * determine whether its a 32 or 64 bit value, and places the return - * in the right register(s) (just %eax for 32-bit, and %edx:%eax for - * 64-bit). - * - * 64-bit arguments are passed as a pair of adjacent 32-bit arguments - * in low,high order. - * - * Small structures are passed and returned in registers. The macro - * calling convention can't directly deal with this, so the wrapper - * functions must do this. 
- * - * These PVOP_* macros are only defined within this header. This - * means that all uses must be wrapped in inline functions. This also - * makes sure the incoming and outgoing types are always correct. - */ -#define __PVOP_CALL(rettype, op, pre, post, ...) \ - ({ \ - rettype __ret; \ - unsigned long __eax, __edx, __ecx; \ - if (sizeof(rettype) > sizeof(unsigned long)) { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : "=a" (__eax), "=d" (__edx), \ - "=c" (__ecx) \ - : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ - ##__VA_ARGS__ \ - : "memory", "cc"); \ - __ret = (rettype)((((u64)__edx) << 32) | __eax); \ - } else { \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : "=a" (__eax), "=d" (__edx), \ - "=c" (__ecx) \ - : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ - ##__VA_ARGS__ \ - : "memory", "cc"); \ - __ret = (rettype)__eax; \ - } \ - __ret; \ - }) -#define __PVOP_VCALL(op, pre, post, ...) \ - ({ \ - unsigned long __eax, __edx, __ecx; \ - asm volatile(pre \ - paravirt_alt(PARAVIRT_CALL) \ - post \ - : "=a" (__eax), "=d" (__edx), "=c" (__ecx) \ - : paravirt_type(op), \ - paravirt_clobber(CLBR_ANY), \ - ##__VA_ARGS__ \ - : "memory", "cc"); \ - }) - -#define PVOP_CALL0(rettype, op) \ - __PVOP_CALL(rettype, op, "", "") -#define PVOP_VCALL0(op) \ - __PVOP_VCALL(op, "", "") - -#define PVOP_CALL1(rettype, op, arg1) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1))) -#define PVOP_VCALL1(op, arg1) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1))) - -#define PVOP_CALL2(rettype, op, arg1, arg2) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) -#define PVOP_VCALL2(op, arg1, arg2) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1" ((u32)(arg2))) - -#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \ - __PVOP_CALL(rettype, op, "", "", "0" ((u32)(arg1)), \ - "1"((u32)(arg2)), "2"((u32)(arg3))) -#define PVOP_VCALL3(op, arg1, arg2, arg3) \ - __PVOP_VCALL(op, "", "", "0" ((u32)(arg1)), "1"((u32)(arg2)), \ - "2"((u32)(arg3))) - -#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \ - __PVOP_CALL(rettype, op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ - "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) -#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \ - __PVOP_VCALL(op, \ - "push %[_arg4];", "lea 4(%%esp),%%esp;", \ - "0" ((u32)(arg1)), "1" ((u32)(arg2)), \ - "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4))) - -static inline int paravirt_enabled(void) -{ - return paravirt_ops.paravirt_enabled; -} - -static inline void load_esp0(struct tss_struct *tss, - struct thread_struct *thread) -{ - PVOP_VCALL2(load_esp0, tss, thread); -} - -#define ARCH_SETUP paravirt_ops.arch_setup(); -static inline unsigned long get_wallclock(void) -{ - return PVOP_CALL0(unsigned long, get_wallclock); -} - -static inline int set_wallclock(unsigned long nowtime) -{ - return PVOP_CALL1(int, set_wallclock, nowtime); -} - -static inline void (*choose_time_init(void))(void) -{ - return paravirt_ops.time_init; -} - -/* The paravirtualized CPUID instruction. 
*/ -static inline void __cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - PVOP_VCALL4(cpuid, eax, ebx, ecx, edx); -} - -/* - * These special macros can be used to get or set a debugging register - */ -static inline unsigned long paravirt_get_debugreg(int reg) -{ - return PVOP_CALL1(unsigned long, get_debugreg, reg); -} -#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg) -static inline void set_debugreg(unsigned long val, int reg) -{ - PVOP_VCALL2(set_debugreg, reg, val); -} - -static inline void clts(void) -{ - PVOP_VCALL0(clts); -} - -static inline unsigned long read_cr0(void) -{ - return PVOP_CALL0(unsigned long, read_cr0); -} - -static inline void write_cr0(unsigned long x) -{ - PVOP_VCALL1(write_cr0, x); -} - -static inline unsigned long read_cr2(void) -{ - return PVOP_CALL0(unsigned long, read_cr2); -} - -static inline void write_cr2(unsigned long x) -{ - PVOP_VCALL1(write_cr2, x); -} - -static inline unsigned long read_cr3(void) -{ - return PVOP_CALL0(unsigned long, read_cr3); -} - -static inline void write_cr3(unsigned long x) -{ - PVOP_VCALL1(write_cr3, x); -} - -static inline unsigned long read_cr4(void) -{ - return PVOP_CALL0(unsigned long, read_cr4); -} -static inline unsigned long read_cr4_safe(void) -{ - return PVOP_CALL0(unsigned long, read_cr4_safe); -} - -static inline void write_cr4(unsigned long x) -{ - PVOP_VCALL1(write_cr4, x); -} - -static inline void raw_safe_halt(void) -{ - PVOP_VCALL0(safe_halt); -} - -static inline void halt(void) -{ - PVOP_VCALL0(safe_halt); -} - -static inline void wbinvd(void) -{ - PVOP_VCALL0(wbinvd); -} - -#define get_kernel_rpl() (paravirt_ops.kernel_rpl) - -static inline u64 paravirt_read_msr(unsigned msr, int *err) -{ - return PVOP_CALL2(u64, read_msr, msr, err); -} -static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high) -{ - return PVOP_CALL3(int, write_msr, msr, low, high); -} - -/* These should all do BUG_ON(_err), but our headers are too tangled. 
*/ -#define rdmsr(msr,val1,val2) do { \ - int _err; \ - u64 _l = paravirt_read_msr(msr, &_err); \ - val1 = (u32)_l; \ - val2 = _l >> 32; \ -} while(0) - -#define wrmsr(msr,val1,val2) do { \ - paravirt_write_msr(msr, val1, val2); \ -} while(0) - -#define rdmsrl(msr,val) do { \ - int _err; \ - val = paravirt_read_msr(msr, &_err); \ -} while(0) - -#define wrmsrl(msr,val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) -#define wrmsr_safe(msr,a,b) paravirt_write_msr(msr, a, b) - -/* rdmsr with exception handling */ -#define rdmsr_safe(msr,a,b) ({ \ - int _err; \ - u64 _l = paravirt_read_msr(msr, &_err); \ - (*a) = (u32)_l; \ - (*b) = _l >> 32; \ - _err; }) - - -static inline u64 paravirt_read_tsc(void) -{ - return PVOP_CALL0(u64, read_tsc); -} - -#define rdtscl(low) do { \ - u64 _l = paravirt_read_tsc(); \ - low = (int)_l; \ -} while(0) - -#define rdtscll(val) (val = paravirt_read_tsc()) - -static inline unsigned long long paravirt_sched_clock(void) -{ - return PVOP_CALL0(unsigned long long, sched_clock); -} -#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz()) - -#define write_tsc(val1,val2) wrmsr(0x10, val1, val2) - -static inline unsigned long long paravirt_read_pmc(int counter) -{ - return PVOP_CALL1(u64, read_pmc, counter); -} - -#define rdpmc(counter,low,high) do { \ - u64 _l = paravirt_read_pmc(counter); \ - low = (u32)_l; \ - high = _l >> 32; \ -} while(0) - -static inline void load_TR_desc(void) -{ - PVOP_VCALL0(load_tr_desc); -} -static inline void load_gdt(const struct Xgt_desc_struct *dtr) -{ - PVOP_VCALL1(load_gdt, dtr); -} -static inline void load_idt(const struct Xgt_desc_struct *dtr) -{ - PVOP_VCALL1(load_idt, dtr); -} -static inline void set_ldt(const void *addr, unsigned entries) -{ - PVOP_VCALL2(set_ldt, addr, entries); -} -static inline void store_gdt(struct Xgt_desc_struct *dtr) -{ - PVOP_VCALL1(store_gdt, dtr); -} -static inline void store_idt(struct Xgt_desc_struct *dtr) -{ - PVOP_VCALL1(store_idt, dtr); -} -static inline unsigned long paravirt_store_tr(void) -{ - return PVOP_CALL0(unsigned long, store_tr); -} -#define store_tr(tr) ((tr) = paravirt_store_tr()) -static inline void load_TLS(struct thread_struct *t, unsigned cpu) -{ - PVOP_VCALL2(load_tls, t, cpu); -} -static inline void write_ldt_entry(void *dt, int entry, u32 low, u32 high) -{ - PVOP_VCALL4(write_ldt_entry, dt, entry, low, high); -} -static inline void write_gdt_entry(void *dt, int entry, u32 low, u32 high) -{ - PVOP_VCALL4(write_gdt_entry, dt, entry, low, high); -} -static inline void write_idt_entry(void *dt, int entry, u32 low, u32 high) -{ - PVOP_VCALL4(write_idt_entry, dt, entry, low, high); -} -static inline void set_iopl_mask(unsigned mask) -{ - PVOP_VCALL1(set_iopl_mask, mask); -} - -/* The paravirtualized I/O functions */ -static inline void slow_down_io(void) { - paravirt_ops.io_delay(); -#ifdef REALLY_SLOW_IO - paravirt_ops.io_delay(); - paravirt_ops.io_delay(); - paravirt_ops.io_delay(); -#endif -} - -#ifdef CONFIG_X86_LOCAL_APIC -/* - * Basic functions accessing APICs. 
- */ -static inline void apic_write(unsigned long reg, unsigned long v) -{ - PVOP_VCALL2(apic_write, reg, v); -} - -static inline void apic_write_atomic(unsigned long reg, unsigned long v) -{ - PVOP_VCALL2(apic_write_atomic, reg, v); -} - -static inline unsigned long apic_read(unsigned long reg) -{ - return PVOP_CALL1(unsigned long, apic_read, reg); -} - -static inline void setup_boot_clock(void) -{ - PVOP_VCALL0(setup_boot_clock); -} - -static inline void setup_secondary_clock(void) -{ - PVOP_VCALL0(setup_secondary_clock); -} -#endif - -static inline void paravirt_post_allocator_init(void) -{ - if (paravirt_ops.post_allocator_init) - (*paravirt_ops.post_allocator_init)(); -} - -static inline void paravirt_pagetable_setup_start(pgd_t *base) -{ - if (paravirt_ops.pagetable_setup_start) - (*paravirt_ops.pagetable_setup_start)(base); -} - -static inline void paravirt_pagetable_setup_done(pgd_t *base) -{ - if (paravirt_ops.pagetable_setup_done) - (*paravirt_ops.pagetable_setup_done)(base); -} - -#ifdef CONFIG_SMP -static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip, - unsigned long start_esp) -{ - PVOP_VCALL3(startup_ipi_hook, phys_apicid, start_eip, start_esp); -} -#endif - -static inline void paravirt_activate_mm(struct mm_struct *prev, - struct mm_struct *next) -{ - PVOP_VCALL2(activate_mm, prev, next); -} - -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) -{ - PVOP_VCALL2(dup_mmap, oldmm, mm); -} - -static inline void arch_exit_mmap(struct mm_struct *mm) -{ - PVOP_VCALL1(exit_mmap, mm); -} - -static inline void __flush_tlb(void) -{ - PVOP_VCALL0(flush_tlb_user); -} -static inline void __flush_tlb_global(void) -{ - PVOP_VCALL0(flush_tlb_kernel); -} -static inline void __flush_tlb_single(unsigned long addr) -{ - PVOP_VCALL1(flush_tlb_single, addr); -} - -static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, - unsigned long va) -{ - PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va); -} - -static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) -{ - PVOP_VCALL2(alloc_pt, mm, pfn); -} -static inline void paravirt_release_pt(unsigned pfn) -{ - PVOP_VCALL1(release_pt, pfn); -} - -static inline void paravirt_alloc_pd(unsigned pfn) -{ - PVOP_VCALL1(alloc_pd, pfn); -} - -static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, - unsigned start, unsigned count) -{ - PVOP_VCALL4(alloc_pd_clone, pfn, clonepfn, start, count); -} -static inline void paravirt_release_pd(unsigned pfn) -{ - PVOP_VCALL1(release_pd, pfn); -} - -#ifdef CONFIG_HIGHPTE -static inline void *kmap_atomic_pte(struct page *page, enum km_type type) -{ - unsigned long ret; - ret = PVOP_CALL2(unsigned long, kmap_atomic_pte, page, type); - return (void *)ret; -} -#endif - -static inline void pte_update(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - PVOP_VCALL3(pte_update, mm, addr, ptep); -} - -static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) -{ - PVOP_VCALL3(pte_update_defer, mm, addr, ptep); -} - -#ifdef CONFIG_X86_PAE -static inline pte_t __pte(unsigned long long val) -{ - unsigned long long ret = PVOP_CALL2(unsigned long long, make_pte, - val, val >> 32); - return (pte_t) { ret, ret >> 32 }; -} - -static inline pmd_t __pmd(unsigned long long val) -{ - return (pmd_t) { PVOP_CALL2(unsigned long long, make_pmd, val, val >> 32) }; -} - -static inline pgd_t __pgd(unsigned long long val) -{ - return (pgd_t) { PVOP_CALL2(unsigned long long, make_pgd, val, val >> 32) }; 
-} - -static inline unsigned long long pte_val(pte_t x) -{ - return PVOP_CALL2(unsigned long long, pte_val, x.pte_low, x.pte_high); -} - -static inline unsigned long long pmd_val(pmd_t x) -{ - return PVOP_CALL2(unsigned long long, pmd_val, x.pmd, x.pmd >> 32); -} - -static inline unsigned long long pgd_val(pgd_t x) -{ - return PVOP_CALL2(unsigned long long, pgd_val, x.pgd, x.pgd >> 32); -} - -static inline void set_pte(pte_t *ptep, pte_t pteval) -{ - PVOP_VCALL3(set_pte, ptep, pteval.pte_low, pteval.pte_high); -} - -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - /* 5 arg words */ - paravirt_ops.set_pte_at(mm, addr, ptep, pteval); -} - -static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) -{ - PVOP_VCALL3(set_pte_atomic, ptep, pteval.pte_low, pteval.pte_high); -} - -static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - /* 5 arg words */ - paravirt_ops.set_pte_present(mm, addr, ptep, pte); -} - -static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) -{ - PVOP_VCALL3(set_pmd, pmdp, pmdval.pmd, pmdval.pmd >> 32); -} - -static inline void set_pud(pud_t *pudp, pud_t pudval) -{ - PVOP_VCALL3(set_pud, pudp, pudval.pgd.pgd, pudval.pgd.pgd >> 32); -} - -static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - PVOP_VCALL3(pte_clear, mm, addr, ptep); -} - -static inline void pmd_clear(pmd_t *pmdp) -{ - PVOP_VCALL1(pmd_clear, pmdp); -} - -#else /* !CONFIG_X86_PAE */ - -static inline pte_t __pte(unsigned long val) -{ - return (pte_t) { PVOP_CALL1(unsigned long, make_pte, val) }; -} - -static inline pgd_t __pgd(unsigned long val) -{ - return (pgd_t) { PVOP_CALL1(unsigned long, make_pgd, val) }; -} - -static inline unsigned long pte_val(pte_t x) -{ - return PVOP_CALL1(unsigned long, pte_val, x.pte_low); -} - -static inline unsigned long pgd_val(pgd_t x) -{ - return PVOP_CALL1(unsigned long, pgd_val, x.pgd); -} - -static inline void set_pte(pte_t *ptep, pte_t pteval) -{ - PVOP_VCALL2(set_pte, ptep, pteval.pte_low); -} - -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - PVOP_VCALL4(set_pte_at, mm, addr, ptep, pteval.pte_low); -} - -static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) -{ - PVOP_VCALL2(set_pmd, pmdp, pmdval.pud.pgd.pgd); -} -#endif /* CONFIG_X86_PAE */ - -#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE -static inline void arch_enter_lazy_cpu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_CPU); -} - -static inline void arch_leave_lazy_cpu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); -} - -static inline void arch_flush_lazy_cpu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); -} - - -#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -static inline void arch_enter_lazy_mmu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_MMU); -} - -static inline void arch_leave_lazy_mmu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_NONE); -} - -static inline void arch_flush_lazy_mmu_mode(void) -{ - PVOP_VCALL1(set_lazy_mode, PARAVIRT_LAZY_FLUSH); -} - -void _paravirt_nop(void); -#define paravirt_nop ((void *)_paravirt_nop) - -/* These all sit in the .parainstructions section to tell us what to patch. 
*/ -struct paravirt_patch_site { - u8 *instr; /* original instructions */ - u8 instrtype; /* type of this instruction */ - u8 len; /* length of original instruction */ - u16 clobbers; /* what registers you may clobber */ -}; - -extern struct paravirt_patch_site __parainstructions[], - __parainstructions_end[]; - -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long f; - - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" - PARAVIRT_CALL - "popl %%edx; popl %%ecx") - : "=a"(f) - : paravirt_type(save_fl), - paravirt_clobber(CLBR_EAX) - : "memory", "cc"); - return f; -} - -static inline void raw_local_irq_restore(unsigned long f) -{ - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" - PARAVIRT_CALL - "popl %%edx; popl %%ecx") - : "=a"(f) - : "0"(f), - paravirt_type(restore_fl), - paravirt_clobber(CLBR_EAX) - : "memory", "cc"); -} - -static inline void raw_local_irq_disable(void) -{ - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" - PARAVIRT_CALL - "popl %%edx; popl %%ecx") - : - : paravirt_type(irq_disable), - paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); -} - -static inline void raw_local_irq_enable(void) -{ - asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;" - PARAVIRT_CALL - "popl %%edx; popl %%ecx") - : - : paravirt_type(irq_enable), - paravirt_clobber(CLBR_EAX) - : "memory", "eax", "cc"); -} - -static inline unsigned long __raw_local_irq_save(void) -{ - unsigned long f; - - f = __raw_local_save_flags(); - raw_local_irq_disable(); - return f; -} - -#define CLI_STRING \ - _paravirt_alt("pushl %%ecx; pushl %%edx;" \ - "call *paravirt_ops+%c[paravirt_cli_type]*4;" \ - "popl %%edx; popl %%ecx", \ - "%c[paravirt_cli_type]", "%c[paravirt_clobber]") - -#define STI_STRING \ - _paravirt_alt("pushl %%ecx; pushl %%edx;" \ - "call *paravirt_ops+%c[paravirt_sti_type]*4;" \ - "popl %%edx; popl %%ecx", \ - "%c[paravirt_sti_type]", "%c[paravirt_clobber]") - -#define CLI_STI_CLOBBERS , "%eax" -#define CLI_STI_INPUT_ARGS \ - , \ - [paravirt_cli_type] "i" (PARAVIRT_PATCH(irq_disable)), \ - [paravirt_sti_type] "i" (PARAVIRT_PATCH(irq_enable)), \ - paravirt_clobber(CLBR_EAX) - -/* Make sure as little as possible of this mess escapes. 
*/ -#undef PARAVIRT_CALL -#undef __PVOP_CALL -#undef __PVOP_VCALL -#undef PVOP_VCALL0 -#undef PVOP_CALL0 -#undef PVOP_VCALL1 -#undef PVOP_CALL1 -#undef PVOP_VCALL2 -#undef PVOP_CALL2 -#undef PVOP_VCALL3 -#undef PVOP_CALL3 -#undef PVOP_VCALL4 -#undef PVOP_CALL4 - -#else /* __ASSEMBLY__ */ - -#define PARA_PATCH(off) ((off) / 4) - -#define PARA_SITE(ptype, clobbers, ops) \ -771:; \ - ops; \ -772:; \ - .pushsection .parainstructions,"a"; \ - .long 771b; \ - .byte ptype; \ - .byte 772b-771b; \ - .short clobbers; \ - .popsection - -#define INTERRUPT_RETURN \ - PARA_SITE(PARA_PATCH(PARAVIRT_iret), CLBR_NONE, \ - jmp *%cs:paravirt_ops+PARAVIRT_iret) - -#define DISABLE_INTERRUPTS(clobbers) \ - PARA_SITE(PARA_PATCH(PARAVIRT_irq_disable), clobbers, \ - pushl %eax; pushl %ecx; pushl %edx; \ - call *%cs:paravirt_ops+PARAVIRT_irq_disable; \ - popl %edx; popl %ecx; popl %eax) \ - -#define ENABLE_INTERRUPTS(clobbers) \ - PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable), clobbers, \ - pushl %eax; pushl %ecx; pushl %edx; \ - call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ - popl %edx; popl %ecx; popl %eax) - -#define ENABLE_INTERRUPTS_SYSEXIT \ - PARA_SITE(PARA_PATCH(PARAVIRT_irq_enable_sysexit), CLBR_NONE, \ - jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) - -#define GET_CR0_INTO_EAX \ - push %ecx; push %edx; \ - call *paravirt_ops+PARAVIRT_read_cr0; \ - pop %edx; pop %ecx - -#endif /* __ASSEMBLY__ */ -#endif /* CONFIG_PARAVIRT */ -#endif /* __ASM_PARAVIRT_H */ diff --git a/include/asm-i386/parport.h b/include/asm-i386/parport.h deleted file mode 100644 index fa0e321e498e..000000000000 --- a/include/asm-i386/parport.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * parport.h: ia32-specific parport initialisation - * - * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> - * - * This file should only be included by drivers/parport/parport_pc.c. 
- */ - -#ifndef _ASM_I386_PARPORT_H -#define _ASM_I386_PARPORT_H 1 - -static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); -static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma) -{ - return parport_pc_find_isa_ports (autoirq, autodma); -} - -#endif /* !(_ASM_I386_PARPORT_H) */ diff --git a/include/asm-i386/pci-direct.h b/include/asm-i386/pci-direct.h deleted file mode 100644 index 4f6738b08206..000000000000 --- a/include/asm-i386/pci-direct.h +++ /dev/null @@ -1 +0,0 @@ -#include "asm-x86_64/pci-direct.h" diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h deleted file mode 100644 index 4fcacc711385..000000000000 --- a/include/asm-i386/pci.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef __i386_PCI_H -#define __i386_PCI_H - - -#ifdef __KERNEL__ - -struct pci_sysdata { - int node; /* NUMA node */ -}; - -/* scan a bus after allocating a pci_sysdata for it */ -extern struct pci_bus *pci_scan_bus_with_sysdata(int busno); - -#include <linux/mm.h> /* for struct page */ - -/* Can be used to override the logic in pci_scan_bus for skipping - already-configured bus numbers - to be used for buggy BIOSes - or architectures with incomplete PCI setup by the loader */ - -#ifdef CONFIG_PCI -extern unsigned int pcibios_assign_all_busses(void); -#else -#define pcibios_assign_all_busses() 0 -#endif -#define pcibios_scan_all_fns(a, b) 0 - -extern unsigned long pci_mem_start; -#define PCIBIOS_MIN_IO 0x1000 -#define PCIBIOS_MIN_MEM (pci_mem_start) - -#define PCIBIOS_MIN_CARDBUS_IO 0x4000 - -void pcibios_config_init(void); -struct pci_bus * pcibios_scan_root(int bus); - -void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq, int active); -struct irq_routing_table *pcibios_get_irq_routing_table(void); -int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq); - -/* Dynamic DMA mapping stuff. - * i386 has everything mapped statically. - */ - -#include <linux/types.h> -#include <linux/slab.h> -#include <asm/scatterlist.h> -#include <linux/string.h> -#include <asm/io.h> - -struct pci_dev; - -/* The PCI address space does equal the physical memory - * address space. The networking and block device layers use - * this boolean for bounce buffer decisions. - */ -#define PCI_DMA_BUS_IS_PHYS (1) - -/* pci_unmap_{page,single} is a nop so... 
*/ -#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) -#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) -#define pci_unmap_addr(PTR, ADDR_NAME) (0) -#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) -#define pci_unmap_len(PTR, LEN_NAME) (0) -#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) - -#define HAVE_PCI_MMAP -extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); - - -#ifdef CONFIG_PCI -static inline void pci_dma_burst_advice(struct pci_dev *pdev, - enum pci_dma_burst_strategy *strat, - unsigned long *strategy_parameter) -{ - *strat = PCI_DMA_BURST_INFINITY; - *strategy_parameter = ~0UL; -} -#endif - -#endif /* __KERNEL__ */ - -/* implement the pci_ DMA API in terms of the generic device dma_ one */ -#include <asm-generic/pci-dma-compat.h> - -/* generic pci stuff */ -#include <asm-generic/pci.h> - -#endif /* __i386_PCI_H */ diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h deleted file mode 100644 index a7ebd436f3cc..000000000000 --- a/include/asm-i386/percpu.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef __ARCH_I386_PERCPU__ -#define __ARCH_I386_PERCPU__ - -#ifdef __ASSEMBLY__ - -/* - * PER_CPU finds an address of a per-cpu variable. - * - * Args: - * var - variable name - * reg - 32bit register - * - * The resulting address is stored in the "reg" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ -#ifdef CONFIG_SMP -#define PER_CPU(var, reg) \ - movl %fs:per_cpu__##this_cpu_off, reg; \ - lea per_cpu__##var(reg), reg -#define PER_CPU_VAR(var) %fs:per_cpu__##var -#else /* ! SMP */ -#define PER_CPU(var, reg) \ - movl $per_cpu__##var, reg -#define PER_CPU_VAR(var) per_cpu__##var -#endif /* SMP */ - -#else /* ...!ASSEMBLY */ - -/* - * PER_CPU finds an address of a per-cpu variable. - * - * Args: - * var - variable name - * cpu - 32bit register containing the current CPU number - * - * The resulting address is stored in the "cpu" argument. - * - * Example: - * PER_CPU(cpu_gdt_descr, %ebx) - */ -#ifdef CONFIG_SMP -/* Same as generic implementation except for optimized local access. */ -#define __GENERIC_PER_CPU - -/* This is used for other cpus to find our section. */ -extern unsigned long __per_cpu_offset[]; - -#define per_cpu_offset(x) (__per_cpu_offset[x]) - -/* Separate out the type, so (int[3], foo) works. */ -#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name -#define DEFINE_PER_CPU(type, name) \ - __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name - -#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ - __attribute__((__section__(".data.percpu.shared_aligned"))) \ - __typeof__(type) per_cpu__##name \ - ____cacheline_aligned_in_smp - -/* We can use this directly for local CPU (faster). */ -DECLARE_PER_CPU(unsigned long, this_cpu_off); - -/* var is in discarded region: offset to particular copy we want */ -#define per_cpu(var, cpu) (*({ \ - extern int simple_indentifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); })) - -#define __raw_get_cpu_var(var) (*({ \ - extern int simple_indentifier_##var(void); \ - RELOC_HIDE(&per_cpu__##var, x86_read_percpu(this_cpu_off)); \ -})) - -#define __get_cpu_var(var) __raw_get_cpu_var(var) - -/* A macro to avoid #include hell... 
*/ -#define percpu_modcopy(pcpudst, src, size) \ -do { \ - unsigned int __i; \ - for_each_possible_cpu(__i) \ - memcpy((pcpudst)+__per_cpu_offset[__i], \ - (src), (size)); \ -} while (0) - -#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) -#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) - -/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */ -#define __percpu_seg "%%fs:" -#else /* !SMP */ -#include <asm-generic/percpu.h> -#define __percpu_seg "" -#endif /* SMP */ - -/* For arch-specific code, we can use direct single-insn ops (they - * don't give an lvalue though). */ -extern void __bad_percpu_size(void); - -#define percpu_to_op(op,var,val) \ - do { \ - typedef typeof(var) T__; \ - if (0) { T__ tmp__; tmp__ = (val); } \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 2: \ - asm(op "w %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - case 4: \ - asm(op "l %1,"__percpu_seg"%0" \ - : "+m" (var) \ - :"ri" ((T__)val)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - } while (0) - -#define percpu_from_op(op,var) \ - ({ \ - typeof(var) ret__; \ - switch (sizeof(var)) { \ - case 1: \ - asm(op "b "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 2: \ - asm(op "w "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - case 4: \ - asm(op "l "__percpu_seg"%1,%0" \ - : "=r" (ret__) \ - : "m" (var)); \ - break; \ - default: __bad_percpu_size(); \ - } \ - ret__; }) - -#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var) -#define x86_write_percpu(var,val) percpu_to_op("mov", per_cpu__##var, val) -#define x86_add_percpu(var,val) percpu_to_op("add", per_cpu__##var, val) -#define x86_sub_percpu(var,val) percpu_to_op("sub", per_cpu__##var, val) -#define x86_or_percpu(var,val) percpu_to_op("or", per_cpu__##var, val) -#endif /* !__ASSEMBLY__ */ - -#endif /* __ARCH_I386_PERCPU__ */ diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h deleted file mode 100644 index f2fc33ceb9f2..000000000000 --- a/include/asm-i386/pgalloc.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef _I386_PGALLOC_H -#define _I386_PGALLOC_H - -#include <linux/threads.h> -#include <linux/mm.h> /* for struct page */ - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define paravirt_alloc_pt(mm, pfn) do { } while (0) -#define paravirt_alloc_pd(pfn) do { } while (0) -#define paravirt_alloc_pd(pfn) do { } while (0) -#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0) -#define paravirt_release_pt(pfn) do { } while (0) -#define paravirt_release_pd(pfn) do { } while (0) -#endif - -#define pmd_populate_kernel(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \ -} while (0) - -#define pmd_populate(mm, pmd, pte) \ -do { \ - paravirt_alloc_pt(mm, page_to_pfn(pte)); \ - set_pmd(pmd, __pmd(_PAGE_TABLE + \ - ((unsigned long long)page_to_pfn(pte) << \ - (unsigned long long) PAGE_SHIFT))); \ -} while (0) - -/* - * Allocate and free page tables. 
- */ -extern pgd_t *pgd_alloc(struct mm_struct *); -extern void pgd_free(pgd_t *pgd); - -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long); -extern struct page *pte_alloc_one(struct mm_struct *, unsigned long); - -static inline void pte_free_kernel(pte_t *pte) -{ - free_page((unsigned long)pte); -} - -static inline void pte_free(struct page *pte) -{ - __free_page(pte); -} - - -#define __pte_free_tlb(tlb,pte) \ -do { \ - paravirt_release_pt(page_to_pfn(pte)); \ - tlb_remove_page((tlb),(pte)); \ -} while (0) - -#ifdef CONFIG_X86_PAE -/* - * In the PAE case we free the pmds as part of the pgd. - */ -#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) -#define pmd_free(x) do { } while (0) -#define __pmd_free_tlb(tlb,x) do { } while (0) -#define pud_populate(mm, pmd, pte) BUG() -#endif - -#endif /* _I386_PGALLOC_H */ diff --git a/include/asm-i386/pgtable-2level-defs.h b/include/asm-i386/pgtable-2level-defs.h deleted file mode 100644 index 0f71c9f13da4..000000000000 --- a/include/asm-i386/pgtable-2level-defs.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _I386_PGTABLE_2LEVEL_DEFS_H -#define _I386_PGTABLE_2LEVEL_DEFS_H - -#define SHARED_KERNEL_PMD 0 - -/* - * traditional i386 two-level paging structure: - */ - -#define PGDIR_SHIFT 22 -#define PTRS_PER_PGD 1024 - -/* - * the i386 is two-level, so we don't really have any - * PMD directory physically. - */ - -#define PTRS_PER_PTE 1024 - -#endif /* _I386_PGTABLE_2LEVEL_DEFS_H */ diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h deleted file mode 100644 index 84b03cf56a79..000000000000 --- a/include/asm-i386/pgtable-2level.h +++ /dev/null @@ -1,86 +0,0 @@ -#ifndef _I386_PGTABLE_2LEVEL_H -#define _I386_PGTABLE_2LEVEL_H - -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) - -/* - * Certain architectures need to do special things when PTEs - * within a page table are directly modified. Thus, the following - * hook is made available. 
- */ -static inline void native_set_pte(pte_t *ptep , pte_t pte) -{ - *ptep = pte; -} -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep , pte_t pte) -{ - native_set_pte(ptep, pte); -} -static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) -{ - *pmdp = pmd; -} -#ifndef CONFIG_PARAVIRT -#define set_pte(pteptr, pteval) native_set_pte(pteptr, pteval) -#define set_pte_at(mm,addr,ptep,pteval) native_set_pte_at(mm, addr, ptep, pteval) -#define set_pmd(pmdptr, pmdval) native_set_pmd(pmdptr, pmdval) -#endif - -#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) -#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) - -#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) -#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) - -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp) -{ - *xp = __pte(0); -} - -#ifdef CONFIG_SMP -static inline pte_t native_ptep_get_and_clear(pte_t *xp) -{ - return __pte(xchg(&xp->pte_low, 0)); -} -#else -#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) -#endif - -#define pte_page(x) pfn_to_page(pte_pfn(x)) -#define pte_none(x) (!(x).pte_low) -#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) -#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) -#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) - -/* - * All present pages are kernel-executable: - */ -static inline int pte_exec_kernel(pte_t pte) -{ - return 1; -} - -/* - * Bits 0, 6 and 7 are taken, split up the 29 bits of offset - * into this range: - */ -#define PTE_FILE_MAX_BITS 29 - -#define pte_to_pgoff(pte) \ - ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 )) - -#define pgoff_to_pte(off) \ - ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE }) - -/* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val >> 1) & 0x1f) -#define __swp_offset(x) ((x).val >> 8) -#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) - -#endif /* _I386_PGTABLE_2LEVEL_H */ diff --git a/include/asm-i386/pgtable-3level-defs.h b/include/asm-i386/pgtable-3level-defs.h deleted file mode 100644 index c0df89f66e8b..000000000000 --- a/include/asm-i386/pgtable-3level-defs.h +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef _I386_PGTABLE_3LEVEL_DEFS_H -#define _I386_PGTABLE_3LEVEL_DEFS_H - -#ifdef CONFIG_PARAVIRT -#define SHARED_KERNEL_PMD (paravirt_ops.shared_kernel_pmd) -#else -#define SHARED_KERNEL_PMD 1 -#endif - -/* - * PGDIR_SHIFT determines what a top-level page table entry can map - */ -#define PGDIR_SHIFT 30 -#define PTRS_PER_PGD 4 - -/* - * PMD_SHIFT determines the size of the area a middle-level - * page table can map - */ -#define PMD_SHIFT 21 -#define PTRS_PER_PMD 512 - -/* - * entries per page directory level - */ -#define PTRS_PER_PTE 512 - -#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */ diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h deleted file mode 100644 index 948a33414118..000000000000 --- a/include/asm-i386/pgtable-3level.h +++ /dev/null @@ -1,192 +0,0 @@ -#ifndef _I386_PGTABLE_3LEVEL_H -#define _I386_PGTABLE_3LEVEL_H - -/* - * Intel Physical Address Extension (PAE) Mode - three-level page - * tables on PPro+ CPUs. 
- * - * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com> - */ - -#define pte_ERROR(e) \ - printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low) -#define pmd_ERROR(e) \ - printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e)) -#define pgd_ERROR(e) \ - printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e)) - -#define pud_none(pud) 0 -#define pud_bad(pud) 0 -#define pud_present(pud) 1 - -/* - * All present pages with !NX bit are kernel-executable: - */ -static inline int pte_exec_kernel(pte_t pte) -{ - return !(pte_val(pte) & _PAGE_NX); -} - -/* Rules for using set_pte: the pte being assigned *must* be - * either not present or in a state where the hardware will - * not attempt to update the pte. In places where this is - * not possible, use pte_get_and_clear to obtain the old pte - * value and then use set_pte to update it. -ben - */ -static inline void native_set_pte(pte_t *ptep, pte_t pte) -{ - ptep->pte_high = pte.pte_high; - smp_wmb(); - ptep->pte_low = pte.pte_low; -} -static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep , pte_t pte) -{ - native_set_pte(ptep, pte); -} - -/* - * Since this is only called on user PTEs, and the page fault handler - * must handle the already racy situation of simultaneous page faults, - * we are justified in merely clearing the PTE present bit, followed - * by a set. The ordering here is important. - */ -static inline void native_set_pte_present(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) -{ - ptep->pte_low = 0; - smp_wmb(); - ptep->pte_high = pte.pte_high; - smp_wmb(); - ptep->pte_low = pte.pte_low; -} - -static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) -{ - set_64bit((unsigned long long *)(ptep),native_pte_val(pte)); -} -static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) -{ - set_64bit((unsigned long long *)(pmdp),native_pmd_val(pmd)); -} -static inline void native_set_pud(pud_t *pudp, pud_t pud) -{ - *pudp = pud; -} - -/* - * For PTEs and PDEs, we must clear the P-bit first when clearing a page table - * entry, so clear the bottom half first and enforce ordering with a compiler - * barrier. - */ -static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - ptep->pte_low = 0; - smp_wmb(); - ptep->pte_high = 0; -} - -static inline void native_pmd_clear(pmd_t *pmd) -{ - u32 *tmp = (u32 *)pmd; - *tmp = 0; - smp_wmb(); - *(tmp + 1) = 0; -} - -#ifndef CONFIG_PARAVIRT -#define set_pte(ptep, pte) native_set_pte(ptep, pte) -#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) -#define set_pte_present(mm, addr, ptep, pte) native_set_pte_present(mm, addr, ptep, pte) -#define set_pte_atomic(ptep, pte) native_set_pte_atomic(ptep, pte) -#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) -#define set_pud(pudp, pud) native_set_pud(pudp, pud) -#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) -#define pmd_clear(pmd) native_pmd_clear(pmd) -#endif - -/* - * Pentium-II erratum A13: in PAE mode we explicitly have to flush - * the TLB via cr3 if the top-level pgd is changed... - * We do not let the generic code free and clear pgd entries due to - * this erratum. - */ -static inline void pud_clear (pud_t * pud) { } - -#define pud_page(pud) \ -((struct page *) __va(pud_val(pud) & PAGE_MASK)) - -#define pud_page_vaddr(pud) \ -((unsigned long) __va(pud_val(pud) & PAGE_MASK)) - - -/* Find an entry in the second-level page table.. 
*/ -#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ - pmd_index(address)) - -#ifdef CONFIG_SMP -static inline pte_t native_ptep_get_and_clear(pte_t *ptep) -{ - pte_t res; - - /* xchg acts as a barrier before the setting of the high bits */ - res.pte_low = xchg(&ptep->pte_low, 0); - res.pte_high = ptep->pte_high; - ptep->pte_high = 0; - - return res; -} -#else -#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp) -#endif - -#define __HAVE_ARCH_PTE_SAME -static inline int pte_same(pte_t a, pte_t b) -{ - return a.pte_low == b.pte_low && a.pte_high == b.pte_high; -} - -#define pte_page(x) pfn_to_page(pte_pfn(x)) - -static inline int pte_none(pte_t pte) -{ - return !pte.pte_low && !pte.pte_high; -} - -static inline unsigned long pte_pfn(pte_t pte) -{ - return pte_val(pte) >> PAGE_SHIFT; -} - -extern unsigned long long __supported_pte_mask; - -static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) -{ - return __pte((((unsigned long long)page_nr << PAGE_SHIFT) | - pgprot_val(pgprot)) & __supported_pte_mask); -} - -static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) -{ - return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | - pgprot_val(pgprot)) & __supported_pte_mask); -} - -/* - * Bits 0, 6 and 7 are taken in the low part of the pte, - * put the 32 bits of offset into the high part. - */ -#define pte_to_pgoff(pte) ((pte).pte_high) -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) -#define PTE_FILE_MAX_BITS 32 - -/* Encode and de-code a swap entry */ -#define __swp_type(x) (((x).val) & 0x1f) -#define __swp_offset(x) ((x).val >> 5) -#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5}) -#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) -#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) - -#define __pmd_free_tlb(tlb, x) do { } while (0) - -#endif /* _I386_PGTABLE_3LEVEL_H */ diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h deleted file mode 100644 index c7fefa6b12fd..000000000000 --- a/include/asm-i386/pgtable.h +++ /dev/null @@ -1,512 +0,0 @@ -#ifndef _I386_PGTABLE_H -#define _I386_PGTABLE_H - - -/* - * The Linux memory management assumes a three-level page table setup. On - * the i386, we use that, but "fold" the mid level into the top-level page - * table, so that we physically have the same two-level page table as the - * i386 mmu expects. - * - * This file contains the functions and defines necessary to modify and use - * the i386 page table tree. - */ -#ifndef __ASSEMBLY__ -#include <asm/processor.h> -#include <asm/fixmap.h> -#include <linux/threads.h> -#include <asm/paravirt.h> - -#ifndef _I386_BITOPS_H -#include <asm/bitops.h> -#endif - -#include <linux/slab.h> -#include <linux/list.h> -#include <linux/spinlock.h> - -struct mm_struct; -struct vm_area_struct; - -/* - * ZERO_PAGE is a global shared page that is always zero: used - * for zero-mapped memory areas etc.. - */ -#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) -extern unsigned long empty_zero_page[1024]; -extern pgd_t swapper_pg_dir[1024]; -extern struct kmem_cache *pmd_cache; -extern spinlock_t pgd_lock; -extern struct page *pgd_list; -void check_pgt_cache(void); - -void pmd_ctor(void *, struct kmem_cache *, unsigned long); -void pgtable_cache_init(void); -void paging_init(void); - - -/* - * The Linux x86 paging architecture is 'compile-time dual-mode', it - * implements both the traditional 2-level x86 page tables and the - * newer 3-level PAE-mode page tables. 
- */ -#ifdef CONFIG_X86_PAE -# include <asm/pgtable-3level-defs.h> -# define PMD_SIZE (1UL << PMD_SHIFT) -# define PMD_MASK (~(PMD_SIZE-1)) -#else -# include <asm/pgtable-2level-defs.h> -#endif - -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) -#define PGDIR_MASK (~(PGDIR_SIZE-1)) - -#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) -#define FIRST_USER_ADDRESS 0 - -#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) -#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) - -#define TWOLEVEL_PGDIR_SHIFT 22 -#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) -#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) - -/* Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the - * physical memory until the kernel virtual memory starts. That means that - * any out-of-bounds memory accesses will hopefully be caught. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced - * area for the same reason. ;) - */ -#define VMALLOC_OFFSET (8*1024*1024) -#define VMALLOC_START (((unsigned long) high_memory + \ - 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1)) -#ifdef CONFIG_HIGHMEM -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) -#else -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) -#endif - -/* - * _PAGE_PSE set in the page directory entry just means that - * the page directory entry points directly to a 4MB-aligned block of - * memory. - */ -#define _PAGE_BIT_PRESENT 0 -#define _PAGE_BIT_RW 1 -#define _PAGE_BIT_USER 2 -#define _PAGE_BIT_PWT 3 -#define _PAGE_BIT_PCD 4 -#define _PAGE_BIT_ACCESSED 5 -#define _PAGE_BIT_DIRTY 6 -#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */ -#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ -#define _PAGE_BIT_UNUSED2 10 -#define _PAGE_BIT_UNUSED3 11 -#define _PAGE_BIT_NX 63 - -#define _PAGE_PRESENT 0x001 -#define _PAGE_RW 0x002 -#define _PAGE_USER 0x004 -#define _PAGE_PWT 0x008 -#define _PAGE_PCD 0x010 -#define _PAGE_ACCESSED 0x020 -#define _PAGE_DIRTY 0x040 -#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. 
*/ -#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ -#define _PAGE_UNUSED1 0x200 /* available for programmer */ -#define _PAGE_UNUSED2 0x400 -#define _PAGE_UNUSED3 0x800 - -/* If _PAGE_PRESENT is clear, we use these: */ -#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */ -#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE; - pte_present gives true */ -#ifdef CONFIG_X86_PAE -#define _PAGE_NX (1ULL<<_PAGE_BIT_NX) -#else -#define _PAGE_NX 0 -#endif - -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) -#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) - -#define PAGE_NONE \ - __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) -#define PAGE_SHARED \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) - -#define PAGE_SHARED_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY_NOEXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_COPY_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) -#define PAGE_COPY \ - PAGE_COPY_NOEXEC -#define PAGE_READONLY \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX) -#define PAGE_READONLY_EXEC \ - __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) - -#define _PAGE_KERNEL \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX) -#define _PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) - -extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; -#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW) -#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW) -#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD) -#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) -#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) - -#define PAGE_KERNEL __pgprot(__PAGE_KERNEL) -#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO) -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) -#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX) -#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE) -#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE) -#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC) - -/* - * The i386 can't do page protection for execute, and considers that - * the same are read. Also, write permissions imply read permissions. - * This is the closest we can get.. 
- */ -#define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_READONLY_EXEC -#define __P101 PAGE_READONLY_EXEC -#define __P110 PAGE_COPY_EXEC -#define __P111 PAGE_COPY_EXEC - -#define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_READONLY_EXEC -#define __S101 PAGE_READONLY_EXEC -#define __S110 PAGE_SHARED_EXEC -#define __S111 PAGE_SHARED_EXEC - -/* - * Define this if things work differently on an i386 and an i486: - * it will (on an i486) warn about kernel memory accesses that are - * done without a 'access_ok(VERIFY_WRITE,..)' - */ -#undef TEST_ACCESS_OK - -/* The boot page tables (all created as a single array) */ -extern unsigned long pg0[]; - -#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) - -/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ -#define pmd_none(x) (!(unsigned long)pmd_val(x)) -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) - - -#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) - -/* - * The following only work if pte_present() is true. - * Undefined behaviour if not.. - */ -static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; } -static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; } -static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; } -static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; } - -/* - * The following only works if pte_present() is not true. - */ -static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; } - -static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; } -static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; } -static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; } -static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; } -static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; } -static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; } -static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; } - -#ifdef CONFIG_X86_PAE -# include <asm/pgtable-3level.h> -#else -# include <asm/pgtable-2level.h> -#endif - -#ifndef CONFIG_PARAVIRT -/* - * Rules for using pte_update - it must be called after any PTE update which - * has not been done using the set_pte / clear_pte interfaces. It is used by - * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE - * updates should either be sets, clears, or set_pte_atomic for P->P - * transitions, which means this hook should only be called for user PTEs. - * This hook implies a P->P protection or access change has taken place, which - * requires a subsequent TLB flush. The notification can optionally be delayed - * until the TLB flush event by using the pte_update_defer form of the - * interface, but care must be taken to assure that the flush happens while - * still holding the same page table lock so that the shadow and primary pages - * do not become out of sync on SMP. 
- */ -#define pte_update(mm, addr, ptep) do { } while (0) -#define pte_update_defer(mm, addr, ptep) do { } while (0) -#endif - -/* local pte updates need not use xchg for locking */ -static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) -{ - pte_t res = *ptep; - - /* Pure native function needs no input for mm, addr */ - native_pte_clear(NULL, 0, ptep); - return res; -} - -/* - * We only update the dirty/accessed state if we set - * the dirty bit by hand in the kernel, since the hardware - * will do the accessed bit for us, and we don't want to - * race with other CPU's that might be updating the dirty - * bit at the same time. - */ -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ -({ \ - int __changed = !pte_same(*(ptep), entry); \ - if (__changed && dirty) { \ - (ptep)->pte_low = (entry).pte_low; \ - pte_update_defer((vma)->vm_mm, (address), (ptep)); \ - flush_tlb_page(vma, address); \ - } \ - __changed; \ -}) - -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ - int __ret = 0; \ - if (pte_young(*(ptep))) \ - __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \ - &(ptep)->pte_low); \ - if (__ret) \ - pte_update((vma)->vm_mm, addr, ptep); \ - __ret; \ -}) - -#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -#define ptep_clear_flush_young(vma, address, ptep) \ -({ \ - int __young; \ - __young = ptep_test_and_clear_young((vma), (address), (ptep)); \ - if (__young) \ - flush_tlb_page(vma, address); \ - __young; \ -}) - -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - pte_t pte = native_ptep_get_and_clear(ptep); - pte_update(mm, addr, ptep); - return pte; -} - -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL -static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) -{ - pte_t pte; - if (full) { - /* - * Full address destruction in progress; paravirt does not - * care about updates and native needs no locking - */ - pte = native_local_ptep_get_and_clear(ptep); - } else { - pte = ptep_get_and_clear(mm, addr, ptep); - } - return pte; -} - -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - clear_bit(_PAGE_BIT_RW, &ptep->pte_low); - pte_update(mm, addr, ptep); -} - -/* - * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); - * - * dst - pointer to pgd range anwhere on a pgd page - * src - "" - * count - the number of pgds to copy. - * - * dst and src can be on the same page, but the range must not overlap, - * and must not cross a page boundary. - */ -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) -{ - memcpy(dst, src, count * sizeof(pgd_t)); -} - -/* - * Macro to mark a page protection value as "uncacheable". On processors which do not support - * it, this is a no-op. - */ -#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \ - ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot)) - -/* - * Conversion functions: convert a page and protection to a page entry, - * and a page entry and page directory to the page they refer to. 
- */ - -#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) - -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) -{ - pte.pte_low &= _PAGE_CHG_MASK; - pte.pte_low |= pgprot_val(newprot); -#ifdef CONFIG_X86_PAE - /* - * Chop off the NX bit (if present), and add the NX portion of - * the newprot (if present): - */ - pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32)); - pte.pte_high |= (pgprot_val(newprot) >> 32) & \ - (__supported_pte_mask >> 32); -#endif - return pte; -} - -#define pmd_large(pmd) \ -((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT)) - -/* - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] - * - * this macro returns the index of the entry in the pgd page which would - * control the given virtual address - */ -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) -#define pgd_index_k(addr) pgd_index(addr) - -/* - * pgd_offset() returns a (pgd_t *) - * pgd_index() is used get the offset into the pgd page's array of pgd_t's; - */ -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) - -/* - * a shortcut which implies the use of the kernel's pgd, instead - * of a process's - */ -#define pgd_offset_k(address) pgd_offset(&init_mm, address) - -/* - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD] - * - * this macro returns the index of the entry in the pmd page which would - * control the given virtual address - */ -#define pmd_index(address) \ - (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) - -/* - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] - * - * this macro returns the index of the entry in the pte page which would - * control the given virtual address - */ -#define pte_index(address) \ - (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address)) - -#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) - -#define pmd_page_vaddr(pmd) \ - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) - -/* - * Helper function that returns the kernel pagetable entry controlling - * the virtual address 'address'. NULL means no pagetable entry present. - * NOTE: the return type is pte_t but if the pmd is PSE then we return it - * as a pte too. - */ -extern pte_t *lookup_address(unsigned long address); - -/* - * Make a given kernel text page executable/non-executable. - * Returns the previous executability setting of that page (which - * is used to restore the previous state). Used by the SMP bootup code. - * NOTE: this is an __init function for security reasons. 
- */ -#ifdef CONFIG_X86_PAE - extern int set_kernel_exec(unsigned long vaddr, int enable); -#else - static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;} -#endif - -#if defined(CONFIG_HIGHPTE) -#define pte_offset_map(dir, address) \ - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address)) -#define pte_offset_map_nested(dir, address) \ - ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address)) -#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) -#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) -#else -#define pte_offset_map(dir, address) \ - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address)) -#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) -#define pte_unmap(pte) do { } while (0) -#define pte_unmap_nested(pte) do { } while (0) -#endif - -/* Clear a kernel PTE and flush it from the TLB */ -#define kpte_clear_flush(ptep, vaddr) \ -do { \ - pte_clear(&init_mm, vaddr, ptep); \ - __flush_tlb_one(vaddr); \ -} while (0) - -/* - * The i386 doesn't have any external MMU info: the kernel page - * tables contain all the necessary information. - */ -#define update_mmu_cache(vma,address,pte) do { } while (0) - -void native_pagetable_setup_start(pgd_t *base); -void native_pagetable_setup_done(pgd_t *base); - -#ifndef CONFIG_PARAVIRT -static inline void paravirt_pagetable_setup_start(pgd_t *base) -{ - native_pagetable_setup_start(base); -} - -static inline void paravirt_pagetable_setup_done(pgd_t *base) -{ - native_pagetable_setup_done(base); -} -#endif /* !CONFIG_PARAVIRT */ - -#endif /* !__ASSEMBLY__ */ - -#ifdef CONFIG_FLATMEM -#define kern_addr_valid(addr) (1) -#endif /* CONFIG_FLATMEM */ - -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range(vma, vaddr, pfn, size, prot) - -#include <asm-generic/pgtable.h> - -#endif /* _I386_PGTABLE_H */ diff --git a/include/asm-i386/poll.h b/include/asm-i386/poll.h deleted file mode 100644 index c98509d3149e..000000000000 --- a/include/asm-i386/poll.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/poll.h> diff --git a/include/asm-i386/posix_types.h b/include/asm-i386/posix_types.h deleted file mode 100644 index 133e31e7dfde..000000000000 --- a/include/asm-i386/posix_types.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef __ARCH_I386_POSIX_TYPES_H -#define __ARCH_I386_POSIX_TYPES_H - -/* - * This file is generally used by user-level software, so you need to - * be a little careful about namespace pollution etc. Also, we cannot - * assume GCC is being used. 
- */ - -typedef unsigned long __kernel_ino_t; -typedef unsigned short __kernel_mode_t; -typedef unsigned short __kernel_nlink_t; -typedef long __kernel_off_t; -typedef int __kernel_pid_t; -typedef unsigned short __kernel_ipc_pid_t; -typedef unsigned short __kernel_uid_t; -typedef unsigned short __kernel_gid_t; -typedef unsigned int __kernel_size_t; -typedef int __kernel_ssize_t; -typedef int __kernel_ptrdiff_t; -typedef long __kernel_time_t; -typedef long __kernel_suseconds_t; -typedef long __kernel_clock_t; -typedef int __kernel_timer_t; -typedef int __kernel_clockid_t; -typedef int __kernel_daddr_t; -typedef char * __kernel_caddr_t; -typedef unsigned short __kernel_uid16_t; -typedef unsigned short __kernel_gid16_t; -typedef unsigned int __kernel_uid32_t; -typedef unsigned int __kernel_gid32_t; - -typedef unsigned short __kernel_old_uid_t; -typedef unsigned short __kernel_old_gid_t; -typedef unsigned short __kernel_old_dev_t; - -#ifdef __GNUC__ -typedef long long __kernel_loff_t; -#endif - -typedef struct { -#if defined(__KERNEL__) || defined(__USE_ALL) - int val[2]; -#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ - int __val[2]; -#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ -} __kernel_fsid_t; - -#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) - -#undef __FD_SET -#define __FD_SET(fd,fdsetp) \ - __asm__ __volatile__("btsl %1,%0": \ - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) - -#undef __FD_CLR -#define __FD_CLR(fd,fdsetp) \ - __asm__ __volatile__("btrl %1,%0": \ - "+m" (*(__kernel_fd_set *) (fdsetp)):"r" ((int) (fd))) - -#undef __FD_ISSET -#define __FD_ISSET(fd,fdsetp) (__extension__ ({ \ - unsigned char __result; \ - __asm__ __volatile__("btl %1,%2 ; setb %0" \ - :"=q" (__result) :"r" ((int) (fd)), \ - "m" (*(__kernel_fd_set *) (fdsetp))); \ - __result; })) - -#undef __FD_ZERO -#define __FD_ZERO(fdsetp) \ -do { \ - int __d0, __d1; \ - __asm__ __volatile__("cld ; rep ; stosl" \ - :"=m" (*(__kernel_fd_set *) (fdsetp)), \ - "=&c" (__d0), "=&D" (__d1) \ - :"a" (0), "1" (__FDSET_LONGS), \ - "2" ((__kernel_fd_set *) (fdsetp)) : "memory"); \ -} while (0) - -#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ - -#endif diff --git a/include/asm-i386/processor-cyrix.h b/include/asm-i386/processor-cyrix.h deleted file mode 100644 index 97568ada1f97..000000000000 --- a/include/asm-i386/processor-cyrix.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * NSC/Cyrix CPU indexed register access. Must be inlined instead of - * macros to ensure correct access ordering - * Access order is always 0x22 (=offset), 0x23 (=value) - * - * When using the old macros a line like - * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); - * gets expanded to: - * do { - * outb((CX86_CCR2), 0x22); - * outb((({ - * outb((CX86_CCR2), 0x22); - * inb(0x23); - * }) | 0x88), 0x23); - * } while (0); - * - * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23). - */ - -static inline u8 getCx86(u8 reg) -{ - outb(reg, 0x22); - return inb(0x23); -} - -static inline void setCx86(u8 reg, u8 data) -{ - outb(reg, 0x22); - outb(data, 0x23); -} diff --git a/include/asm-i386/processor-flags.h b/include/asm-i386/processor-flags.h deleted file mode 100644 index 5404e90edd57..000000000000 --- a/include/asm-i386/processor-flags.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef __ASM_I386_PROCESSOR_FLAGS_H -#define __ASM_I386_PROCESSOR_FLAGS_H -/* Various flags defined: can be included from assembler. 
*/ - -/* - * EFLAGS bits - */ -#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */ -#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */ -#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */ -#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */ -#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */ -#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */ -#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */ -#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */ -#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */ -#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */ -#define X86_EFLAGS_NT 0x00004000 /* Nested Task */ -#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */ -#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */ -#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */ -#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */ -#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ -#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ - -/* - * Basic CPU control in CR0 - */ -#define X86_CR0_PE 0x00000001 /* Protection Enable */ -#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */ -#define X86_CR0_EM 0x00000004 /* Emulation */ -#define X86_CR0_TS 0x00000008 /* Task Switched */ -#define X86_CR0_ET 0x00000010 /* Extension Type */ -#define X86_CR0_NE 0x00000020 /* Numeric Error */ -#define X86_CR0_WP 0x00010000 /* Write Protect */ -#define X86_CR0_AM 0x00040000 /* Alignment Mask */ -#define X86_CR0_NW 0x20000000 /* Not Write-through */ -#define X86_CR0_CD 0x40000000 /* Cache Disable */ -#define X86_CR0_PG 0x80000000 /* Paging */ - -/* - * Paging options in CR3 - */ -#define X86_CR3_PWT 0x00000008 /* Page Write Through */ -#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */ - -/* - * Intel CPU features in CR4 - */ -#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */ -#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */ -#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */ -#define X86_CR4_DE 0x00000008 /* enable debugging extensions */ -#define X86_CR4_PSE 0x00000010 /* enable page size extensions */ -#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */ -#define X86_CR4_MCE 0x00000040 /* Machine check enable */ -#define X86_CR4_PGE 0x00000080 /* enable global pages */ -#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */ -#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ -#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ -#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ - -/* - * x86-64 Task Priority Register, CR8 - */ -#define X86_CR8_TPR 0x00000007 /* task priority register */ - -/* - * AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h> - */ - -/* - * NSC/Cyrix CPU configuration register indexes - */ -#define CX86_PCR0 0x20 -#define CX86_GCR 0xb8 -#define CX86_CCR0 0xc0 -#define CX86_CCR1 0xc1 -#define CX86_CCR2 0xc2 -#define CX86_CCR3 0xc3 -#define CX86_CCR4 0xe8 -#define CX86_CCR5 0xe9 -#define CX86_CCR6 0xea -#define CX86_CCR7 0xeb -#define CX86_PCR1 0xf0 -#define CX86_DIR0 0xfe -#define CX86_DIR1 0xff -#define CX86_ARR_BASE 0xc4 -#define CX86_RCR_BASE 0xdc - -#endif /* __ASM_I386_PROCESSOR_FLAGS_H */ diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h deleted file mode 100644 index 3845fe72383e..000000000000 --- a/include/asm-i386/processor.h +++ /dev/null @@ -1,755 +0,0 @@ -/* - * include/asm-i386/processor.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -#ifndef __ASM_I386_PROCESSOR_H 
-#define __ASM_I386_PROCESSOR_H - -#include <asm/vm86.h> -#include <asm/math_emu.h> -#include <asm/segment.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/sigcontext.h> -#include <asm/cpufeature.h> -#include <asm/msr.h> -#include <asm/system.h> -#include <linux/cache.h> -#include <linux/threads.h> -#include <asm/percpu.h> -#include <linux/cpumask.h> -#include <linux/init.h> -#include <asm/processor-flags.h> - -/* flag for disabling the tsc */ -extern int tsc_disable; - -struct desc_struct { - unsigned long a,b; -}; - -#define desc_empty(desc) \ - (!((desc)->a | (desc)->b)) - -#define desc_equal(desc1, desc2) \ - (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) -/* - * Default implementation of macro that returns current - * instruction pointer ("program counter"). - */ -#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; }) - -/* - * CPU type and hardware bug flags. Kept separately for each CPU. - * Members of this structure are referenced in head.S, so think twice - * before touching them. [mj] - */ - -struct cpuinfo_x86 { - __u8 x86; /* CPU family */ - __u8 x86_vendor; /* CPU vendor */ - __u8 x86_model; - __u8 x86_mask; - char wp_works_ok; /* It doesn't on 386's */ - char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */ - char hard_math; - char rfu; - int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */ - unsigned long x86_capability[NCAPINTS]; - char x86_vendor_id[16]; - char x86_model_id[64]; - int x86_cache_size; /* in KB - valid for CPUS which support this - call */ - int x86_cache_alignment; /* In bytes */ - char fdiv_bug; - char f00f_bug; - char coma_bug; - char pad0; - int x86_power; - unsigned long loops_per_jiffy; -#ifdef CONFIG_SMP - cpumask_t llc_shared_map; /* cpus sharing the last level cache */ -#endif - unsigned char x86_max_cores; /* cpuid returned max cores value */ - unsigned char apicid; - unsigned short x86_clflush_size; -#ifdef CONFIG_SMP - unsigned char booted_cores; /* number of cores as seen by OS */ - __u8 phys_proc_id; /* Physical processor id. 
*/ - __u8 cpu_core_id; /* Core id */ -#endif -} __attribute__((__aligned__(SMP_CACHE_BYTES))); - -#define X86_VENDOR_INTEL 0 -#define X86_VENDOR_CYRIX 1 -#define X86_VENDOR_AMD 2 -#define X86_VENDOR_UMC 3 -#define X86_VENDOR_NEXGEN 4 -#define X86_VENDOR_CENTAUR 5 -#define X86_VENDOR_TRANSMETA 7 -#define X86_VENDOR_NSC 8 -#define X86_VENDOR_NUM 9 -#define X86_VENDOR_UNKNOWN 0xff - -/* - * capabilities of CPUs - */ - -extern struct cpuinfo_x86 boot_cpu_data; -extern struct cpuinfo_x86 new_cpu_data; -extern struct tss_struct doublefault_tss; -DECLARE_PER_CPU(struct tss_struct, init_tss); - -#ifdef CONFIG_SMP -extern struct cpuinfo_x86 cpu_data[]; -#define current_cpu_data cpu_data[smp_processor_id()] -#else -#define cpu_data (&boot_cpu_data) -#define current_cpu_data boot_cpu_data -#endif - -extern int cpu_llc_id[NR_CPUS]; -extern char ignore_fpu_irq; - -void __init cpu_detect(struct cpuinfo_x86 *c); - -extern void identify_boot_cpu(void); -extern void identify_secondary_cpu(struct cpuinfo_x86 *); -extern void print_cpu_info(struct cpuinfo_x86 *); -extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); -extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); -extern unsigned short num_cache_leaves; - -#ifdef CONFIG_X86_HT -extern void detect_ht(struct cpuinfo_x86 *c); -#else -static inline void detect_ht(struct cpuinfo_x86 *c) {} -#endif - -static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, - unsigned int *ecx, unsigned int *edx) -{ - /* ecx is often an input as well as an output. */ - __asm__("cpuid" - : "=a" (*eax), - "=b" (*ebx), - "=c" (*ecx), - "=d" (*edx) - : "0" (*eax), "2" (*ecx)); -} - -#define load_cr3(pgdir) write_cr3(__pa(pgdir)) - -/* - * Save the cr4 feature set we're using (ie - * Pentium 4MB enable and PPro Global page - * enable), so that any CPU's that boot up - * after us can get the correct flags. - */ -extern unsigned long mmu_cr4_features; - -static inline void set_in_cr4 (unsigned long mask) -{ - unsigned cr4; - mmu_cr4_features |= mask; - cr4 = read_cr4(); - cr4 |= mask; - write_cr4(cr4); -} - -static inline void clear_in_cr4 (unsigned long mask) -{ - unsigned cr4; - mmu_cr4_features &= ~mask; - cr4 = read_cr4(); - cr4 &= ~mask; - write_cr4(cr4); -} - -/* Stop speculative execution */ -static inline void sync_core(void) -{ - int tmp; - asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory"); -} - -static inline void __monitor(const void *eax, unsigned long ecx, - unsigned long edx) -{ - /* "monitor %eax,%ecx,%edx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc8;" - : :"a" (eax), "c" (ecx), "d"(edx)); -} - -static inline void __mwait(unsigned long eax, unsigned long ecx) -{ - /* "mwait %eax,%ecx;" */ - asm volatile( - ".byte 0x0f,0x01,0xc9;" - : :"a" (eax), "c" (ecx)); -} - -extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); - -/* from system description table in BIOS. Mostly for MCA use, but -others may find it useful. */ -extern unsigned int machine_id; -extern unsigned int machine_submodel_id; -extern unsigned int BIOS_revision; -extern unsigned int mca_pentium_flag; - -/* Boot loader type from the setup header */ -extern int bootloader_type; - -/* - * User space process size: 3GB (default). - */ -#define TASK_SIZE (PAGE_OFFSET) - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. 
- */ -#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) - -#define HAVE_ARCH_PICK_MMAP_LAYOUT - -extern void hard_disable_TSC(void); -extern void disable_TSC(void); -extern void hard_enable_TSC(void); - -/* - * Size of io_bitmap. - */ -#define IO_BITMAP_BITS 65536 -#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8) -#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) -#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap) -#define INVALID_IO_BITMAP_OFFSET 0x8000 -#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000 - -struct i387_fsave_struct { - long cwd; - long swd; - long twd; - long fip; - long fcs; - long foo; - long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - long status; /* software status information */ -}; - -struct i387_fxsave_struct { - unsigned short cwd; - unsigned short swd; - unsigned short twd; - unsigned short fop; - long fip; - long fcs; - long foo; - long fos; - long mxcsr; - long mxcsr_mask; - long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ - long padding[56]; -} __attribute__ ((aligned (16))); - -struct i387_soft_struct { - long cwd; - long swd; - long twd; - long fip; - long fcs; - long foo; - long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ - unsigned char ftop, changed, lookahead, no_update, rm, alimit; - struct info *info; - unsigned long entry_eip; -}; - -union i387_union { - struct i387_fsave_struct fsave; - struct i387_fxsave_struct fxsave; - struct i387_soft_struct soft; -}; - -typedef struct { - unsigned long seg; -} mm_segment_t; - -struct thread_struct; - -/* This is the TSS defined by the hardware. */ -struct i386_hw_tss { - unsigned short back_link,__blh; - unsigned long esp0; - unsigned short ss0,__ss0h; - unsigned long esp1; - unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */ - unsigned long esp2; - unsigned short ss2,__ss2h; - unsigned long __cr3; - unsigned long eip; - unsigned long eflags; - unsigned long eax,ecx,edx,ebx; - unsigned long esp; - unsigned long ebp; - unsigned long esi; - unsigned long edi; - unsigned short es, __esh; - unsigned short cs, __csh; - unsigned short ss, __ssh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; - unsigned short ldt, __ldth; - unsigned short trace, io_bitmap_base; -} __attribute__((packed)); - -struct tss_struct { - struct i386_hw_tss x86_tss; - - /* - * The extra 1 is there because the CPU will access an - * additional byte beyond the end of the IO permission - * bitmap. The extra byte must be all 1 bits, and must - * be within the limit. - */ - unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; - /* - * Cache the current maximum and the last task that used the bitmap: - */ - unsigned long io_bitmap_max; - struct thread_struct *io_bitmap_owner; - /* - * pads the TSS to be cacheline-aligned (size is 0x100) - */ - unsigned long __cacheline_filler[35]; - /* - * .. and then another 0x100 bytes for emergency kernel stack - */ - unsigned long stack[64]; -} __attribute__((packed)); - -#define ARCH_MIN_TASKALIGN 16 - -struct thread_struct { -/* cached TLS descriptors. 
*/ - struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; - unsigned long esp0; - unsigned long sysenter_cs; - unsigned long eip; - unsigned long esp; - unsigned long fs; - unsigned long gs; -/* Hardware debugging registers */ - unsigned long debugreg[8]; /* %%db0-7 debug registers */ -/* fault info */ - unsigned long cr2, trap_no, error_code; -/* floating point info */ - union i387_union i387; -/* virtual 86 mode info */ - struct vm86_struct __user * vm86_info; - unsigned long screen_bitmap; - unsigned long v86flags, v86mask, saved_esp0; - unsigned int saved_fs, saved_gs; -/* IO permissions */ - unsigned long *io_bitmap_ptr; - unsigned long iopl; -/* max allowed port in the bitmap, in bytes: */ - unsigned long io_bitmap_max; -}; - -#define INIT_THREAD { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ - .vm86_info = NULL, \ - .sysenter_cs = __KERNEL_CS, \ - .io_bitmap_ptr = NULL, \ - .fs = __KERNEL_PERCPU, \ -} - -/* - * Note that the .io_bitmap member must be extra-big. This is because - * the CPU will access an additional byte beyond the end of the IO - * permission bitmap. The extra byte must be all 1 bits, and must - * be within the limit. - */ -#define INIT_TSS { \ - .x86_tss = { \ - .esp0 = sizeof(init_stack) + (long)&init_stack, \ - .ss0 = __KERNEL_DS, \ - .ss1 = __KERNEL_CS, \ - .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ - }, \ - .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ -} - -#define start_thread(regs, new_eip, new_esp) do { \ - __asm__("movl %0,%%gs": :"r" (0)); \ - regs->xfs = 0; \ - set_fs(USER_DS); \ - regs->xds = __USER_DS; \ - regs->xes = __USER_DS; \ - regs->xss = __USER_DS; \ - regs->xcs = __USER_CS; \ - regs->eip = new_eip; \ - regs->esp = new_esp; \ -} while (0) - -/* Forward declaration, a strange C thing */ -struct task_struct; -struct mm_struct; - -/* Free all resources held by a thread. */ -extern void release_thread(struct task_struct *); - -/* Prepare to copy thread state - unlazy all lazy status */ -extern void prepare_to_copy(struct task_struct *tsk); - -/* - * create a kernel thread without removing it from tasklists - */ -extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); - -extern unsigned long thread_saved_pc(struct task_struct *tsk); -void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack); - -unsigned long get_wchan(struct task_struct *p); - -#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) -#define KSTK_TOP(info) \ -({ \ - unsigned long *__ptr = (unsigned long *)(info); \ - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ -}) - -/* - * The below -8 is to reserve 8 bytes on top of the ring0 stack. - * This is necessary to guarantee that the entire "struct pt_regs" - * is accessable even if the CPU haven't stored the SS/ESP registers - * on the stack (interrupt gate does not save these registers - * when switching to the same priv ring). - * Therefore beware: accessing the xss/esp fields of the - * "struct pt_regs" is possible, but they may contain the - * completely wrong values. 
- */ -#define task_pt_regs(task) \ -({ \ - struct pt_regs *__regs__; \ - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ - __regs__ - 1; \ -}) - -#define KSTK_EIP(task) (task_pt_regs(task)->eip) -#define KSTK_ESP(task) (task_pt_regs(task)->esp) - - -struct microcode_header { - unsigned int hdrver; - unsigned int rev; - unsigned int date; - unsigned int sig; - unsigned int cksum; - unsigned int ldrver; - unsigned int pf; - unsigned int datasize; - unsigned int totalsize; - unsigned int reserved[3]; -}; - -struct microcode { - struct microcode_header hdr; - unsigned int bits[0]; -}; - -typedef struct microcode microcode_t; -typedef struct microcode_header microcode_header_t; - -/* microcode format is extended from prescott processors */ -struct extended_signature { - unsigned int sig; - unsigned int pf; - unsigned int cksum; -}; - -struct extended_sigtable { - unsigned int count; - unsigned int cksum; - unsigned int reserved[3]; - struct extended_signature sigs[0]; -}; - -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */ -static inline void rep_nop(void) -{ - __asm__ __volatile__("rep;nop": : :"memory"); -} - -#define cpu_relax() rep_nop() - -static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread) -{ - tss->x86_tss.esp0 = thread->esp0; - /* This can only happen when SEP is enabled, no need to test "SEP"arately */ - if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) { - tss->x86_tss.ss1 = thread->sysenter_cs; - wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); - } -} - - -static inline unsigned long native_get_debugreg(int regno) -{ - unsigned long val = 0; /* Damn you, gcc! */ - - switch (regno) { - case 0: - asm("movl %%db0, %0" :"=r" (val)); break; - case 1: - asm("movl %%db1, %0" :"=r" (val)); break; - case 2: - asm("movl %%db2, %0" :"=r" (val)); break; - case 3: - asm("movl %%db3, %0" :"=r" (val)); break; - case 6: - asm("movl %%db6, %0" :"=r" (val)); break; - case 7: - asm("movl %%db7, %0" :"=r" (val)); break; - default: - BUG(); - } - return val; -} - -static inline void native_set_debugreg(int regno, unsigned long value) -{ - switch (regno) { - case 0: - asm("movl %0,%%db0" : /* no output */ :"r" (value)); - break; - case 1: - asm("movl %0,%%db1" : /* no output */ :"r" (value)); - break; - case 2: - asm("movl %0,%%db2" : /* no output */ :"r" (value)); - break; - case 3: - asm("movl %0,%%db3" : /* no output */ :"r" (value)); - break; - case 6: - asm("movl %0,%%db6" : /* no output */ :"r" (value)); - break; - case 7: - asm("movl %0,%%db7" : /* no output */ :"r" (value)); - break; - default: - BUG(); - } -} - -/* - * Set IOPL bits in EFLAGS from given mask - */ -static inline void native_set_iopl_mask(unsigned mask) -{ - unsigned int reg; - __asm__ __volatile__ ("pushfl;" - "popl %0;" - "andl %1, %0;" - "orl %2, %0;" - "pushl %0;" - "popfl" - : "=&r" (reg) - : "i" (~X86_EFLAGS_IOPL), "r" (mask)); -} - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define paravirt_enabled() 0 -#define __cpuid native_cpuid - -static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) -{ - native_load_esp0(tss, thread); -} - -/* - * These special macros can be used to get or set a debugging register - */ -#define get_debugreg(var, register) \ - (var) = native_get_debugreg(register) -#define set_debugreg(value, register) \ - native_set_debugreg(register, value) - -#define set_iopl_mask native_set_iopl_mask -#endif /* CONFIG_PARAVIRT */ - -/* - * Generic CPUID function - * clear %ecx since 
some cpus (Cyrix MII) do not set or clear %ecx - * resulting in stale register contents being returned. - */ -static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) -{ - *eax = op; - *ecx = 0; - __cpuid(eax, ebx, ecx, edx); -} - -/* Some CPUID calls want 'count' to be placed in ecx */ -static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, - int *edx) -{ - *eax = op; - *ecx = count; - __cpuid(eax, ebx, ecx, edx); -} - -/* - * CPUID functions returning a single datum - */ -static inline unsigned int cpuid_eax(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return eax; -} -static inline unsigned int cpuid_ebx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return ebx; -} -static inline unsigned int cpuid_ecx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return ecx; -} -static inline unsigned int cpuid_edx(unsigned int op) -{ - unsigned int eax, ebx, ecx, edx; - - cpuid(op, &eax, &ebx, &ecx, &edx); - return edx; -} - -/* generic versions from gas */ -#define GENERIC_NOP1 ".byte 0x90\n" -#define GENERIC_NOP2 ".byte 0x89,0xf6\n" -#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n" -#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n" -#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4 -#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n" -#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7 - -/* Opteron nops */ -#define K8_NOP1 GENERIC_NOP1 -#define K8_NOP2 ".byte 0x66,0x90\n" -#define K8_NOP3 ".byte 0x66,0x66,0x90\n" -#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n" -#define K8_NOP5 K8_NOP3 K8_NOP2 -#define K8_NOP6 K8_NOP3 K8_NOP3 -#define K8_NOP7 K8_NOP4 K8_NOP3 -#define K8_NOP8 K8_NOP4 K8_NOP4 - -/* K7 nops */ -/* uses eax dependencies (arbitary choice) */ -#define K7_NOP1 GENERIC_NOP1 -#define K7_NOP2 ".byte 0x8b,0xc0\n" -#define K7_NOP3 ".byte 0x8d,0x04,0x20\n" -#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n" -#define K7_NOP5 K7_NOP4 ASM_NOP1 -#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n" -#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n" -#define K7_NOP8 K7_NOP7 ASM_NOP1 - -#ifdef CONFIG_MK8 -#define ASM_NOP1 K8_NOP1 -#define ASM_NOP2 K8_NOP2 -#define ASM_NOP3 K8_NOP3 -#define ASM_NOP4 K8_NOP4 -#define ASM_NOP5 K8_NOP5 -#define ASM_NOP6 K8_NOP6 -#define ASM_NOP7 K8_NOP7 -#define ASM_NOP8 K8_NOP8 -#elif defined(CONFIG_MK7) -#define ASM_NOP1 K7_NOP1 -#define ASM_NOP2 K7_NOP2 -#define ASM_NOP3 K7_NOP3 -#define ASM_NOP4 K7_NOP4 -#define ASM_NOP5 K7_NOP5 -#define ASM_NOP6 K7_NOP6 -#define ASM_NOP7 K7_NOP7 -#define ASM_NOP8 K7_NOP8 -#else -#define ASM_NOP1 GENERIC_NOP1 -#define ASM_NOP2 GENERIC_NOP2 -#define ASM_NOP3 GENERIC_NOP3 -#define ASM_NOP4 GENERIC_NOP4 -#define ASM_NOP5 GENERIC_NOP5 -#define ASM_NOP6 GENERIC_NOP6 -#define ASM_NOP7 GENERIC_NOP7 -#define ASM_NOP8 GENERIC_NOP8 -#endif - -#define ASM_NOP_MAX 8 - -/* Prefetch instructions for Pentium III and AMD Athlon */ -/* It's not worth to care about 3dnow! prefetches for the K6 - because they are microcoded there and very slow. - However we don't do prefetches for pre XP Athlons currently - That should be fixed. 
*/ -#define ARCH_HAS_PREFETCH -static inline void prefetch(const void *x) -{ - alternative_input(ASM_NOP4, - "prefetchnta (%1)", - X86_FEATURE_XMM, - "r" (x)); -} - -#define ARCH_HAS_PREFETCH -#define ARCH_HAS_PREFETCHW -#define ARCH_HAS_SPINLOCK_PREFETCH - -/* 3dnow! prefetch to get an exclusive cache line. Useful for - spinlocks to avoid one state transition in the cache coherency protocol. */ -static inline void prefetchw(const void *x) -{ - alternative_input(ASM_NOP4, - "prefetchw (%1)", - X86_FEATURE_3DNOW, - "r" (x)); -} -#define spin_lock_prefetch(x) prefetchw(x) - -extern void select_idle_routine(const struct cpuinfo_x86 *c); - -#define cache_line_size() (boot_cpu_data.x86_cache_alignment) - -extern unsigned long boot_option_idle_override; -extern void enable_sep_cpu(void); -extern int sysenter_setup(void); - -/* Defined in head.S */ -extern struct Xgt_desc_struct early_gdt_descr; - -extern void cpu_set_gdt(int); -extern void switch_to_new_gdt(void); -extern void cpu_init(void); -extern void init_gdt(int cpu); - -extern int force_mwait; - -#endif /* __ASM_I386_PROCESSOR_H */ diff --git a/include/asm-i386/ptrace-abi.h b/include/asm-i386/ptrace-abi.h deleted file mode 100644 index a44901817a26..000000000000 --- a/include/asm-i386/ptrace-abi.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef I386_PTRACE_ABI_H -#define I386_PTRACE_ABI_H - -#define EBX 0 -#define ECX 1 -#define EDX 2 -#define ESI 3 -#define EDI 4 -#define EBP 5 -#define EAX 6 -#define DS 7 -#define ES 8 -#define FS 9 -#define GS 10 -#define ORIG_EAX 11 -#define EIP 12 -#define CS 13 -#define EFL 14 -#define UESP 15 -#define SS 16 -#define FRAME_SIZE 17 - -/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ -#define PTRACE_GETREGS 12 -#define PTRACE_SETREGS 13 -#define PTRACE_GETFPREGS 14 -#define PTRACE_SETFPREGS 15 -#define PTRACE_GETFPXREGS 18 -#define PTRACE_SETFPXREGS 19 - -#define PTRACE_OLDSETOPTIONS 21 - -#define PTRACE_GET_THREAD_AREA 25 -#define PTRACE_SET_THREAD_AREA 26 - -#define PTRACE_SYSEMU 31 -#define PTRACE_SYSEMU_SINGLESTEP 32 - -#endif diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h deleted file mode 100644 index 6002597b9e12..000000000000 --- a/include/asm-i386/ptrace.h +++ /dev/null @@ -1,63 +0,0 @@ -#ifndef _I386_PTRACE_H -#define _I386_PTRACE_H - -#include <asm/ptrace-abi.h> - -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct pt_regs { - long ebx; - long ecx; - long edx; - long esi; - long edi; - long ebp; - long eax; - int xds; - int xes; - int xfs; - /* int xgs; */ - long orig_eax; - long eip; - int xcs; - long eflags; - long esp; - int xss; -}; - -#ifdef __KERNEL__ - -#include <asm/vm86.h> -#include <asm/segment.h> - -struct task_struct; -extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code); - -/* - * user_mode_vm(regs) determines whether a register set came from user mode. - * This is true if V8086 mode was enabled OR if the register set was from - * protected mode with RPL-3 CS value. This tricky test checks that with - * one comparison. Many places in the kernel can bypass this full check - * if they have already ruled out V8086 mode, so user_mode(regs) can be used. 
- */ -static inline int user_mode(struct pt_regs *regs) -{ - return (regs->xcs & SEGMENT_RPL_MASK) == USER_RPL; -} -static inline int user_mode_vm(struct pt_regs *regs) -{ - return ((regs->xcs & SEGMENT_RPL_MASK) | (regs->eflags & VM_MASK)) >= USER_RPL; -} -static inline int v8086_mode(struct pt_regs *regs) -{ - return (regs->eflags & VM_MASK); -} - -#define instruction_pointer(regs) ((regs)->eip) -#define regs_return_value(regs) ((regs)->eax) - -extern unsigned long profile_pc(struct pt_regs *regs); -#endif /* __KERNEL__ */ - -#endif diff --git a/include/asm-i386/reboot.h b/include/asm-i386/reboot.h deleted file mode 100644 index e9e3ffc22c07..000000000000 --- a/include/asm-i386/reboot.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _ASM_REBOOT_H -#define _ASM_REBOOT_H - -struct pt_regs; - -struct machine_ops -{ - void (*restart)(char *cmd); - void (*halt)(void); - void (*power_off)(void); - void (*shutdown)(void); - void (*crash_shutdown)(struct pt_regs *); - void (*emergency_restart)(void); -}; - -extern struct machine_ops machine_ops; - -void machine_real_restart(unsigned char *code, int length); - -#endif /* _ASM_REBOOT_H */ diff --git a/include/asm-i386/reboot_fixups.h b/include/asm-i386/reboot_fixups.h deleted file mode 100644 index 0cb7d87c2b68..000000000000 --- a/include/asm-i386/reboot_fixups.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _LINUX_REBOOT_FIXUPS_H -#define _LINUX_REBOOT_FIXUPS_H - -extern void mach_reboot_fixups(void); - -#endif /* _LINUX_REBOOT_FIXUPS_H */ diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h deleted file mode 100644 index 618feb98f9f5..000000000000 --- a/include/asm-i386/required-features.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _ASM_REQUIRED_FEATURES_H -#define _ASM_REQUIRED_FEATURES_H 1 - -/* Define minimum CPUID feature set for kernel These bits are checked - really early to actually display a visible error message before the - kernel dies. Make sure to assign features to the proper mask! - - Some requirements that are not in CPUID yet are also in the - CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too. 
- - The real information is in arch/i386/Kconfig.cpu, this just converts - the CONFIGs into a bitmask */ - -#ifndef CONFIG_MATH_EMULATION -# define NEED_FPU (1<<(X86_FEATURE_FPU & 31)) -#else -# define NEED_FPU 0 -#endif - -#ifdef CONFIG_X86_PAE -# define NEED_PAE (1<<(X86_FEATURE_PAE & 31)) -#else -# define NEED_PAE 0 -#endif - -#ifdef CONFIG_X86_CMOV -# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31)) -#else -# define NEED_CMOV 0 -#endif - -#ifdef CONFIG_X86_PAE -# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31)) -#else -# define NEED_CX8 0 -#endif - -#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8) - -#ifdef CONFIG_X86_USE_3DNOW -# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31)) -#else -# define NEED_3DNOW 0 -#endif - -#define REQUIRED_MASK1 (NEED_3DNOW) - -#define REQUIRED_MASK2 0 -#define REQUIRED_MASK3 0 -#define REQUIRED_MASK4 0 -#define REQUIRED_MASK5 0 -#define REQUIRED_MASK6 0 -#define REQUIRED_MASK7 0 - -#endif diff --git a/include/asm-i386/resource.h b/include/asm-i386/resource.h deleted file mode 100644 index 6c1ea37c7718..000000000000 --- a/include/asm-i386/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _I386_RESOURCE_H -#define _I386_RESOURCE_H - -#include <asm-generic/resource.h> - -#endif diff --git a/include/asm-i386/resume-trace.h b/include/asm-i386/resume-trace.h deleted file mode 100644 index ec9cfd656230..000000000000 --- a/include/asm-i386/resume-trace.h +++ /dev/null @@ -1,13 +0,0 @@ -#define TRACE_RESUME(user) do { \ - if (pm_trace_enabled) { \ - void *tracedata; \ - asm volatile("movl $1f,%0\n" \ - ".section .tracedata,\"a\"\n" \ - "1:\t.word %c1\n" \ - "\t.long %c2\n" \ - ".previous" \ - :"=r" (tracedata) \ - : "i" (__LINE__), "i" (__FILE__)); \ - generate_resume_trace(tracedata, user); \ - } \ -} while (0) diff --git a/include/asm-i386/rtc.h b/include/asm-i386/rtc.h deleted file mode 100644 index ffd02109a0e5..000000000000 --- a/include/asm-i386/rtc.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef _I386_RTC_H -#define _I386_RTC_H - -/* - * x86 uses the default access methods for the RTC. - */ - -#include <asm-generic/rtc.h> - -#endif diff --git a/include/asm-i386/rwlock.h b/include/asm-i386/rwlock.h deleted file mode 100644 index c3e5db32fa48..000000000000 --- a/include/asm-i386/rwlock.h +++ /dev/null @@ -1,25 +0,0 @@ -/* include/asm-i386/rwlock.h - * - * Helpers used by both rw spinlocks and rw semaphores. - * - * Based in part on code from semaphore.h and - * spinlock.h Copyright 1996 Linus Torvalds. - * - * Copyright 1999 Red Hat, Inc. - * - * Written by Benjamin LaHaise. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#ifndef _ASM_I386_RWLOCK_H -#define _ASM_I386_RWLOCK_H - -#define RW_LOCK_BIAS 0x01000000 -#define RW_LOCK_BIAS_STR "0x01000000" - -/* Code is in asm-i386/spinlock.h */ - -#endif diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h deleted file mode 100644 index 041906f3c6df..000000000000 --- a/include/asm-i386/rwsem.h +++ /dev/null @@ -1,258 +0,0 @@ -/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ - * - * Written by David Howells (dhowells@redhat.com). 
- * - * Derived from asm-i386/semaphore.h - * - * - * The MSW of the count is the negated number of active writers and waiting - * lockers, and the LSW is the total number of active locks - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an - * uncontended lock. This can be determined because XADD returns the old value. - * Readers increment by 1 and see a positive value when uncontended, negative - * if there are writers (and maybe) readers waiting (in which case it goes to - * sleep). - * - * The value of WAITING_BIAS supports up to 32766 waiting processes. This can - * be extended to 65534 by manually checking the whole MSW rather than relying - * on the S flag. - * - * The value of ACTIVE_BIAS supports up to 65535 active processes. - * - * This should be totally fair - if anything is waiting, a process that wants a - * lock will go to the back of the queue. When the currently active lock is - * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consequtive readers at the - * front, then they'll all be woken up, but no other readers will be. - */ - -#ifndef _I386_RWSEM_H -#define _I386_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" -#endif - -#ifdef __KERNEL__ - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/lockdep.h> - -struct rwsem_waiter; - -extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *)); -extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *sem)); - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct list_head wait_list; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -}; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - - -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \ - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern void __init_rwsem(struct rw_semaphore *sem, const char *name, - struct lock_class_key *key); - -#define init_rwsem(sem) \ -do { \ - static struct lock_class_key __key; \ - \ - __init_rwsem((sem), #sem, &__key); \ -} while (0) - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " jns 1f\n" - " call call_rwsem_down_read_failed\n" - "1:\n\t" - "# ending down_read\n\t" - : "+m" (sem->count) - : "a" (sem) - : "memory", "cc"); -} - -/* - * trylock for reading -- returns 1 if successful, 0 if contention - */ -static inline int __down_read_trylock(struct rw_semaphore 
*sem) -{ - __s32 result, tmp; - __asm__ __volatile__( - "# beginning __down_read_trylock\n\t" - " movl %0,%1\n\t" - "1:\n\t" - " movl %1,%2\n\t" - " addl %3,%2\n\t" - " jle 2f\n\t" -LOCK_PREFIX " cmpxchgl %2,%0\n\t" - " jnz 1b\n\t" - "2:\n\t" - "# ending __down_read_trylock\n\t" - : "+m" (sem->count), "=&a" (result), "=&r" (tmp) - : "i" (RWSEM_ACTIVE_READ_BIAS) - : "memory", "cc"); - return result>=0 ? 1 : 0; -} - -/* - * lock for writing - */ -static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) -{ - int tmp; - - tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ - " testl %%edx,%%edx\n\t" /* was the count 0 before? */ - " jz 1f\n" - " call call_rwsem_down_write_failed\n" - "1:\n" - "# ending down_write" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); -} - -static inline void __down_write(struct rw_semaphore *sem) -{ - __down_write_nested(sem, 0); -} - -/* - * trylock for writing -- returns 1 if successful, 0 if contention - */ -static inline int __down_write_trylock(struct rw_semaphore *sem) -{ - signed long ret = cmpxchg(&sem->count, - RWSEM_UNLOCKED_VALUE, - RWSEM_ACTIVE_WRITE_BIAS); - if (ret == RWSEM_UNLOCKED_VALUE) - return 1; - return 0; -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - __s32 tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xadd %%edx,(%%eax)\n\t" /* subtracts 1, returns the old value */ - " jns 1f\n\t" - " call call_rwsem_wake\n" - "1:\n" - "# ending __up_read\n" - : "+m" (sem->count), "=d" (tmp) - : "a" (sem), "1" (tmp) - : "memory", "cc"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __up_write\n\t" - " movl %2,%%edx\n\t" -LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jz 1f\n" - " call call_rwsem_wake\n" - "1:\n\t" - "# ending __up_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS) - : "memory", "cc", "edx"); -} - -/* - * downgrade write lock to read lock - */ -static inline void __downgrade_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __downgrade_write\n\t" -LOCK_PREFIX " addl %2,(%%eax)\n\t" /* transitions 0xZZZZ0001 -> 0xYYYY0001 */ - " jns 1f\n\t" - " call call_rwsem_downgrade_wake\n" - "1:\n\t" - "# ending __downgrade_write\n" - : "+m" (sem->count) - : "a" (sem), "i" (-RWSEM_WAITING_BIAS) - : "memory", "cc"); -} - -/* - * implement atomic add functionality - */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) -{ - __asm__ __volatile__( -LOCK_PREFIX "addl %1,%0" - : "+m" (sem->count) - : "ir" (delta)); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int tmp = delta; - - __asm__ __volatile__( -LOCK_PREFIX "xadd %0,%1" - : "+r" (tmp), "+m" (sem->count) - : : "memory"); - - return tmp+delta; -} - -static inline int rwsem_is_locked(struct rw_semaphore *sem) -{ - return (sem->count != 0); -} - -#endif /* __KERNEL__ */ -#endif /* _I386_RWSEM_H */ diff --git a/include/asm-i386/scatterlist.h b/include/asm-i386/scatterlist.h deleted file mode 100644 index d7e45a8f1aae..000000000000 --- a/include/asm-i386/scatterlist.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef _I386_SCATTERLIST_H 
-#define _I386_SCATTERLIST_H - -#include <asm/types.h> - -struct scatterlist { - struct page *page; - unsigned int offset; - dma_addr_t dma_address; - unsigned int length; -}; - -/* These macros should be used after a pci_map_sg call has been done - * to get bus addresses of each of the SG entries and their lengths. - * You should only work with the number of sg entries pci_map_sg - * returns. - */ -#define sg_dma_address(sg) ((sg)->dma_address) -#define sg_dma_len(sg) ((sg)->length) - -#define ISA_DMA_THRESHOLD (0x00ffffff) - -#endif /* !(_I386_SCATTERLIST_H) */ diff --git a/include/asm-i386/seccomp.h b/include/asm-i386/seccomp.h deleted file mode 100644 index 18da19e89bff..000000000000 --- a/include/asm-i386/seccomp.h +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _ASM_SECCOMP_H - -#include <linux/thread_info.h> - -#ifdef TIF_32BIT -#error "unexpected TIF_32BIT on i386" -#endif - -#include <linux/unistd.h> - -#define __NR_seccomp_read __NR_read -#define __NR_seccomp_write __NR_write -#define __NR_seccomp_exit __NR_exit -#define __NR_seccomp_sigreturn __NR_sigreturn - -#endif /* _ASM_SECCOMP_H */ diff --git a/include/asm-i386/sections.h b/include/asm-i386/sections.h deleted file mode 100644 index 2dcbb92918b2..000000000000 --- a/include/asm-i386/sections.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef _I386_SECTIONS_H -#define _I386_SECTIONS_H - -/* nothing to see, move along */ -#include <asm-generic/sections.h> - -#endif diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h deleted file mode 100644 index 597a47c2515f..000000000000 --- a/include/asm-i386/segment.h +++ /dev/null @@ -1,148 +0,0 @@ -#ifndef _ASM_SEGMENT_H -#define _ASM_SEGMENT_H - -/* - * The layout of the per-CPU GDT under Linux: - * - * 0 - null - * 1 - reserved - * 2 - reserved - * 3 - reserved - * - * 4 - unused <==== new cacheline - * 5 - unused - * - * ------- start of TLS (Thread-Local Storage) segments: - * - * 6 - TLS segment #1 [ glibc's TLS segment ] - * 7 - TLS segment #2 [ Wine's %fs Win32 segment ] - * 8 - TLS segment #3 - * 9 - reserved - * 10 - reserved - * 11 - reserved - * - * ------- start of kernel segments: - * - * 12 - kernel code segment <==== new cacheline - * 13 - kernel data segment - * 14 - default user CS - * 15 - default user DS - * 16 - TSS - * 17 - LDT - * 18 - PNPBIOS support (16->32 gate) - * 19 - PNPBIOS support - * 20 - PNPBIOS support - * 21 - PNPBIOS support - * 22 - PNPBIOS support - * 23 - APM BIOS support - * 24 - APM BIOS support - * 25 - APM BIOS support - * - * 26 - ESPFIX small SS - * 27 - per-cpu [ offset to per-cpu data area ] - * 28 - unused - * 29 - unused - * 30 - unused - * 31 - TSS for double fault handler - */ -#define GDT_ENTRY_TLS_ENTRIES 3 -#define GDT_ENTRY_TLS_MIN 6 -#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) - -#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) - -#define GDT_ENTRY_DEFAULT_USER_CS 14 -#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3) - -#define GDT_ENTRY_DEFAULT_USER_DS 15 -#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3) - -#define GDT_ENTRY_KERNEL_BASE 12 - -#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) -#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) - -#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) -#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) - -#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) -#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5) - -#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6) -#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11) - -#define 
GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) -#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) - -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) -#ifdef CONFIG_SMP -#define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) -#else -#define __KERNEL_PERCPU 0 -#endif - -#define GDT_ENTRY_DOUBLEFAULT_TSS 31 - -/* - * The GDT has 32 entries - */ -#define GDT_ENTRIES 32 -#define GDT_SIZE (GDT_ENTRIES * 8) - -/* Simple and small GDT entries for booting only */ - -#define GDT_ENTRY_BOOT_CS 2 -#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8) - -#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1) -#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8) - -/* The PnP BIOS entries in the GDT */ -#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0) -#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1) -#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2) -#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3) -#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4) - -/* The PnP BIOS selectors */ -#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */ -#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */ -#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */ -#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */ -#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */ - -/* - * The interrupt descriptor table has room for 256 idt's, - * the global descriptor table is dependent on the number - * of tasks we can have.. - */ -#define IDT_ENTRIES 256 - -/* Bottom two bits of selector give the ring privilege level */ -#define SEGMENT_RPL_MASK 0x3 -/* Bit 2 is table indicator (LDT/GDT) */ -#define SEGMENT_TI_MASK 0x4 - -/* User mode is privilege level 3 */ -#define USER_RPL 0x3 -/* LDT segment has TI set, GDT has it cleared */ -#define SEGMENT_LDT 0x4 -#define SEGMENT_GDT 0x0 - -#ifndef CONFIG_PARAVIRT -#define get_kernel_rpl() 0 -#endif -/* - * Matching rules for certain types of segments. - */ - -/* Matches only __KERNEL_CS, ignoring PnP / USER / APM segments */ -#define SEGMENT_IS_KERNEL_CODE(x) (((x) & 0xfc) == GDT_ENTRY_KERNEL_CS * 8) - -/* Matches __KERNEL_CS and __USER_CS (they must be 2 entries apart) */ -#define SEGMENT_IS_FLAT_CODE(x) (((x) & 0xec) == GDT_ENTRY_KERNEL_CS * 8) - -/* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) - -#endif diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h deleted file mode 100644 index 4e34a468c383..000000000000 --- a/include/asm-i386/semaphore.h +++ /dev/null @@ -1,176 +0,0 @@ -#ifndef _I386_SEMAPHORE_H -#define _I386_SEMAPHORE_H - -#include <linux/linkage.h> - -#ifdef __KERNEL__ - -/* - * SMP- and interrupt-safe semaphores.. - * - * (C) Copyright 1996 Linus Torvalds - * - * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in - * the original code and to make semaphore waits - * interruptible so that processes waiting on - * semaphores can be killed. - * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper - * functions in asm/sempahore-helper.h while fixing a - * potential and subtle race discovered by Ulrich Schmid - * in down_interruptible(). Since I started to play here I - * also implemented the `trylock' semaphore operation. - * 1999-07-02 Artur Skawina <skawina@geocities.com> - * Optimized "0(ecx)" -> "(ecx)" (the assembler does not - * do this). Changed calling sequences from push/jmp to - * traditional call/ret. 
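The segment.h layout above fixes each descriptor's slot in the GDT; the selector values the kernel actually loads are just the slot number shifted left by three with the requested privilege level in the low two bits. A small stand-alone sketch (the selector() helper is only for illustration) that reproduces the __KERNEL_CS/__USER_CS arithmetic from those GDT_ENTRY_* constants:

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE       12
#define GDT_ENTRY_KERNEL_CS         (GDT_ENTRY_KERNEL_BASE + 0)
#define GDT_ENTRY_KERNEL_DS         (GDT_ENTRY_KERNEL_BASE + 1)
#define GDT_ENTRY_DEFAULT_USER_CS   14
#define GDT_ENTRY_DEFAULT_USER_DS   15

/* selector = (GDT index << 3) | table indicator (0 = GDT) | RPL */
static unsigned int selector(unsigned int index, unsigned int rpl)
{
        return index * 8 + rpl;
}

int main(void)
{
        /* kernel segments run at ring 0, user segments at ring 3 */
        printf("__KERNEL_CS = 0x%02x\n", selector(GDT_ENTRY_KERNEL_CS, 0));       /* 0x60 */
        printf("__KERNEL_DS = 0x%02x\n", selector(GDT_ENTRY_KERNEL_DS, 0));       /* 0x68 */
        printf("__USER_CS   = 0x%02x\n", selector(GDT_ENTRY_DEFAULT_USER_CS, 3)); /* 0x73 */
        printf("__USER_DS   = 0x%02x\n", selector(GDT_ENTRY_DEFAULT_USER_DS, 3)); /* 0x7b */
        return 0;
}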
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de> - * Some hacks to ensure compatibility with recent - * GCC snapshots, to avoid stack corruption when compiling - * with -fomit-frame-pointer. It's not sure if this will - * be fixed in GCC, as our previous implementation was a - * bit dubious. - * - * If you would like to see an analysis of this implementation, please - * ftp to gcom.com and download the file - * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz. - * - */ - -#include <asm/system.h> -#include <asm/atomic.h> -#include <linux/wait.h> -#include <linux/rwsem.h> - -struct semaphore { - atomic_t count; - int sleepers; - wait_queue_head_t wait; -}; - - -#define __SEMAPHORE_INITIALIZER(name, n) \ -{ \ - .count = ATOMIC_INIT(n), \ - .sleepers = 0, \ - .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ -} - -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ - struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) - -#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) -#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) - -static inline void sema_init (struct semaphore *sem, int val) -{ -/* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); - * - * i'd rather use the more flexible initialization above, but sadly - * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well. - */ - atomic_set(&sem->count, val); - sem->sleepers = 0; - init_waitqueue_head(&sem->wait); -} - -static inline void init_MUTEX (struct semaphore *sem) -{ - sema_init(sem, 1); -} - -static inline void init_MUTEX_LOCKED (struct semaphore *sem) -{ - sema_init(sem, 0); -} - -fastcall void __down_failed(void /* special register calling convention */); -fastcall int __down_failed_interruptible(void /* params in registers */); -fastcall int __down_failed_trylock(void /* params in registers */); -fastcall void __up_wakeup(void /* special register calling convention */); - -/* - * This is ugly, but we want the default case to fall through. - * "__down_failed" is a special asm handler that calls the C - * routine that actually waits. See arch/i386/kernel/semaphore.c - */ -static inline void down(struct semaphore * sem) -{ - might_sleep(); - __asm__ __volatile__( - "# atomic down operation\n\t" - LOCK_PREFIX "decl %0\n\t" /* --sem->count */ - "jns 2f\n" - "\tlea %0,%%eax\n\t" - "call __down_failed\n" - "2:" - :"+m" (sem->count) - : - :"memory","ax"); -} - -/* - * Interruptible try to acquire a semaphore. If we obtained - * it, return zero. If we were interrupted, returns -EINTR - */ -static inline int down_interruptible(struct semaphore * sem) -{ - int result; - - might_sleep(); - __asm__ __volatile__( - "# atomic interruptible down operation\n\t" - "xorl %0,%0\n\t" - LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "jns 2f\n\t" - "lea %1,%%eax\n\t" - "call __down_failed_interruptible\n" - "2:" - :"=&a" (result), "+m" (sem->count) - : - :"memory"); - return result; -} - -/* - * Non-blockingly attempt to down() a semaphore. - * Returns zero if we acquired it - */ -static inline int down_trylock(struct semaphore * sem) -{ - int result; - - __asm__ __volatile__( - "# atomic interruptible down operation\n\t" - "xorl %0,%0\n\t" - LOCK_PREFIX "decl %1\n\t" /* --sem->count */ - "jns 2f\n\t" - "lea %1,%%eax\n\t" - "call __down_failed_trylock\n\t" - "2:\n" - :"=&a" (result), "+m" (sem->count) - : - :"memory"); - return result; -} - -/* - * Note! This is subtle. We jump to wake people up only if - * the semaphore was negative (== somebody was waiting on it). 
- */ -static inline void up(struct semaphore * sem) -{ - __asm__ __volatile__( - "# atomic up operation\n\t" - LOCK_PREFIX "incl %0\n\t" /* ++sem->count */ - "jg 1f\n\t" - "lea %0,%%eax\n\t" - "call __up_wakeup\n" - "1:" - :"+m" (sem->count) - : - :"memory","ax"); -} - -#endif -#endif diff --git a/include/asm-i386/sembuf.h b/include/asm-i386/sembuf.h deleted file mode 100644 index 323835166c14..000000000000 --- a/include/asm-i386/sembuf.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _I386_SEMBUF_H -#define _I386_SEMBUF_H - -/* - * The semid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. - * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct semid64_ds { - struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ - __kernel_time_t sem_otime; /* last semop time */ - unsigned long __unused1; - __kernel_time_t sem_ctime; /* last change time */ - unsigned long __unused2; - unsigned long sem_nsems; /* no. of semaphores in array */ - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _I386_SEMBUF_H */ diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h deleted file mode 100644 index bd67480ca109..000000000000 --- a/include/asm-i386/serial.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * include/asm-i386/serial.h - */ - - -/* - * This assumes you have a 1.8432 MHz clock for your UART. - * - * It'd be nice if someone built a serial card with a 24.576 MHz - * clock, since the 16550A is capable of handling a top speed of 1.5 - * megabits/second; but this requires the faster clock. - */ -#define BASE_BAUD ( 1843200 / 16 ) - -/* Standard COM flags (except for COM4, because of the 8514 problem) */ -#ifdef CONFIG_SERIAL_DETECT_IRQ -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ) -#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ) -#else -#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST) -#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF -#endif - -#define SERIAL_PORT_DFNS \ - /* UART CLK PORT IRQ FLAGS */ \ - { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \ - { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \ - { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \ - { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */ diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h deleted file mode 100644 index 7862fe858a9e..000000000000 --- a/include/asm-i386/setup.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Just a place holder. 
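The fast paths of down()/up() above are nothing more than a signed counter test: down() decrements and falls into __down_failed() only when the result goes negative, up() increments and calls __up_wakeup() unless the result is already positive. A minimal sketch of those count transitions, with plain ints instead of atomic_t and printf stubs standing in for the slow-path helpers:

#include <stdio.h>

struct sem { int count; };              /* stands in for the atomic_t count */

static void down(struct sem *s)
{
        if (--s->count < 0)             /* LOCK decl; jns skips the slow path */
                printf("down: count=%d -> slow path (__down_failed)\n", s->count);
        else
                printf("down: count=%d -> acquired on the fast path\n", s->count);
}

static void up(struct sem *s)
{
        if (++s->count <= 0)            /* LOCK incl; jg skips the wakeup */
                printf("up:   count=%d -> wake a sleeper (__up_wakeup)\n", s->count);
        else
                printf("up:   count=%d -> nobody waiting\n", s->count);
}

int main(void)
{
        struct sem mutex = { 1 };       /* DECLARE_MUTEX() starts the count at 1 */

        down(&mutex);                   /* 1 -> 0: uncontended */
        down(&mutex);                   /* 0 -> -1: would sleep */
        up(&mutex);                     /* -1 -> 0: wakes the sleeper */
        up(&mutex);                     /* 0 -> 1: back to free */
        return 0;
}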
We don't want to have to test x86 before - * we include stuff - */ - -#ifndef _i386_SETUP_H -#define _i386_SETUP_H - -#define COMMAND_LINE_SIZE 2048 - -#ifdef __KERNEL__ -#include <linux/pfn.h> - -/* - * Reserved space for vmalloc and iomap - defined in asm/page.h - */ -#define MAXMEM_PFN PFN_DOWN(MAXMEM) -#define MAX_NONPAE_PFN (1 << 20) - -#define PARAM_SIZE 4096 - -#define OLD_CL_MAGIC_ADDR 0x90020 -#define OLD_CL_MAGIC 0xA33F -#define OLD_CL_BASE_ADDR 0x90000 -#define OLD_CL_OFFSET 0x90022 -#define NEW_CL_POINTER 0x228 /* Relative to real mode data */ - -#ifndef __ASSEMBLY__ - -#include <asm/bootparam.h> - -/* - * This is set up by the setup-routine at boot-time - */ -extern struct boot_params boot_params; - -#define PARAM ((char *)&boot_params) -#define SCREEN_INFO (*(struct screen_info *) (PARAM+0)) -#define EXT_MEM_K (*(unsigned short *) (PARAM+2)) -#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0)) -#define E820_MAP_NR (*(char*) (PARAM+E820NR)) -#define E820_MAP ((struct e820entry *) (PARAM+E820MAP)) -#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40)) -#define IST_INFO (*(struct ist_info *) (PARAM+0x60)) -#define SYS_DESC_TABLE (*(struct sys_desc_table *)(PARAM+0xa0)) -#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4))) -#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8))) -#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc))) -#define EFI_MEMMAP ((void *) *((unsigned long *)(PARAM+0x1d0))) -#define EFI_MEMMAP_SIZE (*((unsigned long *) (PARAM+0x1d4))) -#define MOUNT_ROOT_RDONLY (*(unsigned short *) (PARAM+0x1F2)) -#define RAMDISK_FLAGS (*(unsigned short *) (PARAM+0x1F8)) -#define VIDEO_MODE (*(unsigned short *) (PARAM+0x1FA)) -#define ORIG_ROOT_DEV (*(unsigned short *) (PARAM+0x1FC)) -#define AUX_DEVICE_INFO (*(unsigned char *) (PARAM+0x1FF)) -#define LOADER_TYPE (*(unsigned char *) (PARAM+0x210)) -#define KERNEL_START (*(unsigned long *) (PARAM+0x214)) -#define INITRD_START (*(unsigned long *) (PARAM+0x218)) -#define INITRD_SIZE (*(unsigned long *) (PARAM+0x21c)) -#define EDID_INFO (*(struct edid_info *) (PARAM+0x140)) -#define EDD_NR (*(unsigned char *) (PARAM+EDDNR)) -#define EDD_MBR_SIG_NR (*(unsigned char *) (PARAM+EDD_MBR_SIG_NR_BUF)) -#define EDD_MBR_SIGNATURE ((unsigned int *) (PARAM+EDD_MBR_SIG_BUF)) -#define EDD_BUF ((struct edd_info *) (PARAM+EDDBUF)) - -/* - * Do NOT EVER look at the BIOS memory size location. - * It does not work on many machines. - */ -#define LOWMEMSIZE() (0x9f000) - -struct e820entry; - -char * __init machine_specific_memory_setup(void); -char *memory_setup(void); - -int __init copy_e820_map(struct e820entry * biosmap, int nr_map); -int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); -void __init add_memory_region(unsigned long long start, - unsigned long long size, int type); - -extern unsigned long init_pg_tables_end; - -#ifndef CONFIG_PARAVIRT -#define paravirt_post_allocator_init() do {} while (0) -#endif - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - -#endif /* _i386_SETUP_H */ diff --git a/include/asm-i386/shmbuf.h b/include/asm-i386/shmbuf.h deleted file mode 100644 index d1cdc3cb079b..000000000000 --- a/include/asm-i386/shmbuf.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef _I386_SHMBUF_H -#define _I386_SHMBUF_H - -/* - * The shmid64_ds structure for i386 architecture. - * Note extra padding because this structure is passed back and forth - * between kernel and user space. 
- * - * Pad space is left for: - * - 64-bit time_t to solve y2038 problem - * - 2 miscellaneous 32-bit values - */ - -struct shmid64_ds { - struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ - __kernel_time_t shm_atime; /* last attach time */ - unsigned long __unused1; - __kernel_time_t shm_dtime; /* last detach time */ - unsigned long __unused2; - __kernel_time_t shm_ctime; /* last change time */ - unsigned long __unused3; - __kernel_pid_t shm_cpid; /* pid of creator */ - __kernel_pid_t shm_lpid; /* pid of last operator */ - unsigned long shm_nattch; /* no. of current attaches */ - unsigned long __unused4; - unsigned long __unused5; -}; - -struct shminfo64 { - unsigned long shmmax; - unsigned long shmmin; - unsigned long shmmni; - unsigned long shmseg; - unsigned long shmall; - unsigned long __unused1; - unsigned long __unused2; - unsigned long __unused3; - unsigned long __unused4; -}; - -#endif /* _I386_SHMBUF_H */ diff --git a/include/asm-i386/shmparam.h b/include/asm-i386/shmparam.h deleted file mode 100644 index 786243a5b319..000000000000 --- a/include/asm-i386/shmparam.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASMI386_SHMPARAM_H -#define _ASMI386_SHMPARAM_H - -#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ - -#endif /* _ASMI386_SHMPARAM_H */ diff --git a/include/asm-i386/sigcontext.h b/include/asm-i386/sigcontext.h deleted file mode 100644 index aaef089a7787..000000000000 --- a/include/asm-i386/sigcontext.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef _ASMi386_SIGCONTEXT_H -#define _ASMi386_SIGCONTEXT_H - -#include <linux/compiler.h> - -/* - * As documented in the iBCS2 standard.. - * - * The first part of "struct _fpstate" is just the normal i387 - * hardware setup, the extra "status" word is used to save the - * coprocessor status word before entering the handler. - * - * Pentium III FXSR, SSE support - * Gareth Hughes <gareth@valinux.com>, May 2000 - * - * The FPU state data structure has had to grow to accommodate the - * extended FPU state required by the Streaming SIMD Extensions. - * There is no documented standard to accomplish this at the moment. 
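The boot-parameter accessors in setup.h above are fixed byte offsets into the zero page that the boot loader fills in. A user-space sketch of the same idiom: zero_page is a local buffer standing in for struct boot_params, the field values are made up for the example, and uint32_t replaces unsigned long because long was 32 bits on i386.

#include <stdio.h>
#include <stdint.h>

static _Alignas(4) unsigned char zero_page[4096];  /* stands in for boot_params */

#define PARAM        ((char *)zero_page)
#define LOADER_TYPE  (*(uint8_t  *) (PARAM+0x210))
#define INITRD_START (*(uint32_t *) (PARAM+0x218))
#define INITRD_SIZE  (*(uint32_t *) (PARAM+0x21c))

int main(void)
{
        /* the boot-loader side: drop values at the agreed offsets
         * (these particular numbers are illustrative only) */
        LOADER_TYPE  = 0x72;
        INITRD_START = 0x37fa000u;
        INITRD_SIZE  = 0x200000u;

        /* the kernel side then reads the very same offsets back */
        printf("loader=%#x, initrd at %#x, %u bytes\n",
               (unsigned)LOADER_TYPE, (unsigned)INITRD_START,
               (unsigned)INITRD_SIZE);
        return 0;
}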
- */ -struct _fpreg { - unsigned short significand[4]; - unsigned short exponent; -}; - -struct _fpxreg { - unsigned short significand[4]; - unsigned short exponent; - unsigned short padding[3]; -}; - -struct _xmmreg { - unsigned long element[4]; -}; - -struct _fpstate { - /* Regular FPU environment */ - unsigned long cw; - unsigned long sw; - unsigned long tag; - unsigned long ipoff; - unsigned long cssel; - unsigned long dataoff; - unsigned long datasel; - struct _fpreg _st[8]; - unsigned short status; - unsigned short magic; /* 0xffff = regular FPU data only */ - - /* FXSR FPU environment */ - unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */ - unsigned long mxcsr; - unsigned long reserved; - struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ - struct _xmmreg _xmm[8]; - unsigned long padding[56]; -}; - -#define X86_FXSR_MAGIC 0x0000 - -struct sigcontext { - unsigned short gs, __gsh; - unsigned short fs, __fsh; - unsigned short es, __esh; - unsigned short ds, __dsh; - unsigned long edi; - unsigned long esi; - unsigned long ebp; - unsigned long esp; - unsigned long ebx; - unsigned long edx; - unsigned long ecx; - unsigned long eax; - unsigned long trapno; - unsigned long err; - unsigned long eip; - unsigned short cs, __csh; - unsigned long eflags; - unsigned long esp_at_signal; - unsigned short ss, __ssh; - struct _fpstate __user * fpstate; - unsigned long oldmask; - unsigned long cr2; -}; - - -#endif diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h deleted file mode 100644 index fe18f98fccfa..000000000000 --- a/include/asm-i386/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _I386_SIGINFO_H -#define _I386_SIGINFO_H - -#include <asm-generic/siginfo.h> - -#endif diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h deleted file mode 100644 index c3e8adec5918..000000000000 --- a/include/asm-i386/signal.h +++ /dev/null @@ -1,232 +0,0 @@ -#ifndef _ASMi386_SIGNAL_H -#define _ASMi386_SIGNAL_H - -#include <linux/types.h> -#include <linux/time.h> -#include <linux/compiler.h> - -/* Avoid too many header ordering problems. */ -struct siginfo; - -#ifdef __KERNEL__ - -#include <linux/linkage.h> - -/* Most things should be clean enough to redefine this at will, if care - is taken to make libc match. */ - -#define _NSIG 64 -#define _NSIG_BPW 32 -#define _NSIG_WORDS (_NSIG / _NSIG_BPW) - -typedef unsigned long old_sigset_t; /* at least 32 bits */ - -typedef struct { - unsigned long sig[_NSIG_WORDS]; -} sigset_t; - -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -#define NSIG 32 -typedef unsigned long sigset_t; - -#endif /* __KERNEL__ */ - -#define SIGHUP 1 -#define SIGINT 2 -#define SIGQUIT 3 -#define SIGILL 4 -#define SIGTRAP 5 -#define SIGABRT 6 -#define SIGIOT 6 -#define SIGBUS 7 -#define SIGFPE 8 -#define SIGKILL 9 -#define SIGUSR1 10 -#define SIGSEGV 11 -#define SIGUSR2 12 -#define SIGPIPE 13 -#define SIGALRM 14 -#define SIGTERM 15 -#define SIGSTKFLT 16 -#define SIGCHLD 17 -#define SIGCONT 18 -#define SIGSTOP 19 -#define SIGTSTP 20 -#define SIGTTIN 21 -#define SIGTTOU 22 -#define SIGURG 23 -#define SIGXCPU 24 -#define SIGXFSZ 25 -#define SIGVTALRM 26 -#define SIGPROF 27 -#define SIGWINCH 28 -#define SIGIO 29 -#define SIGPOLL SIGIO -/* -#define SIGLOST 29 -*/ -#define SIGPWR 30 -#define SIGSYS 31 -#define SIGUNUSED 31 - -/* These should not be considered constants from userland. 
*/ -#define SIGRTMIN 32 -#define SIGRTMAX _NSIG - -/* - * SA_FLAGS values: - * - * SA_ONSTACK indicates that a registered stack_t will be used. - * SA_RESTART flag to get restarting signals (which were the default long ago) - * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. - * SA_RESETHAND clears the handler when the signal is delivered. - * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. - * SA_NODEFER prevents the current signal from being masked in the handler. - * - * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single - * Unix names RESETHAND and NODEFER respectively. - */ -#define SA_NOCLDSTOP 0x00000001u -#define SA_NOCLDWAIT 0x00000002u -#define SA_SIGINFO 0x00000004u -#define SA_ONSTACK 0x08000000u -#define SA_RESTART 0x10000000u -#define SA_NODEFER 0x40000000u -#define SA_RESETHAND 0x80000000u - -#define SA_NOMASK SA_NODEFER -#define SA_ONESHOT SA_RESETHAND - -#define SA_RESTORER 0x04000000 - -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - -#define MINSIGSTKSZ 2048 -#define SIGSTKSZ 8192 - -#include <asm-generic/signal.h> - -#ifdef __KERNEL__ -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - __sigrestore_t sa_restorer; - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#else -/* Here we must cater to libcs that poke about in kernel headers. */ - -struct sigaction { - union { - __sighandler_t _sa_handler; - void (*_sa_sigaction)(int, struct siginfo *, void *); - } _u; - sigset_t sa_mask; - unsigned long sa_flags; - void (*sa_restorer)(void); -}; - -#define sa_handler _u._sa_handler -#define sa_sigaction _u._sa_sigaction - -#endif /* __KERNEL__ */ - -typedef struct sigaltstack { - void __user *ss_sp; - int ss_flags; - size_t ss_size; -} stack_t; - -#ifdef __KERNEL__ -#include <asm/sigcontext.h> - -#define __HAVE_ARCH_SIG_BITOPS - -#define sigaddset(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigaddset((set),(sig)) : \ - __gen_sigaddset((set),(sig))) - -static __inline__ void __gen_sigaddset(sigset_t *set, int _sig) -{ - __asm__("btsl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); -} - -static __inline__ void __const_sigaddset(sigset_t *set, int _sig) -{ - unsigned long sig = _sig - 1; - set->sig[sig / _NSIG_BPW] |= 1 << (sig % _NSIG_BPW); -} - -#define sigdelset(set,sig) \ - (__builtin_constant_p(sig) ? \ - __const_sigdelset((set),(sig)) : \ - __gen_sigdelset((set),(sig))) - - -static __inline__ void __gen_sigdelset(sigset_t *set, int _sig) -{ - __asm__("btrl %1,%0" : "+m"(*set) : "Ir"(_sig - 1) : "cc"); -} - -static __inline__ void __const_sigdelset(sigset_t *set, int _sig) -{ - unsigned long sig = _sig - 1; - set->sig[sig / _NSIG_BPW] &= ~(1 << (sig % _NSIG_BPW)); -} - -static __inline__ int __const_sigismember(sigset_t *set, int _sig) -{ - unsigned long sig = _sig - 1; - return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW)); -} - -static __inline__ int __gen_sigismember(sigset_t *set, int _sig) -{ - int ret; - __asm__("btl %2,%1\n\tsbbl %0,%0" - : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc"); - return ret; -} - -#define sigismember(set,sig) \ - (__builtin_constant_p(sig) ? 
\ - __const_sigismember((set),(sig)) : \ - __gen_sigismember((set),(sig))) - -static __inline__ int sigfindinword(unsigned long word) -{ - __asm__("bsfl %1,%0" : "=r"(word) : "rm"(word) : "cc"); - return word; -} - -struct pt_regs; - -#define ptrace_signal_deliver(regs, cookie) \ - do { \ - if (current->ptrace & PT_DTRACE) { \ - current->ptrace &= ~PT_DTRACE; \ - (regs)->eflags &= ~TF_MASK; \ - } \ - } while (0) - -#endif /* __KERNEL__ */ - -#endif diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h deleted file mode 100644 index 1f73bde165b1..000000000000 --- a/include/asm-i386/smp.h +++ /dev/null @@ -1,182 +0,0 @@ -#ifndef __ASM_SMP_H -#define __ASM_SMP_H - -/* - * We need the APIC definitions automatically as part of 'smp.h' - */ -#ifndef __ASSEMBLY__ -#include <linux/kernel.h> -#include <linux/threads.h> -#include <linux/cpumask.h> -#endif - -#if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__) -#include <asm/bitops.h> -#include <asm/mpspec.h> -#include <asm/apic.h> -#ifdef CONFIG_X86_IO_APIC -#include <asm/io_apic.h> -#endif -#endif - -#define BAD_APICID 0xFFu -#ifdef CONFIG_SMP -#ifndef __ASSEMBLY__ - -/* - * Private routines/data - */ - -extern void smp_alloc_memory(void); -extern int pic_mode; -extern int smp_num_siblings; -extern cpumask_t cpu_sibling_map[]; -extern cpumask_t cpu_core_map[]; - -extern void (*mtrr_hook) (void); -extern void zap_low_mappings (void); -extern void lock_ipi_call_lock(void); -extern void unlock_ipi_call_lock(void); - -#define MAX_APICID 256 -extern u8 x86_cpu_to_apicid[]; - -#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu] - -extern void set_cpu_sibling_map(int cpu); - -#ifdef CONFIG_HOTPLUG_CPU -extern void cpu_exit_clear(void); -extern void cpu_uninit(void); -extern void remove_siblinginfo(int cpu); -#endif - -struct smp_ops -{ - void (*smp_prepare_boot_cpu)(void); - void (*smp_prepare_cpus)(unsigned max_cpus); - int (*cpu_up)(unsigned cpu); - void (*smp_cpus_done)(unsigned max_cpus); - - void (*smp_send_stop)(void); - void (*smp_send_reschedule)(int cpu); - int (*smp_call_function_mask)(cpumask_t mask, - void (*func)(void *info), void *info, - int wait); -}; - -extern struct smp_ops smp_ops; - -static inline void smp_prepare_boot_cpu(void) -{ - smp_ops.smp_prepare_boot_cpu(); -} -static inline void smp_prepare_cpus(unsigned int max_cpus) -{ - smp_ops.smp_prepare_cpus(max_cpus); -} -static inline int __cpu_up(unsigned int cpu) -{ - return smp_ops.cpu_up(cpu); -} -static inline void smp_cpus_done(unsigned int max_cpus) -{ - smp_ops.smp_cpus_done(max_cpus); -} - -static inline void smp_send_stop(void) -{ - smp_ops.smp_send_stop(); -} -static inline void smp_send_reschedule(int cpu) -{ - smp_ops.smp_send_reschedule(cpu); -} -static inline int smp_call_function_mask(cpumask_t mask, - void (*func) (void *info), void *info, - int wait) -{ - return smp_ops.smp_call_function_mask(mask, func, info, wait); -} - -void native_smp_prepare_boot_cpu(void); -void native_smp_prepare_cpus(unsigned int max_cpus); -int native_cpu_up(unsigned int cpunum); -void native_smp_cpus_done(unsigned int max_cpus); - -#ifndef CONFIG_PARAVIRT -#define startup_ipi_hook(phys_apicid, start_eip, start_esp) \ -do { } while (0) -#endif - -/* - * This function is needed by all SMP systems. It must _always_ be valid - * from the initial startup. We map APIC_BASE very early in page_setup(), - * so this is correct in the x86 case. 
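The constant-folded sigset helpers in signal.h above reduce to ordinary word-and-bit indexing: signal numbers are 1-based, so _sig - 1 selects bit (sig % 32) of word (sig / 32). A small sketch of that arithmetic with the same _NSIG layout; the demo_* names and sigset_demo_t are illustrative renamings to avoid clashing with libc, and 1UL replaces the header's plain 1 so the shift is well defined for bit 31.

#include <stdio.h>

#define _NSIG       64
#define _NSIG_BPW   32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)

typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_demo_t;

static void demo_sigaddset(sigset_demo_t *set, int _sig)
{
        unsigned long sig = _sig - 1;                    /* signals are 1-based */
        set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW);
}

static int demo_sigismember(const sigset_demo_t *set, int _sig)
{
        unsigned long sig = _sig - 1;
        return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
}

int main(void)
{
        sigset_demo_t set = { { 0, 0 } };

        demo_sigaddset(&set, 17);       /* SIGCHLD: bit 16 of word 0 */
        demo_sigaddset(&set, 33);       /* a realtime signal: bit 0 of word 1 */

        printf("words: %#lx %#lx\n", set.sig[0], set.sig[1]);
        printf("SIGCHLD set? %d  SIGKILL set? %d\n",
               demo_sigismember(&set, 17), demo_sigismember(&set, 9));
        return 0;
}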
- */ -DECLARE_PER_CPU(int, cpu_number); -#define raw_smp_processor_id() (x86_read_percpu(cpu_number)) - -extern cpumask_t cpu_callout_map; -extern cpumask_t cpu_callin_map; -extern cpumask_t cpu_possible_map; - -/* We don't mark CPUs online until __cpu_up(), so we need another measure */ -static inline int num_booting_cpus(void) -{ - return cpus_weight(cpu_callout_map); -} - -extern int safe_smp_processor_id(void); -extern int __cpu_disable(void); -extern void __cpu_die(unsigned int cpu); -extern unsigned int num_processors; - -void __cpuinit smp_store_cpu_info(int id); - -#endif /* !__ASSEMBLY__ */ - -#else /* CONFIG_SMP */ - -#define safe_smp_processor_id() 0 -#define cpu_physical_id(cpu) boot_cpu_physical_apicid - -#define NO_PROC_ID 0xFF /* No processor magic marker */ - -#endif /* CONFIG_SMP */ - -#ifndef __ASSEMBLY__ - -#ifdef CONFIG_X86_LOCAL_APIC - -#ifdef APIC_DEFINITION -extern int hard_smp_processor_id(void); -#else -#include <mach_apicdef.h> -static inline int hard_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID)); -} -#endif /* APIC_DEFINITION */ - -#else /* CONFIG_X86_LOCAL_APIC */ - -#ifndef CONFIG_SMP -#define hard_smp_processor_id() 0 -#endif - -#endif /* CONFIG_X86_LOCAL_APIC */ - -extern u8 apicid_2_node[]; - -#ifdef CONFIG_X86_LOCAL_APIC -static __inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR)); -} -#endif -#endif - -#endif diff --git a/include/asm-i386/socket.h b/include/asm-i386/socket.h deleted file mode 100644 index 99ca648b94c5..000000000000 --- a/include/asm-i386/socket.h +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include <asm/sockios.h> - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#endif /* _ASM_SOCKET_H */ diff --git a/include/asm-i386/sockios.h b/include/asm-i386/sockios.h deleted file mode 100644 index ff528c7d255c..000000000000 --- a/include/asm-i386/sockios.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef __ARCH_I386_SOCKIOS__ -#define __ARCH_I386_SOCKIOS__ - -/* Socket-level I/O control calls. 
*/ -#define FIOSETOWN 0x8901 -#define SIOCSPGRP 0x8902 -#define FIOGETOWN 0x8903 -#define SIOCGPGRP 0x8904 -#define SIOCATMARK 0x8905 -#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ -#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ - -#endif diff --git a/include/asm-i386/sparsemem.h b/include/asm-i386/sparsemem.h deleted file mode 100644 index cfeed990585f..000000000000 --- a/include/asm-i386/sparsemem.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _I386_SPARSEMEM_H -#define _I386_SPARSEMEM_H -#ifdef CONFIG_SPARSEMEM - -/* - * generic non-linear memory support: - * - * 1) we will not split memory into more chunks than will fit into the - * flags field of the struct page - */ - -/* - * SECTION_SIZE_BITS 2^N: how big each section will be - * MAX_PHYSADDR_BITS 2^N: how much physical address space we have - * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space - */ -#ifdef CONFIG_X86_PAE -#define SECTION_SIZE_BITS 30 -#define MAX_PHYSADDR_BITS 36 -#define MAX_PHYSMEM_BITS 36 -#else -#define SECTION_SIZE_BITS 26 -#define MAX_PHYSADDR_BITS 32 -#define MAX_PHYSMEM_BITS 32 -#endif - -/* XXX: FIXME -- wli */ -#define kern_addr_valid(kaddr) (0) - -#endif /* CONFIG_SPARSEMEM */ -#endif /* _I386_SPARSEMEM_H */ diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h deleted file mode 100644 index d3bcebed60ca..000000000000 --- a/include/asm-i386/spinlock.h +++ /dev/null @@ -1,221 +0,0 @@ -#ifndef __ASM_SPINLOCK_H -#define __ASM_SPINLOCK_H - -#include <asm/atomic.h> -#include <asm/rwlock.h> -#include <asm/page.h> -#include <asm/processor.h> -#include <linux/compiler.h> - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define CLI_STRING "cli" -#define STI_STRING "sti" -#define CLI_STI_CLOBBERS -#define CLI_STI_INPUT_ARGS -#endif /* CONFIG_PARAVIRT */ - -/* - * Your basic SMP spinlocks, allowing only a single CPU anywhere - * - * Simple spin lock operations. There are two variants, one clears IRQ's - * on the local processor, one does not. - * - * We make no fairness assumptions. They have a cost. - * - * (the type definitions are in asm/spinlock_types.h) - */ - -static inline int __raw_spin_is_locked(raw_spinlock_t *x) -{ - return *(volatile signed char *)(&(x)->slock) <= 0; -} - -static inline void __raw_spin_lock(raw_spinlock_t *lock) -{ - asm volatile("\n1:\t" - LOCK_PREFIX " ; decb %0\n\t" - "jns 3f\n" - "2:\t" - "rep;nop\n\t" - "cmpb $0,%0\n\t" - "jle 2b\n\t" - "jmp 1b\n" - "3:\n\t" - : "+m" (lock->slock) : : "memory"); -} - -/* - * It is easier for the lock validator if interrupts are not re-enabled - * in the middle of a lock-acquire. This is a performance feature anyway - * so we turn it off: - * - * NOTE: there's an irqs-on section here, which normally would have to be - * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant. 
- */ -#ifndef CONFIG_PROVE_LOCKING -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) -{ - asm volatile( - "\n1:\t" - LOCK_PREFIX " ; decb %[slock]\n\t" - "jns 5f\n" - "2:\t" - "testl $0x200, %[flags]\n\t" - "jz 4f\n\t" - STI_STRING "\n" - "3:\t" - "rep;nop\n\t" - "cmpb $0, %[slock]\n\t" - "jle 3b\n\t" - CLI_STRING "\n\t" - "jmp 1b\n" - "4:\t" - "rep;nop\n\t" - "cmpb $0, %[slock]\n\t" - "jg 1b\n\t" - "jmp 4b\n" - "5:\n\t" - : [slock] "+m" (lock->slock) - : [flags] "r" (flags) - CLI_STI_INPUT_ARGS - : "memory" CLI_STI_CLOBBERS); -} -#endif - -static inline int __raw_spin_trylock(raw_spinlock_t *lock) -{ - char oldval; - asm volatile( - "xchgb %b0,%1" - :"=q" (oldval), "+m" (lock->slock) - :"0" (0) : "memory"); - return oldval > 0; -} - -/* - * __raw_spin_unlock based on writing $1 to the low byte. - * This method works. Despite all the confusion. - * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there) - * (PPro errata 66, 92) - */ - -#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) - -static inline void __raw_spin_unlock(raw_spinlock_t *lock) -{ - asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory"); -} - -#else - -static inline void __raw_spin_unlock(raw_spinlock_t *lock) -{ - char oldval = 1; - - asm volatile("xchgb %b0, %1" - : "=q" (oldval), "+m" (lock->slock) - : "0" (oldval) : "memory"); -} - -#endif - -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) -{ - while (__raw_spin_is_locked(lock)) - cpu_relax(); -} - -/* - * Read-write spinlocks, allowing multiple readers - * but only one writer. - * - * NOTE! it is quite common to have readers in interrupts - * but no interrupt writers. For those circumstances we - * can "mix" irq-safe locks - any writer needs to get a - * irq-safe write-lock, but readers can get non-irqsafe - * read-locks. - * - * On x86, we implement read-write locks as a 32-bit counter - * with the high bit (sign) being the "contended" bit. - * - * The inline assembly is non-obvious. Think about it. - * - * Changed to use the same technique as rw semaphores. See - * semaphore.h for details. -ben - * - * the helpers are in arch/i386/kernel/semaphore.c - */ - -/** - * read_can_lock - would read_trylock() succeed? - * @lock: the rwlock in question. - */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) -{ - return (int)(x)->lock > 0; -} - -/** - * write_can_lock - would write_trylock() succeed? - * @lock: the rwlock in question. 
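Stripped of the LOCK-prefixed asm, the byte lock above is a signed counter that holds 1 when free: lock decrements and succeeds only if the old byte was positive, trylock exchanges in a 0 and succeeds on the same condition, unlock stores 1 back. A rough C11-atomics rendering of that protocol follows; the demo_* names are illustrative and this is a sketch of the semantics, not the kernel implementation.

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_schar slock; } demo_spinlock_t;  /* 1 = free, <= 0 = held */

static void demo_spin_lock(demo_spinlock_t *lock)
{
        for (;;) {
                /* LOCK decb + jns: we own the lock iff the byte was positive */
                if (atomic_fetch_sub(&lock->slock, 1) > 0)
                        return;
                /* contended: spin (rep;nop in the asm) until it goes positive,
                 * then loop back and try the decrement again */
                while (atomic_load(&lock->slock) <= 0)
                        ;
        }
}

static int demo_spin_trylock(demo_spinlock_t *lock)
{
        /* xchgb: grab the byte, succeed only if it was still positive */
        return atomic_exchange(&lock->slock, 0) > 0;
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
        atomic_store(&lock->slock, 1);          /* movb $1,slock */
}

int main(void)
{
        demo_spinlock_t lock = { 1 };           /* __RAW_SPIN_LOCK_UNLOCKED */

        demo_spin_lock(&lock);
        printf("trylock while held: %d\n", demo_spin_trylock(&lock));
        demo_spin_unlock(&lock);
        printf("trylock when free:  %d\n", demo_spin_trylock(&lock));
        demo_spin_unlock(&lock);
        return 0;
}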
- */ -static inline int __raw_write_can_lock(raw_rwlock_t *x) -{ - return (x)->lock == RW_LOCK_BIAS; -} - -static inline void __raw_read_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" - "jns 1f\n" - "call __read_lock_failed\n\t" - "1:\n" - ::"a" (rw) : "memory"); -} - -static inline void __raw_write_lock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" - "jz 1f\n" - "call __write_lock_failed\n\t" - "1:\n" - ::"a" (rw) : "memory"); -} - -static inline int __raw_read_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - atomic_dec(count); - if (atomic_read(count) >= 0) - return 1; - atomic_inc(count); - return 0; -} - -static inline int __raw_write_trylock(raw_rwlock_t *lock) -{ - atomic_t *count = (atomic_t *)lock; - if (atomic_sub_and_test(RW_LOCK_BIAS, count)) - return 1; - atomic_add(RW_LOCK_BIAS, count); - return 0; -} - -static inline void __raw_read_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); -} - -static inline void __raw_write_unlock(raw_rwlock_t *rw) -{ - asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0" - : "+m" (rw->lock) : : "memory"); -} - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() - -#endif /* __ASM_SPINLOCK_H */ diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h deleted file mode 100644 index 4da9345c1500..000000000000 --- a/include/asm-i386/spinlock_types.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef __ASM_SPINLOCK_TYPES_H -#define __ASM_SPINLOCK_TYPES_H - -#ifndef __LINUX_SPINLOCK_TYPES_H -# error "please don't include this file directly" -#endif - -typedef struct { - unsigned int slock; -} raw_spinlock_t; - -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } - -typedef struct { - unsigned int lock; -} raw_rwlock_t; - -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } - -#endif diff --git a/include/asm-i386/srat.h b/include/asm-i386/srat.h deleted file mode 100644 index 165ab4bdc02b..000000000000 --- a/include/asm-i386/srat.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Some of the code in this file has been gleaned from the 64 bit - * discontigmem support code base. - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
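The rwlock above uses the same biased-counter idea as the rwsems: a free lock holds RW_LOCK_BIAS, every reader subtracts 1, and a writer subtracts the whole bias and demands an exact zero. The bias value itself lives in the separate asm/rwlock.h, which is not part of this hunk; 0x01000000 is used below on that assumption, purely for illustration.

#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000   /* assumed: the value asm/rwlock.h used then */

int main(void)
{
        int lock = RW_LOCK_BIAS;  /* __RAW_RW_LOCK_UNLOCKED */

        /* two readers: each subl $1 succeeds while the result stays
         * non-negative, so up to RW_LOCK_BIAS readers can be in flight */
        lock -= 1;
        lock -= 1;
        printf("after two readers: %#x\n", (unsigned)lock);

        /* a writer subtracts the whole bias and needs an exact zero result,
         * i.e. no readers and no other writer */
        printf("writer's result:   %#x (non-zero => __write_lock_failed)\n",
               (unsigned)(lock - RW_LOCK_BIAS));

        /* once the readers are gone the writer's subtraction hits zero */
        lock += 2;
        lock -= RW_LOCK_BIAS;
        printf("writer holds it:   %#x\n", (unsigned)lock);
        return 0;
}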
- * - * Send feedback to Pat Gaughen <gone@us.ibm.com> - */ - -#ifndef _ASM_SRAT_H_ -#define _ASM_SRAT_H_ - -#ifndef CONFIG_ACPI_SRAT -#error CONFIG_ACPI_SRAT not defined, and srat.h header has been included -#endif - -extern int get_memcfg_from_srat(void); -extern unsigned long *get_zholes_size(int); - -#endif /* _ASM_SRAT_H_ */ diff --git a/include/asm-i386/stacktrace.h b/include/asm-i386/stacktrace.h deleted file mode 100644 index 7d1f6a5cbfca..000000000000 --- a/include/asm-i386/stacktrace.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-x86_64/stacktrace.h> diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h deleted file mode 100644 index 67eae78323ba..000000000000 --- a/include/asm-i386/stat.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef _I386_STAT_H -#define _I386_STAT_H - -struct __old_kernel_stat { - unsigned short st_dev; - unsigned short st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned short st_rdev; - unsigned long st_size; - unsigned long st_atime; - unsigned long st_mtime; - unsigned long st_ctime; -}; - -struct stat { - unsigned long st_dev; - unsigned long st_ino; - unsigned short st_mode; - unsigned short st_nlink; - unsigned short st_uid; - unsigned short st_gid; - unsigned long st_rdev; - unsigned long st_size; - unsigned long st_blksize; - unsigned long st_blocks; - unsigned long st_atime; - unsigned long st_atime_nsec; - unsigned long st_mtime; - unsigned long st_mtime_nsec; - unsigned long st_ctime; - unsigned long st_ctime_nsec; - unsigned long __unused4; - unsigned long __unused5; -}; - -/* This matches struct stat64 in glibc2.1, hence the absolutely - * insane amounts of padding around dev_t's. - */ -struct stat64 { - unsigned long long st_dev; - unsigned char __pad0[4]; - -#define STAT64_HAS_BROKEN_ST_INO 1 - unsigned long __st_ino; - - unsigned int st_mode; - unsigned int st_nlink; - - unsigned long st_uid; - unsigned long st_gid; - - unsigned long long st_rdev; - unsigned char __pad3[4]; - - long long st_size; - unsigned long st_blksize; - - unsigned long long st_blocks; /* Number 512-byte blocks allocated. 
*/ - - unsigned long st_atime; - unsigned long st_atime_nsec; - - unsigned long st_mtime; - unsigned int st_mtime_nsec; - - unsigned long st_ctime; - unsigned long st_ctime_nsec; - - unsigned long long st_ino; -}; - -#define STAT_HAVE_NSEC 1 - -#endif diff --git a/include/asm-i386/statfs.h b/include/asm-i386/statfs.h deleted file mode 100644 index 24972c175132..000000000000 --- a/include/asm-i386/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _I386_STATFS_H -#define _I386_STATFS_H - -#include <asm-generic/statfs.h> - -#endif diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h deleted file mode 100644 index a9b64453bdf5..000000000000 --- a/include/asm-i386/string.h +++ /dev/null @@ -1,276 +0,0 @@ -#ifndef _I386_STRING_H_ -#define _I386_STRING_H_ - -#ifdef __KERNEL__ - -/* Let gcc decide wether to inline or use the out of line functions */ - -#define __HAVE_ARCH_STRCPY -extern char *strcpy(char *dest, const char *src); - -#define __HAVE_ARCH_STRNCPY -extern char *strncpy(char *dest, const char *src, size_t count); - -#define __HAVE_ARCH_STRCAT -extern char *strcat(char *dest, const char *src); - -#define __HAVE_ARCH_STRNCAT -extern char *strncat(char *dest, const char *src, size_t count); - -#define __HAVE_ARCH_STRCMP -extern int strcmp(const char *cs, const char *ct); - -#define __HAVE_ARCH_STRNCMP -extern int strncmp(const char *cs, const char *ct, size_t count); - -#define __HAVE_ARCH_STRCHR -extern char *strchr(const char *s, int c); - -#define __HAVE_ARCH_STRRCHR -extern char *strrchr(const char *s, int c); - -#define __HAVE_ARCH_STRLEN -extern size_t strlen(const char *s); - -static __always_inline void * __memcpy(void * to, const void * from, size_t n) -{ -int d0, d1, d2; -__asm__ __volatile__( - "rep ; movsl\n\t" - "movl %4,%%ecx\n\t" - "andl $3,%%ecx\n\t" - "jz 1f\n\t" - "rep ; movsb\n\t" - "1:" - : "=&c" (d0), "=&D" (d1), "=&S" (d2) - : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) - : "memory"); -return (to); -} - -/* - * This looks ugly, but the compiler can optimize it totally, - * as the count is constant. - */ -static __always_inline void * __constant_memcpy(void * to, const void * from, size_t n) -{ - long esi, edi; - if (!n) return to; -#if 1 /* want to do small copies with non-string ops? */ - switch (n) { - case 1: *(char*)to = *(char*)from; return to; - case 2: *(short*)to = *(short*)from; return to; - case 4: *(int*)to = *(int*)from; return to; -#if 1 /* including those doable with two moves? 
*/ - case 3: *(short*)to = *(short*)from; - *((char*)to+2) = *((char*)from+2); return to; - case 5: *(int*)to = *(int*)from; - *((char*)to+4) = *((char*)from+4); return to; - case 6: *(int*)to = *(int*)from; - *((short*)to+2) = *((short*)from+2); return to; - case 8: *(int*)to = *(int*)from; - *((int*)to+1) = *((int*)from+1); return to; -#endif - } -#endif - esi = (long) from; - edi = (long) to; - if (n >= 5*4) { - /* large block: use rep prefix */ - int ecx; - __asm__ __volatile__( - "rep ; movsl" - : "=&c" (ecx), "=&D" (edi), "=&S" (esi) - : "0" (n/4), "1" (edi),"2" (esi) - : "memory" - ); - } else { - /* small block: don't clobber ecx + smaller code */ - if (n >= 4*4) __asm__ __volatile__("movsl" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - if (n >= 3*4) __asm__ __volatile__("movsl" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - if (n >= 2*4) __asm__ __volatile__("movsl" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - if (n >= 1*4) __asm__ __volatile__("movsl" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - } - switch (n % 4) { - /* tail */ - case 0: return to; - case 1: __asm__ __volatile__("movsb" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - return to; - case 2: __asm__ __volatile__("movsw" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - return to; - default: __asm__ __volatile__("movsw\n\tmovsb" - :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); - return to; - } -} - -#define __HAVE_ARCH_MEMCPY - -#ifdef CONFIG_X86_USE_3DNOW - -#include <asm/mmx.h> - -/* - * This CPU favours 3DNow strongly (eg AMD Athlon) - */ - -static inline void * __constant_memcpy3d(void * to, const void * from, size_t len) -{ - if (len < 512) - return __constant_memcpy(to, from, len); - return _mmx_memcpy(to, from, len); -} - -static __inline__ void *__memcpy3d(void *to, const void *from, size_t len) -{ - if (len < 512) - return __memcpy(to, from, len); - return _mmx_memcpy(to, from, len); -} - -#define memcpy(t, f, n) \ -(__builtin_constant_p(n) ? \ - __constant_memcpy3d((t),(f),(n)) : \ - __memcpy3d((t),(f),(n))) - -#else - -/* - * No 3D Now! - */ - -#define memcpy(t, f, n) \ -(__builtin_constant_p(n) ? \ - __constant_memcpy((t),(f),(n)) : \ - __memcpy((t),(f),(n))) - -#endif - -#define __HAVE_ARCH_MEMMOVE -void *memmove(void * dest,const void * src, size_t n); - -#define memcmp __builtin_memcmp - -#define __HAVE_ARCH_MEMCHR -extern void *memchr(const void * cs,int c,size_t count); - -static inline void * __memset_generic(void * s, char c,size_t count) -{ -int d0, d1; -__asm__ __volatile__( - "rep\n\t" - "stosb" - : "=&c" (d0), "=&D" (d1) - :"a" (c),"1" (s),"0" (count) - :"memory"); -return s; -} - -/* we might want to write optimized versions of these later */ -#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count)) - -/* - * memset(x,0,y) is a reasonably common thing to do, so we want to fill - * things 32 bits at a time even when we don't know the size of the - * area at compile-time.. 
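Filling "32 bits at a time" works because the fill byte is first replicated into every lane of a word, which is what the 0x01010101UL * (unsigned char)(c) multiplication in the memset() macro further down in this header does. A quick stand-alone sketch of that trick; it uses a fixed 32-bit unsigned int where the i386 header used unsigned long (32 bits on that architecture), and buf is just a scratch buffer for the demonstration.

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* replicate the fill byte into all four byte lanes of a 32-bit word */
        unsigned char c = 0xab;
        unsigned int pattern = 0x01010101u * c;

        printf("byte %#x -> word pattern %#x\n", (unsigned)c, pattern);

        /* storing that word four bytes at a time gives the same result as a
         * byte-by-byte fill, which is why rep;stosl can do the bulk of it */
        unsigned char buf[16];
        for (size_t i = 0; i < sizeof(buf); i += sizeof(pattern))
                memcpy(buf + i, &pattern, sizeof(pattern));
        printf("buf starts: %02x %02x %02x %02x ...\n",
               buf[0], buf[1], buf[2], buf[3]);
        return 0;
}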
- */ -static __always_inline void * __constant_c_memset(void * s, unsigned long c, size_t count) -{ -int d0, d1; -__asm__ __volatile__( - "rep ; stosl\n\t" - "testb $2,%b3\n\t" - "je 1f\n\t" - "stosw\n" - "1:\ttestb $1,%b3\n\t" - "je 2f\n\t" - "stosb\n" - "2:" - :"=&c" (d0), "=&D" (d1) - :"a" (c), "q" (count), "0" (count/4), "1" ((long) s) - :"memory"); -return (s); -} - -/* Added by Gertjan van Wingerde to make minix and sysv module work */ -#define __HAVE_ARCH_STRNLEN -extern size_t strnlen(const char * s, size_t count); -/* end of additional stuff */ - -#define __HAVE_ARCH_STRSTR -extern char *strstr(const char *cs, const char *ct); - -/* - * This looks horribly ugly, but the compiler can optimize it totally, - * as we by now know that both pattern and count is constant.. - */ -static __always_inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count) -{ - switch (count) { - case 0: - return s; - case 1: - *(unsigned char *)s = pattern; - return s; - case 2: - *(unsigned short *)s = pattern; - return s; - case 3: - *(unsigned short *)s = pattern; - *(2+(unsigned char *)s) = pattern; - return s; - case 4: - *(unsigned long *)s = pattern; - return s; - } -#define COMMON(x) \ -__asm__ __volatile__( \ - "rep ; stosl" \ - x \ - : "=&c" (d0), "=&D" (d1) \ - : "a" (pattern),"0" (count/4),"1" ((long) s) \ - : "memory") -{ - int d0, d1; - switch (count % 4) { - case 0: COMMON(""); return s; - case 1: COMMON("\n\tstosb"); return s; - case 2: COMMON("\n\tstosw"); return s; - default: COMMON("\n\tstosw\n\tstosb"); return s; - } -} - -#undef COMMON -} - -#define __constant_c_x_memset(s, c, count) \ -(__builtin_constant_p(count) ? \ - __constant_c_and_count_memset((s),(c),(count)) : \ - __constant_c_memset((s),(c),(count))) - -#define __memset(s, c, count) \ -(__builtin_constant_p(count) ? \ - __constant_count_memset((s),(c),(count)) : \ - __memset_generic((s),(c),(count))) - -#define __HAVE_ARCH_MEMSET -#define memset(s, c, count) \ -(__builtin_constant_p(c) ? 
\ - __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \ - __memset((s),(c),(count))) - -/* - * find the first occurrence of byte 'c', or 1 past the area if none - */ -#define __HAVE_ARCH_MEMSCAN -extern void *memscan(void * addr, int c, size_t size); - -#endif /* __KERNEL__ */ - -#endif diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h deleted file mode 100644 index a2520732ffd6..000000000000 --- a/include/asm-i386/suspend.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2001-2002 Pavel Machek <pavel@suse.cz> - * Based on code - * Copyright 2001 Patrick Mochel <mochel@osdl.org> - */ -#include <asm/desc.h> -#include <asm/i387.h> - -static inline int arch_prepare_suspend(void) { return 0; } - -/* image of the saved processor state */ -struct saved_context { - u16 es, fs, gs, ss; - unsigned long cr0, cr2, cr3, cr4; - struct Xgt_desc_struct gdt; - struct Xgt_desc_struct idt; - u16 ldt; - u16 tss; - unsigned long tr; - unsigned long safety; - unsigned long return_address; -} __attribute__((packed)); - -#ifdef CONFIG_ACPI -extern unsigned long saved_eip; -extern unsigned long saved_esp; -extern unsigned long saved_ebp; -extern unsigned long saved_ebx; -extern unsigned long saved_esi; -extern unsigned long saved_edi; - -static inline void acpi_save_register_state(unsigned long return_point) -{ - saved_eip = return_point; - asm volatile ("movl %%esp,%0" : "=m" (saved_esp)); - asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp)); - asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx)); - asm volatile ("movl %%edi,%0" : "=m" (saved_edi)); - asm volatile ("movl %%esi,%0" : "=m" (saved_esi)); -} - -#define acpi_restore_register_state() do {} while (0) - -/* routines for saving/restoring kernel state */ -extern int acpi_save_state_mem(void); -#endif diff --git a/include/asm-i386/sync_bitops.h b/include/asm-i386/sync_bitops.h deleted file mode 100644 index cbce08a2d135..000000000000 --- a/include/asm-i386/sync_bitops.h +++ /dev/null @@ -1,156 +0,0 @@ -#ifndef _I386_SYNC_BITOPS_H -#define _I386_SYNC_BITOPS_H - -/* - * Copyright 1992, Linus Torvalds. - */ - -/* - * These have to be done with inline assembly: that way the bit-setting - * is guaranteed to be atomic. All bit operations return 0 if the bit - * was cleared before the operation and != 0 if it was not. - * - * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). - */ - -#define ADDR (*(volatile long *) addr) - -/** - * sync_set_bit - Atomically set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * This function is atomic and may not be reordered. See __set_bit() - * if you do not require the atomic guarantees. - * - * Note: there are no guarantees that this function will not be reordered - * on non-x86 architectures, so if you are writing portable code, - * make sure not to rely on its reordering guarantees. - * - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void sync_set_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__("lock; btsl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); -} - -/** - * sync_clear_bit - Clears a bit in memory - * @nr: Bit to clear - * @addr: Address to start counting from - * - * sync_clear_bit() is atomic and may not be reordered. 
However, it does - * not contain a memory barrier, so if it is used for locking purposes, - * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() - * in order to ensure changes are visible on other processors. - */ -static inline void sync_clear_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__("lock; btrl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); -} - -/** - * sync_change_bit - Toggle a bit in memory - * @nr: Bit to change - * @addr: Address to start counting from - * - * change_bit() is atomic and may not be reordered. It may be - * reordered on other architectures than x86. - * Note that @nr may be almost arbitrarily large; this function is not - * restricted to acting on a single-word quantity. - */ -static inline void sync_change_bit(int nr, volatile unsigned long * addr) -{ - __asm__ __volatile__("lock; btcl %1,%0" - :"+m" (ADDR) - :"Ir" (nr) - : "memory"); -} - -/** - * sync_test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It may be reordered on other architectures than x86. - * It also implies a memory barrier. - */ -static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * sync_test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It can be reorderdered on other architectures other than x86. - * It also implies a memory barrier. - */ -static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -/** - * sync_test_and_change_bit - Change a bit and return its old value - * @nr: Bit to change - * @addr: Address to count from - * - * This operation is atomic and cannot be reordered. - * It also implies a memory barrier. - */ -static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) -{ - int oldbit; - - __asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit),"+m" (ADDR) - :"Ir" (nr) : "memory"); - return oldbit; -} - -static __always_inline int sync_constant_test_bit(int nr, const volatile unsigned long *addr) -{ - return ((1UL << (nr & 31)) & - (((const volatile unsigned int *)addr)[nr >> 5])) != 0; -} - -static inline int sync_var_test_bit(int nr, const volatile unsigned long * addr) -{ - int oldbit; - - __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0" - :"=r" (oldbit) - :"m" (ADDR),"Ir" (nr)); - return oldbit; -} - -#define sync_test_bit(nr,addr) \ - (__builtin_constant_p(nr) ? \ - sync_constant_test_bit((nr),(addr)) : \ - sync_var_test_bit((nr),(addr))) - -#undef ADDR - -#endif /* _I386_SYNC_BITOPS_H */ diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h deleted file mode 100644 index d69ba937e092..000000000000 --- a/include/asm-i386/system.h +++ /dev/null @@ -1,313 +0,0 @@ -#ifndef __ASM_SYSTEM_H -#define __ASM_SYSTEM_H - -#include <linux/kernel.h> -#include <asm/segment.h> -#include <asm/cpufeature.h> -#include <asm/cmpxchg.h> - -#ifdef __KERNEL__ - -struct task_struct; /* one of the stranger aspects of C forward declarations.. 
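The sync_*_bit() helpers above all share one addressing rule: bit 0 is the LSB of the first word and bit 32 is the LSB of the next, exactly as sync_constant_test_bit() spells out in C. The sketch below shows only that word/mask indexing with illustrative demo_* functions on 32-bit words; it deliberately leaves out the LOCK prefix, so unlike the real helpers it is not atomic.

#include <stdio.h>

static void demo_set_bit(int nr, unsigned int *addr)
{
        addr[nr >> 5] |= 1u << (nr & 31);       /* what lock;btsl does */
}

static int demo_test_bit(int nr, const unsigned int *addr)
{
        return (addr[nr >> 5] >> (nr & 31)) & 1;
}

static int demo_test_and_clear_bit(int nr, unsigned int *addr)
{
        unsigned int mask = 1u << (nr & 31);
        int old = (addr[nr >> 5] & mask) != 0;  /* lock;btrl + sbbl returns this */

        addr[nr >> 5] &= ~mask;
        return old;
}

int main(void)
{
        unsigned int bits[2] = { 0, 0 };
        int old;

        demo_set_bit(7, bits);
        demo_set_bit(33, bits);                 /* lands in the second word */
        printf("words: %#x %#x  bit 7 set? %d\n",
               bits[0], bits[1], demo_test_bit(7, bits));

        old = demo_test_and_clear_bit(33, bits);
        printf("test_and_clear(33) -> %d, word now %#x\n", old, bits[1]);
        return 0;
}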
*/ -extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); - -/* - * Saving eflags is important. It switches not only IOPL between tasks, - * it also protects other tasks from NT leaking through sysenter etc. - */ -#define switch_to(prev,next,last) do { \ - unsigned long esi,edi; \ - asm volatile("pushfl\n\t" /* Save flags */ \ - "pushl %%ebp\n\t" \ - "movl %%esp,%0\n\t" /* save ESP */ \ - "movl %5,%%esp\n\t" /* restore ESP */ \ - "movl $1f,%1\n\t" /* save EIP */ \ - "pushl %6\n\t" /* restore EIP */ \ - "jmp __switch_to\n" \ - "1:\t" \ - "popl %%ebp\n\t" \ - "popfl" \ - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \ - "=a" (last),"=S" (esi),"=D" (edi) \ - :"m" (next->thread.esp),"m" (next->thread.eip), \ - "2" (prev), "d" (next)); \ -} while (0) - -#define _set_base(addr,base) do { unsigned long __pr; \ -__asm__ __volatile__ ("movw %%dx,%1\n\t" \ - "rorl $16,%%edx\n\t" \ - "movb %%dl,%2\n\t" \ - "movb %%dh,%3" \ - :"=&d" (__pr) \ - :"m" (*((addr)+2)), \ - "m" (*((addr)+4)), \ - "m" (*((addr)+7)), \ - "0" (base) \ - ); } while(0) - -#define _set_limit(addr,limit) do { unsigned long __lr; \ -__asm__ __volatile__ ("movw %%dx,%1\n\t" \ - "rorl $16,%%edx\n\t" \ - "movb %2,%%dh\n\t" \ - "andb $0xf0,%%dh\n\t" \ - "orb %%dh,%%dl\n\t" \ - "movb %%dl,%2" \ - :"=&d" (__lr) \ - :"m" (*(addr)), \ - "m" (*((addr)+6)), \ - "0" (limit) \ - ); } while(0) - -#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) ) -#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) ) - -/* - * Load a segment. Fall back on loading the zero - * segment if something goes wrong.. - */ -#define loadsegment(seg,value) \ - asm volatile("\n" \ - "1:\t" \ - "mov %0,%%" #seg "\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3:\t" \ - "pushl $0\n\t" \ - "popl %%" #seg "\n\t" \ - "jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".long 1b,3b\n" \ - ".previous" \ - : :"rm" (value)) - -/* - * Save a segment register away - */ -#define savesegment(seg, value) \ - asm volatile("mov %%" #seg ",%0":"=rm" (value)) - - -static inline void native_clts(void) -{ - asm volatile ("clts"); -} - -static inline unsigned long native_read_cr0(void) -{ - unsigned long val; - asm volatile("movl %%cr0,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr0(unsigned long val) -{ - asm volatile("movl %0,%%cr0": :"r" (val)); -} - -static inline unsigned long native_read_cr2(void) -{ - unsigned long val; - asm volatile("movl %%cr2,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr2(unsigned long val) -{ - asm volatile("movl %0,%%cr2": :"r" (val)); -} - -static inline unsigned long native_read_cr3(void) -{ - unsigned long val; - asm volatile("movl %%cr3,%0\n\t" :"=r" (val)); - return val; -} - -static inline void native_write_cr3(unsigned long val) -{ - asm volatile("movl %0,%%cr3": :"r" (val)); -} - -static inline unsigned long native_read_cr4(void) -{ - unsigned long val; - asm volatile("movl %%cr4,%0\n\t" :"=r" (val)); - return val; -} - -static inline unsigned long native_read_cr4_safe(void) -{ - unsigned long val; - /* This could fault if %cr4 does not exist */ - asm("1: movl %%cr4, %0 \n" - "2: \n" - ".section __ex_table,\"a\" \n" - ".long 1b,2b \n" - ".previous \n" - : "=r" (val): "0" (0)); - return val; -} - -static inline void native_write_cr4(unsigned long val) -{ - asm volatile("movl %0,%%cr4": :"r" (val)); -} - -static inline void native_wbinvd(void) -{ - asm volatile("wbinvd": : 
:"memory"); -} - - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define read_cr0() (native_read_cr0()) -#define write_cr0(x) (native_write_cr0(x)) -#define read_cr2() (native_read_cr2()) -#define write_cr2(x) (native_write_cr2(x)) -#define read_cr3() (native_read_cr3()) -#define write_cr3(x) (native_write_cr3(x)) -#define read_cr4() (native_read_cr4()) -#define read_cr4_safe() (native_read_cr4_safe()) -#define write_cr4(x) (native_write_cr4(x)) -#define wbinvd() (native_wbinvd()) - -/* Clear the 'TS' bit */ -#define clts() (native_clts()) - -#endif/* CONFIG_PARAVIRT */ - -/* Set the 'TS' bit */ -#define stts() write_cr0(8 | read_cr0()) - -#endif /* __KERNEL__ */ - -static inline unsigned long get_limit(unsigned long segment) -{ - unsigned long __limit; - __asm__("lsll %1,%0" - :"=r" (__limit):"r" (segment)); - return __limit+1; -} - -#define nop() __asm__ __volatile__ ("nop") - -/* - * Force strict CPU ordering. - * And yes, this is required on UP too when we're talking - * to devices. - * - * For now, "wmb()" doesn't actually do anything, as all - * Intel CPU's follow what Intel calls a *Processor Order*, - * in which all writes are seen in the program order even - * outside the CPU. - * - * I expect future Intel CPU's to have a weaker ordering, - * but I'd also expect them to finally get their act together - * and add some real memory barriers if so. - * - * Some non intel clones support out of order store. wmb() ceases to be a - * nop for these. - */ - - -#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2) -#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2) - -/** - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
- **/ - -#define read_barrier_depends() do { } while(0) - -#ifdef CONFIG_X86_OOSTORE -/* Actually there are no OOO store capable CPUs for now that do SSE, - but make it already an possibility. */ -#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM) -#else -#define wmb() __asm__ __volatile__ ("": : :"memory") -#endif - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#define set_mb(var, value) do { (void) xchg(&var, value); } while (0) -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#define set_mb(var, value) do { var = value; barrier(); } while (0) -#endif - -#include <linux/irqflags.h> - -/* - * disable hlt during certain critical i/o operations - */ -#define HAVE_DISABLE_HLT -void disable_hlt(void); -void enable_hlt(void); - -extern int es7000_plat; -void cpu_idle_wait(void); - -extern unsigned long arch_align_stack(unsigned long sp); -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - -void default_idle(void); - -#endif diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h deleted file mode 100644 index a21700352e7b..000000000000 --- a/include/asm-i386/termbits.h +++ /dev/null @@ -1,198 +0,0 @@ -#ifndef __ARCH_I386_TERMBITS_H__ -#define __ARCH_I386_TERMBITS_H__ - -#include <linux/posix_types.h> - -typedef unsigned char cc_t; -typedef unsigned int speed_t; -typedef unsigned int tcflag_t; - -#define NCCS 19 -struct termios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ -}; - -struct termios2 { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -struct ktermios { - tcflag_t c_iflag; /* input mode flags */ - tcflag_t c_oflag; /* output mode flags */ - tcflag_t c_cflag; /* control mode flags */ - tcflag_t c_lflag; /* local mode flags */ - cc_t c_line; /* line discipline */ - cc_t c_cc[NCCS]; /* control characters */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ -}; - -/* c_cc characters */ -#define VINTR 0 -#define VQUIT 1 -#define VERASE 2 -#define VKILL 3 -#define VEOF 4 -#define VTIME 5 -#define VMIN 6 -#define VSWTC 7 -#define VSTART 8 -#define VSTOP 9 -#define VSUSP 10 -#define VEOL 11 -#define VREPRINT 12 -#define VDISCARD 13 -#define VWERASE 14 -#define VLNEXT 15 -#define VEOL2 16 - -/* c_iflag bits */ -#define IGNBRK 0000001 -#define BRKINT 0000002 -#define IGNPAR 0000004 -#define PARMRK 0000010 -#define INPCK 0000020 -#define ISTRIP 0000040 -#define INLCR 0000100 -#define IGNCR 0000200 -#define ICRNL 0000400 -#define IUCLC 0001000 -#define IXON 0002000 -#define IXANY 0004000 -#define IXOFF 0010000 -#define IMAXBEL 0020000 -#define IUTF8 0040000 - -/* c_oflag bits */ -#define OPOST 0000001 -#define OLCUC 0000002 -#define ONLCR 0000004 -#define OCRNL 0000010 -#define ONOCR 0000020 -#define ONLRET 0000040 -#define OFILL 0000100 -#define OFDEL 0000200 -#define NLDLY 0000400 -#define NL0 0000000 
-#define NL1 0000400 -#define CRDLY 0003000 -#define CR0 0000000 -#define CR1 0001000 -#define CR2 0002000 -#define CR3 0003000 -#define TABDLY 0014000 -#define TAB0 0000000 -#define TAB1 0004000 -#define TAB2 0010000 -#define TAB3 0014000 -#define XTABS 0014000 -#define BSDLY 0020000 -#define BS0 0000000 -#define BS1 0020000 -#define VTDLY 0040000 -#define VT0 0000000 -#define VT1 0040000 -#define FFDLY 0100000 -#define FF0 0000000 -#define FF1 0100000 - -/* c_cflag bit meaning */ -#define CBAUD 0010017 -#define B0 0000000 /* hang up */ -#define B50 0000001 -#define B75 0000002 -#define B110 0000003 -#define B134 0000004 -#define B150 0000005 -#define B200 0000006 -#define B300 0000007 -#define B600 0000010 -#define B1200 0000011 -#define B1800 0000012 -#define B2400 0000013 -#define B4800 0000014 -#define B9600 0000015 -#define B19200 0000016 -#define B38400 0000017 -#define EXTA B19200 -#define EXTB B38400 -#define CSIZE 0000060 -#define CS5 0000000 -#define CS6 0000020 -#define CS7 0000040 -#define CS8 0000060 -#define CSTOPB 0000100 -#define CREAD 0000200 -#define PARENB 0000400 -#define PARODD 0001000 -#define HUPCL 0002000 -#define CLOCAL 0004000 -#define CBAUDEX 0010000 -#define BOTHER 0010000 -#define B57600 0010001 -#define B115200 0010002 -#define B230400 0010003 -#define B460800 0010004 -#define B500000 0010005 -#define B576000 0010006 -#define B921600 0010007 -#define B1000000 0010010 -#define B1152000 0010011 -#define B1500000 0010012 -#define B2000000 0010013 -#define B2500000 0010014 -#define B3000000 0010015 -#define B3500000 0010016 -#define B4000000 0010017 -#define CIBAUD 002003600000 -#define CMSPAR 010000000000 /* mark or space (stick) parity */ -#define CRTSCTS 020000000000 /* flow control */ - -#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ - -/* c_lflag bits */ -#define ISIG 0000001 -#define ICANON 0000002 -#define XCASE 0000004 -#define ECHO 0000010 -#define ECHOE 0000020 -#define ECHOK 0000040 -#define ECHONL 0000100 -#define NOFLSH 0000200 -#define TOSTOP 0000400 -#define ECHOCTL 0001000 -#define ECHOPRT 0002000 -#define ECHOKE 0004000 -#define FLUSHO 0010000 -#define PENDIN 0040000 -#define IEXTEN 0100000 - -/* tcflow() and TCXONC use these */ -#define TCOOFF 0 -#define TCOON 1 -#define TCIOFF 2 -#define TCION 3 - -/* tcflush() and TCFLSH use these */ -#define TCIFLUSH 0 -#define TCOFLUSH 1 -#define TCIOFLUSH 2 - -/* tcsetattr uses these */ -#define TCSANOW 0 -#define TCSADRAIN 1 -#define TCSAFLUSH 2 - -#endif diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h deleted file mode 100644 index 6fdb2c841b73..000000000000 --- a/include/asm-i386/termios.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _I386_TERMIOS_H -#define _I386_TERMIOS_H - -#include <asm/termbits.h> -#include <asm/ioctls.h> - -struct winsize { - unsigned short ws_row; - unsigned short ws_col; - unsigned short ws_xpixel; - unsigned short ws_ypixel; -}; - -#define NCC 8 -struct termio { - unsigned short c_iflag; /* input mode flags */ - unsigned short c_oflag; /* output mode flags */ - unsigned short c_cflag; /* control mode flags */ - unsigned short c_lflag; /* local mode flags */ - unsigned char c_line; /* line discipline */ - unsigned char c_cc[NCC]; /* control characters */ -}; - -/* modem lines */ -#define TIOCM_LE 0x001 -#define TIOCM_DTR 0x002 -#define TIOCM_RTS 0x004 -#define TIOCM_ST 0x008 -#define TIOCM_SR 0x010 -#define TIOCM_CTS 0x020 -#define TIOCM_CAR 0x040 -#define TIOCM_RNG 0x080 -#define TIOCM_DSR 0x100 -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RI TIOCM_RNG 
-#define TIOCM_OUT1 0x2000 -#define TIOCM_OUT2 0x4000 -#define TIOCM_LOOP 0x8000 - -/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ - -#ifdef __KERNEL__ - -/* intr=^C quit=^\ erase=del kill=^U - eof=^D vtime=\0 vmin=\1 sxtc=\0 - start=^Q stop=^S susp=^Z eol=\0 - reprint=^R discard=^U werase=^W lnext=^V - eol2=\0 -*/ -#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" - -/* - * Translate a "termio" structure into a "termios". Ugh. - */ -#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ - unsigned short __tmp; \ - get_user(__tmp,&(termio)->x); \ - *(unsigned short *) &(termios)->x = __tmp; \ -} - -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ - SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) - -/* - * Translate a "termios" structure into a "termio". Ugh. - */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) - -#endif /* __KERNEL__ */ - -#endif /* _I386_TERMIOS_H */ diff --git a/include/asm-i386/therm_throt.h b/include/asm-i386/therm_throt.h deleted file mode 100644 index 399bf6026b16..000000000000 --- a/include/asm-i386/therm_throt.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_I386_THERM_THROT_H__ -#define __ASM_I386_THERM_THROT_H__ 1 - -#include <asm/atomic.h> - -extern atomic_t therm_throt_en; -int therm_throt_process(int curr); - -#endif /* __ASM_I386_THERM_THROT_H__ */ diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h deleted file mode 100644 index 22a8cbcd35e2..000000000000 --- a/include/asm-i386/thread_info.h +++ /dev/null @@ -1,180 +0,0 @@ -/* thread_info.h: i386 low-level thread information - * - * Copyright (C) 2002 David Howells (dhowells@redhat.com) - * - Incorporating suggestions made by Linus Torvalds and Dave Miller - */ - -#ifndef _ASM_THREAD_INFO_H -#define _ASM_THREAD_INFO_H - -#ifdef __KERNEL__ - -#include <linux/compiler.h> -#include <asm/page.h> - -#ifndef __ASSEMBLY__ -#include <asm/processor.h> -#endif - -/* - * low level task data that entry.S needs immediate access to - * - this struct should fit entirely inside of one cache line - * - this struct shares the supervisor stack pages - * - if the contents of this structure are changed, the assembly constants must also be changed - */ -#ifndef __ASSEMBLY__ - -struct thread_info { - struct task_struct *task; /* main task structure */ - struct exec_domain *exec_domain; /* execution domain */ - unsigned long flags; /* low level flags */ - unsigned long status; /* thread-synchronous flags */ - __u32 cpu; /* current CPU */ - int preempt_count; /* 0 => preemptable, <0 => BUG */ - - - mm_segment_t addr_limit; /* thread 
address space: - 0-0xBFFFFFFF for user-thead - 0-0xFFFFFFFF for kernel-thread - */ - void *sysenter_return; - struct restart_block restart_block; - - unsigned long previous_esp; /* ESP of the previous stack in case - of nested (IRQ) stacks - */ - __u8 supervisor_stack[0]; -}; - -#else /* !__ASSEMBLY__ */ - -#include <asm/asm-offsets.h> - -#endif - -#define PREEMPT_ACTIVE 0x10000000 -#ifdef CONFIG_4KSTACKS -#define THREAD_SIZE (4096) -#else -#define THREAD_SIZE (8192) -#endif - -#define STACK_WARN (THREAD_SIZE/8) -/* - * macros/functions for gaining access to the thread information structure - * - * preempt_count needs to be 1 initially, until the scheduler is functional. - */ -#ifndef __ASSEMBLY__ - -#define INIT_THREAD_INFO(tsk) \ -{ \ - .task = &tsk, \ - .exec_domain = &default_exec_domain, \ - .flags = 0, \ - .cpu = 0, \ - .preempt_count = 1, \ - .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -} - -#define init_thread_info (init_thread_union.thread_info) -#define init_stack (init_thread_union.stack) - - -/* how to get the current stack pointer from C */ -register unsigned long current_stack_pointer asm("esp") __attribute_used__; - -/* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1)); -} - -/* thread information allocation */ -#ifdef CONFIG_DEBUG_STACK_USAGE -#define alloc_thread_info(tsk) ((struct thread_info *) \ - __get_free_pages(GFP_KERNEL| __GFP_ZERO, get_order(THREAD_SIZE))) -#else -#define alloc_thread_info(tsk) ((struct thread_info *) \ - __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE))) -#endif - -#define free_thread_info(info) free_pages((unsigned long)(info), get_order(THREAD_SIZE)) - -#else /* !__ASSEMBLY__ */ - -/* how to get the thread information struct from ASM */ -#define GET_THREAD_INFO(reg) \ - movl $-THREAD_SIZE, reg; \ - andl %esp, reg - -/* use this one if reg already contains %esp */ -#define GET_THREAD_INFO_WITH_ESP(reg) \ - andl $-THREAD_SIZE, reg - -#endif - -/* - * thread information flags - * - these are process state flags that various assembly files may need to access - * - pending work-to-be-done flags are in LSW - * - other flags in MSW - */ -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ -#define TIF_SIGPENDING 1 /* signal pending */ -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */ -#define TIF_IRET 4 /* return with iret */ -#define TIF_SYSCALL_EMU 5 /* syscall emulation active */ -#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ -#define TIF_SECCOMP 7 /* secure computing */ -#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ -#define TIF_MEMDIE 16 -#define TIF_DEBUG 17 /* uses debug registers */ -#define TIF_IO_BITMAP 18 /* uses I/O bitmap */ -#define TIF_FREEZE 19 /* is freezing for suspend */ -#define TIF_NOTSC 20 /* TSC is not accessible in userland */ - -#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) -#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) -#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) -#define _TIF_IRET (1<<TIF_IRET) -#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU) -#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) -#define _TIF_SECCOMP (1<<TIF_SECCOMP) -#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) -#define _TIF_DEBUG (1<<TIF_DEBUG) -#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) -#define 
_TIF_FREEZE (1<<TIF_FREEZE) -#define _TIF_NOTSC (1<<TIF_NOTSC) - -/* work to do on interrupt/exception return */ -#define _TIF_WORK_MASK \ - (0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SECCOMP | _TIF_SYSCALL_EMU)) -/* work to do on any return to u-space */ -#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) - -/* flags to check in __switch_to() */ -#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG) -#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC) - -/* - * Thread-synchronous status. - * - * This is different from the flags in that nobody else - * ever touches our thread-synchronous status, so we don't - * have to worry about atomic accesses. - */ -#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ -#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */ - -#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) - -#endif /* __KERNEL__ */ - -#endif /* _ASM_THREAD_INFO_H */ diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h deleted file mode 100644 index eac011366dc2..000000000000 --- a/include/asm-i386/time.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef _ASMi386_TIME_H -#define _ASMi386_TIME_H - -#include <linux/efi.h> -#include "mach_time.h" - -static inline unsigned long native_get_wallclock(void) -{ - unsigned long retval; - - if (efi_enabled) - retval = efi_get_time(); - else - retval = mach_get_cmos_time(); - - return retval; -} - -static inline int native_set_wallclock(unsigned long nowtime) -{ - int retval; - - if (efi_enabled) - retval = efi_set_rtc_mmss(nowtime); - else - retval = mach_set_rtc_mmss(nowtime); - - return retval; -} - -extern void (*late_time_init)(void); -extern void hpet_time_init(void); - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else /* !CONFIG_PARAVIRT */ - -#define get_wallclock() native_get_wallclock() -#define set_wallclock(x) native_set_wallclock(x) -#define choose_time_init() hpet_time_init - -#endif /* CONFIG_PARAVIRT */ - -#endif diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h deleted file mode 100644 index 0db7e994fb8b..000000000000 --- a/include/asm-i386/timer.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef _ASMi386_TIMER_H -#define _ASMi386_TIMER_H -#include <linux/init.h> -#include <linux/pm.h> - -#define TICK_SIZE (tick_nsec / 1000) - -unsigned long long native_sched_clock(void); -unsigned long native_calculate_cpu_khz(void); - -extern int timer_ack; -extern int no_timer_check; -extern int recalibrate_cpu_khz(void); - -#ifndef CONFIG_PARAVIRT -#define calculate_cpu_khz() native_calculate_cpu_khz() -#endif - -/* Accellerators for sched_clock() - * convert from cycles(64bits) => nanoseconds (64bits) - * basic equation: - * ns = cycles / (freq / ns_per_sec) - * ns = cycles * (ns_per_sec / freq) - * ns = cycles * (10^9 / (cpu_khz * 10^3)) - * ns = cycles * (10^6 / cpu_khz) - * - * Then we use scaling math (suggested by george@mvista.com) to get: - * ns = cycles * (10^6 * SC / cpu_khz) / SC - * ns = cycles * cyc2ns_scale / SC - * - * And since SC is a constant power of two, we can convert the div - * into a shift. - * - * We can use khz divisor instead of mhz to keep a better percision, since - * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. - * (mathieu.desnoyers@polymtl.ca) - * - * -johnstul@us.ibm.com "math is hard, lets go shopping!" 
- */ -extern unsigned long cyc2ns_scale __read_mostly; - -#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ - -static inline unsigned long long cycles_2_ns(unsigned long long cyc) -{ - return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; -} - - -#endif diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h deleted file mode 100644 index 3666044409f0..000000000000 --- a/include/asm-i386/timex.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * linux/include/asm-i386/timex.h - * - * i386 architecture timex specifications - */ -#ifndef _ASMi386_TIMEX_H -#define _ASMi386_TIMEX_H - -#include <asm/processor.h> -#include <asm/tsc.h> - -#ifdef CONFIG_X86_ELAN -# define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */ -#else -# define CLOCK_TICK_RATE 1193182 /* Underlying HZ */ -#endif - - -extern int read_current_timer(unsigned long *timer_value); -#define ARCH_HAS_READ_CURRENT_TIMER 1 - -#endif diff --git a/include/asm-i386/tlb.h b/include/asm-i386/tlb.h deleted file mode 100644 index c006c5c92bea..000000000000 --- a/include/asm-i386/tlb.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef _I386_TLB_H -#define _I386_TLB_H - -/* - * x86 doesn't need any special per-pte or - * per-vma handling.. - */ -#define tlb_start_vma(tlb, vma) do { } while (0) -#define tlb_end_vma(tlb, vma) do { } while (0) -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) - -/* - * .. because we flush the whole mm when it - * fills up. - */ -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) - -#include <asm-generic/tlb.h> - -#endif diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h deleted file mode 100644 index a50fa6741486..000000000000 --- a/include/asm-i386/tlbflush.h +++ /dev/null @@ -1,175 +0,0 @@ -#ifndef _I386_TLBFLUSH_H -#define _I386_TLBFLUSH_H - -#include <linux/mm.h> -#include <asm/processor.h> - -#ifdef CONFIG_PARAVIRT -#include <asm/paravirt.h> -#else -#define __flush_tlb() __native_flush_tlb() -#define __flush_tlb_global() __native_flush_tlb_global() -#define __flush_tlb_single(addr) __native_flush_tlb_single(addr) -#endif - -#define __native_flush_tlb() \ - do { \ - unsigned int tmpreg; \ - \ - __asm__ __volatile__( \ - "movl %%cr3, %0; \n" \ - "movl %0, %%cr3; # flush TLB \n" \ - : "=r" (tmpreg) \ - :: "memory"); \ - } while (0) - -/* - * Global pages have to be flushed a bit differently. Not a real - * performance problem because this does not happen often. 
- */ -#define __native_flush_tlb_global() \ - do { \ - unsigned int tmpreg, cr4, cr4_orig; \ - \ - __asm__ __volatile__( \ - "movl %%cr4, %2; # turn off PGE \n" \ - "movl %2, %1; \n" \ - "andl %3, %1; \n" \ - "movl %1, %%cr4; \n" \ - "movl %%cr3, %0; \n" \ - "movl %0, %%cr3; # flush TLB \n" \ - "movl %2, %%cr4; # turn PGE back on \n" \ - : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \ - : "i" (~X86_CR4_PGE) \ - : "memory"); \ - } while (0) - -#define __native_flush_tlb_single(addr) \ - __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") - -# define __flush_tlb_all() \ - do { \ - if (cpu_has_pge) \ - __flush_tlb_global(); \ - else \ - __flush_tlb(); \ - } while (0) - -#define cpu_has_invlpg (boot_cpu_data.x86 > 3) - -#ifdef CONFIG_X86_INVLPG -# define __flush_tlb_one(addr) __flush_tlb_single(addr) -#else -# define __flush_tlb_one(addr) \ - do { \ - if (cpu_has_invlpg) \ - __flush_tlb_single(addr); \ - else \ - __flush_tlb(); \ - } while (0) -#endif - -/* - * TLB flushing: - * - * - flush_tlb() flushes the current mm struct TLBs - * - flush_tlb_all() flushes all processes TLBs - * - flush_tlb_mm(mm) flushes the specified mm context TLB's - * - flush_tlb_page(vma, vmaddr) flushes one page - * - flush_tlb_range(vma, start, end) flushes a range of pages - * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages - * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables - * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus - * - * ..but the i386 has somewhat limited tlb flushing capabilities, - * and page-granular flushes are available only on i486 and up. - */ - -#define TLB_FLUSH_ALL 0xffffffff - - -#ifndef CONFIG_SMP - -#include <linux/sched.h> - -#define flush_tlb() __flush_tlb() -#define flush_tlb_all() __flush_tlb_all() -#define local_flush_tlb() __flush_tlb() - -static inline void flush_tlb_mm(struct mm_struct *mm) -{ - if (mm == current->active_mm) - __flush_tlb(); -} - -static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long addr) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb_one(addr); -} - -static inline void flush_tlb_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (vma->vm_mm == current->active_mm) - __flush_tlb(); -} - -static inline void native_flush_tlb_others(const cpumask_t *cpumask, - struct mm_struct *mm, unsigned long va) -{ -} - -#else /* SMP */ - -#include <asm/smp.h> - -#define local_flush_tlb() \ - __flush_tlb() - -extern void flush_tlb_all(void); -extern void flush_tlb_current_task(void); -extern void flush_tlb_mm(struct mm_struct *); -extern void flush_tlb_page(struct vm_area_struct *, unsigned long); - -#define flush_tlb() flush_tlb_current_task() - -static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end) -{ - flush_tlb_mm(vma->vm_mm); -} - -void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm, - unsigned long va); - -#define TLBSTATE_OK 1 -#define TLBSTATE_LAZY 2 - -struct tlb_state -{ - struct mm_struct *active_mm; - int state; - char __cacheline_padding[L1_CACHE_BYTES-8]; -}; -DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); -#endif /* SMP */ - -#ifndef CONFIG_PARAVIRT -#define flush_tlb_others(mask, mm, va) \ - native_flush_tlb_others(&mask, mm, va) -#endif - -static inline void flush_tlb_kernel_range(unsigned long start, - unsigned long end) -{ - flush_tlb_all(); -} - -static inline void flush_tlb_pgtables(struct mm_struct *mm, - unsigned long start, unsigned long end) -{ - 
/* i386 does not keep any page table caches in TLB */ -} - -#endif /* _I386_TLBFLUSH_H */ diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h deleted file mode 100644 index 19b2dafd0c81..000000000000 --- a/include/asm-i386/topology.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * linux/include/asm-i386/topology.h - * - * Written by: Matthew Dobson, IBM Corporation - * - * Copyright (C) 2002, IBM Corp. - * - * All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Send feedback to <colpatch@us.ibm.com> - */ -#ifndef _ASM_I386_TOPOLOGY_H -#define _ASM_I386_TOPOLOGY_H - -#ifdef CONFIG_X86_HT -#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id) -#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id) -#define topology_core_siblings(cpu) (cpu_core_map[cpu]) -#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) -#endif - -#ifdef CONFIG_NUMA - -#include <asm/mpspec.h> - -#include <linux/cpumask.h> - -/* Mappings between logical cpu number and node number */ -extern cpumask_t node_2_cpu_mask[]; -extern int cpu_2_node[]; - -/* Returns the number of the node containing CPU 'cpu' */ -static inline int cpu_to_node(int cpu) -{ - return cpu_2_node[cpu]; -} - -/* Returns the number of the node containing Node 'node'. This architecture is flat, - so it is a pretty simple function! */ -#define parent_node(node) (node) - -/* Returns a bitmask of CPUs on Node 'node'. */ -static inline cpumask_t node_to_cpumask(int node) -{ - return node_2_cpu_mask[node]; -} - -/* Returns the number of the first CPU on Node 'node'. */ -static inline int node_to_first_cpu(int node) -{ - cpumask_t mask = node_to_cpumask(node); - return first_cpu(mask); -} - -#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node -#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus)) - -/* sched_domains SD_NODE_INIT for NUMAQ machines */ -#define SD_NODE_INIT (struct sched_domain) { \ - .span = CPU_MASK_NONE, \ - .parent = NULL, \ - .child = NULL, \ - .groups = NULL, \ - .min_interval = 8, \ - .max_interval = 32, \ - .busy_factor = 32, \ - .imbalance_pct = 125, \ - .cache_nice_tries = 1, \ - .busy_idx = 3, \ - .idle_idx = 1, \ - .newidle_idx = 2, \ - .wake_idx = 1, \ - .flags = SD_LOAD_BALANCE \ - | SD_BALANCE_EXEC \ - | SD_BALANCE_FORK \ - | SD_SERIALIZE \ - | SD_WAKE_BALANCE, \ - .last_balance = jiffies, \ - .balance_interval = 1, \ - .nr_balance_failed = 0, \ -} - -extern unsigned long node_start_pfn[]; -extern unsigned long node_end_pfn[]; -extern unsigned long node_remap_size[]; - -#define node_has_online_mem(nid) (node_start_pfn[nid] != node_end_pfn[nid]) - -#else /* !CONFIG_NUMA */ -/* - * Other i386 platforms should define their own version of the - * above macros here. 
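/*
 * Illustrative sketch, not part of the original header: walking the CPUs of
 * the node that owns a given CPU with the NUMA helpers above.  The function
 * name and the printk format are arbitrary.
 */
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/topology.h>

static void my_dump_node_of(int cpu)
{
	int node = cpu_to_node(cpu);
	cpumask_t mask = node_to_cpumask(node);
	int sibling;

	for_each_cpu_mask(sibling, mask)
		printk(KERN_DEBUG "cpu %d: node %d also holds cpu %d\n",
		       cpu, node, sibling);
}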
- */ - -#include <asm-generic/topology.h> - -#endif /* CONFIG_NUMA */ - -extern cpumask_t cpu_coregroup_map(int cpu); - -#ifdef CONFIG_SMP -#define mc_capable() (boot_cpu_data.x86_max_cores > 1) -#define smt_capable() (smp_num_siblings > 1) -#endif - -#endif /* _ASM_I386_TOPOLOGY_H */ diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h deleted file mode 100644 index a4d806610b7f..000000000000 --- a/include/asm-i386/tsc.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * linux/include/asm-i386/tsc.h - * - * i386 TSC related functions - */ -#ifndef _ASM_i386_TSC_H -#define _ASM_i386_TSC_H - -#include <asm/processor.h> - -/* - * Standard way to access the cycle counter. - */ -typedef unsigned long long cycles_t; - -extern unsigned int cpu_khz; -extern unsigned int tsc_khz; - -static inline cycles_t get_cycles(void) -{ - unsigned long long ret = 0; - -#ifndef CONFIG_X86_TSC - if (!cpu_has_tsc) - return 0; -#endif - -#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC) - rdtscll(ret); -#endif - return ret; -} - -/* Like get_cycles, but make sure the CPU is synchronized. */ -static __always_inline cycles_t get_cycles_sync(void) -{ - unsigned long long ret; - unsigned eax, edx; - - /* - * Use RDTSCP if possible; it is guaranteed to be synchronous - * and doesn't cause a VMEXIT on Hypervisors - */ - alternative_io(ASM_NOP3, ".byte 0x0f,0x01,0xf9", X86_FEATURE_RDTSCP, - ASM_OUTPUT2("=a" (eax), "=d" (edx)), - "a" (0U), "d" (0U) : "ecx", "memory"); - ret = (((unsigned long long)edx) << 32) | ((unsigned long long)eax); - if (ret) - return ret; - - /* - * Don't do an additional sync on CPUs where we know - * RDTSC is already synchronous: - */ - alternative_io("cpuid", ASM_NOP2, X86_FEATURE_SYNC_RDTSC, - "=a" (eax), "0" (1) : "ebx","ecx","edx","memory"); - rdtscll(ret); - - return ret; -} - -extern void tsc_init(void); -extern void mark_tsc_unstable(char *reason); -extern int unsynchronized_tsc(void); -extern void init_tsc_clocksource(void); -int check_tsc_unstable(void); - -/* - * Boot-time check whether the TSCs are synchronized across - * all CPUs/cores: - */ -extern void check_tsc_sync_source(int cpu); -extern void check_tsc_sync_target(void); - -#endif diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h deleted file mode 100644 index ad0a55bd782f..000000000000 --- a/include/asm-i386/types.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef _I386_TYPES_H -#define _I386_TYPES_H - -#ifndef __ASSEMBLY__ - -typedef unsigned short umode_t; - -/* - * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the - * header files exported to user space - */ - -typedef __signed__ char __s8; -typedef unsigned char __u8; - -typedef __signed__ short __s16; -typedef unsigned short __u16; - -typedef __signed__ int __s32; -typedef unsigned int __u32; - -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) -typedef __signed__ long long __s64; -typedef unsigned long long __u64; -#endif - -#endif /* __ASSEMBLY__ */ - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#ifndef __ASSEMBLY__ - - -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -typedef signed long long s64; -typedef unsigned long long u64; - -/* DMA addresses come in generic and 64-bit flavours. 
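/*
 * Illustrative sketch, not part of the original header: timing a short
 * section with get_cycles() from tsc.h above and converting the delta with
 * cpu_khz.  The function being timed, my_work(), is hypothetical.
 */
#include <linux/kernel.h>
#include <asm/tsc.h>

extern void my_work(void);		/* hypothetical */

static void my_time_work(void)
{
	cycles_t t0, t1;

	t0 = get_cycles();
	my_work();
	t1 = get_cycles();

	/* cpu_khz is cycles per millisecond, so delta * 1000 / cpu_khz is in us */
	printk(KERN_INFO "my_work took ~%llu us\n",
	       (unsigned long long)(t1 - t0) * 1000ULL / cpu_khz);
}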
*/ - -#ifdef CONFIG_HIGHMEM64G -typedef u64 dma_addr_t; -#else -typedef u32 dma_addr_t; -#endif -typedef u64 dma64_addr_t; - -#endif /* __ASSEMBLY__ */ - -#endif /* __KERNEL__ */ - -#endif diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h deleted file mode 100644 index d2a4f7be9c2c..000000000000 --- a/include/asm-i386/uaccess.h +++ /dev/null @@ -1,590 +0,0 @@ -#ifndef __i386_UACCESS_H -#define __i386_UACCESS_H - -/* - * User space memory access functions - */ -#include <linux/errno.h> -#include <linux/thread_info.h> -#include <linux/prefetch.h> -#include <linux/string.h> -#include <asm/page.h> - -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ - -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - - -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFFUL) -#define USER_DS MAKE_MM_SEG(PAGE_OFFSET) - -#define get_ds() (KERNEL_DS) -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define segment_eq(a,b) ((a).seg == (b).seg) - -/* - * movsl can be slow when source and dest are not both 8-byte aligned - */ -#ifdef CONFIG_X86_INTEL_USERCOPY -extern struct movsl_mask { - int mask; -} ____cacheline_aligned_in_smp movsl_mask; -#endif - -#define __addr_ok(addr) ((unsigned long __force)(addr) < (current_thread_info()->addr_limit.seg)) - -/* - * Test whether a block of memory is a valid user space address. - * Returns 0 if the range is valid, nonzero otherwise. - * - * This is equivalent to the following test: - * (u33)addr + (u33)size >= (u33)current->addr_limit.seg - * - * This needs 33-bit arithmetic. We have a carry... - */ -#define __range_ok(addr,size) ({ \ - unsigned long flag,roksum; \ - __chk_user_ptr(addr); \ - asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \ - :"=&r" (flag), "=r" (roksum) \ - :"1" (addr),"g" ((int)(size)),"rm" (current_thread_info()->addr_limit.seg)); \ - flag; }) - -/** - * access_ok: - Checks if a user space pointer is valid - * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that - * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe - * to write to a block, it is always safe to read from it. - * @addr: User space pointer to start of block to check - * @size: Size of block to check - * - * Context: User context only. This function may sleep. - * - * Checks if a pointer to a block of memory in user space is valid. - * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * - * Note that, depending on architecture, this function probably just - * checks that the pointer is in the user space range - after calling - * this function, memory access functions may still return -EFAULT. - */ -#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) - -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. 
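/*
 * Illustrative sketch, not part of the original header: the 33-bit check that
 * __range_ok() performs with the carry flag, spelled out in portable C with a
 * 64-bit intermediate.  The limit value below is an assumption.
 */
#include <stdio.h>

static int my_range_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/* rejects both blocks past the limit and blocks that wrap around 2^32 */
	return (unsigned long long)addr + size <= (unsigned long long)limit;
}

int main(void)
{
	unsigned long limit = 0xC0000000UL;	/* a PAGE_OFFSET-style user limit */

	printf("%d\n", my_range_ok(0x08048000UL, 4096, limit));	/* 1: fits  */
	printf("%d\n", my_range_ok(0xFFFFF000UL, 8192, limit));	/* 0: wraps */
	return 0;
}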
Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); - -/* - * These are the main single-value transfer routines. They automatically - * use the right size if we just have the right pointer type. - * - * This gets kind of ugly. We want to return _two_ values in "get_user()" - * and yet we don't want to do any pointers, because that is too much - * of a performance impact. Thus we have a few rather ugly macros here, - * and hide all the ugliness from the user. - * - * The "__xxx" versions of the user access functions are versions that - * do not verify the address space, that must have been done previously - * with a separate "access_ok()" call (this is used when we do multiple - * accesses to the same area of user memory). - */ - -extern void __get_user_1(void); -extern void __get_user_2(void); -extern void __get_user_4(void); - -#define __get_user_x(size,ret,x,ptr) \ - __asm__ __volatile__("call __get_user_" #size \ - :"=a" (ret),"=d" (x) \ - :"0" (ptr)) - - -/* Careful: we have to cast the result to the type of the pointer for sign reasons */ -/** - * get_user: - Get a simple variable from user space. - * @x: Variable to store result. - * @ptr: Source address, in user space. - * - * Context: User context only. This function may sleep. - * - * This macro copies a single simple variable from user space to kernel - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Returns zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. - */ -#define get_user(x,ptr) \ -({ int __ret_gu; \ - unsigned long __val_gu; \ - __chk_user_ptr(ptr); \ - switch(sizeof (*(ptr))) { \ - case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ - case 2: __get_user_x(2,__ret_gu,__val_gu,ptr); break; \ - case 4: __get_user_x(4,__ret_gu,__val_gu,ptr); break; \ - default: __get_user_x(X,__ret_gu,__val_gu,ptr); break; \ - } \ - (x) = (__typeof__(*(ptr)))__val_gu; \ - __ret_gu; \ -}) - -extern void __put_user_bad(void); - -/* - * Strange magic calling convention: pointer in %ecx, - * value in %eax(:%edx), return value in %eax, no clobbers. - */ -extern void __put_user_1(void); -extern void __put_user_2(void); -extern void __put_user_4(void); -extern void __put_user_8(void); - -#define __put_user_1(x, ptr) __asm__ __volatile__("call __put_user_1":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_2(x, ptr) __asm__ __volatile__("call __put_user_2":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_4(x, ptr) __asm__ __volatile__("call __put_user_4":"=a" (__ret_pu):"0" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_8(x, ptr) __asm__ __volatile__("call __put_user_8":"=a" (__ret_pu):"A" ((typeof(*(ptr)))(x)), "c" (ptr)) -#define __put_user_X(x, ptr) __asm__ __volatile__("call __put_user_X":"=a" (__ret_pu):"c" (ptr)) - -/** - * put_user: - Write a simple value into user space. - * @x: Value to copy to user space. - * @ptr: Destination address, in user space. - * - * Context: User context only. This function may sleep. - * - * This macro copies a single simple value from kernel space to user - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. 
- * - * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. - * - * Returns zero on success, or -EFAULT on error. - */ -#ifdef CONFIG_X86_WP_WORKS_OK - -#define put_user(x,ptr) \ -({ int __ret_pu; \ - __typeof__(*(ptr)) __pu_val; \ - __chk_user_ptr(ptr); \ - __pu_val = x; \ - switch(sizeof(*(ptr))) { \ - case 1: __put_user_1(__pu_val, ptr); break; \ - case 2: __put_user_2(__pu_val, ptr); break; \ - case 4: __put_user_4(__pu_val, ptr); break; \ - case 8: __put_user_8(__pu_val, ptr); break; \ - default:__put_user_X(__pu_val, ptr); break; \ - } \ - __ret_pu; \ -}) - -#else -#define put_user(x,ptr) \ -({ \ - int __ret_pu; \ - __typeof__(*(ptr)) __pus_tmp = x; \ - __ret_pu=0; \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, \ - sizeof(*(ptr))) != 0)) \ - __ret_pu=-EFAULT; \ - __ret_pu; \ - }) - - -#endif - -/** - * __get_user: - Get a simple variable from user space, with less checking. - * @x: Variable to store result. - * @ptr: Source address, in user space. - * - * Context: User context only. This function may sleep. - * - * This macro copies a single simple variable from user space to kernel - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and the result of - * dereferencing @ptr must be assignable to @x without a cast. - * - * Caller must check the pointer with access_ok() before calling this - * function. - * - * Returns zero on success, or -EFAULT on error. - * On error, the variable @x is set to zero. - */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) - - -/** - * __put_user: - Write a simple value into user space, with less checking. - * @x: Value to copy to user space. - * @ptr: Destination address, in user space. - * - * Context: User context only. This function may sleep. - * - * This macro copies a single simple value from kernel space to user - * space. It supports simple types like char and int, but not larger - * data types like structures or arrays. - * - * @ptr must have pointer-to-simple-variable type, and @x must be assignable - * to the result of dereferencing @ptr. - * - * Caller must check the pointer with access_ok() before calling this - * function. - * - * Returns zero on success, or -EFAULT on error. 
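/*
 * Illustrative sketch, not part of the original header: get_user()/put_user()
 * in a hypothetical helper that increments a counter supplied by user space.
 */
#include <linux/errno.h>
#include <asm/uaccess.h>

static int my_bump_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* -EFAULT on a bad user pointer */
		return -EFAULT;
	val++;
	return put_user(val, uptr);	/* 0 on success, -EFAULT on error */
}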
- */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) - -#define __put_user_nocheck(x,ptr,size) \ -({ \ - long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err,-EFAULT); \ - __pu_err; \ -}) - - -#define __put_user_u64(x, addr, err) \ - __asm__ __volatile__( \ - "1: movl %%eax,0(%2)\n" \ - "2: movl %%edx,4(%2)\n" \ - "3:\n" \ - ".section .fixup,\"ax\"\n" \ - "4: movl %3,%0\n" \ - " jmp 3b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,4b\n" \ - " .long 2b,4b\n" \ - ".previous" \ - : "=r"(err) \ - : "A" (x), "r" (addr), "i"(-EFAULT), "0"(err)) - -#ifdef CONFIG_X86_WP_WORKS_OK - -#define __put_user_size(x,ptr,size,retval,errret) \ -do { \ - retval = 0; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b","b","iq",errret);break; \ - case 2: __put_user_asm(x,ptr,retval,"w","w","ir",errret);break; \ - case 4: __put_user_asm(x,ptr,retval,"l","","ir",errret); break; \ - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ - default: __put_user_bad(); \ - } \ -} while (0) - -#else - -#define __put_user_size(x,ptr,size,retval,errret) \ -do { \ - __typeof__(*(ptr)) __pus_tmp = x; \ - retval = 0; \ - \ - if(unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \ - retval = errret; \ -} while (0) - -#endif -struct __large_struct { unsigned long buf[100]; }; -#define __m(x) (*(struct __large_struct __user *)(x)) - -/* - * Tell gcc we read from memory instead of writing: this is because - * we do not write to any memory gcc knows about, so there are no - * aliasing issues. - */ -#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %"rtype"1,%2\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,3b\n" \ - ".previous" \ - : "=r"(err) \ - : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)) - - -#define __get_user_nocheck(x,ptr,size) \ -({ \ - long __gu_err; \ - unsigned long __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ - (x) = (__typeof__(*(ptr)))__gu_val; \ - __gu_err; \ -}) - -extern long __get_user_bad(void); - -#define __get_user_size(x,ptr,size,retval,errret) \ -do { \ - retval = 0; \ - __chk_user_ptr(ptr); \ - switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"b","b","=q",errret);break; \ - case 2: __get_user_asm(x,ptr,retval,"w","w","=r",errret);break; \ - case 4: __get_user_asm(x,ptr,retval,"l","","=r",errret);break; \ - default: (x) = __get_user_bad(); \ - } \ -} while (0) - -#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ - __asm__ __volatile__( \ - "1: mov"itype" %2,%"rtype"1\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "3: movl %3,%0\n" \ - " xor"itype" %"rtype"1,%"rtype"1\n" \ - " jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,3b\n" \ - ".previous" \ - : "=r"(err), ltype (x) \ - : "m"(__m(addr)), "i"(errret), "0"(err)) - - -unsigned long __must_check __copy_to_user_ll(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nozero(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache(void *to, - const void __user *from, unsigned long n); -unsigned long __must_check 
__copy_from_user_ll_nocache_nozero(void *to, - const void __user *from, unsigned long n); - -/** - * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * The caller should also make sure he pins the user space address - * so that the we don't result in page fault and sleep. - * - * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault - * we return the initial request size (1, 2 or 4), as copy_*_user should do. - * If a store crosses a page boundary and gets a fault, the x86 will not write - * anything, so this is accurate. - */ - -static __always_inline unsigned long __must_check -__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) -{ - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret, 1); - return ret; - case 2: - __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret, 2); - return ret; - case 4: - __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret, 4); - return ret; - } - } - return __copy_to_user_ll(to, from, n); -} - -/** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static __always_inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_sleep(); - return __copy_to_user_inatomic(to, from, n); -} - -static __always_inline unsigned long -__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) -{ - /* Avoid zeroing the tail if the copy fails.. - * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, - * but as the zeroing behaviour is only significant when n is not - * constant, that shouldn't be a problem. - */ - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - __get_user_size(*(u8 *)to, from, 1, ret, 1); - return ret; - case 2: - __get_user_size(*(u16 *)to, from, 2, ret, 2); - return ret; - case 4: - __get_user_size(*(u32 *)to, from, 4, ret, 4); - return ret; - } - } - return __copy_from_user_ll_nozero(to, from, n); -} - -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
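/*
 * Illustrative sketch, not part of the original header: the usual
 * copy_from_user()/copy_to_user() round trip in a hypothetical handler.
 * A nonzero return from either copy is the number of bytes left uncopied.
 */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct my_req {				/* hypothetical request layout */
	int cmd;
	int arg;
};

static int my_handle_req(void __user *ubuf)
{
	struct my_req req;

	if (copy_from_user(&req, ubuf, sizeof(req)))
		return -EFAULT;
	req.arg *= 2;			/* some trivial transformation */
	if (copy_to_user(ubuf, &req, sizeof(req)))
		return -EFAULT;
	return 0;
}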
- * - * An alternate version - __copy_from_user_inatomic() - may be called from - * atomic context and will fail rather than sleep. In this case the - * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h - * for explanation of why this is needed. - */ -static __always_inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - might_sleep(); - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - __get_user_size(*(u8 *)to, from, 1, ret, 1); - return ret; - case 2: - __get_user_size(*(u16 *)to, from, 2, ret, 2); - return ret; - case 4: - __get_user_size(*(u32 *)to, from, 4, ret, 4); - return ret; - } - } - return __copy_from_user_ll(to, from, n); -} - -#define ARCH_HAS_NOCACHE_UACCESS - -static __always_inline unsigned long __copy_from_user_nocache(void *to, - const void __user *from, unsigned long n) -{ - might_sleep(); - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - __get_user_size(*(u8 *)to, from, 1, ret, 1); - return ret; - case 2: - __get_user_size(*(u16 *)to, from, 2, ret, 2); - return ret; - case 4: - __get_user_size(*(u32 *)to, from, 4, ret, 4); - return ret; - } - } - return __copy_from_user_ll_nocache(to, from, n); -} - -static __always_inline unsigned long -__copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) -{ - return __copy_from_user_ll_nocache_nozero(to, from, n); -} - -unsigned long __must_check copy_to_user(void __user *to, - const void *from, unsigned long n); -unsigned long __must_check copy_from_user(void *to, - const void __user *from, unsigned long n); -long __must_check strncpy_from_user(char *dst, const char __user *src, - long count); -long __must_check __strncpy_from_user(char *dst, - const char __user *src, long count); - -/** - * strlen_user: - Get the size of a string in user space. - * @str: The string to measure. - * - * Context: User context only. This function may sleep. - * - * Get the size of a NUL-terminated string in user space. - * - * Returns the size of the string INCLUDING the terminating NUL. - * On exception, returns 0. - * - * If there is a limit on the length of a valid string, you may wish to - * consider using strnlen_user() instead. - */ -#define strlen_user(str) strnlen_user(str, LONG_MAX) - -long strnlen_user(const char __user *str, long n); -unsigned long __must_check clear_user(void __user *mem, unsigned long len); -unsigned long __must_check __clear_user(void __user *mem, unsigned long len); - -#endif /* __i386_UACCESS_H */ diff --git a/include/asm-i386/ucontext.h b/include/asm-i386/ucontext.h deleted file mode 100644 index b0db36925f55..000000000000 --- a/include/asm-i386/ucontext.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASMi386_UCONTEXT_H -#define _ASMi386_UCONTEXT_H - -struct ucontext { - unsigned long uc_flags; - struct ucontext *uc_link; - stack_t uc_stack; - struct sigcontext uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ -}; - -#endif /* !_ASMi386_UCONTEXT_H */ diff --git a/include/asm-i386/unaligned.h b/include/asm-i386/unaligned.h deleted file mode 100644 index 7acd7957621e..000000000000 --- a/include/asm-i386/unaligned.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef __I386_UNALIGNED_H -#define __I386_UNALIGNED_H - -/* - * The i386 can do unaligned accesses itself. - * - * The strange macros are there to make sure these can't - * be misused in a way that makes them not work on other - * architectures where unaligned accesses aren't as simple. 
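/*
 * Illustrative sketch, not part of the original header: reading a 16-bit
 * big-endian length field that sits at an odd offset in a packet buffer,
 * using the get_unaligned() macro documented below.  The offset and field
 * layout are assumptions.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

static unsigned int my_parse_len(const unsigned char *pkt)
{
	/* pkt + 3 need not be 2-byte aligned */
	__be16 raw = get_unaligned((const __be16 *)(pkt + 3));

	return be16_to_cpu(raw);
}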
- */ - -/** - * get_unaligned - get value from possibly mis-aligned location - * @ptr: pointer to value - * - * This macro should be used for accessing values larger in size than - * single bytes at locations that are expected to be improperly aligned, - * e.g. retrieving a u16 value from a location not u16-aligned. - * - * Note that unaligned accesses can be very expensive on some architectures. - */ -#define get_unaligned(ptr) (*(ptr)) - -/** - * put_unaligned - put value to a possibly mis-aligned location - * @val: value to place - * @ptr: pointer to location - * - * This macro should be used for placing values larger in size than - * single bytes at locations that are expected to be improperly aligned, - * e.g. writing a u16 value to a location not u16-aligned. - * - * Note that unaligned accesses can be very expensive on some architectures. - */ -#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) - -#endif diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h deleted file mode 100644 index 9b15545eb9b5..000000000000 --- a/include/asm-i386/unistd.h +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef _ASM_I386_UNISTD_H_ -#define _ASM_I386_UNISTD_H_ - -/* - * This file contains the system call numbers. - */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 -#define __NR_break 17 -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 -#define __NR_stty 31 -#define __NR_gtty 32 -#define __NR_access 33 -#define __NR_nice 34 -#define __NR_ftime 35 -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 -#define __NR_prof 44 -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 -#define __NR_lock 53 -#define __NR_ioctl 54 -#define __NR_fcntl 55 -#define __NR_mpx 56 -#define __NR_setpgid 57 -#define __NR_ulimit 58 -#define __NR_oldolduname 59 -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 -#define __NR_select 82 -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 
-#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 -#define __NR_profil 98 -#define __NR_statfs 99 -#define __NR_fstatfs 100 -#define __NR_ioperm 101 -#define __NR_socketcall 102 -#define __NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 -#define __NR_iopl 110 -#define __NR_vhangup 111 -#define __NR_idle 112 -#define __NR_vm86old 113 -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_modify_ldt 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 -#define __NR_create_module 127 -#define __NR_init_module 128 -#define __NR_delete_module 129 -#define __NR_get_kernel_syms 130 -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 -#define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 -#define __NR_vm86 166 -#define __NR_query_module 167 -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_chown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 -#define __NR_getpmsg 188 /* some people actually want streams */ -#define __NR_putpmsg 189 /* some people actually want streams */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define 
__NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_mincore 218 -#define __NR_madvise 219 -#define __NR_madvise1 219 /* delete when C lib stub is removed */ -#define __NR_getdents64 220 -#define __NR_fcntl64 221 -/* 223 is unused */ -#define __NR_gettid 224 -#define __NR_readahead 225 -#define __NR_setxattr 226 -#define __NR_lsetxattr 227 -#define __NR_fsetxattr 228 -#define __NR_getxattr 229 -#define __NR_lgetxattr 230 -#define __NR_fgetxattr 231 -#define __NR_listxattr 232 -#define __NR_llistxattr 233 -#define __NR_flistxattr 234 -#define __NR_removexattr 235 -#define __NR_lremovexattr 236 -#define __NR_fremovexattr 237 -#define __NR_tkill 238 -#define __NR_sendfile64 239 -#define __NR_futex 240 -#define __NR_sched_setaffinity 241 -#define __NR_sched_getaffinity 242 -#define __NR_set_thread_area 243 -#define __NR_get_thread_area 244 -#define __NR_io_setup 245 -#define __NR_io_destroy 246 -#define __NR_io_getevents 247 -#define __NR_io_submit 248 -#define __NR_io_cancel 249 -#define __NR_fadvise64 250 -/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */ -#define __NR_exit_group 252 -#define __NR_lookup_dcookie 253 -#define __NR_epoll_create 254 -#define __NR_epoll_ctl 255 -#define __NR_epoll_wait 256 -#define __NR_remap_file_pages 257 -#define __NR_set_tid_address 258 -#define __NR_timer_create 259 -#define __NR_timer_settime (__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) -#define __NR_statfs64 268 -#define __NR_fstatfs64 269 -#define __NR_tgkill 270 -#define __NR_utimes 271 -#define __NR_fadvise64_64 272 -#define __NR_vserver 273 -#define __NR_mbind 274 -#define __NR_get_mempolicy 275 -#define __NR_set_mempolicy 276 -#define __NR_mq_open 277 -#define __NR_mq_unlink (__NR_mq_open+1) -#define __NR_mq_timedsend (__NR_mq_open+2) -#define __NR_mq_timedreceive (__NR_mq_open+3) -#define __NR_mq_notify (__NR_mq_open+4) -#define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 283 -#define __NR_waitid 284 -/* #define __NR_sys_setaltroot 285 */ -#define __NR_add_key 286 -#define __NR_request_key 287 -#define __NR_keyctl 288 -#define __NR_ioprio_set 289 -#define __NR_ioprio_get 290 -#define __NR_inotify_init 291 -#define __NR_inotify_add_watch 292 -#define __NR_inotify_rm_watch 293 -#define __NR_migrate_pages 294 -#define __NR_openat 295 -#define __NR_mkdirat 296 -#define __NR_mknodat 297 -#define __NR_fchownat 298 -#define __NR_futimesat 299 -#define __NR_fstatat64 300 -#define __NR_unlinkat 301 -#define __NR_renameat 302 -#define __NR_linkat 303 -#define __NR_symlinkat 304 -#define __NR_readlinkat 305 -#define __NR_fchmodat 306 -#define __NR_faccessat 307 -#define __NR_pselect6 308 -#define __NR_ppoll 309 -#define __NR_unshare 310 -#define __NR_set_robust_list 311 -#define __NR_get_robust_list 312 -#define __NR_splice 313 -#define __NR_sync_file_range 314 -#define __NR_tee 315 -#define __NR_vmsplice 316 -#define __NR_move_pages 317 -#define __NR_getcpu 318 -#define __NR_epoll_pwait 319 -#define __NR_utimensat 320 -#define __NR_signalfd 321 -#define __NR_timerfd 322 -#define __NR_eventfd 323 -#define __NR_fallocate 324 - -#ifdef __KERNEL__ - -#define NR_syscalls 325 - -#define __ARCH_WANT_IPC_PARSE_VERSION -#define 
__ARCH_WANT_OLD_READDIR -#define __ARCH_WANT_OLD_STAT -#define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_ALARM -#define __ARCH_WANT_SYS_GETHOSTNAME -#define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK -#define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME -#define __ARCH_WANT_SYS_WAITPID -#define __ARCH_WANT_SYS_SOCKETCALL -#define __ARCH_WANT_SYS_FADVISE64 -#define __ARCH_WANT_SYS_GETPGRP -#define __ARCH_WANT_SYS_LLSEEK -#define __ARCH_WANT_SYS_NICE -#define __ARCH_WANT_SYS_OLD_GETRLIMIT -#define __ARCH_WANT_SYS_OLDUMOUNT -#define __ARCH_WANT_SYS_SIGPENDING -#define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#ifndef cond_syscall -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") -#endif - -#endif /* __KERNEL__ */ -#endif /* _ASM_I386_UNISTD_H_ */ diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h deleted file mode 100644 index 43c70c3de2f9..000000000000 --- a/include/asm-i386/unwind.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef _ASM_I386_UNWIND_H -#define _ASM_I386_UNWIND_H - -#define UNW_PC(frame) ((void)(frame), 0) -#define UNW_SP(frame) ((void)(frame), 0) -#define UNW_FP(frame) ((void)(frame), 0) - -static inline int arch_unw_user_mode(const void *info) -{ - return 0; -} - -#endif /* _ASM_I386_UNWIND_H */ diff --git a/include/asm-i386/user.h b/include/asm-i386/user.h deleted file mode 100644 index 0e85d2a5e33a..000000000000 --- a/include/asm-i386/user.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef _I386_USER_H -#define _I386_USER_H - -#include <asm/page.h> -/* Core file format: The core file is written in such a way that gdb - can understand it and provide useful information to the user (under - linux we use the 'trad-core' bfd). There are quite a number of - obstacles to being able to view the contents of the floating point - registers, and until these are solved you will not be able to view the - contents of them. Actually, you can read in the core file and look at - the contents of the user struct to find out what the floating point - registers contain. - The actual file contents are as follows: - UPAGE: 1 page consisting of a user struct that tells gdb what is present - in the file. Directly after this is a copy of the task_struct, which - is currently not used by gdb, but it may come in useful at some point. - All of the registers are stored as part of the upage. The upage should - always be only one page. - DATA: The data area is stored. We use current->end_text to - current->brk to pick up all of the user variables, plus any memory - that may have been malloced. No attempt is made to determine if a page - is demand-zero or if a page is totally unused, we just cover the entire - range. All of the addresses are rounded in such a way that an integral - number of pages is written. - STACK: We need the stack information in order to get a meaningful - backtrace. We need to write the data from (esp) to - current->start_stack, so we round each of these off in order to be able - to write an integer number of pages. - The minimum core file size is 3 pages, or 12288 bytes. 
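The 12288-byte figure follows directly from the layout just described, assuming the usual 4 KiB i386 page size; a small sketch of the arithmetic:

/* Sketch only: one UPAGE plus at least one rounded-up data page plus at
 * least one rounded-up stack page, at 4096 bytes per page. */
#define DEMO_PAGE_SIZE  4096UL
#define DEMO_MIN_CORE   ((1 /* upage */ + 1 /* data */ + 1 /* stack */) * DEMO_PAGE_SIZE)
/* DEMO_MIN_CORE == 12288, matching the minimum quoted above. */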
-*/ - -/* - * Pentium III FXSR, SSE support - * Gareth Hughes <gareth@valinux.com>, May 2000 - * - * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for - * interacting with the FXSR-format floating point environment. Floating - * point data can be accessed in the regular format in the usual manner, - * and both the standard and SIMD floating point data can be accessed via - * the new ptrace requests. In either case, changes to the FPU environment - * will be reflected in the task's state as expected. - */ - -struct user_i387_struct { - long cwd; - long swd; - long twd; - long fip; - long fcs; - long foo; - long fos; - long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */ -}; - -struct user_fxsr_struct { - unsigned short cwd; - unsigned short swd; - unsigned short twd; - unsigned short fop; - long fip; - long fcs; - long foo; - long fos; - long mxcsr; - long reserved; - long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ - long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ - long padding[56]; -}; - -/* - * This is the old layout of "struct pt_regs", and - * is still the layout used by user mode (the new - * pt_regs doesn't have all registers as the kernel - * doesn't use the extra segment registers) - */ -struct user_regs_struct { - long ebx, ecx, edx, esi, edi, ebp, eax; - unsigned short ds, __ds, es, __es; - unsigned short fs, __fs, gs, __gs; - long orig_eax, eip; - unsigned short cs, __cs; - long eflags, esp; - unsigned short ss, __ss; -}; - -/* When the kernel dumps core, it starts by dumping the user struct - - this will be used by gdb to figure out where the data and stack segments - are within the file, and what virtual addresses to use. */ -struct user{ -/* We start with the registers, to mimic the way that "memory" is returned - from the ptrace(3,...) function. */ - struct user_regs_struct regs; /* Where the registers are actually stored */ -/* ptrace does not yet supply these. Someday.... */ - int u_fpvalid; /* True if math co-processor being used. */ - /* for this mess. Not yet used. */ - struct user_i387_struct i387; /* Math Co-processor registers. */ -/* The rest of this junk is to help gdb figure out what goes where */ - unsigned long int u_tsize; /* Text segment size (pages). */ - unsigned long int u_dsize; /* Data segment size (pages). */ - unsigned long int u_ssize; /* Stack segment size (pages). */ - unsigned long start_code; /* Starting virtual address of text. */ - unsigned long start_stack; /* Starting virtual address of stack area. - This is actually the bottom of the stack, - the top of the stack is always found in the - esp register. */ - long int signal; /* Signal that caused the core dump. */ - int reserved; /* No longer used */ - struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */ - /* the registers. */ - struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. 
*/ - unsigned long magic; /* To uniquely identify a core file */ - char u_comm[32]; /* User command that was responsible */ - int u_debugreg[8]; -}; -#define NBPG PAGE_SIZE -#define UPAGES 1 -#define HOST_TEXT_START_ADDR (u.start_code) -#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) - -#endif /* _I386_USER_H */ diff --git a/include/asm-i386/vga.h b/include/asm-i386/vga.h deleted file mode 100644 index 0ecf68ac03aa..000000000000 --- a/include/asm-i386/vga.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Access to VGA videoram - * - * (c) 1998 Martin Mares <mj@ucw.cz> - */ - -#ifndef _LINUX_ASM_VGA_H_ -#define _LINUX_ASM_VGA_H_ - -/* - * On the PC, we can just recalculate addresses and then - * access the videoram directly without any black magic. - */ - -#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x) - -#define vga_readb(x) (*(x)) -#define vga_writeb(x,y) (*(y) = (x)) - -#endif diff --git a/include/asm-i386/vic.h b/include/asm-i386/vic.h deleted file mode 100644 index 53100f353612..000000000000 --- a/include/asm-i386/vic.h +++ /dev/null @@ -1,61 +0,0 @@ -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * Standard include definitions for the NCR Voyager Interrupt Controller */ - -/* The eight CPI vectors. To activate a CPI, you write a bit mask - * corresponding to the processor set to be interrupted into the - * relevant register. That set of CPUs will then be interrupted with - * the CPI */ -static const int VIC_CPI_Registers[] = - {0xFC00, 0xFC01, 0xFC08, 0xFC09, - 0xFC10, 0xFC11, 0xFC18, 0xFC19 }; - -#define VIC_PROC_WHO_AM_I 0xfc29 -# define QUAD_IDENTIFIER 0xC0 -# define EIGHT_SLOT_IDENTIFIER 0xE0 -#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72 -#define VIC_CPI_BASE_REGISTER 0xFC41 -#define VIC_PROCESSOR_ID 0xFC21 -# define VIC_CPU_MASQUERADE_ENABLE 0x8 - -#define VIC_CLAIM_REGISTER_0 0xFC38 -#define VIC_CLAIM_REGISTER_1 0xFC39 -#define VIC_REDIRECT_REGISTER_0 0xFC60 -#define VIC_REDIRECT_REGISTER_1 0xFC61 -#define VIC_PRIORITY_REGISTER 0xFC20 - -#define VIC_PRIMARY_MC_BASE 0xFC48 -#define VIC_SECONDARY_MC_BASE 0xFC49 - -#define QIC_PROCESSOR_ID 0xFC71 -# define QIC_CPUID_ENABLE 0x08 - -#define QIC_VIC_CPI_BASE_REGISTER 0xFC79 -#define QIC_CPI_BASE_REGISTER 0xFC7A - -#define QIC_MASK_REGISTER0 0xFC80 -/* NOTE: these are masked high, enabled low */ -# define QIC_PERF_TIMER 0x01 -# define QIC_LPE 0x02 -# define QIC_SYS_INT 0x04 -# define QIC_CMN_INT 0x08 -/* at the moment, just enable CMN_INT, disable SYS_INT */ -# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */)) -#define QIC_MASK_REGISTER1 0xFC81 -# define QIC_BOOT_CPI_MASK 0xFE -/* Enable CPI's 1-6 inclusive */ -# define QIC_CPI_ENABLE 0x81 - -#define QIC_INTERRUPT_CLEAR0 0xFC8A -#define QIC_INTERRUPT_CLEAR1 0xFC8B - -/* this is where we place the CPI vectors */ -#define VIC_DEFAULT_CPI_BASE 0xC0 -/* this is where we place the QIC CPI vectors */ -#define QIC_DEFAULT_CPI_BASE 0xD0 - -#define VIC_BOOT_INTERRUPT_MASK 0xfe - -extern void smp_vic_timer_interrupt(void); diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h deleted file mode 100644 index a5edf517b992..000000000000 --- a/include/asm-i386/vm86.h +++ /dev/null @@ -1,215 +0,0 @@ -#ifndef _LINUX_VM86_H -#define _LINUX_VM86_H - -/* - * I'm guessing at the VIF/VIP flag usage, but hope that this is how - * the Pentium uses them. Linux will return from vm86 mode when both - * VIF and VIP is set. 
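To make the return-value convention of this deleted vm86 interface concrete, a small hypothetical decoder follows; VM86_TYPE(), VM86_ARG() and the VM86_* reason codes are the ones defined further down in this header.

/* Illustrative only: the low byte of the vm86() return value carries the
 * reason, the remaining bits carry its argument (e.g. the x of "int x"). */
static const char *demo_vm86_reason(int retval, int *arg)
{
        *arg = VM86_ARG(retval);

        switch (VM86_TYPE(retval)) {
        case VM86_SIGNAL:       return "return due to signal";
        case VM86_UNKNOWN:      return "unhandled GP fault";
        case VM86_INTx:         return "intx instruction";
        case VM86_STI:          return "virtual interrupts enabled";
        case VM86_PICRETURN:    return "pending PIC request";
        case VM86_TRAP:         return "DOS debugger request";
        default:                return "unexpected";
        }
}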
- * - * On a Pentium, we could probably optimize the virtual flags directly - * in the eflags register instead of doing it "by hand" in vflags... - * - * Linus - */ - -#define TF_MASK 0x00000100 -#define IF_MASK 0x00000200 -#define IOPL_MASK 0x00003000 -#define NT_MASK 0x00004000 -#ifdef CONFIG_VM86 -#define VM_MASK 0x00020000 -#else -#define VM_MASK 0 /* ignored */ -#endif -#define AC_MASK 0x00040000 -#define VIF_MASK 0x00080000 /* virtual interrupt flag */ -#define VIP_MASK 0x00100000 /* virtual interrupt pending */ -#define ID_MASK 0x00200000 - -#define BIOSSEG 0x0f000 - -#define CPU_086 0 -#define CPU_186 1 -#define CPU_286 2 -#define CPU_386 3 -#define CPU_486 4 -#define CPU_586 5 - -/* - * Return values for the 'vm86()' system call - */ -#define VM86_TYPE(retval) ((retval) & 0xff) -#define VM86_ARG(retval) ((retval) >> 8) - -#define VM86_SIGNAL 0 /* return due to signal */ -#define VM86_UNKNOWN 1 /* unhandled GP fault - IO-instruction or similar */ -#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */ -#define VM86_STI 3 /* sti/popf/iret instruction enabled virtual interrupts */ - -/* - * Additional return values when invoking new vm86() - */ -#define VM86_PICRETURN 4 /* return due to pending PIC request */ -#define VM86_TRAP 6 /* return due to DOS-debugger request */ - -/* - * function codes when invoking new vm86() - */ -#define VM86_PLUS_INSTALL_CHECK 0 -#define VM86_ENTER 1 -#define VM86_ENTER_NO_BYPASS 2 -#define VM86_REQUEST_IRQ 3 -#define VM86_FREE_IRQ 4 -#define VM86_GET_IRQ_BITS 5 -#define VM86_GET_AND_RESET_IRQ 6 - -/* - * This is the stack-layout seen by the user space program when we have - * done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout - * is 'kernel_vm86_regs' (see below). - */ - -struct vm86_regs { -/* - * normal regs, with special meaning for the segment descriptors.. 
- */ - long ebx; - long ecx; - long edx; - long esi; - long edi; - long ebp; - long eax; - long __null_ds; - long __null_es; - long __null_fs; - long __null_gs; - long orig_eax; - long eip; - unsigned short cs, __csh; - long eflags; - long esp; - unsigned short ss, __ssh; -/* - * these are specific to v86 mode: - */ - unsigned short es, __esh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; -}; - -struct revectored_struct { - unsigned long __map[8]; /* 256 bits */ -}; - -struct vm86_struct { - struct vm86_regs regs; - unsigned long flags; - unsigned long screen_bitmap; - unsigned long cpu_type; - struct revectored_struct int_revectored; - struct revectored_struct int21_revectored; -}; - -/* - * flags masks - */ -#define VM86_SCREEN_BITMAP 0x0001 - -struct vm86plus_info_struct { - unsigned long force_return_for_pic:1; - unsigned long vm86dbg_active:1; /* for debugger */ - unsigned long vm86dbg_TFpendig:1; /* for debugger */ - unsigned long unused:28; - unsigned long is_vm86pus:1; /* for vm86 internal use */ - unsigned char vm86dbg_intxxtab[32]; /* for debugger */ -}; - -struct vm86plus_struct { - struct vm86_regs regs; - unsigned long flags; - unsigned long screen_bitmap; - unsigned long cpu_type; - struct revectored_struct int_revectored; - struct revectored_struct int21_revectored; - struct vm86plus_info_struct vm86plus; -}; - -#ifdef __KERNEL__ -/* - * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86 - * mode - the main change is that the old segment descriptors aren't - * useful any more and are forced to be zero by the kernel (and the - * hardware when a trap occurs), and the real segment descriptors are - * at the end of the structure. Look at ptrace.h to see the "normal" - * setup. For user space layout see 'struct vm86_regs' above. - */ -#include <asm/ptrace.h> - -struct kernel_vm86_regs { -/* - * normal regs, with special meaning for the segment descriptors.. - */ - struct pt_regs pt; -/* - * these are specific to v86 mode: - */ - unsigned short es, __esh; - unsigned short ds, __dsh; - unsigned short fs, __fsh; - unsigned short gs, __gsh; -}; - -struct kernel_vm86_struct { - struct kernel_vm86_regs regs; -/* - * the below part remains on the kernel stack while we are in VM86 mode. - * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we - * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above - * 'struct kernel_vm86_regs' with the then actual values. - * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct' - * in kernelspace, hence we need not reget the data from userspace. - */ -#define VM86_TSS_ESP0 flags - unsigned long flags; - unsigned long screen_bitmap; - unsigned long cpu_type; - struct revectored_struct int_revectored; - struct revectored_struct int21_revectored; - struct vm86plus_info_struct vm86plus; - struct pt_regs *regs32; /* here we save the pointer to the old regs */ -/* - * The below is not part of the structure, but the stack layout continues - * this way. In front of 'return-eip' may be some data, depending on - * compilation, so we don't rely on this and save the pointer to 'oldregs' - * in 'regs32' above. 
- * However, with GCC-2.7.2 and the current CFLAGS you see exactly this: - - long return-eip; from call to vm86() - struct pt_regs oldregs; user space registers as saved by syscall - */ -}; - -#ifdef CONFIG_VM86 - -void handle_vm86_fault(struct kernel_vm86_regs *, long); -int handle_vm86_trap(struct kernel_vm86_regs *, long, int); - -struct task_struct; -void release_vm86_irqs(struct task_struct *); - -#else - -#define handle_vm86_fault(a, b) -#define release_vm86_irqs(a) - -static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c) { - return 0; -} - -#endif /* CONFIG_VM86 */ - -#endif /* __KERNEL__ */ - -#endif diff --git a/include/asm-i386/vmi.h b/include/asm-i386/vmi.h deleted file mode 100644 index eb8bd892c01e..000000000000 --- a/include/asm-i386/vmi.h +++ /dev/null @@ -1,263 +0,0 @@ -/* - * VMI interface definition - * - * Copyright (C) 2005, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Maintained by: Zachary Amsden zach@vmware.com - * - */ -#include <linux/types.h> - -/* - *--------------------------------------------------------------------- - * - * VMI Option ROM API - * - *--------------------------------------------------------------------- - */ -#define VMI_SIGNATURE 0x696d5663 /* "cVmi" */ - -#define PCI_VENDOR_ID_VMWARE 0x15AD -#define PCI_DEVICE_ID_VMWARE_VMI 0x0801 - -/* - * We use two version numbers for compatibility, with the major - * number signifying interface breakages, and the minor number - * interface extensions. 
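A minimal sketch of the compatibility rule that sentence implies (the real probe logic lived in the VMI arch code, not in this header, so treat this purely as an illustration): the major revision must match exactly, while a larger minor revision only adds calls.

/* Illustrative only: VMI_API_REV_MAJOR/MINOR are the constants defined
 * just below; rom_major/rom_minor would come from the option ROM. */
static int demo_vmi_compatible(u8 rom_major, u8 rom_minor)
{
        if (rom_major != VMI_API_REV_MAJOR)
                return 0;                       /* interface breakage */
        return rom_minor >= VMI_API_REV_MINOR;  /* extensions are fine */
}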
- */ -#define VMI_API_REV_MAJOR 3 -#define VMI_API_REV_MINOR 0 - -#define VMI_CALL_CPUID 0 -#define VMI_CALL_WRMSR 1 -#define VMI_CALL_RDMSR 2 -#define VMI_CALL_SetGDT 3 -#define VMI_CALL_SetLDT 4 -#define VMI_CALL_SetIDT 5 -#define VMI_CALL_SetTR 6 -#define VMI_CALL_GetGDT 7 -#define VMI_CALL_GetLDT 8 -#define VMI_CALL_GetIDT 9 -#define VMI_CALL_GetTR 10 -#define VMI_CALL_WriteGDTEntry 11 -#define VMI_CALL_WriteLDTEntry 12 -#define VMI_CALL_WriteIDTEntry 13 -#define VMI_CALL_UpdateKernelStack 14 -#define VMI_CALL_SetCR0 15 -#define VMI_CALL_SetCR2 16 -#define VMI_CALL_SetCR3 17 -#define VMI_CALL_SetCR4 18 -#define VMI_CALL_GetCR0 19 -#define VMI_CALL_GetCR2 20 -#define VMI_CALL_GetCR3 21 -#define VMI_CALL_GetCR4 22 -#define VMI_CALL_WBINVD 23 -#define VMI_CALL_SetDR 24 -#define VMI_CALL_GetDR 25 -#define VMI_CALL_RDPMC 26 -#define VMI_CALL_RDTSC 27 -#define VMI_CALL_CLTS 28 -#define VMI_CALL_EnableInterrupts 29 -#define VMI_CALL_DisableInterrupts 30 -#define VMI_CALL_GetInterruptMask 31 -#define VMI_CALL_SetInterruptMask 32 -#define VMI_CALL_IRET 33 -#define VMI_CALL_SYSEXIT 34 -#define VMI_CALL_Halt 35 -#define VMI_CALL_Reboot 36 -#define VMI_CALL_Shutdown 37 -#define VMI_CALL_SetPxE 38 -#define VMI_CALL_SetPxELong 39 -#define VMI_CALL_UpdatePxE 40 -#define VMI_CALL_UpdatePxELong 41 -#define VMI_CALL_MachineToPhysical 42 -#define VMI_CALL_PhysicalToMachine 43 -#define VMI_CALL_AllocatePage 44 -#define VMI_CALL_ReleasePage 45 -#define VMI_CALL_InvalPage 46 -#define VMI_CALL_FlushTLB 47 -#define VMI_CALL_SetLinearMapping 48 - -#define VMI_CALL_SetIOPLMask 61 -#define VMI_CALL_SetInitialAPState 62 -#define VMI_CALL_APICWrite 63 -#define VMI_CALL_APICRead 64 -#define VMI_CALL_IODelay 65 -#define VMI_CALL_SetLazyMode 73 - -/* - *--------------------------------------------------------------------- - * - * MMU operation flags - * - *--------------------------------------------------------------------- - */ - -/* Flags used by VMI_{Allocate|Release}Page call */ -#define VMI_PAGE_PAE 0x10 /* Allocate PAE shadow */ -#define VMI_PAGE_CLONE 0x20 /* Clone from another shadow */ -#define VMI_PAGE_ZEROED 0x40 /* Page is pre-zeroed */ - - -/* Flags shared by Allocate|Release Page and PTE updates */ -#define VMI_PAGE_PT 0x01 -#define VMI_PAGE_PD 0x02 -#define VMI_PAGE_PDP 0x04 -#define VMI_PAGE_PML4 0x08 - -#define VMI_PAGE_NORMAL 0x00 /* for debugging */ - -/* Flags used by PTE updates */ -#define VMI_PAGE_CURRENT_AS 0x10 /* implies VMI_PAGE_VA_MASK is valid */ -#define VMI_PAGE_DEFER 0x20 /* may queue update until TLB inval */ -#define VMI_PAGE_VA_MASK 0xfffff000 - -#ifdef CONFIG_X86_PAE -#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_PAE | VMI_PAGE_ZEROED) -#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_PAE | VMI_PAGE_ZEROED) -#else -#define VMI_PAGE_L1 (VMI_PAGE_PT | VMI_PAGE_ZEROED) -#define VMI_PAGE_L2 (VMI_PAGE_PD | VMI_PAGE_ZEROED) -#endif - -/* Flags used by VMI_FlushTLB call */ -#define VMI_FLUSH_TLB 0x01 -#define VMI_FLUSH_GLOBAL 0x02 - -/* - *--------------------------------------------------------------------- - * - * VMI relocation definitions for ROM call get_reloc - * - *--------------------------------------------------------------------- - */ - -/* VMI Relocation types */ -#define VMI_RELOCATION_NONE 0 -#define VMI_RELOCATION_CALL_REL 1 -#define VMI_RELOCATION_JUMP_REL 2 -#define VMI_RELOCATION_NOP 3 - -#ifndef __ASSEMBLY__ -struct vmi_relocation_info { - unsigned char *eip; - unsigned char type; - unsigned char reserved[3]; -}; -#endif - - -/* - 
*--------------------------------------------------------------------- - * - * Generic ROM structures and definitions - * - *--------------------------------------------------------------------- - */ - -#ifndef __ASSEMBLY__ - -struct vrom_header { - u16 rom_signature; // option ROM signature - u8 rom_length; // ROM length in 512 byte chunks - u8 rom_entry[4]; // 16-bit code entry point - u8 rom_pad0; // 4-byte align pad - u32 vrom_signature; // VROM identification signature - u8 api_version_min;// Minor version of API - u8 api_version_maj;// Major version of API - u8 jump_slots; // Number of jump slots - u8 reserved1; // Reserved for expansion - u32 virtual_top; // Hypervisor virtual address start - u16 reserved2; // Reserved for expansion - u16 license_offs; // Offset to License string - u16 pci_header_offs;// Offset to PCI OPROM header - u16 pnp_header_offs;// Offset to PnP OPROM header - u32 rom_pad3; // PnP reserverd / VMI reserved - u8 reserved[96]; // Reserved for headers - char vmi_init[8]; // VMI_Init jump point - char get_reloc[8]; // VMI_GetRelocationInfo jump point -} __attribute__((packed)); - -struct pnp_header { - char sig[4]; - char rev; - char size; - short next; - short res; - long devID; - unsigned short manufacturer_offset; - unsigned short product_offset; -} __attribute__((packed)); - -struct pci_header { - char sig[4]; - short vendorID; - short deviceID; - short vpdData; - short size; - char rev; - char class; - char subclass; - char interface; - short chunks; - char rom_version_min; - char rom_version_maj; - char codetype; - char lastRom; - short reserved; -} __attribute__((packed)); - -/* Function prototypes for bootstrapping */ -extern void vmi_init(void); -extern void vmi_bringup(void); -extern void vmi_apply_boot_page_allocations(void); - -/* State needed to start an application processor in an SMP system. */ -struct vmi_ap_state { - u32 cr0; - u32 cr2; - u32 cr3; - u32 cr4; - - u64 efer; - - u32 eip; - u32 eflags; - u32 eax; - u32 ebx; - u32 ecx; - u32 edx; - u32 esp; - u32 ebp; - u32 esi; - u32 edi; - u16 cs; - u16 ss; - u16 ds; - u16 es; - u16 fs; - u16 gs; - u16 ldtr; - - u16 gdtr_limit; - u32 gdtr_base; - u32 idtr_base; - u16 idtr_limit; -}; - -#endif diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h deleted file mode 100644 index 478188130328..000000000000 --- a/include/asm-i386/vmi_time.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * VMI Time wrappers - * - * Copyright (C) 2006, VMware, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or - * NON INFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- * - * Send feedback to dhecht@vmware.com - * - */ - -#ifndef __VMI_TIME_H -#define __VMI_TIME_H - -/* - * Raw VMI call indices for timer functions - */ -#define VMI_CALL_GetCycleFrequency 66 -#define VMI_CALL_GetCycleCounter 67 -#define VMI_CALL_SetAlarm 68 -#define VMI_CALL_CancelAlarm 69 -#define VMI_CALL_GetWallclockTime 70 -#define VMI_CALL_WallclockUpdated 71 - -/* Cached VMI timer operations */ -extern struct vmi_timer_ops { - u64 (*get_cycle_frequency)(void); - u64 (*get_cycle_counter)(int); - u64 (*get_wallclock)(void); - int (*wallclock_updated)(void); - void (*set_alarm)(u32 flags, u64 expiry, u64 period); - void (*cancel_alarm)(u32 flags); -} vmi_timer_ops; - -/* Prototypes */ -extern void __init vmi_time_init(void); -extern unsigned long vmi_get_wallclock(void); -extern int vmi_set_wallclock(unsigned long now); -extern unsigned long long vmi_sched_clock(void); -extern unsigned long vmi_cpu_khz(void); - -#ifdef CONFIG_X86_LOCAL_APIC -extern void __devinit vmi_time_bsp_init(void); -extern void __devinit vmi_time_ap_init(void); -#endif - -/* - * When run under a hypervisor, a vcpu is always in one of three states: - * running, halted, or ready. The vcpu is in the 'running' state if it - * is executing. When the vcpu executes the halt interface, the vcpu - * enters the 'halted' state and remains halted until there is some work - * pending for the vcpu (e.g. an alarm expires, host I/O completes on - * behalf of virtual I/O). At this point, the vcpu enters the 'ready' - * state (waiting for the hypervisor to reschedule it). Finally, at any - * time when the vcpu is not in the 'running' state nor the 'halted' - * state, it is in the 'ready' state. - * - * Real time is advances while the vcpu is 'running', 'ready', or - * 'halted'. Stolen time is the time in which the vcpu is in the - * 'ready' state. Available time is the remaining time -- the vcpu is - * either 'running' or 'halted'. - * - * All three views of time are accessible through the VMI cycle - * counters. - */ - -/* The cycle counters. */ -#define VMI_CYCLES_REAL 0 -#define VMI_CYCLES_AVAILABLE 1 -#define VMI_CYCLES_STOLEN 2 - -/* The alarm interface 'flags' bits */ -#define VMI_ALARM_COUNTERS 2 - -#define VMI_ALARM_COUNTER_MASK 0x000000ff - -#define VMI_ALARM_WIRED_IRQ0 0x00000000 -#define VMI_ALARM_WIRED_LVTT 0x00010000 - -#define VMI_ALARM_IS_ONESHOT 0x00000000 -#define VMI_ALARM_IS_PERIODIC 0x00000100 - -#define CONFIG_VMI_ALARM_HZ 100 - -#endif diff --git a/include/asm-i386/voyager.h b/include/asm-i386/voyager.h deleted file mode 100644 index 91a9932937ab..000000000000 --- a/include/asm-i386/voyager.h +++ /dev/null @@ -1,517 +0,0 @@ -/* Copyright (C) 1999,2001 - * - * Author: J.E.J.Bottomley@HansenPartnership.com - * - * Standard include definitions for the NCR Voyager system */ - -#undef VOYAGER_DEBUG -#undef VOYAGER_CAT_DEBUG - -#ifdef VOYAGER_DEBUG -#define VDEBUG(x) printk x -#else -#define VDEBUG(x) -#endif - -/* There are three levels of voyager machine: 3,4 and 5. The rule is - * if it's less than 3435 it's a Level 3 except for a 3360 which is - * a level 4. 
A 3435 or above is a Level 5 */ -#define VOYAGER_LEVEL5_AND_ABOVE 0x3435 -#define VOYAGER_LEVEL4 0x3360 - -/* The L4 DINO ASIC */ -#define VOYAGER_DINO 0x43 - -/* voyager ports in standard I/O space */ -#define VOYAGER_MC_SETUP 0x96 - - -#define VOYAGER_CAT_CONFIG_PORT 0x97 -# define VOYAGER_CAT_DESELECT 0xff -#define VOYAGER_SSPB_RELOCATION_PORT 0x98 - -/* Valid CAT controller commands */ -/* start instruction register cycle */ -#define VOYAGER_CAT_IRCYC 0x01 -/* start data register cycle */ -#define VOYAGER_CAT_DRCYC 0x02 -/* move to execute state */ -#define VOYAGER_CAT_RUN 0x0F -/* end operation */ -#define VOYAGER_CAT_END 0x80 -/* hold in idle state */ -#define VOYAGER_CAT_HOLD 0x90 -/* single step an "intest" vector */ -#define VOYAGER_CAT_STEP 0xE0 -/* return cat controller to CLEMSON mode */ -#define VOYAGER_CAT_CLEMSON 0xFF - -/* the default cat command header */ -#define VOYAGER_CAT_HEADER 0x7F - -/* the range of possible CAT module ids in the system */ -#define VOYAGER_MIN_MODULE 0x10 -#define VOYAGER_MAX_MODULE 0x1f - -/* The voyager registers per asic */ -#define VOYAGER_ASIC_ID_REG 0x00 -#define VOYAGER_ASIC_TYPE_REG 0x01 -/* the sub address registers can be made auto incrementing on reads */ -#define VOYAGER_AUTO_INC_REG 0x02 -# define VOYAGER_AUTO_INC 0x04 -# define VOYAGER_NO_AUTO_INC 0xfb -#define VOYAGER_SUBADDRDATA 0x03 -#define VOYAGER_SCANPATH 0x05 -# define VOYAGER_CONNECT_ASIC 0x01 -# define VOYAGER_DISCONNECT_ASIC 0xfe -#define VOYAGER_SUBADDRLO 0x06 -#define VOYAGER_SUBADDRHI 0x07 -#define VOYAGER_SUBMODSELECT 0x08 -#define VOYAGER_SUBMODPRESENT 0x09 - -#define VOYAGER_SUBADDR_LO 0xff -#define VOYAGER_SUBADDR_HI 0xffff - -/* the maximum size of a scan path -- used to form instructions */ -#define VOYAGER_MAX_SCAN_PATH 0x100 -/* the biggest possible register size (in bytes) */ -#define VOYAGER_MAX_REG_SIZE 4 - -/* Total number of possible modules (including submodules) */ -#define VOYAGER_MAX_MODULES 16 -/* Largest number of asics per module */ -#define VOYAGER_MAX_ASICS_PER_MODULE 7 - -/* the CAT asic of each module is always the first one */ -#define VOYAGER_CAT_ID 0 -#define VOYAGER_PSI 0x1a - -/* voyager instruction operations and registers */ -#define VOYAGER_READ_CONFIG 0x1 -#define VOYAGER_WRITE_CONFIG 0x2 -#define VOYAGER_BYPASS 0xff - -typedef struct voyager_asic -{ - __u8 asic_addr; /* ASIC address; Level 4 */ - __u8 asic_type; /* ASIC type */ - __u8 asic_id; /* ASIC id */ - __u8 jtag_id[4]; /* JTAG id */ - __u8 asic_location; /* Location within scan path; start w/ 0 */ - __u8 bit_location; /* Location within bit stream; start w/ 0 */ - __u8 ireg_length; /* Instruction register length */ - __u16 subaddr; /* Amount of sub address space */ - struct voyager_asic *next; /* Next asic in linked list */ -} voyager_asic_t; - -typedef struct voyager_module { - __u8 module_addr; /* Module address */ - __u8 scan_path_connected; /* Scan path connected */ - __u16 ee_size; /* Size of the EEPROM */ - __u16 num_asics; /* Number of Asics */ - __u16 inst_bits; /* Instruction bits in the scan path */ - __u16 largest_reg; /* Largest register in the scan path */ - __u16 smallest_reg; /* Smallest register in the scan path */ - voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */ - struct voyager_module *submodule; /* Submodule pointer */ - struct voyager_module *next; /* Next module in linked list */ -} voyager_module_t; - -typedef struct voyager_eeprom_hdr { - __u8 module_id[4]; - __u8 version_id; - __u8 config_id; - __u16 boundry_id; /* boundary scan id */ - __u16 
ee_size; /* size of EEPROM */ - __u8 assembly[11]; /* assembly # */ - __u8 assembly_rev; /* assembly rev */ - __u8 tracer[4]; /* tracer number */ - __u16 assembly_cksum; /* asm checksum */ - __u16 power_consump; /* pwr requirements */ - __u16 num_asics; /* number of asics */ - __u16 bist_time; /* min. bist time */ - __u16 err_log_offset; /* error log offset */ - __u16 scan_path_offset;/* scan path offset */ - __u16 cct_offset; - __u16 log_length; /* length of err log */ - __u16 xsum_end; /* offset to end of - checksum */ - __u8 reserved[4]; - __u8 sflag; /* starting sentinal */ - __u8 part_number[13]; /* prom part number */ - __u8 version[10]; /* version number */ - __u8 signature[8]; - __u16 eeprom_chksum; - __u32 data_stamp_offset; - __u8 eflag ; /* ending sentinal */ -} __attribute__((packed)) voyager_eprom_hdr_t; - - - -#define VOYAGER_EPROM_SIZE_OFFSET ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size))) -#define VOYAGER_XSUM_END_OFFSET 0x2a - -/* the following three definitions are for internal table layouts - * in the module EPROMs. We really only care about the IDs and - * offsets */ -typedef struct voyager_sp_table { - __u8 asic_id; - __u8 bypass_flag; - __u16 asic_data_offset; - __u16 config_data_offset; -} __attribute__((packed)) voyager_sp_table_t; - -typedef struct voyager_jtag_table { - __u8 icode[4]; - __u8 runbist[4]; - __u8 intest[4]; - __u8 samp_preld[4]; - __u8 ireg_len; -} __attribute__((packed)) voyager_jtt_t; - -typedef struct voyager_asic_data_table { - __u8 jtag_id[4]; - __u16 length_bsr; - __u16 length_bist_reg; - __u32 bist_clk; - __u16 subaddr_bits; - __u16 seed_bits; - __u16 sig_bits; - __u16 jtag_offset; -} __attribute__((packed)) voyager_at_t; - -/* Voyager Interrupt Controller (VIC) registers */ - -/* Base to add to Cross Processor Interrupts (CPIs) when triggering - * the CPU IRQ line */ -/* register defines for the WCBICs (one per processor) */ -#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */ -#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */ -#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */ -#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */ -#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */ -#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */ -#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */ -#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */ - - -/* top of memory registers */ -#define VOYAGER_WCBIC_TOM_L 0x4 -#define VOYAGER_WCBIC_TOM_H 0x5 - -/* register defines for Voyager Memory Contol (VMC) - * these are present on L4 machines only */ -#define VOYAGER_VMC1 0x81 -#define VOYAGER_VMC2 0x91 -#define VOYAGER_VMC3 0xa1 -#define VOYAGER_VMC4 0xb1 - -/* VMC Ports */ -#define VOYAGER_VMC_MEMORY_SETUP 0x9 -# define VMC_Interleaving 0x01 -# define VMC_4Way 0x02 -# define VMC_EvenCacheLines 0x04 -# define VMC_HighLine 0x08 -# define VMC_Start0_Enable 0x20 -# define VMC_Start1_Enable 0x40 -# define VMC_Vremap 0x80 -#define VOYAGER_VMC_BANK_DENSITY 0xa -# define VMC_BANK_EMPTY 0 -# define VMC_BANK_4MB 1 -# define VMC_BANK_16MB 2 -# define VMC_BANK_64MB 3 -# define VMC_BANK0_MASK 0x03 -# define VMC_BANK1_MASK 0x0C -# define VMC_BANK2_MASK 0x30 -# define VMC_BANK3_MASK 0xC0 - -/* Magellan Memory Controller (MMC) defines - present on L5 */ -#define VOYAGER_MMC_ASIC_ID 1 -/* the two memory modules corresponding to memory cards in the system */ -#define VOYAGER_MMC_MEMORY0_MODULE 0x14 -#define VOYAGER_MMC_MEMORY1_MODULE 0x15 -/* the Magellan Memory Address (MMA) defines */ -#define 
VOYAGER_MMA_ASIC_ID 2 - -/* Submodule number for the Quad Baseboard */ -#define VOYAGER_QUAD_BASEBOARD 1 - -/* ASIC defines for the Quad Baseboard */ -#define VOYAGER_QUAD_QDATA0 1 -#define VOYAGER_QUAD_QDATA1 2 -#define VOYAGER_QUAD_QABC 3 - -/* Useful areas in extended CMOS */ -#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a -#define VOYAGER_MEMORY_CLICKMAP 0xa23 -#define VOYAGER_DUMP_LOCATION 0xb1a - -/* SUS In Control bit - used to tell SUS that we don't need to be - * babysat anymore */ -#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff -# define VOYAGER_IN_CONTROL_FLAG 0x80 - -/* Voyager PSI defines */ -#define VOYAGER_PSI_STATUS_REG 0x08 -# define PSI_DC_FAIL 0x01 -# define PSI_MON 0x02 -# define PSI_FAULT 0x04 -# define PSI_ALARM 0x08 -# define PSI_CURRENT 0x10 -# define PSI_DVM 0x20 -# define PSI_PSCFAULT 0x40 -# define PSI_STAT_CHG 0x80 - -#define VOYAGER_PSI_SUPPLY_REG 0x8000 - /* read */ -# define PSI_FAIL_DC 0x01 -# define PSI_FAIL_AC 0x02 -# define PSI_MON_INT 0x04 -# define PSI_SWITCH_OFF 0x08 -# define PSI_HX_OFF 0x10 -# define PSI_SECURITY 0x20 -# define PSI_CMOS_BATT_LOW 0x40 -# define PSI_CMOS_BATT_FAIL 0x80 - /* write */ -# define PSI_CLR_SWITCH_OFF 0x13 -# define PSI_CLR_HX_OFF 0x14 -# define PSI_CLR_CMOS_BATT_FAIL 0x17 - -#define VOYAGER_PSI_MASK 0x8001 -# define PSI_MASK_MASK 0x10 - -#define VOYAGER_PSI_AC_FAIL_REG 0x8004 -#define AC_FAIL_STAT_CHANGE 0x80 - -#define VOYAGER_PSI_GENERAL_REG 0x8007 - /* read */ -# define PSI_SWITCH_ON 0x01 -# define PSI_SWITCH_ENABLED 0x02 -# define PSI_ALARM_ENABLED 0x08 -# define PSI_SECURE_ENABLED 0x10 -# define PSI_COLD_RESET 0x20 -# define PSI_COLD_START 0x80 - /* write */ -# define PSI_POWER_DOWN 0x10 -# define PSI_SWITCH_DISABLE 0x01 -# define PSI_SWITCH_ENABLE 0x11 -# define PSI_CLEAR 0x12 -# define PSI_ALARM_DISABLE 0x03 -# define PSI_ALARM_ENABLE 0x13 -# define PSI_CLEAR_COLD_RESET 0x05 -# define PSI_SET_COLD_RESET 0x15 -# define PSI_CLEAR_COLD_START 0x07 -# define PSI_SET_COLD_START 0x17 - - - -struct voyager_bios_info { - __u8 len; - __u8 major; - __u8 minor; - __u8 debug; - __u8 num_classes; - __u8 class_1; - __u8 class_2; -}; - -/* The following structures and definitions are for the Kernel/SUS - * interface these are needed to find out how SUS initialised any Quad - * boards in the system */ - -#define NUMBER_OF_MC_BUSSES 2 -#define SLOTS_PER_MC_BUS 8 -#define MAX_CPUS 16 /* 16 way CPU system */ -#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */ -#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */ -#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */ -#define NUMBER_OF_POS_REGS 8 - -typedef struct { - __u8 MC_Slot; - __u8 POS_Values[NUMBER_OF_POS_REGS]; -} __attribute__((packed)) MC_SlotInformation_t; - -struct QuadDescription { - __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields - * will be zero except for slot */ - __u8 StructureVersion; - __u32 CPI_BaseAddress; - __u32 LARC_BankSize; - __u32 LocalMemoryStateBits; - __u8 Slot; /* Processor slots 1 - 4 */ -} __attribute__((packed)); - -struct ProcBoardInfo { - __u8 Type; - __u8 StructureVersion; - __u8 NumberOfBoards; - struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS]; -} __attribute__((packed)); - -struct CacheDescription { - __u8 Level; - __u32 TotalSize; - __u16 LineSize; - __u8 Associativity; - __u8 CacheType; - __u8 WriteType; - __u8 Number_CPUs_SharedBy; - __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS]; - -} __attribute__((packed)); - -struct CPU_Description { - __u8 CPU_HardwareId; - char *FRU_String; - __u8 NumberOfCacheLevels; - 
struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS]; -} __attribute__((packed)); - -struct CPU_Info { - __u8 Type; - __u8 StructureVersion; - __u8 NumberOf_CPUs; - struct CPU_Description CPU_Data[MAX_CPUS]; -} __attribute__((packed)); - - -/* - * This structure will be used by SUS and the OS. - * The assumption about this structure is that no blank space is - * packed in it by our friend the compiler. - */ -typedef struct { - __u8 Mailbox_SUS; /* Written to by SUS to give commands/response to the OS */ - __u8 Mailbox_OS; /* Written to by the OS to give commands/response to SUS */ - __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the interface SUS supports */ - __u8 OS_MailboxVersion; /* Tells SUS which iteration of the interface the OS supports */ - __u32 OS_Flags; /* Flags set by the OS as info for SUS */ - __u32 SUS_Flags; /* Flags set by SUS as info for the OS */ - __u32 WatchDogPeriod; /* Watchdog period (in seconds) which the DP uses to see if the OS is dead */ - __u32 WatchDogCount; /* Updated by the OS on every tic. */ - __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS where to stuff the SUS error log on a dump */ - MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS]; /* Storage for MCA POS data */ - /* All new SECOND_PASS_INTERFACE fields added from this point */ - struct ProcBoardInfo *BoardData; - struct CPU_Info *CPU_Data; - /* All new fields must be added from this point */ -} Voyager_KernelSUS_Mbox_t; - -/* structure for finding the right memory address to send a QIC CPI to */ -struct voyager_qic_cpi { - /* Each cache line (32 bytes) can trigger a cpi. The cpi - * read/write may occur anywhere in the cache line---pick the - * middle to be safe */ - struct { - __u32 pad1[3]; - __u32 cpi; - __u32 pad2[4]; - } qic_cpi[8]; -}; - -struct voyager_status { - __u32 power_fail:1; - __u32 switch_off:1; - __u32 request_from_kernel:1; -}; - -struct voyager_psi_regs { - __u8 cat_id; - __u8 cat_dev; - __u8 cat_control; - __u8 subaddr; - __u8 dummy4; - __u8 checkbit; - __u8 subaddr_low; - __u8 subaddr_high; - __u8 intstatus; - __u8 stat1; - __u8 stat3; - __u8 fault; - __u8 tms; - __u8 gen; - __u8 sysconf; - __u8 dummy15; -}; - -struct voyager_psi_subregs { - __u8 supply; - __u8 mask; - __u8 present; - __u8 DCfail; - __u8 ACfail; - __u8 fail; - __u8 UPSfail; - __u8 genstatus; -}; - -struct voyager_psi { - struct voyager_psi_regs regs; - struct voyager_psi_subregs subregs; -}; - -struct voyager_SUS { -#define VOYAGER_DUMP_BUTTON_NMI 0x1 -#define VOYAGER_SUS_VALID 0x2 -#define VOYAGER_SYSINT_COMPLETE 0x3 - __u8 SUS_mbox; -#define VOYAGER_NO_COMMAND 0x0 -#define VOYAGER_IGNORE_DUMP 0x1 -#define VOYAGER_DO_DUMP 0x2 -#define VOYAGER_SYSINT_HANDSHAKE 0x3 -#define VOYAGER_DO_MEM_DUMP 0x4 -#define VOYAGER_SYSINT_WAS_RECOVERED 0x5 - __u8 kernel_mbox; -#define VOYAGER_MAILBOX_VERSION 0x10 - __u8 SUS_version; - __u8 kernel_version; -#define VOYAGER_OS_HAS_SYSINT 0x1 -#define VOYAGER_OS_IN_PROGRESS 0x2 -#define VOYAGER_UPDATING_WDPERIOD 0x4 - __u32 kernel_flags; -#define VOYAGER_SUS_BOOTING 0x1 -#define VOYAGER_SUS_IN_PROGRESS 0x2 - __u32 SUS_flags; - __u32 watchdog_period; - __u32 watchdog_count; - __u32 SUS_errorlog; - /* lots of system configuration stuff under here */ -}; - -/* Variables exported by voyager_smp */ -extern __u32 voyager_extended_vic_processors; -extern __u32 voyager_allowed_boot_processors; -extern __u32 voyager_quad_processors; -extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS]; -extern struct voyager_SUS 
*voyager_SUS; - -/* variables exported always */ -extern struct task_struct *voyager_thread; -extern int voyager_level; -extern struct voyager_status voyager_status; - -/* functions exported by the voyager and voyager_smp modules */ -extern int voyager_cat_readb(__u8 module, __u8 asic, int reg); -extern void voyager_cat_init(void); -extern void voyager_detect(struct voyager_bios_info *); -extern void voyager_trap_init(void); -extern void voyager_setup_irqs(void); -extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length); -extern void voyager_smp_intr_init(void); -extern __u8 voyager_extended_cmos_read(__u16 cmos_address); -extern void voyager_smp_dump(void); -extern void voyager_timer_interrupt(void); -extern void smp_local_timer_interrupt(void); -extern void voyager_power_off(void); -extern void smp_voyager_power_off(void *dummy); -extern void voyager_restart(void); -extern void voyager_cat_power_off(void); -extern void voyager_cat_do_common_interrupt(void); -extern void voyager_handle_nmi(void); -/* Commands for the following are */ -#define VOYAGER_PSI_READ 0 -#define VOYAGER_PSI_WRITE 1 -#define VOYAGER_PSI_SUBREAD 2 -#define VOYAGER_PSI_SUBWRITE 3 -extern void voyager_cat_psi(__u8, __u16, __u8 *); diff --git a/include/asm-i386/xen/hypercall.h b/include/asm-i386/xen/hypercall.h deleted file mode 100644 index bc0ee7d961ca..000000000000 --- a/include/asm-i386/xen/hypercall.h +++ /dev/null @@ -1,413 +0,0 @@ -/****************************************************************************** - * hypercall.h - * - * Linux-specific hypervisor handling. - * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
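Before the deleted hypercall definitions that follow, a hedged usage sketch: guest code did not open-code the _hypercallN() macros below, it went through the typed HYPERVISOR_*() inlines. The command values noted in the comments reflect the Xen interface headers of this era; the message text is invented.

/* Illustrative only: HYPERVISOR_console_io() and HYPERVISOR_xen_version()
 * are among the wrappers defined further down in this header. */
static int demo_xen_hello(void)
{
        char msg[] = "hello from a paravirtualised guest\n";

        /* CONSOLEIO_write (0) writes count bytes to the hypervisor console. */
        HYPERVISOR_console_io(0 /* CONSOLEIO_write */, sizeof(msg) - 1, msg);

        /* XENVER_version (0) returns (major << 16) | minor. */
        return HYPERVISOR_xen_version(0 /* XENVER_version */, NULL);
}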
- */ - -#ifndef __HYPERCALL_H__ -#define __HYPERCALL_H__ - -#include <linux/errno.h> -#include <linux/string.h> - -#include <xen/interface/xen.h> -#include <xen/interface/sched.h> -#include <xen/interface/physdev.h> - -extern struct { char _entry[32]; } hypercall_page[]; - -#define _hypercall0(type, name) \ -({ \ - long __res; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res) \ - : [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall1(type, name, a1) \ -({ \ - long __res, __ign1; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res), "=b" (__ign1) \ - : "1" ((long)(a1)), \ - [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall2(type, name, a1, a2) \ -({ \ - long __res, __ign1, __ign2; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall3(type, name, a1, a2, a3) \ -({ \ - long __res, __ign1, __ign2, __ign3; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), \ - [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall4(type, name, a1, a2, a3, a4) \ -({ \ - long __res, __ign1, __ign2, __ign3, __ign4; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3), "=S" (__ign4) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "4" ((long)(a4)), \ - [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -#define _hypercall5(type, name, a1, a2, a3, a4, a5) \ -({ \ - long __res, __ign1, __ign2, __ign3, __ign4, __ign5; \ - asm volatile ( \ - "call %[call]" \ - : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \ - "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \ - : "1" ((long)(a1)), "2" ((long)(a2)), \ - "3" ((long)(a3)), "4" ((long)(a4)), \ - "5" ((long)(a5)), \ - [call] "m" (hypercall_page[__HYPERVISOR_##name]) \ - : "memory" ); \ - (type)__res; \ -}) - -static inline int -HYPERVISOR_set_trap_table(struct trap_info *table) -{ - return _hypercall1(int, set_trap_table, table); -} - -static inline int -HYPERVISOR_mmu_update(struct mmu_update *req, int count, - int *success_count, domid_t domid) -{ - return _hypercall4(int, mmu_update, req, count, success_count, domid); -} - -static inline int -HYPERVISOR_mmuext_op(struct mmuext_op *op, int count, - int *success_count, domid_t domid) -{ - return _hypercall4(int, mmuext_op, op, count, success_count, domid); -} - -static inline int -HYPERVISOR_set_gdt(unsigned long *frame_list, int entries) -{ - return _hypercall2(int, set_gdt, frame_list, entries); -} - -static inline int -HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp) -{ - return _hypercall2(int, stack_switch, ss, esp); -} - -static inline int -HYPERVISOR_set_callbacks(unsigned long event_selector, - unsigned long event_address, - unsigned long failsafe_selector, - unsigned long failsafe_address) -{ - return _hypercall4(int, set_callbacks, - event_selector, event_address, - failsafe_selector, failsafe_address); -} - -static inline int -HYPERVISOR_fpu_taskswitch(int set) -{ - return _hypercall1(int, fpu_taskswitch, set); -} - -static inline int -HYPERVISOR_sched_op(int cmd, unsigned long arg) -{ - return 
_hypercall2(int, sched_op, cmd, arg); -} - -static inline long -HYPERVISOR_set_timer_op(u64 timeout) -{ - unsigned long timeout_hi = (unsigned long)(timeout>>32); - unsigned long timeout_lo = (unsigned long)timeout; - return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi); -} - -static inline int -HYPERVISOR_set_debugreg(int reg, unsigned long value) -{ - return _hypercall2(int, set_debugreg, reg, value); -} - -static inline unsigned long -HYPERVISOR_get_debugreg(int reg) -{ - return _hypercall1(unsigned long, get_debugreg, reg); -} - -static inline int -HYPERVISOR_update_descriptor(u64 ma, u64 desc) -{ - return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); -} - -static inline int -HYPERVISOR_memory_op(unsigned int cmd, void *arg) -{ - return _hypercall2(int, memory_op, cmd, arg); -} - -static inline int -HYPERVISOR_multicall(void *call_list, int nr_calls) -{ - return _hypercall2(int, multicall, call_list, nr_calls); -} - -static inline int -HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, - unsigned long flags) -{ - unsigned long pte_hi = 0; -#ifdef CONFIG_X86_PAE - pte_hi = new_val.pte_high; -#endif - return _hypercall4(int, update_va_mapping, va, - new_val.pte_low, pte_hi, flags); -} - -static inline int -HYPERVISOR_event_channel_op(int cmd, void *arg) -{ - int rc = _hypercall2(int, event_channel_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) { - struct evtchn_op op; - op.cmd = cmd; - memcpy(&op.u, arg, sizeof(op.u)); - rc = _hypercall1(int, event_channel_op_compat, &op); - memcpy(arg, &op.u, sizeof(op.u)); - } - return rc; -} - -static inline int -HYPERVISOR_xen_version(int cmd, void *arg) -{ - return _hypercall2(int, xen_version, cmd, arg); -} - -static inline int -HYPERVISOR_console_io(int cmd, int count, char *str) -{ - return _hypercall3(int, console_io, cmd, count, str); -} - -static inline int -HYPERVISOR_physdev_op(int cmd, void *arg) -{ - int rc = _hypercall2(int, physdev_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) { - struct physdev_op op; - op.cmd = cmd; - memcpy(&op.u, arg, sizeof(op.u)); - rc = _hypercall1(int, physdev_op_compat, &op); - memcpy(arg, &op.u, sizeof(op.u)); - } - return rc; -} - -static inline int -HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count) -{ - return _hypercall3(int, grant_table_op, cmd, uop, count); -} - -static inline int -HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val, - unsigned long flags, domid_t domid) -{ - unsigned long pte_hi = 0; -#ifdef CONFIG_X86_PAE - pte_hi = new_val.pte_high; -#endif - return _hypercall5(int, update_va_mapping_otherdomain, va, - new_val.pte_low, pte_hi, flags, domid); -} - -static inline int -HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type) -{ - return _hypercall2(int, vm_assist, cmd, type); -} - -static inline int -HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args) -{ - return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args); -} - -static inline int -HYPERVISOR_suspend(unsigned long srec) -{ - return _hypercall3(int, sched_op, SCHEDOP_shutdown, - SHUTDOWN_suspend, srec); -} - -static inline int -HYPERVISOR_nmi_op(unsigned long op, unsigned long arg) -{ - return _hypercall2(int, nmi_op, op, arg); -} - -static inline void -MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags) -{ - mcl->op = __HYPERVISOR_update_va_mapping; - mcl->args[0] = va; -#ifdef CONFIG_X86_PAE - mcl->args[1] = new_val.pte_low; - mcl->args[2] = new_val.pte_high; -#else - mcl->args[1] = 
new_val.pte_low; - mcl->args[2] = 0; -#endif - mcl->args[3] = flags; -} - -static inline void -MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd, - void *uop, unsigned int count) -{ - mcl->op = __HYPERVISOR_grant_table_op; - mcl->args[0] = cmd; - mcl->args[1] = (unsigned long)uop; - mcl->args[2] = count; -} - -static inline void -MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va, - pte_t new_val, unsigned long flags, - domid_t domid) -{ - mcl->op = __HYPERVISOR_update_va_mapping_otherdomain; - mcl->args[0] = va; -#ifdef CONFIG_X86_PAE - mcl->args[1] = new_val.pte_low; - mcl->args[2] = new_val.pte_high; -#else - mcl->args[1] = new_val.pte_low; - mcl->args[2] = 0; -#endif - mcl->args[3] = flags; - mcl->args[4] = domid; -} - -static inline void -MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr, - struct desc_struct desc) -{ - mcl->op = __HYPERVISOR_update_descriptor; - mcl->args[0] = maddr; - mcl->args[1] = maddr >> 32; - mcl->args[2] = desc.a; - mcl->args[3] = desc.b; -} - -static inline void -MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg) -{ - mcl->op = __HYPERVISOR_memory_op; - mcl->args[0] = cmd; - mcl->args[1] = (unsigned long)arg; -} - -static inline void -MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, - int count, int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmu_update; - mcl->args[0] = (unsigned long)req; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; -} - -static inline void -MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count, - int *success_count, domid_t domid) -{ - mcl->op = __HYPERVISOR_mmuext_op; - mcl->args[0] = (unsigned long)op; - mcl->args[1] = count; - mcl->args[2] = (unsigned long)success_count; - mcl->args[3] = domid; -} - -static inline void -MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries) -{ - mcl->op = __HYPERVISOR_set_gdt; - mcl->args[0] = (unsigned long)frames; - mcl->args[1] = entries; -} - -static inline void -MULTI_stack_switch(struct multicall_entry *mcl, - unsigned long ss, unsigned long esp) -{ - mcl->op = __HYPERVISOR_stack_switch; - mcl->args[0] = ss; - mcl->args[1] = esp; -} - -#endif /* __HYPERCALL_H__ */ diff --git a/include/asm-i386/xen/hypervisor.h b/include/asm-i386/xen/hypervisor.h deleted file mode 100644 index 8e15dd28c91f..000000000000 --- a/include/asm-i386/xen/hypervisor.h +++ /dev/null @@ -1,73 +0,0 @@ -/****************************************************************************** - * hypervisor.h - * - * Linux-specific hypervisor handling. 
- * - * Copyright (c) 2002-2004, K A Fraser - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation; or, when distributed - * separately from the Linux kernel or incorporated into other - * software packages, subject to the following license: - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this source file (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, copy, modify, - * merge, publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef __HYPERVISOR_H__ -#define __HYPERVISOR_H__ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/version.h> - -#include <xen/interface/xen.h> -#include <xen/interface/version.h> - -#include <asm/ptrace.h> -#include <asm/page.h> -#include <asm/desc.h> -#if defined(__i386__) -# ifdef CONFIG_X86_PAE -# include <asm-generic/pgtable-nopud.h> -# else -# include <asm-generic/pgtable-nopmd.h> -# endif -#endif -#include <asm/xen/hypercall.h> - -/* arch/i386/kernel/setup.c */ -extern struct shared_info *HYPERVISOR_shared_info; -extern struct start_info *xen_start_info; -#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN) - -/* arch/i386/mach-xen/evtchn.c */ -/* Force a proper event-channel callback from Xen. */ -extern void force_evtchn_callback(void); - -/* Turn jiffies into Xen system time. */ -u64 jiffies_to_st(unsigned long jiffies); - - -#define MULTI_UVMFLAGS_INDEX 3 -#define MULTI_UVMDOMID_INDEX 4 - -#define is_running_on_xen() (xen_start_info ? 1 : 0) - -#endif /* __HYPERVISOR_H__ */ diff --git a/include/asm-i386/xen/interface.h b/include/asm-i386/xen/interface.h deleted file mode 100644 index 165c3968e138..000000000000 --- a/include/asm-i386/xen/interface.h +++ /dev/null @@ -1,188 +0,0 @@ -/****************************************************************************** - * arch-x86_32.h - * - * Guest OS interface to x86 32-bit Xen. - * - * Copyright (c) 2004, K A Fraser - */ - -#ifndef __XEN_PUBLIC_ARCH_X86_32_H__ -#define __XEN_PUBLIC_ARCH_X86_32_H__ - -#ifdef __XEN__ -#define __DEFINE_GUEST_HANDLE(name, type) \ - typedef struct { type *p; } __guest_handle_ ## name -#else -#define __DEFINE_GUEST_HANDLE(name, type) \ - typedef type * __guest_handle_ ## name -#endif - -#define DEFINE_GUEST_HANDLE_STRUCT(name) \ - __DEFINE_GUEST_HANDLE(name, struct name) -#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) -#define GUEST_HANDLE(name) __guest_handle_ ## name - -#ifndef __ASSEMBLY__ -/* Guest handles for primitive C types. 
*/ -__DEFINE_GUEST_HANDLE(uchar, unsigned char); -__DEFINE_GUEST_HANDLE(uint, unsigned int); -__DEFINE_GUEST_HANDLE(ulong, unsigned long); -DEFINE_GUEST_HANDLE(char); -DEFINE_GUEST_HANDLE(int); -DEFINE_GUEST_HANDLE(long); -DEFINE_GUEST_HANDLE(void); -#endif - -/* - * SEGMENT DESCRIPTOR TABLES - */ -/* - * A number of GDT entries are reserved by Xen. These are not situated at the - * start of the GDT because some stupid OSes export hard-coded selector values - * in their ABI. These hard-coded values are always near the start of the GDT, - * so Xen places itself out of the way, at the far end of the GDT. - */ -#define FIRST_RESERVED_GDT_PAGE 14 -#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096) -#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8) - -/* - * These flat segments are in the Xen-private section of every GDT. Since these - * are also present in the initial GDT, many OSes will be able to avoid - * installing their own GDT. - */ -#define FLAT_RING1_CS 0xe019 /* GDT index 259 */ -#define FLAT_RING1_DS 0xe021 /* GDT index 260 */ -#define FLAT_RING1_SS 0xe021 /* GDT index 260 */ -#define FLAT_RING3_CS 0xe02b /* GDT index 261 */ -#define FLAT_RING3_DS 0xe033 /* GDT index 262 */ -#define FLAT_RING3_SS 0xe033 /* GDT index 262 */ - -#define FLAT_KERNEL_CS FLAT_RING1_CS -#define FLAT_KERNEL_DS FLAT_RING1_DS -#define FLAT_KERNEL_SS FLAT_RING1_SS -#define FLAT_USER_CS FLAT_RING3_CS -#define FLAT_USER_DS FLAT_RING3_DS -#define FLAT_USER_SS FLAT_RING3_SS - -/* And the trap vector is... */ -#define TRAP_INSTR "int $0x82" - -/* - * Virtual addresses beyond this are not modifiable by guest OSes. The - * machine->physical mapping table starts at this address, read-only. - */ -#ifdef CONFIG_X86_PAE -#define __HYPERVISOR_VIRT_START 0xF5800000 -#else -#define __HYPERVISOR_VIRT_START 0xFC000000 -#endif - -#ifndef HYPERVISOR_VIRT_START -#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) -#endif - -#ifndef machine_to_phys_mapping -#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) -#endif - -/* Maximum number of virtual CPUs in multi-processor guests. */ -#define MAX_VIRT_CPUS 32 - -#ifndef __ASSEMBLY__ - -/* - * Send an array of these to HYPERVISOR_set_trap_table() - */ -#define TI_GET_DPL(_ti) ((_ti)->flags & 3) -#define TI_GET_IF(_ti) ((_ti)->flags & 4) -#define TI_SET_DPL(_ti, _dpl) ((_ti)->flags |= (_dpl)) -#define TI_SET_IF(_ti, _if) ((_ti)->flags |= ((!!(_if))<<2)) - -struct trap_info { - uint8_t vector; /* exception vector */ - uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */ - uint16_t cs; /* code selector */ - unsigned long address; /* code offset */ -}; -DEFINE_GUEST_HANDLE_STRUCT(trap_info); - -struct cpu_user_regs { - uint32_t ebx; - uint32_t ecx; - uint32_t edx; - uint32_t esi; - uint32_t edi; - uint32_t ebp; - uint32_t eax; - uint16_t error_code; /* private */ - uint16_t entry_vector; /* private */ - uint32_t eip; - uint16_t cs; - uint8_t saved_upcall_mask; - uint8_t _pad0; - uint32_t eflags; /* eflags.IF == !saved_upcall_mask */ - uint32_t esp; - uint16_t ss, _pad1; - uint16_t es, _pad2; - uint16_t ds, _pad3; - uint16_t fs, _pad4; - uint16_t gs, _pad5; -}; -DEFINE_GUEST_HANDLE_STRUCT(cpu_user_regs); - -typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */ - -/* - * The following is all CPU context. Note that the fpu_ctxt block is filled - * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 
- */ -struct vcpu_guest_context { - /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */ - struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */ -#define VGCF_I387_VALID (1<<0) -#define VGCF_HVM_GUEST (1<<1) -#define VGCF_IN_KERNEL (1<<2) - unsigned long flags; /* VGCF_* flags */ - struct cpu_user_regs user_regs; /* User-level CPU registers */ - struct trap_info trap_ctxt[256]; /* Virtual IDT */ - unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ - unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */ - unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */ - unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */ - unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */ - unsigned long event_callback_cs; /* CS:EIP of event callback */ - unsigned long event_callback_eip; - unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ - unsigned long failsafe_callback_eip; - unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ -}; -DEFINE_GUEST_HANDLE_STRUCT(vcpu_guest_context); - -struct arch_shared_info { - unsigned long max_pfn; /* max pfn that appears in table */ - /* Frame containing list of mfns containing list of mfns containing p2m. */ - unsigned long pfn_to_mfn_frame_list_list; - unsigned long nmi_reason; -}; - -struct arch_vcpu_info { - unsigned long cr2; - unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */ -}; - -#endif /* !__ASSEMBLY__ */ - -/* - * Prefix forces emulation of some non-trapping instructions. - * Currently only CPUID. - */ -#ifdef __ASSEMBLY__ -#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ; -#define XEN_CPUID XEN_EMULATE_PREFIX cpuid -#else -#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; " -#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid" -#endif - -#endif diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h deleted file mode 100644 index 23c86cef3b25..000000000000 --- a/include/asm-i386/xor.h +++ /dev/null @@ -1,883 +0,0 @@ -/* - * include/asm-i386/xor.h - * - * Optimized RAID-5 checksumming functions for MMX and SSE. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * You should have received a copy of the GNU General Public License - * (for example /usr/src/linux/COPYING); if not, write to the Free - * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -/* - * High-speed RAID5 checksumming functions utilizing MMX instructions. - * Copyright (C) 1998 Ingo Molnar. 
- */ - -#define LD(x,y) " movq 8*("#x")(%1), %%mm"#y" ;\n" -#define ST(x,y) " movq %%mm"#y", 8*("#x")(%1) ;\n" -#define XO1(x,y) " pxor 8*("#x")(%2), %%mm"#y" ;\n" -#define XO2(x,y) " pxor 8*("#x")(%3), %%mm"#y" ;\n" -#define XO3(x,y) " pxor 8*("#x")(%4), %%mm"#y" ;\n" -#define XO4(x,y) " pxor 8*("#x")(%5), %%mm"#y" ;\n" - -#include <asm/i387.h> - -static void -xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) -{ - unsigned long lines = bytes >> 7; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - ST(i,0) \ - XO1(i+1,1) \ - ST(i+1,1) \ - XO1(i+2,2) \ - ST(i+2,2) \ - XO1(i+3,3) \ - ST(i+3,3) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $128, %1 ;\n" - " addl $128, %2 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2) - : - : "memory"); - - kernel_fpu_end(); -} - -static void -xor_pII_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) -{ - unsigned long lines = bytes >> 7; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - ST(i,0) \ - XO2(i+1,1) \ - ST(i+1,1) \ - XO2(i+2,2) \ - ST(i+2,2) \ - XO2(i+3,3) \ - ST(i+3,3) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $128, %1 ;\n" - " addl $128, %2 ;\n" - " addl $128, %3 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3) - : - : "memory"); - - kernel_fpu_end(); -} - -static void -xor_pII_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) -{ - unsigned long lines = bytes >> 7; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - ST(i,0) \ - XO3(i+1,1) \ - ST(i+1,1) \ - XO3(i+2,2) \ - ST(i+2,2) \ - XO3(i+3,3) \ - ST(i+3,3) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $128, %1 ;\n" - " addl $128, %2 ;\n" - " addl $128, %3 ;\n" - " addl $128, %4 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) - : - : "memory"); - - kernel_fpu_end(); -} - - -static void -xor_pII_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) -{ - unsigned long lines = bytes >> 7; - - kernel_fpu_begin(); - - /* Make sure GCC forgets anything it knows about p4 or p5, - such that it won't pass to the asm volatile below a - register that is shared with any other variable. That's - because we modify p4 and p5 there, but we can't mark them - as read/write, otherwise we'd overflow the 10-asm-operands - limit of GCC < 3.1. 
*/ - __asm__ ("" : "+r" (p4), "+r" (p5)); - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - ST(i,0) \ - XO4(i+1,1) \ - ST(i+1,1) \ - XO4(i+2,2) \ - ST(i+2,2) \ - XO4(i+3,3) \ - ST(i+3,3) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $128, %1 ;\n" - " addl $128, %2 ;\n" - " addl $128, %3 ;\n" - " addl $128, %4 ;\n" - " addl $128, %5 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3) - : "r" (p4), "r" (p5) - : "memory"); - - /* p4 and p5 were modified, and now the variables are dead. - Clobber them just to be sure nobody does something stupid - like assuming they have some legal value. */ - __asm__ ("" : "=r" (p4), "=r" (p5)); - - kernel_fpu_end(); -} - -#undef LD -#undef XO1 -#undef XO2 -#undef XO3 -#undef XO4 -#undef ST -#undef BLOCK - -static void -xor_p5_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) -{ - unsigned long lines = bytes >> 6; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( - " .align 32 ;\n" - " 1: ;\n" - " movq (%1), %%mm0 ;\n" - " movq 8(%1), %%mm1 ;\n" - " pxor (%2), %%mm0 ;\n" - " movq 16(%1), %%mm2 ;\n" - " movq %%mm0, (%1) ;\n" - " pxor 8(%2), %%mm1 ;\n" - " movq 24(%1), %%mm3 ;\n" - " movq %%mm1, 8(%1) ;\n" - " pxor 16(%2), %%mm2 ;\n" - " movq 32(%1), %%mm4 ;\n" - " movq %%mm2, 16(%1) ;\n" - " pxor 24(%2), %%mm3 ;\n" - " movq 40(%1), %%mm5 ;\n" - " movq %%mm3, 24(%1) ;\n" - " pxor 32(%2), %%mm4 ;\n" - " movq 48(%1), %%mm6 ;\n" - " movq %%mm4, 32(%1) ;\n" - " pxor 40(%2), %%mm5 ;\n" - " movq 56(%1), %%mm7 ;\n" - " movq %%mm5, 40(%1) ;\n" - " pxor 48(%2), %%mm6 ;\n" - " pxor 56(%2), %%mm7 ;\n" - " movq %%mm6, 48(%1) ;\n" - " movq %%mm7, 56(%1) ;\n" - - " addl $64, %1 ;\n" - " addl $64, %2 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2) - : - : "memory"); - - kernel_fpu_end(); -} - -static void -xor_p5_mmx_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) -{ - unsigned long lines = bytes >> 6; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( - " .align 32,0x90 ;\n" - " 1: ;\n" - " movq (%1), %%mm0 ;\n" - " movq 8(%1), %%mm1 ;\n" - " pxor (%2), %%mm0 ;\n" - " movq 16(%1), %%mm2 ;\n" - " pxor 8(%2), %%mm1 ;\n" - " pxor (%3), %%mm0 ;\n" - " pxor 16(%2), %%mm2 ;\n" - " movq %%mm0, (%1) ;\n" - " pxor 8(%3), %%mm1 ;\n" - " pxor 16(%3), %%mm2 ;\n" - " movq 24(%1), %%mm3 ;\n" - " movq %%mm1, 8(%1) ;\n" - " movq 32(%1), %%mm4 ;\n" - " movq 40(%1), %%mm5 ;\n" - " pxor 24(%2), %%mm3 ;\n" - " movq %%mm2, 16(%1) ;\n" - " pxor 32(%2), %%mm4 ;\n" - " pxor 24(%3), %%mm3 ;\n" - " pxor 40(%2), %%mm5 ;\n" - " movq %%mm3, 24(%1) ;\n" - " pxor 32(%3), %%mm4 ;\n" - " pxor 40(%3), %%mm5 ;\n" - " movq 48(%1), %%mm6 ;\n" - " movq %%mm4, 32(%1) ;\n" - " movq 56(%1), %%mm7 ;\n" - " pxor 48(%2), %%mm6 ;\n" - " movq %%mm5, 40(%1) ;\n" - " pxor 56(%2), %%mm7 ;\n" - " pxor 48(%3), %%mm6 ;\n" - " pxor 56(%3), %%mm7 ;\n" - " movq %%mm6, 48(%1) ;\n" - " movq %%mm7, 56(%1) ;\n" - - " addl $64, %1 ;\n" - " addl $64, %2 ;\n" - " addl $64, %3 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3) - : - : "memory" ); - - kernel_fpu_end(); -} - -static void -xor_p5_mmx_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned 
long *p4) -{ - unsigned long lines = bytes >> 6; - - kernel_fpu_begin(); - - __asm__ __volatile__ ( - " .align 32,0x90 ;\n" - " 1: ;\n" - " movq (%1), %%mm0 ;\n" - " movq 8(%1), %%mm1 ;\n" - " pxor (%2), %%mm0 ;\n" - " movq 16(%1), %%mm2 ;\n" - " pxor 8(%2), %%mm1 ;\n" - " pxor (%3), %%mm0 ;\n" - " pxor 16(%2), %%mm2 ;\n" - " pxor 8(%3), %%mm1 ;\n" - " pxor (%4), %%mm0 ;\n" - " movq 24(%1), %%mm3 ;\n" - " pxor 16(%3), %%mm2 ;\n" - " pxor 8(%4), %%mm1 ;\n" - " movq %%mm0, (%1) ;\n" - " movq 32(%1), %%mm4 ;\n" - " pxor 24(%2), %%mm3 ;\n" - " pxor 16(%4), %%mm2 ;\n" - " movq %%mm1, 8(%1) ;\n" - " movq 40(%1), %%mm5 ;\n" - " pxor 32(%2), %%mm4 ;\n" - " pxor 24(%3), %%mm3 ;\n" - " movq %%mm2, 16(%1) ;\n" - " pxor 40(%2), %%mm5 ;\n" - " pxor 32(%3), %%mm4 ;\n" - " pxor 24(%4), %%mm3 ;\n" - " movq %%mm3, 24(%1) ;\n" - " movq 56(%1), %%mm7 ;\n" - " movq 48(%1), %%mm6 ;\n" - " pxor 40(%3), %%mm5 ;\n" - " pxor 32(%4), %%mm4 ;\n" - " pxor 48(%2), %%mm6 ;\n" - " movq %%mm4, 32(%1) ;\n" - " pxor 56(%2), %%mm7 ;\n" - " pxor 40(%4), %%mm5 ;\n" - " pxor 48(%3), %%mm6 ;\n" - " pxor 56(%3), %%mm7 ;\n" - " movq %%mm5, 40(%1) ;\n" - " pxor 48(%4), %%mm6 ;\n" - " pxor 56(%4), %%mm7 ;\n" - " movq %%mm6, 48(%1) ;\n" - " movq %%mm7, 56(%1) ;\n" - - " addl $64, %1 ;\n" - " addl $64, %2 ;\n" - " addl $64, %3 ;\n" - " addl $64, %4 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) - : - : "memory"); - - kernel_fpu_end(); -} - -static void -xor_p5_mmx_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) -{ - unsigned long lines = bytes >> 6; - - kernel_fpu_begin(); - - /* Make sure GCC forgets anything it knows about p4 or p5, - such that it won't pass to the asm volatile below a - register that is shared with any other variable. That's - because we modify p4 and p5 there, but we can't mark them - as read/write, otherwise we'd overflow the 10-asm-operands - limit of GCC < 3.1. 
*/ - __asm__ ("" : "+r" (p4), "+r" (p5)); - - __asm__ __volatile__ ( - " .align 32,0x90 ;\n" - " 1: ;\n" - " movq (%1), %%mm0 ;\n" - " movq 8(%1), %%mm1 ;\n" - " pxor (%2), %%mm0 ;\n" - " pxor 8(%2), %%mm1 ;\n" - " movq 16(%1), %%mm2 ;\n" - " pxor (%3), %%mm0 ;\n" - " pxor 8(%3), %%mm1 ;\n" - " pxor 16(%2), %%mm2 ;\n" - " pxor (%4), %%mm0 ;\n" - " pxor 8(%4), %%mm1 ;\n" - " pxor 16(%3), %%mm2 ;\n" - " movq 24(%1), %%mm3 ;\n" - " pxor (%5), %%mm0 ;\n" - " pxor 8(%5), %%mm1 ;\n" - " movq %%mm0, (%1) ;\n" - " pxor 16(%4), %%mm2 ;\n" - " pxor 24(%2), %%mm3 ;\n" - " movq %%mm1, 8(%1) ;\n" - " pxor 16(%5), %%mm2 ;\n" - " pxor 24(%3), %%mm3 ;\n" - " movq 32(%1), %%mm4 ;\n" - " movq %%mm2, 16(%1) ;\n" - " pxor 24(%4), %%mm3 ;\n" - " pxor 32(%2), %%mm4 ;\n" - " movq 40(%1), %%mm5 ;\n" - " pxor 24(%5), %%mm3 ;\n" - " pxor 32(%3), %%mm4 ;\n" - " pxor 40(%2), %%mm5 ;\n" - " movq %%mm3, 24(%1) ;\n" - " pxor 32(%4), %%mm4 ;\n" - " pxor 40(%3), %%mm5 ;\n" - " movq 48(%1), %%mm6 ;\n" - " movq 56(%1), %%mm7 ;\n" - " pxor 32(%5), %%mm4 ;\n" - " pxor 40(%4), %%mm5 ;\n" - " pxor 48(%2), %%mm6 ;\n" - " pxor 56(%2), %%mm7 ;\n" - " movq %%mm4, 32(%1) ;\n" - " pxor 48(%3), %%mm6 ;\n" - " pxor 56(%3), %%mm7 ;\n" - " pxor 40(%5), %%mm5 ;\n" - " pxor 48(%4), %%mm6 ;\n" - " pxor 56(%4), %%mm7 ;\n" - " movq %%mm5, 40(%1) ;\n" - " pxor 48(%5), %%mm6 ;\n" - " pxor 56(%5), %%mm7 ;\n" - " movq %%mm6, 48(%1) ;\n" - " movq %%mm7, 56(%1) ;\n" - - " addl $64, %1 ;\n" - " addl $64, %2 ;\n" - " addl $64, %3 ;\n" - " addl $64, %4 ;\n" - " addl $64, %5 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3) - : "r" (p4), "r" (p5) - : "memory"); - - /* p4 and p5 were modified, and now the variables are dead. - Clobber them just to be sure nobody does something stupid - like assuming they have some legal value. 
*/ - __asm__ ("" : "=r" (p4), "=r" (p5)); - - kernel_fpu_end(); -} - -static struct xor_block_template xor_block_pII_mmx = { - .name = "pII_mmx", - .do_2 = xor_pII_mmx_2, - .do_3 = xor_pII_mmx_3, - .do_4 = xor_pII_mmx_4, - .do_5 = xor_pII_mmx_5, -}; - -static struct xor_block_template xor_block_p5_mmx = { - .name = "p5_mmx", - .do_2 = xor_p5_mmx_2, - .do_3 = xor_p5_mmx_3, - .do_4 = xor_p5_mmx_4, - .do_5 = xor_p5_mmx_5, -}; - -/* - * Cache avoiding checksumming functions utilizing KNI instructions - * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) - */ - -#define XMMS_SAVE do { \ - preempt_disable(); \ - cr0 = read_cr0(); \ - clts(); \ - __asm__ __volatile__ ( \ - "movups %%xmm0,(%0) ;\n\t" \ - "movups %%xmm1,0x10(%0) ;\n\t" \ - "movups %%xmm2,0x20(%0) ;\n\t" \ - "movups %%xmm3,0x30(%0) ;\n\t" \ - : \ - : "r" (xmm_save) \ - : "memory"); \ -} while(0) - -#define XMMS_RESTORE do { \ - __asm__ __volatile__ ( \ - "sfence ;\n\t" \ - "movups (%0),%%xmm0 ;\n\t" \ - "movups 0x10(%0),%%xmm1 ;\n\t" \ - "movups 0x20(%0),%%xmm2 ;\n\t" \ - "movups 0x30(%0),%%xmm3 ;\n\t" \ - : \ - : "r" (xmm_save) \ - : "memory"); \ - write_cr0(cr0); \ - preempt_enable(); \ -} while(0) - -#define ALIGN16 __attribute__((aligned(16))) - -#define OFFS(x) "16*("#x")" -#define PF_OFFS(x) "256+16*("#x")" -#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" -#define LD(x,y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n" -#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n" -#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n" -#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n" -#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n" -#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n" -#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n" -#define XO1(x,y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n" -#define XO2(x,y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n" -#define XO3(x,y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n" -#define XO4(x,y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n" -#define XO5(x,y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n" - - -static void -xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) -{ - unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - - XMMS_SAVE; - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - LD(i,0) \ - LD(i+1,1) \ - PF1(i) \ - PF1(i+2) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ - - - PF0(0) - PF0(2) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2) - : - : "memory"); - - XMMS_RESTORE; -} - -static void -xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) -{ - unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - - XMMS_SAVE; - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF2(i) \ - PF2(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ - - - PF0(0) - PF0(2) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r"(p2), "+r"(p3) - : - : 
"memory" ); - - XMMS_RESTORE; -} - -static void -xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) -{ - unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - - XMMS_SAVE; - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - PF3(i) \ - PF3(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ - - - PF0(0) - PF0(2) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4) - : - : "memory" ); - - XMMS_RESTORE; -} - -static void -xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) -{ - unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - - XMMS_SAVE; - - /* Make sure GCC forgets anything it knows about p4 or p5, - such that it won't pass to the asm volatile below a - register that is shared with any other variable. That's - because we modify p4 and p5 there, but we can't mark them - as read/write, otherwise we'd overflow the 10-asm-operands - limit of GCC < 3.1. */ - __asm__ ("" : "+r" (p4), "+r" (p5)); - - __asm__ __volatile__ ( -#undef BLOCK -#define BLOCK(i) \ - PF1(i) \ - PF1(i+2) \ - LD(i,0) \ - LD(i+1,1) \ - LD(i+2,2) \ - LD(i+3,3) \ - PF2(i) \ - PF2(i+2) \ - XO1(i,0) \ - XO1(i+1,1) \ - XO1(i+2,2) \ - XO1(i+3,3) \ - PF3(i) \ - PF3(i+2) \ - XO2(i,0) \ - XO2(i+1,1) \ - XO2(i+2,2) \ - XO2(i+3,3) \ - PF4(i) \ - PF4(i+2) \ - PF0(i+4) \ - PF0(i+6) \ - XO3(i,0) \ - XO3(i+1,1) \ - XO3(i+2,2) \ - XO3(i+3,3) \ - XO4(i,0) \ - XO4(i+1,1) \ - XO4(i+2,2) \ - XO4(i+3,3) \ - ST(i,0) \ - ST(i+1,1) \ - ST(i+2,2) \ - ST(i+3,3) \ - - - PF0(0) - PF0(2) - - " .align 32 ;\n" - " 1: ;\n" - - BLOCK(0) - BLOCK(4) - BLOCK(8) - BLOCK(12) - - " addl $256, %1 ;\n" - " addl $256, %2 ;\n" - " addl $256, %3 ;\n" - " addl $256, %4 ;\n" - " addl $256, %5 ;\n" - " decl %0 ;\n" - " jnz 1b ;\n" - : "+r" (lines), - "+r" (p1), "+r" (p2), "+r" (p3) - : "r" (p4), "r" (p5) - : "memory"); - - /* p4 and p5 were modified, and now the variables are dead. - Clobber them just to be sure nobody does something stupid - like assuming they have some legal value. */ - __asm__ ("" : "=r" (p4), "=r" (p5)); - - XMMS_RESTORE; -} - -static struct xor_block_template xor_block_pIII_sse = { - .name = "pIII_sse", - .do_2 = xor_sse_2, - .do_3 = xor_sse_3, - .do_4 = xor_sse_4, - .do_5 = xor_sse_5, -}; - -/* Also try the generic routines. */ -#include <asm-generic/xor.h> - -#undef XOR_TRY_TEMPLATES -#define XOR_TRY_TEMPLATES \ - do { \ - xor_speed(&xor_block_8regs); \ - xor_speed(&xor_block_8regs_p); \ - xor_speed(&xor_block_32regs); \ - xor_speed(&xor_block_32regs_p); \ - if (cpu_has_xmm) \ - xor_speed(&xor_block_pIII_sse); \ - if (cpu_has_mmx) { \ - xor_speed(&xor_block_pII_mmx); \ - xor_speed(&xor_block_p5_mmx); \ - } \ - } while (0) - -/* We force the use of the SSE xor block because it can write around L2. - We may also be able to load into the L1 only depending on how the cpu - deals with a load to a line that is being prefetched. 
 */ -#define XOR_SELECT_TEMPLATE(FASTEST) \ - (cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
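
The xor.h routines above all implement one contract from struct xor_block_template: XOR two to five equally sized buffers into the first one, processing 64 (p5_mmx), 128 (pII_mmx) or 256 (pIII_sse) bytes per loop iteration in MMX or SSE registers. As a plain-C reference for what that assembly computes, here is a minimal portable sketch of the do_2/do_3 operations; the function names are illustrative, not the kernel's API.

```c
#include <stddef.h>
#include <stdio.h>

/* Reference for the "do_2" contract: p1[i] ^= p2[i] over `bytes` bytes. */
static void xor_ref_2(size_t bytes, unsigned long *p1, const unsigned long *p2)
{
	for (size_t i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i];
}

/* Reference for the "do_3" contract: fold two source buffers into p1. */
static void xor_ref_3(size_t bytes, unsigned long *p1,
		      const unsigned long *p2, const unsigned long *p3)
{
	for (size_t i = 0; i < bytes / sizeof(unsigned long); i++)
		p1[i] ^= p2[i] ^ p3[i];
}

int main(void)
{
	unsigned long d[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned long s1[8] = { 8, 7, 6, 5, 4, 3, 2, 1 };
	unsigned long s2[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };

	xor_ref_3(sizeof(d), d, s1, s2);   /* RAID-5 style parity fold */
	printf("d[0] = %lu\n", d[0]);      /* 1 ^ 8 ^ 1 = 8 */
	return 0;
}
```

The optimized versions in the header are drop-in replacements for this loop; XOR_TRY_TEMPLATES benchmarks them at boot and XOR_SELECT_TEMPLATE prefers the SSE variant because its non-temporal stores bypass the L2 cache.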
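Further up in this diff, the _hypercallN() macros in the deleted xen/hypercall.h dispatch through hypercall_page, a page of 32-byte call stubs indexed by hypercall number, with arguments passed in %ebx/%ecx/%edx/%esi/%edi. The following stand-alone sketch only illustrates the slot-addressing arithmetic that the `call %[call]` operand relies on; the page, its contents and the FAKE_* names are made up for illustration, and only the 32-byte stride and the sched_op number reflect the real interface.

```c
#include <stddef.h>
#include <stdio.h>

/* Same shape as the real declaration: an array of 32-byte entries, so that
 * &hypercall_page[n] lands 32*n bytes into the page. */
struct hypercall_entry { char _entry[32]; };

static struct hypercall_entry fake_hypercall_page[128]; /* 4096-byte page / 32 */

#define FAKE__HYPERVISOR_sched_op 29 /* numbering as in xen/interface/xen.h */

int main(void)
{
	/* The real macro's "call hypercall_page[__HYPERVISOR_sched_op]" is a
	 * direct call that lands at this address inside the stub page: */
	void *target = &fake_hypercall_page[FAKE__HYPERVISOR_sched_op];

	printf("sched_op stub sits %td bytes into the page\n",
	       (char *)target - (char *)fake_hypercall_page); /* prints 928 */
	return 0;
}
```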