author    H. Peter Anvin <hpa@zytor.com>  2009-02-24 16:11:51 -0800
committer H. Peter Anvin <hpa@zytor.com>  2009-02-24 16:11:51 -0800
commit    638bee71c83a2837b48062fdc5b222163cf53d79 (patch)
tree      9c1699c07a5aa547d769138317279b8ee1ba89e8 /arch/x86/include
parent    2aaa822984b97efd894d10d1e1382206ef0291a7 (diff)
parent    a852cbfaaf8122827602027b1614971cfd832304 (diff)
Merge branch 'x86/core' into x86/mce2
Diffstat (limited to 'arch/x86/include')
-rw-r--r-- arch/x86/include/asm/a.out-core.h | 2
-rw-r--r-- arch/x86/include/asm/acpi.h | 3
-rw-r--r-- arch/x86/include/asm/apic.h | 424
-rw-r--r-- arch/x86/include/asm/apicnum.h | 12
-rw-r--r-- arch/x86/include/asm/apm.h (renamed from arch/x86/include/asm/mach-default/apm.h) | 0
-rw-r--r-- arch/x86/include/asm/arch_hooks.h | 26
-rw-r--r-- arch/x86/include/asm/bigsmp/apic.h | 155
-rw-r--r-- arch/x86/include/asm/bigsmp/apicdef.h | 13
-rw-r--r-- arch/x86/include/asm/bigsmp/ipi.h | 22
-rw-r--r-- arch/x86/include/asm/calling.h | 56
-rw-r--r-- arch/x86/include/asm/cpu.h | 17
-rw-r--r-- arch/x86/include/asm/cpumask.h | 32
-rw-r--r-- arch/x86/include/asm/current.h | 24
-rw-r--r-- arch/x86/include/asm/do_timer.h (renamed from arch/x86/include/asm/mach-default/do_timer.h) | 0
-rw-r--r-- arch/x86/include/asm/elf.h | 15
-rw-r--r-- arch/x86/include/asm/entry_arch.h (renamed from arch/x86/include/asm/mach-default/entry_arch.h) | 25
-rw-r--r-- arch/x86/include/asm/es7000/apic.h | 242
-rw-r--r-- arch/x86/include/asm/es7000/apicdef.h | 13
-rw-r--r-- arch/x86/include/asm/es7000/ipi.h | 22
-rw-r--r-- arch/x86/include/asm/es7000/mpparse.h | 29
-rw-r--r-- arch/x86/include/asm/es7000/wakecpu.h | 37
-rw-r--r-- arch/x86/include/asm/fixmap_32.h | 4
-rw-r--r-- arch/x86/include/asm/fixmap_64.h | 4
-rw-r--r-- arch/x86/include/asm/genapic.h | 6
-rw-r--r-- arch/x86/include/asm/genapic_32.h | 148
-rw-r--r-- arch/x86/include/asm/genapic_64.h | 66
-rw-r--r-- arch/x86/include/asm/hardirq.h | 49
-rw-r--r-- arch/x86/include/asm/hardirq_32.h | 30
-rw-r--r-- arch/x86/include/asm/hardirq_64.h | 25
-rw-r--r-- arch/x86/include/asm/hw_irq.h | 24
-rw-r--r-- arch/x86/include/asm/i8259.h | 4
-rw-r--r-- arch/x86/include/asm/io.h | 98
-rw-r--r-- arch/x86/include/asm/io_32.h | 88
-rw-r--r-- arch/x86/include/asm/io_64.h | 61
-rw-r--r-- arch/x86/include/asm/io_apic.h | 41
-rw-r--r-- arch/x86/include/asm/ipi.h | 75
-rw-r--r-- arch/x86/include/asm/irq.h | 4
-rw-r--r-- arch/x86/include/asm/irq_regs.h | 36
-rw-r--r-- arch/x86/include/asm/irq_regs_32.h | 31
-rw-r--r-- arch/x86/include/asm/irq_regs_64.h | 1
-rw-r--r-- arch/x86/include/asm/irq_vectors.h | 209
-rw-r--r-- arch/x86/include/asm/kexec.h | 27
-rw-r--r-- arch/x86/include/asm/linkage.h | 64
-rw-r--r-- arch/x86/include/asm/mach-default/mach_apic.h | 168
-rw-r--r-- arch/x86/include/asm/mach-default/mach_apicdef.h | 24
-rw-r--r-- arch/x86/include/asm/mach-default/mach_ipi.h | 64
-rw-r--r-- arch/x86/include/asm/mach-default/mach_mpparse.h | 17
-rw-r--r-- arch/x86/include/asm/mach-default/mach_mpspec.h | 12
-rw-r--r-- arch/x86/include/asm/mach-default/mach_wakecpu.h | 41
-rw-r--r-- arch/x86/include/asm/mach-generic/gpio.h | 15
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_apic.h | 35
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_apicdef.h | 11
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_ipi.h | 10
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_mpparse.h | 9
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_mpspec.h | 12
-rw-r--r-- arch/x86/include/asm/mach-generic/mach_wakecpu.h | 12
-rw-r--r-- arch/x86/include/asm/mach-rdc321x/gpio.h | 60
-rw-r--r-- arch/x86/include/asm/mach-voyager/do_timer.h | 17
-rw-r--r-- arch/x86/include/asm/mach-voyager/entry_arch.h | 26
-rw-r--r-- arch/x86/include/asm/mach-voyager/setup_arch.h | 12
-rw-r--r-- arch/x86/include/asm/mach_timer.h (renamed from arch/x86/include/asm/mach-default/mach_timer.h) | 0
-rw-r--r-- arch/x86/include/asm/mach_traps.h (renamed from arch/x86/include/asm/mach-default/mach_traps.h) | 0
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 63
-rw-r--r-- arch/x86/include/asm/mmu_context_32.h | 55
-rw-r--r-- arch/x86/include/asm/mmu_context_64.h | 54
-rw-r--r-- arch/x86/include/asm/mpspec.h | 33
-rw-r--r-- arch/x86/include/asm/mpspec_def.h | 23
-rw-r--r-- arch/x86/include/asm/numaq.h | 2
-rw-r--r-- arch/x86/include/asm/numaq/apic.h | 142
-rw-r--r-- arch/x86/include/asm/numaq/apicdef.h | 14
-rw-r--r-- arch/x86/include/asm/numaq/ipi.h | 22
-rw-r--r-- arch/x86/include/asm/numaq/mpparse.h | 6
-rw-r--r-- arch/x86/include/asm/numaq/wakecpu.h | 45
-rw-r--r-- arch/x86/include/asm/page.h | 152
-rw-r--r-- arch/x86/include/asm/page_32.h | 87
-rw-r--r-- arch/x86/include/asm/page_32_types.h | 60
-rw-r--r-- arch/x86/include/asm/page_64.h | 101
-rw-r--r-- arch/x86/include/asm/page_64_types.h | 89
-rw-r--r-- arch/x86/include/asm/page_types.h | 57
-rw-r--r-- arch/x86/include/asm/paravirt.h | 465
-rw-r--r-- arch/x86/include/asm/pat.h | 4
-rw-r--r-- arch/x86/include/asm/pci-functions.h (renamed from arch/x86/include/asm/mach-default/pci-functions.h) | 0
-rw-r--r-- arch/x86/include/asm/pda.h | 137
-rw-r--r-- arch/x86/include/asm/percpu.h | 169
-rw-r--r-- arch/x86/include/asm/pgtable-2level.h | 2
-rw-r--r-- arch/x86/include/asm/pgtable-2level_types.h (renamed from arch/x86/include/asm/pgtable-2level-defs.h) | 17
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 35
-rw-r--r-- arch/x86/include/asm/pgtable-3level_types.h (renamed from arch/x86/include/asm/pgtable-3level-defs.h) | 20
-rw-r--r-- arch/x86/include/asm/pgtable.h | 507
-rw-r--r-- arch/x86/include/asm/pgtable_32.h | 88
-rw-r--r-- arch/x86/include/asm/pgtable_32_types.h | 46
-rw-r--r-- arch/x86/include/asm/pgtable_64.h | 113
-rw-r--r-- arch/x86/include/asm/pgtable_64_types.h | 63
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 328
-rw-r--r-- arch/x86/include/asm/prctl.h | 4
-rw-r--r-- arch/x86/include/asm/processor.h | 39
-rw-r--r-- arch/x86/include/asm/proto.h | 4
-rw-r--r-- arch/x86/include/asm/ptrace.h | 4
-rw-r--r-- arch/x86/include/asm/rdc321x_defs.h (renamed from arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h) | 0
-rw-r--r-- arch/x86/include/asm/segment.h | 9
-rw-r--r-- arch/x86/include/asm/setup.h | 61
-rw-r--r-- arch/x86/include/asm/setup_arch.h (renamed from arch/x86/include/asm/mach-default/setup_arch.h) | 0
-rw-r--r-- arch/x86/include/asm/smp.h | 69
-rw-r--r-- arch/x86/include/asm/smpboot_hooks.h (renamed from arch/x86/include/asm/mach-default/smpboot_hooks.h) | 6
-rw-r--r-- arch/x86/include/asm/spinlock.h | 69
-rw-r--r-- arch/x86/include/asm/stackprotector.h | 124
-rw-r--r-- arch/x86/include/asm/summit/apic.h | 202
-rw-r--r-- arch/x86/include/asm/summit/apicdef.h | 13
-rw-r--r-- arch/x86/include/asm/summit/ipi.h | 26
-rw-r--r-- arch/x86/include/asm/summit/mpparse.h | 109
-rw-r--r-- arch/x86/include/asm/syscalls.h | 23
-rw-r--r-- arch/x86/include/asm/system.h | 67
-rw-r--r-- arch/x86/include/asm/thread_info.h | 21
-rw-r--r-- arch/x86/include/asm/timer.h | 2
-rw-r--r-- arch/x86/include/asm/tlbflush.h | 17
-rw-r--r-- arch/x86/include/asm/topology.h | 31
-rw-r--r-- arch/x86/include/asm/trampoline.h | 1
-rw-r--r-- arch/x86/include/asm/traps.h | 2
-rw-r--r-- arch/x86/include/asm/uaccess.h | 138
-rw-r--r-- arch/x86/include/asm/uaccess_64.h | 16
-rw-r--r-- arch/x86/include/asm/uv/uv.h | 36
-rw-r--r-- arch/x86/include/asm/uv/uv_bau.h | 1
-rw-r--r-- arch/x86/include/asm/vic.h | 61
-rw-r--r-- arch/x86/include/asm/voyager.h | 529
-rw-r--r-- arch/x86/include/asm/xen/events.h | 6
-rw-r--r-- arch/x86/include/asm/xen/hypervisor.h | 28
126 files changed, 2841 insertions(+), 4685 deletions(-)
diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h
index 3c601f8224be..bb70e397aa84 100644
--- a/arch/x86/include/asm/a.out-core.h
+++ b/arch/x86/include/asm/a.out-core.h
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
- savesegment(gs, dump->regs.gs);
+ dump->regs.gs = get_user_gs(regs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 9830681446ad..4518dc500903 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
acpi_noirq = 1;
}
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ab1d51a8855e..a6208dc74633 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -1,15 +1,18 @@
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H
-#include <linux/pm.h>
+#include <linux/cpumask.h>
#include <linux/delay.h>
+#include <linux/pm.h>
#include <asm/alternative.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
+#include <asm/cpufeature.h>
#include <asm/processor.h>
+#include <asm/apicdef.h>
+#include <asm/atomic.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
#include <asm/system.h>
-#include <asm/cpufeature.h>
#include <asm/msr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -33,7 +36,13 @@
} while (0)
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -41,6 +50,21 @@ extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+ if (apic_verbosity >= APIC_DEBUG)
+ __inquire_remote_apic(apicid);
+}
+
/*
* Basic functions accessing APICs.
*/
@@ -71,6 +95,12 @@ static inline u32 native_apic_mem_read(u32 reg)
return *((volatile u32 *)(APIC_BASE + reg));
}
+extern void native_apic_wait_icr_idle(void);
+extern u32 native_safe_apic_wait_icr_idle(void);
+extern void native_apic_icr_write(u32 low, u32 id);
+extern u64 native_apic_icr_read(void);
+
+#ifdef CONFIG_X86_X2APIC
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -91,8 +121,32 @@ static inline u32 native_apic_msr_read(u32 reg)
return low;
}
-#ifndef CONFIG_X86_32
-extern int x2apic;
+static inline void native_x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return;
+}
+
+static inline u32 native_safe_x2apic_wait_icr_idle(void)
+{
+ /* no need to wait for icr idle in x2apic */
+ return 0;
+}
+
+static inline void native_x2apic_icr_write(u32 low, u32 id)
+{
+ wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+static inline u64 native_x2apic_icr_read(void)
+{
+ unsigned long val;
+
+ rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+ return val;
+}
+
+extern int x2apic, x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
@@ -110,30 +164,24 @@ static inline int x2apic_enabled(void)
return 0;
}
#else
-#define x2apic_enabled() 0
+static inline void check_x2apic(void)
+{
+}
+static inline void enable_x2apic(void)
+{
+}
+static inline void enable_IR_x2apic(void)
+{
+}
+static inline int x2apic_enabled(void)
+{
+ return 0;
+}
#endif
-struct apic_ops {
- u32 (*read)(u32 reg);
- void (*write)(u32 reg, u32 v);
- u64 (*icr_read)(void);
- void (*icr_write)(u32 low, u32 high);
- void (*wait_icr_idle)(void);
- u32 (*safe_wait_icr_idle)(void);
-};
-
-extern struct apic_ops *apic_ops;
-
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
-
extern int get_physical_broadcast(void);
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_X2APIC
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
@@ -141,18 +189,6 @@ static inline void ack_x2APIC_irq(void)
}
#endif
-
-static inline void ack_APIC_irq(void)
-{
- /*
- * ack_APIC_irq() actually gets compiled as a single instruction
- * ... yummie.
- */
-
- /* Docs say use 0 for future compatibility */
- apic_write(APIC_EOI, 0);
-}
-
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -196,4 +232,316 @@ static inline void disable_local_APIC(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
+#ifdef CONFIG_X86_64
+#define SET_APIC_ID(x) (apic->set_apic_id(x))
+#else
+
+#endif
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct apic {
+ char *name;
+
+ int (*probe)(void);
+ int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+ int (*apic_id_registered)(void);
+
+ u32 irq_delivery_mode;
+ u32 irq_dest_mode;
+
+ const struct cpumask *(*target_cpus)(void);
+
+ int disable_esr;
+
+ int dest_logical;
+ unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+ unsigned long (*check_apicid_present)(int apicid);
+
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+ void (*init_apic_ldr)(void);
+
+ physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+ void (*setup_apic_routing)(void);
+ int (*multi_timer_check)(int apic, int irq);
+ int (*apicid_to_node)(int logical_apicid);
+ int (*cpu_to_logical_apicid)(int cpu);
+ int (*cpu_present_to_apicid)(int mps_cpu);
+ physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+ void (*setup_portio_remap)(void);
+ int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+ void (*enable_apic_mode)(void);
+ int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+ /*
+ * When one of the next two hooks returns 1 the apic
+ * is switched to this. Essentially they are additional
+ * probe functions:
+ */
+ int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+ unsigned int (*get_apic_id)(unsigned long x);
+ unsigned long (*set_apic_id)(unsigned int id);
+ unsigned long apic_id_mask;
+
+ unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+ unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask);
+
+ /* ipi */
+ void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+ void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+ int vector);
+ void (*send_IPI_allbutself)(int vector);
+ void (*send_IPI_all)(int vector);
+ void (*send_IPI_self)(int vector);
+
+ /* wakeup_secondary_cpu */
+ int (*wakeup_cpu)(int apicid, unsigned long start_eip);
+
+ int trampoline_phys_low;
+ int trampoline_phys_high;
+
+ void (*wait_for_init_deassert)(atomic_t *deassert);
+ void (*smp_callin_clear_local_apic)(void);
+ void (*inquire_remote_apic)(int apicid);
+
+ /* apic ops */
+ u32 (*read)(u32 reg);
+ void (*write)(u32 reg, u32 v);
+ u64 (*icr_read)(void);
+ void (*icr_write)(u32 low, u32 high);
+ void (*wait_icr_idle)(void);
+ u32 (*safe_wait_icr_idle)(void);
+};
+
+extern struct apic *apic;
+
+static inline u32 apic_read(u32 reg)
+{
+ return apic->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+ apic->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+ return apic->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+ apic->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+ apic->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+ return apic->safe_wait_icr_idle();
+}
+
+
+static inline void ack_APIC_irq(void)
+{
+ /*
+ * ack_APIC_irq() actually gets compiled as a single instruction
+ * ... yummie.
+ */
+
+ /* Docs say use 0 for future compatibility */
+ apic_write(APIC_EOI, 0);
+}
+
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+ unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+ if (APIC_XAPIC(ver))
+ return (x >> 24) & 0xFF;
+ else
+ return (x >> 24) & 0x0F;
+}
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
+
+#ifdef CONFIG_X86_32
+extern void es7000_update_apic_to_cluster(void);
+#else
+extern struct apic apic_flat;
+extern struct apic apic_physflat;
+extern struct apic apic_x2apic_cluster;
+extern struct apic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct apic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+ while (!atomic_read(deassert))
+ cpu_relax();
+ return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+ return cpu_online_mask;
+#else
+ return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+
+static inline unsigned int read_apic_id(void)
+{
+ unsigned int reg;
+
+ reg = apic_read(APIC_ID);
+
+ return apic->get_apic_id(reg);
+}
+
+extern void default_setup_apic_routing(void);
+
+#ifdef CONFIG_X86_32
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116). So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+ return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+ return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask)
+{
+ unsigned long mask1 = cpumask_bits(cpumask)[0];
+ unsigned long mask2 = cpumask_bits(andmask)[0];
+ unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+ return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+ return cpuid_apic >> index_msb;
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+ return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+ return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+ return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+ return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+ if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+ return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+ else
+ return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+ return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+ return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+ return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_32
+extern u8 cpu_2_logical_apicid[NR_CPUS];
+#endif
+
#endif /* _ASM_X86_APIC_H */
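The apic.h hunks above fold the old struct apic_ops indirection and the 32/64-bit genapic tables into a single struct apic, consumed through the new inline apic_read()/apic_write() wrappers. A minimal sketch of how a driver plugs into that indirection — the demo_* names are hypothetical, not part of this patch:

/*
 * Hypothetical sketch: an APIC driver fills in struct apic, and generic
 * code stays driver-agnostic behind the inline wrappers. Only the
 * register-access ops are shown.
 */
static u32 demo_read(u32 reg)
{
	return native_apic_mem_read(reg);
}

static void demo_write(u32 reg, u32 v)
{
	native_apic_mem_write(reg, v);
}

static struct apic demo_apic = {
	.name	= "demo",
	.read	= demo_read,
	.write	= demo_write,
};

void demo_install_and_report(void)
{
	apic = &demo_apic;	/* normally assigned at probe time */
	printk(KERN_INFO "APIC ID: %u\n", read_apic_id());
}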
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644
index 000000000000..82f613c607ce
--- /dev/null
+++ b/arch/x86/include/asm/apicnum.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
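The new apicnum.h carries only the MAX_IO_APICS/MAX_LOCAL_APIC limits, so lightweight headers can size tables without pulling in all of apicdef.h. A hypothetical consumer, for illustration only:

#include <asm/apicnum.h>

/* Hypothetical consumer: statically size a per-IO-APIC table using only
 * the small header introduced above. */
static int demo_ioapic_state[MAX_IO_APICS];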
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74b..20370c6db74b 100644
--- a/arch/x86/include/asm/mach-default/apm.h
+++ b/arch/x86/include/asm/apm.h
diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h
deleted file mode 100644
index cbd4957838a6..000000000000
--- a/arch/x86/include/asm/arch_hooks.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _ASM_X86_ARCH_HOOKS_H
-#define _ASM_X86_ARCH_HOOKS_H
-
-#include <linux/interrupt.h>
-
-/*
- * linux/include/asm/arch_hooks.h
- *
- * define the architecture specific hooks
- */
-
-/* these aren't arch hooks, they are generic routines
- * that can be used by the hooks */
-extern void init_ISA_irqs(void);
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-/* these are the defined hooks */
-extern void intr_init_hook(void);
-extern void pre_intr_init_hook(void);
-extern void pre_setup_arch_hook(void);
-extern void trap_init_hook(void);
-extern void pre_time_init_hook(void);
-extern void time_init_hook(void);
-extern void mca_nmi_hook(void);
-
-#endif /* _ASM_X86_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644
index d8dd9f537911..000000000000
--- a/arch/x86/include/asm/bigsmp/apic.h
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
- return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return &cpu_online_map;
-#else
- return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0) /* phys delivery to target proc */
-#define NO_BALANCE_IRQ (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
- return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long val, id;
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- id = xapic_phys_to_log_apicid(cpu);
- val |= SET_APIC_LOGICAL_ID(id);
- return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids)
- return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int cpu;
- int apicid;
-
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask)
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
- if (cpu < nr_cpu_ids)
- return cpu_to_logical_apicid(cpu);
-
- return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644
index 392c3f5ef2fe..000000000000
--- a/arch/x86/include/asm/bigsmp/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644
index 27fcd01b3ae6..000000000000
--- a/arch/x86/include/asm/bigsmp/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
index 2bc162e0ec6e..0e63c9a2a8d0 100644
--- a/arch/x86/include/asm/calling.h
+++ b/arch/x86/include/asm/calling.h
@@ -1,5 +1,55 @@
/*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+ arguments | callee-saved | extra caller-saved | return
+ [callee-clobbered] | | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11 | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+ functions when it sees tail-call optimization possibilities) rflags is
+ clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*] In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+ bit more complex: up to 128 bits width we return small structures
+ straight in rax, rdx. For structures larger than that (3 words or
+ larger) the caller puts a pointer to an on-stack return struct
+ [allocated in the caller's stack frame] into the first argument - i.e.
+ into rdi. All other arguments shift up by one in this case.
+ Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+ arguments | callee-saved | extra caller-saved | return
+ [callee-clobbered] | | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx | ebx edi esi ebp [*] | <none> | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+ is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*] In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+ semantics as on 64-bit: edx can be used for a second return value
+ (i.e. covering integer and structure sizes up to 64 bits) - after that
+ it gets more complex and more expensive: 3-word or larger struct returns
+ get done in the caller's frame and the pointer to the return struct goes
+ into regparm0, i.e. eax - the other arguments shift up and the
+ function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
*/
#define R15 0
@@ -9,7 +59,7 @@
#define RBP 32
#define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
#define R11 48
#define R10 56
#define R9 64
@@ -22,7 +72,7 @@
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
#define RIP 128
#define CS 136
#define EFLAGS 144
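To make the 64-bit table documented above concrete, here is a worked example (a sketch, not part of the patch) showing where a six-argument C function's values live:

/* Illustration of the 64-bit convention described in calling.h:
 * arguments land in rdi, rsi, rdx, rcx, r8, r9 (left to right), the
 * return value comes back in rax, rbx/rbp/r12-r15 are preserved across
 * the call, and r10/r11 may be clobbered by the callee. */
long demo_sum6(long a, long b, long c, long d, long e, long f)
{
	/* a=rdi b=rsi c=rdx d=rcx e=r8 f=r9; result in rax */
	return a + b + c + d + e + f;
}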
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bae482df6039..b185091bf19c 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
+#define safe_smp_processor_id() 0
+#define stack_smp_processor_id() 0
+
+#endif /* CONFIG_SMP */
+
struct x86_cpu {
struct cpu cpu;
};
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
#endif
DECLARE_PER_CPU(int, cpu_state);
+
+extern unsigned int boot_cpu_id;
+
#endif /* _ASM_X86_CPU_H */
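The UP fallbacks added above let common code call the SMP helpers unconditionally. A hypothetical caller, for illustration only (boot_cpu_physical_apicid is assumed visible via the usual APIC headers):

/* Builds on both SMP and UP kernels: on UP, prefill_possible_map() is a
 * no-op and cpu_physical_id(0) collapses to boot_cpu_physical_apicid. */
static void demo_report_boot_cpu(void)
{
	prefill_possible_map();
	printk(KERN_INFO "CPU0 apicid: %d\n", (int)cpu_physical_id(0));
}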
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000000..a7f3c75f8ad7
--- /dev/null
+++ b/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+static inline void setup_cpu_local_masks(void) { }
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
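On 32-bit the legacy cpumask_t maps are aliased to struct cpumask pointers, so callers can use the modern cpumask accessors on either configuration. A small sketch with a hypothetical helper:

/* Works whether cpu_callin_mask is a cpumask_var_t (64-bit) or a cast
 * of the legacy cpu_callin_map (32-bit). */
static int demo_cpu_called_in(int cpu)
{
	return cpumask_test_cpu(cpu, cpu_callin_mask);
}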
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 0930b4f8d672..c68c361697e1 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
-#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>
+#ifndef __ASSEMBLY__
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
- return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
static __always_inline struct task_struct *get_current(void)
{
- return read_pda(pcurrent);
+ return percpu_read(current_task);
}
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
#endif /* __ASSEMBLY__ */
-#endif /* X86_32 */
-
-#define current get_current()
-
#endif /* _ASM_X86_CURRENT_H */
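With the 64-bit PDA path removed, both word sizes now fetch current from the same per-cpu variable. Roughly, the accessor reduces to a single %gs-relative load (sketch, mirroring the get_current() above):

/* percpu_read(current_task) compiles down to approximately
 * "mov %gs:current_task, %rax" on 64-bit. */
struct task_struct *demo_get_current(void)
{
	return percpu_read(current_task);
}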
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/do_timer.h
index 23ecda0b28a0..23ecda0b28a0 100644
--- a/arch/x86/include/asm/mach-default/do_timer.h
+++ b/arch/x86/include/asm/do_timer.h
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f51a3ddde01a..83c1bc8d2e8a 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
* now struct_user_regs, they are different)
*/
-#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs) \
do { \
pr_reg[0] = regs->bx; \
pr_reg[1] = regs->cx; \
@@ -124,7 +124,6 @@ do { \
pr_reg[7] = regs->ds & 0xffff; \
pr_reg[8] = regs->es & 0xffff; \
pr_reg[9] = regs->fs & 0xffff; \
- savesegment(gs, pr_reg[10]); \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
pr_reg[13] = regs->cs & 0xffff; \
@@ -133,6 +132,18 @@ do { \
pr_reg[16] = regs->ss & 0xffff; \
} while (0);
+#define ELF_CORE_COPY_REGS(pr_reg, regs) \
+do { \
+ ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+ pr_reg[10] = get_user_gs(regs); \
+} while (0);
+
+#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs) \
+do { \
+ ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+ savesegment(gs, pr_reg[10]); \
+} while (0);
+
#define ELF_PLATFORM (utsname()->machine)
#define set_personality_64bit() do { } while (0)
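Splitting ELF_CORE_COPY_REGS from ELF_CORE_COPY_KERNEL_REGS lets user core dumps take gs from pt_regs (via get_user_gs()) while kernel dumps still sample the live segment register. A hypothetical caller showing the two paths:

/* Hypothetical helper: pick the dump flavour. The braces matter because
 * the macros expand to full do { ... } while (0); statements. */
static void demo_fill_gregs(elf_gregset_t pr_reg, struct pt_regs *regs,
			    int kernel_dump)
{
	if (kernel_dump) {
		ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs);
	} else {
		ELF_CORE_COPY_REGS(pr_reg, regs);
	}
}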
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 6b1add8e31dd..854d538ae857 100644
--- a/arch/x86/include/asm/mach-default/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -9,12 +9,28 @@
* is no hardware IRQ pin equivalent for them, they are triggered
* through the ICC by us (IPIs)
*/
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
+
+BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
+ smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
+ smp_invalidate_interrupt)
#endif
/*
@@ -25,10 +41,15 @@ BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
* a much simpler SMP time architecture:
*/
#ifdef CONFIG_X86_LOCAL_APIC
+
BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+#ifdef CONFIG_PERF_COUNTERS
+BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
+#endif
+
#ifdef CONFIG_X86_MCE_P4THERMAL
BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
#endif
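The eight invalidate_interrupt stubs above replace the single INVALIDATE_TLB_VECTOR, so several TLB-shootdown IPIs can be in flight at once, each on its own vector. Conceptually the sender claims a slot like this (a sketch of the idea, not this patch's exact code):

/* Rough sketch: each sender claims one of the eight shootdown vectors so
 * concurrent flushes from different CPUs do not serialize on a single
 * vector's state. */
static int demo_pick_tlb_vector(int sender_cpu)
{
	return INVALIDATE_TLB_VECTOR_START + (sender_cpu % 8);
}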
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644
index c58b9cc74465..000000000000
--- a/arch/x86/include/asm/es7000/apic.h
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef __ASM_ES7000_APIC_H
-#define __ASM_ES7000_APIC_H
-
-#include <linux/gfp.h>
-
-#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
- return (1);
-}
-
-static inline const cpumask_t *target_cpus_cluster(void)
-{
- return &CPU_MASK_ALL;
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
- return &cpumask_of_cpu(smp_processor_id());
-}
-
-#define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
-#define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ_CLUSTER (1)
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE (dest_Fixed)
-#define INT_DEST_MODE (0) /* phys delivery to target procs */
-#define NO_BALANCE_IRQ (0)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL 0x0
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(int cpu)
-{
- unsigned long id;
- id = xapic_phys_to_log_apicid(cpu);
- return (SET_APIC_LOGICAL_ID(id));
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LdR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr_cluster(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
- int cpu = smp_processor_id();
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = calculate_ldr(cpu);
- apic_write(APIC_LDR, val);
-}
-
-extern int apic_version [MAX_APICS];
-static inline void setup_apic_routing(void)
-{
- int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
- printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
- (apic_version[apic] == 0x14) ?
- "Physical Cluster" : "Logical Cluster",
- nr_ioapics, cpus_addr(*target_cpus())[0]);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return 0;
-}
-
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (!mps_cpu)
- return boot_cpu_physical_apicid;
- else if (mps_cpu < nr_cpu_ids)
- return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- static int id = 0;
- physid_mask_t mask;
- mask = physid_mask_of_physid(id);
- ++id;
- return mask;
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0xff);
-}
-
-
-static inline void setup_portio_remap(void)
-{
-}
-
-extern unsigned int boot_cpu_physical_apicid;
-static inline int check_phys_apicid_present(int cpu_physical_apicid)
-{
- boot_cpu_physical_apicid = read_apic_id();
- return (1);
-}
-
-static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpumask_weight(cpumask);
- /* Return id to all */
- if (num_bits_set == nr_cpu_ids)
- return 0xFF;
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = cpumask_first(cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpumask_test_cpu(cpu, cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return 0xFF;
- }
- apicid = new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpus_weight(*cpumask);
- /* Return id to all */
- if (num_bits_set == nr_cpu_ids)
- return cpu_to_logical_apicid(0);
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return cpu_to_logical_apicid(0);
- }
- apicid = new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
-{
- int apicid = cpu_to_logical_apicid(0);
- cpumask_var_t cpumask;
-
- if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
-
- cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
-
- free_cpumask_var(cpumask);
- return apicid;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644
index 8b234a3cb851..000000000000
--- a/arch/x86/include/asm/es7000/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_ES7000_APICDEF_H
-#define __ASM_ES7000_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0xFF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644
index 7e8ed24d4b8a..000000000000
--- a/arch/x86/include/asm/es7000/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_ES7000_IPI_H
-#define __ASM_ES7000_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644
index c1629b090ec2..000000000000
--- a/arch/x86/include/asm/es7000/mpparse.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ES7000_MPPARSE_H
-#define __ASM_ES7000_MPPARSE_H
-
-#include <linux/acpi.h>
-
-extern int parse_unisys_oem (char *oemptr);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-extern void setup_unisys(void);
-
-#ifndef CONFIG_X86_GENERICARCH
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
-#endif
-
-#ifdef CONFIG_ACPI
-
-static inline int es7000_check_dsdt(void)
-{
- struct acpi_table_header header;
-
- if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
- !strncmp(header.oem_id, "UNISYS", 6))
- return 1;
- return 0;
-}
-#endif
-
-#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644
index 78f0daaee436..000000000000
--- a/arch/x86/include/asm/es7000/wakecpu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_ES7000_WAKECPU_H
-#define __ASM_ES7000_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW 0x467
-#define TRAMPOLINE_PHYS_HIGH 0x469
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
- while (!atomic_read(deassert))
- cpu_relax();
-#endif
- return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-extern void __inquire_remote_apic(int apicid);
-
-static inline void inquire_remote_apic(int apicid)
-{
- if (apic_verbosity >= APIC_DEBUG)
- __inquire_remote_apic(apicid);
-}
-
-#endif /* __ASM_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index c7115c1d7217..047d9bab2b31 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -95,10 +95,6 @@ enum fixed_addresses {
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
FIX_WP_TEST,
-#ifdef CONFIG_ACPI
- FIX_ACPI_BEGIN,
- FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
index 00a30ab9b1a5..298d9ba3faeb 100644
--- a/arch/x86/include/asm/fixmap_64.h
+++ b/arch/x86/include/asm/fixmap_64.h
@@ -50,10 +50,6 @@ enum fixed_addresses {
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
-#ifdef CONFIG_ACPI
- FIX_ACPI_BEGIN,
- FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
diff --git a/arch/x86/include/asm/genapic.h b/arch/x86/include/asm/genapic.h
index d48bee663a6f..4b8b98fa7f25 100644
--- a/arch/x86/include/asm/genapic.h
+++ b/arch/x86/include/asm/genapic.h
@@ -1,5 +1 @@
-#ifdef CONFIG_X86_32
-# include "genapic_32.h"
-#else
-# include "genapic_64.h"
-#endif
+#include <asm/apic.h>
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644
index 2c05b737ee22..000000000000
--- a/arch/x86/include/asm/genapic_32.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_32_H
-#define _ASM_X86_GENAPIC_32_H
-
-#include <asm/mpspec.h>
-#include <asm/atomic.h>
-
-/*
- * Generic APIC driver interface.
- *
- * An straight forward mapping of the APIC related parts of the
- * x86 subarchitecture interface to a dynamic object.
- *
- * This is used by the "generic" x86 subarchitecture.
- *
- * Copyright 2003 Andi Kleen, SuSE Labs.
- */
-
-struct mpc_bus;
-struct mpc_table;
-struct mpc_cpu;
-
-struct genapic {
- char *name;
- int (*probe)(void);
-
- int (*apic_id_registered)(void);
- const struct cpumask *(*target_cpus)(void);
- int int_delivery_mode;
- int int_dest_mode;
- int ESR_DISABLE;
- int apic_destination_logical;
- unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
- unsigned long (*check_apicid_present)(int apicid);
- int no_balance_irq;
- int no_ioapic_check;
- void (*init_apic_ldr)(void);
- physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
-
- void (*setup_apic_routing)(void);
- int (*multi_timer_check)(int apic, int irq);
- int (*apicid_to_node)(int logical_apicid);
- int (*cpu_to_logical_apicid)(int cpu);
- int (*cpu_present_to_apicid)(int mps_cpu);
- physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
- void (*setup_portio_remap)(void);
- int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
- void (*enable_apic_mode)(void);
- u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
-
- /* mpparse */
- /* When one of the next two hooks returns 1 the genapic
- is switched to this. Essentially they are additional probe
- functions. */
- int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
- char *productid);
- int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-
- unsigned (*get_apic_id)(unsigned long x);
- unsigned long apic_id_mask;
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-
-#ifdef CONFIG_SMP
- /* ipi */
- void (*send_IPI_mask)(const struct cpumask *mask, int vector);
- void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
- int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
-#endif
- int (*wakeup_cpu)(int apicid, unsigned long start_eip);
- int trampoline_phys_low;
- int trampoline_phys_high;
- void (*wait_for_init_deassert)(atomic_t *deassert);
- void (*smp_callin_clear_local_apic)(void);
- void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
- void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
- void (*inquire_remote_apic)(int apicid);
-};
-
-#define APICFUNC(x) .x = x,
-
-/* More functions could be probably marked IPIFUNC and save some space
- in UP GENERICARCH kernels, but I don't have the nerve right now
- to untangle this mess. -AK */
-#ifdef CONFIG_SMP
-#define IPIFUNC(x) APICFUNC(x)
-#else
-#define IPIFUNC(x)
-#endif
-
-#define APIC_INIT(aname, aprobe) \
-{ \
- .name = aname, \
- .probe = aprobe, \
- .int_delivery_mode = INT_DELIVERY_MODE, \
- .int_dest_mode = INT_DEST_MODE, \
- .no_balance_irq = NO_BALANCE_IRQ, \
- .ESR_DISABLE = esr_disable, \
- .apic_destination_logical = APIC_DEST_LOGICAL, \
- APICFUNC(apic_id_registered) \
- APICFUNC(target_cpus) \
- APICFUNC(check_apicid_used) \
- APICFUNC(check_apicid_present) \
- APICFUNC(init_apic_ldr) \
- APICFUNC(ioapic_phys_id_map) \
- APICFUNC(setup_apic_routing) \
- APICFUNC(multi_timer_check) \
- APICFUNC(apicid_to_node) \
- APICFUNC(cpu_to_logical_apicid) \
- APICFUNC(cpu_present_to_apicid) \
- APICFUNC(apicid_to_cpu_present) \
- APICFUNC(setup_portio_remap) \
- APICFUNC(check_phys_apicid_present) \
- APICFUNC(mps_oem_check) \
- APICFUNC(get_apic_id) \
- .apic_id_mask = APIC_ID_MASK, \
- APICFUNC(cpu_mask_to_apicid) \
- APICFUNC(cpu_mask_to_apicid_and) \
- APICFUNC(vector_allocation_domain) \
- APICFUNC(acpi_madt_oem_check) \
- IPIFUNC(send_IPI_mask) \
- IPIFUNC(send_IPI_allbutself) \
- IPIFUNC(send_IPI_all) \
- APICFUNC(enable_apic_mode) \
- APICFUNC(phys_pkg_id) \
- .trampoline_phys_low = TRAMPOLINE_PHYS_LOW, \
- .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH, \
- APICFUNC(wait_for_init_deassert) \
- APICFUNC(smp_callin_clear_local_apic) \
- APICFUNC(store_NMI_vector) \
- APICFUNC(restore_NMI_vector) \
- APICFUNC(inquire_remote_apic) \
-}
-
-extern struct genapic *genapic;
-extern void es7000_update_genapic_to_cluster(void);
-
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-#define get_uv_system_type() UV_NONE
-#define is_uv_system() 0
-#define uv_wakeup_secondary(a, b) 1
-#define uv_system_init() do {} while (0)
-
-
-#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644
index adf32fb56aa6..000000000000
--- a/arch/x86/include/asm/genapic_64.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_64_H
-#define _ASM_X86_GENAPIC_64_H
-
-#include <linux/cpumask.h>
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
- char *name;
- int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
- u32 int_delivery_mode;
- u32 int_dest_mode;
- int (*apic_id_registered)(void);
- const struct cpumask *(*target_cpus)(void);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
- void (*init_apic_ldr)(void);
- /* ipi */
- void (*send_IPI_mask)(const struct cpumask *mask, int vector);
- void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
- int vector);
- void (*send_IPI_allbutself)(int vector);
- void (*send_IPI_all)(int vector);
- void (*send_IPI_self)(int vector);
- /* */
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
- unsigned int (*phys_pkg_id)(int index_msb);
- unsigned int (*get_apic_id)(unsigned long x);
- unsigned long (*set_apic_id)(unsigned int id);
- unsigned long apic_id_mask;
- /* wakeup_secondary_cpu */
- int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2apic_cluster;
-extern struct genapic apic_x2apic_phys;
-extern int acpi_madt_oem_check(char *, char *);
-
-extern void apic_send_IPI_self(int vector);
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-extern enum uv_system_type get_uv_system_type(void);
-extern int is_uv_system(void);
-
-extern struct genapic apic_x2apic_uv_x;
-DECLARE_PER_CPU(int, x2apic_extra_bits);
-extern void uv_cpu_init(void);
-extern void uv_system_init(void);
-extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
-
-extern void setup_apic_routing(void);
-
-#endif /* _ASM_X86_GENAPIC_64_H */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 000787df66e6..176f058e7159 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -1,11 +1,52 @@
-#ifdef CONFIG_X86_32
-# include "hardirq_32.h"
-#else
-# include "hardirq_64.h"
+#ifndef _ASM_X86_HARDIRQ_H
+#define _ASM_X86_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+ unsigned int irq0_irqs;
+#ifdef CONFIG_X86_LOCAL_APIC
+ unsigned int apic_timer_irqs; /* arch dependent */
+ unsigned int irq_spurious_count;
+#endif
+#ifdef CONFIG_SMP
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+ unsigned int irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+ unsigned int irq_thermal_count;
+# ifdef CONFIG_X86_64
+ unsigned int irq_threshold_count;
+# endif
#endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
+#define __ARCH_IRQ_STAT
+
+#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
+
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat
+
+#endif /* _ASM_X86_HARDIRQ_H */
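The unified irq_cpustat_t above replaces both the 32-bit per-cpu struct and the 64-bit PDA counters, and inc_irq_stat() becomes a single percpu add on either configuration. Typical handler-side use (sketch with a hypothetical function name):

/* In an interrupt handler, bump the per-cpu counter; this was
 * __get_cpu_var(irq_stat).member++ on 32-bit and add_pda() on 64-bit
 * before this unification. */
static void demo_account_timer_irq(void)
{
	inc_irq_stat(irq0_irqs);
}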
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644
index cf7954d1405f..000000000000
--- a/arch/x86/include/asm/hardirq_32.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_32_H
-#define _ASM_X86_HARDIRQ_32_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
- unsigned int __softirq_pending;
- unsigned long idle_timestamp;
- unsigned int __nmi_count; /* arch dependent */
- unsigned int apic_timer_irqs; /* arch dependent */
- unsigned int irq0_irqs;
- unsigned int irq_resched_count;
- unsigned int irq_call_count;
- unsigned int irq_tlb_count;
- unsigned int irq_thermal_count;
- unsigned int irq_spurious_count;
-} ____cacheline_aligned irq_cpustat_t;
-
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
-
-#define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
-
-#define inc_irq_stat(member) (__get_cpu_var(irq_stat).member++)
-
-void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h>
-
-#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644
index b5a6b5d56704..000000000000
--- a/arch/x86/include/asm/hardirq_64.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_64_H
-#define _ASM_X86_HARDIRQ_64_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-#include <asm/pda.h>
-#include <asm/apic.h>
-
-/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
-#define MAX_HARDIRQS_PER_CPU NR_VECTORS
-
-#define __ARCH_IRQ_STAT 1
-
-#define inc_irq_stat(member) add_pda(member, 1)
-
-#define local_softirq_pending() read_pda(__softirq_pending)
-
-#define __ARCH_SET_SOFTIRQ_PENDING 1
-
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
-
-extern void ack_bad_irq(unsigned int irq);
-
-#endif /* _ASM_X86_HARDIRQ_64_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 8de644b6b959..370e1c83bb49 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -25,8 +25,6 @@
#include <asm/irq.h>
#include <asm/sections.h>
-#define platform_legacy_irq(irq) ((irq) < 16)
-
/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);
@@ -58,7 +56,7 @@ extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
/* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +65,7 @@ extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);
-#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
-#endif
-
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void send_IPI_self(int vector);
-#endif
-extern void send_IPI(int dest, int vector);
/* Statistics */
extern atomic_t irq_err_count;
@@ -84,21 +74,11 @@ extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
-/* Voyager functions */
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-
/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);
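
Concrete reading of the IO_APIC_IRQ() change above: with NR_IRQS_LEGACY = 16, IO_APIC_IRQ(20) is always true, while IO_APIC_IRQ(5) is true only if bit 5 of io_apic_irqs is set; the behavior is unchanged, the literal 16 just gets a name.
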
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..1a99e6c092af 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);
+#ifdef CONFIG_X86_32
+extern void init_ISA_irqs(void);
+#endif
+
#endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 1dbbdf4be9b4..683d0b4c00fc 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -5,6 +5,7 @@
#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>
+#include <asm/page.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,100 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
#define readq readq
#define writeq writeq
+/**
+ * virt_to_phys - map virtual addresses to physical
+ * @address: address to remap
+ *
+ * The returned physical address is the physical (CPU) mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses directly mapped or allocated via kmalloc.
+ *
+ * This function does not give bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+ return __pa(address);
+}
+
+/**
+ * phys_to_virt - map physical address to virtual
+ * @address: address to remap
+ *
+ * The returned virtual address is a current CPU mapping for
+ * the memory address given. It is only valid to use this function on
+ * addresses that have a kernel mapping
+ *
+ * This function does not handle bus mappings for DMA transfers. In
+ * almost all conceivable cases a device driver should not be using
+ * this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ * However, we truncate the address to unsigned int to avoid undesirable
+ * promotions in legacy drivers.
+ */
+static inline unsigned int isa_virt_to_bus(volatile void *address)
+{
+ return (unsigned int)virt_to_phys(address);
+}
+#define isa_page_to_bus(page) ((unsigned int)page_to_phys(page))
+#define isa_bus_to_virt phys_to_virt
+
+/*
+ * However, PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+ unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+ return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+
+
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
@@ -91,7 +186,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
/*
* early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -105,5 +200,6 @@ extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
+#define IO_SPACE_LIMIT 0xffff
#endif /* _ASM_X86_IO_H */
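
A hedged usage sketch of the now-shared ioremap()/iounmap() pair (not from the patch; EXAMPLE_MMIO_BASE and EXAMPLE_MMIO_SIZE are made-up values):

/* Sketch only -- the base address and size below are hypothetical. */
#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_BASE	0xfed00000UL
#define EXAMPLE_MMIO_SIZE	0x1000UL

static int example_map_regs(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);

	if (!regs)
		return -ENOMEM;
	writel(0x1, regs);	/* uncached MMIO write via the default mapping */
	iounmap(regs);
	return 0;
}
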
diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
index d8e242e1b396..a299900f5920 100644
--- a/arch/x86/include/asm/io_32.h
+++ b/arch/x86/include/asm/io_32.h
@@ -37,8 +37,6 @@
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
-#define IO_SPACE_LIMIT 0xffff
-
#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
@@ -53,92 +51,6 @@
*/
#define xlate_dev_kmem_ptr(p) p
-/**
- * virt_to_phys - map virtual addresses to physical
- * @address: address to remap
- *
- * The returned physical address is the physical (CPU) mapping for
- * the memory address given. It is only valid to use this function on
- * addresses directly mapped or allocated via kmalloc.
- *
- * This function does not give bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline unsigned long virt_to_phys(volatile void *address)
-{
- return __pa(address);
-}
-
-/**
- * phys_to_virt - map physical address to virtual
- * @address: address to remap
- *
- * The returned virtual address is a current CPU mapping for
- * the memory address given. It is only valid to use this function on
- * addresses that have a kernel mapping
- *
- * This function does not handle bus mappings for DMA transfers. In
- * almost all conceivable cases a device driver should not be using
- * this function
- */
-
-static inline void *phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-/**
- * ioremap - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * If the area you are trying to map is a PCI BAR you should have a
- * look at pci_iomap().
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
- unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
- return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
index 563c16270ba6..244067893af4 100644
--- a/arch/x86/include/asm/io_64.h
+++ b/arch/x86/include/asm/io_64.h
@@ -136,73 +136,12 @@ __OUTS(b)
__OUTS(w)
__OUTS(l)
-#define IO_SPACE_LIMIT 0xffff
-
#if defined(__KERNEL__) && defined(__x86_64__)
#include <linux/vmalloc.h>
-#ifndef __i386__
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void *address)
-{
- return __pa(address);
-}
-
-static inline void *phys_to_virt(unsigned long address)
-{
- return __va(address);
-}
-#endif
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
#include <asm-generic/iomap.h>
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
- unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
- return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7a1f44ac1f17..59cb4a1317b7 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
-/*
- * MP-BIOS irq configuration table structures:
- */
-
#define MP_MAX_IOAPIC_PIN 127
-struct mp_config_ioapic {
- unsigned long mp_apicaddr;
- unsigned int mp_apicid;
- unsigned char mp_type;
- unsigned char mp_apicver;
- unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
- unsigned int mp_dstapic;
- unsigned char mp_type;
- unsigned char mp_irqtype;
- unsigned short mp_irqflag;
- unsigned char mp_srcbus;
- unsigned char mp_srcbusirq;
- unsigned char mp_dstirq;
-};
-
/* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;
-static inline void disable_ioapic_setup(void)
-{
-#ifdef CONFIG_PCI
- noioapicquirk = 1;
- noioapicreroute = -1;
-#endif
- skip_ioapic_setup = 1;
-}
-
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
extern void probe_nr_irqs_gsi(void);
+extern int setup_ioapic_entry(int apic, int irq,
+ struct IO_APIC_route_entry *entry,
+ unsigned int destination, int trigger,
+ int polarity, int vector);
+extern void ioapic_write_entry(int apic, int pin,
+ struct IO_APIC_route_entry e);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index c745a306f7d3..0b7228268a63 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_IPI_H
#define _ASM_X86_IPI_H
+#ifdef CONFIG_X86_LOCAL_APIC
+
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
cpu_relax();
}
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
- unsigned int dest)
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
/*
* Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
* This is used to send an IPI with no shorthand notation (the destination is
* specified in bits 56 to 63 of the ICR).
*/
-static inline void __send_IPI_dest_field(unsigned int mask, int vector,
- unsigned int dest)
+static inline void
+ __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
unsigned long cfg;
@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
native_apic_mem_write(APIC_ICR, cfg);
}
-static inline void send_IPI_mask_sequence(const struct cpumask *mask,
- int vector)
-{
- unsigned long flags;
- unsigned long query_cpu;
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+ int vector);
- /*
- * Hack. The clustered APIC addressing mode doesn't allow us to send
- * to an arbitrary mask, so I do a unicast to each CPU instead.
- * - mbligh
- */
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask) {
- __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
- }
- local_irq_restore(flags);
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
}
-static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
- int vector)
+static inline void __default_local_send_IPI_all(int vector)
{
- unsigned long flags;
- unsigned int query_cpu;
- unsigned int this_cpu = smp_processor_id();
-
- /* See Hack comment above */
-
- local_irq_save(flags);
- for_each_cpu(query_cpu, mask)
- if (query_cpu != this_cpu)
- __send_IPI_dest_field(
- per_cpu(x86_cpu_to_apicid, query_cpu),
- vector, APIC_DEST_PHYSICAL);
- local_irq_restore(flags);
+ if (no_broadcast || vector == NMI_VECTOR)
+ apic->send_IPI_mask(cpu_online_mask, vector);
+ else
+ __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
}
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+ int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
#endif /* _ASM_X86_IPI_H */
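
For orientation, a sketch (not in the patch) of a caller of the renamed default_* helpers; example_kick_other_cpus() is hypothetical and assumes RESCHEDULE_VECTOR from <asm/irq_vectors.h>:

/* Sketch only -- hypothetical caller; honors no_broadcast per the code above. */
#include <asm/ipi.h>
#include <asm/irq_vectors.h>

static void example_kick_other_cpus(void)
{
	__default_local_send_IPI_allbutself(RESCHEDULE_VECTOR);
}
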
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 592688ed04d3..107eb2196691 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,9 +36,11 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif
-extern unsigned int do_IRQ(struct pt_regs *regs);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
+extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+extern unsigned int do_IRQ(struct pt_regs *regs);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/irq_regs.h b/arch/x86/include/asm/irq_regs.h
index 89c898ab298b..77843225b7ea 100644
--- a/arch/x86/include/asm/irq_regs.h
+++ b/arch/x86/include/asm/irq_regs.h
@@ -1,5 +1,31 @@
-#ifdef CONFIG_X86_32
-# include "irq_regs_32.h"
-#else
-# include "irq_regs_64.h"
-#endif
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the per-cpu area.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_X86_IRQ_REGS_H
+#define _ASM_X86_IRQ_REGS_H
+
+#include <asm/percpu.h>
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return percpu_read(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ percpu_write(irq_regs, new_regs);
+
+ return old_regs;
+}
+
+#endif /* _ASM_X86_IRQ_REGS_H */
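
Not from the patch: the typical entry-path pattern these accessors support, sketched; example_do_IRQ() is hypothetical but mirrors how do_IRQ() saves and restores the per-cpu pointer.

/* Sketch only -- example_do_IRQ() is hypothetical. */
#include <asm/irq_regs.h>

unsigned int example_do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* ... handle the interrupt; get_irq_regs() now returns 'regs' ... */

	set_irq_regs(old_regs);
	return 1;
}
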
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644
index 86afd7473457..000000000000
--- a/arch/x86/include/asm/irq_regs_32.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_32_H
-#define _ASM_X86_IRQ_REGS_32_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
- return x86_read_percpu(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
- struct pt_regs *old_regs;
-
- old_regs = get_irq_regs();
- x86_write_percpu(irq_regs, new_regs);
-
- return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/x86/include/asm/irq_regs_64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index f7ff65032b9d..8a285f356f8a 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -1,47 +1,69 @@
#ifndef _ASM_X86_IRQ_VECTORS_H
#define _ASM_X86_IRQ_VECTORS_H
-#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events
+ * Vectors 32 ... 127 : device interrupts
+ * Vector 128 : legacy int80 syscall interface
+ * Vectors 129 ... 237 : device interrupts
+ * Vectors 238 ... 255 : special interrupts
+ *
+ * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
-#define NMI_VECTOR 0x02
+#define NMI_VECTOR 0x02
/*
* IDT vectors usable for external interrupt sources start
* at 0x20:
*/
-#define FIRST_EXTERNAL_VECTOR 0x20
+#define FIRST_EXTERNAL_VECTOR 0x20
#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR 0x80
+# define SYSCALL_VECTOR 0x80
#else
-# define IA32_SYSCALL_VECTOR 0x80
+# define IA32_SYSCALL_VECTOR 0x80
#endif
/*
* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
* cleanup after irq migration.
*/
-#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
*/
-#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
-#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
-#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
-#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
-#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
-#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
-#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
-#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
-#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
-#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
-#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
+#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10)
+
+#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
+#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
+#define IRQ3_VECTOR (IRQ0_VECTOR + 3)
+#define IRQ4_VECTOR (IRQ0_VECTOR + 4)
+#define IRQ5_VECTOR (IRQ0_VECTOR + 5)
+#define IRQ6_VECTOR (IRQ0_VECTOR + 6)
+#define IRQ7_VECTOR (IRQ0_VECTOR + 7)
+#define IRQ8_VECTOR (IRQ0_VECTOR + 8)
+#define IRQ9_VECTOR (IRQ0_VECTOR + 9)
+#define IRQ10_VECTOR (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR (IRQ0_VECTOR + 15)
/*
* Special IRQ vectors used by the SMP architecture, 0xf0-0xff
@@ -49,119 +71,98 @@
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
*/
-#ifdef CONFIG_X86_32
-
-# define SPURIOUS_APIC_VECTOR 0xff
-# define ERROR_APIC_VECTOR 0xfe
-# define INVALIDATE_TLB_VECTOR 0xfd
-# define RESCHEDULE_VECTOR 0xfc
-# define CALL_FUNCTION_VECTOR 0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
-# define THERMAL_APIC_VECTOR 0xf0
-
-#else
#define SPURIOUS_APIC_VECTOR 0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
+
#define ERROR_APIC_VECTOR 0xfe
#define RESCHEDULE_VECTOR 0xfd
#define CALL_FUNCTION_VECTOR 0xfc
#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
#define THERMAL_APIC_VECTOR 0xfa
-#define THRESHOLD_APIC_VECTOR 0xf9
-#define UV_BAU_MESSAGE 0xf8
-#define INVALIDATE_TLB_VECTOR_END 0xf7
-#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS 8
+#ifdef CONFIG_X86_32
+/* 0xf8 - 0xf9 : free */
+#else
+# define THRESHOLD_APIC_VECTOR 0xf9
+# define UV_BAU_MESSAGE 0xf8
#endif
+/* f0-f7 used for spreading out TLB flushes: */
+#define INVALIDATE_TLB_VECTOR_END 0xf7
+#define INVALIDATE_TLB_VECTOR_START 0xf0
+#define NUM_INVALIDATE_TLB_VECTORS 8
+
/*
* Local APIC timer IRQ vector is on a different priority level,
* to work around the 'lost local interrupt if more than 2 IRQ
* sources per level' errata.
*/
-#define LOCAL_TIMER_VECTOR 0xef
+#define LOCAL_TIMER_VECTOR 0xef
+
+/*
+ * Performance monitoring interrupt vector:
+ */
+#define LOCAL_PERF_VECTOR 0xee
/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority
* levels. (0x80 is the syscall vector)
*/
-#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-
-#define NR_VECTORS 256
+#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
-#define FPU_IRQ 13
+#define NR_VECTORS 256
-#define FIRST_VM86_IRQ 3
-#define LAST_VM86_IRQ 15
-#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+#define FPU_IRQ 13
-#define NR_IRQS_LEGACY 16
+#define FIRST_VM86_IRQ 3
+#define LAST_VM86_IRQ 15
-#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
-
-#ifndef CONFIG_SPARSE_IRQ
-# if NR_CPUS < MAX_IO_APICS
-# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
-# else
-# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
-#else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+#ifndef __ASSEMBLY__
+static inline int invalid_vm86_irq(int irq)
+{
+ return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
#endif
-#elif defined(CONFIG_X86_VOYAGER)
-
-# define NR_IRQS 224
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales up linearly with the maximum number of CPUs,
+ * and the maximum number of IO-APICs, whichever is higher.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
-#else /* IO_APIC || VOYAGER */
+#define NR_IRQS_LEGACY 16
-# define NR_IRQS 16
+#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
+#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
+#ifdef CONFIG_X86_IO_APIC
+# ifdef CONFIG_SPARSE_IRQ
+# define NR_IRQS \
+ (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
+ (NR_VECTORS + CPU_VECTOR_LIMIT) : \
+ (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+# else
+# if NR_CPUS < MAX_IO_APICS
+# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
+# else
+# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+# endif
+# endif
+#else /* !CONFIG_X86_IO_APIC: */
+# define NR_IRQS NR_IRQS_LEGACY
#endif
-/* Voyager specific defines */
-/* These define the CPIs we use in linux */
-#define VIC_CPI_LEVEL0 0
-#define VIC_CPI_LEVEL1 1
-/* now the fake CPIs */
-#define VIC_TIMER_CPI 2
-#define VIC_INVALIDATE_CPI 3
-#define VIC_RESCHEDULE_CPI 4
-#define VIC_ENABLE_IRQ_CPI 5
-#define VIC_CALL_FUNCTION_CPI 6
-#define VIC_CALL_FUNCTION_SINGLE_CPI 7
-
-/* Now the QIC CPIs: Since we don't need the two initial levels,
- * these are 2 less than the VIC CPIs */
-#define QIC_CPI_OFFSET 1
-#define QIC_TIMER_CPI (VIC_TIMER_CPI - QIC_CPI_OFFSET)
-#define QIC_INVALIDATE_CPI (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
-#define QIC_RESCHEDULE_CPI (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
-#define QIC_ENABLE_IRQ_CPI (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_CPI (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_SINGLE_CPI (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
-
-#define VIC_START_FAKE_CPI VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI VIC_CALL_FUNCTION_SINGLE_CPI
-
-/* this is the SYS_INT CPI. */
-#define VIC_SYS_INT 8
-#define VIC_CMN_INT 15
-
-/* This is the boot CPI for alternate processors. It gets overwritten
- * by the above once the system has activated all available processors */
-#define VIC_CPU_BOOT_CPI VIC_CPI_LEVEL0
-#define VIC_CPU_BOOT_ERRATA_CPI (VIC_CPI_LEVEL0 + 8)
-
-
#endif /* _ASM_X86_IRQ_VECTORS_H */
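
Worked sizing example (illustrative config values, not from the patch): with CONFIG_SPARSE_IRQ, NR_CPUS = 64 and MAX_IO_APICS = 64, CPU_VECTOR_LIMIT = 8 * 64 = 512 and IO_APIC_VECTOR_LIMIT = 32 * 64 = 2048; the larger limit wins, so NR_IRQS = NR_VECTORS + IO_APIC_VECTOR_LIMIT = 256 + 2048 = 2304.
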
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index c61d8b2ab8b9..0ceb6d19ed30 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -9,23 +9,8 @@
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
-# define VA_CONTROL_PAGE 1
-# define PA_PGD 2
-# define VA_PGD 3
-# define PA_PUD_0 4
-# define VA_PUD_0 5
-# define PA_PMD_0 6
-# define VA_PMD_0 7
-# define PA_PTE_0 8
-# define VA_PTE_0 9
-# define PA_PUD_1 10
-# define VA_PUD_1 11
-# define PA_PMD_1 12
-# define VA_PMD_1 13
-# define PA_PTE_1 14
-# define VA_PTE_1 15
-# define PA_TABLE_PAGE 16
-# define PAGES_NR 17
+# define PA_TABLE_PAGE 1
+# define PAGES_NR 2
#endif
#ifdef CONFIG_X86_32
@@ -157,9 +142,9 @@ relocate_kernel(unsigned long indirection_page,
unsigned long start_address) ATTRIB_NORET;
#endif
-#ifdef CONFIG_X86_32
#define ARCH_HAS_KIMAGE_ARCH
+#ifdef CONFIG_X86_32
struct kimage_arch {
pgd_t *pgd;
#ifdef CONFIG_X86_PAE
@@ -169,6 +154,12 @@ struct kimage_arch {
pte_t *pte0;
pte_t *pte1;
};
+#else
+struct kimage_arch {
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+};
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 5d98d0b68ffc..9320e2a8a26a 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -52,70 +52,14 @@
#endif
+#define GLOBAL(name) \
+ .globl name; \
+ name:
+
#ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
#endif
-/*
- * to check ENTRY_X86/END_X86 and
- * KPROBE_ENTRY_X86/KPROBE_END_X86
- * unbalanced-missed-mixed appearance
- */
-#define __set_entry_x86 .set ENTRY_X86_IN, 0
-#define __unset_entry_x86 .set ENTRY_X86_IN, 1
-#define __set_kprobe_x86 .set KPROBE_X86_IN, 0
-#define __unset_kprobe_x86 .set KPROBE_X86_IN, 1
-
-#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
-
-#define __check_entry_x86 \
- .ifdef ENTRY_X86_IN; \
- .ifeq ENTRY_X86_IN; \
- __macro_err_x86; \
- .abort; \
- .endif; \
- .endif
-
-#define __check_kprobe_x86 \
- .ifdef KPROBE_X86_IN; \
- .ifeq KPROBE_X86_IN; \
- __macro_err_x86; \
- .abort; \
- .endif; \
- .endif
-
-#define __check_entry_kprobe_x86 \
- __check_entry_x86; \
- __check_kprobe_x86
-
-#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
-
-#define ENTRY_X86(name) \
- __check_entry_kprobe_x86; \
- __set_entry_x86; \
- .globl name; \
- __ALIGN; \
- name:
-
-#define END_X86(name) \
- __unset_entry_x86; \
- __check_entry_kprobe_x86; \
- .size name, .-name
-
-#define KPROBE_ENTRY_X86(name) \
- __check_entry_kprobe_x86; \
- __set_kprobe_x86; \
- .pushsection .kprobes.text, "ax"; \
- .globl name; \
- __ALIGN; \
- name:
-
-#define KPROBE_END_X86(name) \
- __unset_kprobe_x86; \
- __check_entry_kprobe_x86; \
- .size name, .-name; \
- .popsection
-
#endif /* _ASM_X86_LINKAGE_H */
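
Not from the patch: how the new GLOBAL() helper reads in a .S file; example_data is a hypothetical symbol.

/* Sketch only -- expands to ".globl example_data; example_data:". */
GLOBAL(example_data)
	.long 0
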
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644
index cc09cbbee27e..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apic.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#include <mach_apicdef.h>
-#include <asm/smp.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-
-static inline const struct cpumask *target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return cpu_online_mask;
-#else
- return cpumask_of(0);
-#endif
-}
-
-#define NO_BALANCE_IRQ (0)
-#define esr_disable (0)
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
-#define send_IPI_self (genapic->send_IPI_self)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-extern void setup_apic_routing(void);
-#else
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-#define TARGET_CPUS (target_cpus())
-#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116). So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
- unsigned long val;
-
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
- apic_write(APIC_LDR, val);
-}
-
-static inline int apic_id_registered(void)
-{
- return physid_isset(read_apic_id(), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- return cpumask_bits(cpumask)[0];
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- unsigned long mask1 = cpumask_bits(cpumask)[0];
- unsigned long mask2 = cpumask_bits(andmask)[0];
- unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-
- return (unsigned int)(mask1 & mask2 & mask3);
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-static inline void setup_apic_routing(void)
-{
-#ifdef CONFIG_X86_IO_APIC
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "Flat", nr_ioapics);
-#endif
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
-#else
- return 0;
-#endif
-}
-
-static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
-}
-#endif
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- return phys_map;
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
- return 1 << cpu;
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
- return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
- return physid_mask_of_physid(phys_apicid);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644
index 53179936d6c6..000000000000
--- a/arch/x86/include/asm/mach-default/mach_apicdef.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_64
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#define GET_APIC_ID(x) (genapic->get_apic_id(x))
-#define SET_APIC_ID(x) (genapic->set_apic_id(x))
-#else
-#define APIC_ID_MASK (0xF<<24)
-static inline unsigned get_apic_id(unsigned long x)
-{
- unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
- if (APIC_XAPIC(ver))
- return (((x)>>24)&0xFF);
- else
- return (((x)>>24)&0xF);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644
index 191312d155da..000000000000
--- a/arch/x86/include/asm/mach-default/mach_ipi.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-
-/* Avoid include hell */
-#define NMI_VECTOR 0x02
-
-void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-void __send_IPI_shortcut(unsigned int shortcut, int vector);
-
-extern int no_broadcast;
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
-#else
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_bitmask(mask, vector);
-}
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-#endif
-
-static inline void __local_send_IPI_allbutself(int vector)
-{
- if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask_allbutself(cpu_online_mask, vector);
- else
- __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-}
-
-static inline void __local_send_IPI_all(int vector)
-{
- if (no_broadcast || vector == NMI_VECTOR)
- send_IPI_mask(cpu_online_mask, vector);
- else
- __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-}
-
-#ifdef CONFIG_X86_64
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#else
-static inline void send_IPI_allbutself(int vector)
-{
- /*
- * if there are no other CPUs in the system then we get an APIC send
- * error if we try to broadcast, thus avoid sending IPIs in this case.
- */
- if (!(num_online_cpus() > 1))
- return;
-
- __local_send_IPI_allbutself(vector);
- return;
-}
-
-static inline void send_IPI_all(int vector)
-{
- __local_send_IPI_all(vector);
-}
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644
index c70a263d68cd..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpparse.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-
-static inline int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
- return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- return 0;
-}
-
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644
index e85ede686be8..000000000000
--- a/arch/x86/include/asm/mach-default/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-#if CONFIG_BASE_SMALL == 0
-#define MAX_MP_BUSSES 256
-#else
-#define MAX_MP_BUSSES 32
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644
index 89897a6a65b9..000000000000
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (0x467)
-#define TRAMPOLINE_PHYS_HIGH (0x469)
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
- while (!atomic_read(deassert))
- cpu_relax();
- return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-#ifdef CONFIG_SMP
-extern void __inquire_remote_apic(int apicid);
-#else /* CONFIG_SMP */
-static inline void __inquire_remote_apic(int apicid)
-{
-}
-#endif /* CONFIG_SMP */
-
-static inline void inquire_remote_apic(int apicid)
-{
- if (apic_verbosity >= APIC_DEBUG)
- __inquire_remote_apic(apicid);
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644
index 995c45efdb33..000000000000
--- a/arch/x86/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
-#define _ASM_X86_MACH_GENERIC_GPIO_H
-
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h> /* cansleep wrappers */
-
-#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644
index 48553e958ad5..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apic.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
-#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
-
-#include <asm/genapic.h>
-
-#define esr_disable (genapic->ESR_DISABLE)
-#define NO_BALANCE_IRQ (genapic->no_balance_irq)
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
-#define TARGET_CPUS (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
-#define setup_apic_routing (genapic->setup_apic_routing)
-#define multi_timer_check (genapic->multi_timer_check)
-#define apicid_to_node (genapic->apicid_to_node)
-#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid)
-#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
-#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define setup_portio_remap (genapic->setup_portio_remap)
-#define check_apicid_present (genapic->check_apicid_present)
-#define check_phys_apicid_present (genapic->check_phys_apicid_present)
-#define check_apicid_used (genapic->check_apicid_used)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define enable_apic_mode (genapic->enable_apic_mode)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-
-extern void generic_bigsmp_probe(void);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644
index 68041f3802f4..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_apicdef.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-
-#ifndef APIC_DEFINITION
-#include <asm/genapic.h>
-
-#define GET_APIC_ID (genapic->get_apic_id)
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#endif
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644
index ffd637e3c3d9..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_ipi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
-#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
-
-#include <asm/genapic.h>
-
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644
index 9444ab8dca94..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpparse.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-
-
-extern int mps_oem_check(struct mpc_table *, char *, char *);
-
-extern int acpi_madt_oem_check(char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644
index 3bc407226578..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_mpspec.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
-/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
-#define MAX_MP_BUSSES 260
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644
index 1ab16b168c8a..000000000000
--- a/arch/x86/include/asm/mach-generic/mach_wakecpu.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
-#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
-#define wait_for_init_deassert (genapic->wait_for_init_deassert)
-#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
-#define store_NMI_vector (genapic->store_NMI_vector)
-#define restore_NMI_vector (genapic->restore_NMI_vector)
-#define inquire_remote_apic (genapic->inquire_remote_apic)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644
index c210ab5788b0..000000000000
--- a/arch/x86/include/asm/mach-rdc321x/gpio.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
-#define _ASM_X86_MACH_RDC321X_GPIO_H
-
-#include <linux/kernel.h>
-
-extern int rdc_gpio_get_value(unsigned gpio);
-extern void rdc_gpio_set_value(unsigned gpio, int value);
-extern int rdc_gpio_direction_input(unsigned gpio);
-extern int rdc_gpio_direction_output(unsigned gpio, int value);
-extern int rdc_gpio_request(unsigned gpio, const char *label);
-extern void rdc_gpio_free(unsigned gpio);
-extern void __init rdc321x_gpio_setup(void);
-
-/* Wrappers for the arch-neutral GPIO API */
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return rdc_gpio_request(gpio, label);
-}
-
-static inline void gpio_free(unsigned gpio)
-{
- might_sleep();
- rdc_gpio_free(gpio);
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
- return rdc_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
- return rdc_gpio_direction_output(gpio, value);
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
- return rdc_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
- rdc_gpio_set_value(gpio, value);
-}
-
-static inline int gpio_to_irq(unsigned gpio)
-{
- return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
- return irq;
-}
-
-/* For cansleep */
-#include <asm-generic/gpio.h>
-
-#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
diff --git a/arch/x86/include/asm/mach-voyager/do_timer.h b/arch/x86/include/asm/mach-voyager/do_timer.h
deleted file mode 100644
index 9e5a459fd15b..000000000000
--- a/arch/x86/include/asm/mach-voyager/do_timer.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* defines for inline arch setup functions */
-#include <linux/clockchips.h>
-
-#include <asm/voyager.h>
-#include <asm/i8253.h>
-
-/**
- * do_timer_interrupt_hook - hook into timer tick
- *
- * Call the pit clock event handler. see asm/i8253.h
- **/
-static inline void do_timer_interrupt_hook(void)
-{
- global_clock_event->event_handler(global_clock_event);
- voyager_timer_interrupt();
-}
-
diff --git a/arch/x86/include/asm/mach-voyager/entry_arch.h b/arch/x86/include/asm/mach-voyager/entry_arch.h
deleted file mode 100644
index ae52624b5937..000000000000
--- a/arch/x86/include/asm/mach-voyager/entry_arch.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 2002
- *
- * Author: James.Bottomley@HansenPartnership.com
- *
- * linux/arch/i386/voyager/entry_arch.h
- *
- * This file builds the VIC and QIC CPI gates
- */
-
-/* initialise the voyager interrupt gates
- *
- * This uses the macros in irq.h to set up assembly jump gates. The
- * calls are then redirected to the same routine with smp_ prefixed */
-BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
-BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
-BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
-
-/* do all the QIC interrupts */
-BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
-BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
-BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
-BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
-BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/arch/x86/include/asm/mach-voyager/setup_arch.h b/arch/x86/include/asm/mach-voyager/setup_arch.h
deleted file mode 100644
index 71729ca05cd7..000000000000
--- a/arch/x86/include/asm/mach-voyager/setup_arch.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <asm/voyager.h>
-#include <asm/setup.h>
-#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
- (&boot_params.apm_bios_info))
-
-/* Hook to call BIOS initialisation function */
-
-/* for voyager, pass the voyager BIOS/SUS info area to the detection
- * routines */
-
-#define ARCH_SETUP voyager_detect(VOYAGER_BIOS_INFO);
-
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach_timer.h
index 853728519ae9..853728519ae9 100644
--- a/arch/x86/include/asm/mach-default/mach_timer.h
+++ b/arch/x86/include/asm/mach_timer.h
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f7920601e472..f7920601e472 100644
--- a/arch/x86/include/asm/mach-default/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db..f923203dc39a 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned cpu = smp_processor_id();
+
+ if (likely(prev != next)) {
+ /* stop flush ipis for the previous mm */
+ cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
#endif
+ cpu_set(cpu, next->cpu_vm_mask);
+
+ /* Re-load page tables */
+ load_cr3(next->pgd);
+
+ /*
+ * load the LDT, if the LDT is different:
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+ }
+#ifdef CONFIG_SMP
+ else {
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+ /* We were in lazy tlb mode and leave_mm disabled
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context);
+ }
+ }
+#endif
+}
#define activate_mm(prev, next) \
do { \
@@ -33,5 +76,17 @@ do { \
switch_mm((prev), (next), NULL); \
} while (0);
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm) \
+do { \
+ lazy_load_gs(0); \
+} while (0)
+#else
+#define deactivate_mm(tsk, mm) \
+do { \
+ load_gs_index(0); \
+ loadsegment(fs, 0); \
+} while (0)
+#endif
#endif /* _ASM_X86_MMU_CONTEXT_H */
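
For orientation (not in the patch): the activate_mm() macro above expands to roughly the following, which is the path that exercises the now-unified switch_mm():

/* Sketch only: spelled-out form of the activate_mm() expansion. */
static void example_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	paravirt_activate_mm(prev, next);
	switch_mm(prev, next, NULL);
}
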
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644
index 7e98ce1d2c0e..000000000000
--- a/arch/x86/include/asm/mmu_context_32.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
- if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
- struct mm_struct *next,
- struct task_struct *tsk)
-{
- int cpu = smp_processor_id();
-
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
- x86_write_percpu(cpu_tlbstate.active_mm, next);
-#endif
- cpu_set(cpu, next->cpu_vm_mask);
-
- /* Re-load page tables */
- load_cr3(next->pgd);
-
- /*
- * load the LDT, if the LDT is different:
- */
- if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
- }
-#ifdef CONFIG_SMP
- else {
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
- BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
-
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload %cr3.
- */
- load_cr3(next->pgd);
- load_LDT_nolock(&next->context);
- }
- }
-#endif
-}
-
-#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644
index 677d36e9540a..000000000000
--- a/arch/x86/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-#include <asm/pda.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
- if (read_pda(mmu_state) == TLBSTATE_OK)
- write_pda(mmu_state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
- unsigned cpu = smp_processor_id();
- if (likely(prev != next)) {
- /* stop flush ipis for the previous mm */
- cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
- write_pda(mmu_state, TLBSTATE_OK);
- write_pda(active_mm, next);
-#endif
- cpu_set(cpu, next->cpu_vm_mask);
- load_cr3(next->pgd);
-
- if (unlikely(next->context.ldt != prev->context.ldt))
- load_LDT_nolock(&next->context);
- }
-#ifdef CONFIG_SMP
- else {
- write_pda(mmu_state, TLBSTATE_OK);
- if (read_pda(active_mm) != next)
- BUG();
- if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
- /* We were in lazy tlb mode and leave_mm disabled
- * tlb flush IPI delivery. We must reload CR3
- * to make sure to use no freed page tables.
- */
- load_cr3(next->pgd);
- load_LDT_nolock(&next->context);
- }
- }
-#endif
-}
-
-#define deactivate_mm(tsk, mm) \
-do { \
- load_gs_index(0); \
- asm volatile("movl %0,%%fs"::"r"(0)); \
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713f..642fc7fc8cdc 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
extern int pic_mode;
#ifdef CONFIG_X86_32
-#include <mach_mpspec.h>
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES 260
+#else
+# define MAX_MP_BUSSES 32
+#endif
+
+#define MAX_IRQ_SOURCES 256
extern unsigned int def_to_bigsmp;
extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
#endif
-#define MAX_APICID 256
+#define MAX_APICID 256
-#else
+#else /* CONFIG_X86_64: */
-#define MAX_MP_BUSSES 256
+#define MAX_MP_BUSSES 256
/* Each PCI slot may be a combo card with its own bus. 4 IRQ pins per slot. */
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
-#endif
+#endif /* CONFIG_X86_64 */
extern void early_find_smp_config(void);
extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
extern int mpc_default_type;
extern unsigned long mp_lapic_addr;
-extern void find_smp_config(void);
extern void get_smp_config(void);
+
#ifdef CONFIG_X86_MPPARSE
+extern void find_smp_config(void);
extern void early_reserve_e820_mpc_new(void);
#else
+static inline void find_smp_config(void) { }
static inline void early_reserve_e820_mpc_new(void) { }
#endif
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
#ifdef CONFIG_X86_IO_APIC
extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
u32 gsi, int triggering, int polarity);
+extern int mp_find_ioapic(int gsi);
+extern int mp_find_ioapic_pin(int ioapic, int gsi);
#else
static inline int
mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,8 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
extern physid_mask_t phys_cpu_present_map;
+extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
#endif /* _ASM_X86_MPSPEC_H */
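The find_smp_config() change above uses the stub-header pattern: when CONFIG_X86_MPPARSE is off, a static inline no-op replaces the extern declaration so callers need no #ifdef. An illustrative caller (function name hypothetical):

void __init boot_time_setup(void)	/* hypothetical caller */
{
	/* Compiles to nothing when !CONFIG_X86_MPPARSE; no #ifdef needed here */
	find_smp_config();
	early_reserve_e820_mpc_new();
}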
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f..4a7f96d7c188 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
# endif
#endif
-struct intel_mp_floating {
- char mpf_signature[4]; /* "_MP_" */
- unsigned int mpf_physptr; /* Configuration table address */
- unsigned char mpf_length; /* Our length (paragraphs) */
- unsigned char mpf_specification;/* Specification version */
- unsigned char mpf_checksum; /* Checksum (makes sum 0) */
- unsigned char mpf_feature1; /* Standard or configuration ? */
- unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
- unsigned char mpf_feature3; /* Unused (0) */
- unsigned char mpf_feature4; /* Unused (0) */
- unsigned char mpf_feature5; /* Unused (0) */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+ char signature[4]; /* "_MP_" */
+ unsigned int physptr; /* Configuration table address */
+ unsigned char length; /* Our length (paragraphs) */
+ unsigned char specification; /* Specification version */
+ unsigned char checksum; /* Checksum (makes sum 0) */
+ unsigned char feature1; /* Standard or configuration ? */
+ unsigned char feature2; /* Bit7 set for IMCR|PIC */
+ unsigned char feature3; /* Unused (0) */
+ unsigned char feature4; /* Unused (0) */
+ unsigned char feature5; /* Unused (0) */
};
#define MPC_SIGNATURE "PCMP"
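A sketch of how a consumer might validate the renamed struct mpf_intel; note that ->length counts 16-byte paragraphs and the checksum byte makes all bytes sum to zero (helper name hypothetical):

static int mpf_intel_valid(const struct mpf_intel *mpf)	/* hypothetical helper */
{
	const unsigned char *p = (const unsigned char *)mpf;
	unsigned char sum = 0;
	unsigned int i;

	if (memcmp(mpf->signature, "_MP_", 4) != 0)
		return 0;
	for (i = 0; i < mpf->length * 16U; i++)	/* length is in paragraphs */
		sum += p[i];
	return sum == 0;	/* checksum field makes the total wrap to zero */
}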
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c16..9f0a5f5d29ec 100644
--- a/arch/x86/include/asm/numaq.h
+++ b/arch/x86/include/asm/numaq.h
@@ -31,6 +31,8 @@
extern int found_numaq;
extern int get_memcfg_numaq(void);
+extern void *xquad_portio;
+
/*
* SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
*/
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644
index bf37bc49bd8e..000000000000
--- a/arch/x86/include/asm/numaq/apic.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef __ASM_NUMAQ_APIC_H
-#define __ASM_NUMAQ_APIC_H
-
-#include <asm/io.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
- return &CPU_MASK_ALL;
-}
-
-#define NO_BALANCE_IRQ (1)
-#define esr_disable (1)
-
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 0 /* physical delivery on LOCAL quad */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return physid_isset(apicid, bitmap);
-}
-static inline unsigned long check_apicid_present(int bit)
-{
- return physid_isset(bit, phys_cpu_present_map);
-}
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline int apic_id_registered(void)
-{
- return 1;
-}
-
-static inline void init_apic_ldr(void)
-{
- /* Already done in NUMA-Q firmware */
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
- "NUMA-Q", nr_ioapics);
-}
-
-/*
- * Skip adding the timer int on secondary nodes, which causes
- * a small but painful rift in the time-space continuum.
- */
-static inline int multi_timer_check(int apic, int irq)
-{
- return apic != 0 && irq == 0;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
- /* We don't have a good way to do this yet - hack */
- return physids_promote(0xFUL);
-}
-
-/* Mapping from cpu number to logical apicid */
-extern u8 cpu_2_logical_apicid[];
-static inline int cpu_to_logical_apicid(int cpu)
-{
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-}
-
-/*
- * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
- * cpu to APIC ID relation to properly interact with the intelligent
- * mode of the cluster controller.
- */
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < 60)
- return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
- else
- return BAD_APICID;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
- return logical_apicid >> 4;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
-{
- int node = apicid_to_node(logical_apicid);
- int cpu = __ffs(logical_apicid & 0xf);
-
- return physid_mask_of_physid(cpu + 4*node);
-}
-
-extern void *xquad_portio;
-
-static inline void setup_portio_remap(void)
-{
- int num_quads = num_online_nodes();
-
- if (num_quads <= 1)
- return;
-
- printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
- xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
- printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
- (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return (1);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-/*
- * We use physical apicids here, not logical, so just return the default
- * physical broadcast to stop people from breaking us
- */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- return (int) 0xF;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- return (int) 0xF;
-}
-
-/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_NUMAQ_APIC_H */
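For reference, the cpu_present_to_apicid() encoding removed above packs a quad number into the high nibble and a one-hot local-CPU bit into the low nibble. A worked example (illustrative):

/*
 * mps_cpu = 5:
 *   quad   = 5 >> 2              = 1
 *   local  = 1 << (5 & 0x3)      = 1 << 1 = 0x2
 *   apicid = (quad << 4) | local = 0x12
 * apicid_to_node(0x12) then recovers 0x12 >> 4 = node 1.
 */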
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644
index e012a46cc22a..000000000000
--- a/arch/x86/include/asm/numaq/apicdef.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_NUMAQ_APICDEF_H
-#define __ASM_NUMAQ_APICDEF_H
-
-
-#define APIC_ID_MASK (0xF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (((x)>>24)&0x0F);
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644
index a8374c652778..000000000000
--- a/arch/x86/include/asm/numaq/ipi.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_NUMAQ_IPI_H
-#define __ASM_NUMAQ_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644
index a2eeefcd1cc7..000000000000
--- a/arch/x86/include/asm/numaq/mpparse.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_NUMAQ_MPPARSE_H
-#define __ASM_NUMAQ_MPPARSE_H
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644
index 6f499df8eddb..000000000000
--- a/arch/x86/include/asm/numaq/wakecpu.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __ASM_NUMAQ_WAKECPU_H
-#define __ASM_NUMAQ_WAKECPU_H
-
-/* This file copes with machines that wakeup secondary CPUs by NMIs */
-
-#define TRAMPOLINE_PHYS_LOW (0x8)
-#define TRAMPOLINE_PHYS_HIGH (0xa)
-
-/* We don't do anything here because we use NMI's to boot instead */
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-}
-
-/*
- * Because we use NMIs rather than the INIT-STARTUP sequence to
- * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
- */
-static inline void smp_callin_clear_local_apic(void)
-{
- clear_local_APIC();
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
- printk("Storing NMI vector\n");
- *high =
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
- *low =
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
- printk("Restoring NMI vector\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
- *high;
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
- *low;
-}
-
-static inline void inquire_remote_apic(int apicid)
-{
-}
-
-#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 776579119a00..89ed9d70b0aa 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -1,42 +1,11 @@
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#include <linux/types.h>
#ifdef __KERNEL__
-#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
- virtual addresses are 32-bits but physical addresses are larger
- (ie, 32-bit PAE). */
-#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
-#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
-
-/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
-#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
-
-#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
-
-#define HPAGE_SHIFT PMD_SHIFT
-#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-
-#define HUGE_MAX_HSTATE 2
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
+#include <asm/page_types.h>
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
@@ -44,38 +13,18 @@
#include <asm/page_32.h>
#endif /* CONFIG_X86_64 */
-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-
-#define VM_DATA_DEFAULT_FLAGS \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
- VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
#ifndef __ASSEMBLY__
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
-extern int page_is_ram(unsigned long pagenr);
-extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
- pgprot_t vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
- pgprot_t vma_prot);
-
-extern unsigned long max_low_pfn_mapped;
-extern unsigned long max_pfn_mapped;
-
struct page;
static inline void clear_user_page(void *page, unsigned long vaddr,
- struct page *pg)
+ struct page *pg)
{
clear_page(page);
}
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
- struct page *topage)
+ struct page *topage)
{
copy_page(to, from);
}
@@ -84,99 +33,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-static inline pgd_t native_make_pgd(pgdval_t val)
-{
- return (pgd_t) { val };
-}
-
-static inline pgdval_t native_pgd_val(pgd_t pgd)
-{
- return pgd.pgd;
-}
-
-#if PAGETABLE_LEVELS >= 3
-#if PAGETABLE_LEVELS == 4
-typedef struct { pudval_t pud; } pud_t;
-
-static inline pud_t native_make_pud(pmdval_t val)
-{
- return (pud_t) { val };
-}
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
- return pud.pud;
-}
-#else /* PAGETABLE_LEVELS == 3 */
-#include <asm-generic/pgtable-nopud.h>
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
- return native_pgd_val(pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS == 4 */
-
-typedef struct { pmdval_t pmd; } pmd_t;
-
-static inline pmd_t native_make_pmd(pmdval_t val)
-{
- return (pmd_t) { val };
-}
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
- return pmd.pmd;
-}
-#else /* PAGETABLE_LEVELS == 2 */
-#include <asm-generic/pgtable-nopmd.h>
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
- return native_pgd_val(pmd.pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static inline pte_t native_make_pte(pteval_t val)
-{
- return (pte_t) { .pte = val };
-}
-
-static inline pteval_t native_pte_val(pte_t pte)
-{
- return pte.pte;
-}
-
-static inline pteval_t native_pte_flags(pte_t pte)
-{
- return native_pte_val(pte) & PTE_FLAGS_MASK;
-}
-
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else /* !CONFIG_PARAVIRT */
-
-#define pgd_val(x) native_pgd_val(x)
-#define __pgd(x) native_make_pgd(x)
-
-#ifndef __PAGETABLE_PUD_FOLDED
-#define pud_val(x) native_pud_val(x)
-#define __pud(x) native_make_pud(x)
-#endif
-
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pmd_val(x) native_pmd_val(x)
-#define __pmd(x) native_make_pmd(x)
-#endif
-
-#define pte_val(x) native_pte_val(x)
-#define pte_flags(x) native_pte_flags(x)
-#define __pte(x) native_make_pte(x)
-
-#endif /* CONFIG_PARAVIRT */
-
#define __pa(x) __phys_addr((unsigned long)(x))
#define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
/* __pa_symbol should be used for C visible symbols.
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index bcde0d7b4325..da4e762406f7 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -1,82 +1,14 @@
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H
-/*
- * This handles the memory map.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-
-#ifdef CONFIG_4KSTACKS
-#define THREAD_ORDER 0
-#else
-#define THREAD_ORDER 1
-#endif
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-
-#define STACKFAULT_STACK 0
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
-
-#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT 44
-#define __VIRTUAL_MASK_SHIFT 32
-#define PAGETABLE_LEVELS 3
-
-#ifndef __ASSEMBLY__
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pudval_t;
-typedef u64 pgdval_t;
-typedef u64 pgprotval_t;
-
-typedef union {
- struct {
- unsigned long pte_low, pte_high;
- };
- pteval_t pte;
-} pte_t;
-#endif /* __ASSEMBLY__
- */
-#else /* !CONFIG_X86_PAE */
-#define __PHYSICAL_MASK_SHIFT 32
-#define __VIRTUAL_MASK_SHIFT 32
-#define PAGETABLE_LEVELS 2
-
-#ifndef __ASSEMBLY__
-typedef unsigned long pteval_t;
-typedef unsigned long pmdval_t;
-typedef unsigned long pudval_t;
-typedef unsigned long pgdval_t;
-typedef unsigned long pgprotval_t;
-
-typedef union {
- pteval_t pte;
- pteval_t pte_low;
-} pte_t;
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_X86_PAE */
+#include <asm/page_32_types.h>
#ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
-#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
-#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
@@ -89,23 +21,6 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
-extern int nx_enabled;
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-extern int sysctl_legacy_va_layout;
-
-extern void find_low_pfn_range(void);
-extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
-extern void initmem_init(unsigned long, unsigned long);
-extern void free_initmem(void);
-extern void setup_bootmem_allocator(void);
-
-
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
new file mode 100644
index 000000000000..f1e4a79a6e41
--- /dev/null
+++ b/arch/x86/include/asm/page_32_types.h
@@ -0,0 +1,60 @@
+#ifndef _ASM_X86_PAGE_32_DEFS_H
+#define _ASM_X86_PAGE_32_DEFS_H
+
+#include <linux/const.h>
+
+/*
+ * This handles the memory map.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_ORDER 0
+#else
+#define THREAD_ORDER 1
+#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+
+#define STACKFAULT_STACK 0
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 0
+#define DEBUG_STACK 0
+#define MCE_STACK 0
+#define N_EXCEPTION_STACKS 1
+
+#ifdef CONFIG_X86_PAE
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT 44
+#define __VIRTUAL_MASK_SHIFT 32
+
+#else /* !CONFIG_X86_PAE */
+#define __PHYSICAL_MASK_SHIFT 32
+#define __VIRTUAL_MASK_SHIFT 32
+#endif /* CONFIG_X86_PAE */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+extern int sysctl_legacy_va_layout;
+
+extern void find_low_pfn_range(void);
+extern unsigned long init_memory_mapping(unsigned long start,
+ unsigned long end);
+extern void initmem_init(unsigned long, unsigned long);
+extern void free_initmem(void);
+extern void setup_bootmem_allocator(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_32_DEFS_H */
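A quick sanity check of the stack sizing carried over into this new header (illustrative arithmetic):

/*
 * CONFIG_4KSTACKS: THREAD_ORDER = 0 -> THREAD_SIZE = 4096 << 0 = 4KB
 * otherwise:       THREAD_ORDER = 1 -> THREAD_SIZE = 4096 << 1 = 8KB
 */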
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0..072694ed81a5 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -1,105 +1,6 @@
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H
-#define PAGETABLE_LEVELS 4
-
-#define THREAD_ORDER 1
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
-
-#define EXCEPTION_STACK_ORDER 0
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-
-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-
-/*
- * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
- */
-#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
-#define __KERNEL_ALIGN 0x200000
-
-/*
- * Make sure kernel is aligned to 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
- */
-#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
-#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
-#endif
-
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-
-/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-#define __PHYSICAL_MASK_SHIFT 46
-#define __VIRTUAL_MASK_SHIFT 48
-
-/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
- */
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
-#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
-
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x) (x)
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef unsigned long pteval_t;
-typedef unsigned long pmdval_t;
-typedef unsigned long pudval_t;
-typedef unsigned long pgdval_t;
-typedef unsigned long pgprotval_t;
-
-typedef struct page *pgtable_t;
-
-typedef struct { pteval_t pte; } pte_t;
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
-
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
-extern void free_initmem(void);
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) ((pfn) < max_pfn)
-#endif
-
+#include <asm/page_64_types.h>
#endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
new file mode 100644
index 000000000000..d38c91b70248
--- /dev/null
+++ b/arch/x86/include/asm/page_64_types.h
@@ -0,0 +1,89 @@
+#ifndef _ASM_X86_PAGE_64_DEFS_H
+#define _ASM_X86_PAGE_64_DEFS_H
+
+#define THREAD_ORDER 1
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
+
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
+/*
+ * Set __PAGE_OFFSET to the most negative possible address +
+ * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+ * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
+ * what Xen requires.
+ */
+#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+
+#define __PHYSICAL_START CONFIG_PHYSICAL_START
+#define __KERNEL_ALIGN 0x200000
+
+/*
+ * Make sure kernel is aligned to 2MB address. Catching it at compile
+ * time is better. Change your config file and compile the kernel
+ * for a 2MB aligned address (CONFIG_PHYSICAL_START)
+ */
+#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
+#endif
+
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __VIRTUAL_MASK_SHIFT 48
+
+/*
+ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
+ * arch/x86/kernel/head_64.S), and it is mapped here:
+ */
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
+
+#ifndef __ASSEMBLY__
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+extern unsigned long __phys_addr(unsigned long);
+#define __phys_reloc_hide(x) (x)
+
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern unsigned long init_memory_mapping(unsigned long start,
+ unsigned long end);
+
+extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void free_initmem(void);
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) ((pfn) < max_pfn)
+#endif
+
+#endif /* _ASM_X86_PAGE_64_DEFS_H */
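The 16-slot hypervisor gap mentioned in the __PAGE_OFFSET comment checks out as follows (illustrative arithmetic):

/*
 * most negative canonical address = 0xffff800000000000
 * __PAGE_OFFSET                   = 0xffff880000000000
 * gap = 0x0000080000000000 = 8TB = 16 * PGDIR_SIZE  (PGDIR_SIZE = 512GB = 2^39)
 */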
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
new file mode 100644
index 000000000000..2d625da6603c
--- /dev/null
+++ b/arch/x86/include/asm/page_types.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_X86_PAGE_DEFS_H
+#define _ASM_X86_PAGE_DEFS_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32 bits but physical addresses are larger
+   (i.e., 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_MAX_HSTATE 2
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64_types.h>
+#else
+#include <asm/page_32_types.h>
+#endif /* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+struct pgprot;
+
+extern int page_is_ram(unsigned long pagenr);
+extern int devmem_is_allowed(unsigned long pagenr);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+ struct pgprot vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+ struct pgprot vma_prot);
+
+extern unsigned long max_low_pfn_mapped;
+extern unsigned long max_pfn_mapped;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_DEFS_H */
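One subtlety in the PHYSICAL_PAGE_MASK definition consolidated above is worth spelling out: the cast to a signed type makes PAGE_MASK sign-extend when widened to the 64-bit phys_addr_t under PAE. Worked values (illustrative, 32-bit PAE):

/*
 * PAGE_MASK                 = 0xfffff000           (32-bit unsigned long)
 * (signed long)PAGE_MASK    = -4096, which widens to 0xfffffffffffff000
 * __PHYSICAL_MASK (44 bits) = 0x00000fffffffffff
 * PHYSICAL_PAGE_MASK        = 0x00000ffffffff000   (44-bit, page-aligned)
 */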
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e299287e8e33..0617d5cc9712 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -4,7 +4,7 @@
* para-virtualization: those hooks are defined here. */
#ifdef CONFIG_PARAVIRT
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
#include <asm/asm.h>
/* Bitmask of what can be clobbered: usually at least eax. */
@@ -12,21 +12,38 @@
#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
+#define CLBR_EDI (1 << 3)
-#ifdef CONFIG_X86_64
-#define CLBR_RSI (1 << 3)
-#define CLBR_RDI (1 << 4)
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should match all the registers the platform has. For i386, that's just the four above */
+#define CLBR_ANY ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH (0)
+#else
+#define CLBR_RAX CLBR_EAX
+#define CLBR_RCX CLBR_ECX
+#define CLBR_RDX CLBR_EDX
+#define CLBR_RDI CLBR_EDI
+#define CLBR_RSI (1 << 4)
#define CLBR_R8 (1 << 5)
#define CLBR_R9 (1 << 6)
#define CLBR_R10 (1 << 7)
#define CLBR_R11 (1 << 8)
+
#define CLBR_ANY ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+ CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG (CLBR_RAX)
+#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
+
#include <asm/desc_defs.h>
-#else
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY ((1 << 3) - 1)
#endif /* X86_64 */
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
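The new clobber classes combine as plain bitmasks; for the 64-bit values defined above (illustrative):

/*
 * CLBR_ARG_REGS    = RDI|RSI|RDX|RCX|R8|R9        = 0x07e
 * CLBR_SCRATCH     = R10|R11                      = 0x180
 * CLBR_RET_REG     = RAX                          = 0x001
 * CLBR_CALLEE_SAVE = (0x07e | 0x180) & ~0x001     = 0x1fe
 *   i.e. everything a callee-save thunk must preserve, minus the return reg.
 */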
@@ -40,6 +57,14 @@ struct tss_struct;
struct mm_struct;
struct desc_struct;
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+ void *func;
+};
+
/* general info */
struct pv_info {
unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
* expected to use X86_EFLAGS_IF; all other bits
* returned from save_fl are undefined, and may be ignored by
* restore_fl.
+ *
+	 * NOTE: Callers of these functions expect the callee to preserve
+ * more registers than the standard C calling convention.
*/
- unsigned long (*save_fl)(void);
- void (*restore_fl)(unsigned long);
- void (*irq_disable)(void);
- void (*irq_enable)(void);
+ struct paravirt_callee_save save_fl;
+ struct paravirt_callee_save restore_fl;
+ struct paravirt_callee_save irq_disable;
+ struct paravirt_callee_save irq_enable;
+
void (*safe_halt)(void);
void (*halt)(void);
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr);
- void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ struct mm_struct *mm,
unsigned long va);
/* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
- pteval_t (*pte_val)(pte_t);
- pteval_t (*pte_flags)(pte_t);
- pte_t (*make_pte)(pteval_t pte);
+ struct paravirt_callee_save pte_val;
+ struct paravirt_callee_save make_pte;
- pgdval_t (*pgd_val)(pgd_t);
- pgd_t (*make_pgd)(pgdval_t pgd);
+ struct paravirt_callee_save pgd_val;
+ struct paravirt_callee_save make_pgd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
void (*set_pud)(pud_t *pudp, pud_t pudval);
- pmdval_t (*pmd_val)(pmd_t);
- pmd_t (*make_pmd)(pmdval_t pmd);
+ struct paravirt_callee_save pmd_val;
+ struct paravirt_callee_save make_pmd;
#if PAGETABLE_LEVELS == 4
- pudval_t (*pud_val)(pud_t);
- pud_t (*make_pud)(pudval_t pud);
+ struct paravirt_callee_save pud_val;
+ struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif /* PAGETABLE_LEVELS == 4 */
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
const void *target, u16 tgt_clobbers,
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
* makes sure the incoming and outgoing types are always correct.
*/
#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS unsigned long __eax, __edx, __ecx
+#define PVOP_VCALL_ARGS \
+ unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
+
#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
-#else
-#define PVOP_VCALL_ARGS unsigned long __edi, __esi, __edx, __ecx
+#else /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS \
+ unsigned long __edi = __edi, __esi = __esi, \
+ __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
+
#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
"=S" (__esi), "=d" (__edx), \
"=c" (__ecx)
-
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
+#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
+
#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
-#endif
+#endif /* CONFIG_X86_32 */
#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op) ((void)op)
#endif
-#define __PVOP_CALL(rettype, op, pre, post, ...) \
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
+ pre, post, ...) \
({ \
rettype __ret; \
- PVOP_CALL_ARGS; \
+ PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
/* This is 32-bit specific, but is okay in 64-bit */ \
/* since this condition will never hold */ \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : PVOP_CALL_CLOBBERS \
+ : call_clbr \
: paravirt_type(op), \
- paravirt_clobber(CLBR_ANY), \
+ paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" EXTRA_CLOBBERS); \
+ : "memory", "cc" extra_clbr); \
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : PVOP_CALL_CLOBBERS \
+ : call_clbr \
: paravirt_type(op), \
- paravirt_clobber(CLBR_ANY), \
+ paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" EXTRA_CLOBBERS); \
+ : "memory", "cc" extra_clbr); \
__ret = (rettype)__eax; \
} \
__ret; \
})
-#define __PVOP_VCALL(op, pre, post, ...) \
+
+#define __PVOP_CALL(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
+ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
({ \
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
paravirt_alt(PARAVIRT_CALL) \
post \
- : PVOP_VCALL_CLOBBERS \
+ : call_clbr \
: paravirt_type(op), \
- paravirt_clobber(CLBR_ANY), \
+ paravirt_clobber(clbr), \
##__VA_ARGS__ \
- : "memory", "cc" VEXTRA_CLOBBERS); \
+ : "memory", "cc" extra_clbr); \
})
+#define __PVOP_VCALL(op, pre, post, ...) \
+ ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
+ VEXTRA_CLOBBERS, \
+ pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
+ ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+
+
#define PVOP_CALL0(rettype, op) \
__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op) \
__PVOP_VCALL(op, "", "")
+#define PVOP_CALLEE0(rettype, op) \
+ __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op) \
+ __PVOP_VCALLEESAVE(op, "", "")
+
+
#define PVOP_CALL1(rettype, op, arg1) \
- __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
- __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
#define PVOP_CALL2(rettype, op, arg1, arg2) \
- __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
- "1" ((unsigned long)(arg2)))
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2) \
- __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
- "1" ((unsigned long)(arg2)))
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
+ __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2) \
+ __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
- __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
- "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+ __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
- __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
- "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
- "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
- "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
__PVOP_VCALL(op, \
"push %[_arg4];", "lea 4(%%esp),%%esp;", \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
"2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)), \
- "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
- "3"((unsigned long)(arg4)))
+ __PVOP_CALL(rettype, op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)), \
- "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)), \
- "3"((unsigned long)(arg4)))
+ __PVOP_VCALL(op, "", "", \
+ PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
static inline int paravirt_enabled(void)
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
unsigned long va)
{
- PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+ PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALL2(pteval_t,
- pv_mmu_ops.make_pte,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pteval_t,
+ pv_mmu_ops.make_pte,
+ val, (u64)val >> 32);
else
- ret = PVOP_CALL1(pteval_t,
- pv_mmu_ops.make_pte,
- val);
+ ret = PVOP_CALLEE1(pteval_t,
+ pv_mmu_ops.make_pte,
+ val);
return (pte_t) { .pte = ret };
}
@@ -1075,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
pteval_t ret;
if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
- pte.pte, (u64)pte.pte >> 32);
- else
- ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
- pte.pte);
-
- return ret;
-}
-
-static inline pteval_t pte_flags(pte_t pte)
-{
- pteval_t ret;
-
- if (sizeof(pteval_t) > sizeof(long))
- ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
- pte.pte, (u64)pte.pte >> 32);
+ ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+ pte.pte, (u64)pte.pte >> 32);
else
- ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
- pte.pte);
+ ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
+ pte.pte);
-#ifdef CONFIG_PARAVIRT_DEBUG
- BUG_ON(ret & PTE_PFN_MASK);
-#endif
return ret;
}
@@ -1106,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
+ val, (u64)val >> 32);
else
- ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
- val);
+ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
+ val);
return (pgd_t) { ret };
}
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
pgdval_t ret;
if (sizeof(pgdval_t) > sizeof(long))
- ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
- pgd.pgd, (u64)pgd.pgd >> 32);
+ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+ pgd.pgd, (u64)pgd.pgd >> 32);
else
- ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
- pgd.pgd);
+ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
+ pgd.pgd);
return ret;
}
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
+ val, (u64)val >> 32);
else
- ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
- val);
+ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
+ val);
return (pmd_t) { ret };
}
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
pmdval_t ret;
if (sizeof(pmdval_t) > sizeof(long))
- ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
- pmd.pmd, (u64)pmd.pmd >> 32);
+ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd, (u64)pmd.pmd >> 32);
else
- ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
- pmd.pmd);
+ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd);
return ret;
}
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
- ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
- val, (u64)val >> 32);
+ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
+ val, (u64)val >> 32);
else
- ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
- val);
+ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
+ val);
return (pud_t) { ret };
}
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
pudval_t ret;
if (sizeof(pudval_t) > sizeof(long))
- ret = PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
- pud.pud, (u64)pud.pud >> 32);
+ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud, (u64)pud.pud >> 32);
else
- ret = PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
- pud.pud);
+ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
+ pud.pud);
return ret;
}
@@ -1374,9 +1453,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
}
void _paravirt_nop(void);
-#define paravirt_nop ((void *)_paravirt_nop)
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
-void paravirt_use_bytelocks(void);
+#define paravirt_nop ((void *)_paravirt_nop)
#ifdef CONFIG_SMP
@@ -1426,12 +1506,37 @@ extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
#ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
-#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
+#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
+
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS \
+ "push %rcx;" \
+ "push %rdx;" \
+ "push %rsi;" \
+ "push %rdi;" \
+ "push %r8;" \
+ "push %r9;" \
+ "push %r10;" \
+ "push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS \
+ "pop %r11;" \
+ "pop %r10;" \
+ "pop %r9;" \
+ "pop %r8;" \
+ "pop %rdi;" \
+ "pop %rsi;" \
+ "pop %rdx;" \
+ "pop %rcx;"
+
/* We save some registers, but saving all of them would be too much. We
 * clobber all caller-saved registers except the argument register */
#define PV_SAVE_REGS "pushq %%rdi;"
@@ -1441,52 +1546,76 @@ extern struct paravirt_patch_site __parainstructions[],
#define PV_FLAGS_ARG "D"
#endif
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value. This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available. It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call. The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func) \
+ extern typeof(func) __raw_callee_save_##func; \
+ static void *__##func##__ __used = func; \
+ \
+ asm(".pushsection .text;" \
+ "__raw_callee_save_" #func ": " \
+ PV_SAVE_ALL_CALLER_REGS \
+ "call " #func ";" \
+ PV_RESTORE_ALL_CALLER_REGS \
+ "ret;" \
+ ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func) \
+ ((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func) \
+ ((struct paravirt_callee_save) { func })
+
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;
- asm volatile(paravirt_alt(PV_SAVE_REGS
- PARAVIRT_CALL
- PV_RESTORE_REGS)
+ asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
- : "memory", "cc" PV_VEXTRA_CLOBBERS);
+ : "memory", "cc");
return f;
}
static inline void raw_local_irq_restore(unsigned long f)
{
- asm volatile(paravirt_alt(PV_SAVE_REGS
- PARAVIRT_CALL
- PV_RESTORE_REGS)
+ asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
- : "memory", "cc" PV_EXTRA_CLOBBERS);
+ : "memory", "cc");
}
static inline void raw_local_irq_disable(void)
{
- asm volatile(paravirt_alt(PV_SAVE_REGS
- PARAVIRT_CALL
- PV_RESTORE_REGS)
+ asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
- : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+ : "memory", "eax", "cc");
}
static inline void raw_local_irq_enable(void)
{
- asm volatile(paravirt_alt(PV_SAVE_REGS
- PARAVIRT_CALL
- PV_RESTORE_REGS)
+ asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
- : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+ : "memory", "eax", "cc");
}
static inline unsigned long __raw_local_irq_save(void)
@@ -1529,33 +1658,49 @@ static inline unsigned long __raw_local_irq_save(void)
.popsection
+#define COND_PUSH(set, mask, reg) \
+ .if ((~(set)) & mask); push %reg; .endif
+#define COND_POP(set, mask, reg) \
+ .if ((~(set)) & mask); pop %reg; .endif
+
#ifdef CONFIG_X86_64
-#define PV_SAVE_REGS \
- push %rax; \
- push %rcx; \
- push %rdx; \
- push %rsi; \
- push %rdi; \
- push %r8; \
- push %r9; \
- push %r10; \
- push %r11
-#define PV_RESTORE_REGS \
- pop %r11; \
- pop %r10; \
- pop %r9; \
- pop %r8; \
- pop %rdi; \
- pop %rsi; \
- pop %rdx; \
- pop %rcx; \
- pop %rax
+
+#define PV_SAVE_REGS(set) \
+ COND_PUSH(set, CLBR_RAX, rax); \
+ COND_PUSH(set, CLBR_RCX, rcx); \
+ COND_PUSH(set, CLBR_RDX, rdx); \
+ COND_PUSH(set, CLBR_RSI, rsi); \
+ COND_PUSH(set, CLBR_RDI, rdi); \
+ COND_PUSH(set, CLBR_R8, r8); \
+ COND_PUSH(set, CLBR_R9, r9); \
+ COND_PUSH(set, CLBR_R10, r10); \
+ COND_PUSH(set, CLBR_R11, r11)
+#define PV_RESTORE_REGS(set) \
+ COND_POP(set, CLBR_R11, r11); \
+ COND_POP(set, CLBR_R10, r10); \
+ COND_POP(set, CLBR_R9, r9); \
+ COND_POP(set, CLBR_R8, r8); \
+ COND_POP(set, CLBR_RDI, rdi); \
+ COND_POP(set, CLBR_RSI, rsi); \
+ COND_POP(set, CLBR_RDX, rdx); \
+ COND_POP(set, CLBR_RCX, rcx); \
+ COND_POP(set, CLBR_RAX, rax)
+
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
#else
-#define PV_SAVE_REGS pushl %eax; pushl %edi; pushl %ecx; pushl %edx
-#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
+#define PV_SAVE_REGS(set) \
+ COND_PUSH(set, CLBR_EAX, eax); \
+ COND_PUSH(set, CLBR_EDI, edi); \
+ COND_PUSH(set, CLBR_ECX, ecx); \
+ COND_PUSH(set, CLBR_EDX, edx)
+#define PV_RESTORE_REGS(set) \
+ COND_POP(set, CLBR_EDX, edx); \
+ COND_POP(set, CLBR_ECX, ecx); \
+ COND_POP(set, CLBR_EDI, edi); \
+ COND_POP(set, CLBR_EAX, eax)
+
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr) *%cs:addr
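To make the COND_PUSH/COND_POP inversion concrete (illustrative expansion): the 'set' argument names registers that need no saving, either because the call site already declares them clobbered or because a callee-save thunk preserves them itself; only the remaining registers are pushed.

/*
 * PV_SAVE_REGS(CLBR_RAX) on 64-bit:
 *   (~CLBR_RAX) & CLBR_RAX == 0          -> %rax is skipped
 *   (~CLBR_RAX) & CLBR_RCX..CLBR_R11 != 0 -> %rcx..%r11 are pushed
 */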
@@ -1567,15 +1712,15 @@ static inline unsigned long __raw_local_irq_save(void)
#define DISABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
- PV_SAVE_REGS; \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
- PV_RESTORE_REGS;) \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define ENABLE_INTERRUPTS(clobbers) \
PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
- PV_SAVE_REGS; \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
- PV_RESTORE_REGS;)
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#define USERGS_SYSRET32 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32), \
@@ -1605,11 +1750,15 @@ static inline unsigned long __raw_local_irq_save(void)
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
swapgs)
+/*
+ * Note: swapgs is very special, and in practice is either going to be
+ * implemented with a single "swapgs" instruction or something very
+ * special. Either way, we don't need to save any registers for
+ * it.
+ */
#define SWAPGS \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
- PV_SAVE_REGS; \
- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
- PV_RESTORE_REGS \
+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
)
#define GET_CR2_INTO_RCX \
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index b8493b3b9890..9709fdff6615 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -5,10 +5,8 @@
#ifdef CONFIG_X86_PAT
extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
#else
static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
#endif
extern void pat_init(void);
@@ -17,6 +15,4 @@ extern int reserve_memtype(u64 start, u64 end,
unsigned long req_type, unsigned long *ret_type);
extern int free_memtype(u64 start, u64 end);
-extern void pat_disable(char *reason);
-
#endif /* _ASM_X86_PAT_H */
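A hedged usage sketch of the memtype API that remains exported above (caller name and error handling illustrative):

int track_wc_range(u64 start, u64 size)	/* hypothetical caller */
{
	unsigned long got;

	if (reserve_memtype(start, start + size, _PAGE_CACHE_WC, &got))
		return -EINVAL;		/* conflicting attribute already registered */
	if (got != _PAGE_CACHE_WC) {	/* fell back to a compatible type */
		free_memtype(start, start + size);
		return -EINVAL;
	}
	return 0;
}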
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/pci-functions.h
index ed0bab427354..ed0bab427354 100644
--- a/arch/x86/include/asm/mach-default/pci-functions.h
+++ b/arch/x86/include/asm/pci-functions.h
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644
index 2fbfff88df37..000000000000
--- a/arch/x86/include/asm/pda.h
+++ /dev/null
@@ -1,137 +0,0 @@
-#ifndef _ASM_X86_PDA_H
-#define _ASM_X86_PDA_H
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <asm/page.h>
-
-/* Per processor datastructure. %gs points to it while the kernel runs */
-struct x8664_pda {
- struct task_struct *pcurrent; /* 0 Current process */
- unsigned long data_offset; /* 8 Per cpu data offset from linker
- address */
- unsigned long kernelstack; /* 16 top of kernel stack for current */
- unsigned long oldrsp; /* 24 user rsp for system call */
- int irqcount; /* 32 Irq nesting counter. Starts -1 */
- unsigned int cpunumber; /* 36 Logical CPU number */
-#ifdef CONFIG_CC_STACKPROTECTOR
- unsigned long stack_canary; /* 40 stack canary value */
- /* gcc-ABI: this canary MUST be at
- offset 40!!! */
-#endif
- char *irqstackptr;
- short nodenumber; /* number of current node (32k max) */
- short in_bootmem; /* pda lives in bootmem */
- unsigned int __softirq_pending;
- unsigned int __nmi_count; /* number of NMI on this CPUs */
- short mmu_state;
- short isidle;
- struct mm_struct *active_mm;
- unsigned apic_timer_irqs;
- unsigned irq0_irqs;
- unsigned irq_resched_count;
- unsigned irq_call_count;
- unsigned irq_tlb_count;
- unsigned irq_thermal_count;
- unsigned irq_threshold_count;
- unsigned irq_spurious_count;
-} ____cacheline_aligned_in_smp;
-
-extern struct x8664_pda **_cpu_pda;
-extern void pda_init(int);
-
-#define cpu_pda(i) (_cpu_pda[i])
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val) \
-do { \
- typedef typeof(_proxy_pda.field) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- case 4: \
- asm(op "l %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i" (pda_offset(field))); \
- break; \
- case 8: \
- asm(op "q %1,%%gs:%c2": \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
-} while (0)
-
-#define pda_from_op(op, field) \
-({ \
- typeof(_proxy_pda.field) ret__; \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %%gs:%c1,%0" : \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 4: \
- asm(op "l %%gs:%c1,%0": \
- "=r" (ret__): \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 8: \
- asm(op "q %%gs:%c1,%0": \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- ret__; \
-})
-
-#define read_pda(field) pda_from_op("mov", field)
-#define write_pda(field, val) pda_to_op("mov", field, val)
-#define add_pda(field, val) pda_to_op("add", field, val)
-#define sub_pda(field, val) pda_to_op("sub", field, val)
-#define or_pda(field, val) pda_to_op("or", field, val)
-
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field) \
-({ \
- int old__; \
- asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (_proxy_pda.field) \
- : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
- old__; \
-})
-
-#endif
-
-#define PDA_STACKOFFSET (5*8)
-
-#endif /* _ASM_X86_PDA_H */
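
The deleted pda_to_op()/pda_from_op() accessors above combine two idioms that survive in the new percpu code: a dead "if (0)" assignment that type-checks the value argument, and a sizeof() switch that picks the operation width per field. A minimal user-space sketch of just those two idioms, assuming GCC-style typeof and with the %gs-relative asm replaced by a plain assignment (struct and field names are illustrative only):

#include <assert.h>

struct demo_pda {
	short mmu_state;		/* 2-byte field */
	unsigned int cpunumber;		/* 4-byte field */
	unsigned long kernelstack;	/* 8-byte field on LP64 */
};

static struct demo_pda pda;

#define pda_write(field, val)						\
do {									\
	typedef typeof(pda.field) T__;					\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type check only */	\
	switch (sizeof(pda.field)) {					\
	case 2:	/* kernel would emit: movw %1,%%gs:%c2 */		\
	case 4:	/* kernel would emit: movl %1,%%gs:%c2 */		\
	case 8:	/* kernel would emit: movq %1,%%gs:%c2 */		\
		pda.field = (T__)(val);					\
		break;							\
	default:							\
		assert(!"unsupported field size");			\
	}								\
} while (0)

int main(void)
{
	pda_write(cpunumber, 3u);
	pda_write(kernelstack, 0xdeadbeefUL);
	assert(pda.cpunumber == 3u && pda.kernelstack == 0xdeadbeefUL);
	return 0;
}

Passing a value of the wrong type trips the "if (0)" assignment at compile time without ever executing it, which is why the kernel macros could stay fully typed while still expanding to raw asm.
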
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..aee103b26d01 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
#define _ASM_X86_PERCPU_H
#ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
- in the PDA. Longer term the PDA and every per cpu variable
- should be just put into a single section and referenced directly
- from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg gs
+#define __percpu_mov_op movq
+#else
+#define __percpu_seg fs
+#define __percpu_mov_op movl
#endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment. x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime. The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect. However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var) \
- ({ \
- typeof(per_cpu_var(var)) __tmp; \
- preempt_disable(); \
- __tmp = __get_cpu_var(var); \
- preempt_enable(); \
- __tmp; \
- })
-
-#define x86_write_percpu(var, val) \
- do { \
- preempt_disable(); \
- __get_cpu_var(var) = (val); \
- preempt_enable(); \
- } while(0)
-
-#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#ifdef CONFIG_SMP
-#define PER_CPU(var, reg) \
- movl %fs:per_cpu__##this_cpu_off, reg; \
+#define PER_CPU(var, reg) \
+ __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %fs:per_cpu__##var
+#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
#else /* ! SMP */
-#define PER_CPU(var, reg) \
- movl $per_cpu__##var, reg
+#define PER_CPU(var, reg) \
+ __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#endif
+
#else /* ...!ASSEMBLY */
+#include <linux/stringify.h>
+
+#ifdef CONFIG_SMP
+#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x) "%" #x
+#endif
+
/*
- * PER_CPU finds an address of a per-cpu variable.
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor need to use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
*
- * Args:
- * var - variable name
- * cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- * PER_CPU(cpu_gdt_descr, %ebx)
+ * There also must be an entry in vmlinux_64.lds.S
*/
-#ifdef CONFIG_SMP
-
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
-
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
-
-#else /* !SMP */
-
-#define __percpu_seg ""
-
-#endif /* SMP */
-
-#include <asm-generic/percpu.h>
+#define DECLARE_INIT_PER_CPU(var) \
+ extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var) init_per_cpu__##var
+#else
+#define init_per_cpu_var(var) per_cpu_var(var)
+#endif
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
@@ -120,20 +80,25 @@ do { \
} \
switch (sizeof(var)) { \
case 1: \
- asm(op "b %1,"__percpu_seg"%0" \
+ asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 2: \
- asm(op "w %1,"__percpu_seg"%0" \
+ asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 4: \
- asm(op "l %1,"__percpu_seg"%0" \
+ asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
+ case 8: \
+ asm(op "q %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((T__)val)); \
+ break; \
default: __bad_percpu_size(); \
} \
} while (0)
@@ -143,17 +108,22 @@ do { \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_seg"%1,%0" \
+ asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_seg"%1,%0" \
+ asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_seg"%1,%0" \
+ asm(op "l "__percpu_arg(1)",%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
@@ -162,13 +132,30 @@ do { \
ret__; \
})
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var) \
+({ \
+ int old__; \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (per_cpu__##var) \
+ : "dIr" (bit)); \
+ old__; \
+})
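
The btr/sbb pairing in x86_test_and_clear_bit_percpu() above is a common x86 trick: btr copies the addressed bit into CF and clears it, then sbb of a register with itself yields -1 if CF was set and 0 otherwise. A user-space sketch on an ordinary variable instead of a %gs-relative per-cpu one (x86 with GCC/Clang inline asm assumed):

#include <assert.h>

static int test_and_clear_bit(int bit, unsigned int *word)
{
	int old;

	asm volatile("btrl %2,%1\n\tsbbl %0,%0"
		     : "=r" (old), "+m" (*word)
		     : "Ir" (bit) : "memory", "cc");
	return old;	/* -1 if the bit was set, 0 if it was clear */
}

int main(void)
{
	unsigned int w = 1u << 5;

	assert(test_and_clear_bit(5, &w) != 0);
	assert(w == 0 && test_and_clear_bit(5, &w) == 0);
	return 0;
}

As the kernel comment notes, nothing here is atomic against other CPUs; only preemption on the local CPU must be off.
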
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
#endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */
#ifdef CONFIG_SMP
@@ -195,9 +182,9 @@ do { \
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
- (early_per_cpu_ptr(_name) ? \
- early_per_cpu_ptr(_name)[_cpu] : \
- per_cpu(_name, _cpu))
+ *(early_per_cpu_ptr(_name) ? \
+ &early_per_cpu_ptr(_name)[_cpu] : \
+ &per_cpu(_name, _cpu))
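
The early_per_cpu() rewrite above is subtle: in C the result of ?: is never an lvalue, so the old form could be read but not assigned through. Selecting between two *addresses* and dereferencing yields an expression usable on both sides of '='. A stand-alone sketch of the pattern (array names are illustrative):

#include <assert.h>

static int early_map[4];
static int final_map[4];

#define early_or_final(use_early, idx) \
	(*((use_early) ? &early_map[idx] : &final_map[idx]))

int main(void)
{
	early_or_final(1, 2) = 42;	/* assignable: the macro yields an lvalue */
	early_or_final(0, 2) = 7;
	assert(early_map[2] == 42 && final_map[2] == 7);
	return 0;
}
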
#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index e0d199fe1d83..c1774ac9da7a 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-#define pte_none(x) (!(x).pte_low)
-
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
* split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-2level-defs.h b/arch/x86/include/asm/pgtable-2level_types.h
index d77db8990eaa..daacc23e3fb9 100644
--- a/arch/x86/include/asm/pgtable-2level-defs.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -1,7 +1,23 @@
#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef union {
+ pteval_t pte;
+ pteval_t pte_low;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
#define SHARED_KERNEL_PMD 0
+#define PAGETABLE_LEVELS 2
/*
* traditional i386 two-level paging structure:
@@ -10,6 +26,7 @@
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024
+
/*
* the i386 is two-level, so we don't really have any
* PMD directory physically.
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 447da43cddb3..3f13cdf61156 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -18,21 +18,6 @@
printk("%s:%d: bad pgd %p(%016Lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
-static inline int pud_none(pud_t pud)
-{
- return pud_val(pud) == 0;
-}
-
-static inline int pud_bad(pud_t pud)
-{
- return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
- return pud_val(pud) & _PAGE_PRESENT;
-}
-
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
write_cr3(pgd);
}
-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
- pmd_index(address))
-
#ifdef CONFIG_SMP
static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t a, pte_t b)
-{
- return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-}
-
-static inline int pte_none(pte_t pte)
-{
- return !pte.pte_low && !pte.pte_high;
-}
-
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable-3level-defs.h b/arch/x86/include/asm/pgtable-3level_types.h
index 62561367653c..1bd5876c8649 100644
--- a/arch/x86/include/asm/pgtable-3level-defs.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -1,12 +1,31 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+typedef u64 pgprotval_t;
+
+typedef union {
+ struct {
+ unsigned long pte_low, pte_high;
+ };
+ pteval_t pte;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
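
The PAE pte_t above overlays pte_low/pte_high on the 64-bit pte value through an anonymous struct, so 32-bit code can touch either half directly. A stand-alone sketch (little-endian x86 assumed; the header uses unsigned long for the halves, fixed-width types here for portability):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;

typedef union {
	struct {
		uint32_t pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;

int main(void)
{
	pte_t pte = { .pte = 0x0000001234567001ULL };

	assert(pte.pte_low  == 0x34567001u);	/* flags + low PFN bits */
	assert(pte.pte_high == 0x00000012u);	/* high PFN bits */
	return 0;
}

This split is also why the 32-bit pmd_none() above checks only the low word: the two halves of a live entry can momentarily be out of sync.
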
+
#ifdef CONFIG_PARAVIRT
#define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
#else
#define SHARED_KERNEL_PMD 1
#endif
+#define PAGETABLE_LEVELS 3
+
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/
@@ -25,4 +44,5 @@
*/
#define PTRS_PER_PTE 512
+
#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4f5af8447d54..1c097a3a6669 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1,164 +1,9 @@
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
-#define FIRST_USER_ADDRESS 0
-
-#define _PAGE_BIT_PRESENT 0 /* is present */
-#define _PAGE_BIT_RW 1 /* writeable */
-#define _PAGE_BIT_USER 2 /* userspace addressable */
-#define _PAGE_BIT_PWT 3 /* page write through */
-#define _PAGE_BIT_PCD 4 /* page cache disabled */
-#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT 7 /* on 4KB pages */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
-#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
-/* If _PAGE_BIT_PRESENT is clear, we use these: */
-/* - if the user mapped it with PROT_NONE; pte_present gives true */
-#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
-
-#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
-#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
-#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
-#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
-#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
-#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
-#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
-#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
-#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
-#define _PAGE_NX (_AT(pteval_t, 0))
-#endif
+#include <asm/page.h>
-#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
- _PAGE_DIRTY)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB (0)
-#define _PAGE_CACHE_WC (_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
-#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
- _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_ACCESSED)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
- _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC \
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
-
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
-
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * early identity mapping pte attrib macros.
- */
-#ifdef CONFIG_X86_64
-#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will allow the user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
-#endif
+#include <asm/pgtable_types.h>
/*
* Macro to mark a page protection value as UC-
@@ -170,9 +15,6 @@
#ifndef __ASSEMBLY__
-#define pgprot_writecombine pgprot_writecombine
-extern pgprot_t pgprot_writecombine(pgprot_t prot);
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -183,6 +25,66 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+#define set_pte(ptep, pte) native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
+
+#define set_pte_present(mm, addr, ptep, pte) \
+ native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte) \
+ native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
+#define pgd_clear(pgd) native_pgd_clear(pgd)
+#endif
+
+#ifndef set_pud
+# define set_pud(pudp, pud) native_set_pud(pudp, pud)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud) native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd) native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+{
+ native_pagetable_setup_start(base);
+}
+
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+{
+ native_pagetable_setup_done(base);
+}
+
+#define pgd_val(x) native_pgd_val(x)
+#define __pgd(x) native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x) native_pud_val(x)
+#define __pud(x) native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x) native_pmd_val(x)
+#define __pmd(x) native_make_pmd(x)
+#endif
+
+#define pte_val(x) native_pte_val(x)
+#define __pte(x) native_make_pte(x)
+
+#endif /* CONFIG_PARAVIRT */
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -236,72 +138,84 @@ static inline unsigned long pte_pfn(pte_t pte)
static inline int pmd_large(pmd_t pte)
{
- return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+ pteval_t v = native_pte_val(pte);
+
+ return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+ pteval_t v = native_pte_val(pte);
+
+ return native_make_pte(v & ~clear);
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+ return pte_clear_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkold(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+ return pte_clear_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_wrprotect(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_RW);
+ return pte_clear_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkexec(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_NX);
+ return pte_clear_flags(pte, _PAGE_NX);
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
+ return pte_set_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
+ return pte_set_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_RW);
+ return pte_set_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_PSE);
+ return pte_set_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_clrhuge(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_PSE);
+ return pte_clear_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_mkglobal(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_GLOBAL);
+ return pte_set_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_clrglobal(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+ return pte_clear_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_mkspecial(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL);
+ return pte_set_flags(pte, _PAGE_SPECIAL);
}
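
The pte_mk*() helpers above now funnel through pte_set_flags()/pte_clear_flags() instead of open-coded __pte(pte_val() | ...). A stand-alone sketch of the pattern with simplified types (the two flag bit positions match this series; everything else is illustrative):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;
typedef struct { pteval_t pte; } pte_t;

#define _PAGE_RW	((pteval_t)1 << 1)
#define _PAGE_DIRTY	((pteval_t)1 << 6)

static pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pte.pte |= set;
	return pte;
}

static pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pte.pte &= ~clear;
	return pte;
}

static pte_t pte_mkdirty(pte_t pte)   { return pte_set_flags(pte, _PAGE_DIRTY); }
static pte_t pte_wrprotect(pte_t pte) { return pte_clear_flags(pte, _PAGE_RW); }

int main(void)
{
	pte_t pte = { _PAGE_RW };

	pte = pte_mkdirty(pte);
	assert(pte.pte == (_PAGE_RW | _PAGE_DIRTY));
	pte = pte_wrprotect(pte);
	assert(pte.pte == _PAGE_DIRTY);
	return 0;
}

Routing every helper through two primitives keeps the native_pte_val()/native_make_pte() conversion in one place instead of repeated in each pte_mk*() body.
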
-extern pteval_t __supported_pte_mask;
-
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
@@ -374,82 +288,195 @@ static inline int is_new_memtype_allowed(unsigned long flags,
return 1;
}
-#ifndef __ASSEMBLY__
-/* Indicate that x86 has its own track and untrack pfn vma functions */
-#define __HAVE_PFNMAP_TRACKING
-
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t *vma_prot);
-#endif
-
-/* Install a pte for a particular vaddr in kernel space. */
-void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
+# include "pgtable_32.h"
#else
-static inline void native_pagetable_setup_start(pgd_t *base) {}
-static inline void native_pagetable_setup_done(pgd_t *base) {}
+# include "pgtable_64.h"
#endif
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else /* !CONFIG_PARAVIRT */
-#define set_pte(ptep, pte) native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
+static inline int pte_none(pte_t pte)
+{
+ return !pte.pte;
+}
-#define set_pte_present(mm, addr, ptep, pte) \
- native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte) \
- native_set_pte_atomic(ptep, pte)
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+ return a.pte == b.pte;
+}
-#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+static inline int pte_present(pte_t a)
+{
+ return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
-#ifndef __PAGETABLE_PUD_FOLDED
-#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd) native_pgd_clear(pgd)
-#endif
+static inline int pmd_present(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_PRESENT;
+}
-#ifndef set_pud
-# define set_pud(pudp, pud) native_set_pud(pudp, pud)
-#endif
+static inline int pmd_none(pmd_t pmd)
+{
+ /* Only check low word on 32-bit platforms, since it might be
+ out of sync with upper half. */
+ return (unsigned long)native_pmd_val(pmd) == 0;
+}
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pud_clear(pud) native_pud_clear(pud)
-#endif
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
-#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd) native_pmd_clear(pmd)
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#define pte_update(mm, addr, ptep) do { } while (0)
-#define pte_update_defer(mm, addr, ptep) do { } while (0)
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this function returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+ return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
-static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
{
- native_pagetable_setup_start(base);
+ return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
-static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
- native_pagetable_setup_done(base);
+ return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
-#endif /* CONFIG_PARAVIRT */
-#endif /* __ASSEMBLY__ */
+static inline int pmd_bad(pmd_t pmd)
+{
+ return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
-#ifdef CONFIG_X86_32
-# include "pgtable_32.h"
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+ return npg >> (20 - PAGE_SHIFT);
+}
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+ return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+ return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+ (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
#else
-# include "pgtable_64.h"
-#endif
+static inline int pud_large(pud_t pud)
+{
+ return 0;
+}
+#endif /* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+ return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+ return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+ return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+ return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+ return !native_pgd_val(pgd);
+}
+#endif /* PAGETABLE_LEVELS > 3 */
+
+#endif /* __ASSEMBLY__ */
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
@@ -476,28 +503,6 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
#ifndef __ASSEMBLY__
-enum {
- PG_LEVEL_NONE,
- PG_LEVEL_4K,
- PG_LEVEL_2M,
- PG_LEVEL_1G,
- PG_LEVEL_NUM
-};
-
-#ifdef CONFIG_PROC_FS
-extern void update_page_count(int level, unsigned long pages);
-#else
-static inline void update_page_count(int level, unsigned long pages) { }
-#endif
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 72b020deb46b..97612fc7632f 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -1,6 +1,7 @@
#ifndef _ASM_X86_PGTABLE_32_H
#define _ASM_X86_PGTABLE_32_H
+#include <asm/pgtable_32_types.h>
/*
* The Linux memory management assumes a three-level page table setup. On
@@ -33,47 +34,6 @@ void paging_init(void);
extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE - 1))
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be an 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leave a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
- & PMD_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
-#endif
-
-#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
/*
* Define this if things work differently on an i386 and an i486:
@@ -85,55 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x) (!(unsigned long)pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-
-static inline int pud_large(pud_t pud) { return 0; }
-
-/*
- * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address) \
- (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-/*
- * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
- ((pte_t *)pmd_page_vaddr(*(dir)) + pte_index((address)))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
-
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \
@@ -176,7 +93,4 @@ do { \
#define kern_addr_valid(kaddr) (0)
#endif
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#endif /* _ASM_X86_PGTABLE_32_H */
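
Back-of-the-envelope check of the classic two-level layout that pgtable_32.h now pulls in from pgtable_32_types.h: with PGDIR_SHIFT 22 each of the 1024 pgd entries maps 4MB through 1024 4-byte ptes, covering the full 32-bit address space. Stand-alone arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t pgdir_size = (uint64_t)1 << 22;	/* 4MB per pgd entry */

	assert(pgdir_size == 1024 * 4096);		/* 1024 ptes x 4K pages */
	assert(1024 * pgdir_size == (uint64_t)1 << 32);	/* 4GB total */
	return 0;
}
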
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
new file mode 100644
index 000000000000..bd8df3b2fe04
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_X86_PGTABLE_32_DEFS_H
+#define _ASM_X86_PGTABLE_32_DEFS_H
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level_types.h>
+# define PMD_SIZE (1UL << PMD_SHIFT)
+# define PMD_MASK (~(PMD_SIZE - 1))
+#else
+# include <asm/pgtable-2level_types.h>
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+ & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+#endif
+
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
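
The two LAST_PKMAP values above follow from fitting the kmap window into a single page-table page (the usual rationale): one 4K page holds 1024 4-byte entries without PAE but only 512 8-byte entries with it, so the window shrinks from 4MB to 2MB. Stand-alone check of that arithmetic:

#include <assert.h>

int main(void)
{
	unsigned long page_size = 4096;

	assert(1024 * page_size == 4UL << 20);	/* !PAE: LAST_PKMAP 1024 -> 4MB */
	assert(512  * page_size == 2UL << 20);	/*  PAE: LAST_PKMAP 512  -> 2MB */
	return 0;
}
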
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ba09289accaa..6b87bc6d5018 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -2,6 +2,8 @@
#define _ASM_X86_PGTABLE_64_H
#include <linux/const.h>
+#include <asm/pgtable_64_types.h>
+
#ifndef __ASSEMBLY__
/*
@@ -11,7 +13,6 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
-#include <asm/pda.h>
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -26,32 +27,6 @@ extern void paging_init(void);
#endif /* !__ASSEMBLY__ */
-#define SHARED_KERNEL_PMD 0
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT 39
-#define PTRS_PER_PGD 512
-
-/*
- * 3rd level page
- */
-#define PUD_SHIFT 30
-#define PTRS_PER_PUD 512
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT 21
-#define PTRS_PER_PMD 512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE 512
-
#ifndef __ASSEMBLY__
#define pte_ERROR(e) \
@@ -67,9 +42,6 @@ extern void paging_init(void);
printk("%s:%d: bad pgd %p(%016lx).\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
-#define pgd_none(x) (!pgd_val(x))
-#define pud_none(x) (!pud_val(x))
-
struct mm_struct;
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,48 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
native_set_pgd(pgd, native_make_pgd(0));
}
-#define pte_same(a, b) ((a).pte == (b).pte)
-
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE - 1))
-#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE - 1))
-#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-
-
-#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START _AC(0xffffc20000000000, UL)
-#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
-#define VMEMMAP_START _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
-#define MODULES_END _AC(0xffffffffff000000, UL)
-#define MODULES_LEN (MODULES_END - MODULES_VADDR)
-
-#ifndef __ASSEMBLY__
-
-static inline int pgd_bad(pgd_t pgd)
-{
- return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pud_bad(pud_t pud)
-{
- return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
- return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-#define pte_none(x) (!pte_val((x)))
-#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
-
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
@@ -184,41 +114,12 @@ static inline int pmd_bad(pmd_t pmd)
/*
* Level 4 access.
*/
-#define pgd_page_vaddr(pgd) \
- ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
-#define pgd_page(pgd) (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
/* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud) \
- ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud) (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgd, address) \
- ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
-#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
-
-static inline int pud_large(pud_t pte)
-{
- return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
- (_PAGE_PSE | _PAGE_PRESENT);
-}
/* PMD - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
-#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
- pmd_index(address))
-#define pmd_none(x) (!pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
-#define pmd_pfn(x) ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | \
_PAGE_FILE })
@@ -226,13 +127,6 @@ static inline int pud_large(pud_t pte)
/* PTE - Level 1 access. */
-/* page, protection -> pte */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn((page)), (pgprot))
-
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
- pte_index((address)))
-
/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +160,6 @@ extern int direct_gbpages;
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
new file mode 100644
index 000000000000..fbf42b8e0383
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_PGTABLE_64_DEFS_H
+#define _ASM_X86_PGTABLE_64_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD 0
+#define PAGETABLE_LEVELS 4
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+
+#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_START _AC(0xffffc20000000000, UL)
+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START _AC(0xffffe20000000000, UL)
+#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_END _AC(0xffffffffff000000, UL)
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
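
The shifts above decompose a 48-bit virtual address under 4-level paging into four 9-bit indices plus a 12-bit page offset. A stand-alone sketch using the constants from this header (LP64 assumed; the address is VMALLOC_START plus an arbitrary delta):

#include <assert.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PGDIR_SHIFT	39
#define PTRS_MASK	511	/* PTRS_PER_{PGD,PUD,PMD,PTE} - 1 */

int main(void)
{
	unsigned long addr = 0xffffc20000000000UL	/* VMALLOC_START */
			   + (5UL << PUD_SHIFT)
			   + (7UL << PMD_SHIFT)
			   + (9UL << PAGE_SHIFT) + 0x34;

	assert(((addr >> PGDIR_SHIFT) & PTRS_MASK) == 388);
	assert(((addr >> PUD_SHIFT)   & PTRS_MASK) == 5);
	assert(((addr >> PMD_SHIFT)   & PTRS_MASK) == 7);
	assert(((addr >> PAGE_SHIFT)  & PTRS_MASK) == 9);
	assert((addr & 4095) == 0x34);
	return 0;
}
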
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
new file mode 100644
index 000000000000..4d258ad76a0f
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -0,0 +1,328 @@
+#ifndef _ASM_X86_PGTABLE_DEFS_H
+#define _ASM_X86_PGTABLE_DEFS_H
+
+#include <linux/const.h>
+#include <asm/page_types.h>
+
+#define FIRST_USER_ADDRESS 0
+
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_PAT 7 /* on 4KB pages */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+/* If _PAGE_BIT_PRESENT is clear, we use these: */
+/* - if the user mapped it with PROT_NONE; pte_present gives true */
+#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
+/* - set: nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
+
+#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+#else
+#define _PAGE_NX (_AT(pteval_t, 0))
+#endif
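
The (_AT(pteval_t, 1) << bit) form above matters most for _PAGE_BIT_NX == 63: the 1 must be widened to pteval_t before the shift, or a plain 32-bit int shift would be undefined. Stand-alone sketch with the 64-bit pteval_t used by PAE and 64-bit builds (the real _AT() in linux/const.h also has an assembler variant; the C expansion is shown):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _AT(t, x)	((t)(x))
#define _PAGE_BIT_NX	63
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)

int main(void)
{
	assert(_PAGE_NX == 0x8000000000000000ULL);
	assert((_PAGE_NX & (_PAGE_NX - 1)) == 0);	/* single bit set */
	return 0;
}
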
+
+#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+ _PAGE_DIRTY)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB (0)
+#define _PAGE_CACHE_WC (_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
+#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
+ _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
+#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
+#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * early identity mapping pte attrib macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+/*
+ * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+#endif
+
+#ifdef CONFIG_X86_32
+# include "pgtable_32_types.h"
+#else
+# include "pgtable_64_types.h"
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
+
+/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
+
+typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
+
+typedef struct { pgdval_t pgd; } pgd_t;
+
+static inline pgd_t native_make_pgd(pgdval_t val)
+{
+ return (pgd_t) { val };
+}
+
+static inline pgdval_t native_pgd_val(pgd_t pgd)
+{
+ return pgd.pgd;
+}
+
+static inline pgdval_t pgd_flags(pgd_t pgd)
+{
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+}
+
+#if PAGETABLE_LEVELS > 3
+typedef struct { pudval_t pud; } pud_t;
+
+static inline pud_t native_make_pud(pmdval_t val)
+{
+ return (pud_t) { val };
+}
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+ return pud.pud;
+}
+#else
+#include <asm-generic/pgtable-nopud.h>
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+ return native_pgd_val(pud.pgd);
+}
+#endif
+
+#if PAGETABLE_LEVELS > 2
+typedef struct { pmdval_t pmd; } pmd_t;
+
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+ return (pmd_t) { val };
+}
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+ return pmd.pmd;
+}
+#else
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+ return native_pgd_val(pmd.pud.pgd);
+}
+#endif
+
+static inline pudval_t pud_flags(pud_t pud)
+{
+ return native_pud_val(pud) & PTE_FLAGS_MASK;
+}
+
+static inline pmdval_t pmd_flags(pmd_t pmd)
+{
+ return native_pmd_val(pmd) & PTE_FLAGS_MASK;
+}
+
+static inline pte_t native_make_pte(pteval_t val)
+{
+ return (pte_t) { .pte = val };
+}
+
+static inline pteval_t native_pte_val(pte_t pte)
+{
+ return pte.pte;
+}
+
+static inline pteval_t pte_flags(pte_t pte)
+{
+ return native_pte_val(pte) & PTE_FLAGS_MASK;
+}
+
+#define pgprot_val(x) ((x).pgprot)
+#define __pgprot(x) ((pgprot_t) { (x) } )
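
The single-member structs above (pgd_t, pud_t, pmd_t, pte_t, pgprot_t) exist purely for type safety: the compiler rejects mixing a pte value with a pgprot even though both are bare integers inside. A stand-alone sketch of the pattern (pfn_pte() is simplified here to a fixed 12-bit shift for illustration):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;
typedef uint64_t pgprotval_t;

typedef struct { pteval_t pte; } pte_t;
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

#define __pte(x)	((pte_t) { .pte = (x) })
#define pte_val(x)	((x).pte)
#define __pgprot(x)	((pgprot_t) { .pgprot = (x) })
#define pgprot_val(x)	((x).pgprot)

static pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte(((pteval_t)pfn << 12) | pgprot_val(prot));
}

int main(void)
{
	pte_t pte = pfn_pte(0x1234, __pgprot(0x63));

	assert(pte_val(pte) == 0x1234063);
	/* pte = __pgprot(0); <- would not compile: distinct struct types */
	return 0;
}

The wrappers compile away entirely; pte_val()/__pte() and friends are the only sanctioned ways in or out of the typed world.
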
+
+
+typedef struct page *pgtable_t;
+
+extern pteval_t __supported_pte_mask;
+extern int nx_enabled;
+
+#define pgprot_writecombine pgprot_writecombine
+extern pgprot_t pgprot_writecombine(pgprot_t prot);
+
+/* Indicate that x86 has its own track and untrack pfn vma functions */
+#define __HAVE_PFNMAP_TRACKING
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t *vma_prot);
+
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
+#ifdef CONFIG_X86_32
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
+#else
+static inline void native_pagetable_setup_start(pgd_t *base) {}
+static inline void native_pagetable_setup_done(pgd_t *base) {}
+#endif
+
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
+enum {
+ PG_LEVEL_NONE,
+ PG_LEVEL_4K,
+ PG_LEVEL_2M,
+ PG_LEVEL_1G,
+ PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/prctl.h b/arch/x86/include/asm/prctl.h
index a8894647dd9a..3ac5032fae09 100644
--- a/arch/x86/include/asm/prctl.h
+++ b/arch/x86/include/asm/prctl.h
@@ -6,8 +6,4 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 3bfd5235a9eb..c7a98f738210 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -16,6 +16,7 @@ struct mm_struct;
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
+#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
@@ -73,7 +74,7 @@ struct cpuinfo_x86 {
char pad0;
#else
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
- int x86_tlbsize;
+ int x86_tlbsize;
__u8 x86_virt_bits;
__u8 x86_phys_bits;
#endif
@@ -378,9 +379,30 @@ union thread_xstate {
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+ char irq_stack[IRQ_STACK_SIZE];
+ /*
+ * GCC hardcodes the stack canary at %gs:40. Since the
+ * irq_stack is the object at %gs:0, we reserve the bottom
+ * 48 bytes of the irq stack for the canary.
+ */
+ struct {
+ char gs_base[40];
+ unsigned long stack_canary;
+ };
+};
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
+DECLARE_PER_CPU(char *, irq_stack_ptr);
+#else /* X86_64 */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DECLARE_PER_CPU(unsigned long, stack_canary);
#endif
+#endif /* X86_64 */
-extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;
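
The gs_base[40] member in the union above is what pins stack_canary at offset 40, matching GCC's hardcoded %gs:40 access. A userspace model of the layout with a compile-time check; the IRQ_STACK_SIZE value is an assumption (16K) and the model is illustrative only.

/*
 * Userspace model of the irq_stack_union layout.  The anonymous
 * struct places stack_canary exactly 40 bytes from the union's base,
 * which is where -fstack-protector expects it relative to %gs.
 * Build: cc -std=c11 sketch.c
 */
#include <assert.h>
#include <stddef.h>

#define IRQ_STACK_SIZE (16 * 1024)	/* assumed value */

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

int main(void)
{
	static_assert(offsetof(union irq_stack_union, stack_canary) == 40,
		      "canary must sit at offset 40 (%gs:40)");
	return 0;
}
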
@@ -752,9 +774,9 @@ extern int sysenter_setup(void);
extern struct desc_ptr early_gdt_descr;
extern void cpu_set_gdt(int);
-extern void switch_to_new_gdt(void);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
extern void cpu_init(void);
-extern void init_gdt(int cpu);
static inline unsigned long get_debugctlmsr(void)
{
@@ -839,6 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
* User space process size: 3GB (default).
*/
#define TASK_SIZE PAGE_OFFSET
+#define TASK_SIZE_MAX TASK_SIZE
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
@@ -898,7 +921,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*
* User space process size. 47bits minus one guard page.
*/
-#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
@@ -907,12 +930,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
0xc0000000 : 0xFFFFe000)
#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
- IA32_PAGE_OFFSET : TASK_SIZE64)
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
- IA32_PAGE_OFFSET : TASK_SIZE64)
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define STACK_TOP TASK_SIZE
-#define STACK_TOP_MAX TASK_SIZE64
+#define STACK_TOP_MAX TASK_SIZE_MAX
#define INIT_THREAD { \
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index d6a22f92ba77..49fb3ecf3bb3 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
extern void check_efer(void);
-#ifdef CONFIG_X86_BIOS_REBOOT
extern int reboot_force;
-#else
-static const int reboot_force = 0;
-#endif
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 6d34d954c228..e304b66abeea 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -28,7 +28,7 @@ struct pt_regs {
int xds;
int xes;
int xfs;
- /* int gs; */
+ int xgs;
long orig_eax;
long eip;
int xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
unsigned long ds;
unsigned long es;
unsigned long fs;
- /* int gs; */
+ unsigned long gs;
unsigned long orig_ax;
unsigned long ip;
unsigned long cs;
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
index c8e9c8bed3d0..c8e9c8bed3d0 100644
--- a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
+++ b/arch/x86/include/asm/rdc321x_defs.h
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 1dc1b51ac623..14e0ed86a6f9 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -61,7 +61,7 @@
*
* 26 - ESPFIX small SS
* 27 - per-cpu [ offset to per-cpu data area ]
- * 28 - unused
+ * 28 - stack_canary-20 [ for stack protector ]
* 29 - unused
* 30 - unused
* 31 - TSS for double fault handler
@@ -95,6 +95,13 @@
#define __KERNEL_PERCPU 0
#endif
+#define GDT_ENTRY_STACK_CANARY (GDT_ENTRY_KERNEL_BASE + 16)
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY * 8)
+#else
+#define __KERNEL_STACK_CANARY 0
+#endif
+
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
/*
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ebe858cdc8a3..66801cb72f69 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -1,33 +1,19 @@
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
+#ifdef __KERNEL__
+
#define COMMAND_LINE_SIZE 2048
#ifndef __ASSEMBLY__
-/* Interrupt control for vSMPowered x86_64 systems */
-void vsmp_init(void);
-
-
-void setup_bios_corruption_check(void);
-
-
-#ifdef CONFIG_X86_VISWS
-extern void visws_early_detect(void);
-extern int is_visws_box(void);
-#else
-static inline void visws_early_detect(void) { }
-static inline int is_visws_box(void) { return 0; }
-#endif
-
-extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
-extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
/*
* Any setup quirks to be performed?
*/
struct mpc_cpu;
struct mpc_bus;
struct mpc_oemtable;
+
struct x86_quirks {
int (*arch_pre_time_init)(void);
int (*arch_time_init)(void);
@@ -43,20 +29,20 @@ struct x86_quirks {
void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
void (*mpc_oem_pci_bus)(struct mpc_bus *m);
void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
- unsigned short oemsize);
+ unsigned short oemsize);
int (*setup_ioapic_ids)(void);
- int (*update_genapic)(void);
+ int (*update_apic)(void);
};
-extern struct x86_quirks *x86_quirks;
-extern unsigned long saved_video_mode;
+extern void x86_quirk_pre_intr_init(void);
+extern void x86_quirk_intr_init(void);
-#ifndef CONFIG_PARAVIRT
-#define paravirt_post_allocator_init() do {} while (0)
-#endif
-#endif /* __ASSEMBLY__ */
+extern void x86_quirk_trap_init(void);
-#ifdef __KERNEL__
+extern void x86_quirk_pre_time_init(void);
+extern void x86_quirk_time_init(void);
+
+#endif /* __ASSEMBLY__ */
#ifdef __i386__
@@ -78,6 +64,28 @@ extern unsigned long saved_video_mode;
#ifndef __ASSEMBLY__
#include <asm/bootparam.h>
+/* Interrupt control for vSMPowered x86_64 systems */
+void vsmp_init(void);
+
+void setup_bios_corruption_check(void);
+
+#ifdef CONFIG_X86_VISWS
+extern void visws_early_detect(void);
+extern int is_visws_box(void);
+#else
+static inline void visws_early_detect(void) { }
+static inline int is_visws_box(void) { return 0; }
+#endif
+
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
+extern struct x86_quirks *x86_quirks;
+extern unsigned long saved_video_mode;
+
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
#ifndef _SETUP
/*
@@ -100,7 +108,6 @@ extern unsigned long init_pg_tables_start;
extern unsigned long init_pg_tables_end;
#else
-void __init x86_64_init_pda(void);
void __init x86_64_start_kernel(char *real_mode);
void __init x86_64_start_reservations(char *real_mode_data);
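
For orientation, here is a hedged sketch of how a subarchitecture might hook the renamed update_apic callback in the quirk table above. The "myplat" names are invented for the example, unused hooks stay NULL, and this is a sketch against the kernel API rather than standalone code.

/*
 * Hedged illustration, not from this patch: a subarch fills in only
 * the hooks it needs; generic code checks for NULL before calling.
 */
static int myplat_update_apic(void)
{
	/* e.g. re-route the CPU wakeup method once the APIC driver is chosen */
	return 0;
}

static struct x86_quirks myplat_quirks = {
	.update_apic		= myplat_update_apic,
};

void __init myplat_setup(void)
{
	x86_quirks = &myplat_quirks;
}
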
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/setup_arch.h
index 38846208b548..38846208b548 100644
--- a/arch/x86/include/asm/mach-default/setup_arch.h
+++ b/arch/x86/include/asm/setup_arch.h
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19953df61c52..47d0e21f2b9e 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -15,34 +15,8 @@
# include <asm/io_apic.h>
# endif
#endif
-#include <asm/pda.h>
#include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);
+#include <asm/cpumask.h>
extern int smp_num_siblings;
extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
DECLARE_PER_CPU(int, cpu_number);
-#endif
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -167,8 +139,6 @@ void play_dead_common(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
-extern void prefill_possible_map(void);
-
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
{
return cpumask_weight(cpu_callout_mask);
}
-#else
-static inline void prefill_possible_map(void)
-{
-}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))
#define stack_smp_processor_id() \
({ \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
})
#define safe_smp_processor_id() smp_processor_id()
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu) boot_cpu_physical_apicid
-#define safe_smp_processor_id() 0
-#define stack_smp_processor_id() 0
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
}
-#include <mach_apicdef.h>
-static inline unsigned int read_apic_id(void)
-{
- unsigned int reg;
-
- reg = *(u32 *)(APIC_BASE + APIC_ID);
-
- return GET_APIC_ID(reg);
-}
#endif
-
-# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
extern int hard_smp_processor_id(void);
-# else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
- /* we don't want to mark this access volatile - bad code generation */
- return read_apic_id();
-}
-# endif /* APIC_DEFINITION */
#else /* CONFIG_X86_LOCAL_APIC */
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
#endif /* CONFIG_X86_LOCAL_APIC */
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id 0
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */
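
Both raw_smp_processor_id() variants now reduce to percpu_read(cpu_number), a single segment-relative load. A rough userspace analogue using TLS follows; it is illustrative only, since the kernel reads through the %fs (32-bit) or %gs (64-bit) segment base rather than pthread TLS.

/*
 * Userspace analogue: thread-local storage gives each thread a
 * private cpu_number slot much as each CPU owns a percpu area.
 * Build: cc -pthread sketch.c
 */
#include <stdio.h>
#include <pthread.h>

static __thread int cpu_number;

#define raw_smp_processor_id() (cpu_number)

static void *worker(void *arg)
{
	cpu_number = (int)(long)arg;	/* set once per "cpu" */
	printf("running on cpu %d\n", raw_smp_processor_id());
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)0L);
	pthread_create(&t2, NULL, worker, (void *)1L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
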
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 23bf52103b89..1def60114906 100644
--- a/arch/x86/include/asm/mach-default/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -13,10 +13,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
CMOS_WRITE(0xa, 0xf);
local_flush_tlb();
pr_debug("1.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
start_eip >> 4;
pr_debug("2.\n");
- *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+ *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
start_eip & 0xf;
pr_debug("3.\n");
}
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
- *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+ *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 8247e94ac6b1..3a5696656680 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure. It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
- s8 lock;
- s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- s8 val = 1;
-
- asm("1: xchgb %1, %0\n"
- " test %1,%1\n"
- " jz 3f\n"
- " " LOCK_PREFIX "incb %2\n"
- "2: rep;nop\n"
- " cmpb $1, %0\n"
- " je 2b\n"
- " " LOCK_PREFIX "decb %2\n"
- " jmp 1b\n"
- "3:"
- : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- u8 old = 1;
-
- asm("xchgb %1,%0"
- : "+m" (bl->lock), "+q" (old) : : "memory");
+#ifndef CONFIG_PARAVIRT
- return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
- struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
- smp_wmb();
- bl->lock = 0;
-}
-#else /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
__raw_spin_lock(lock);
}
-#endif /* CONFIG_PARAVIRT */
+#endif
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
atomic_t *count = (atomic_t *)lock;
- atomic_dec(count);
- if (atomic_read(count) >= 0)
+ if (atomic_dec_return(count) >= 0)
return 1;
atomic_inc(count);
return 0;
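
The __raw_read_trylock change above is a correctness fix, not a cleanup: the decision must be based on the value our own decrement produced, and a separate atomic_read() after the decrement can observe other CPUs' updates in between. A minimal C11 model of the fixed pattern:

/*
 * atomic_fetch_sub() hands back the pre-decrement value, so
 * "value - 1" is exactly what our own decrement produced; re-reading
 * the counter afterwards (the old code) could instead observe another
 * thread's decrement.  Build: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 2;			/* two reader slots free */

static int read_trylock(void)
{
	if (atomic_fetch_sub(&count, 1) - 1 >= 0)
		return 1;			/* got the read lock */
	atomic_fetch_add(&count, 1);		/* undo, lock not taken */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       read_trylock(), read_trylock(), read_trylock());  /* 1 1 0 */
	return 0;
}
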
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644
index 000000000000..c2d742c6e15f
--- /dev/null
+++ b/arch/x86/include/asm/stackprotector.h
@@ -0,0 +1,124 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start
+ * of the stack frame and verifying that it hasn't been overwritten
+ * when returning from the function.  The pattern is called the stack
+ * canary, and unfortunately gcc requires it to be at a fixed offset
+ * from %gs.  On x86_64 the offset is 40 bytes and on x86_32 it is
+ * 20 bytes.  x86_64 and x86_32 use segment registers differently and
+ * thus handle this requirement differently.
+ *
+ * On x86_64, %gs is shared by the percpu area and the stack canary.
+ * All percpu symbols are zero based and %gs points to the base of
+ * the percpu area.  The first occupant of the percpu area is always
+ * irq_stack_union, which contains stack_canary at offset 40.  Userland
+ * %gs is always saved and restored on kernel entry and exit using
+ * swapgs, so stack protector doesn't add any complexity there.
+ *
+ * On x86_32, it's slightly more complicated.  As on x86_64, %gs is
+ * used for userland TLS.  Unfortunately, some processors are much
+ * slower at loading segment registers with a different value when
+ * entering and leaving the kernel, so the kernel uses %fs for the
+ * percpu area and manages %gs lazily so that %gs is switched only
+ * when necessary, usually during task switch.
+ *
+ * As gcc requires the stack canary at %gs:20, %gs can't be managed
+ * lazily if stack protector is enabled, so the kernel saves and
+ * restores userland %gs on kernel entry and exit.  This behavior is
+ * controlled by CONFIG_X86_32_LAZY_GS, and accessors are defined in
+ * system.h to hide the details.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+#include <asm/desc.h>
+#include <linux/random.h>
+
+/*
+ * 24-byte read-only segment initializer for the stack canary.  The
+ * linker can't handle the address bit shifting, so the address is
+ * set in head_32 for the boot CPU and in setup_per_cpu_areas() for
+ * the others.
+ */
+#define GDT_STACK_CANARY_INIT \
+ [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ u64 canary;
+ u64 tsc;
+
+#ifdef CONFIG_X86_64
+ BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+#endif
+ /*
+ * We use both the random pool and the current TSC as sources of
+ * randomness.  The TSC only matters for very early init; there it
+ * already has some randomness on most systems.  Later on during
+ * bootup the random pool has true entropy too.
+ */
+ get_random_bytes(&canary, sizeof(canary));
+ tsc = __native_read_tsc();
+ canary += tsc + (tsc << 32UL);
+
+ current->stack_canary = canary;
+#ifdef CONFIG_X86_64
+ percpu_write(irq_stack_union.stack_canary, canary);
+#else
+ percpu_write(stack_canary, canary);
+#endif
+}
+
+static inline void setup_stack_canary_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+ unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+ struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+ struct desc_struct desc;
+
+ desc = gdt_table[GDT_ENTRY_STACK_CANARY];
+ desc.base0 = canary & 0xffff;
+ desc.base1 = (canary >> 16) & 0xff;
+ desc.base2 = (canary >> 24) & 0xff;
+ write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
+#endif
+}
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+ asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+#endif
+}
+
+#else /* CC_STACKPROTECTOR */
+
+#define GDT_STACK_CANARY_INIT
+
+/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
+
+static inline void setup_stack_canary_segment(int cpu)
+{ }
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+#endif
+}
+
+#endif /* CC_STACKPROTECTOR */
+#endif /* _ASM_STACKPROTECTOR_H */
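
The "- 20" in setup_stack_canary_segment() above makes the fixed %gs:20 slot that gcc emits land exactly on the per-cpu stack_canary variable. A standalone model of that base arithmetic, with plain integer math standing in for the segment base:

/*
 * If the segment base is programmed to &stack_canary - 20, then
 * base + 20 == &stack_canary, so gcc's fixed-offset load resolves to
 * the variable itself.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned long stack_canary = 0xdeadbeef;

int main(void)
{
	uintptr_t gs_base = (uintptr_t)&stack_canary - 20;

	/* what "mov %gs:20, %reg" effectively reads: */
	unsigned long v = *(unsigned long *)(gs_base + 20);

	assert(v == stack_canary);
	printf("canary via base+20: %#lx\n", v);
	return 0;
}
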
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644
index 93d2c8667cfe..000000000000
--- a/arch/x86/include/asm/summit/apic.h
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __ASM_SUMMIT_APIC_H
-#define __ASM_SUMMIT_APIC_H
-
-#include <asm/smp.h>
-#include <linux/gfp.h>
-
-#define esr_disable (1)
-#define NO_BALANCE_IRQ (0)
-
-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT 4
-#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
- /* CPU_MASK_ALL (0xff) has undefined behaviour with
- * dest_LowestPrio mode logical clustered apic interrupt routing
- * Just start on cpu 0. IRQ balancing will spread load
- */
- return &cpumask_of_cpu(0);
-}
-
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
- return 0;
-}
-
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
-{
- return 1;
-}
-
-#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-
-extern u8 cpu_2_logical_apicid[];
-
-static inline void init_apic_ldr(void)
-{
- unsigned long val, id;
- int count = 0;
- u8 my_id = (u8)hard_smp_processor_id();
- u8 my_cluster = (u8)apicid_cluster(my_id);
-#ifdef CONFIG_SMP
- u8 lid;
- int i;
-
- /* Create logical APIC IDs by counting CPUs already in cluster. */
- for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
- lid = cpu_2_logical_apicid[i];
- if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
- ++count;
- }
-#endif
- /* We only have a 4 wide bitmap in cluster mode. If a deranged
- * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
- BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
- id = my_cluster | (1UL << count);
- apic_write(APIC_DFR, APIC_DFR_VALUE);
- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
- val |= SET_APIC_LOGICAL_ID(id);
- apic_write(APIC_LDR, val);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
- return 0;
-}
-
-static inline int apic_id_registered(void)
-{
- return 1;
-}
-
-static inline void setup_apic_routing(void)
-{
- printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
- nr_ioapics);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
- return apicid_2_node[hard_smp_processor_id()];
-#else
- return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
- if (cpu >= nr_cpu_ids)
- return BAD_APICID;
- return (int)cpu_2_logical_apicid[cpu];
-#else
- return logical_smp_processor_id();
-#endif
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
- if (mps_cpu < nr_cpu_ids)
- return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
- else
- return BAD_APICID;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
-{
- /* For clustered we don't have a good way to do this yet - hack */
- return physids_promote(0x0F);
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
- return physid_mask_of_physid(0);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
- return 1;
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
- int num_bits_set;
- int cpus_found = 0;
- int cpu;
- int apicid;
-
- num_bits_set = cpus_weight(*cpumask);
- /* Return id to all */
- if (num_bits_set >= nr_cpu_ids)
- return (int) 0xFF;
- /*
- * The cpus in the mask must all be on the apic cluster. If are not
- * on the same apicid cluster return default value of TARGET_CPUS.
- */
- cpu = first_cpu(*cpumask);
- apicid = cpu_to_logical_apicid(cpu);
- while (cpus_found < num_bits_set) {
- if (cpu_isset(cpu, *cpumask)) {
- int new_apicid = cpu_to_logical_apicid(cpu);
- if (apicid_cluster(apicid) !=
- apicid_cluster(new_apicid)){
- printk ("%s: Not a valid mask!\n", __func__);
- return 0xFF;
- }
- apicid = apicid | new_apicid;
- cpus_found++;
- }
- cpu++;
- }
- return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
-{
- int apicid = cpu_to_logical_apicid(0);
- cpumask_var_t cpumask;
-
- if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
-
- cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = cpu_mask_to_apicid(cpumask);
-
- free_cpumask_var(cpumask);
- return apicid;
-}
-
-/* cpuid returns the value latched in the HW at reset, not the APIC ID
- * register's value. For any box whose BIOS changes APIC IDs, like
- * clustered APIC systems, we must use hard_smp_processor_id.
- *
- * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
- */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
- return hard_smp_processor_id() >> index_msb;
-}
-
-#endif /* __ASM_SUMMIT_APIC_H */
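
Although this header is deleted in favour of the runtime 'apic' driver, the logical APIC ID format it encoded is hardware-defined: the high nibble selects the cluster and the low nibble is a one-bit-per-CPU map, hence the 4-CPUs-per-cluster limit checked in init_apic_ldr() above. A small standalone illustration:

/*
 * Clustered xAPIC logical ID layout, illustrative values.
 */
#include <stdio.h>

#define XAPIC_DEST_CPUS_SHIFT	4
#define XAPIC_DEST_CPUS_MASK	((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK	(XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)

#define apicid_cluster(id)	((id) & XAPIC_DEST_CLUSTER_MASK)

int main(void)
{
	/* the third CPU (count == 2) joining cluster 5 */
	unsigned int id = (5u << XAPIC_DEST_CPUS_SHIFT) | (1u << 2);

	printf("id=%#x cluster=%#x cpu-bitmap=%#x\n",
	       id, apicid_cluster(id), id & XAPIC_DEST_CPUS_MASK);
	return 0;
}
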
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644
index f3fbca1f61c1..000000000000
--- a/arch/x86/include/asm/summit/apicdef.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SUMMIT_APICDEF_H
-#define __ASM_SUMMIT_APICDEF_H
-
-#define APIC_ID_MASK (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
- return (x>>24)&0xFF;
-}
-
-#define GET_APIC_ID(x) get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644
index a8a2c24f50cc..000000000000
--- a/arch/x86/include/asm/summit/ipi.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_SUMMIT_IPI_H
-#define __ASM_SUMMIT_IPI_H
-
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
-{
- send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
-
- if (!cpus_empty(mask))
- send_IPI_mask(&mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
- send_IPI_mask(&cpu_online_map, vector);
-}
-
-#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644
index 380e86c02363..000000000000
--- a/arch/x86/include/asm/summit/mpparse.h
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef __ASM_SUMMIT_MPPARSE_H
-#define __ASM_SUMMIT_MPPARSE_H
-
-#include <asm/tsc.h>
-
-extern int use_cyclone;
-
-#ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
-#else
-#define setup_summit() {}
-#endif
-
-static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
- char *productid)
-{
- if (!strncmp(oem, "IBM ENSW", 8) &&
- (!strncmp(productid, "VIGIL SMP", 9)
- || !strncmp(productid, "EXA", 3)
- || !strncmp(productid, "RUTHLESS SMP", 12))){
- mark_tsc_unstable("Summit based system");
- use_cyclone = 1; /*enable cyclone-timer*/
- setup_summit();
- return 1;
- }
- return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
- if (!strncmp(oem_id, "IBM", 3) &&
- (!strncmp(oem_table_id, "SERVIGIL", 8)
- || !strncmp(oem_table_id, "EXA", 3))){
- mark_tsc_unstable("Summit based system");
- use_cyclone = 1; /*enable cyclone-timer*/
- setup_summit();
- return 1;
- }
- return 0;
-}
-
-struct rio_table_hdr {
- unsigned char version; /* Version number of this data structure */
- /* Version 3 adds chassis_num & WP_index */
- unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil) */
- unsigned char num_rio_dev; /* # of RIO I/O devices (Cyclones and Winnipegs) */
-} __attribute__((packed));
-
-struct scal_detail {
- unsigned char node_id; /* Scalability Node ID */
- unsigned long CBAR; /* Address of 1MB register space */
- unsigned char port0node; /* Node ID port connected to: 0xFF=None */
- unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port1node; /* Node ID port connected to: 0xFF = None */
- unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port2node; /* Node ID port connected to: 0xFF = None */
- unsigned char port2port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char chassis_num; /* 1 based Chassis number (1 = boot node) */
-} __attribute__((packed));
-
-struct rio_detail {
- unsigned char node_id; /* RIO Node ID */
- unsigned long BBAR; /* Address of 1MB register space */
- unsigned char type; /* Type of device */
- unsigned char owner_id; /* For WPEG: Node ID of Cyclone that owns this WPEG*/
- /* For CYC: Node ID of Twister that owns this CYC */
- unsigned char port0node; /* Node ID port connected to: 0xFF=None */
- unsigned char port0port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char port1node; /* Node ID port connected to: 0xFF=None */
- unsigned char port1port; /* Port num port connected to: 0,1,2, or 0xFF=None */
- unsigned char first_slot; /* For WPEG: Lowest slot number below this WPEG */
- /* For CYC: 0 */
- unsigned char status; /* For WPEG: Bit 0 = 1 : the XAPIC is used */
- /* = 0 : the XAPIC is not used, ie:*/
- /* ints fwded to another XAPIC */
- /* Bits1:7 Reserved */
- /* For CYC: Bits0:7 Reserved */
- unsigned char WP_index; /* For WPEG: WPEG instance index - lower ones have */
- /* lower slot numbers/PCI bus numbers */
- /* For CYC: No meaning */
- unsigned char chassis_num; /* 1 based Chassis number */
- /* For LookOut WPEGs this field indicates the */
- /* Expansion Chassis #, enumerated from Boot */
- /* Node WPEG external port, then Boot Node CYC */
- /* external port, then Next Vigil chassis WPEG */
- /* external port, etc. */
- /* Shared Lookouts have only 1 chassis number (the */
- /* first one assigned) */
-} __attribute__((packed));
-
-
-typedef enum {
- CompatTwister = 0, /* Compatibility Twister */
- AltTwister = 1, /* Alternate Twister of internal 8-way */
- CompatCyclone = 2, /* Compatibility Cyclone */
- AltCyclone = 3, /* Alternate Cyclone of internal 8-way */
- CompatWPEG = 4, /* Compatibility WPEG */
- AltWPEG = 5, /* Second Planar WPEG */
- LookOutAWPEG = 6, /* LookOut WPEG */
- LookOutBWPEG = 7, /* LookOut WPEG */
-} node_type;
-
-static inline int is_WPEG(struct rio_detail *rio){
- return (rio->type == CompatWPEG || rio->type == AltWPEG ||
- rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
-}
-
-#endif /* __ASM_SUMMIT_MPPARSE_H */
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index c0b0bda754ee..7043408f6904 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
/* X86_32 only */
#ifdef CONFIG_X86_32
/* kernel/process_32.c */
-asmlinkage int sys_fork(struct pt_regs);
-asmlinkage int sys_clone(struct pt_regs);
-asmlinkage int sys_vfork(struct pt_regs);
-asmlinkage int sys_execve(struct pt_regs);
+int sys_fork(struct pt_regs *);
+int sys_clone(struct pt_regs *);
+int sys_vfork(struct pt_regs *);
+int sys_execve(struct pt_regs *);
/* kernel/signal_32.c */
asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
struct old_sigaction __user *);
-asmlinkage int sys_sigaltstack(unsigned long);
-asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+int sys_sigaltstack(struct pt_regs *);
+unsigned long sys_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
/* kernel/ioport.c */
-asmlinkage long sys_iopl(unsigned long);
+long sys_iopl(struct pt_regs *);
/* kernel/sys_i386_32.c */
asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
asmlinkage int sys_olduname(struct oldold_utsname __user *);
/* kernel/vm86_32.c */
-asmlinkage int sys_vm86old(struct pt_regs);
-asmlinkage int sys_vm86(struct pt_regs);
+int sys_vm86old(struct pt_regs *);
+int sys_vm86(struct pt_regs *);
#else /* CONFIG_X86_32 */
@@ -74,6 +74,7 @@ asmlinkage long sys_vfork(struct pt_regs *);
asmlinkage long sys_execve(char __user *, char __user * __user *,
char __user * __user *,
struct pt_regs *);
+long sys_arch_prctl(int, unsigned long);
/* kernel/ioport.c */
asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
@@ -81,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
/* kernel/signal_64.c */
asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
struct pt_regs *);
-asmlinkage long sys_rt_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
/* kernel/sys_x86_64.c */
asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..c00bfdbdd456 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -23,6 +23,20 @@ struct task_struct *__switch_to(struct task_struct *prev,
#ifdef CONFIG_X86_32
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movl %P[task_canary](%[next]), %%ebx\n\t" \
+ "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
/*
* Saving eflags is important. It switches not only IOPL between tasks,
* it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +58,7 @@ do { \
"movl %[next_sp],%%esp\n\t" /* restore ESP */ \
"movl $1f,%[prev_ip]\n\t" /* save EIP */ \
"pushl %[next_ip]\n\t" /* restore EIP */ \
+ __switch_canary \
"jmp __switch_to\n" /* regparm call */ \
"1:\t" \
"popl %%ebp\n\t" /* restore EBP */ \
@@ -58,6 +73,8 @@ do { \
"=b" (ebx), "=c" (ecx), "=d" (edx), \
"=S" (esi), "=D" (edi) \
\
+ __switch_canary_oparam \
+ \
/* input parameters: */ \
: [next_sp] "m" (next->thread.sp), \
[next_ip] "m" (next->thread.ip), \
@@ -66,6 +83,8 @@ do { \
[prev] "a" (prev), \
[next] "d" (next) \
\
+ __switch_canary_iparam \
+ \
: /* reloaded segment registers */ \
"memory"); \
} while (0)
@@ -86,27 +105,44 @@ do { \
, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
"r12", "r13", "r14", "r15"
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary \
+ "movq %P[task_canary](%%rsi),%%r8\n\t" \
+ "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam \
+ , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam \
+ , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
/* Save restore flags to clear handle leaking NT */
#define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
+ asm volatile(SAVE_CONTEXT \
"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
"movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
"call __switch_to\n\t" \
".globl thread_return\n" \
"thread_return:\n\t" \
- "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
- LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
- "jc ret_from_fork\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
+ __switch_canary_oparam \
: [next] "S" (next), [prev] "D" (prev), \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [tif_fork] "i" (TIF_FORK), \
+ [_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
- [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ [current_task] "m" (per_cpu_var(current_task)) \
+ __switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
@@ -165,6 +201,25 @@ extern void native_load_gs_index(unsigned);
#define savesegment(seg, value) \
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs) (u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk) ((tsk)->thread.gs)
+#define lazy_save_gs(v) savesegment(gs, (v))
+#define lazy_load_gs(v) loadsegment(gs, (v))
+#else /* X86_32_LAZY_GS */
+#define get_user_gs(regs) (u16)((regs)->gs)
+#define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v) do { } while (0)
+#define lazy_load_gs(v) do { } while (0)
+#endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
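A hedged usage sketch for the accessors above; the function and its role are invented for illustration, but it shows why callers no longer need to know whether the live %gs or the saved pt_regs slot holds the user value:

/*
 * Hypothetical caller, not from this patch: copying the user %gs
 * between two register frames of the current task works identically
 * whether CONFIG_X86_32_LAZY_GS keeps the value live in %gs or saved
 * in pt_regs.
 */
static void example_copy_user_gs(struct pt_regs *src, struct pt_regs *dst)
{
	u16 gs = get_user_gs(src);	/* reads %gs or src->gs */

	set_user_gs(dst, gs);		/* writes %gs or dst->gs */
}
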
static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 98789647baa9..df9d5f78385e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,6 +40,7 @@ struct thread_info {
*/
__u8 supervisor_stack[0];
#endif
+ int uaccess_err;
};
#define INIT_THREAD_INFO(tsk) \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
#else /* X86_32 */
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
/*
* macros/functions for gaining access to the thread information structure
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
- return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+ ti = (void *)(percpu_read(kernel_stack) +
+ KERNEL_STACK_OFFSET - THREAD_SIZE);
return ti;
}
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
- movq %gs:pda_kernelstack,reg ; \
- subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+ movq PER_CPU_VAR(kernel_stack),reg ; \
+ subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
#endif
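
The new lookup is the same address arithmetic the old PDA version did, with the percpu kernel_stack pointer already biased by KERNEL_STACK_OFFSET. A standalone model of the math; THREAD_SIZE is assumed to be the era's 8K x86_64 stacks.

/*
 * kernel_stack points KERNEL_STACK_OFFSET below the stack top, so
 * adding the offset back and subtracting THREAD_SIZE lands on the
 * thread_info at the bottom of the stack.
 */
#include <stdio.h>

#define THREAD_SIZE		(8 * 1024)	/* assumed */
#define KERNEL_STACK_OFFSET	(5 * 8)

int main(void)
{
	unsigned long stack_base   = 0x12340000UL;	/* example address */
	unsigned long kernel_stack = stack_base + THREAD_SIZE
				     - KERNEL_STACK_OFFSET;

	/* the computation in current_thread_info(): */
	unsigned long ti = kernel_stack + KERNEL_STACK_OFFSET - THREAD_SIZE;

	printf("thread_info at %#lx (equals stack base: %s)\n",
	       ti, ti == stack_base ? "yes" : "no");
	return 0;
}
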
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 2bb6a835c453..a81195eaa2b3 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
+#include <linux/interrupt.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -12,6 +13,7 @@ unsigned long native_calibrate_tsc(void);
#ifdef CONFIG_X86_32
extern int timer_ack;
extern int recalibrate_cpu_khz(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id);
#endif /* CONFIG_X86_32 */
extern int no_timer_check;
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0e7bbb549116..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va)
{
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
flush_tlb_mm(vma->vm_mm);
}
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
- unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm, unsigned long va);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
-#ifdef CONFIG_X86_32
struct tlb_state {
struct mm_struct *active_mm;
int state;
- char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-void reset_lazy_tlbstate(void);
-#else
static inline void reset_lazy_tlbstate(void)
{
+ percpu_write(cpu_tlbstate.state, 0);
+ percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
-#endif
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
#endif
static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_all();
}
+extern void zap_low_mappings(void);
+
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4e2f2e0aab27..77cfb2cfb386 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
return &node_to_cpumask_map[node];
}
+static inline void setup_node_to_cpumask_map(void) { }
+
#else /* CONFIG_X86_64 */
/* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
/* Returns the number of the current Node. */
-#define numa_node_id() read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id() percpu_read(node_number)
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
- if (early_per_cpu_ptr(x86_cpu_to_node_map))
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
- return per_cpu(x86_cpu_to_node_map, cpu);
+ return early_per_cpu(x86_cpu_to_node_map, cpu);
}
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+extern void setup_node_to_cpumask_map(void);
+
/*
* Replace default node_to_cpumask_ptr with optimized version
* Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
#else /* !CONFIG_NUMA */
-#define numa_node_id() 0
-#define cpu_to_node(cpu) 0
-#define early_cpu_to_node(cpu) 0
+static inline int numa_node_id(void)
+{
+ return 0;
+}
+
+static inline int cpu_to_node(int cpu)
+{
+ return 0;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+ return 0;
+}
static inline const cpumask_t *cpumask_of_node(int node)
{
@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
return first_cpu(cpu_online_map);
}
+static inline void setup_node_to_cpumask_map(void) { }
+
/*
* Replace default node_to_cpumask_ptr with optimized version
* Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 780ba0ab94f9..90f06c25221d 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
+extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index cf3bb053da0b..0d5342515b86 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
dotraplinkage void do_overflow(struct pt_regs *, long);
dotraplinkage void do_bounds(struct pt_regs *, long);
dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs);
+dotraplinkage void do_device_not_available(struct pt_regs *, long);
dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
dotraplinkage void do_segment_not_present(struct pt_regs *, long);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 4340055b7559..b685ece89d5c 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
#define __get_user_x(size, ret, x, ptr) \
asm volatile("call __get_user_" #size \
- : "=a" (ret),"=d" (x) \
+ : "=a" (ret), "=d" (x) \
: "0" (ptr)) \
/* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
#define __put_user_x(size, x, ptr, __ret_pu) \
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
- :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err) \
+#define __put_user_asm_u64(x, addr, err, errret) \
asm volatile("1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
"3:\n" \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=r" (err) \
- : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr) \
+ asm volatile("1: movl %%eax,0(%1)\n" \
+ "2: movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+ : : "A" (x), "r" (addr))
#define __put_user_x8(x, ptr, __ret_pu) \
asm volatile("call __put_user_8" : "=a" (__ret_pu) \
: "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
-#define __put_user_u64(x, ptr, retval) \
- __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+ __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+#define __put_user_asm_ex_u64(x, addr) \
+ __put_user_asm_ex(x, addr, "q", "", "Zr")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
@@ -276,10 +286,32 @@ do { \
__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
break; \
case 4: \
- __put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
+ __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
- __put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
+ __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
+ errret); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ } \
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size) \
+do { \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __put_user_asm_ex(x, ptr, "b", "b", "iq"); \
+ break; \
+ case 2: \
+ __put_user_asm_ex(x, ptr, "w", "w", "ir"); \
+ break; \
+ case 4: \
+ __put_user_asm_ex(x, ptr, "l", "k", "ir"); \
+ break; \
+ case 8: \
+ __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr); \
break; \
default: \
__put_user_bad(); \
@@ -311,9 +343,12 @@ do { \
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+ __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret) \
@@ -350,6 +385,33 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
+#define __get_user_size_ex(x, ptr, size) \
+do { \
+ __chk_user_ptr(ptr); \
+ switch (size) { \
+ case 1: \
+ __get_user_asm_ex(x, ptr, "b", "b", "=q"); \
+ break; \
+ case 2: \
+ __get_user_asm_ex(x, ptr, "w", "w", "=r"); \
+ break; \
+ case 4: \
+ __get_user_asm_ex(x, ptr, "l", "k", "=r"); \
+ break; \
+ case 8: \
+ __get_user_asm_ex_u64(x, ptr); \
+ break; \
+ default: \
+ (x) = __get_user_bad(); \
+ } \
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+ asm volatile("1: mov"itype" %1,%"rtype"0\n" \
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
: ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+ asm volatile("1: mov"itype" %"rtype"0,%1\n" \
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try do { \
+ int prev_err = current_thread_info()->uaccess_err; \
+ current_thread_info()->uaccess_err = 0; \
+ barrier();
+
+#define uaccess_catch(err) \
+ (err) |= current_thread_info()->uaccess_err; \
+ current_thread_info()->uaccess_err = prev_err; \
+} while (0)
+
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
#define __get_user(x, ptr) \
__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
@@ -435,6 +518,45 @@ struct __large_struct { unsigned long buf[100]; };
#define __put_user_unaligned __put_user
/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ * get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try uaccess_try
+#define get_user_catch(err) uaccess_catch(err)
+
+#define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+ (x) = (__force __typeof__(*(ptr)))__gue_val; \
+} while (0)
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user_try uaccess_try
+#define put_user_catch(err) uaccess_catch(err)
+
+#define put_user_ex(x, ptr) \
+ __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#else /* !CONFIG_X86_WP_WORKS_OK */
+
+#define put_user_try do { \
+ int __uaccess_err = 0;
+
+#define put_user_catch(err) \
+ (err) |= __uaccess_err; \
+} while (0)
+
+#define put_user_ex(x, ptr) do { \
+ __uaccess_err |= __put_user(x, ptr); \
+} while (0)
+
+#endif /* CONFIG_X86_WP_WORKS_OK */
+
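Putting the pieces together, a hedged sketch of a caller in the style the comment above suggests; the function name and frame layout are invented. Several user reads share a single fault check instead of testing every __get_user() return value.

/*
 * Hypothetical caller: on a fault the extable fixup skips to the
 * next instruction and the error is accumulated in uaccess_err,
 * which get_user_catch() folds into err.
 */
static int restore_two_words(const unsigned long __user *frame,
			     unsigned long *a, unsigned long *b)
{
	int err = 0;

	get_user_try {
		get_user_ex(*a, &frame[0]);
		get_user_ex(*b, &frame[1]);
	} get_user_catch(err);

	return err;
}
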
+/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
#ifdef CONFIG_X86_INTEL_USERCOPY
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 84210c479fca..987a2c10fe20 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -192,14 +192,26 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
unsigned size)
{
might_sleep();
- return __copy_user_nocache(dst, src, size, 1);
+ /*
+ * In practice this limit means that large file write()s
+ * which get chunked to 4K copies get handled via
+ * non-temporal stores here. Smaller writes get handled
+ * via regular __copy_from_user():
+ */
+ if (likely(size >= PAGE_SIZE))
+ return __copy_user_nocache(dst, src, size, 1);
+ else
+ return __copy_from_user(dst, src, size);
}
static inline int __copy_from_user_inatomic_nocache(void *dst,
const void __user *src,
unsigned size)
{
- return __copy_user_nocache(dst, src, size, 0);
+ if (likely(size >= PAGE_SIZE))
+ return __copy_user_nocache(dst, src, size, 0);
+ else
+ return __copy_from_user_inatomic(dst, src, size);
}
unsigned long
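
The PAGE_SIZE cutoff above encodes a heuristic: non-temporal stores only pay off when the destination is unlikely to be re-read soon, which is assumed for page-sized-and-larger chunks. A trivial standalone model of the dispatch, with memcpy standing in for both copy routines:

/*
 * Illustrative only; copy_nocache() stands in for the movnti-based
 * __copy_user_nocache() and plain memcpy for the cached path.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void copy_nocache(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);		/* streaming, cache-bypassing copy */
}

static void copy_dispatch(void *dst, const void *src, size_t size)
{
	if (size >= PAGE_SIZE)
		copy_nocache(dst, src, size);
	else
		memcpy(dst, src, size);	/* ordinary cached copy */
}

int main(void)
{
	static char src[2 * PAGE_SIZE], dst[2 * PAGE_SIZE];

	copy_dispatch(dst, src, sizeof(src));	/* takes the nocache branch */
	copy_dispatch(dst, src, 64);		/* takes the cached branch */
	printf("ok\n");
	return 0;
}
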
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644
index 000000000000..8242bf965812
--- /dev/null
+++ b/arch/x86/include/asm/uv/uv.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+struct cpumask;
+struct mm_struct;
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_system_init(void);
+extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
+ unsigned long va,
+ unsigned int cpu);
+
+#else /* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void) { return 0; }
+static inline void uv_cpu_init(void) { }
+static inline void uv_system_init(void) { }
+static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
+{ return 1; }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+ unsigned long va, unsigned int cpu)
+{ return cpumask; }
+
+#endif /* X86_UV */
+
+#endif /* _ASM_X86_UV_UV_H */
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 50423c7b56b2..9b0e61bf7a88 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
#define cpubit_isset(cpu, bau_local_cpumask) \
test_bit((cpu), (bau_local_cpumask).bits)
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
diff --git a/arch/x86/include/asm/vic.h b/arch/x86/include/asm/vic.h
deleted file mode 100644
index 53100f353612..000000000000
--- a/arch/x86/include/asm/vic.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * Standard include definitions for the NCR Voyager Interrupt Controller */
-
-/* The eight CPI vectors. To activate a CPI, you write a bit mask
- * corresponding to the processor set to be interrupted into the
- * relevant register. That set of CPUs will then be interrupted with
- * the CPI */
-static const int VIC_CPI_Registers[] =
- {0xFC00, 0xFC01, 0xFC08, 0xFC09,
- 0xFC10, 0xFC11, 0xFC18, 0xFC19 };
-
-#define VIC_PROC_WHO_AM_I 0xfc29
-# define QUAD_IDENTIFIER 0xC0
-# define EIGHT_SLOT_IDENTIFIER 0xE0
-#define QIC_EXTENDED_PROCESSOR_SELECT 0xFC72
-#define VIC_CPI_BASE_REGISTER 0xFC41
-#define VIC_PROCESSOR_ID 0xFC21
-# define VIC_CPU_MASQUERADE_ENABLE 0x8
-
-#define VIC_CLAIM_REGISTER_0 0xFC38
-#define VIC_CLAIM_REGISTER_1 0xFC39
-#define VIC_REDIRECT_REGISTER_0 0xFC60
-#define VIC_REDIRECT_REGISTER_1 0xFC61
-#define VIC_PRIORITY_REGISTER 0xFC20
-
-#define VIC_PRIMARY_MC_BASE 0xFC48
-#define VIC_SECONDARY_MC_BASE 0xFC49
-
-#define QIC_PROCESSOR_ID 0xFC71
-# define QIC_CPUID_ENABLE 0x08
-
-#define QIC_VIC_CPI_BASE_REGISTER 0xFC79
-#define QIC_CPI_BASE_REGISTER 0xFC7A
-
-#define QIC_MASK_REGISTER0 0xFC80
-/* NOTE: these are masked high, enabled low */
-# define QIC_PERF_TIMER 0x01
-# define QIC_LPE 0x02
-# define QIC_SYS_INT 0x04
-# define QIC_CMN_INT 0x08
-/* at the moment, just enable CMN_INT, disable SYS_INT */
-# define QIC_DEFAULT_MASK0 (~(QIC_CMN_INT /* | VIC_SYS_INT */))
-#define QIC_MASK_REGISTER1 0xFC81
-# define QIC_BOOT_CPI_MASK 0xFE
-/* Enable CPI's 1-6 inclusive */
-# define QIC_CPI_ENABLE 0x81
-
-#define QIC_INTERRUPT_CLEAR0 0xFC8A
-#define QIC_INTERRUPT_CLEAR1 0xFC8B
-
-/* this is where we place the CPI vectors */
-#define VIC_DEFAULT_CPI_BASE 0xC0
-/* this is where we place the QIC CPI vectors */
-#define QIC_DEFAULT_CPI_BASE 0xD0
-
-#define VIC_BOOT_INTERRUPT_MASK 0xfe
-
-extern void smp_vic_timer_interrupt(void);
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
deleted file mode 100644
index b3e647307625..000000000000
--- a/arch/x86/include/asm/voyager.h
+++ /dev/null
@@ -1,529 +0,0 @@
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * Standard include definitions for the NCR Voyager system */
-
-#undef VOYAGER_DEBUG
-#undef VOYAGER_CAT_DEBUG
-
-#ifdef VOYAGER_DEBUG
-#define VDEBUG(x) printk x
-#else
-#define VDEBUG(x)
-#endif
-
-/* There are three levels of voyager machine: 3,4 and 5. The rule is
- * if it's less than 3435 it's a Level 3 except for a 3360 which is
- * a level 4. A 3435 or above is a Level 5 */
-#define VOYAGER_LEVEL5_AND_ABOVE 0x3435
-#define VOYAGER_LEVEL4 0x3360
-
-/* The L4 DINO ASIC */
-#define VOYAGER_DINO 0x43
-
-/* voyager ports in standard I/O space */
-#define VOYAGER_MC_SETUP 0x96
-
-
-#define VOYAGER_CAT_CONFIG_PORT 0x97
-# define VOYAGER_CAT_DESELECT 0xff
-#define VOYAGER_SSPB_RELOCATION_PORT 0x98
-
-/* Valid CAT controller commands */
-/* start instruction register cycle */
-#define VOYAGER_CAT_IRCYC 0x01
-/* start data register cycle */
-#define VOYAGER_CAT_DRCYC 0x02
-/* move to execute state */
-#define VOYAGER_CAT_RUN 0x0F
-/* end operation */
-#define VOYAGER_CAT_END 0x80
-/* hold in idle state */
-#define VOYAGER_CAT_HOLD 0x90
-/* single step an "intest" vector */
-#define VOYAGER_CAT_STEP 0xE0
-/* return cat controller to CLEMSON mode */
-#define VOYAGER_CAT_CLEMSON 0xFF
-
-/* the default cat command header */
-#define VOYAGER_CAT_HEADER 0x7F
-
-/* the range of possible CAT module ids in the system */
-#define VOYAGER_MIN_MODULE 0x10
-#define VOYAGER_MAX_MODULE 0x1f
-
-/* The voyager registers per asic */
-#define VOYAGER_ASIC_ID_REG 0x00
-#define VOYAGER_ASIC_TYPE_REG 0x01
-/* the sub address registers can be made auto incrementing on reads */
-#define VOYAGER_AUTO_INC_REG 0x02
-# define VOYAGER_AUTO_INC 0x04
-# define VOYAGER_NO_AUTO_INC 0xfb
-#define VOYAGER_SUBADDRDATA 0x03
-#define VOYAGER_SCANPATH 0x05
-# define VOYAGER_CONNECT_ASIC 0x01
-# define VOYAGER_DISCONNECT_ASIC 0xfe
-#define VOYAGER_SUBADDRLO 0x06
-#define VOYAGER_SUBADDRHI 0x07
-#define VOYAGER_SUBMODSELECT 0x08
-#define VOYAGER_SUBMODPRESENT 0x09
-
-#define VOYAGER_SUBADDR_LO 0xff
-#define VOYAGER_SUBADDR_HI 0xffff
-
-/* the maximum size of a scan path -- used to form instructions */
-#define VOYAGER_MAX_SCAN_PATH 0x100
-/* the biggest possible register size (in bytes) */
-#define VOYAGER_MAX_REG_SIZE 4
-
-/* Total number of possible modules (including submodules) */
-#define VOYAGER_MAX_MODULES 16
-/* Largest number of asics per module */
-#define VOYAGER_MAX_ASICS_PER_MODULE 7
-
-/* the CAT asic of each module is always the first one */
-#define VOYAGER_CAT_ID 0
-#define VOYAGER_PSI 0x1a
-
-/* voyager instruction operations and registers */
-#define VOYAGER_READ_CONFIG 0x1
-#define VOYAGER_WRITE_CONFIG 0x2
-#define VOYAGER_BYPASS 0xff
-
-typedef struct voyager_asic {
- __u8 asic_addr; /* ASIC address; Level 4 */
- __u8 asic_type; /* ASIC type */
- __u8 asic_id; /* ASIC id */
- __u8 jtag_id[4]; /* JTAG id */
- __u8 asic_location; /* Location within scan path; start w/ 0 */
- __u8 bit_location; /* Location within bit stream; start w/ 0 */
- __u8 ireg_length; /* Instruction register length */
- __u16 subaddr; /* Amount of sub address space */
- struct voyager_asic *next; /* Next asic in linked list */
-} voyager_asic_t;
-
-typedef struct voyager_module {
- __u8 module_addr; /* Module address */
- __u8 scan_path_connected; /* Scan path connected */
- __u16 ee_size; /* Size of the EEPROM */
- __u16 num_asics; /* Number of Asics */
- __u16 inst_bits; /* Instruction bits in the scan path */
- __u16 largest_reg; /* Largest register in the scan path */
- __u16 smallest_reg; /* Smallest register in the scan path */
- voyager_asic_t *asic; /* First ASIC in scan path (CAT_I) */
- struct voyager_module *submodule; /* Submodule pointer */
- struct voyager_module *next; /* Next module in linked list */
-} voyager_module_t;
-
-typedef struct voyager_eeprom_hdr {
- __u8 module_id[4];
- __u8 version_id;
- __u8 config_id;
- __u16 boundry_id; /* boundary scan id */
- __u16 ee_size; /* size of EEPROM */
- __u8 assembly[11]; /* assembly # */
- __u8 assembly_rev; /* assembly rev */
- __u8 tracer[4]; /* tracer number */
- __u16 assembly_cksum; /* asm checksum */
- __u16 power_consump; /* pwr requirements */
- __u16 num_asics; /* number of asics */
- __u16 bist_time; /* min. bist time */
- __u16 err_log_offset; /* error log offset */
- __u16 scan_path_offset;/* scan path offset */
- __u16 cct_offset;
- __u16 log_length; /* length of err log */
- __u16 xsum_end; /* offset to end of
- checksum */
- __u8 reserved[4];
-	__u8  sflag;			/* starting sentinel */
- __u8 part_number[13]; /* prom part number */
- __u8 version[10]; /* version number */
- __u8 signature[8];
- __u16 eeprom_chksum;
- __u32 data_stamp_offset;
-	__u8  eflag;			/* ending sentinel */
-} __attribute__((packed)) voyager_eprom_hdr_t;
-
-
-
-#define VOYAGER_EPROM_SIZE_OFFSET \
- ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
-#define VOYAGER_XSUM_END_OFFSET 0x2a
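The null-pointer cast above is the classic hand-rolled offsetof(); an equivalent modern spelling, shown only for illustration:

#include <linux/stddef.h>

/* illustrative equivalent of VOYAGER_EPROM_SIZE_OFFSET above */
#define VOYAGER_EPROM_SIZE_OFFSET_ALT \
	((__u16)offsetof(voyager_eprom_hdr_t, ee_size))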
-
-/* the following three definitions are for internal table layouts
- * in the module EPROMs. We really only care about the IDs and
- * offsets */
-typedef struct voyager_sp_table {
- __u8 asic_id;
- __u8 bypass_flag;
- __u16 asic_data_offset;
- __u16 config_data_offset;
-} __attribute__((packed)) voyager_sp_table_t;
-
-typedef struct voyager_jtag_table {
- __u8 icode[4];
- __u8 runbist[4];
- __u8 intest[4];
- __u8 samp_preld[4];
- __u8 ireg_len;
-} __attribute__((packed)) voyager_jtt_t;
-
-typedef struct voyager_asic_data_table {
- __u8 jtag_id[4];
- __u16 length_bsr;
- __u16 length_bist_reg;
- __u32 bist_clk;
- __u16 subaddr_bits;
- __u16 seed_bits;
- __u16 sig_bits;
- __u16 jtag_offset;
-} __attribute__((packed)) voyager_at_t;
-
-/* Voyager Interrupt Controller (VIC) registers */
-
-/* Base to add to Cross Processor Interrupts (CPIs) when triggering
- * the CPU IRQ line */
-/* register defines for the WCBICs (one per processor) */
-#define VOYAGER_WCBIC0 0x41 /* bus A node P1 processor 0 */
-#define VOYAGER_WCBIC1 0x49 /* bus A node P1 processor 1 */
-#define VOYAGER_WCBIC2 0x51 /* bus A node P2 processor 0 */
-#define VOYAGER_WCBIC3 0x59 /* bus A node P2 processor 1 */
-#define VOYAGER_WCBIC4 0x61 /* bus B node P1 processor 0 */
-#define VOYAGER_WCBIC5 0x69 /* bus B node P1 processor 1 */
-#define VOYAGER_WCBIC6 0x71 /* bus B node P2 processor 0 */
-#define VOYAGER_WCBIC7 0x79 /* bus B node P2 processor 1 */
-
-
-/* top of memory registers */
-#define VOYAGER_WCBIC_TOM_L 0x4
-#define VOYAGER_WCBIC_TOM_H 0x5
-
-/* register defines for Voyager Memory Control (VMC)
- * these are present on L4 machines only */
-#define VOYAGER_VMC1 0x81
-#define VOYAGER_VMC2 0x91
-#define VOYAGER_VMC3 0xa1
-#define VOYAGER_VMC4 0xb1
-
-/* VMC Ports */
-#define VOYAGER_VMC_MEMORY_SETUP 0x9
-# define VMC_Interleaving 0x01
-# define VMC_4Way 0x02
-# define VMC_EvenCacheLines 0x04
-# define VMC_HighLine 0x08
-# define VMC_Start0_Enable 0x20
-# define VMC_Start1_Enable 0x40
-# define VMC_Vremap 0x80
-#define VOYAGER_VMC_BANK_DENSITY 0xa
-# define VMC_BANK_EMPTY 0
-# define VMC_BANK_4MB 1
-# define VMC_BANK_16MB 2
-# define VMC_BANK_64MB 3
-# define VMC_BANK0_MASK 0x03
-# define VMC_BANK1_MASK 0x0C
-# define VMC_BANK2_MASK 0x30
-# define VMC_BANK3_MASK 0xC0
-
-/* Magellan Memory Controller (MMC) defines - present on L5 */
-#define VOYAGER_MMC_ASIC_ID 1
-/* the two memory modules corresponding to memory cards in the system */
-#define VOYAGER_MMC_MEMORY0_MODULE 0x14
-#define VOYAGER_MMC_MEMORY1_MODULE 0x15
-/* the Magellan Memory Address (MMA) defines */
-#define VOYAGER_MMA_ASIC_ID 2
-
-/* Submodule number for the Quad Baseboard */
-#define VOYAGER_QUAD_BASEBOARD 1
-
-/* ASIC defines for the Quad Baseboard */
-#define VOYAGER_QUAD_QDATA0 1
-#define VOYAGER_QUAD_QDATA1 2
-#define VOYAGER_QUAD_QABC 3
-
-/* Useful areas in extended CMOS */
-#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
-#define VOYAGER_MEMORY_CLICKMAP 0xa23
-#define VOYAGER_DUMP_LOCATION 0xb1a
-
-/* SUS In Control bit - used to tell SUS that we don't need to be
- * babysat anymore */
-#define VOYAGER_SUS_IN_CONTROL_PORT 0x3ff
-# define VOYAGER_IN_CONTROL_FLAG 0x80
-
-/* Voyager PSI defines */
-#define VOYAGER_PSI_STATUS_REG 0x08
-# define PSI_DC_FAIL 0x01
-# define PSI_MON 0x02
-# define PSI_FAULT 0x04
-# define PSI_ALARM 0x08
-# define PSI_CURRENT 0x10
-# define PSI_DVM 0x20
-# define PSI_PSCFAULT 0x40
-# define PSI_STAT_CHG 0x80
-
-#define VOYAGER_PSI_SUPPLY_REG 0x8000
- /* read */
-# define PSI_FAIL_DC 0x01
-# define PSI_FAIL_AC 0x02
-# define PSI_MON_INT 0x04
-# define PSI_SWITCH_OFF 0x08
-# define PSI_HX_OFF 0x10
-# define PSI_SECURITY 0x20
-# define PSI_CMOS_BATT_LOW 0x40
-# define PSI_CMOS_BATT_FAIL 0x80
- /* write */
-# define PSI_CLR_SWITCH_OFF 0x13
-# define PSI_CLR_HX_OFF 0x14
-# define PSI_CLR_CMOS_BATT_FAIL 0x17
-
-#define VOYAGER_PSI_MASK 0x8001
-# define PSI_MASK_MASK 0x10
-
-#define VOYAGER_PSI_AC_FAIL_REG 0x8004
-#define AC_FAIL_STAT_CHANGE 0x80
-
-#define VOYAGER_PSI_GENERAL_REG 0x8007
- /* read */
-# define PSI_SWITCH_ON 0x01
-# define PSI_SWITCH_ENABLED 0x02
-# define PSI_ALARM_ENABLED 0x08
-# define PSI_SECURE_ENABLED 0x10
-# define PSI_COLD_RESET 0x20
-# define PSI_COLD_START 0x80
- /* write */
-# define PSI_POWER_DOWN 0x10
-# define PSI_SWITCH_DISABLE 0x01
-# define PSI_SWITCH_ENABLE 0x11
-# define PSI_CLEAR 0x12
-# define PSI_ALARM_DISABLE 0x03
-# define PSI_ALARM_ENABLE 0x13
-# define PSI_CLEAR_COLD_RESET 0x05
-# define PSI_SET_COLD_RESET 0x15
-# define PSI_CLEAR_COLD_START 0x07
-# define PSI_SET_COLD_START 0x17
-
-
-
-struct voyager_bios_info {
- __u8 len;
- __u8 major;
- __u8 minor;
- __u8 debug;
- __u8 num_classes;
- __u8 class_1;
- __u8 class_2;
-};
-
-/* The following structures and definitions are for the Kernel/SUS
- * interface; these are needed to find out how SUS initialised any
- * Quad boards in the system */
-
-#define NUMBER_OF_MC_BUSSES 2
-#define SLOTS_PER_MC_BUS 8
-#define MAX_CPUS 16 /* 16 way CPU system */
-#define MAX_PROCESSOR_BOARDS 4 /* 4 processor slot system */
-#define MAX_CACHE_LEVELS 4 /* # of cache levels supported */
-#define MAX_SHARED_CPUS 4 /* # of CPUs that can share a LARC */
-#define NUMBER_OF_POS_REGS 8
-
-typedef struct {
- __u8 MC_Slot;
- __u8 POS_Values[NUMBER_OF_POS_REGS];
-} __attribute__((packed)) MC_SlotInformation_t;
-
-struct QuadDescription {
- __u8 Type; /* for type 0 (DYADIC or MONADIC) all fields
- * will be zero except for slot */
- __u8 StructureVersion;
- __u32 CPI_BaseAddress;
- __u32 LARC_BankSize;
- __u32 LocalMemoryStateBits;
- __u8 Slot; /* Processor slots 1 - 4 */
-} __attribute__((packed));
-
-struct ProcBoardInfo {
- __u8 Type;
- __u8 StructureVersion;
- __u8 NumberOfBoards;
- struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
-} __attribute__((packed));
-
-struct CacheDescription {
- __u8 Level;
- __u32 TotalSize;
- __u16 LineSize;
- __u8 Associativity;
- __u8 CacheType;
- __u8 WriteType;
- __u8 Number_CPUs_SharedBy;
- __u8 Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
-
-} __attribute__((packed));
-
-struct CPU_Description {
- __u8 CPU_HardwareId;
- char *FRU_String;
- __u8 NumberOfCacheLevels;
- struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
-} __attribute__((packed));
-
-struct CPU_Info {
- __u8 Type;
- __u8 StructureVersion;
- __u8 NumberOf_CPUs;
- struct CPU_Description CPU_Data[MAX_CPUS];
-} __attribute__((packed));
-
-
-/*
- * This structure will be used by SUS and the OS.
- * The assumption about this structure is that no blank space is
- * packed in it by our friend the compiler.
- */
-typedef struct {
- __u8 Mailbox_SUS; /* Written to by SUS to give
- commands/response to the OS */
- __u8 Mailbox_OS; /* Written to by the OS to give
- commands/response to SUS */
- __u8 SUS_MailboxVersion; /* Tells the OS which iteration of the
- interface SUS supports */
- __u8 OS_MailboxVersion; /* Tells SUS which iteration of the
- interface the OS supports */
- __u32 OS_Flags; /* Flags set by the OS as info for
- SUS */
- __u32 SUS_Flags; /* Flags set by SUS as info
- for the OS */
- __u32 WatchDogPeriod; /* Watchdog period (in seconds) which
- the DP uses to see if the OS
- is dead */
-	__u32	WatchDogCount;		/* Updated by the OS on every tick. */
- __u32 MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS
- where to stuff the SUS error log
- on a dump */
- MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
- /* Storage for MCA POS data */
- /* All new SECOND_PASS_INTERFACE fields added from this point */
- struct ProcBoardInfo *BoardData;
- struct CPU_Info *CPU_Data;
- /* All new fields must be added from this point */
-} Voyager_KernelSUS_Mbox_t;
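A sketch of the heartbeat the watchdog comments describe: the OS bumps the count every tick so the DP sees progress within WatchDogPeriod seconds (helper name hypothetical):

/* hypothetical per-tick heartbeat toward SUS */
static void example_sus_heartbeat(Voyager_KernelSUS_Mbox_t *mbox)
{
	mbox->WatchDogCount++;
}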
-
-/* structure for finding the right memory address to send a QIC CPI to */
-struct voyager_qic_cpi {
- /* Each cache line (32 bytes) can trigger a cpi. The cpi
- * read/write may occur anywhere in the cache line---pick the
- * middle to be safe */
- struct {
- __u32 pad1[3];
- __u32 cpi;
- __u32 pad2[4];
- } qic_cpi[8];
-};
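A sketch of how this layout is used, assuming the platform code has filled voyager_quad_cpi_addr[] (declared further down) with uncached mappings; the volatile access keeps the triggering read from being optimised away:

/* hypothetical: fire QIC CPI 'cpi' on quad CPU 'cpu' by touching the
 * middle word of that CPU's cache line, per the struct above */
static void example_send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
	(void)*(volatile __u32 *)&voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}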
-
-struct voyager_status {
- __u32 power_fail:1;
- __u32 switch_off:1;
- __u32 request_from_kernel:1;
-};
-
-struct voyager_psi_regs {
- __u8 cat_id;
- __u8 cat_dev;
- __u8 cat_control;
- __u8 subaddr;
- __u8 dummy4;
- __u8 checkbit;
- __u8 subaddr_low;
- __u8 subaddr_high;
- __u8 intstatus;
- __u8 stat1;
- __u8 stat3;
- __u8 fault;
- __u8 tms;
- __u8 gen;
- __u8 sysconf;
- __u8 dummy15;
-};
-
-struct voyager_psi_subregs {
- __u8 supply;
- __u8 mask;
- __u8 present;
- __u8 DCfail;
- __u8 ACfail;
- __u8 fail;
- __u8 UPSfail;
- __u8 genstatus;
-};
-
-struct voyager_psi {
- struct voyager_psi_regs regs;
- struct voyager_psi_subregs subregs;
-};
-
-struct voyager_SUS {
-#define VOYAGER_DUMP_BUTTON_NMI 0x1
-#define VOYAGER_SUS_VALID 0x2
-#define VOYAGER_SYSINT_COMPLETE 0x3
- __u8 SUS_mbox;
-#define VOYAGER_NO_COMMAND 0x0
-#define VOYAGER_IGNORE_DUMP 0x1
-#define VOYAGER_DO_DUMP 0x2
-#define VOYAGER_SYSINT_HANDSHAKE 0x3
-#define VOYAGER_DO_MEM_DUMP 0x4
-#define VOYAGER_SYSINT_WAS_RECOVERED 0x5
- __u8 kernel_mbox;
-#define VOYAGER_MAILBOX_VERSION 0x10
- __u8 SUS_version;
- __u8 kernel_version;
-#define VOYAGER_OS_HAS_SYSINT 0x1
-#define VOYAGER_OS_IN_PROGRESS 0x2
-#define VOYAGER_UPDATING_WDPERIOD 0x4
- __u32 kernel_flags;
-#define VOYAGER_SUS_BOOTING 0x1
-#define VOYAGER_SUS_IN_PROGRESS 0x2
- __u32 SUS_flags;
- __u32 watchdog_period;
- __u32 watchdog_count;
- __u32 SUS_errorlog;
- /* lots of system configuration stuff under here */
-};
-
-/* Variables exported by voyager_smp */
-extern __u32 voyager_extended_vic_processors;
-extern __u32 voyager_allowed_boot_processors;
-extern __u32 voyager_quad_processors;
-extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
-extern struct voyager_SUS *voyager_SUS;
-
-/* variables exported always */
-extern struct task_struct *voyager_thread;
-extern int voyager_level;
-extern struct voyager_status voyager_status;
-
-/* functions exported by the voyager and voyager_smp modules */
-extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
-extern void voyager_cat_init(void);
-extern void voyager_detect(struct voyager_bios_info *);
-extern void voyager_trap_init(void);
-extern void voyager_setup_irqs(void);
-extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
-extern void voyager_smp_intr_init(void);
-extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
-extern void voyager_smp_dump(void);
-extern void voyager_timer_interrupt(void);
-extern void smp_local_timer_interrupt(void);
-extern void voyager_power_off(void);
-extern void smp_voyager_power_off(void *dummy);
-extern void voyager_restart(void);
-extern void voyager_cat_power_off(void);
-extern void voyager_cat_do_common_interrupt(void);
-extern void voyager_handle_nmi(void);
-/* Command codes for voyager_cat_psi(), declared below */
-#define VOYAGER_PSI_READ 0
-#define VOYAGER_PSI_WRITE 1
-#define VOYAGER_PSI_SUBREAD 2
-#define VOYAGER_PSI_SUBWRITE 3
-extern void voyager_cat_psi(__u8, __u16, __u8 *);
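A hedged example of driving the PSI through the accessor just declared, using the command and register values from this header; the function body is illustrative, not taken from the kernel:

#include <linux/kernel.h>

/* hypothetical: read the PSI status register and check one flag */
static void example_read_psi_status(void)
{
	__u8 status;

	voyager_cat_psi(VOYAGER_PSI_READ, VOYAGER_PSI_STATUS_REG, &status);
	if (status & PSI_ALARM)
		printk(KERN_WARNING "voyager: PSI alarm asserted\n");
}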
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 19144184983a..1df35417c412 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->flags);
}
-static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
-{
- regs->orig_ax = ~irq;
- do_IRQ(regs);
-}
-
#endif /* _ASM_X86_XEN_EVENTS_H */
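For context on the deletion: the removed helper encoded the IRQ as its one's complement in orig_ax before calling do_IRQ(). A sketch of the matching decode, illustrative only:

#include <asm/ptrace.h>

/* hypothetical decode mirroring the removed 'orig_ax = ~irq' encoding */
static unsigned int example_irq_from_regs(struct pt_regs *regs)
{
	return ~regs->orig_ax;
}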
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 81fbd735aec4..d5b7e90c0edf 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -38,22 +38,30 @@ extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
enum xen_domain_type {
- XEN_NATIVE,
- XEN_PV_DOMAIN,
- XEN_HVM_DOMAIN,
+ XEN_NATIVE, /* running on bare hardware */
+ XEN_PV_DOMAIN, /* running in a PV domain */
+ XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
};
-extern enum xen_domain_type xen_domain_type;
-
#ifdef CONFIG_XEN
-#define xen_domain() (xen_domain_type != XEN_NATIVE)
+extern enum xen_domain_type xen_domain_type;
#else
-#define xen_domain() (0)
+#define xen_domain_type XEN_NATIVE
#endif
-#define xen_pv_domain() (xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_domain() (xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain() (xen_domain() && \
+ xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain() (xen_domain() && \
+ xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/interface/xen.h>
-#define xen_initial_domain() (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_initial_domain() (xen_pv_domain() && \
+ xen_start_info->flags & SIF_INITDOMAIN)
+#else /* !CONFIG_XEN_DOM0 */
+#define xen_initial_domain() (0)
+#endif /* CONFIG_XEN_DOM0 */
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
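A hypothetical caller showing the effect of the reshuffle: with CONFIG_XEN disabled, xen_domain_type is the constant XEN_NATIVE, so every predicate folds to zero at compile time and the dead branches drop out:

#include <linux/kernel.h>
#include <asm/xen/hypervisor.h>

/* illustrative: report which of the three domain types we run in */
static void example_report_domain(void)
{
	if (xen_pv_domain())
		printk(KERN_INFO "running as a Xen PV guest\n");
	else if (xen_hvm_domain())
		printk(KERN_INFO "running as a Xen HVM guest\n");
	else
		printk(KERN_INFO "running on bare hardware\n");
}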