author     Len Brown <len.brown@intel.com>   2006-06-29 19:57:46 -0400
committer  Len Brown <len.brown@intel.com>   2006-06-29 19:57:46 -0400
commit     d120cfb544ed6161b9d32fb6c4648c471807ee6b (patch)
tree       7757ad0198d8df76ff5c60f939a687687c41da00 /arch/i386/kernel
parent     9dce0e950dbfab4148f35ac6f297d8638cdc63c4 (diff)
parent     bf7e8511088963078484132636839b59e25cf14f (diff)
merge linus into release branch
Conflicts:
drivers/acpi/acpi_memhotplug.c
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--   arch/i386/kernel/asm-offsets.c          |   4
-rw-r--r--   arch/i386/kernel/cpu/amd.c              |   6
-rw-r--r--   arch/i386/kernel/cpu/common.c           |  25
-rw-r--r--   arch/i386/kernel/cpu/intel_cacheinfo.c  |  16
-rw-r--r--   arch/i386/kernel/cpu/proc.c             |   8
-rw-r--r--   arch/i386/kernel/cpuid.c                |   2
-rw-r--r--   arch/i386/kernel/efi.c                  |   6
-rw-r--r--   arch/i386/kernel/entry.S                |  22
-rw-r--r--   arch/i386/kernel/i8259.c                |   6
-rw-r--r--   arch/i386/kernel/io_apic.c              |  27
-rw-r--r--   arch/i386/kernel/irq.c                  |  27
-rw-r--r--   arch/i386/kernel/microcode.c            |   1
-rw-r--r--   arch/i386/kernel/msr.c                  |   2
-rw-r--r--   arch/i386/kernel/scx200.c               |  66
-rw-r--r--   arch/i386/kernel/setup.c                |   2
-rw-r--r--   arch/i386/kernel/signal.c               |   4
-rw-r--r--   arch/i386/kernel/smpboot.c              |  37
-rw-r--r--   arch/i386/kernel/sysenter.c             | 126
-rw-r--r--   arch/i386/kernel/topology.c             |  28
-rw-r--r--   arch/i386/kernel/vsyscall-sysenter.S    |   4
-rw-r--r--   arch/i386/kernel/vsyscall.lds.S         |   4
21 files changed, 270 insertions(+), 153 deletions(-)
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c index 1c3a809e6421..c80271f8f084 100644 --- a/arch/i386/kernel/asm-offsets.c +++ b/arch/i386/kernel/asm-offsets.c @@ -14,6 +14,7 @@ #include <asm/fixmap.h> #include <asm/processor.h> #include <asm/thread_info.h> +#include <asm/elf.h> #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) @@ -54,6 +55,7 @@ void foo(void) OFFSET(TI_preempt_count, thread_info, preempt_count); OFFSET(TI_addr_limit, thread_info, addr_limit); OFFSET(TI_restart_block, thread_info, restart_block); + OFFSET(TI_sysenter_return, thread_info, sysenter_return); BLANK(); OFFSET(EXEC_DOMAIN_handler, exec_domain, handler); @@ -69,7 +71,7 @@ void foo(void) sizeof(struct tss_struct)); DEFINE(PAGE_SIZE_asm, PAGE_SIZE); - DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); + DEFINE(VDSO_PRELINK, VDSO_PRELINK); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); } diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index fd0457c9c827..e6a2d6b80cda 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -235,10 +235,10 @@ static void __init init_amd(struct cpuinfo_x86 *c) while ((1 << bits) < c->x86_max_cores) bits++; } - cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); - phys_proc_id[cpu] >>= bits; + c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1); + c->phys_proc_id >>= bits; printk(KERN_INFO "CPU %d(%d) -> Core %d\n", - cpu, c->x86_max_cores, cpu_core_id[cpu]); + cpu, c->x86_max_cores, c->cpu_core_id); } #endif diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 44f2c5f2dda1..70c87de582c7 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; c->x86_mask = tfms & 15; -#ifdef CONFIG_SMP +#ifdef CONFIG_X86_HT c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0); #else c->apicid = (ebx >> 24) & 0xFF; @@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c) early_intel_workaround(c); #ifdef CONFIG_X86_HT - phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff; + c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; #endif } @@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; int index_msb, core_bits; - int cpu = smp_processor_id(); cpuid(1, &eax, &ebx, &ecx, &edx); - if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) return; @@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) } else if (smp_num_siblings > 1 ) { if (smp_num_siblings > NR_CPUS) { - printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); + printk(KERN_WARNING "CPU: Unsupported number of the " + "siblings %d", smp_num_siblings); smp_num_siblings = 1; return; } index_msb = get_count_order(smp_num_siblings); - phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); + c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); printk(KERN_INFO "CPU: Physical Processor ID: %d\n", - phys_proc_id[cpu]); + c->phys_proc_id); smp_num_siblings = smp_num_siblings / c->x86_max_cores; @@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c) core_bits = get_count_order(c->x86_max_cores); - cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & + c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) & ((1 << core_bits) - 1); if (c->x86_max_cores > 1) printk(KERN_INFO "CPU: Processor Core ID: 
%d\n", - cpu_core_id[cpu]); + c->cpu_core_id); } } #endif @@ -613,6 +612,12 @@ void __cpuinit cpu_init(void) set_in_cr4(X86_CR4_TSD); } + /* The CPU hotplug case */ + if (cpu_gdt_descr->address) { + gdt = (struct desc_struct *)cpu_gdt_descr->address; + memset(gdt, 0, PAGE_SIZE); + goto old_gdt; + } /* * This is a horrible hack to allocate the GDT. The problem * is that cpu_init() is called really early for the boot CPU @@ -631,7 +636,7 @@ void __cpuinit cpu_init(void) local_irq_enable(); } } - +old_gdt: /* * Initialize the per-CPU GDT with the boot GDT, * and set up the GDT descriptor: diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c index 6c37b4fd8ce2..e9f0b928b0a9 100644 --- a/arch/i386/kernel/cpu/intel_cacheinfo.c +++ b/arch/i386/kernel/cpu/intel_cacheinfo.c @@ -159,13 +159,13 @@ union l2_cache { unsigned val; }; -static unsigned short assocs[] = { +static const unsigned short assocs[] = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xf] = 0xffff // ?? }; -static unsigned char levels[] = { 1, 1, 2 }; -static unsigned char types[] = { 1, 2, 3 }; +static const unsigned char levels[] = { 1, 1, 2 }; +static const unsigned char types[] = { 1, 2, 3 }; static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, @@ -261,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; -#ifdef CONFIG_SMP +#ifdef CONFIG_X86_HT unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); #endif @@ -383,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) if (new_l2) { l2 = new_l2; -#ifdef CONFIG_SMP +#ifdef CONFIG_X86_HT cpu_llc_id[cpu] = l2_id; #endif } if (new_l3) { l3 = new_l3; -#ifdef CONFIG_SMP +#ifdef CONFIG_X86_HT cpu_llc_id[cpu] = l3_id; #endif } @@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev) return; } -static int cacheinfo_cpu_callback(struct notifier_block *nfb, +static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; @@ -747,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block cacheinfo_cpu_notifier = +static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { .notifier_call = cacheinfo_cpu_callback, }; diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c index a19fcb262dbb..f54a15268ed7 100644 --- a/arch/i386/kernel/cpu/proc.c +++ b/arch/i386/kernel/cpu/proc.c @@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) * applications want to get the raw CPUID data, they should access * /dev/cpu/<cpu_nr>/cpuid instead. 
*/ - static char *x86_cap_flags[] = { + static const char * const x86_cap_flags[] = { /* Intel-defined */ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov", @@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; - static char *x86_power_flags[] = { + static const char * const x86_power_flags[] = { "ts", /* temperature sensor */ "fid", /* frequency id control */ "vid", /* voltage id control */ @@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); #ifdef CONFIG_X86_HT if (c->x86_max_cores * smp_num_siblings > 1) { - seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n])); - seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); } #endif diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c index 1d9a4abcdfc7..f6dfa9fb675c 100644 --- a/arch/i386/kernel/cpuid.c +++ b/arch/i386/kernel/cpuid.c @@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac return NOTIFY_OK; } -static struct notifier_block cpuid_class_cpu_notifier = +static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier = { .notifier_call = cpuid_class_cpu_callback, }; diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c index 9202b67c4b2e..8beb0f07d999 100644 --- a/arch/i386/kernel/efi.c +++ b/arch/i386/kernel/efi.c @@ -601,8 +601,10 @@ efi_initialize_iomem_resources(struct resource *code_resource, res->end = res->start + ((md->num_pages << EFI_PAGE_SHIFT) - 1); res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; if (request_resource(&iomem_resource, res) < 0) - printk(KERN_ERR PFX "Failed to allocate res %s : 0x%lx-0x%lx\n", - res->name, res->start, res->end); + printk(KERN_ERR PFX "Failed to allocate res %s : " + "0x%llx-0x%llx\n", res->name, + (unsigned long long)res->start, + (unsigned long long)res->end); /* * We don't know which region contains kernel data so we try * it repeatedly and let the resource manager test it. diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index e6e4506e749a..fbdb933251b6 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -83,6 +83,12 @@ VM_MASK = 0x00020000 #define resume_kernel restore_nocheck #endif +#ifdef CONFIG_VM86 +#define resume_userspace_sig check_userspace +#else +#define resume_userspace_sig resume_userspace +#endif + #define SAVE_ALL \ cld; \ pushl %es; \ @@ -211,6 +217,7 @@ ret_from_exception: preempt_stop ret_from_intr: GET_THREAD_INFO(%ebp) +check_userspace: movl EFLAGS(%esp), %eax # mix EFLAGS and CS movb CS(%esp), %al testl $(VM_MASK | 3), %eax @@ -263,7 +270,12 @@ sysenter_past_esp: pushl $(__USER_CS) CFI_ADJUST_CFA_OFFSET 4 /*CFI_REL_OFFSET cs, 0*/ - pushl $SYSENTER_RETURN + /* + * Push current_thread_info()->sysenter_return to the stack. + * A tiny bit of offset fixup is necessary - 4*4 means the 4 words + * pushed above; +8 corresponds to copy_thread's esp0 setting. 
+ */ + pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) CFI_ADJUST_CFA_OFFSET 4 CFI_REL_OFFSET eip, 0 @@ -415,7 +427,7 @@ work_notifysig: # deal with pending signals and # vm86-space xorl %edx, %edx call do_notify_resume - jmp resume_userspace + jmp resume_userspace_sig ALIGN work_notifysig_v86: @@ -428,7 +440,7 @@ work_notifysig_v86: movl %eax, %esp xorl %edx, %edx call do_notify_resume - jmp resume_userspace + jmp resume_userspace_sig #endif # perform syscall exit tracing @@ -515,7 +527,7 @@ ENTRY(irq_entries_start) .if vector CFI_ADJUST_CFA_OFFSET -4 .endif -1: pushl $vector-256 +1: pushl $~(vector) CFI_ADJUST_CFA_OFFSET 4 jmp common_interrupt .data @@ -535,7 +547,7 @@ common_interrupt: #define BUILD_INTERRUPT(name, nr) \ ENTRY(name) \ RING0_INT_FRAME; \ - pushl $nr-256; \ + pushl $~(nr); \ CFI_ADJUST_CFA_OFFSET 4; \ SAVE_ALL; \ movl %esp,%eax; \ diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c index c1a42feba286..3c6063671a9f 100644 --- a/arch/i386/kernel/i8259.c +++ b/arch/i386/kernel/i8259.c @@ -132,7 +132,7 @@ void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); io_apic_irqs &= ~(1<<irq); - irq_desc[irq].handler = &i8259A_irq_type; + irq_desc[irq].chip = &i8259A_irq_type; enable_irq(irq); } @@ -386,12 +386,12 @@ void __init init_ISA_irqs (void) /* * 16 old-style INTA-cycle interrupts: */ - irq_desc[i].handler = &i8259A_irq_type; + irq_desc[i].chip = &i8259A_irq_type; } else { /* * 'high' PCI IRQs filled in on demand */ - irq_desc[i].handler = &no_irq_type; + irq_desc[i].chip = &no_irq_type; } } } diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 72ae414e4d49..ec9ea0269d36 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -581,7 +581,7 @@ static int balanced_irq(void *unused) /* push everything to CPU 0 to give us a starting point. */ for (i = 0 ; i < NR_IRQS ; i++) { - pending_irq_cpumask[i] = cpumask_of_cpu(0); + irq_desc[i].pending_mask = cpumask_of_cpu(0); set_pending_irq(i, cpumask_of_cpu(0)); } @@ -1205,15 +1205,17 @@ static struct hw_interrupt_type ioapic_edge_type; #define IOAPIC_EDGE 0 #define IOAPIC_LEVEL 1 -static inline void ioapic_register_intr(int irq, int vector, unsigned long trigger) +static void ioapic_register_intr(int irq, int vector, unsigned long trigger) { - unsigned idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq; + unsigned idx; + + idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq; if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || trigger == IOAPIC_LEVEL) - irq_desc[idx].handler = &ioapic_level_type; + irq_desc[idx].chip = &ioapic_level_type; else - irq_desc[idx].handler = &ioapic_edge_type; + irq_desc[idx].chip = &ioapic_edge_type; set_intr_gate(vector, interrupt[idx]); } @@ -1325,7 +1327,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in * The timer IRQ doesn't have to know that behind the * scene we have a 8259A-master in AEOI mode ... */ - irq_desc[0].handler = &ioapic_edge_type; + irq_desc[0].chip = &ioapic_edge_type; /* * Add it to the IO-APIC irq-routing table: @@ -2069,6 +2071,13 @@ static void set_ioapic_affinity_vector (unsigned int vector, #endif #endif +static int ioapic_retrigger(unsigned int irq) +{ + send_IPI_self(IO_APIC_VECTOR(irq)); + + return 1; +} + /* * Level and edge triggered IO-APIC interrupts need different handling, * so we use two separate IRQ descriptors. 
Edge triggered IRQs can be @@ -2088,6 +2097,7 @@ static struct hw_interrupt_type ioapic_edge_type __read_mostly = { #ifdef CONFIG_SMP .set_affinity = set_ioapic_affinity, #endif + .retrigger = ioapic_retrigger, }; static struct hw_interrupt_type ioapic_level_type __read_mostly = { @@ -2101,6 +2111,7 @@ static struct hw_interrupt_type ioapic_level_type __read_mostly = { #ifdef CONFIG_SMP .set_affinity = set_ioapic_affinity, #endif + .retrigger = ioapic_retrigger, }; static inline void init_IO_APIC_traps(void) @@ -2135,7 +2146,7 @@ static inline void init_IO_APIC_traps(void) make_8259A_irq(irq); else /* Strange. Oh, well.. */ - irq_desc[irq].handler = &no_irq_type; + irq_desc[irq].chip = &no_irq_type; } } } @@ -2351,7 +2362,7 @@ static inline void check_timer(void) printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); disable_8259A_irq(0); - irq_desc[0].handler = &lapic_irq_type; + irq_desc[0].chip = &lapic_irq_type; apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ enable_8259A_irq(0); diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index 061533e0cb5e..16b491703967 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c @@ -53,13 +53,19 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; */ fastcall unsigned int do_IRQ(struct pt_regs *regs) { - /* high bits used in ret_from_ code */ - int irq = regs->orig_eax & 0xff; + /* high bit used in ret_from_ code */ + int irq = ~regs->orig_eax; #ifdef CONFIG_4KSTACKS union irq_ctx *curctx, *irqctx; u32 *isp; #endif + if (unlikely((unsigned)irq >= NR_IRQS)) { + printk(KERN_EMERG "%s: cannot handle IRQ %d\n", + __FUNCTION__, irq); + BUG(); + } + irq_enter(); #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ @@ -76,6 +82,10 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) } #endif + if (!irq_desc[irq].handle_irq) { + __do_IRQ(irq, regs); + goto out_exit; + } #ifdef CONFIG_4KSTACKS curctx = (union irq_ctx *) current_thread_info(); @@ -100,8 +110,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) * softirq checks work in the hardirq context. 
*/ irqctx->tinfo.preempt_count = - irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK | - curctx->tinfo.preempt_count & SOFTIRQ_MASK; + (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | + (curctx->tinfo.preempt_count & SOFTIRQ_MASK); asm volatile( " xchgl %%ebx,%%esp \n" @@ -115,6 +125,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs) #endif __do_IRQ(irq, regs); +out_exit: irq_exit(); return 1; @@ -243,7 +254,7 @@ int show_interrupts(struct seq_file *p, void *v) for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); #endif - seq_printf(p, " %14s", irq_desc[i].handler->typename); + seq_printf(p, " %14s", irq_desc[i].chip->typename); seq_printf(p, " %s", action->name); for (action=action->next; action; action = action->next) @@ -285,13 +296,13 @@ void fixup_irqs(cpumask_t map) if (irq == 2) continue; - cpus_and(mask, irq_affinity[irq], map); + cpus_and(mask, irq_desc[irq].affinity, map); if (any_online_cpu(mask) == NR_CPUS) { printk("Breaking affinity for irq %i\n", irq); mask = map; } - if (irq_desc[irq].handler->set_affinity) - irq_desc[irq].handler->set_affinity(irq, mask); + if (irq_desc[irq].chip->set_affinity) + irq_desc[irq].chip->set_affinity(irq, mask); else if (irq_desc[irq].action && !(warned++)) printk("Cannot set affinity for irq %i\n", irq); } diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index 0a865889b2a9..40b44cc0d14b 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c @@ -493,7 +493,6 @@ static struct file_operations microcode_fops = { static struct miscdevice microcode_dev = { .minor = MICROCODE_MINOR, .name = "microcode", - .devfs_name = "cpu/microcode", .fops = µcode_fops, }; diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c index 7a328230e540..d022cb8fd725 100644 --- a/arch/i386/kernel/msr.c +++ b/arch/i386/kernel/msr.c @@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti return NOTIFY_OK; } -static struct notifier_block msr_class_cpu_notifier = +static struct notifier_block __cpuinitdata msr_class_cpu_notifier = { .notifier_call = msr_class_cpu_callback, }; diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c index 321f5fd26e75..9bf590cefc7d 100644 --- a/arch/i386/kernel/scx200.c +++ b/arch/i386/kernel/scx200.c @@ -9,6 +9,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/scx200.h> @@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = { .probe = scx200_probe, }; -static DEFINE_SPINLOCK(scx200_gpio_config_lock); +static DEFINE_MUTEX(scx200_gpio_config_lock); -static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static void __devinit scx200_init_shadow(void) { int bank; + + /* read the current values driven on the GPIO signals */ + for (bank = 0; bank < 2; ++bank) + scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank); +} + +static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ unsigned base; if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE || @@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_ } scx200_gpio_base = base; - - /* read the current values driven on the GPIO signals */ - for (bank = 0; bank < 2; ++bank) - scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank); + scx200_init_shadow(); } else { /* find the base of the Configuration Block */ @@ -87,12 +93,11 @@ static int __devinit 
scx200_probe(struct pci_dev *pdev, const struct pci_device_ return 0; } -u32 scx200_gpio_configure(int index, u32 mask, u32 bits) +u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits) { u32 config, new_config; - unsigned long flags; - spin_lock_irqsave(&scx200_gpio_config_lock, flags); + mutex_lock(&scx200_gpio_config_lock); outl(index, scx200_gpio_base + 0x20); config = inl(scx200_gpio_base + 0x24); @@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits) new_config = (config & mask) | bits; outl(new_config, scx200_gpio_base + 0x24); - spin_unlock_irqrestore(&scx200_gpio_config_lock, flags); + mutex_unlock(&scx200_gpio_config_lock); return config; } -#if 0 -void scx200_gpio_dump(unsigned index) -{ - u32 config = scx200_gpio_configure(index, ~0, 0); - printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config); - - if (config & 1) - printk(" OE"); /* output enabled */ - else - printk(" TS"); /* tristate */ - if (config & 2) - printk(" PP"); /* push pull */ - else - printk(" OD"); /* open drain */ - if (config & 4) - printk(" PUE"); /* pull up enabled */ - else - printk(" PUD"); /* pull up disabled */ - if (config & 8) - printk(" LOCKED"); /* locked */ - if (config & 16) - printk(" LEVEL"); /* level input */ - else - printk(" EDGE"); /* edge input */ - if (config & 32) - printk(" HI"); /* trigger on rising edge */ - else - printk(" LO"); /* trigger on falling edge */ - if (config & 64) - printk(" DEBOUNCE"); /* debounce */ - printk("\n"); -} -#endif /* 0 */ - static int __init scx200_init(void) { printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n"); @@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base); EXPORT_SYMBOL(scx200_gpio_shadow); EXPORT_SYMBOL(scx200_gpio_configure); EXPORT_SYMBOL(scx200_cb_base); - -/* - Local variables: - compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules" - c-basic-offset: 8 - End: -*/ diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 4a65040cc624..6712f0d2eb37 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -1314,8 +1314,10 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat probe_roms(); for (i = 0; i < e820.nr_map; i++) { struct resource *res; +#ifndef CONFIG_RESOURCES_64BIT if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) continue; +#endif res = kzalloc(sizeof(struct resource), GFP_ATOMIC); switch (e820.map[i].type) { case E820_RAM: res->name = "System RAM"; break; diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 5c352c3a9e7f..43002cfb40c4 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, goto give_sigsegv; } - restorer = &__kernel_sigreturn; + restorer = (void *)VDSO_SYM(&__kernel_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; @@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, goto give_sigsegv; /* Set up to return from userspace. 
*/ - restorer = &__kernel_rt_sigreturn; + restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index bce5470ecb42..89e7315e539c 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -67,12 +67,6 @@ int smp_num_siblings = 1; EXPORT_SYMBOL(smp_num_siblings); #endif -/* Package ID of each logical CPU */ -int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; - -/* Core ID of each logical CPU */ -int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID}; - /* Last level cache ID of each logical CPU */ int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; @@ -454,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu) struct cpuinfo_x86 *c = cpu_data + cpu; /* * For perf, we return last level cache shared map. - * TBD: when power saving sched policy is added, we will return - * cpu_core_map when power saving policy is enabled + * And for power savings, we return cpu_core_map */ - return c->llc_shared_map; + if (sched_mc_power_savings || sched_smt_power_savings) + return cpu_core_map[cpu]; + else + return c->llc_shared_map; } /* representing cpus for which sibling maps can be computed */ @@ -473,8 +469,8 @@ set_cpu_sibling_map(int cpu) if (smp_num_siblings > 1) { for_each_cpu_mask(i, cpu_sibling_setup_map) { - if (phys_proc_id[cpu] == phys_proc_id[i] && - cpu_core_id[cpu] == cpu_core_id[i]) { + if (c[cpu].phys_proc_id == c[i].phys_proc_id && + c[cpu].cpu_core_id == c[i].cpu_core_id) { cpu_set(i, cpu_sibling_map[cpu]); cpu_set(cpu, cpu_sibling_map[i]); cpu_set(i, cpu_core_map[cpu]); @@ -501,7 +497,7 @@ set_cpu_sibling_map(int cpu) cpu_set(i, c[cpu].llc_shared_map); cpu_set(cpu, c[i].llc_shared_map); } - if (phys_proc_id[cpu] == phys_proc_id[i]) { + if (c[cpu].phys_proc_id == c[i].phys_proc_id) { cpu_set(i, cpu_core_map[cpu]); cpu_set(cpu, cpu_core_map[i]); /* @@ -1056,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) struct warm_boot_cpu_info info; struct work_struct task; int apicid, ret; + struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); apicid = x86_cpu_to_apicid[cpu]; if (apicid == BAD_APICID) { @@ -1063,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu) goto exit; } + /* + * the CPU isn't initialized at boot time, allocate gdt table here. 
+ * cpu_init will initialize it + */ + if (!cpu_gdt_descr->address) { + cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL); + if (!cpu_gdt_descr->address) + printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); + ret = -ENOMEM; + goto exit; + } + info.complete = &done; info.apicid = apicid; info.cpu = cpu; @@ -1340,8 +1349,8 @@ remove_siblinginfo(int cpu) cpu_clear(cpu, cpu_sibling_map[sibling]); cpus_clear(cpu_sibling_map[cpu]); cpus_clear(cpu_core_map[cpu]); - phys_proc_id[cpu] = BAD_APICID; - cpu_core_id[cpu] = BAD_APICID; + c[cpu].phys_proc_id = 0; + c[cpu].cpu_core_id = 0; cpu_clear(cpu, cpu_sibling_setup_map); } diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c index 0bada1870bdf..713ba39d32c6 100644 --- a/arch/i386/kernel/sysenter.c +++ b/arch/i386/kernel/sysenter.c @@ -2,6 +2,8 @@ * linux/arch/i386/kernel/sysenter.c * * (C) Copyright 2002 Linus Torvalds + * Portions based on the vdso-randomization code from exec-shield: + * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar * * This file contains the needed initializations to support sysenter. */ @@ -13,12 +15,31 @@ #include <linux/gfp.h> #include <linux/string.h> #include <linux/elf.h> +#include <linux/mm.h> +#include <linux/module.h> #include <asm/cpufeature.h> #include <asm/msr.h> #include <asm/pgtable.h> #include <asm/unistd.h> +/* + * Should the kernel map a VDSO page into processes and pass its + * address down to glibc upon exec()? + */ +unsigned int __read_mostly vdso_enabled = 1; + +EXPORT_SYMBOL_GPL(vdso_enabled); + +static int __init vdso_setup(char *s) +{ + vdso_enabled = simple_strtoul(s, NULL, 0); + + return 1; +} + +__setup("vdso=", vdso_setup); + extern asmlinkage void sysenter_entry(void); void enable_sep_cpu(void) @@ -45,23 +66,120 @@ void enable_sep_cpu(void) */ extern const char vsyscall_int80_start, vsyscall_int80_end; extern const char vsyscall_sysenter_start, vsyscall_sysenter_end; +static void *syscall_page; int __init sysenter_setup(void) { - void *page = (void *)get_zeroed_page(GFP_ATOMIC); + syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); - __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC); +#ifdef CONFIG_COMPAT_VDSO + __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY); + printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO)); +#else + /* + * In the non-compat case the ELF coredumping code needs the fixmap: + */ + __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO); +#endif if (!boot_cpu_has(X86_FEATURE_SEP)) { - memcpy(page, + memcpy(syscall_page, &vsyscall_int80_start, &vsyscall_int80_end - &vsyscall_int80_start); return 0; } - memcpy(page, + memcpy(syscall_page, &vsyscall_sysenter_start, &vsyscall_sysenter_end - &vsyscall_sysenter_start); return 0; } + +static struct page *syscall_nopage(struct vm_area_struct *vma, + unsigned long adr, int *type) +{ + struct page *p = virt_to_page(adr - vma->vm_start + syscall_page); + get_page(p); + return p; +} + +/* Prevent VMA merging */ +static void syscall_vma_close(struct vm_area_struct *vma) +{ +} + +static struct vm_operations_struct syscall_vm_ops = { + .close = syscall_vma_close, + .nopage = syscall_nopage, +}; + +/* Defined in vsyscall-sysenter.S */ +extern void SYSENTER_RETURN; + +/* Setup a VMA at program startup for the vsyscall page */ +int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr; + int ret; + + down_write(&mm->mmap_sem); + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 
0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); + if (!vma) { + ret = -ENOMEM; + goto up_fail; + } + + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + /* MAYWRITE to allow gdb to COW and set breakpoints */ + vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE; + vma->vm_flags |= mm->def_flags; + vma->vm_page_prot = protection_map[vma->vm_flags & 7]; + vma->vm_ops = &syscall_vm_ops; + vma->vm_mm = mm; + + ret = insert_vm_struct(mm, vma); + if (unlikely(ret)) { + kmem_cache_free(vm_area_cachep, vma); + goto up_fail; + } + + current->mm->context.vdso = (void *)addr; + current_thread_info()->sysenter_return = + (void *)VDSO_SYM(&SYSENTER_RETURN); + mm->total_vm++; +up_fail: + up_write(&mm->mmap_sem); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + return NULL; +} + +struct vm_area_struct *get_gate_vma(struct task_struct *tsk) +{ + return NULL; +} + +int in_gate_area(struct task_struct *task, unsigned long addr) +{ + return 0; +} + +int in_gate_area_no_task(unsigned long addr) +{ + return 0; +} diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c index 296355292c7c..e2e281d4bcc8 100644 --- a/arch/i386/kernel/topology.c +++ b/arch/i386/kernel/topology.c @@ -32,15 +32,8 @@ static struct i386_cpu cpu_devices[NR_CPUS]; -int arch_register_cpu(int num){ - struct node *parent = NULL; - -#ifdef CONFIG_NUMA - int node = cpu_to_node(num); - if (node_online(node)) - parent = &node_devices[node].node; -#endif /* CONFIG_NUMA */ - +int arch_register_cpu(int num) +{ /* * CPU0 cannot be offlined due to several * restrictions and assumptions in kernel. This basically @@ -50,21 +43,13 @@ int arch_register_cpu(int num){ if (!num) cpu_devices[num].cpu.no_control = 1; - return register_cpu(&cpu_devices[num].cpu, num, parent); + return register_cpu(&cpu_devices[num].cpu, num); } #ifdef CONFIG_HOTPLUG_CPU void arch_unregister_cpu(int num) { - struct node *parent = NULL; - -#ifdef CONFIG_NUMA - int node = cpu_to_node(num); - if (node_online(node)) - parent = &node_devices[node].node; -#endif /* CONFIG_NUMA */ - - return unregister_cpu(&cpu_devices[num].cpu, parent); + return unregister_cpu(&cpu_devices[num].cpu); } EXPORT_SYMBOL(arch_register_cpu); EXPORT_SYMBOL(arch_unregister_cpu); @@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu); #ifdef CONFIG_NUMA #include <linux/mmzone.h> -#include <asm/node.h> - -struct i386_node node_devices[MAX_NUMNODES]; static int __init topology_init(void) { int i; for_each_online_node(i) - arch_register_node(i); + register_one_node(i); for_each_present_cpu(i) arch_register_cpu(i); diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 3b62baa6a371..1a36d26e15eb 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S @@ -42,10 +42,10 @@ __kernel_vsyscall: /* 7: align return point with nop's to make disassembly easier */ .space 7,0x90 - /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */ + /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ jmp .Lenter_kernel /* 16: System call normal return point is here! */ - .globl SYSENTER_RETURN /* Symbol used by entry.S. 
*/ + .globl SYSENTER_RETURN /* Symbol used by sysenter.c */ SYSENTER_RETURN: pop %ebp .Lpop_ebp: diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S index 98699ca6e52d..e26975fc68b6 100644 --- a/arch/i386/kernel/vsyscall.lds.S +++ b/arch/i386/kernel/vsyscall.lds.S @@ -7,7 +7,7 @@ SECTIONS { - . = VSYSCALL_BASE + SIZEOF_HEADERS; + . = VDSO_PRELINK + SIZEOF_HEADERS; .hash : { *(.hash) } :text .dynsym : { *(.dynsym) } @@ -20,7 +20,7 @@ SECTIONS For the layouts to match, we need to skip more than enough space for the dynamic symbol table et al. If this amount is insufficient, ld -shared will barf. Just increase it here. */ - . = VSYSCALL_BASE + 0x400; + . = VDSO_PRELINK + 0x400; .text : { *(.text) } :text =0x90909090 .note : { *(.note.*) } :text :note |